file_path
stringlengths
21
202
content
stringlengths
19
1.02M
size
int64
19
1.02M
lang
stringclasses
8 values
avg_line_length
float64
5.88
100
max_line_length
int64
12
993
alphanum_fraction
float64
0.27
0.93
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Transform.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Transform.cc /// /// @author FX R&D OpenVDB team #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/tools/VectorTransformer.h> // for transformVectors() #include <UT/UT_Interrupt.h> #include <hboost/math/constants/constants.hpp> #include <set> #include <sstream> #include <stdexcept> #include <string> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; class SOP_OpenVDB_Transform: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Transform(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Transform() override {} static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; }; void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Specify a subset of the input VDBs to be transformed.") .setDocumentation( "A subset of the input VDBs to be transformed" " (see [specifying volumes|/model/volumes#group])")); parms.add(hutil::ParmFactory(PRM_STRING, "xOrd", "Transform Order") .setDefault("tsr") ///< @todo Houdini default is "srt" .setChoiceList(&PRMtrsMenu) .setTypeExtended(PRM_TYPE_JOIN_PAIR) .setTooltip("The order in which transformations and rotations occur")); parms.add(hutil::ParmFactory( PRM_STRING | PRM_Type(PRM_Type::PRM_INTERFACE_LABEL_NONE), "rOrd", "") .setDefault("zyx") ///< @todo Houdini default is "xyz" .setChoiceList(&PRMxyzMenu)); parms.add(hutil::ParmFactory(PRM_XYZ_J, "t", "Translate") .setVectorSize(3) .setDefault(PRMzeroDefaults) .setDocumentation("The amount of translation along the _x_, _y_ and _z_ axes")); parms.add(hutil::ParmFactory(PRM_XYZ_J, "r", "Rotate") .setVectorSize(3) 
.setDefault(PRMzeroDefaults) .setDocumentation("The amount of rotation about the _x_, _y_ and _z_ axes")); parms.add(hutil::ParmFactory(PRM_XYZ_J, "s", "Scale") .setVectorSize(3) .setDefault(PRMoneDefaults) .setDocumentation("Nonuniform scaling along the _x_, _y_ and _z_ axes")); parms.add(hutil::ParmFactory(PRM_XYZ_J, "p", "Pivot") .setVectorSize(3) .setDefault(PRMzeroDefaults) .setDocumentation("The pivot point for scaling and rotation")); parms.add(hutil::ParmFactory(PRM_FLT_J, "uniformScale", "Uniform Scale") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_FREE, 10) .setDocumentation("Uniform scaling along all three axes")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "invert", "Invert Transformation") .setDefault(PRMzeroDefaults) .setDocumentation("Perform the inverse transformation.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "xformvectors", "Transform Vectors") .setDefault(PRMzeroDefaults) .setTooltip( "Apply the transform to the voxel values of vector-valued VDBs,\n" "in accordance with those VDBs' Vector Type attributes.\n") .setDocumentation( "Apply the transform to the voxel values of vector-valued VDBs," " in accordance with those VDBs' __Vector Type__ attributes (as set," " for example, with the [OpenVDB Create|Node:sop/DW_OpenVDBCreate] node).")); hvdb::OpenVDBOpFactory("VDB Transform", SOP_OpenVDB_Transform::factory, parms, *table) .setNativeName("") .addInput("VDBs to transform") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Transform::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Modify the transforms of VDB volumes.\"\"\"\n\ \n\ @overview\n\ \n\ This node modifies the transform associated with each input VDB volume.\n\ It is usually preferable to use Houdini's native [Transform|Node:sop/xform] node,\n\ except if you want to also transform the _values_ of a vector-valued VDB.\n\ \n\ @related\n\ - [Node:sop/xform]\n\ \n\ @examples\n\ \n\ See 
[openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } OP_Node* SOP_OpenVDB_Transform::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Transform(net, name, op); } SOP_OpenVDB_Transform::SOP_OpenVDB_Transform(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } namespace { // Functor for use with GEOvdbApply() to apply a transform // to the voxel values of vector-valued grids struct VecXformOp { openvdb::Mat4d mat; VecXformOp(const openvdb::Mat4d& _mat): mat(_mat) {} template<typename GridT> void operator()(GridT& grid) const { openvdb::tools::transformVectors(grid, mat); } }; } // unnamed namespace OP_ERROR SOP_OpenVDB_Transform::Cache::cookVDBSop(OP_Context& context) { try { using MapBase = openvdb::math::MapBase; using AffineMap = openvdb::math::AffineMap; using NonlinearFrustumMap = openvdb::math::NonlinearFrustumMap; using Transform = openvdb::math::Transform; const fpreal time = context.getTime(); // Get UI parameters openvdb::Vec3R t(evalVec3R("t", time)), r(evalVec3R("r", time)), s(evalVec3R("s", time)), p(evalVec3R("p", time)); s *= evalFloat("uniformScale", 0, time); const auto xformOrder = evalStdString("xOrd", time); const auto rotOrder = evalStdString("rOrd", time); const bool flagInverse = evalInt("invert", 0, time); const bool xformVec = evalInt("xformvectors", 0, time); const auto isValidOrder = [](const std::string& expected, const std::string& actual) { if (actual.size() != expected.size()) return false; using CharSet = std::set<std::string::value_type>; return (CharSet(actual.begin(), actual.end()) == CharSet(expected.begin(), expected.end())); }; if (!isValidOrder("rst", xformOrder)) { std::ostringstream mesg; mesg << "Invalid transform order \"" << xformOrder << "\"; expected \"tsr\", \"rst\", etc."; throw std::runtime_error(mesg.str()); } if (!isValidOrder("xyz", rotOrder)) { std::ostringstream mesg; mesg << "Invalid rotation order 
\"" << rotOrder << "\"; expected \"xyz\", \"zyx\", etc."; throw std::runtime_error(mesg.str()); } // Get the group of grids to be transformed. const GA_PrimitiveGroup* group = matchGroup(*gdp, evalStdString("group", time)); UT_AutoInterrupt progress("Transform"); // Build up the transform matrix from the UI parameters const double deg2rad = hboost::math::constants::pi<double>() / 180.0; openvdb::Mat4R mat(openvdb::Mat4R::identity()); const auto rotate = [&]() { for (auto axis = rotOrder.rbegin(); axis != rotOrder.rend(); ++axis) { switch (*axis) { case 'x': mat.preRotate(openvdb::math::X_AXIS, deg2rad*r[0]); break; case 'y': mat.preRotate(openvdb::math::Y_AXIS, deg2rad*r[1]); break; case 'z': mat.preRotate(openvdb::math::Z_AXIS, deg2rad*r[2]); break; } } }; if (xformOrder == "trs") { mat.preTranslate(p); mat.preScale(s); rotate(); mat.preTranslate(-p); mat.preTranslate(t); } else if (xformOrder == "tsr") { mat.preTranslate(p); rotate(); mat.preScale(s); mat.preTranslate(-p); mat.preTranslate(t); } else if (xformOrder == "rts") { mat.preTranslate(p); mat.preScale(s); mat.preTranslate(-p); mat.preTranslate(t); mat.preTranslate(p); rotate(); mat.preTranslate(-p); } else if (xformOrder == "rst") { mat.preTranslate(t); mat.preTranslate(p); mat.preScale(s); rotate(); mat.preTranslate(-p); } else if (xformOrder == "str") { mat.preTranslate(p); rotate(); mat.preTranslate(-p); mat.preTranslate(t); mat.preTranslate(p); mat.preScale(s); mat.preTranslate(-p); } else /*if (xformOrder == "srt")*/ { mat.preTranslate(t); mat.preTranslate(p); rotate(); mat.preScale(s); mat.preTranslate(-p); } if (flagInverse) mat = mat.inverse(); const VecXformOp xformOp(mat); // Construct an affine map. AffineMap map(mat); // For each VDB primitive in the given group... 
for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { if (progress.wasInterrupted()) throw std::runtime_error("Interrupted"); GU_PrimVDB* vdb = *it; // No need to make the grid unique at this point, since we might not need // to modify its voxel data. hvdb::Grid& grid = vdb->getGrid(); const auto& transform = grid.constTransform(); // Merge the transform's current affine representation with the new affine map. AffineMap::Ptr compound( new AffineMap(*transform.baseMap()->getAffineMap(), map)); // Simplify the affine compound map auto affineMap = openvdb::math::simplify(compound); Transform::Ptr newTransform; if (transform.isLinear()) { newTransform.reset(new Transform(affineMap)); } else { auto frustumMap = transform.constMap<NonlinearFrustumMap>(); if (!frustumMap) { throw std::runtime_error{"Unsupported non-linear map - " + transform.mapType()}; } // Create a new NonlinearFrustumMap that replaces the affine map with the transformed one. MapBase::Ptr newFrustumMap(new NonlinearFrustumMap( frustumMap->getBBox(), frustumMap->getTaper(), frustumMap->getDepth(), affineMap)); newTransform.reset(new Transform(newFrustumMap)); } // Replace the transform. grid.setTransform(newTransform); // Update the primitive's vertex position. /// @todo Need a simpler way to do this. hvdb::GridPtr copyOfGrid = grid.copyGrid(); copyOfGrid->setTransform(grid.constTransform().copy()); vdb->setGrid(*copyOfGrid); if (xformVec && vdb->getConstGrid().isInWorldSpace() && vdb->getConstGrid().getVectorType() != openvdb::VEC_INVARIANT) { // If (and only if) the grid is vector-valued, deep copy it, // then apply the transform to each voxel's value. hvdb::GEOvdbApply<hvdb::Vec3GridTypes>(*vdb, xformOp); } } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
11,597
C++
34.907121
106
0.596189
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SHOP_OpenVDB_Points.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file SHOP_OpenVDB_Points.cc /// /// @authors Dan Bailey, Richard Kwok /// /// @brief The Delayed Load Procedural SHOP for OpenVDB Points. #include <UT/UT_DSOVersion.h> #include <UT/UT_Version.h> #include <UT/UT_Ramp.h> #include <OP/OP_OperatorTable.h> #include <SHOP/SHOP_Node.h> #include <SHOP/SHOP_Operator.h> #include <PRM/PRM_Include.h> #include <houdini_utils/ParmFactory.h> #include <sstream> namespace hutil = houdini_utils; class SHOP_OpenVDB_Points : public SHOP_Node { public: static const char* nodeName() { return "openvdb_points"; } SHOP_OpenVDB_Points(OP_Network *parent, const char *name, OP_Operator *entry); ~SHOP_OpenVDB_Points() override = default; static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); bool buildShaderString(UT_String &result, fpreal now, const UT_Options *options, OP_Node *obj=0, OP_Node *sop=0, SHOP_TYPE interpretType = SHOP_INVALID) override; protected: OP_ERROR cookMe(OP_Context&) override; bool updateParmsFlags() override; }; // class SHOP_OpenVDB_Points //////////////////////////////////////// OP_Node* SHOP_OpenVDB_Points::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SHOP_OpenVDB_Points(net, name, op); } SHOP_OpenVDB_Points::SHOP_OpenVDB_Points(OP_Network *parent, const char *name, OP_Operator *entry) : SHOP_Node(parent, name, entry, SHOP_GEOMETRY) { } bool SHOP_OpenVDB_Points::buildShaderString(UT_String &result, fpreal now, const UT_Options*, OP_Node*, OP_Node*, SHOP_TYPE) { UT_String fileStr = ""; evalString(fileStr, "file", 0, now); UT_String groupMaskStr = ""; evalString(groupMaskStr, "groupmask", 0, now); UT_String attrMaskStr = ""; evalString(attrMaskStr, "attrmask", 0, now); std::stringstream ss; ss << SHOP_OpenVDB_Points::nodeName(); ss << " file \"" << fileStr.toStdString() << "\""; ss << " streamdata " << evalInt("streamdata", 0, now); ss << " groupmask \"" << groupMaskStr.toStdString() << 
"\""; ss << " attrmask \"" << attrMaskStr.toStdString() << "\""; ss << " speedtocolor " << evalInt("speedtocolor", 0, now); ss << " maxspeed " << evalFloat("maxspeed", 0, now); // write the speed/color ramp into the ifd UT_Ramp ramp; updateRampFromMultiParm(now, getParm("function"), ramp); ss << " ramp \""; for(int n = 0, N = ramp.getNodeCount(); n < N; n++){ const UT_ColorNode* rampNode = ramp.getNode(n); ss << rampNode->t << " "; ss << rampNode->rgba.r << " " << rampNode->rgba.g << " " << rampNode->rgba.b << " "; ss << static_cast<int>(rampNode->basis) << " "; } ss << "\""; result = ss.str(); return true; } OP_ERROR SHOP_OpenVDB_Points::cookMe(OP_Context& context) { return SHOP_Node::cookMe(context); } bool SHOP_OpenVDB_Points::updateParmsFlags() { bool changed = false; const bool speedToColor = evalInt("speedtocolor", 0, 0); changed |= enableParm("sep1", speedToColor); changed |= setVisibleState("sep1", speedToColor); changed |= enableParm("maxspeed", speedToColor); changed |= setVisibleState("maxspeed", speedToColor); changed |= enableParm("function", speedToColor); changed |= setVisibleState("function", speedToColor); return changed; } //////////////////////////////////////// // Build UI and register this operator. void newShopOperator(OP_OperatorTable *table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_FILE, "file", "File") .setDefault("./filename.vdb") .setHelpText("File path to the VDB to load.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "streamdata", "Stream Data for Maximum Memory Efficiency") .setDefault(PRMoneDefaults) .setHelpText( "Stream the data from disk to keep the memory footprint as small as possible." 
" This will make the initial conversion marginally slower because the data" " will be loaded twice, once for pre-computation to evaluate the bounding box" " and once for the actual conversion.")); parms.add(hutil::ParmFactory(PRM_STRING, "groupmask", "Group Mask") .setDefault("") .setHelpText("Specify VDB Points Groups to use. (Default is all groups)")); parms.add(hutil::ParmFactory(PRM_STRING, "attrmask", "Attribute Mask") .setDefault("") .setHelpText("Specify VDB Points Attributes to use. (Default is all attributes)")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "speedtocolor", "Map Speed To Color") .setDefault(PRMzeroDefaults) .setHelpText( "Replaces the 'Cd' point attribute with colors mapped from the" " 'v' point attribute using a ramp.")); parms.add(hutil::ParmFactory(PRM_SEPARATOR, "sep1", "")); parms.add(hutil::ParmFactory(PRM_FLT_J, "maxspeed", "Max Speed") .setDefault(1.0f) .setHelpText("Reference for 1.0 on the color gradient.")); parms.add(hutil::ParmFactory(PRM_MULTITYPE_RAMP_RGB, "function", "Speed to Color Function") .setDefault(PRMtwoDefaults) .setHelpText("Function mapping speeds between 0 and 1 to a color.")); ////////// // Register this operator. SHOP_Operator* shop = new SHOP_Operator(SHOP_OpenVDB_Points::nodeName(), "OpenVDB Points", SHOP_OpenVDB_Points::factory, parms.get(), /*child_table_name=*/nullptr, /*min_sources=*/0, /*max_sources=*/0, SHOP_Node::myVariableList, OP_FLAG_GENERATOR, SHOP_AUTOADD_NONE); shop->setIconName("SHOP_geometry"); table->addOperator(shop); ////////// // Set the SHOP-specific data SHOP_OperatorInfo* info = UTverify_cast<SHOP_OperatorInfo*>(shop->getOpSpecificData()); info->setShaderType(SHOP_GEOMETRY); // Set the rendermask to "*" and try to support *all* renderers. info->setRenderMask("*"); }
6,057
C++
30.226804
98
0.638105
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/PointUtils.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file PointUtils.cc /// @authors Dan Bailey, Nick Avramoussis, Richard Kwok #include "PointUtils.h" #include "AttributeTransferUtil.h" #include "Utils.h" #include <openvdb/openvdb.h> #include <openvdb/points/AttributeArrayString.h> #include <openvdb/points/PointAttribute.h> #include <openvdb/points/PointConversion.h> #include <openvdb/points/PointDataGrid.h> #include <GA/GA_AIFTuple.h> #include <GA/GA_ElementGroup.h> #include <GA/GA_Iterator.h> #include <CH/CH_Manager.h> // for CHgetEvalTime #include <PRM/PRM_SpareData.h> #include <SOP/SOP_Node.h> #include <UT/UT_UniquePtr.h> #include <algorithm> #include <map> #include <memory> #include <sstream> #include <stdexcept> #include <string> #include <type_traits> #include <vector> using namespace openvdb; using namespace openvdb::points; namespace hvdb = openvdb_houdini; namespace { inline GA_Storage gaStorageFromAttrString(const openvdb::Name& type) { if (type == "string") return GA_STORE_STRING; else if (type == "bool") return GA_STORE_BOOL; else if (type == "int8") return GA_STORE_INT8; else if (type == "int16") return GA_STORE_INT16; else if (type == "int32") return GA_STORE_INT32; else if (type == "int64") return GA_STORE_INT64; else if (type == "float") return GA_STORE_REAL32; else if (type == "double") return GA_STORE_REAL64; else if (type == "vec3i") return GA_STORE_INT32; else if (type == "vec3s") return GA_STORE_REAL32; else if (type == "vec3d") return GA_STORE_REAL64; else if (type == "quats") return GA_STORE_REAL32; else if (type == "quatd") return GA_STORE_REAL64; else if (type == "mat3s") return GA_STORE_REAL32; else if (type == "mat3d") return GA_STORE_REAL64; else if (type == "mat4s") return GA_STORE_REAL32; else if (type == "mat4d") return GA_STORE_REAL64; return GA_STORE_INVALID; } // @{ // Houdini GA Handle Traits template<typename T> struct GAHandleTraits { using RW = GA_RWHandleF; using RO = GA_ROHandleF; }; 
template<> struct GAHandleTraits<bool> { using RW = GA_RWHandleI; using RO = GA_ROHandleI; }; template<> struct GAHandleTraits<int8_t> { using RW = GA_RWHandleI; using RO = GA_ROHandleI; }; template<> struct GAHandleTraits<int16_t> { using RW = GA_RWHandleI; using RO = GA_ROHandleI; }; template<> struct GAHandleTraits<int32_t> { using RW = GA_RWHandleI; using RO = GA_ROHandleI; }; template<> struct GAHandleTraits<int64_t> { using RW = GA_RWHandleID; using RO = GA_ROHandleID; }; template<> struct GAHandleTraits<half> { using RW = GA_RWHandleH; using RO = GA_ROHandleH; }; template<> struct GAHandleTraits<float> { using RW = GA_RWHandleF; using RO = GA_ROHandleF; }; template<> struct GAHandleTraits<double> { using RW = GA_RWHandleD; using RO = GA_ROHandleD; }; template<> struct GAHandleTraits<std::string> { using RW = GA_RWHandleS; using RO = GA_ROHandleS; }; template<> struct GAHandleTraits<openvdb::math::Vec3<int>> { using RW=GA_RWHandleV3; using RO=GA_ROHandleV3; }; template<> struct GAHandleTraits<openvdb::Vec3s> { using RW = GA_RWHandleV3; using RO = GA_ROHandleV3; }; template<> struct GAHandleTraits<openvdb::Vec3d> { using RW = GA_RWHandleV3D; using RO = GA_ROHandleV3D; }; template<> struct GAHandleTraits<openvdb::math::Mat3s> { using RW = GA_RWHandleM3; using RO = GA_ROHandleM3; }; template<> struct GAHandleTraits<openvdb::math::Mat3d> { using RW = GA_RWHandleM3D; using RO = GA_ROHandleM3D; }; template<> struct GAHandleTraits<openvdb::Mat4s> { using RW = GA_RWHandleM4; using RO = GA_ROHandleM4; }; template<> struct GAHandleTraits<openvdb::Mat4d> { using RW = GA_RWHandleM4D; using RO = GA_ROHandleM4D; }; template<> struct GAHandleTraits<openvdb::math::Quats> { using RW = GA_RWHandleQ; using RO = GA_ROHandleQ; }; template<> struct GAHandleTraits<openvdb::math::Quatd> { using RW = GA_RWHandleQD; using RO = GA_ROHandleQD; }; // @} template<typename HandleType, typename ValueType> inline ValueType readAttributeValue(const HandleType& handle, const GA_Offset offset, 
const openvdb::Index component = 0) { return ValueType(handle.get(offset, component)); } template<> inline openvdb::math::Vec3<float> readAttributeValue(const GA_ROHandleV3& handle, const GA_Offset offset, const openvdb::Index component) { openvdb::math::Vec3<float> dstValue; const UT_Vector3F value(handle.get(offset, component)); dstValue[0] = value[0]; dstValue[1] = value[1]; dstValue[2] = value[2]; return dstValue; } template<> inline openvdb::math::Vec3<int> readAttributeValue(const GA_ROHandleV3& handle, const GA_Offset offset, const openvdb::Index component) { openvdb::math::Vec3<int> dstValue; const UT_Vector3 value(handle.get(offset, component)); dstValue[0] = static_cast<int>(value[0]); dstValue[1] = static_cast<int>(value[1]); dstValue[2] = static_cast<int>(value[2]); return dstValue; } template<> inline openvdb::math::Vec3<double> readAttributeValue(const GA_ROHandleV3D& handle, const GA_Offset offset, const openvdb::Index component) { openvdb::math::Vec3<double> dstValue; const UT_Vector3D value(handle.get(offset, component)); dstValue[0] = value[0]; dstValue[1] = value[1]; dstValue[2] = value[2]; return dstValue; } template<> inline openvdb::math::Quat<float> readAttributeValue(const GA_ROHandleQ& handle, const GA_Offset offset, const openvdb::Index component) { openvdb::math::Quat<float> dstValue; const UT_QuaternionF value(handle.get(offset, component)); dstValue[0] = value[0]; dstValue[1] = value[1]; dstValue[2] = value[2]; dstValue[3] = value[3]; return dstValue; } template<> inline openvdb::math::Quat<double> readAttributeValue(const GA_ROHandleQD& handle, const GA_Offset offset, const openvdb::Index component) { openvdb::math::Quat<double> dstValue; const UT_QuaternionD value(handle.get(offset, component)); dstValue[0] = value[0]; dstValue[1] = value[1]; dstValue[2] = value[2]; dstValue[3] = value[3]; return dstValue; } template<> inline openvdb::math::Mat3<float> readAttributeValue(const GA_ROHandleM3& handle, const GA_Offset offset, const 
openvdb::Index component) { // read transposed matrix because Houdini uses column-major order so as // to be compatible with OpenGL const UT_Matrix3F value(handle.get(offset, component)); openvdb::math::Mat3<float> dstValue(value.data()); return dstValue.transpose(); } template<> inline openvdb::math::Mat3<double> readAttributeValue(const GA_ROHandleM3D& handle, const GA_Offset offset, const openvdb::Index component) { // read transposed matrix because Houdini uses column-major order so as // to be compatible with OpenGL const UT_Matrix3D value(handle.get(offset, component)); openvdb::math::Mat3<double> dstValue(value.data()); return dstValue.transpose(); } template<> inline openvdb::math::Mat4<float> readAttributeValue(const GA_ROHandleM4& handle, const GA_Offset offset, const openvdb::Index component) { // read transposed matrix because Houdini uses column-major order so as // to be compatible with OpenGL const UT_Matrix4F value(handle.get(offset, component)); openvdb::math::Mat4<float> dstValue(value.data()); return dstValue.transpose(); } template<> inline openvdb::math::Mat4<double> readAttributeValue(const GA_ROHandleM4D& handle, const GA_Offset offset, const openvdb::Index component) { // read transposed matrix because Houdini uses column-major order so as // to be compatible with OpenGL const UT_Matrix4D value(handle.get(offset, component)); openvdb::math::Mat4<double> dstValue(value.data()); return dstValue.transpose(); } template<> inline openvdb::Name readAttributeValue(const GA_ROHandleS& handle, const GA_Offset offset, const openvdb::Index component) { return openvdb::Name(UT_String(handle.get(offset, component)).toStdString()); } template<typename HandleType, typename ValueType> inline void writeAttributeValue(const HandleType& handle, const GA_Offset offset, const openvdb::Index component, const ValueType& value) { handle.set(offset, component, static_cast<typename HandleType::BASETYPE>(value)); } template<> inline void writeAttributeValue(const 
GA_RWHandleV3& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::math::Vec3<int>& value) { handle.set(offset, component, UT_Vector3F( static_cast<float>(value.x()), static_cast<float>(value.y()), static_cast<float>(value.z()))); } template<> inline void writeAttributeValue(const GA_RWHandleV3& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::math::Vec3<float>& value) { handle.set(offset, component, UT_Vector3(value.x(), value.y(), value.z())); } template<> inline void writeAttributeValue(const GA_RWHandleV3D& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::math::Vec3<double>& value) { handle.set(offset, component, UT_Vector3D(value.x(), value.y(), value.z())); } template<> inline void writeAttributeValue(const GA_RWHandleQ& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::math::Quat<float>& value) { handle.set(offset, component, UT_QuaternionF(value.x(), value.y(), value.z(), value.w())); } template<> inline void writeAttributeValue(const GA_RWHandleQD& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::math::Quat<double>& value) { handle.set(offset, component, UT_QuaternionD(value.x(), value.y(), value.z(), value.w())); } template<> inline void writeAttributeValue(const GA_RWHandleM3& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::math::Mat3<float>& value) { // write transposed matrix because Houdini uses column-major order so as // to be compatible with OpenGL const float* data(value.asPointer()); handle.set(offset, component, UT_Matrix3F(data[0], data[3], data[6], data[1], data[4], data[7], data[2], data[5], data[8])); } template<> inline void writeAttributeValue(const GA_RWHandleM3D& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::math::Mat3<double>& value) { // write transposed matrix because Houdini uses column-major order so as // to be compatible 
with OpenGL const double* data(value.asPointer()); handle.set(offset, component, UT_Matrix3D(data[0], data[3], data[6], data[1], data[4], data[7], data[2], data[5], data[8])); } template<> inline void writeAttributeValue(const GA_RWHandleM4& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::math::Mat4<float>& value) { // write transposed matrix because Houdini uses column-major order so as // to be compatible with OpenGL const float* data(value.asPointer()); handle.set(offset, component, UT_Matrix4F(data[0], data[4], data[8], data[12], data[1], data[5], data[9], data[13], data[2], data[6], data[10], data[14], data[3], data[7], data[11], data[15])); } template<> inline void writeAttributeValue(const GA_RWHandleM4D& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::math::Mat4<double>& value) { // write transposed matrix because Houdini uses column-major order so as // to be compatible with OpenGL const double* data(value.asPointer()); handle.set(offset, component, UT_Matrix4D(data[0], data[4], data[8], data[12], data[1], data[5], data[9], data[13], data[2], data[6], data[10], data[14], data[3], data[7], data[11], data[15])); } template<> inline void writeAttributeValue(const GA_RWHandleS& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::Name& value) { handle.set(offset, component, value.c_str()); } /// @brief Writeable wrapper class around Houdini point attributes which hold /// a reference to the GA Attribute to write template <typename T> struct HoudiniWriteAttribute { using ValueType = T; struct Handle { explicit Handle(HoudiniWriteAttribute<T>& attribute) : mHandle(&attribute.mAttribute) { } template <typename ValueType> void set(openvdb::Index offset, openvdb::Index stride, const ValueType& value) { writeAttributeValue(mHandle, GA_Offset(offset), stride, T(value)); } private: typename GAHandleTraits<T>::RW mHandle; }; // struct Handle explicit 
HoudiniWriteAttribute(GA_Attribute& attribute) : mAttribute(attribute) { } void expand() { mAttribute.hardenAllPages(); } void compact() { mAttribute.tryCompressAllPages(); } private: GA_Attribute& mAttribute; }; // struct HoudiniWriteAttribute /// @brief Readable wrapper class around Houdini point attributes which hold /// a reference to the GA Attribute to access and optionally a list of offsets template <typename T> struct HoudiniReadAttribute { using value_type = T; using PosType = T; using ReadHandleType = typename GAHandleTraits<T>::RO; explicit HoudiniReadAttribute(const GA_Attribute& attribute, hvdb::OffsetListPtr offsets = hvdb::OffsetListPtr()) : mHandle(&attribute) , mAttribute(attribute) , mOffsets(offsets) { } static void get(const GA_Attribute& attribute, T& value, const GA_Offset offset, const openvdb::Index component) { const ReadHandleType handle(&attribute); value = readAttributeValue<ReadHandleType, T>(handle, offset, component); } // Return the value of the nth point in the array (scalar type only) void get(T& value, const size_t n, const openvdb::Index component = 0) const { value = readAttributeValue<ReadHandleType, T>(mHandle, getOffset(n), component); } // Only provided to match the required interface for the PointPartitioner void getPos(size_t n, T& xyz) const { return this->get(xyz, n); } size_t size() const { return mOffsets ? mOffsets->size() : size_t(mAttribute.getIndexMap().indexSize()); } private: GA_Offset getOffset(size_t n) const { return mOffsets ? 
(*mOffsets)[n] : mAttribute.getIndexMap().offsetFromIndex(GA_Index(n)); } const ReadHandleType mHandle; const GA_Attribute& mAttribute; hvdb::OffsetListPtr mOffsets; }; // HoudiniReadAttribute struct HoudiniGroup { explicit HoudiniGroup(GA_PointGroup& group, openvdb::Index64 startOffset, openvdb::Index64 total) : mGroup(group) , mStartOffset(startOffset) , mTotal(total) { mBackingArray.resize(total, 0); } HoudiniGroup(const HoudiniGroup &) = delete; HoudiniGroup& operator=(const HoudiniGroup &) = delete; void setOffsetOn(openvdb::Index index) { mBackingArray[index - mStartOffset] = 1; } void finalize() { for (openvdb::Index64 i = 0, n = mTotal; i < n; i++) { if (mBackingArray[i]) { mGroup.addOffset(GA_Offset(i + mStartOffset)); } } } private: GA_PointGroup& mGroup; openvdb::Index64 mStartOffset; openvdb::Index64 mTotal; // This is not a bit field as we need to allow threadsafe updates: std::vector<unsigned char> mBackingArray; }; // HoudiniGroup template <typename ValueType, typename CodecType = NullCodec> inline void convertAttributeFromHoudini(PointDataTree& tree, const tools::PointIndexTree& indexTree, const Name& name, const GA_Attribute* const attribute, const GA_Defaults& defaults, const Index stride = 1) { static_assert(!std::is_base_of<AttributeArray, ValueType>::value, "ValueType must not be derived from AttributeArray"); static_assert(!std::is_same<ValueType, Name>::value, "ValueType must not be Name/std::string"); using HoudiniAttribute = HoudiniReadAttribute<ValueType>; ValueType value = hvdb::evalAttrDefault<ValueType>(defaults, 0); // empty metadata if default is zero if (!math::isZero<ValueType>(value)) { TypedMetadata<ValueType> defaultValue(value); appendAttribute<ValueType, CodecType>(tree, name, zeroVal<ValueType>(), stride, /*constantstride=*/true, &defaultValue); } else { appendAttribute<ValueType, CodecType>(tree, name, zeroVal<ValueType>(), stride, /*constantstride=*/true); } HoudiniAttribute houdiniAttribute(*attribute); 
populateAttribute<PointDataTree, tools::PointIndexTree, HoudiniAttribute>( tree, indexTree, name, houdiniAttribute, stride); } inline void convertAttributeFromHoudini(PointDataTree& tree, const tools::PointIndexTree& indexTree, const Name& name, const GA_Attribute* const attribute, const int compression = 0) { using namespace openvdb::math; using HoudiniStringAttribute = HoudiniReadAttribute<Name>; if (!attribute) { std::stringstream ss; ss << "Invalid attribute - " << attribute->getName(); throw std::runtime_error(ss.str()); } const GA_Storage storage(hvdb::attributeStorageType(attribute)); if (storage == GA_STORE_INVALID) { std::stringstream ss; ss << "Invalid attribute type - " << attribute->getName(); throw std::runtime_error(ss.str()); } const int16_t width(hvdb::attributeTupleSize(attribute)); UT_ASSERT(width > 0); // explicitly handle string attributes if (storage == GA_STORE_STRING) { appendAttribute<Name>(tree, name); HoudiniStringAttribute houdiniAttribute(*attribute); populateAttribute<PointDataTree, tools::PointIndexTree, HoudiniStringAttribute>( tree, indexTree, name, houdiniAttribute); return; } const GA_AIFTuple* tupleAIF = attribute->getAIFTuple(); if (!tupleAIF) { std::stringstream ss; ss << "Invalid attribute type - " << attribute->getName(); throw std::runtime_error(ss.str()); } GA_Defaults defaults = tupleAIF->getDefaults(attribute); const GA_TypeInfo typeInfo(attribute->getOptions().typeInfo()); const bool isVector = width == 3 && (typeInfo == GA_TYPE_VECTOR || typeInfo == GA_TYPE_NORMAL || typeInfo == GA_TYPE_COLOR); const bool isQuaternion = width == 4 && (typeInfo == GA_TYPE_QUATERNION); const bool isMatrix3 = width == 9 && (typeInfo == GA_TYPE_TRANSFORM); const bool isMatrix4 = width == 16 && (typeInfo == GA_TYPE_TRANSFORM); if (isVector) { if (storage == GA_STORE_INT32) { convertAttributeFromHoudini<Vec3<int>>(tree, indexTree, name, attribute, defaults); } else if (storage == GA_STORE_REAL16) { // implicitly convert 16-bit float into 
truncated 32-bit float convertAttributeFromHoudini<Vec3<float>, TruncateCodec>( tree, indexTree, name, attribute, defaults); } else if (storage == GA_STORE_REAL32) { if (compression == hvdb::COMPRESSION_NONE) { convertAttributeFromHoudini<Vec3<float>>( tree, indexTree, name, attribute, defaults); } else if (compression == hvdb::COMPRESSION_TRUNCATE) { convertAttributeFromHoudini<Vec3<float>, TruncateCodec>( tree, indexTree, name, attribute, defaults); } else if (compression == hvdb::COMPRESSION_UNIT_VECTOR) { convertAttributeFromHoudini<Vec3<float>, UnitVecCodec>( tree, indexTree, name, attribute, defaults); } else if (compression == hvdb::COMPRESSION_UNIT_FIXED_POINT_8) { convertAttributeFromHoudini<Vec3<float>, FixedPointCodec<true, UnitRange>>( tree, indexTree, name, attribute, defaults); } else if (compression == hvdb::COMPRESSION_UNIT_FIXED_POINT_16) { convertAttributeFromHoudini<Vec3<float>, FixedPointCodec<false, UnitRange>>( tree, indexTree, name, attribute, defaults); } } else if (storage == GA_STORE_REAL64) { convertAttributeFromHoudini<Vec3<double>>(tree, indexTree, name, attribute, defaults); } else { std::stringstream ss; ss << "Unknown vector attribute type - " << name; throw std::runtime_error(ss.str()); } } else if (isQuaternion) { if (storage == GA_STORE_REAL16) { // implicitly convert 16-bit float into 32-bit float convertAttributeFromHoudini<Quat<float>>(tree, indexTree, name, attribute, defaults); } else if (storage == GA_STORE_REAL32) { convertAttributeFromHoudini<Quat<float>>(tree, indexTree, name, attribute, defaults); } else if (storage == GA_STORE_REAL64) { convertAttributeFromHoudini<Quat<double>>(tree, indexTree, name, attribute, defaults); } else { std::stringstream ss; ss << "Unknown quaternion attribute type - " << name; throw std::runtime_error(ss.str()); } } else if (isMatrix3) { if (storage == GA_STORE_REAL16) { // implicitly convert 16-bit float into 32-bit float convertAttributeFromHoudini<Mat3<float>>(tree, indexTree, name, 
attribute, defaults); } else if (storage == GA_STORE_REAL32) { convertAttributeFromHoudini<Mat3<float>>(tree, indexTree, name, attribute, defaults); } else if (storage == GA_STORE_REAL64) { convertAttributeFromHoudini<Mat3<double>>(tree, indexTree, name, attribute, defaults); } else { std::stringstream ss; ss << "Unknown matrix3 attribute type - " << name; throw std::runtime_error(ss.str()); } } else if (isMatrix4) { if (storage == GA_STORE_REAL16) { // implicitly convert 16-bit float into 32-bit float convertAttributeFromHoudini<Mat4<float>>(tree, indexTree, name, attribute, defaults); } else if (storage == GA_STORE_REAL32) { convertAttributeFromHoudini<Mat4<float>>(tree, indexTree, name, attribute, defaults); } else if (storage == GA_STORE_REAL64) { convertAttributeFromHoudini<Mat4<double>>(tree, indexTree, name, attribute, defaults); } else { std::stringstream ss; ss << "Unknown matrix4 attribute type - " << name; throw std::runtime_error(ss.str()); } } else { if (storage == GA_STORE_BOOL) { convertAttributeFromHoudini<bool>(tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_INT8) { convertAttributeFromHoudini<int8_t>(tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_INT16) { convertAttributeFromHoudini<int16_t>(tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_INT32) { convertAttributeFromHoudini<int32_t>(tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_INT64) { convertAttributeFromHoudini<int64_t>(tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_REAL16) { convertAttributeFromHoudini<float, TruncateCodec>( tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_REAL32 && compression == hvdb::COMPRESSION_NONE) { convertAttributeFromHoudini<float>(tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_REAL32 && compression == 
hvdb::COMPRESSION_TRUNCATE) { convertAttributeFromHoudini<float, TruncateCodec>( tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_REAL32 && compression == hvdb::COMPRESSION_UNIT_FIXED_POINT_8) { convertAttributeFromHoudini<float, FixedPointCodec<true, UnitRange>>( tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_REAL32 && compression == hvdb::COMPRESSION_UNIT_FIXED_POINT_16) { convertAttributeFromHoudini<float, FixedPointCodec<false, UnitRange>>( tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_REAL64) { convertAttributeFromHoudini<double>(tree, indexTree, name, attribute, defaults, width); } else { std::stringstream ss; ss << "Unknown attribute type - " << name; throw std::runtime_error(ss.str()); } } } template <typename ValueType> void populateHoudiniDetailAttribute(GA_RWAttributeRef& attrib, const openvdb::MetaMap& metaMap, const Name& key, const int index) { using WriteHandleType = typename GAHandleTraits<ValueType>::RW; using TypedMetadataT = TypedMetadata<ValueType>; typename TypedMetadataT::ConstPtr typedMetadata = metaMap.getMetadata<TypedMetadataT>(key); if (!typedMetadata) return; const ValueType& value = typedMetadata->value(); WriteHandleType handle(attrib.getAttribute()); writeAttributeValue<WriteHandleType, ValueType>(handle, GA_Offset(0), index, value); } template<typename ValueType> Metadata::Ptr createTypedMetadataFromAttribute(const GA_Attribute* const attribute, const uint32_t component = 0) { using HoudiniAttribute = HoudiniReadAttribute<ValueType>; ValueType value; HoudiniAttribute::get(*attribute, value, GA_Offset(0), component); return openvdb::TypedMetadata<ValueType>(value).copy(); } template<typename HoudiniType, typename ValueType> GA_Defaults buildDefaults(const ValueType& value) { HoudiniType values[1]; values[0] = value; return GA_Defaults(values, 1); } template<> GA_Defaults buildDefaults<int32>(const openvdb::math::Vec3<int>& value) 
{
    int32 values[3];
    for (unsigned i = 0; i < 3; ++i) { values[i] = value(i); }
    return GA_Defaults(values, 3);
}

template<>
GA_Defaults
buildDefaults<fpreal32>(const openvdb::math::Vec3<float>& value)
{
    fpreal32 values[3];
    for (unsigned i = 0; i < 3; ++i) { values[i] = value(i); }
    return GA_Defaults(values, 3);
}

template<>
GA_Defaults
buildDefaults<fpreal64>(const openvdb::math::Vec3<double>& value)
{
    fpreal64 values[3];
    for (unsigned i = 0; i < 3; ++i) { values[i] = value(i); }
    return GA_Defaults(values, 3);
}

template<>
GA_Defaults
buildDefaults<fpreal32>(const openvdb::math::Quat<float>& value)
{
    fpreal32 values[4];
    for (unsigned i = 0; i < 4; ++i) { values[i] = value(i); }
    return GA_Defaults(values, 4);
}

template<>
GA_Defaults
buildDefaults<fpreal64>(const openvdb::math::Quat<double>& value)
{
    fpreal64 values[4];
    for (unsigned i = 0; i < 4; ++i) { values[i] = value(i); }
    return GA_Defaults(values, 4);
}

template<>
GA_Defaults
buildDefaults<fpreal32>(const openvdb::math::Mat3<float>& value)
{
    fpreal32 values[9];
    const float* data = value.asPointer();
    for (unsigned i = 0; i < 9; ++i) { values[i] = data[i]; }
    return GA_Defaults(values, 9);
}

template<>
GA_Defaults
buildDefaults<fpreal64>(const openvdb::math::Mat3<double>& value)
{
    fpreal64 values[9];
    const double* data = value.asPointer();
    for (unsigned i = 0; i < 9; ++i) { values[i] = data[i]; }
    return GA_Defaults(values, 9);
}

template<>
GA_Defaults
buildDefaults<fpreal32>(const openvdb::math::Mat4<float>& value)
{
    fpreal32 values[16];
    const float* data = value.asPointer();
    for (unsigned i = 0; i < 16; ++i) { values[i] = data[i]; }
    return GA_Defaults(values, 16);
}

template<>
GA_Defaults
buildDefaults<fpreal64>(const openvdb::math::Mat4<double>& value)
{
    fpreal64 values[16];
    const double* data = value.asPointer();
    for (unsigned i = 0; i < 16; ++i) { values[i] = data[i]; }
    return GA_Defaults(values, 16);
}

// Fetch the typed default for attribute "name" from the descriptor and
// convert it into GA_Defaults of the matching Houdini storage type.
template <typename ValueType, typename HoudiniType>
GA_Defaults
gaDefaultsFromDescriptorTyped(const openvdb::points::AttributeSet::Descriptor& descriptor,
    const openvdb::Name& name)
{
    ValueType defaultValue = descriptor.getDefaultValue<ValueType>(name);
    return buildDefaults<HoudiniType, ValueType>(defaultValue);
}

// Map an OpenVDB Points attribute type name to GA_Defaults holding the
// descriptor's default value; unknown names yield zero defaults.
inline GA_Defaults
gaDefaultsFromDescriptor(const openvdb::points::AttributeSet::Descriptor& descriptor,
    const openvdb::Name& name)
{
    const size_t pos = descriptor.find(name);
    if (pos == openvdb::points::AttributeSet::INVALID_POS) return GA_Defaults(0);
    const openvdb::Name type = descriptor.type(pos).first;
    if (type == "bool") {
        return gaDefaultsFromDescriptorTyped<bool, int32>(descriptor, name);
    } else if (type == "int8") {
        return gaDefaultsFromDescriptorTyped<int8_t, int32>(descriptor, name);
    } else if (type == "int16") {
        return gaDefaultsFromDescriptorTyped<int16_t, int32>(descriptor, name);
    } else if (type == "int32") {
        return gaDefaultsFromDescriptorTyped<int32_t, int32>(descriptor, name);
    } else if (type == "int64") {
        return gaDefaultsFromDescriptorTyped<int64_t, int64>(descriptor, name);
    } else if (type == "float") {
        return gaDefaultsFromDescriptorTyped<float, fpreal32>(descriptor, name);
    } else if (type == "double") {
        return gaDefaultsFromDescriptorTyped<double, fpreal64>(descriptor, name);
    } else if (type == "vec3i") {
        return gaDefaultsFromDescriptorTyped<openvdb::math::Vec3<int>, int32>(descriptor, name);
    } else if (type == "vec3s") {
        return gaDefaultsFromDescriptorTyped<openvdb::math::Vec3s, fpreal32>(descriptor, name);
    } else if (type == "vec3d") {
        return gaDefaultsFromDescriptorTyped<openvdb::math::Vec3d, fpreal64>(descriptor, name);
    } else if (type == "quats") {
        return gaDefaultsFromDescriptorTyped<openvdb::math::Quats, fpreal32>(descriptor, name);
    } else if (type == "quatd") {
        return gaDefaultsFromDescriptorTyped<openvdb::math::Quatd, fpreal64>(descriptor, name);
    } else if (type == "mat3s") {
        return gaDefaultsFromDescriptorTyped<openvdb::math::Mat3s, fpreal32>(descriptor, name);
    } else if (type == "mat3d") {
        return gaDefaultsFromDescriptorTyped<openvdb::math::Mat3d, fpreal64>(descriptor, name);
    } else if (type == "mat4s") {
        return gaDefaultsFromDescriptorTyped<openvdb::math::Mat4s, fpreal32>(descriptor, name);
    } else if (type == "mat4d") {
        return gaDefaultsFromDescriptorTyped<openvdb::math::Mat4d, fpreal64>(descriptor, name);
    }
    return GA_Defaults(0);
}

} // unnamed namespace


////////////////////////////////////////


namespace openvdb_houdini {

// Compute a voxel size from the detail's P attribute using the target
// points-per-voxel density, honouring interrupts.
float
computeVoxelSizeFromHoudini(const GU_Detail& detail, const uint32_t pointsPerVoxel,
    const openvdb::math::Mat4d& matrix, const Index decimalPlaces,
    hvdb::Interrupter& interrupter)
{
    HoudiniReadAttribute<openvdb::Vec3R> positions(*(detail.getP()));
    return openvdb::points::computeVoxelSize(
        positions, pointsPerVoxel, matrix, decimalPlaces, &interrupter);
}

// Convert Houdini point geometry (positions, groups and the requested
// attributes) into an OpenVDB PointDataGrid.
PointDataGrid::Ptr
convertHoudiniToPointDataGrid(const GU_Detail& ptGeo, const int compression,
    const AttributeInfoMap& attributes, const math::Transform& transform,
    const WarnFunc& warnings)
{
    using HoudiniPositionAttribute = HoudiniReadAttribute<Vec3d>;

    // initialize primitive offsets
    // (only the first point of each NURBS curve primitive is recorded)
    hvdb::OffsetListPtr offsets;
    for (GA_Iterator primitiveIt(ptGeo.getPrimitiveRange()); !primitiveIt.atEnd();
        ++primitiveIt) {
        const GA_Primitive* primitive = ptGeo.getPrimitiveList().get(*primitiveIt);
        if (primitive->getTypeId() != GA_PRIMNURBCURVE) continue;
        const size_t vertexCount = primitive->getVertexCount();
        if (vertexCount == 0) continue;
        if (!offsets) offsets.reset(new hvdb::OffsetList);
        const GA_Offset firstOffset = primitive->getPointOffset(0);
        offsets->push_back(firstOffset);
    }

    // Create PointPartitioner compatible P attribute wrapper (for now no offset filtering)
    const GA_Attribute& positionAttribute = *ptGeo.getP();
    HoudiniPositionAttribute points(positionAttribute, offsets);

    // Create PointIndexGrid used for consistent index ordering in all attribute conversion
    const tools::PointIndexGrid::Ptr pointIndexGrid =
        tools::createPointIndexGrid<tools::PointIndexGrid>(points, transform);

    // Create PointDataGrid using position attribute
    PointDataGrid::Ptr pointDataGrid;
    if (compression == 1 /*FIXED_POSITION_16*/) {
        pointDataGrid = createPointDataGrid<FixedPointCodec<false>, PointDataGrid>(
            *pointIndexGrid, points, transform);
    } else if (compression == 2 /*FIXED_POSITION_8*/) {
        pointDataGrid = createPointDataGrid<FixedPointCodec<true>, PointDataGrid>(
            *pointIndexGrid, points, transform);
    } else /*NONE*/ {
        pointDataGrid = createPointDataGrid<NullCodec, PointDataGrid>(
            *pointIndexGrid, points, transform);
    }

    const tools::PointIndexTree& indexTree = pointIndexGrid->tree();
    PointDataTree& tree = pointDataGrid->tree();

    const GA_Size numHoudiniPoints = ptGeo.getNumPoints();
    UT_ASSERT(numHoudiniPoints >= 0);

    // points with NaN positions are silently dropped by the partitioner;
    // detect the mismatch and warn
    const Index64 numVDBPoints = pointCount(tree);
    UT_ASSERT(numVDBPoints <= static_cast<Index64>(numHoudiniPoints));

    if (numVDBPoints < static_cast<Index64>(numHoudiniPoints)) {
        warnings("Points contain NAN positional values. These points will not be converted.");
    }

    if (!tree.cbeginLeaf()) return pointDataGrid;

    // store point group information
    const GA_ElementGroupTable& elementGroups = ptGeo.getElementGroupTable(GA_ATTRIB_POINT);
    const int64_t numGroups = elementGroups.entries(); // including internal groups

    if (numGroups > 0) {
        // Append (empty) groups to tree
        std::vector<Name> groupNames;
        groupNames.reserve(numGroups);
        for (auto it = elementGroups.beginTraverse(), itEnd = elementGroups.endTraverse();
            it != itEnd; ++it) {
            groupNames.emplace_back((*it)->getName().toStdString());
        }
        appendGroups(tree, groupNames);

        // create the group membership vector at a multiple of 1024 for fast parallel resetting
        const size_t groupVectorSize = numHoudiniPoints + (1024 - (numHoudiniPoints % 1024));
        std::vector<short> inGroup(groupVectorSize, short(0));

        // Set group membership in tree
        for (auto it = elementGroups.beginTraverse(), itEnd = elementGroups.endTraverse();
            it != itEnd; ++it) {
            // gather this group's membership into inGroup, indexed by point index
            const GA_Range range(**it);
            tbb::parallel_for(GA_SplittableRange(range),
                [&ptGeo, &inGroup](const GA_SplittableRange& r) {
                    for (GA_PageIterator pit = r.beginPages(); !pit.atEnd(); ++pit) {
                        GA_Offset start, end;
                        for (GA_Iterator iter = pit.begin(); iter.blockAdvance(start, end);) {
                            for (GA_Offset off = start; off < end; ++off) {
                                const GA_Index idx = ptGeo.pointIndex(off);
                                UT_ASSERT(idx < GA_Index(inGroup.size()));
                                inGroup[idx] = short(1);
                            }
                        }
                    }
                });

            const Name groupName = (*it)->getName().toStdString();
            setGroup(tree, indexTree, inGroup, groupName);

            // reset groups to 0
            tbb::parallel_for(tbb::blocked_range<size_t>(0, groupVectorSize / 1024),
                [&inGroup](const tbb::blocked_range<size_t>& range) {
                    for (size_t n = range.begin(), N = range.end(); n != N; ++n) {
                        std::fill_n(inGroup.begin() + n*1024, 1024, 0);
                    }
                });
        }
    }

    // Add other attributes to PointDataGrid
    for (const auto& attrInfo : attributes) {

        const Name& name = attrInfo.first;

        // skip position as this has already been added
        if (name == "P") continue;

        GA_ROAttributeRef attrRef = ptGeo.findPointAttribute(name.c_str());

        if (!attrRef.isValid()) continue;

        GA_Attribute const * gaAttribute = attrRef.getAttribute();

        if (!gaAttribute) continue;

        const GA_AIFSharedStringTuple* sharedStringTupleAIF =
            gaAttribute->getAIFSharedStringTuple();
        const bool isString = bool(sharedStringTupleAIF);

        // Extract all the string values from the string table and insert them
        // into the Descriptor Metadata
        if (isString) {
            // Iterate over the strings in the table and insert them into the Metadata
            MetaMap& metadata = makeDescriptorUnique(tree)->getMetadata();
            StringMetaInserter inserter(metadata);
            for (auto it = sharedStringTupleAIF->begin(gaAttribute),
                itEnd = sharedStringTupleAIF->end(); !(it == itEnd); ++it) {
                Name str(it.getString());
                if (!str.empty()) inserter.insert(str);
            }
        }

        convertAttributeFromHoudini(tree, indexTree, name, gaAttribute,
            /*compression=*/attrInfo.second.first);
    }

    // Attempt to compact attributes
    compactAttributes(tree);

    return pointDataGrid;
}

void
convertPointDataGridToHoudini(
    GU_Detail& detail, const
PointDataGrid& grid,
    const std::vector<std::string>& attributes,
    const std::vector<std::string>& includeGroups,
    const std::vector<std::string>& excludeGroups,
    const bool inCoreOnly)
{
    // Copies positions, attributes and (further below) groups from the
    // PointDataGrid into the Houdini detail, honouring the group filters.
    using namespace openvdb::math;

    const PointDataTree& tree = grid.tree();

    auto leafIter = tree.cbeginLeaf();
    if (!leafIter) return;

    // position attribute is mandatory
    const AttributeSet& attributeSet = leafIter->attributeSet();
    const AttributeSet::Descriptor& descriptor = attributeSet.descriptor();
    const bool hasPosition = descriptor.find("P") != AttributeSet::INVALID_POS;
    if (!hasPosition) return;

    // sort for binary search
    std::vector<std::string> sortedAttributes(attributes);
    std::sort(sortedAttributes.begin(), sortedAttributes.end());

    // obtain cumulative point offsets and total points
    std::vector<Index64> offsets;
    MultiGroupFilter filter(includeGroups, excludeGroups, leafIter->attributeSet());
    const Index64 total = pointOffsets(offsets, tree, filter, inCoreOnly);

    // a block's global offset is needed to transform its point offsets to global offsets
    const Index64 startOffset = detail.appendPointBlock(total);

    HoudiniWriteAttribute<Vec3f> positionAttribute(*detail.getP());
    convertPointDataGridPosition(positionAttribute, grid, offsets, startOffset,
        filter, inCoreOnly);

    // add other point attributes to the hdk detail
    const AttributeSet::Descriptor::NameToPosMap& nameToPosMap = descriptor.map();

    for (const auto& namePos : nameToPosMap) {

        const Name& name = namePos.first;
        // position handled explicitly
        if (name == "P") continue;

        // filter attributes
        if (!sortedAttributes.empty() &&
            !std::binary_search(sortedAttributes.begin(), sortedAttributes.end(), name)) {
            continue;
        }

        const auto index = static_cast<unsigned>(namePos.second);

        const AttributeArray& array = leafIter->constAttributeArray(index);

        // don't convert group attributes
        if (isGroup(array)) continue;

        const unsigned stride = array.stride();

        GA_RWAttributeRef attributeRef = detail.findPointAttribute(name.c_str());

        const NamePair& type = descriptor.type(index);
        const Name valueType(isString(array) ? "string" : type.first);

        // create the attribute if it doesn't already exist in the detail
        if (attributeRef.isInvalid()) {

            // a truncate codec on a 32-bit float maps back to 16-bit Houdini storage
            const bool truncate(type.second == TruncateCodec::name());

            GA_Storage storage(gaStorageFromAttrString(valueType));
            if (storage == GA_STORE_INVALID) continue;
            if (storage == GA_STORE_REAL32 && truncate) {
                storage = GA_STORE_REAL16;
            }

            // tuple width is implied by the value type for compound types
            unsigned width = stride;
            const bool isVector = valueType.compare(0, 4, "vec3") == 0;
            const bool isQuaternion = valueType.compare(0, 4, "quat") == 0;
            const bool isMatrix3 = valueType.compare(0, 4, "mat3") == 0;
            const bool isMatrix4 = valueType.compare(0, 4, "mat4") == 0;

            if (isVector) width = 3;
            else if (isQuaternion) width = 4;
            else if (isMatrix3) width = 9;
            else if (isMatrix4) width = 16;

            const GA_Defaults defaults = gaDefaultsFromDescriptor(descriptor, name);

            attributeRef = detail.addTuple(storage, GA_ATTRIB_POINT, name.c_str(),
                width, defaults);

            // apply type info to some recognised types
            if (isVector) {
                if (name == "Cd") attributeRef->getOptions().setTypeInfo(GA_TYPE_COLOR);
                else if (name == "N") attributeRef->getOptions().setTypeInfo(GA_TYPE_NORMAL);
                else attributeRef->getOptions().setTypeInfo(GA_TYPE_VECTOR);
            }

            if (isQuaternion) {
                attributeRef->getOptions().setTypeInfo(GA_TYPE_QUATERNION);
            }

            if (isMatrix4 || isMatrix3) {
                attributeRef->getOptions().setTypeInfo(GA_TYPE_TRANSFORM);
            }

            // '|' and ':' characters are valid in OpenVDB Points names but
            // will make Houdini Attribute names invalid
            if (attributeRef.isInvalid()) {
                OPENVDB_THROW( RuntimeError,
                    "Unable to create Houdini Points Attribute with name '" + name +
                    "'. '|' and ':' characters are not supported by Houdini.");
            }
        }

        // dispatch on the value type to copy the attribute data
        if (valueType == "string") {
            HoudiniWriteAttribute<Name> attribute(*attributeRef.getAttribute());
            convertPointDataGridAttribute(attribute, tree, offsets, startOffset,
                index, stride, filter, inCoreOnly);
        } else if (valueType == "bool") {
            HoudiniWriteAttribute<bool> attribute(*attributeRef.getAttribute());
            convertPointDataGridAttribute(attribute, tree, offsets, startOffset,
                index, stride, filter, inCoreOnly);
        } else if (valueType == "int8") {
            HoudiniWriteAttribute<int8_t> attribute(*attributeRef.getAttribute());
            convertPointDataGridAttribute(attribute, tree, offsets, startOffset,
                index, stride, filter, inCoreOnly);
        } else if (valueType == "int16") {
            HoudiniWriteAttribute<int16_t> attribute(*attributeRef.getAttribute());
            convertPointDataGridAttribute(attribute, tree, offsets, startOffset,
                index, stride, filter, inCoreOnly);
        } else if (valueType == "int32") {
            HoudiniWriteAttribute<int32_t> attribute(*attributeRef.getAttribute());
            convertPointDataGridAttribute(attribute, tree, offsets, startOffset,
                index, stride, filter, inCoreOnly);
        } else if (valueType == "int64") {
            HoudiniWriteAttribute<int64_t> attribute(*attributeRef.getAttribute());
            convertPointDataGridAttribute(attribute, tree, offsets, startOffset,
                index, stride, filter, inCoreOnly);
        } else if (valueType == "float") {
            HoudiniWriteAttribute<float> attribute(*attributeRef.getAttribute());
            convertPointDataGridAttribute(attribute, tree, offsets, startOffset,
                index, stride, filter, inCoreOnly);
        } else if (valueType == "double") {
            HoudiniWriteAttribute<double> attribute(*attributeRef.getAttribute());
            convertPointDataGridAttribute(attribute, tree, offsets, startOffset,
                index, stride, filter, inCoreOnly);
        } else if (valueType == "vec3i") {
            HoudiniWriteAttribute<Vec3<int> > attribute(*attributeRef.getAttribute());
            convertPointDataGridAttribute(attribute, tree, offsets, startOffset,
                index, stride, filter, inCoreOnly);
        } else if (valueType == "vec3s") {
            HoudiniWriteAttribute<Vec3<float> > attribute(*attributeRef.getAttribute());
            convertPointDataGridAttribute(attribute, tree, offsets, startOffset,
                index, stride, filter, inCoreOnly);
        } else if (valueType == "vec3d") {
            HoudiniWriteAttribute<Vec3<double> > attribute(*attributeRef.getAttribute());
            convertPointDataGridAttribute(attribute, tree, offsets, startOffset,
                index, stride, filter, inCoreOnly);
        } else if (valueType == "quats") {
            HoudiniWriteAttribute<Quat<float> > attribute(*attributeRef.getAttribute());
            convertPointDataGridAttribute(attribute, tree, offsets, startOffset,
                index, stride, filter, inCoreOnly);
        } else if (valueType == "quatd") {
            HoudiniWriteAttribute<Quat<double> > attribute(*attributeRef.getAttribute());
            convertPointDataGridAttribute(attribute, tree, offsets, startOffset,
                index, stride, filter, inCoreOnly);
        } else if (valueType == "mat3s") {
            HoudiniWriteAttribute<Mat3<float> > attribute(*attributeRef.getAttribute());
            convertPointDataGridAttribute(attribute, tree, offsets, startOffset,
                index, stride, filter, inCoreOnly);
        } else if (valueType == "mat3d") {
            HoudiniWriteAttribute<Mat3<double> > attribute(*attributeRef.getAttribute());
            convertPointDataGridAttribute(attribute, tree, offsets, startOffset,
                index, stride, filter, inCoreOnly);
        } else if (valueType == "mat4s") {
            HoudiniWriteAttribute<Mat4<float> > attribute(*attributeRef.getAttribute());
            convertPointDataGridAttribute(attribute, tree, offsets, startOffset,
                index, stride, filter, inCoreOnly);
        } else if (valueType == "mat4d") {
            HoudiniWriteAttribute<Mat4<double> > attribute(*attributeRef.getAttribute());
            convertPointDataGridAttribute(attribute, tree, offsets, startOffset,
                index, stride, filter, inCoreOnly);
        } else {
            throw std::runtime_error("Unknown Attribute Type for Conversion: " + valueType);
        }
    }

    // add point groups to the hdk detail
    const AttributeSet::Descriptor::NameToPosMap& groupMap = descriptor.groupMap();

    for (const auto& namePos : groupMap) {
        const Name& name = namePos.first;
UT_ASSERT(!name.empty()); GA_PointGroup* pointGroup = detail.findPointGroup(name.c_str()); if (!pointGroup) pointGroup = detail.newPointGroup(name.c_str()); const AttributeSet::Descriptor::GroupIndex index = attributeSet.groupIndex(name); HoudiniGroup group(*pointGroup, startOffset, total); convertPointDataGridGroup(group, tree, offsets, startOffset, index, filter, inCoreOnly); } } void populateMetadataFromHoudini(openvdb::points::PointDataGrid& grid, const GU_Detail& detail, const WarnFunc& warnings) { using namespace openvdb::math; for (GA_AttributeDict::iterator iter = detail.attribs().begin(GA_SCOPE_PUBLIC); !iter.atEnd(); ++iter) { const GA_Attribute* const attribute = *iter; if (!attribute) continue; const Name name("global:" + Name(attribute->getName())); Metadata::Ptr metadata = grid[name]; if (metadata) continue; const GA_Storage storage(attributeStorageType(attribute)); const int16_t width(attributeTupleSize(attribute)); const GA_TypeInfo typeInfo(attribute->getOptions().typeInfo()); const bool isVector = width == 3 && (typeInfo == GA_TYPE_VECTOR || typeInfo == GA_TYPE_NORMAL || typeInfo == GA_TYPE_COLOR); const bool isQuaternion = width == 4 && (typeInfo == GA_TYPE_QUATERNION); const bool isMatrix3 = width == 9 && (typeInfo == GA_TYPE_TRANSFORM); const bool isMatrix4 = width == 16 && (typeInfo == GA_TYPE_TRANSFORM); if (isVector) { if (storage == GA_STORE_REAL16) { metadata = createTypedMetadataFromAttribute<Vec3<float> >(attribute); } else if (storage == GA_STORE_REAL32) { metadata = createTypedMetadataFromAttribute<Vec3<float> >(attribute); } else if (storage == GA_STORE_REAL64) { metadata = createTypedMetadataFromAttribute<Vec3<double> >(attribute); } else { std::stringstream ss; ss << "Detail attribute \"" << attribute->getName() << "\" " << "unsupported vector type for metadata conversion."; warnings(ss.str()); continue; } UT_ASSERT(metadata); grid.insertMeta(name, *metadata); } else if (isQuaternion) { if (storage == GA_STORE_REAL16) { metadata = 
createTypedMetadataFromAttribute<Quat<float>>(attribute); } else if (storage == GA_STORE_REAL32) { metadata = createTypedMetadataFromAttribute<Quat<float>>(attribute); } else if (storage == GA_STORE_REAL64) { metadata = createTypedMetadataFromAttribute<Quat<double>>(attribute); } else { std::stringstream ss; ss << "Detail attribute \"" << attribute->getName() << "\" " << "unsupported quaternion type for metadata conversion."; warnings(ss.str()); continue; } } else if (isMatrix3) { if (storage == GA_STORE_REAL16) { metadata = createTypedMetadataFromAttribute<Mat3<float>>(attribute); } else if (storage == GA_STORE_REAL32) { metadata = createTypedMetadataFromAttribute<Mat3<float>>(attribute); } else if (storage == GA_STORE_REAL64) { metadata = createTypedMetadataFromAttribute<Mat3<double>>(attribute); } else { std::stringstream ss; ss << "Detail attribute \"" << attribute->getName() << "\" " << "unsupported matrix3 type for metadata conversion."; warnings(ss.str()); continue; } } else if (isMatrix4) { if (storage == GA_STORE_REAL16) { metadata = createTypedMetadataFromAttribute<Mat4<float>>(attribute); } else if (storage == GA_STORE_REAL32) { metadata = createTypedMetadataFromAttribute<Mat4<float>>(attribute); } else if (storage == GA_STORE_REAL64) { metadata = createTypedMetadataFromAttribute<Mat4<double>>(attribute); } else { std::stringstream ss; ss << "Detail attribute \"" << attribute->getName() << "\" " << "unsupported matrix4 type for metadata conversion."; warnings(ss.str()); continue; } } else { for (int i = 0; i < width; i++) { if (storage == GA_STORE_BOOL) { metadata = createTypedMetadataFromAttribute<bool>(attribute, i); } else if (storage == GA_STORE_INT8) { metadata = createTypedMetadataFromAttribute<int8_t>(attribute, i); } else if (storage == GA_STORE_INT16) { metadata = createTypedMetadataFromAttribute<int16_t>(attribute, i); } else if (storage == GA_STORE_INT32) { metadata = createTypedMetadataFromAttribute<int32_t>(attribute, i); } else if (storage 
== GA_STORE_INT64) { metadata = createTypedMetadataFromAttribute<int64_t>(attribute, i); } else if (storage == GA_STORE_REAL16) { metadata = createTypedMetadataFromAttribute<float>(attribute, i); } else if (storage == GA_STORE_REAL32) { metadata = createTypedMetadataFromAttribute<float>(attribute, i); } else if (storage == GA_STORE_REAL64) { metadata = createTypedMetadataFromAttribute<double>(attribute, i); } else if (storage == GA_STORE_STRING) { metadata = createTypedMetadataFromAttribute<openvdb::Name>(attribute, i); } else { std::stringstream ss; ss << "Detail attribute \"" << attribute->getName() << "\" " << "unsupported type for metadata conversion."; warnings(ss.str()); continue; } UT_ASSERT(metadata); if (width > 1) { const Name arrayName(name + Name("[") + std::to_string(i) + Name("]")); grid.insertMeta(arrayName, *metadata); } else { grid.insertMeta(name, *metadata); } } } } } void convertMetadataToHoudini(GU_Detail& detail, const openvdb::MetaMap& metaMap, const WarnFunc& warnings) { struct Local { static bool isGlobalMetadata(const Name& name) { return name.compare(0, 7, "global:") == 0; } static Name toDetailName(const Name& name) { Name detailName(name); detailName.erase(0, 7); const size_t open = detailName.find('['); if (open != std::string::npos) { detailName = detailName.substr(0, open); } return detailName; } static int toDetailIndex(const Name& name) { const size_t open = name.find('['); const size_t close = name.find(']'); int index = 0; if (open != std::string::npos && close != std::string::npos && close == name.length()-1 && open > 0 && open+1 < close) { try { // parse array index index = std::stoi(name.substr(open+1, close-open-1)); } catch (const std::exception&) {} } return index; } }; using namespace openvdb::math; using DetailInfo = std::pair<Name, int>; using DetailMap = std::map<Name, DetailInfo>; DetailMap detailCreate; DetailMap detailPopulate; for(MetaMap::ConstMetaIterator iter = metaMap.beginMeta(); iter != metaMap.endMeta(); 
++iter) {

        const Metadata::Ptr metadata = iter->second;
        if (!metadata) continue;

        const Name& key = iter->first;
        if (!Local::isGlobalMetadata(key)) continue;

        Name name = Local::toDetailName(key);
        int index = Local::toDetailIndex(key);

        // add to creation map (track the widest index seen per attribute)
        if (detailCreate.find(name) == detailCreate.end()) {
            detailCreate[name] = DetailInfo(metadata->typeName(), index);
        } else {
            if (index > detailCreate[name].second) detailCreate[name].second = index;
        }

        // add to populate map
        detailPopulate[key] = DetailInfo(name, index);
    }

    // add all detail attributes
    for (const auto& item : detailCreate) {
        const Name& name = item.first;
        const DetailInfo& info = item.second;
        const Name& type = info.first;
        const int size = info.second;
        GA_RWAttributeRef attribute = detail.findGlobalAttribute(name);

        if (attribute.isInvalid()) {

            const GA_Storage storage = gaStorageFromAttrString(type);

            if (storage == GA_STORE_INVALID) {
                throw std::runtime_error("Invalid attribute storage type \"" + name + "\".");
            }

            if (type == "vec3s" || type == "vec3d") {
                attribute = detail.addTuple(storage, GA_ATTRIB_GLOBAL, name.c_str(), 3);
                attribute.setTypeInfo(GA_TYPE_VECTOR);
            } else {
                // scalar tuple: width is the highest "[index]" seen plus one
                attribute = detail.addTuple(storage, GA_ATTRIB_GLOBAL, name.c_str(), size+1);
            }

            if (!attribute.isValid()) {
                throw std::runtime_error("Error creating attribute with name \"" + name + "\".");
            }
        }
    }

    // populate the values
    for (const auto& item : detailPopulate) {
        const Name& key = item.first;
        const DetailInfo& info = item.second;
        const Name& name = info.first;
        const int index = info.second;
        const Name& type = metaMap[key]->typeName();

        GA_RWAttributeRef attrib = detail.findGlobalAttribute(name);
        UT_ASSERT(!attrib.isInvalid());

        // dispatch on the metadata's registered type name
        if (type == openvdb::typeNameAsString<bool>())
            populateHoudiniDetailAttribute<bool>(attrib, metaMap, key, index);
        else if (type == openvdb::typeNameAsString<int8_t>())
            populateHoudiniDetailAttribute<int8_t>(attrib, metaMap, key, index);
        else if (type == openvdb::typeNameAsString<int16_t>())
            populateHoudiniDetailAttribute<int16_t>(attrib, metaMap, key, index);
        else if (type == openvdb::typeNameAsString<int32_t>())
            populateHoudiniDetailAttribute<int32_t>(attrib, metaMap, key, index);
        else if (type == openvdb::typeNameAsString<int64_t>())
            populateHoudiniDetailAttribute<int64_t>(attrib, metaMap, key, index);
        else if (type == openvdb::typeNameAsString<float>())
            populateHoudiniDetailAttribute<float>(attrib, metaMap, key, index);
        else if (type == openvdb::typeNameAsString<double>())
            populateHoudiniDetailAttribute<double>(attrib, metaMap, key, index);
        else if (type == openvdb::typeNameAsString<Vec3<int32_t> >())
            populateHoudiniDetailAttribute<Vec3<int32_t> >(attrib, metaMap, key, index);
        else if (type == openvdb::typeNameAsString<Vec3<float> >())
            populateHoudiniDetailAttribute<Vec3<float> >(attrib, metaMap, key, index);
        else if (type == openvdb::typeNameAsString<Vec3<double> >())
            populateHoudiniDetailAttribute<Vec3<double> >(attrib, metaMap, key, index);
        else if (type == openvdb::typeNameAsString<Name>())
            populateHoudiniDetailAttribute<Name>(attrib, metaMap, key, index);
        else {
            std::stringstream ss;
            ss << "Metadata value \"" << key
                << "\" unsupported type for detail attribute conversion.";
            warnings(ss.str());
        }
    }
}


////////////////////////////////////////


// Tuple size of a Houdini attribute (string or numeric); 0 if unavailable.
int16_t
attributeTupleSize(const GA_Attribute* const attribute)
{
    if (!attribute) return int16_t(0);

    const GA_AIFTuple* tupleAIF = attribute->getAIFTuple();
    if (!tupleAIF) {
        const GA_AIFStringTuple* tupleAIFString = attribute->getAIFStringTuple();
        if (tupleAIFString) {
            return static_cast<int16_t>(tupleAIFString->getTupleSize(attribute));
        }
    } else {
        return static_cast<int16_t>(tupleAIF->getTupleSize(attribute));
    }

    return int16_t(0);
}

// Storage type of a Houdini attribute; GA_STORE_STRING for string tuples,
// GA_STORE_INVALID when it cannot be determined.
GA_Storage
attributeStorageType(const GA_Attribute* const attribute)
{
    if (!attribute) return GA_STORE_INVALID;

    const GA_AIFTuple* tupleAIF = attribute->getAIFTuple();
    if (!tupleAIF) {
        if (attribute->getAIFStringTuple()) {
            return GA_STORE_STRING;
        }
    } else {
        return tupleAIF->getStorage(attribute);
    }

    return GA_STORE_INVALID;
}


////////////////////////////////////////


// Build human-readable summaries (point count, group list, attribute list)
// for a PointDataGrid, e.g. for node info display.
void
collectPointInfo(const PointDataGrid& grid,
    std::string& countStr,
    std::string& groupStr,
    std::string& attributeStr)
{
    using AttributeSet = openvdb::points::AttributeSet;
    using Descriptor = openvdb::points::AttributeSet::Descriptor;

    const PointDataTree& tree = grid.constTree();

    // iterate through all leaf nodes to find out if all are out-of-core
    bool allOutOfCore = true;
    for (auto iter = tree.cbeginLeaf(); iter; ++iter) {
        if (!iter->buffer().isOutOfCore()) {
            allOutOfCore = false;
            break;
        }
    }

    openvdb::Index64 totalPointCount = 0;

    // it is more technically correct to rely on the voxel count as this may be
    // out of sync with the attribute size, however for faster node preview when
    // the voxel buffers are all out-of-core, count up the sizes of the first
    // attribute array instead
    if (allOutOfCore) {
        for (auto iter = tree.cbeginLeaf(); iter; ++iter) {
            if (iter->attributeSet().size() > 0) {
                totalPointCount += iter->constAttributeArray(0).size();
            }
        }
    } else {
        totalPointCount = openvdb::points::pointCount(tree);
    }

    std::ostringstream os;
    os << openvdb::util::formattedInt(totalPointCount);
    countStr = os.str();
    os.clear();
    os.str("");

    const auto iter = tree.cbeginLeaf();
    if (!iter) return;

    const AttributeSet& attributeSet = iter->attributeSet();
    const Descriptor& descriptor = attributeSet.descriptor();

    std::string viewportGroupName = "";
    if (StringMetadata::ConstPtr stringMeta =
        grid.getMetadata<StringMetadata>(META_GROUP_VIEWPORT)) {
        viewportGroupName = stringMeta->value();
    }

    const Descriptor::NameToPosMap& groupMap = descriptor.groupMap();

    bool first = true;
    for (const auto& it : groupMap) {
        if (first) first = false;
        else os << ", ";

        // add an asterisk as a viewport group indicator
        if (it.first == viewportGroupName) os << "*";

        os << it.first << "(";

        // for faster node preview when all the voxel buffers are out-of-core,
        // don't load the group arrays to display the group sizes, just print
        // "out-of-core" instead @todo - put the group sizes into the grid
        // metadata on write for this use case
        if (allOutOfCore) os << "out-of-core";
        else {
            const openvdb::points::GroupFilter filter(it.first, attributeSet);
            os << openvdb::util::formattedInt(pointCount(tree, filter));
        }

        os << ")";
    }

    groupStr = (os.str().empty() ? "none" : os.str());
    os.clear();
    os.str("");

    const Descriptor::NameToPosMap& nameToPosMap = descriptor.map();

    first = true;
    for (const auto& it : nameToPosMap) {
        const openvdb::points::AttributeArray& array = *(attributeSet.getConst(it.second));
        if (isGroup(array)) continue;

        if (first) first = false;
        else os << ", ";
        const openvdb::NamePair& type = descriptor.type(it.second);

        const openvdb::Name& codecType = type.second;

        if (isString(array)) {
            os << it.first << "[str]";
        } else {
            os << it.first << "[" << type.first;
            // if no value compression, hide the codec
            os << (codecType != "null" ? "_" + codecType : "");
            os << "]";
        }

        if (!array.hasConstantStride()) os << " [dynamic]";
        else if (array.stride() > 1) os << " [" << array.stride() << "]";
    }

    attributeStr = (os.str().empty() ?
"none" : os.str()); } void pointDataGridSpecificInfoText(std::ostream& infoStr, const GridBase& grid) { const PointDataGrid* pointDataGrid = dynamic_cast<const PointDataGrid*>(&grid); if (!pointDataGrid) return; // match native OpenVDB convention as much as possible infoStr << " voxel size: " << pointDataGrid->transform().voxelSize()[0] << ","; infoStr << " type: points,"; if (pointDataGrid->activeVoxelCount() != 0) { const Coord dim = grid.evalActiveVoxelDim(); infoStr << " dim: " << dim[0] << "x" << dim[1] << "x" << dim[2] << ","; } else { infoStr <<" <empty>,"; } std::string countStr, groupStr, attributeStr; collectPointInfo(*pointDataGrid, countStr, groupStr, attributeStr); infoStr << " count: " << countStr << ","; infoStr << " groups: " << groupStr << ","; infoStr << " attributes: " << attributeStr; } namespace { inline int lookupGroupInput(const PRM_SpareData* spare) { if (!spare) return 0; const char* istring = spare->getValue("sop_input"); return istring ? atoi(istring) : 0; } void sopBuildVDBPointsGroupMenu(void* data, PRM_Name* menuEntries, int /*themenusize*/, const PRM_SpareData* spare, const PRM_Parm* /*parm*/) { SOP_Node* sop = CAST_SOPNODE(static_cast<OP_Node*>(data)); int inputIndex = lookupGroupInput(spare); const GU_Detail* gdp = sop->getInputLastGeo(inputIndex, CHgetEvalTime()); // const cast as iterator requires non-const access, however data is not modified VdbPrimIterator vdbIt(const_cast<GU_Detail*>(gdp)); int n_entries = 0; for (; vdbIt; ++vdbIt) { GU_PrimVDB* vdbPrim = *vdbIt; PointDataGrid::ConstPtr grid = gridConstPtrCast<PointDataGrid>(vdbPrim->getConstGridPtr()); // ignore all but point data grids if (!grid) continue; auto leafIter = grid->tree().cbeginLeaf(); if (!leafIter) continue; const AttributeSet::Descriptor& descriptor = leafIter->attributeSet().descriptor(); for (const auto& it : descriptor.groupMap()) { // add each VDB Points group to the menu menuEntries[n_entries].setToken(it.first.c_str()); 
menuEntries[n_entries].setLabel(it.first.c_str()); n_entries++; } } // zero value ends the menu menuEntries[n_entries].setToken(0); menuEntries[n_entries].setLabel(0); } } // unnamed namespace #ifdef _MSC_VER OPENVDB_HOUDINI_API const PRM_ChoiceList VDBPointsGroupMenuInput1(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); OPENVDB_HOUDINI_API const PRM_ChoiceList VDBPointsGroupMenuInput2(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); OPENVDB_HOUDINI_API const PRM_ChoiceList VDBPointsGroupMenuInput3(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); OPENVDB_HOUDINI_API const PRM_ChoiceList VDBPointsGroupMenuInput4(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); OPENVDB_HOUDINI_API const PRM_ChoiceList VDBPointsGroupMenu(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); #else const PRM_ChoiceList VDBPointsGroupMenuInput1(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); const PRM_ChoiceList VDBPointsGroupMenuInput2(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); const PRM_ChoiceList VDBPointsGroupMenuInput3(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); const PRM_ChoiceList VDBPointsGroupMenuInput4(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); const PRM_ChoiceList VDBPointsGroupMenu(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); #endif } // namespace openvdb_houdini
68,670
C++
36.361806
147
0.622892
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Points_Delete.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file SOP_OpenVDB_Points_Delete.cc /// /// @author Francisco Gochez, Dan Bailey /// /// @brief Delete points that are members of specific groups #include <openvdb/openvdb.h> #include <openvdb/points/PointDataGrid.h> #include <openvdb/points/PointDelete.h> #include <UT/UT_Version.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb_houdini/PointUtils.h> #include <openvdb_houdini/Utils.h> #include <houdini_utils/geometry.h> #include <houdini_utils/ParmFactory.h> #include <algorithm> #include <stdexcept> #include <string> #include <vector> using namespace openvdb; using namespace openvdb::points; using namespace openvdb::math; namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; //////////////////////////////////////// class SOP_OpenVDB_Points_Delete: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Points_Delete(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Points_Delete() override = default; static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; protected: bool updateParmsFlags() override; }; // class SOP_OpenVDB_Points_Delete //////////////////////////////////////// // Build UI and register this operator. 
void newSopOperator(OP_OperatorTable* table) { openvdb::initialize(); if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setHelpText("Specify a subset of the input point data grids to delete from.") .setChoiceList(&hutil::PrimGroupMenu)); parms.add(hutil::ParmFactory(PRM_STRING, "vdbpointsgroups", "VDB Points Groups") .setHelpText("Specify VDB points groups to delete.") .setChoiceList(&hvdb::VDBPointsGroupMenuInput1)); parms.add(hutil::ParmFactory(PRM_TOGGLE, "invert", "Invert") .setDefault(PRMzeroDefaults) .setHelpText("Invert point deletion so that points not belonging to any of the " "groups will be deleted.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "dropgroups", "Drop Points Groups") .setDefault(PRMoneDefaults) .setHelpText("Drop the VDB points groups that were used for deletion. This option is " "ignored if \"invert\" is enabled.")); ////////// // Register this operator. hvdb::OpenVDBOpFactory("VDB Points Delete", SOP_OpenVDB_Points_Delete::factory, parms, *table) #if UT_VERSION_INT < 0x11050000 // earlier than 17.5.0 .setNativeName("") #endif .addInput("VDB Points") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Points_Delete::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Delete points that are members of specific groups.\"\"\"\n\ \n\ @overview\n\ \n\ The OpenVDB Points Delete SOP allows deletion of points that are members\n\ of a supplied group(s).\n\ An invert toggle may be enabled to allow deleting points that are not\n\ members of the supplied group(s).\n\ \n\ @related\n\ - [OpenVDB Points Convert|Node:sop/DW_OpenVDBPointsConvert]\n\ - [OpenVDB Points Group|Node:sop/DW_OpenVDBPointsGroup]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } bool SOP_OpenVDB_Points_Delete::updateParmsFlags() { const bool invert = evalInt("invert", 0, 0) != 0; return 
enableParm("dropgroups", !invert); } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Points_Delete::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Points_Delete(net, name, op); } SOP_OpenVDB_Points_Delete::SOP_OpenVDB_Points_Delete(OP_Network* net, const char* name, OP_Operator* op) : hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Points_Delete::Cache::cookVDBSop(OP_Context& context) { try { const std::string groups = evalStdString("vdbpointsgroups", context.getTime()); // early exit if the VDB points group field is empty if (groups.empty()) return error(); UT_AutoInterrupt progress("Processing points group deletion"); const bool invert = evalInt("invert", 0, context.getTime()); const bool drop = evalInt("dropgroups", 0, context.getTime()); // select Houdini primitive groups we wish to use hvdb::VdbPrimIterator vdbIt(gdp, matchGroup(*gdp, evalStdString("group", context.getTime()))); for (; vdbIt; ++vdbIt) { if (progress.wasInterrupted()) { throw std::runtime_error("processing was interrupted"); } GU_PrimVDB* vdbPrim = *vdbIt; // Limit the lifetime of our const shared copies so // we don't have false-sharing when we go to make the // grid unique. 
std::vector<std::string> pointGroups; { PointDataGrid::ConstPtr inputGrid = openvdb::gridConstPtrCast<PointDataGrid>(vdbPrim->getConstGridPtr()); // early exit if the grid is of the wrong type if (!inputGrid) continue; // early exit if the tree is empty auto leafIter = inputGrid->tree().cbeginLeaf(); if (!leafIter) continue; // extract names of all selected VDB groups // the "exclude groups" parameter to parseNames is not used in this context, // so we disregard it by storing it in a temporary variable std::vector<std::string> tmp; AttributeSet::Descriptor::parseNames(pointGroups, tmp, groups); // determine in any of the requested groups are actually present in the tree const AttributeSet::Descriptor& descriptor = leafIter->attributeSet().descriptor(); const bool hasPointsToDrop = std::any_of(pointGroups.begin(), pointGroups.end(), [&descriptor](const std::string& group) { return descriptor.hasGroup(group); }); if (!hasPointsToDrop) continue; } // deep copy the VDB tree if it is not already unique vdbPrim->makeGridUnique(); PointDataGrid& outputGrid = UTvdbGridCast<PointDataGrid>(vdbPrim->getGrid()); deleteFromGroups(outputGrid.tree(), pointGroups, invert, drop); } } catch (const std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
6,709
C++
29.639269
100
0.636011
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_To_Polygons.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_To_Polygons.cc /// /// @author FX R&D OpenVDB team /// /// @brief OpenVDB level set to polygon conversion #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/AttributeTransferUtil.h> #include <openvdb_houdini/GeometryUtil.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/GeometryUtil.h> #include <openvdb/tools/VolumeToMesh.h> #include <openvdb/tools/Mask.h> // for tools::interiorMask() #include <openvdb/tools/MeshToVolume.h> #include <openvdb/tools/Morphology.h> #include <openvdb/tools/LevelSetUtil.h> #include <openvdb/tools/Prune.h> #include <openvdb/math/Operators.h> #include <openvdb/math/Mat3.h> #include <CH/CH_Manager.h> #include <GA/GA_PageIterator.h> #include <GEO/GEO_PolyCounts.h> #include <GU/GU_ConvertParms.h> #include <GU/GU_Detail.h> #include <GU/GU_PolyReduce.h> #include <GU/GU_PrimPoly.h> #include <GU/GU_PrimPolySoup.h> #include <GU/GU_Surfacer.h> #include <PRM/PRM_Parm.h> #include <UT/UT_Interrupt.h> #include <UT/UT_UniquePtr.h> #include <hboost/algorithm/string/join.hpp> #include <list> #include <memory> #include <stdexcept> #include <string> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; //////////////////////////////////////// class SOP_OpenVDB_To_Polygons: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_To_Polygons(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_To_Polygons() override {} static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned i) const override { return (i > 0); } class Cache: public SOP_VDBCacheOptions { protected: OP_ERROR cookVDBSop(OP_Context&) override; template<class GridType> void referenceMeshing( std::list<openvdb::GridBase::ConstPtr>&, openvdb::tools::VolumeToMesh&, const GU_Detail* refGeo, hvdb::Interrupter&, const fpreal time); }; protected: bool 
updateParmsFlags() override; void resolveObsoleteParms(PRM_ParmList*) override; }; void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Specify a subset of the input VDB grids to surface.") .setDocumentation( "A subset of the input VDB grids to be surfaced" " (see [specifying volumes|/model/volumes#group])")); // Geometry Type parms.add(hutil::ParmFactory(PRM_ORD, "geometrytype", "Geometry Type") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "polysoup", "Polygon Soup", "poly", "Polygons" }) .setTooltip( "Specify the type of geometry to output. A polygon soup is a primitive" " that stores polygons using a compact memory representation." " Not all geometry nodes can operate directly on this primitive.") .setDocumentation( "The type of geometry to output, either polygons or a polygon soup\n\n" "A [polygon soup|/model/primitives#polysoup] is a primitive" " that stores polygons using a compact memory representation.\n\n" "WARNING:\n" " Not all geometry nodes can operate directly on polygon soups.\n")); parms.add(hutil::ParmFactory(PRM_FLT_J, "isovalue", "Isovalue") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_UI, -1.0, PRM_RANGE_UI, 1.0) .setTooltip( "The voxel value that determines the surface\n\n" "Zero works for signed distance fields, while fog volumes require" " a larger positive value (0.5 is a good initial guess).")); parms.add(hutil::ParmFactory(PRM_FLT_J, "adaptivity", "Adaptivity") .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_RESTRICTED, 1.0) .setTooltip( "The adaptivity threshold determines how closely the output mesh follows" " the isosurface. 
A higher threshold enables more variation in polygon size," " allowing the surface to be represented with fewer polygons.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "computenormals", "Compute Vertex Normals") .setTooltip("Compute edge-preserving vertex normals.") .setDocumentation( "Compute edge-preserving vertex normals." " This uses the optional second input to help eliminate seams.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "keepvdbname", "Preserve VDB Name") .setTooltip("Mark each primitive with the corresponding VDB name.")); ////////// parms.add(hutil::ParmFactory(PRM_HEADING,"sep1", "Reference Options")); parms.add(hutil::ParmFactory(PRM_FLT_J, "internaladaptivity", "Internal Adaptivity") .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_RESTRICTED, 1.0) .setTooltip("Overrides the adaptivity threshold for all internal surfaces.") .setDocumentation( "When a reference surface is provided, this is the adaptivity threshold" " for regions that are inside the surface.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "transferattributes", "Transfer Surface Attributes") .setTooltip( "Transfer all attributes (primitive, vertex and point) from the reference surface.") .setDocumentation( "When a reference surface is provided, this option transfers all attributes\n" "(primitive, vertex and point) from the reference surface to the output geometry.\n" "\n" "NOTE:\n" " Primitive attribute values can't meaningfully be transferred to a\n" " polygon soup, because the entire polygon soup is a single primitive.\n" "\n" "NOTE:\n" " Computed vertex normals for primitives in the surface group\n" " will be overridden.\n")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "sharpenfeatures", "Sharpen Features") .setDefault(PRMoneDefaults) .setTooltip("Sharpen edges and corners.")); parms.add(hutil::ParmFactory(PRM_FLT_J, "edgetolerance", "Edge Tolerance") .setDefault(0.5) .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_RESTRICTED, 1.0) .setTooltip("Controls the edge adaptivity mask.")); 
parms.add(hutil::ParmFactory(PRM_STRING, "surfacegroup", "Surface Group") .setDefault("surface_polygons") .setTooltip( "Specify a group for all polygons that are coincident with the reference surface.\n\n" "The group is useful for transferring attributes such as UV coordinates," " normals, etc. from the reference surface.")); parms.add(hutil::ParmFactory(PRM_STRING, "interiorgroup", "Interior Group") .setDefault("interior_polygons") .setTooltip( "Specify a group for all polygons that are interior to the reference surface.\n\n" "The group can be used to identify surface regions that might require" " projected UV coordinates or new materials.")); parms.add(hutil::ParmFactory(PRM_STRING, "seamlinegroup", "Seam Line Group") .setDefault("seam_polygons") .setTooltip( "Specify a group for all polygons that are in proximity to the seam lines.\n\n" "This group can be used to drive secondary elements such as debris and dust.")); parms.add(hutil::ParmFactory(PRM_STRING, "seampoints", "Seam Points") .setDefault("seam_points") .setTooltip( "Specify a group of the fracture seam points.\n\n" "This can be used to drive local pre-fracture dynamics," " e.g., local surface buckling.")); ////////// parms.add(hutil::ParmFactory(PRM_HEADING,"sep2", "Masking Options")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "surfacemask", "") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip("Enable / disable the surface mask.")); parms.add(hutil::ParmFactory(PRM_STRING, "surfacemaskname", "Surface Mask") .setChoiceList(&hutil::PrimGroupMenuInput3) .setTooltip( "A single VDB whose active voxels or (if the VDB is a level set or SDF)\n" "interior voxels define the region to be meshed")); parms.add(hutil::ParmFactory(PRM_FLT_J, "surfacemaskoffset", "Offset") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_UI, -1.0, PRM_RANGE_UI, 1.0) .setTooltip( "Isovalue that determines the interior of the surface mask\n" "when the mask is a level set or SDF")); 
parms.add(hutil::ParmFactory(PRM_TOGGLE, "invertsurfacemask", "Invert Surface Mask") .setTooltip("If enabled, mesh the complement of the mask.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "adaptivityfield", "") .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip("Enable / disable the the adaptivity field.")); parms.add(hutil::ParmFactory(PRM_STRING, "adaptivityfieldname", "Adaptivity Field") .setChoiceList(&hutil::PrimGroupMenuInput3) .setTooltip( "A single scalar VDB to be used as a spatial multiplier" " for the adaptivity threshold")); ////////// hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "smoothseams", "Smooth Seams")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "invertmask", "").setDefault(PRMoneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_INT_J, "automaticpartitions", "")); obsoleteParms.add(hutil::ParmFactory(PRM_INT_J, "activepart", "")); hvdb::OpenVDBOpFactory("VDB to Polygons", SOP_OpenVDB_To_Polygons::factory, parms, *table) .setNativeName("") #ifndef SESI_OPENVDB .setInternalName("DW_OpenVDBToPolygons") #endif .setObsoleteParms(obsoleteParms) .addInput("OpenVDB grids to surface") .addOptionalInput("Optional reference surface. 
Can be used " "to transfer attributes, sharpen features and to " "eliminate seams from fractured pieces.") .addOptionalInput("Optional VDB masks") .setVerb(SOP_NodeVerb::COOK_GENERATOR, []() { return new SOP_OpenVDB_To_Polygons::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Convert VDB volumes into polygonal meshes.\"\"\"\n\ \n\ @overview\n\ \n\ This node converts the surfaces of VDB volumes, including level sets,\n\ into polygonal meshes.\n\ \n\ The second and third inputs are optional.\n\ The second input provides a reference polygon surface, which is useful\n\ for converting fractured VDBs back to polygons.\n\ The third input provides additional VDBs that can be used for masking\n\ (specifying which voxels to convert to polygons) and/or to specify\n\ an adaptivity multiplier.\n\ \n\ @related\n\ - [OpenVDB Convert|Node:sop/DW_OpenVDBConvert]\n\ - [OpenVDB Create|Node:sop/DW_OpenVDBCreate]\n\ - [OpenVDB From Particles|Node:sop/DW_OpenVDBFromParticles]\n\ - [Node:sop/convert]\n\ - [Node:sop/convertvolume]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } OP_Node* SOP_OpenVDB_To_Polygons::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_To_Polygons(net, name, op); } SOP_OpenVDB_To_Polygons::SOP_OpenVDB_To_Polygons(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } void SOP_OpenVDB_To_Polygons::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; // using the invertmask attribute to detect old houdini files that // had the regular polygon representation. 
PRM_Parm* parm = obsoleteParms->getParmPtr("invertmask"); if (parm && !parm->isFactoryDefault()) { setInt("geometrytype", 0, 0, 1); } hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } bool SOP_OpenVDB_To_Polygons::updateParmsFlags() { bool changed = false; const fpreal time = CHgetEvalTime(); const bool refexists = (nInputs() == 2); bool usePolygonSoup = evalInt("geometrytype", 0, time) == 0; changed |= enableParm("computenormals", !usePolygonSoup); changed |= enableParm("internaladaptivity", refexists); changed |= enableParm("surfacegroup", refexists); changed |= enableParm("interiorgroup", refexists); changed |= enableParm("seamlinegroup", refexists); changed |= enableParm("seampoints", refexists); changed |= enableParm("transferattributes", refexists); changed |= enableParm("sharpenfeatures", refexists); changed |= enableParm("edgetolerance", refexists); const bool maskexists = (nInputs() == 3); changed |= enableParm("surfacemask", maskexists); changed |= enableParm("adaptivitymask", maskexists); const bool surfacemask = bool(evalInt("surfacemask", 0, 0)); changed |= enableParm("surfacemaskname", maskexists && surfacemask); changed |= enableParm("surfacemaskoffset", maskexists && surfacemask); changed |= enableParm("invertsurfacemask", maskexists && surfacemask); const bool adaptivitymask = bool(evalInt("adaptivityfield", 0, 0)); changed |= enableParm("adaptivityfieldname", maskexists && adaptivitymask); return changed; } //////////////////////////////////////// void copyMesh(GU_Detail&, openvdb::tools::VolumeToMesh&, hvdb::Interrupter&, const bool usePolygonSoup = true, const char* gridName = nullptr, GA_PrimitiveGroup* surfaceGroup = nullptr, GA_PrimitiveGroup* interiorGroup = nullptr, GA_PrimitiveGroup* seamGroup = nullptr, GA_PointGroup* seamPointGroup = nullptr); void copyMesh( GU_Detail& detail, openvdb::tools::VolumeToMesh& mesher, hvdb::Interrupter&, const bool usePolygonSoup, const char* gridName, GA_PrimitiveGroup* surfaceGroup, 
GA_PrimitiveGroup* interiorGroup, GA_PrimitiveGroup* seamGroup, GA_PointGroup* seamPointGroup) { const openvdb::tools::PointList& points = mesher.pointList(); openvdb::tools::PolygonPoolList& polygonPoolList = mesher.polygonPoolList(); const char exteriorFlag = char(openvdb::tools::POLYFLAG_EXTERIOR); const char seamLineFlag = char(openvdb::tools::POLYFLAG_FRACTURE_SEAM); const GA_Index firstPrim = detail.getNumPrimitives(); GA_Size npoints = mesher.pointListSize(); const GA_Offset startpt = detail.appendPointBlock(npoints); UT_ASSERT_COMPILETIME(sizeof(openvdb::tools::PointList::element_type) == sizeof(UT_Vector3)); GA_RWHandleV3 pthandle(detail.getP()); pthandle.setBlock(startpt, npoints, reinterpret_cast<UT_Vector3*>(points.get())); // group fracture seam points if (seamPointGroup && GA_Size(mesher.pointFlags().size()) == npoints) { GA_Offset ptoff = startpt; for (GA_Size i = 0; i < npoints; ++i) { if (mesher.pointFlags()[i]) { seamPointGroup->addOffset(ptoff); } ++ptoff; } } // index 0 --> interior, not on seam // index 1 --> interior, on seam // index 2 --> surface, not on seam // index 3 --> surface, on seam GA_Size nquads[4] = {0, 0, 0, 0}; GA_Size ntris[4] = {0, 0, 0, 0}; for (size_t n = 0, N = mesher.polygonPoolListSize(); n < N; ++n) { const openvdb::tools::PolygonPool& polygons = polygonPoolList[n]; for (size_t i = 0, I = polygons.numQuads(); i < I; ++i) { int flags = (((polygons.quadFlags(i) & exteriorFlag)!=0) << 1) | ((polygons.quadFlags(i) & seamLineFlag)!=0); ++nquads[flags]; } for (size_t i = 0, I = polygons.numTriangles(); i < I; ++i) { int flags = (((polygons.triangleFlags(i) & exteriorFlag)!=0) << 1) | ((polygons.triangleFlags(i) & seamLineFlag)!=0); ++ntris[flags]; } } GA_Size nverts[4] = { nquads[0]*4 + ntris[0]*3, nquads[1]*4 + ntris[1]*3, nquads[2]*4 + ntris[2]*3, nquads[3]*4 + ntris[3]*3 }; UT_IntArray verts[4]; for (int flags = 0; flags < 4; ++flags) { verts[flags].setCapacity(nverts[flags]); verts[flags].entries(nverts[flags]); } GA_Size 
iquad[4] = {0, 0, 0, 0}; GA_Size itri[4] = {nquads[0]*4, nquads[1]*4, nquads[2]*4, nquads[3]*4}; for (size_t n = 0, N = mesher.polygonPoolListSize(); n < N; ++n) { const openvdb::tools::PolygonPool& polygons = polygonPoolList[n]; // Copy quads for (size_t i = 0, I = polygons.numQuads(); i < I; ++i) { const openvdb::Vec4I& quad = polygons.quad(i); int flags = (((polygons.quadFlags(i) & exteriorFlag)!=0) << 1) | ((polygons.quadFlags(i) & seamLineFlag)!=0); verts[flags](iquad[flags]++) = quad[0]; verts[flags](iquad[flags]++) = quad[1]; verts[flags](iquad[flags]++) = quad[2]; verts[flags](iquad[flags]++) = quad[3]; } // Copy triangles (adaptive mesh) for (size_t i = 0, I = polygons.numTriangles(); i < I; ++i) { const openvdb::Vec3I& triangle = polygons.triangle(i); int flags = (((polygons.triangleFlags(i) & exteriorFlag)!=0) << 1) | ((polygons.triangleFlags(i) & seamLineFlag)!=0); verts[flags](itri[flags]++) = triangle[0]; verts[flags](itri[flags]++) = triangle[1]; verts[flags](itri[flags]++) = triangle[2]; } } bool shared_vertices = true; if (usePolygonSoup) { // NOTE: Since we could be using the same points for multiple // polysoups, and the shared vertices option assumes that // the points are only used by this polysoup, we have to // use the unique vertices option. 
int num_prims = 0; for (int flags = 0; flags < 4; ++flags) { if (!nquads[flags] && !ntris[flags]) continue; num_prims++; } shared_vertices = (num_prims <= 1); } for (int flags = 0; flags < 4; ++flags) { if (!nquads[flags] && !ntris[flags]) continue; GEO_PolyCounts sizelist; if (nquads[flags]) sizelist.append(4, nquads[flags]); if (ntris[flags]) sizelist.append(3, ntris[flags]); GA_Detail::OffsetMarker marker(detail); if (usePolygonSoup) { GU_PrimPolySoup::build( &detail, startpt, npoints, sizelist, verts[flags].array(), shared_vertices); } else { GU_PrimPoly::buildBlock(&detail, startpt, npoints, sizelist, verts[flags].array()); } GA_Range range(marker.primitiveRange()); //GA_Range pntRange(marker.pointRange()); /*GU_ConvertParms parms; parms.preserveGroups = true; GUconvertCopySingleVertexPrimAttribsAndGroups(parms, *srcvdb->getParent(), srcvdb->getMapOffset(), detail, range, pntRange);*/ //if (delgroup) delgroup->removeRange(range); if (seamGroup && (flags & 1)) seamGroup->addRange(range); if (surfaceGroup && (flags & 2)) surfaceGroup->addRange(range); if (interiorGroup && !(flags & 2)) interiorGroup->addRange(range); } // Keep VDB grid name const GA_Index lastPrim = detail.getNumPrimitives(); if (gridName != nullptr && firstPrim != lastPrim) { GA_RWAttributeRef aRef = detail.findPrimitiveAttribute("name"); if (!aRef.isValid()) aRef = detail.addStringTuple(GA_ATTRIB_PRIMITIVE, "name", 1); GA_Attribute * nameAttr = aRef.getAttribute(); if (nameAttr) { const GA_AIFSharedStringTuple * stringAIF = nameAttr->getAIFSharedStringTuple(); if (stringAIF) { GA_Range range(detail.getPrimitiveMap(), detail.primitiveOffset(firstPrim), detail.primitiveOffset(lastPrim)); stringAIF->setString(nameAttr, range, gridName, 0); } } } } //////////////////////////////////////// namespace { struct InteriorMaskOp { InteriorMaskOp(double iso = 0.0): inIsovalue(iso) {} template<typename GridType> void operator()(const GridType& grid) { outGridPtr = openvdb::tools::interiorMask(grid, 
inIsovalue); } const double inIsovalue; openvdb::BoolGrid::Ptr outGridPtr; }; // Extract a boolean mask from a grid of any type. inline hvdb::GridCPtr getMaskFromGrid(const hvdb::GridCPtr& gridPtr, double isovalue = 0.0) { hvdb::GridCPtr maskGridPtr; if (gridPtr) { if (gridPtr->isType<openvdb::BoolGrid>()) { // If the input grid is already boolean, return it. maskGridPtr = gridPtr; } else { InteriorMaskOp op{isovalue}; gridPtr->apply<hvdb::AllGridTypes>(op); maskGridPtr = op.outGridPtr; } } return maskGridPtr; } } // unnamed namespace //////////////////////////////////////// OP_ERROR SOP_OpenVDB_To_Polygons::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); hvdb::Interrupter boss("Surfacing VDB primitives"); const GU_Detail* vdbGeo = inputGeo(0); if (vdbGeo == nullptr) return error(); // Get the group of grids to surface. const GA_PrimitiveGroup* group = matchGroup(*vdbGeo, evalStdString("group", time)); hvdb::VdbPrimCIterator vdbIt(vdbGeo, group); if (!vdbIt) { addWarning(SOP_MESSAGE, "No VDB primitives found."); return error(); } // Eval attributes const bool usePolygonSoup = evalInt("geometrytype", 0, time) == 0; const double adaptivity = double(evalFloat("adaptivity", 0, time)); const double iso = double(evalFloat("isovalue", 0, time)); const bool computeNormals = !usePolygonSoup && evalInt("computenormals", 0, time); const bool keepVdbName = evalInt("keepvdbname", 0, time); const float maskoffset = static_cast<float>(evalFloat("surfacemaskoffset", 0, time)); const bool invertmask = evalInt("invertsurfacemask", 0, time); // Setup level set mesher openvdb::tools::VolumeToMesh mesher(iso, adaptivity); // Check mask input const GU_Detail* maskGeo = inputGeo(2); if (maskGeo) { if (evalInt("surfacemask", 0, time)) { const auto maskStr = evalStdString("surfacemaskname", time); const GA_PrimitiveGroup* maskGroup = parsePrimitiveGroups(maskStr.c_str(), GroupCreator(maskGeo)); if (!maskGroup && !maskStr.empty()) { 
addWarning(SOP_MESSAGE, "Surface mask not found."); } else { hvdb::VdbPrimCIterator maskIt(maskGeo, maskGroup); if (maskIt) { if (auto maskGridPtr = getMaskFromGrid(maskIt->getGridPtr(), maskoffset)) { mesher.setSurfaceMask(maskGridPtr, invertmask); } else { std::string mesg = "Surface mask " + maskIt.getPrimitiveNameOrIndex().toStdString() + " of type " + maskIt->getGrid().type() + " is not supported."; addWarning(SOP_MESSAGE, mesg.c_str()); } } } } if (evalInt("adaptivityfield", 0, time)) { const auto maskStr = evalStdString("adaptivityfieldname", time); const GA_PrimitiveGroup* maskGroup = matchGroup(*maskGeo, maskStr); if (!maskGroup && !maskStr.empty()) { addWarning(SOP_MESSAGE, "Adaptivity field not found."); } else { hvdb::VdbPrimCIterator maskIt(maskGeo, maskGroup); if (maskIt) { openvdb::FloatGrid::ConstPtr grid = openvdb::gridConstPtrCast<openvdb::FloatGrid>(maskIt->getGridPtr()); mesher.setSpatialAdaptivity(grid); } } } } // Check reference input const GU_Detail* refGeo = inputGeo(1); if (refGeo) { // Collect all level set grids. std::list<openvdb::GridBase::ConstPtr> grids; std::vector<std::string> nonLevelSetList, nonLinearList; for (; vdbIt; ++vdbIt) { if (boss.wasInterrupted()) break; const openvdb::GridClass gridClass = vdbIt->getGrid().getGridClass(); if (gridClass != openvdb::GRID_LEVEL_SET) { nonLevelSetList.push_back(vdbIt.getPrimitiveNameOrIndex().toStdString()); continue; } if (!vdbIt->getGrid().transform().isLinear()) { nonLinearList.push_back(vdbIt.getPrimitiveNameOrIndex().toStdString()); continue; } // (We need a shallow copy to sync primitive & grid names). 
grids.push_back(vdbIt->getGrid().copyGrid()); openvdb::ConstPtrCast<openvdb::GridBase>(grids.back())->setName( vdbIt->getGridName()); } if (!nonLevelSetList.empty()) { std::string s = "Reference meshing is only supported for " "Level Set grids, the following grids were skipped: '" + hboost::algorithm::join(nonLevelSetList, ", ") + "'."; addWarning(SOP_MESSAGE, s.c_str()); } if (!nonLinearList.empty()) { std::string s = "The following grids were skipped: '" + hboost::algorithm::join(nonLinearList, ", ") + "' because they don't have a linear/affine transform."; addWarning(SOP_MESSAGE, s.c_str()); } // Mesh using a reference surface if (!grids.empty() && !boss.wasInterrupted()) { if (grids.front()->isType<openvdb::FloatGrid>()) { referenceMeshing<openvdb::FloatGrid>(grids, mesher, refGeo, boss, time); } else if (grids.front()->isType<openvdb::DoubleGrid>()) { referenceMeshing<openvdb::DoubleGrid>(grids, mesher, refGeo, boss, time); } else { addError(SOP_MESSAGE, "Unsupported grid type."); } } } else { // Mesh each VDB primitive independently for (; vdbIt; ++vdbIt) { if (boss.wasInterrupted()) break; hvdb::GEOvdbApply<hvdb::ScalarGridTypes>(**vdbIt, mesher); copyMesh(*gdp, mesher, boss, usePolygonSoup, keepVdbName ? 
vdbIt.getPrimitive()->getGridName() : nullptr); } if (!boss.wasInterrupted() && computeNormals) { UTparallelFor(GA_SplittableRange(gdp->getPrimitiveRange()), hvdb::VertexNormalOp(*gdp)); } } if (boss.wasInterrupted()) { addWarning(SOP_MESSAGE, "Process was interrupted"); } boss.end(); } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); } template<class GridType> void SOP_OpenVDB_To_Polygons::Cache::referenceMeshing( std::list<openvdb::GridBase::ConstPtr>& grids, openvdb::tools::VolumeToMesh& mesher, const GU_Detail* refGeo, hvdb::Interrupter& boss, const fpreal time) { if (refGeo == nullptr) return; const bool usePolygonSoup = evalInt("geometrytype", 0, time) == 0; const bool computeNormals = !usePolygonSoup && evalInt("computenormals", 0, time); const bool transferAttributes = evalInt("transferattributes", 0, time); const bool keepVdbName = evalInt("keepvdbname", 0, time); const bool sharpenFeatures = evalInt("sharpenfeatures", 0, time); const float edgetolerance = static_cast<float>(evalFloat("edgetolerance", 0, time)); using TreeType = typename GridType::TreeType; using ValueType = typename GridType::ValueType; // Get the first grid's transform and background value. 
openvdb::math::Transform::Ptr transform = grids.front()->transform().copy(); typename GridType::ConstPtr firstGrid = openvdb::gridConstPtrCast<GridType>(grids.front()); if (!firstGrid) { addError(SOP_MESSAGE, "Unsupported grid type."); return; } const ValueType backgroundValue = firstGrid->background(); const openvdb::GridClass gridClass = firstGrid->getGridClass(); typename GridType::ConstPtr refGrid; using IntGridT = typename GridType::template ValueConverter<openvdb::Int32>::Type; typename IntGridT::Ptr indexGrid; // replace openvdb::tools::MeshToVoxelEdgeData edgeData; # if 0 // Check for reference VDB { const GA_PrimitiveGroup *refGroup = matchGroup(*refGeo, ""); hvdb::VdbPrimCIterator refIt(refGeo, refGroup); if (refIt) { const openvdb::GridClass refClass = refIt->getGrid().getGridClass(); if (refIt && refClass == openvdb::GRID_LEVEL_SET) { refGrid = openvdb::gridConstPtrCast<GridType>(refIt->getGridPtr()); } } } #endif // Check for reference mesh UT_UniquePtr<GU_Detail> geoPtr; if (!refGrid) { std::string warningStr; geoPtr = hvdb::convertGeometry(*refGeo, warningStr, &boss); if (geoPtr) { refGeo = geoPtr.get(); if (!warningStr.empty()) addWarning(SOP_MESSAGE, warningStr.c_str()); } std::vector<openvdb::Vec3s> pointList; std::vector<openvdb::Vec4I> primList; pointList.resize(refGeo->getNumPoints()); primList.resize(refGeo->getNumPrimitives()); UTparallelFor(GA_SplittableRange(refGeo->getPointRange()), hvdb::TransformOp(refGeo, *transform, pointList)); UTparallelFor(GA_SplittableRange(refGeo->getPrimitiveRange()), hvdb::PrimCpyOp(refGeo, primList)); if (boss.wasInterrupted()) return; openvdb::tools::QuadAndTriangleDataAdapter<openvdb::Vec3s, openvdb::Vec4I> mesh(pointList, primList); float bandWidth = 3.0; if (gridClass != openvdb::GRID_LEVEL_SET) { bandWidth = float(backgroundValue) / float(transform->voxelSize()[0]); } indexGrid.reset(new IntGridT(0)); refGrid = openvdb::tools::meshToVolume<GridType>(boss, mesh, *transform, bandWidth, bandWidth, 0, 
indexGrid.get()); if (sharpenFeatures) edgeData.convert(pointList, primList); } if (boss.wasInterrupted()) return; using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type; typename BoolTreeType::Ptr maskTree; if (sharpenFeatures) { maskTree = typename BoolTreeType::Ptr(new BoolTreeType(false)); maskTree->topologyUnion(indexGrid->tree()); openvdb::tree::LeafManager<BoolTreeType> maskLeafs(*maskTree); hvdb::GenAdaptivityMaskOp<typename IntGridT::TreeType, BoolTreeType> op(*refGeo, indexGrid->tree(), maskLeafs, edgetolerance); op.run(); openvdb::tools::pruneInactive(*maskTree); openvdb::tools::dilateVoxels(*maskTree, 2); mesher.setAdaptivityMask(maskTree); } if (boss.wasInterrupted()) return; const double iadaptivity = double(evalFloat("internaladaptivity", 0, time)); mesher.setRefGrid(refGrid, iadaptivity); std::vector<std::string> badTransformList, badBackgroundList, badTypeList; GA_PrimitiveGroup *surfaceGroup = nullptr, *interiorGroup = nullptr, *seamGroup = nullptr; GA_PointGroup* seamPointGroup = nullptr; { UT_String newGroupStr; evalString(newGroupStr, "surfacegroup", 0, time); if(newGroupStr.length() > 0) { surfaceGroup = gdp->findPrimitiveGroup(newGroupStr); if (!surfaceGroup) surfaceGroup = gdp->newPrimitiveGroup(newGroupStr); } evalString(newGroupStr, "interiorgroup", 0, time); if(newGroupStr.length() > 0) { interiorGroup = gdp->findPrimitiveGroup(newGroupStr); if (!interiorGroup) interiorGroup = gdp->newPrimitiveGroup(newGroupStr); } evalString(newGroupStr, "seamlinegroup", 0, time); if(newGroupStr.length() > 0) { seamGroup = gdp->findPrimitiveGroup(newGroupStr); if (!seamGroup) seamGroup = gdp->newPrimitiveGroup(newGroupStr); } evalString(newGroupStr, "seampoints", 0, time); if(newGroupStr.length() > 0) { seamPointGroup = gdp->findPointGroup(newGroupStr); if (!seamPointGroup) seamPointGroup = gdp->newPointGroup(newGroupStr); } } for (auto it = grids.begin(); it != grids.end(); ++it) { if (boss.wasInterrupted()) break; typename 
GridType::ConstPtr grid = openvdb::gridConstPtrCast<GridType>(*it); if (!grid) { badTypeList.push_back(grid->getName()); continue; } if (grid->transform() != *transform) { badTransformList.push_back(grid->getName()); continue; } if (!openvdb::math::isApproxEqual(grid->background(), backgroundValue)) { badBackgroundList.push_back(grid->getName()); continue; } mesher(*grid); copyMesh(*gdp, mesher, boss, usePolygonSoup, keepVdbName ? grid->getName().c_str() : nullptr, surfaceGroup, interiorGroup, seamGroup, seamPointGroup); } grids.clear(); // Sharpen Features if (!boss.wasInterrupted() && sharpenFeatures) { UTparallelFor(GA_SplittableRange(gdp->getPointRange()), hvdb::SharpenFeaturesOp( *gdp, *refGeo, edgeData, *transform, surfaceGroup, maskTree.get())); } // Compute vertex normals if (!boss.wasInterrupted() && computeNormals) { UTparallelFor(GA_SplittableRange(gdp->getPrimitiveRange()), hvdb::VertexNormalOp(*gdp, interiorGroup, (transferAttributes ? -1.0f : 0.7f) )); if (!interiorGroup) { addWarning(SOP_MESSAGE, "More accurate vertex normals can be generated " "if the interior polygon group is enabled."); } } // Transfer primitive attributes if (!boss.wasInterrupted() && transferAttributes && refGeo && indexGrid) { hvdb::transferPrimitiveAttributes(*refGeo, *gdp, *indexGrid, boss, surfaceGroup); } if (!badTransformList.empty()) { std::string s = "The following grids were skipped: '" + hboost::algorithm::join(badTransformList, ", ") + "' because they don't match the transform of the first grid."; addWarning(SOP_MESSAGE, s.c_str()); } if (!badBackgroundList.empty()) { std::string s = "The following grids were skipped: '" + hboost::algorithm::join(badBackgroundList, ", ") + "' because they don't match the background value of the first grid."; addWarning(SOP_MESSAGE, s.c_str()); } if (!badTypeList.empty()) { std::string s = "The following grids were skipped: '" + hboost::algorithm::join(badTypeList, ", ") + "' because they don't have the same data type as the first 
grid."; addWarning(SOP_MESSAGE, s.c_str()); } }
35,996
C++
36.263975
99
0.613679
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Combine.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Combine.cc /// /// @author FX R&D OpenVDB team #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/math/Math.h> // for isFinite() #include <openvdb/tools/ChangeBackground.h> #include <openvdb/tools/Composite.h> #include <openvdb/tools/GridTransformer.h> // for resampleToMatch() #include <openvdb/tools/LevelSetRebuild.h> // for levelSetRebuild() #include <openvdb/tools/Morphology.h> // for deactivate() #include <openvdb/tools/Prune.h> #include <openvdb/tools/SignedFloodFill.h> #include <openvdb/util/NullInterrupter.h> #include <PRM/PRM_Parm.h> #include <UT/UT_Interrupt.h> #include <algorithm> // for std::min() #include <cctype> // for isspace() #include <iomanip> #include <set> #include <sstream> #include <stdexcept> #include <string> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; namespace { // // Operations // enum Operation { OP_COPY_A, // A OP_COPY_B, // B OP_INVERT, // 1 - A OP_ADD, // A + B OP_SUBTRACT, // A - B OP_MULTIPLY, // A * B OP_DIVIDE, // A / B OP_MAXIMUM, // max(A, B) OP_MINIMUM, // min(A, B) OP_BLEND1, // (1 - A) * B OP_BLEND2, // A + (1 - A) * B OP_UNION, // CSG A u B OP_INTERSECTION, // CSG A n B OP_DIFFERENCE, // CSG A / B OP_REPLACE, // replace A with B OP_TOPO_UNION, // A u active(B) OP_TOPO_INTERSECTION, // A n active(B) OP_TOPO_DIFFERENCE // A / active(B) }; enum { OP_FIRST = OP_COPY_A, OP_LAST = OP_TOPO_DIFFERENCE }; //#define TIMES " \xd7 " // ISO-8859 multiplication symbol #define TIMES " * " const char* const sOpMenuItems[] = { "copya", "Copy A", "copyb", "Copy B", "inverta", "Invert A", "add", "Add", "subtract", "Subtract", "multiply", "Multiply", "divide", "Divide", "maximum", "Maximum", "minimum", "Minimum", "compatimesb", "(1 - A)" TIMES "B", "apluscompatimesb", "A + (1 - A)" TIMES "B", "sdfunion", "SDF 
Union", "sdfintersect", "SDF Intersection", "sdfdifference", "SDF Difference", "replacewithactive", "Replace A with Active B", "topounion", "Activity Union", "topointersect", "Activity Intersection", "topodifference", "Activity Difference", nullptr }; #undef TIMES inline Operation asOp(int i, Operation defaultOp = OP_COPY_A) { return (i >= OP_FIRST && i <= OP_LAST) ? static_cast<Operation>(i) : defaultOp; } inline bool needAGrid(Operation op) { return (op != OP_COPY_B); } inline bool needBGrid(Operation op) { return (op != OP_COPY_A && op != OP_INVERT); } inline bool needLevelSets(Operation op) { return (op == OP_UNION || op == OP_INTERSECTION || op == OP_DIFFERENCE); } // // Resampling options // enum ResampleMode { RESAMPLE_OFF, // don't auto-resample grids RESAMPLE_B, // resample B to match A RESAMPLE_A, // resample A to match B RESAMPLE_HI_RES, // resample higher-res grid to match lower-res RESAMPLE_LO_RES // resample lower-res grid to match higher-res }; enum { RESAMPLE_MODE_FIRST = RESAMPLE_OFF, RESAMPLE_MODE_LAST = RESAMPLE_LO_RES }; const char* const sResampleModeMenuItems[] = { "off", "Off", "btoa", "B to Match A", "atob", "A to Match B", "hitolo", "Higher-res to Match Lower-res", "lotohi", "Lower-res to Match Higher-res", nullptr }; inline ResampleMode asResampleMode(exint i, ResampleMode defaultMode = RESAMPLE_B) { return (i >= RESAMPLE_MODE_FIRST && i <= RESAMPLE_MODE_LAST) ? 
static_cast<ResampleMode>(i) : defaultMode; } // // Collation options // enum CollationMode { COLL_PAIRS = 0, COLL_A_WITH_1ST_B, COLL_FLATTEN_A, COLL_FLATTEN_B_TO_A, COLL_FLATTEN_A_GROUPS }; inline CollationMode asCollation(const std::string& str) { if (str == "pairs") return COLL_PAIRS; if (str == "awithfirstb") return COLL_A_WITH_1ST_B; if (str == "flattena") return COLL_FLATTEN_A; if (str == "flattenbtoa") return COLL_FLATTEN_B_TO_A; if (str == "flattenagroups") return COLL_FLATTEN_A_GROUPS; throw std::runtime_error{"invalid collation mode \"" + str + "\""}; } } // anonymous namespace /// @brief SOP to combine two VDB grids via various arithmetic operations class SOP_OpenVDB_Combine: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Combine(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Combine() override {} static OP_Node* factory(OP_Network*, const char*, OP_Operator*); class Cache: public SOP_VDBCacheOptions { public: fpreal getTime() const { return mTime; } protected: OP_ERROR cookVDBSop(OP_Context&) override; private: hvdb::GridPtr combineGrids(Operation, hvdb::GridCPtr aGrid, hvdb::GridCPtr bGrid, const UT_String& aGridName, const UT_String& bGridName, ResampleMode resample); fpreal mTime = 0.0; }; // class Cache protected: bool updateParmsFlags() override; void resolveObsoleteParms(PRM_ParmList*) override; private: template<typename> struct DispatchOp; struct CombineOp; }; //////////////////////////////////////// void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; // Group A parms.add(hutil::ParmFactory(PRM_STRING, "agroup", "Group A") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Use a subset of the first input as the A VDB(s).") .setDocumentation( "The VDBs to be used from the first input" " (see [specifying volumes|/model/volumes#group])")); // Group B parms.add(hutil::ParmFactory(PRM_STRING, "bgroup", "Group B") .setChoiceList(&hutil::PrimGroupMenuInput2) .setTooltip("Use a subset of 
the second input as the B VDB(s).") .setDocumentation( "The VDBs to be used from the second input" " (see [specifying volumes|/model/volumes#group])")); parms.add(hutil::ParmFactory(PRM_STRING, "collation", "Collation") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "pairs", "Combine A/B Pairs", "awithfirstb", "Combine Each A With First B", "flattena", "Flatten All A", "flattenbtoa", "Flatten All B Into First A", "flattenagroups", "Flatten A Groups" }) .setDefault("pairs") .setTooltip("Specify the order in which to combine VDBs from the A and/or B groups.") .setDocumentation("\ The order in which to combine VDBs from the _A_ and/or _B_ groups\n\ \n\ Combine _A_/_B_ Pairs:\n\ Combine pairs of _A_ and _B_ VDBs, in the order in which they appear\n\ in their respective groups.\n\ Combine Each _A_ With First _B_:\n\ Combine each _A_ VDB with the first _B_ VDB.\n\ Flatten All _A_:\n\ Collapse all of the _A_ VDBs into a single output VDB.\n\ Flatten All _B_ Into First _A_:\n\ Accumulate each _B_ VDB into the first _A_ VDB, producing a single output VDB.\n\ Flatten _A_ Groups:\n\ Collapse VDBs within each _A_ group, producing one output VDB for each group.\n\ \n\ Space-separated group patterns are treated as distinct groups in this mode.\n\ For example, \"`@name=x* @name=y*`\" results in two output VDBs\n\ (provided that there is at least one _A_ VDB whose name starts with `x`\n\ and at least one whose name starts with `y`).\n\ ")); // Menu of available operations parms.add(hutil::ParmFactory(PRM_ORD, "operation", "Operation") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, sOpMenuItems) .setDocumentation("\ Each voxel that is active in either of the input VDBs\n\ will be processed with this operation.\n\ \n\ Copy _A_:\n\ Use _A_, ignore _B_.\n\ \n\ Copy _B_:\n\ Use _B_, ignore _A_.\n\ \n\ Invert _A_:\n\ Use 0 &minus; _A_.\n\ \n\ Add:\n\ Add the values of _A_ and _B_.\n\ \n\ NOTE:\n\ Using this for fog volumes, which have density values between 0 and 
1,\n\ will push densities over 1 and cause a bright interface between the\n\ input volumes when rendered. To avoid this problem, try using the\n\ _A_&nbsp;+&nbsp;(1&nbsp;&minus;&nbsp;_A_)&nbsp;&times;&nbsp;_B_\n\ operation.\n\ \n\ Subtract:\n\ Subtract the values of _B_ from the values of _A_.\n\ \n\ Multiply:\n\ Multiply the values of _A_ and _B_.\n\ \n\ Divide:\n\ Divide the values of _A_ by _B_.\n\ \n\ Maximum:\n\ Use the maximum of each corresponding value from _A_ and _B_.\n\ \n\ NOTE:\n\ Using this for fog volumes, which have density values between 0 and 1,\n\ can produce a dark interface between the inputs when rendered, due to\n\ the binary nature of choosing a value from either from _A_ or _B_.\n\ To avoid this problem, try using the\n\ (1&nbsp;&minus;&nbsp;_A_)&nbsp;&times;&nbsp;_B_ operation.\n\ \n\ Minimum:\n\ Use the minimum of each corresponding value from _A_ and _B_.\n\ \n\ (1&nbsp;&minus;&nbsp;_A_)&nbsp;&times;&nbsp;_B_:\n\ This is similar to SDF Difference, except for fog volumes,\n\ and can also be viewed as \"soft cutout\" operation.\n\ It is typically used to clear out an area around characters\n\ in a dust simulation or some other environmental volume.\n\ \n\ _A_&nbsp;+&nbsp;(1&nbsp;&minus;&nbsp;_A_)&nbsp;&times;&nbsp;_B_:\n\ This is similar to SDF Union, except for fog volumes, and\n\ can also be viewed as a \"soft union\" or \"merge\" operation.\n\ Consider using this over the Maximum or Add operations\n\ for fog volumes.\n\ \n\ SDF Union:\n\ Generate the union of signed distance fields _A_ and _B_.\n\ \n\ SDF Intersection:\n\ Generate the intersection of signed distance fields _A_ and _B_.\n\ \n\ SDF Difference:\n\ Remove signed distance field _B_ from signed distance field _A_.\n\ \n\ Replace _A_ with Active _B_:\n\ Copy the active voxels of _B_ into _A_.\n\ \n\ Activity Union:\n\ Make voxels active if they are active in either _A_ or _B_.\n\ \n\ Activity Intersection:\n\ Make voxels active if they are active in both _A_ and _B_.\n\ \n\ It 
is recommended to enable pruning when using this operation.\n\ \n\ Activity Difference:\n\ Make voxels active if they are active in _A_ but not in _B_.\n\ \n\ It is recommended to enable pruning when using this operation.\n")); // Scalar multiplier on the A grid parms.add(hutil::ParmFactory(PRM_FLT_J, "amult", "A Multiplier") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_UI, -10, PRM_RANGE_UI, 10) .setTooltip( "Multiply voxel values in the A VDB by a scalar\n" "before combining the A VDB with the B VDB.")); // Scalar multiplier on the B grid parms.add(hutil::ParmFactory(PRM_FLT_J, "bmult", "B Multiplier") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_UI, -10, PRM_RANGE_UI, 10) .setTooltip( "Multiply voxel values in the B VDB by a scalar\n" "before combining the A VDB with the B VDB.")); // Menu of resampling options parms.add(hutil::ParmFactory(PRM_ORD, "resample", "Resample") .setDefault(PRMoneDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, sResampleModeMenuItems) .setTooltip( "If the A and B VDBs have different transforms, one VDB should\n" "be resampled to match the other before the two are combined.\n" "Also, level set VDBs should have matching background values\n" "(i.e., matching narrow band widths).")); // Menu of resampling interpolation order options parms.add(hutil::ParmFactory(PRM_ORD, "resampleinterp", "Interpolation") .setDefault(PRMoneDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "point", "Nearest", "linear", "Linear", "quadratic", "Quadratic" }) .setTooltip( "Specify the type of interpolation to be used when\n" "resampling one VDB to match the other's transform.") .setDocumentation( "The type of interpolation to be used when resampling one VDB" " to match the other's transform\n\n" "Nearest neighbor interpolation is fast but can introduce noticeable" " sampling artifacts. Quadratic interpolation is slow but high-quality." 
" Linear interpolation is intermediate in speed and quality.")); // Deactivate background value toggle parms.add(hutil::ParmFactory(PRM_TOGGLE, "deactivate", "Deactivate Background Voxels") .setDefault(PRMzeroDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setDocumentation( "Deactivate active output voxels whose values equal" " the output VDB's background value.")); // Deactivation tolerance slider parms.add(hutil::ParmFactory(PRM_FLT_J, "bgtolerance", "Deactivate Tolerance") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 1) .setTooltip( "Deactivate active output voxels whose values\n" "equal the output VDB's background value.\n" "Voxel values are considered equal if they differ\n" "by less than the specified tolerance.") .setDocumentation( "When deactivation of background voxels is enabled," " voxel values are considered equal to the background" " if they differ by less than this tolerance.")); // Prune toggle parms.add(hutil::ParmFactory(PRM_TOGGLE, "prune", "Prune") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setDocumentation( "Reduce the memory footprint of output VDBs that have" " (sufficiently large) regions of voxels with the same value.\n\n" "NOTE:\n" " Pruning affects only the memory usage of a VDB.\n" " It does not remove voxels, apart from inactive voxels\n" " whose value is equal to the background.")); // Pruning tolerance slider parms.add(hutil::ParmFactory(PRM_FLT_J, "tolerance", "Prune Tolerance") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 1) .setTooltip( "Collapse regions of constant value in output VDBs.\n" "Voxel values are considered equal if they differ\n" "by less than the specified tolerance.") .setDocumentation( "When pruning is enabled, voxel values are considered equal" " if they differ by less than the specified tolerance.")); // Flood fill toggle parms.add(hutil::ParmFactory(PRM_TOGGLE, "flood", "Signed-Flood-Fill Output SDFs") 
.setDefault(PRMzeroDefaults) .setTooltip( "Reclassify inactive voxels of level set VDBs as either inside or outside.") .setDocumentation( "Test inactive voxels to determine if they are inside or outside of an SDF" " and hence whether they should have negative or positive sign.")); // Obsolete parameters hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_ORD, "combination", "Operation") .setDefault(-2)); obsoleteParms.add(hutil::ParmFactory(PRM_SEPARATOR, "sep1", "")); obsoleteParms.add(hutil::ParmFactory(PRM_SEPARATOR, "sep2", "")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "flatten", "Flatten All B into A") .setDefault(PRMzeroDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "pairs", "Combine A/B Pairs") .setDefault(PRMoneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "groupA", "Group A")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "groupB", "Group B")); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "mult_a", "A Multiplier") .setDefault(PRMoneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "mult_b", "B Multiplier") .setDefault(PRMoneDefaults)); // Register SOP hvdb::OpenVDBOpFactory("VDB Combine", SOP_OpenVDB_Combine::factory, parms, *table) .addInput("A VDBs") .addOptionalInput("B VDBs") .setObsoleteParms(obsoleteParms) .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Combine::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Combine the values of VDB volumes in various ways.\"\"\"\n\ \n\ @related\n\ \n\ - [Node:sop/vdbcombine]\n\ - [Node:sop/volumevop]\n\ - [Node:sop/volumemix]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Combine::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Combine(net, name, op); } SOP_OpenVDB_Combine::SOP_OpenVDB_Combine(OP_Network* net, const 
char* name, OP_Operator* op) : SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// void SOP_OpenVDB_Combine::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; const fpreal time = 0.0; if (PRM_Parm* parm = obsoleteParms->getParmPtr("combination")) { if (!parm->isFactoryDefault()) { // The "combination" choices (union, intersection, difference) from // the old CSG SOP were appended to this SOP's "operation" list. switch (obsoleteParms->evalInt("combination", 0, time)) { case 0: setInt("operation", 0, 0.0, OP_UNION); break; case 1: setInt("operation", 0, 0.0, OP_INTERSECTION); break; case 2: setInt("operation", 0, 0.0, OP_DIFFERENCE); break; } } } { PRM_Parm *flatten = obsoleteParms->getParmPtr("flatten"), *pairs = obsoleteParms->getParmPtr("pairs"); if (flatten && !flatten->isFactoryDefault()) { // factory default was Off setString("flattenbtoa", CH_STRING_LITERAL, "collation", 0, time); } else if (pairs && !pairs->isFactoryDefault()) { // factory default was On setString("awithfirstb", CH_STRING_LITERAL, "collation", 0, time); } } resolveRenamedParm(*obsoleteParms, "groupA", "agroup"); resolveRenamedParm(*obsoleteParms, "groupB", "bgroup"); resolveRenamedParm(*obsoleteParms, "mult_a", "amult"); resolveRenamedParm(*obsoleteParms, "mult_b", "bmult"); // Delegate to the base class. hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } // Enable or disable parameters in the UI. bool SOP_OpenVDB_Combine::updateParmsFlags() { bool changed = false; changed |= enableParm("resampleinterp", evalInt("resample", 0, 0) != 0); changed |= enableParm("bgtolerance", evalInt("deactivate", 0, 0) != 0); changed |= enableParm("tolerance", evalInt("prune", 0, 0) != 0); return changed; } //////////////////////////////////////// namespace { using StringVec = std::vector<std::string>; // Split a string into group patterns separated by whitespace. 
// For example, given '@name=d* @id="1 2" {grp1 grp2}', return // ['@name=d*', '@id="1 2"', '{grp1 grp2}']. // (This is nonstandard. Normally, multiple patterns are unioned // to define a single group.) // Nesting of quotes and braces is not supported. inline StringVec splitPatterns(const std::string& str) { StringVec patterns; bool quoted = false, braced = false; std::string pattern; for (const auto c: str) { if (isspace(c)) { if (pattern.empty()) continue; // skip whitespace between patterns if (quoted || braced) { pattern.push_back(c); // keep whitespace within quotes or braces } else { // At the end of a pattern. Start a new pattern. patterns.push_back(pattern); pattern.clear(); quoted = braced = false; } } else { switch (c) { case '"': quoted = !quoted; break; case '{': braced = true; break; case '}': braced = false; break; default: break; } pattern.push_back(c); } } if (!pattern.empty()) { patterns.push_back(pattern); } // add the final pattern // If no patterns were found, add an empty pattern, which matches everything. 
if (patterns.empty()) { patterns.push_back(""); } return patterns; } inline UT_String getGridName(const GU_PrimVDB* vdb, const UT_String& defaultName = "") { UT_String name{UT_String::ALWAYS_DEEP}; if (vdb != nullptr) { name = vdb->getGridName(); if (!name.isstring()) name = defaultName; } return name; } } // anonymous namespace OP_ERROR SOP_OpenVDB_Combine::Cache::cookVDBSop(OP_Context& context) { try { UT_AutoInterrupt progress{"Combining VDBs"}; mTime = context.getTime(); const Operation op = asOp(static_cast<int>(evalInt("operation", 0, getTime()))); const ResampleMode resample = asResampleMode(evalInt("resample", 0, getTime())); const CollationMode collation = asCollation(evalStdString("collation", getTime())); const bool flattenA = ((collation == COLL_FLATTEN_A) || (collation == COLL_FLATTEN_A_GROUPS)), flatten = (flattenA || (collation == COLL_FLATTEN_B_TO_A)), needA = needAGrid(op), needB = (needBGrid(op) && !flattenA); GU_Detail* aGdp = gdp; const GU_Detail* bGdp = inputGeo(1, context); const auto aGroupStr = evalStdString("agroup", getTime()); const auto bGroupStr = evalStdString("bgroup", getTime()); const auto* bGroup = (!bGdp ? nullptr : matchGroup(*bGdp, bGroupStr)); // In Flatten A Groups mode, treat space-separated subpatterns // as specifying distinct groups to be processed independently. // (In all other modes, subpatterns are unioned into a single group.) std::vector<const GA_PrimitiveGroup*> aGroupVec; if (collation != COLL_FLATTEN_A_GROUPS) { aGroupVec.push_back(matchGroup(*aGdp, aGroupStr)); } else { for (const auto& pattern: splitPatterns(aGroupStr)) { aGroupVec.push_back(matchGroup(*aGdp, pattern)); } } // For diagnostic purposes, keep track of whether any input grids are left unused. bool unusedA = false, unusedB = false; // Iterate over one or more A groups. 
for (const auto* aGroup: aGroupVec) { hvdb::VdbPrimIterator aIt{aGdp, GA_Range::safedeletions{}, aGroup}; hvdb::VdbPrimCIterator bIt{bGdp, bGroup}; // Populate two vectors of primitives, one comprising the A grids // and the other the B grids. (In the case of flattening operations, // these grids might be taken from the same input.) // Note: the following relies on exhausted iterators returning nullptr // and on incrementing an exhausted iterator being a no-op. std::vector<GU_PrimVDB*> aVdbVec; std::vector<const GU_PrimVDB*> bVdbVec; switch (collation) { case COLL_PAIRS: for ( ; (!needA || aIt) && (!needB || bIt); ++aIt, ++bIt) { aVdbVec.push_back(*aIt); bVdbVec.push_back(*bIt); } unusedA = unusedA || (needA && bool(aIt)); unusedB = unusedB || (needB && bool(bIt)); break; case COLL_A_WITH_1ST_B: for ( ; aIt && (!needB || bIt); ++aIt) { aVdbVec.push_back(*aIt); bVdbVec.push_back(*bIt); } break; case COLL_FLATTEN_B_TO_A: if (*bIt) { aVdbVec.push_back(*aIt); bVdbVec.push_back(*bIt); } for (++bIt; bIt; ++bIt) { aVdbVec.push_back(nullptr); bVdbVec.push_back(*bIt); } break; case COLL_FLATTEN_A: case COLL_FLATTEN_A_GROUPS: aVdbVec.push_back(*aIt); for (++aIt; aIt; ++aIt) { bVdbVec.push_back(*aIt); } break; } if ((needA && aVdbVec.empty()) || (needB && bVdbVec.empty())) continue; std::set<GU_PrimVDB*> vdbsToRemove; // Combine grids. if (!flatten) { // Iterate over A and, optionally, B grids. for (size_t i = 0, N = std::min(aVdbVec.size(), bVdbVec.size()); i < N; ++i) { if (progress.wasInterrupted()) { throw std::runtime_error{"interrupted"}; } // Note: even if needA is false, we still need to delete A grids. GU_PrimVDB* aVdb = aVdbVec[i]; const GU_PrimVDB* bVdb = bVdbVec[i]; hvdb::GridPtr aGrid; hvdb::GridCPtr bGrid; if (aVdb) aGrid = aVdb->getGridPtr(); if (bVdb) bGrid = bVdb->getConstGridPtr(); // For error reporting, get the names of the A and B grids. 
const UT_String aGridName = getGridName(aVdb, /*default=*/"A"), bGridName = getGridName(bVdb, /*default=*/"B"); if (hvdb::GridPtr outGrid = combineGrids(op, aGrid, bGrid, aGridName, bGridName, resample)) { // Name the output grid after the A grid if the A grid is used, // or after the B grid otherwise. UT_String outGridName = needA ? getGridName(aVdb) : getGridName(bVdb); // Add a new VDB primitive for the output grid to the output gdp. GU_PrimVDB::buildFromGrid(*gdp, outGrid, /*copyAttrsFrom=*/needA ? aVdb : bVdb, outGridName); vdbsToRemove.insert(aVdb); } } // Flatten grids (i.e., combine all B grids into the first A grid). } else { GU_PrimVDB* aVdb = aVdbVec[0]; hvdb::GridPtr aGrid; if (aVdb) aGrid = aVdb->getGridPtr(); hvdb::GridPtr outGrid; UT_String outGridName; // Iterate over B grids. const GU_PrimVDB* bVdb = nullptr; for (const GU_PrimVDB* theBVdb: bVdbVec) { if (progress.wasInterrupted()) { throw std::runtime_error{"interrupted"}; } bVdb = theBVdb; hvdb::GridCPtr bGrid; if (bVdb) { bGrid = bVdb->getConstGridPtr(); if (flattenA) { // When flattening within the A group, remove B grids, // since they're actually copies of grids from input 0. vdbsToRemove.insert(const_cast<GU_PrimVDB*>(bVdb)); } } const UT_String aGridName = getGridName(aVdb, /*default=*/"A"), bGridName = getGridName(bVdb, /*default=*/"B"); // Name the output grid after the A grid if the A grid is used, // or after the B grid otherwise. outGridName = (needA ? getGridName(aVdb) : getGridName(bVdb)); outGrid = combineGrids(op, aGrid, bGrid, aGridName, bGridName, resample); aGrid = outGrid; } if (outGrid) { // Add a new VDB primitive for the output grid to the output gdp. GU_PrimVDB::buildFromGrid(*gdp, outGrid, /*copyAttrsFrom=*/needA ? aVdb : bVdb, outGridName); vdbsToRemove.insert(aVdb); } } // Remove primitives that were copied from input 0. 
for (GU_PrimVDB* vdb: vdbsToRemove) { if (vdb) gdp->destroyPrimitive(*vdb, /*andPoints=*/true); } } // for each A group if (unusedA || unusedB) { std::ostringstream ostr; ostr << "some grids were not processed because there were more " << (unusedA ? "A" : "B") << " grids than " << (unusedA ? "B" : "A") << " grids"; addWarning(SOP_MESSAGE, ostr.str().c_str()); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); } //////////////////////////////////////// namespace { /// Functor to compute scale * grid + offset, for scalars scale and offset template<typename GridT> struct MulAdd { using ValueT = typename GridT::ValueType; using GridPtrT = typename GridT::Ptr; float scale, offset; explicit MulAdd(float s, float t = 0.0): scale(s), offset(t) {} void operator()(const ValueT& a, const ValueT&, ValueT& out) const { OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN out = ValueT(a * scale + offset); OPENVDB_NO_TYPE_CONVERSION_WARNING_END } /// @return true if the scale is 1 and the offset is 0 bool isIdentity() const { return (openvdb::math::isApproxEqual(scale, 1.f, 1.0e-6f) && openvdb::math::isApproxEqual(offset, 0.f, 1.0e-6f)); } /// Compute dest = src * scale + offset void process(const GridT& src, GridPtrT& dest) const { if (isIdentity()) { dest = src.deepCopy(); } else { if (!dest) dest = GridT::create(src); // same transform, new tree ValueT bg; (*this)(src.background(), ValueT(), bg); openvdb::tools::changeBackground(dest->tree(), bg); dest->tree().combine2(src.tree(), src.tree(), *this, /*prune=*/false); } } }; //////////////////////////////////////// /// Functor to compute (1 - A) * B for grids A and B template<typename ValueT> struct Blend1 { float aMult, bMult; const ValueT ONE; explicit Blend1(float a = 1.0, float b = 1.0): aMult(a), bMult(b), ONE(openvdb::zeroVal<ValueT>() + 1) {} void operator()(const ValueT& a, const ValueT& b, ValueT& out) const { OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN out = ValueT((ONE - aMult * a) * bMult * b); 
OPENVDB_NO_TYPE_CONVERSION_WARNING_END } }; //////////////////////////////////////// /// Functor to compute A + (1 - A) * B for grids A and B template<typename ValueT> struct Blend2 { float aMult, bMult; const ValueT ONE; explicit Blend2(float a = 1.0, float b = 1.0): aMult(a), bMult(b), ONE(openvdb::zeroVal<ValueT>() + 1) {} void operator()(const ValueT& a, const ValueT& b, ValueT& out) const { OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN out = ValueT(a*aMult); out = out + ValueT((ONE - out) * bMult*b); OPENVDB_NO_TYPE_CONVERSION_WARNING_END } }; //////////////////////////////////////// // Helper class to compare both scalar and vector values template<typename ValueT> struct ApproxEq { const ValueT &a, &b; ApproxEq(const ValueT& _a, const ValueT& _b): a(_a), b(_b) {} operator bool() const { return openvdb::math::isRelOrApproxEqual( a, b, /*rel*/ValueT(1e-6f), /*abs*/ValueT(1e-8f)); } }; // Specialization for Vec2 template<typename T> struct ApproxEq<openvdb::math::Vec2<T> > { using VecT = openvdb::math::Vec2<T>; using ValueT = typename VecT::value_type; const VecT &a, &b; ApproxEq(const VecT& _a, const VecT& _b): a(_a), b(_b) {} operator bool() const { return a.eq(b, /*abs=*/ValueT(1e-8f)); } }; // Specialization for Vec3 template<typename T> struct ApproxEq<openvdb::math::Vec3<T> > { using VecT = openvdb::math::Vec3<T>; using ValueT = typename VecT::value_type; const VecT &a, &b; ApproxEq(const VecT& _a, const VecT& _b): a(_a), b(_b) {} operator bool() const { return a.eq(b, /*abs=*/ValueT(1e-8f)); } }; // Specialization for Vec4 template<typename T> struct ApproxEq<openvdb::math::Vec4<T> > { using VecT = openvdb::math::Vec4<T>; using ValueT = typename VecT::value_type; const VecT &a, &b; ApproxEq(const VecT& _a, const VecT& _b): a(_a), b(_b) {} operator bool() const { return a.eq(b, /*abs=*/ValueT(1e-8f)); } }; } // unnamed namespace //////////////////////////////////////// template<typename AGridT> struct SOP_OpenVDB_Combine::DispatchOp { 
SOP_OpenVDB_Combine::CombineOp* combineOp; DispatchOp(SOP_OpenVDB_Combine::CombineOp& op): combineOp(&op) {} template<typename BGridT> void operator()(const BGridT&); }; // struct DispatchOp // Helper class for use with GridBase::apply() struct SOP_OpenVDB_Combine::CombineOp { SOP_OpenVDB_Combine::Cache* self; Operation op; ResampleMode resample; UT_String aGridName, bGridName; hvdb::GridCPtr aBaseGrid, bBaseGrid; hvdb::GridPtr outGrid; hvdb::Interrupter interrupt; CombineOp(): self(nullptr) {} // Functor for use with GridBase::apply() to return // a scalar grid's background value as a floating-point quantity struct BackgroundOp { double value; BackgroundOp(): value(0.0) {} template<typename GridT> void operator()(const GridT& grid) { value = static_cast<double>(grid.background()); } }; static double getScalarBackgroundValue(const hvdb::Grid& baseGrid) { BackgroundOp bgOp; baseGrid.apply<hvdb::NumericGridTypes>(bgOp); return bgOp.value; } template<typename GridT> typename GridT::Ptr resampleToMatch(const GridT& src, const hvdb::Grid& ref, int order) { using ValueT = typename GridT::ValueType; const ValueT ZERO = openvdb::zeroVal<ValueT>(); const openvdb::math::Transform& refXform = ref.constTransform(); typename GridT::Ptr dest; if (src.getGridClass() == openvdb::GRID_LEVEL_SET) { // For level set grids, use the level set rebuild tool to both resample the // source grid to match the reference grid and to rebuild the resulting level set. const bool refIsLevelSet = ref.getGridClass() == openvdb::GRID_LEVEL_SET; OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN const ValueT halfWidth = refIsLevelSet ? 
ValueT(ZERO + this->getScalarBackgroundValue(ref) * (1.0 / ref.voxelSize()[0])) : ValueT(src.background() * (1.0 / src.voxelSize()[0])); OPENVDB_NO_TYPE_CONVERSION_WARNING_END if (!openvdb::math::isFinite(halfWidth)) { std::stringstream msg; msg << "Resample to match: Illegal narrow band width = " << halfWidth << ", caused by grid '" << src.getName() << "' with background " << this->getScalarBackgroundValue(ref); throw std::invalid_argument(msg.str()); } try { dest = openvdb::tools::doLevelSetRebuild(src, /*iso=*/ZERO, /*exWidth=*/halfWidth, /*inWidth=*/halfWidth, &refXform, &interrupt); } catch (openvdb::TypeError&) { self->addWarning(SOP_MESSAGE, ("skipped rebuild of level set grid " + src.getName() + " of type " + src.type()).c_str()); dest.reset(); } } if (!dest && src.constTransform() != refXform) { // For non-level set grids or if level set rebuild failed due to an unsupported // grid type, use the grid transformer tool to resample the source grid to match // the reference grid. dest = src.copyWithNewTree(); dest->setTransform(refXform.copy()); using namespace openvdb; switch (order) { case 0: tools::resampleToMatch<tools::PointSampler>(src, *dest, interrupt); break; case 1: tools::resampleToMatch<tools::BoxSampler>(src, *dest, interrupt); break; case 2: tools::resampleToMatch<tools::QuadraticSampler>(src, *dest, interrupt); break; } } return dest; } // If necessary, resample one grid so that its index space registers // with the other grid's. // Note that one of the grid pointers might change as a result. 
template<typename AGridT, typename BGridT> void resampleGrids(const AGridT*& aGrid, const BGridT*& bGrid) { if (!aGrid || !bGrid) return; const bool needA = needAGrid(op), needB = needBGrid(op), needBoth = needA && needB; const int samplingOrder = static_cast<int>( self->evalInt("resampleinterp", 0, self->getTime())); // One of RESAMPLE_A, RESAMPLE_B or RESAMPLE_OFF, specifying whether // grid A, grid B or neither grid was resampled int resampleWhich = RESAMPLE_OFF; // Determine which of the two grids should be resampled. if (resample == RESAMPLE_HI_RES || resample == RESAMPLE_LO_RES) { const openvdb::Vec3d aVoxSize = aGrid->voxelSize(), bVoxSize = bGrid->voxelSize(); const double aVoxVol = aVoxSize[0] * aVoxSize[1] * aVoxSize[2], bVoxVol = bVoxSize[0] * bVoxSize[1] * bVoxSize[2]; resampleWhich = ((aVoxVol > bVoxVol && resample == RESAMPLE_LO_RES) || (aVoxVol < bVoxVol && resample == RESAMPLE_HI_RES)) ? RESAMPLE_A : RESAMPLE_B; } else { resampleWhich = resample; } if (aGrid->constTransform() != bGrid->constTransform()) { // If the A and B grid transforms don't match, one of the grids // should be resampled into the other's index space. if (resample == RESAMPLE_OFF) { if (needBoth) { // Resampling is disabled. Just log a warning. std::ostringstream ostr; ostr << aGridName << " and " << bGridName << " transforms don't match"; self->addWarning(SOP_MESSAGE, ostr.str().c_str()); } } else { if (needA && resampleWhich == RESAMPLE_A) { // Resample grid A into grid B's index space. aBaseGrid = this->resampleToMatch(*aGrid, *bGrid, samplingOrder); aGrid = static_cast<const AGridT*>(aBaseGrid.get()); } else if (needB && resampleWhich == RESAMPLE_B) { // Resample grid B into grid A's index space. 
bBaseGrid = this->resampleToMatch(*bGrid, *aGrid, samplingOrder); bGrid = static_cast<const BGridT*>(bBaseGrid.get()); } } } if (aGrid->getGridClass() == openvdb::GRID_LEVEL_SET && bGrid->getGridClass() == openvdb::GRID_LEVEL_SET) { // If both grids are level sets, ensure that their background values match. // (If one of the grids was resampled, then the background values should // already match.) const double a = this->getScalarBackgroundValue(*aGrid), b = this->getScalarBackgroundValue(*bGrid); if (!ApproxEq<double>(a, b)) { if (resample == RESAMPLE_OFF) { if (needBoth) { // Resampling/rebuilding is disabled. Just log a warning. std::ostringstream ostr; ostr << aGridName << " and " << bGridName << " background values don't match (" << std::setprecision(3) << a << " vs. " << b << ");\n" << " the output grid will not be a valid level set"; self->addWarning(SOP_MESSAGE, ostr.str().c_str()); } } else { // One of the two grids needs a level set rebuild. if (needA && resampleWhich == RESAMPLE_A) { // Rebuild A to match B's background value. aBaseGrid = this->resampleToMatch(*aGrid, *bGrid, samplingOrder); aGrid = static_cast<const AGridT*>(aBaseGrid.get()); } else if (needB && resampleWhich == RESAMPLE_B) { // Rebuild B to match A's background value. bBaseGrid = this->resampleToMatch(*bGrid, *aGrid, samplingOrder); bGrid = static_cast<const BGridT*>(bBaseGrid.get()); } } } } } void checkVectorTypes(const hvdb::Grid* aGrid, const hvdb::Grid* bGrid) { if (!aGrid || !bGrid || !needAGrid(op) || !needBGrid(op)) return; switch (op) { case OP_TOPO_UNION: case OP_TOPO_INTERSECTION: case OP_TOPO_DIFFERENCE: // No need to warn about different vector types for topology-only operations. break; default: { const openvdb::VecType aVecType = aGrid->getVectorType(), bVecType = bGrid->getVectorType(); if (aVecType != bVecType) { std::ostringstream ostr; ostr << aGridName << " and " << bGridName << " have different vector types\n" << " (" << hvdb::Grid::vecTypeToString(aVecType) << " vs. 
" << hvdb::Grid::vecTypeToString(bVecType) << ")"; self->addWarning(SOP_MESSAGE, ostr.str().c_str()); } break; } } } template <typename GridT> void doUnion(GridT &result, GridT &temp) { openvdb::tools::csgUnion(result, temp); } template <typename GridT> void doIntersection(GridT &result, GridT &temp) { openvdb::tools::csgIntersection(result, temp); } template <typename GridT> void doDifference(GridT &result, GridT &temp) { openvdb::tools::csgDifference(result, temp); } // Combine two grids of the same type. template<typename GridT> void combineSameType() { using ValueT = typename GridT::ValueType; const bool needA = needAGrid(op), needB = needBGrid(op); const float aMult = float(self->evalFloat("amult", 0, self->getTime())), bMult = float(self->evalFloat("bmult", 0, self->getTime())); const GridT *aGrid = nullptr, *bGrid = nullptr; if (aBaseGrid) aGrid = UTvdbGridCast<GridT>(aBaseGrid).get(); if (bBaseGrid) bGrid = UTvdbGridCast<GridT>(bBaseGrid).get(); if (needA && !aGrid) throw std::runtime_error("missing A grid"); if (needB && !bGrid) throw std::runtime_error("missing B grid"); // Warn if combining vector grids with different vector types. if (needA && needB && openvdb::VecTraits<ValueT>::IsVec) { this->checkVectorTypes(aGrid, bGrid); } // If necessary, resample one grid so that its index space // registers with the other grid's. if (aGrid && bGrid) this->resampleGrids(aGrid, bGrid); const ValueT ZERO = openvdb::zeroVal<ValueT>(); // A temporary grid is needed for binary operations, because they // cannibalize the B grid. 
typename GridT::Ptr resultGrid, tempGrid; switch (op) { case OP_COPY_A: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); break; case OP_COPY_B: MulAdd<GridT>(bMult).process(*bGrid, resultGrid); break; case OP_INVERT: MulAdd<GridT>(-aMult, 1.0).process(*aGrid, resultGrid); break; case OP_ADD: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(bMult).process(*bGrid, tempGrid); openvdb::tools::compSum(*resultGrid, *tempGrid); break; case OP_SUBTRACT: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(-bMult).process(*bGrid, tempGrid); openvdb::tools::compSum(*resultGrid, *tempGrid); break; case OP_MULTIPLY: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(bMult).process(*bGrid, tempGrid); openvdb::tools::compMul(*resultGrid, *tempGrid); break; case OP_DIVIDE: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(bMult).process(*bGrid, tempGrid); openvdb::tools::compDiv(*resultGrid, *tempGrid); break; case OP_MAXIMUM: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(bMult).process(*bGrid, tempGrid); openvdb::tools::compMax(*resultGrid, *tempGrid); break; case OP_MINIMUM: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(bMult).process(*bGrid, tempGrid); openvdb::tools::compMin(*resultGrid, *tempGrid); break; case OP_BLEND1: // (1 - A) * B { const Blend1<ValueT> comp(aMult, bMult); ValueT bg; comp(aGrid->background(), ZERO, bg); resultGrid = aGrid->copyWithNewTree(); openvdb::tools::changeBackground(resultGrid->tree(), bg); resultGrid->tree().combine2(aGrid->tree(), bGrid->tree(), comp, /*prune=*/false); break; } case OP_BLEND2: // A + (1 - A) * B { const Blend2<ValueT> comp(aMult, bMult); ValueT bg; comp(aGrid->background(), ZERO, bg); resultGrid = aGrid->copyWithNewTree(); openvdb::tools::changeBackground(resultGrid->tree(), bg); resultGrid->tree().combine2(aGrid->tree(), bGrid->tree(), comp, /*prune=*/false); break; } case OP_UNION: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); 
MulAdd<GridT>(bMult).process(*bGrid, tempGrid); doUnion(*resultGrid, *tempGrid); break; case OP_INTERSECTION: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(bMult).process(*bGrid, tempGrid); doIntersection(*resultGrid, *tempGrid); break; case OP_DIFFERENCE: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(bMult).process(*bGrid, tempGrid); doDifference(*resultGrid, *tempGrid); break; case OP_REPLACE: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(bMult).process(*bGrid, tempGrid); openvdb::tools::compReplace(*resultGrid, *tempGrid); break; case OP_TOPO_UNION: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); // Note: no need to scale the B grid for topology-only operations. resultGrid->topologyUnion(*bGrid); break; case OP_TOPO_INTERSECTION: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); resultGrid->topologyIntersection(*bGrid); break; case OP_TOPO_DIFFERENCE: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); resultGrid->topologyDifference(*bGrid); break; } outGrid = this->postprocess<GridT>(resultGrid); } // Combine two grids of different types. /// @todo Currently, only topology operations can be performed on grids of different types. template<typename AGridT, typename BGridT> void combineDifferentTypes() { const bool needA = needAGrid(op), needB = needBGrid(op); const AGridT* aGrid = nullptr; const BGridT* bGrid = nullptr; if (aBaseGrid) aGrid = UTvdbGridCast<AGridT>(aBaseGrid).get(); if (bBaseGrid) bGrid = UTvdbGridCast<BGridT>(bBaseGrid).get(); if (needA && !aGrid) throw std::runtime_error("missing A grid"); if (needB && !bGrid) throw std::runtime_error("missing B grid"); // Warn if combining vector grids with different vector types. if (needA && needB && openvdb::VecTraits<typename AGridT::ValueType>::IsVec && openvdb::VecTraits<typename BGridT::ValueType>::IsVec) { this->checkVectorTypes(aGrid, bGrid); } // If necessary, resample one grid so that its index space // registers with the other grid's. 
if (aGrid && bGrid) this->resampleGrids(aGrid, bGrid); const float aMult = float(self->evalFloat("amult", 0, self->getTime())); typename AGridT::Ptr resultGrid; switch (op) { case OP_TOPO_UNION: MulAdd<AGridT>(aMult).process(*aGrid, resultGrid); // Note: no need to scale the B grid for topology-only operations. resultGrid->topologyUnion(*bGrid); break; case OP_TOPO_INTERSECTION: MulAdd<AGridT>(aMult).process(*aGrid, resultGrid); resultGrid->topologyIntersection(*bGrid); break; case OP_TOPO_DIFFERENCE: MulAdd<AGridT>(aMult).process(*aGrid, resultGrid); resultGrid->topologyDifference(*bGrid); break; default: { std::ostringstream ostr; ostr << "can't combine grid " << aGridName << " of type " << aGrid->type() << "\n with grid " << bGridName << " of type " << bGrid->type(); throw std::runtime_error(ostr.str()); break; } } outGrid = this->postprocess<AGridT>(resultGrid); } template<typename GridT> typename GridT::Ptr postprocess(typename GridT::Ptr resultGrid) { using ValueT = typename GridT::ValueType; const ValueT ZERO = openvdb::zeroVal<ValueT>(); const bool prune = self->evalInt("prune", 0, self->getTime()), flood = self->evalInt("flood", 0, self->getTime()), deactivate = self->evalInt("deactivate", 0, self->getTime()); if (deactivate) { const float deactivationTolerance = float(self->evalFloat("bgtolerance", 0, self->getTime())); OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN const ValueT tolerance(ZERO + deactivationTolerance); OPENVDB_NO_TYPE_CONVERSION_WARNING_END // Mark active output tiles and voxels as inactive if their // values match the output grid's background value. // Do this first to facilitate pruning. 
openvdb::tools::deactivate(*resultGrid, resultGrid->background(), tolerance); } if (flood && resultGrid->getGridClass() == openvdb::GRID_LEVEL_SET) { openvdb::tools::signedFloodFill(resultGrid->tree()); } if (prune) { const float pruneTolerance = float(self->evalFloat("tolerance", 0, self->getTime())); OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN const ValueT tolerance(ZERO + pruneTolerance); OPENVDB_NO_TYPE_CONVERSION_WARNING_END openvdb::tools::prune(resultGrid->tree(), tolerance); } return resultGrid; } template<typename AGridT> void operator()(const AGridT&) { const bool needA = needAGrid(op), needB = needBGrid(op), needBoth = needA && needB; if (!needBoth || !aBaseGrid || !bBaseGrid || aBaseGrid->type() == bBaseGrid->type()) { this->combineSameType<AGridT>(); } else { DispatchOp<AGridT> dispatcher(*this); // Dispatch on the B grid's type. int success = bBaseGrid->apply<hvdb::VolumeGridTypes>(dispatcher); if (!success) { std::ostringstream ostr; ostr << "grid " << bGridName << " has unsupported type " << bBaseGrid->type(); self->addWarning(SOP_MESSAGE, ostr.str().c_str()); } } } }; // struct CombineOp template <> void SOP_OpenVDB_Combine::CombineOp::doUnion(openvdb::BoolGrid &result, openvdb::BoolGrid &temp) { } template <> void SOP_OpenVDB_Combine::CombineOp::doIntersection(openvdb::BoolGrid &result, openvdb::BoolGrid &temp) { } template <> void SOP_OpenVDB_Combine::CombineOp::doDifference(openvdb::BoolGrid &result, openvdb::BoolGrid &temp) { } template<typename AGridT> template<typename BGridT> void SOP_OpenVDB_Combine::DispatchOp<AGridT>::operator()(const BGridT&) { combineOp->combineDifferentTypes<AGridT, BGridT>(); } //////////////////////////////////////// hvdb::GridPtr SOP_OpenVDB_Combine::Cache::combineGrids( Operation op, hvdb::GridCPtr aGrid, hvdb::GridCPtr bGrid, const UT_String& aGridName, const UT_String& bGridName, ResampleMode resample) { hvdb::GridPtr outGrid; const bool needA = needAGrid(op), needB = needBGrid(op), needLS = needLevelSets(op); if 
(!needA && !needB) throw std::runtime_error("nothing to do"); if (needA && !aGrid) throw std::runtime_error("missing A grid"); if (needB && !bGrid) throw std::runtime_error("missing B grid"); if (needLS && ((aGrid && aGrid->getGridClass() != openvdb::GRID_LEVEL_SET) || (bGrid && bGrid->getGridClass() != openvdb::GRID_LEVEL_SET))) { std::ostringstream ostr; ostr << "expected level set grids for the " << sOpMenuItems[op*2+1] << " operation,\n found " << hvdb::Grid::gridClassToString(aGrid->getGridClass()) << " (" << aGridName << ") and " << hvdb::Grid::gridClassToString(bGrid->getGridClass()) << " (" << bGridName << ");\n the output grid will not be a valid level set"; addWarning(SOP_MESSAGE, ostr.str().c_str()); } if (needA && needB && aGrid->type() != bGrid->type() && op != OP_TOPO_UNION && op != OP_TOPO_INTERSECTION && op != OP_TOPO_DIFFERENCE) { std::ostringstream ostr; ostr << "can't combine grid " << aGridName << " of type " << aGrid->type() << "\n with grid " << bGridName << " of type " << bGrid->type(); addWarning(SOP_MESSAGE, ostr.str().c_str()); return outGrid; } CombineOp compOp; compOp.self = this; compOp.op = op; compOp.resample = resample; compOp.aBaseGrid = aGrid; compOp.bBaseGrid = bGrid; compOp.aGridName = aGridName; compOp.bGridName = bGridName; compOp.interrupt = hvdb::Interrupter(); int success = false; if (needA || UTvdbGetGridType(*aGrid) == UTvdbGetGridType(*bGrid)) { success = aGrid->apply<hvdb::VolumeGridTypes>(compOp); } if (!success || !compOp.outGrid) { std::ostringstream ostr; if (aGrid->type() == bGrid->type()) { ostr << "grids " << aGridName << " and " << bGridName << " have unsupported type " << aGrid->type(); } else { ostr << "grid " << (needA ? aGridName : bGridName) << " has unsupported type " << (needA ? aGrid->type() : bGrid->type()); } addWarning(SOP_MESSAGE, ostr.str().c_str()); } return compOp.outGrid; }
57,222
C++
36.085548
103
0.566758
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Remove_Divergence.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Remove_Divergence.cc /// /// @author FX R&D OpenVDB team #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/math/ConjGradient.h> // for JacobiPreconditioner #include <openvdb/tools/GridOperators.h> #include <openvdb/tools/LevelSetUtil.h> // for tools::sdfInteriorMask() #include <openvdb/tools/PoissonSolver.h> #include <openvdb/tools/Prune.h> #include <UT/UT_Interrupt.h> #include <UT/UT_StringArray.h> #include <GU/GU_Detail.h> #include <PRM/PRM_Parm.h> #include <GA/GA_Handle.h> #include <GA/GA_PageIterator.h> #include <tbb/blocked_range.h> #include <tbb/parallel_for.h> #include <sstream> #include <string> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; namespace { using ColliderMaskGrid = openvdb::BoolGrid; ///< @todo really should derive from velocity grid using ColliderBBox = openvdb::BBoxd; using Coord = openvdb::Coord; enum ColliderType { CT_NONE, CT_BBOX, CT_STATIC, CT_DYNAMIC }; const int DEFAULT_MAX_ITERATIONS = 10000; const double DEFAULT_MAX_ERROR = 1.0e-20; } //////////////////////////////////////// struct SOP_OpenVDB_Remove_Divergence: public hvdb::SOP_NodeVDB { SOP_OpenVDB_Remove_Divergence(OP_Network*, const char* name, OP_Operator*); static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned input) const override { return (input > 0); } class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; protected: bool updateParmsFlags() override; }; void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Names of vector-valued VDBs to be processed") .setDocumentation( "A subset of vector-valued input VDBs to be 
processed" " (see [specifying volumes|/model/volumes#group])\n\n" "VDBs with nonuniform voxels, including frustum grids, are not supported.\n" "They should be [resampled|Node:sop/DW_OpenVDBResample]" " to have a linear transform with uniform scale.")); { std::ostringstream ostr; ostr << "If disabled, limit the pressure solver to " << DEFAULT_MAX_ITERATIONS << " iterations."; const std::string tooltip = ostr.str(); parms.add(hutil::ParmFactory(PRM_TOGGLE, "useiterations", "") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip(tooltip.c_str())); parms.add(hutil::ParmFactory(PRM_INT_J, "iterations", "Iterations") .setDefault(1000) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 2000) .setTooltip("Maximum number of iterations of the pressure solver") .setDocumentation( ("Maximum number of iterations of the pressure solver\n\n" + tooltip).c_str())); } { std::ostringstream ostr; ostr << "If disabled, limit the pressure solver error to " << std::setprecision(3) << DEFAULT_MAX_ERROR << "."; const std::string tooltip = ostr.str(); parms.add(hutil::ParmFactory(PRM_TOGGLE, "usetolerance", "") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip(tooltip.c_str())); ostr.str(""); ostr << "If disabled, limit the pressure solver error to 10<sup>" << int(std::log10(DEFAULT_MAX_ERROR)) << "</sup>."; parms.add(hutil::ParmFactory(PRM_FLT_J, "tolerance", "Tolerance") .setDefault(openvdb::math::Delta<float>::value()) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 0.01) .setTooltip( "The pressure solver is deemed to have converged when\n" "the magnitude of the error is less than this tolerance.") .setDocumentation( ("The pressure solver is deemed to have converged when" " the magnitude of the error is less than this tolerance.\n\n" + ostr.str()).c_str())); } parms.add(hutil::ParmFactory(PRM_TOGGLE, "usecollider", "") .setDefault(PRMzeroDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN)); parms.add(hutil::ParmFactory(PRM_STRING, 
"collidertype", "Collider Type") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "bbox", "Bounding Box", "static", "Static VDB", "dynamic", "Dynamic VDB" }) .setDefault("bbox") .setTooltip( "Bounding Box:\n" " Use the bounding box of any reference geometry as the collider.\n" "Static VDB:\n" " Treat the active voxels of the named VDB volume as solid, stationary obstacles." "\nDynamic VDB:\n" " If the named VDB volume is vector-valued, treat the values of active voxels\n" " as velocities of moving obstacles; otherwise, treat the active voxels as\n" " stationary obstacles." )); parms.add(hutil::ParmFactory(PRM_STRING, "collider", "Collider") .setChoiceList(&hutil::PrimGroupMenuInput2) .setTooltip( "Name of the reference VDB volume whose active voxels denote solid obstacles\n\n" "If multiple volumes are selected, only the first one will be used.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "invertcollider", "Invert Collider") .setDefault(PRMzeroDefaults) .setTooltip( "Invert the collider so that active voxels denote empty space\n" "and inactive voxels denote solid obstacles.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "pressure", "Output Pressure") .setDefault(PRMzeroDefaults) .setTooltip( "Output the computed pressure for each input VDB \"v\"\n" "as a scalar VDB named \"v_pressure\".")); // Register this operator. 
hvdb::OpenVDBOpFactory("VDB Project Non-Divergent", SOP_OpenVDB_Remove_Divergence::factory, parms, *table) #ifndef SESI_OPENVDB .setInternalName("DW_OpenVDBRemoveDivergence") #endif .addInput("Velocity field VDBs") .addOptionalInput("Optional collider VDB or geometry") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Remove_Divergence::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Remove divergence from VDB velocity fields.\"\"\"\n\ \n\ @overview\n\ \n\ A vector-valued VDB volume can represent a velocity field.\n\ When particles flow through the field, they might either expand\n\ from a voxel or collapse into a voxel.\n\ These source/sink behaviors indicate divergence in the field.\n\ \n\ This node computes a new vector field that is close to the input\n\ but has no divergence.\n\ This can be used to condition velocity fields to limit particle creation,\n\ creating more realistic flows.\n\ \n\ If the optional collider volume is provided, the output velocity field\n\ will direct flow around obstacles (i.e., active voxels) in that volume.\n\ The collider itself may be a velocity field, in which case the obstacles\n\ are considered to be moving with the given velocities.\n\ \n\ Combined with the [OpenVDB Advect Points|Node:sop/DW_OpenVDBAdvectPoints]\n\ node and a [Solver|Node:sop/solver] node for feedback, this node\n\ can be used to build a simple FLIP solver.\n\ \n\ @related\n\ - [OpenVDB Advect Points|Node:sop/DW_OpenVDBAdvectPoints]\n\ - [Node:sop/solver]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } bool SOP_OpenVDB_Remove_Divergence::updateParmsFlags() { bool changed = false; const bool useCollider = evalInt("usecollider", 0, 0); changed |= enableParm("collidertype", useCollider); changed |= enableParm("invertcollider", useCollider); changed |= enableParm("collider", useCollider && (evalStdString("collidertype", 0) != "bbox")); changed 
|= enableParm("iterations", bool(evalInt("useiterations", 0, 0))); changed |= enableParm("tolerance", bool(evalInt("usetolerance", 0, 0))); return changed; } OP_Node* SOP_OpenVDB_Remove_Divergence::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Remove_Divergence(net, name, op); } SOP_OpenVDB_Remove_Divergence::SOP_OpenVDB_Remove_Divergence( OP_Network* net, const char* name, OP_Operator* op) : hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// namespace { struct SolverParms { SolverParms() : invertCollider(false) , colliderType(CT_NONE) , iterations(1) , absoluteError(-1.0) , outputState(openvdb::math::pcg::terminationDefaults<double>()) , interrupter(nullptr) {} hvdb::GridPtr velocityGrid; hvdb::GridCPtr colliderGrid; hvdb::GridPtr pressureGrid; hvdb::GridCPtr domainMaskGrid; ColliderBBox colliderBBox; bool invertCollider; ColliderType colliderType; int iterations; double absoluteError; openvdb::math::pcg::State outputState; hvdb::Interrupter* interrupter; }; //////////////////////////////////////// /// @brief Functor to extract an interior mask from a level set grid /// of arbitrary floating-point type struct LevelSetMaskOp { template<typename GridType> void operator()(const GridType& grid) { outputGrid = openvdb::tools::sdfInteriorMask(grid); } hvdb::GridPtr outputGrid; }; /// @brief Functor to extract a topology mask from a grid of arbitrary type struct ColliderMaskOp { template<typename GridType> void operator()(const GridType& grid) { if (mask) { mask->topologyUnion(grid); mask->setTransform(grid.transform().copy()); } } ColliderMaskGrid::Ptr mask; }; //////////////////////////////////////// /// @brief Generic grid accessor /// @details This just wraps a const accessor to a collider grid, but /// it changes the behavior of the copy constructor for thread safety. 
template<typename GridType> class GridConstAccessor { public: using ValueType = typename GridType::ValueType; explicit GridConstAccessor(const SolverParms& parms): mAcc(static_cast<const GridType&>(*parms.colliderGrid).getConstAccessor()) {} explicit GridConstAccessor(const GridType& grid): mAcc(grid.getConstAccessor()) {} // When copying, create a new, empty accessor, to avoid a data race // with the existing accessor, which might be updating on another thread. GridConstAccessor(const GridConstAccessor& other): mAcc(other.mAcc.tree()) {} bool isValueOn(const Coord& ijk) const { return mAcc.isValueOn(ijk); } const ValueType& getValue(const Coord& ijk) const { return mAcc.getValue(ijk); } bool probeValue(const Coord& ijk, ValueType& val) const { return mAcc.probeValue(ijk, val); } private: GridConstAccessor& operator=(const GridConstAccessor&); typename GridType::ConstAccessor mAcc; }; // class GridConstAccessor using ColliderMaskAccessor = GridConstAccessor<ColliderMaskGrid>; /// @brief Bounding box accessor class BBoxConstAccessor { public: using ValueType = double; explicit BBoxConstAccessor(const SolverParms& parms): mBBox(parms.velocityGrid->transform().worldToIndexNodeCentered(parms.colliderBBox)) {} BBoxConstAccessor(const BBoxConstAccessor& other): mBBox(other.mBBox) {} // Voxels outside the bounding box are solid, i.e., active. 
bool isValueOn(const Coord& ijk) const { return !mBBox.isInside(ijk); } ValueType getValue(const Coord&) const { return ValueType(0); } bool probeValue(const Coord& ijk, ValueType& v) const { v=ValueType(0); return isValueOn(ijk); } private: BBoxConstAccessor& operator=(const BBoxConstAccessor&); const openvdb::CoordBBox mBBox; }; // class BBoxConstAccessor //////////////////////////////////////// /// @brief Functor to compute pressure projection in parallel over leaf nodes template<typename TreeType> struct PressureProjectionOp { using LeafNodeType = typename TreeType::LeafNodeType; using ValueType = typename TreeType::ValueType; PressureProjectionOp(SolverParms& parms, LeafNodeType** velNodes, const LeafNodeType** gradPressureNodes, bool staggered) : mVelocityNodes(velNodes) , mGradientOfPressureNodes(gradPressureNodes) , mVoxelSize(parms.velocityGrid->transform().voxelSize()[0]) , mStaggered(staggered) { } void operator()(const tbb::blocked_range<size_t>& range) const { using ElementType = typename ValueType::value_type; // Account for voxel size here, instead of in the Poisson solve. const ElementType scale = ElementType((mStaggered ? 
1.0 : 4.0) * mVoxelSize * mVoxelSize); for (size_t n = range.begin(), N = range.end(); n < N; ++n) { LeafNodeType& velocityNode = *mVelocityNodes[n]; ValueType* velocityData = velocityNode.buffer().data(); const ValueType* gradientOfPressureData = mGradientOfPressureNodes[n]->buffer().data(); for (typename LeafNodeType::ValueOnIter it = velocityNode.beginValueOn(); it; ++it) { const openvdb::Index pos = it.pos(); velocityData[pos] -= scale * gradientOfPressureData[pos]; } } } LeafNodeType* const * const mVelocityNodes; LeafNodeType const * const * const mGradientOfPressureNodes; const double mVoxelSize; const bool mStaggered; }; // class PressureProjectionOp //////////////////////////////////////// /// @brief Functor for use with Tree::modifyValue() to set a single element /// of a vector-valued voxel template<typename VectorType> struct SetVecElemOp { using ValueType = typename VectorType::ValueType; SetVecElemOp(int axis_, ValueType value_): axis(axis_), value(value_) {} void operator()(VectorType& v) const { v[axis] = value; } const int axis; const ValueType value; }; /// @brief Functor to correct the velocities of voxels adjacent to solid obstacles template<typename VelocityGridType> class CorrectCollisionVelocityOp { public: using VectorType = typename VelocityGridType::ValueType; using VectorElementType = typename VectorType::ValueType; using MaskGridType = typename VelocityGridType::template ValueConverter<bool>::Type; using MaskTreeType = typename MaskGridType::TreeType; explicit CorrectCollisionVelocityOp(SolverParms& parms): mParms(&parms) { const MaskGridType& domainMaskGrid = static_cast<const MaskGridType&>(*mParms->domainMaskGrid); typename MaskTreeType::Ptr interiorMask( new MaskTreeType(domainMaskGrid.tree(), /*background=*/false, openvdb::TopologyCopy())); mBorderMask.reset(new MaskTreeType(*interiorMask)); openvdb::tools::erodeVoxels(*interiorMask, /*iterations=*/1, openvdb::tools::NN_FACE); mBorderMask->topologyDifference(*interiorMask); } 
template<typename ColliderGridType> void operator()(const ColliderGridType&) { GridConstAccessor<ColliderGridType> collider( static_cast<const ColliderGridType&>(*mParms->colliderGrid)); correctVelocity(collider); } template<typename ColliderAccessorType> void correctVelocity(const ColliderAccessorType& collider) { using ColliderValueType = typename ColliderAccessorType::ValueType; VelocityGridType& velocityGrid = static_cast<VelocityGridType&>(*mParms->velocityGrid); typename VelocityGridType::Accessor velocity = velocityGrid.getAccessor(); const bool invert = mParms->invertCollider; switch (mParms->colliderType) { case CT_NONE: break; case CT_BBOX: case CT_STATIC: // For each border voxel of the velocity grid... /// @todo parallelize for (typename MaskTreeType::ValueOnCIter it = mBorderMask->cbeginValueOn(); it; ++it) { const Coord ijk = it.getCoord(); // If the neighbor in a certain direction is a stationary obstacle, // set the border voxel's velocity in that direction to zero. // x direction if ((collider.isValueOn(ijk.offsetBy(-1, 0, 0)) != invert) || (collider.isValueOn(ijk.offsetBy(1, 0, 0)) != invert)) { velocity.modifyValue(ijk, SetVecElemOp<VectorType>(0, 0)); } // y direction if ((collider.isValueOn(ijk.offsetBy(0, -1, 0)) != invert) || (collider.isValueOn(ijk.offsetBy(0, 1, 0)) != invert)) { velocity.modifyValue(ijk, SetVecElemOp<VectorType>(1, 0)); } // z direction if ((collider.isValueOn(ijk.offsetBy(0, 0, -1)) != invert) || (collider.isValueOn(ijk.offsetBy(0, 0, 1)) != invert)) { velocity.modifyValue(ijk, SetVecElemOp<VectorType>(2, 0)); } } break; case CT_DYNAMIC: // For each border voxel of the velocity grid... /// @todo parallelize for (typename MaskTreeType::ValueOnCIter it = mBorderMask->cbeginValueOn(); it; ++it) { const Coord ijk = it.getCoord(); ColliderValueType colliderVal; // If the neighbor in a certain direction is a moving obstacle, // set the border voxel's velocity in that direction to the // obstacle's velocity in that direction. 
for (int axis = 0; axis <= 2; ++axis) { // 0:x, 1:y, 2:z Coord neighbor = ijk; neighbor[axis] -= 1; if (collider.probeValue(neighbor, colliderVal) != invert) { // Copy or create a Vec3 from the collider value and extract one of // its components. // (Since the collider is dynamic, ColliderValueType must be a Vec3 type, // but this code has to compile for all ColliderGridTypes.) VectorElementType colliderVelocity = VectorType(colliderVal)[axis]; velocity.modifyValue(ijk, SetVecElemOp<VectorType>(axis, colliderVelocity)); } else { neighbor = ijk; neighbor[axis] += 1; if (collider.probeValue(neighbor, colliderVal) != invert) { VectorElementType colliderVelocity = VectorType(colliderVal)[axis]; velocity.modifyValue(ijk, SetVecElemOp<VectorType>(axis, colliderVelocity)); } } } } break; } // switch (mParms->colliderType) } private: SolverParms* mParms; typename MaskTreeType::Ptr mBorderMask; }; // class CorrectCollisionVelocityOp //////////////////////////////////////// //{ // Boundary condition functors /// @brief Functor specifying boundary conditions for the Poisson solver /// when exterior voxels may be either solid (and possibly in motion) or empty template<typename VelocityGridType, typename ColliderAccessorType> class ColliderBoundaryOp { public: using VectorType = typename VelocityGridType::ValueType; explicit ColliderBoundaryOp(const SolverParms& parms) : mVelocity(static_cast<VelocityGridType&>(*parms.velocityGrid).getConstAccessor()) , mCollider(parms) , mInvert(parms.invertCollider) , mDynamic(parms.colliderType == CT_DYNAMIC) , mInvVoxelSize(0.5 / (parms.velocityGrid->voxelSize()[0])) // assumes uniform voxels {} ColliderBoundaryOp(const ColliderBoundaryOp& other) // Give this op new, empty accessors, to avoid data races with // the other op's accessors, which might be updating on another thread. 
: mVelocity(other.mVelocity.tree()) , mCollider(other.mCollider) , mInvert(other.mInvert) , mDynamic(other.mDynamic) , mInvVoxelSize(other.mInvVoxelSize) {} void operator()(const Coord& ijk, const Coord& ijkNeighbor, double& rhs, double& diag) const { // Voxels outside both the velocity field and the collider // are considered to be empty (unless the collider is inverted). // Voxels outside the velocity field and inside the collider // are considered to be solid. if (mCollider.isValueOn(ijkNeighbor) == mInvert) { // The exterior neighbor is empty (i.e., zero), so just adjust the center weight. diag -= 1; } else { const VectorType& velocity = mVelocity.getValue(ijkNeighbor); double delta = 0.0; if (mDynamic) { // exterior neighbor is a solid obstacle with nonzero velocity const openvdb::Vec3d colliderVelocity(mCollider.getValue(ijkNeighbor)); if (ijkNeighbor[0] < ijk[0]) { delta += velocity[0] - colliderVelocity[0]; } if (ijkNeighbor[0] > ijk[0]) { delta -= (velocity[0] - colliderVelocity[0]); } if (ijkNeighbor[1] < ijk[1]) { delta += velocity[1] - colliderVelocity[1]; } if (ijkNeighbor[1] > ijk[1]) { delta -= (velocity[1] - colliderVelocity[1]); } if (ijkNeighbor[2] < ijk[2]) { delta += velocity[2] - colliderVelocity[2]; } if (ijkNeighbor[2] > ijk[2]) { delta -= (velocity[2] - colliderVelocity[2]); } } else { // exterior neighbor is a stationary solid obstacle if (ijkNeighbor[0] < ijk[0]) { delta += velocity[0]; } if (ijkNeighbor[0] > ijk[0]) { delta -= velocity[0]; } if (ijkNeighbor[1] < ijk[1]) { delta += velocity[1]; } if (ijkNeighbor[1] > ijk[1]) { delta -= velocity[1]; } if (ijkNeighbor[2] < ijk[2]) { delta += velocity[2]; } if (ijkNeighbor[2] > ijk[2]) { delta -= velocity[2]; } } rhs += delta * mInvVoxelSize; // Note: no adjustment to the center weight (diag). } } private: // Disable assignment (due to const members). 
ColliderBoundaryOp& operator=(const ColliderBoundaryOp&); typename VelocityGridType::ConstAccessor mVelocity; // accessor to the velocity grid ColliderAccessorType mCollider; // accessor to the collider const bool mInvert; // invert the collider? const bool mDynamic; // is the collider moving? const double mInvVoxelSize; }; // class ColliderBoundaryOp //} //////////////////////////////////////// /// @brief Main solver routine template<typename VectorGridType, typename ColliderGridType, typename BoundaryOpType> inline bool removeDivergenceWithColliderGrid(SolverParms& parms, const BoundaryOpType& boundaryOp) { using VectorTreeType = typename VectorGridType::TreeType; using VectorLeafNodeType = typename VectorTreeType::LeafNodeType; using VectorType = typename VectorGridType::ValueType; using VectorElementType = typename VectorType::ValueType; using ScalarGrid = typename VectorGridType::template ValueConverter<VectorElementType>::Type; using ScalarTree = typename ScalarGrid::TreeType; using MaskGridType = typename VectorGridType::template ValueConverter<bool>::Type; VectorGridType& velocityGrid = static_cast<VectorGridType&>(*parms.velocityGrid); const bool staggered = ((velocityGrid.getGridClass() == openvdb::GRID_STAGGERED) && (openvdb::VecTraits<VectorType>::Size == 3)); // Compute the divergence of the incoming velocity field. /// @todo Consider neighboring collider velocities at border voxels? openvdb::tools::Divergence<VectorGridType> divergenceOp(velocityGrid); typename ScalarGrid::ConstPtr divGrid = divergenceOp.process(); parms.outputState = openvdb::math::pcg::terminationDefaults<VectorElementType>(); parms.outputState.iterations = parms.iterations; parms.outputState.absoluteError = (parms.absoluteError >= 0.0 ? parms.absoluteError : DEFAULT_MAX_ERROR); parms.outputState.relativeError = 0.0; using PCT = openvdb::math::pcg::JacobiPreconditioner<openvdb::tools::poisson::LaplacianMatrix>; // Solve for pressure using Poisson's equation. 
typename ScalarTree::Ptr pressure; if (parms.colliderType == CT_NONE) { pressure = openvdb::tools::poisson::solveWithBoundaryConditionsAndPreconditioner<PCT>( divGrid->tree(), boundaryOp, parms.outputState, *parms.interrupter, staggered); } else { // Create a domain mask by clipping the velocity grid's topology against the collider's. // Pressure will be computed only where the domain mask is active. MaskGridType* domainMaskGrid = new MaskGridType(*divGrid); // match input grid's topology parms.domainMaskGrid.reset(domainMaskGrid); if (parms.colliderType == CT_BBOX) { if (parms.invertCollider) { // Solve for pressure only outside the bounding box. const openvdb::CoordBBox colliderISBBox = velocityGrid.transform().worldToIndexNodeCentered(parms.colliderBBox); domainMaskGrid->fill(colliderISBBox, false, false); } else { // Solve for pressure only inside the bounding box. domainMaskGrid->clipGrid(parms.colliderBBox); } } else { const ColliderGridType& colliderGrid = static_cast<const ColliderGridType&>(*parms.colliderGrid); if (parms.invertCollider) { // Solve for pressure only inside the collider. domainMaskGrid->topologyIntersection(colliderGrid); } else { // Solve for pressure only outside the collider. domainMaskGrid->topologyDifference(colliderGrid); } } pressure = openvdb::tools::poisson::solveWithBoundaryConditionsAndPreconditioner<PCT>( divGrid->tree(), domainMaskGrid->tree(), boundaryOp, parms.outputState, *parms.interrupter, staggered); } // Store the computed pressure grid. parms.pressureGrid = ScalarGrid::create(pressure); parms.pressureGrid->setTransform(velocityGrid.transform().copy()); { std::string name = parms.velocityGrid->getName(); if (!name.empty()) name += "_"; name += "pressure"; parms.pressureGrid->setName(name); } // Compute the gradient of the pressure. 
openvdb::tools::Gradient<ScalarGrid> gradientOp(static_cast<ScalarGrid&>(*parms.pressureGrid)); typename VectorGridType::Ptr gradientOfPressure = gradientOp.process(); // Compute pressure projection in parallel over leaf nodes. { // Pressure (and therefore the gradient of the pressure) is computed only where // the domain mask is active, but the gradient and velocity grid topologies must match // so that pressure projection can be computed in parallel over leaf nodes (see below). velocityGrid.tree().voxelizeActiveTiles(); gradientOfPressure->topologyUnion(velocityGrid); gradientOfPressure->topologyIntersection(velocityGrid); openvdb::tools::pruneInactive(gradientOfPressure->tree()); std::vector<VectorLeafNodeType*> velNodes; velocityGrid.tree().getNodes(velNodes); std::vector<const VectorLeafNodeType*> gradNodes; gradNodes.reserve(velNodes.size()); gradientOfPressure->tree().getNodes(gradNodes); tbb::parallel_for(tbb::blocked_range<size_t>(0, velNodes.size()), PressureProjectionOp<VectorTreeType>(parms, &velNodes[0], &gradNodes[0], staggered)); } if (parms.colliderType != CT_NONE) { // When obstacles are present, the Poisson solve returns a divergence-free // velocity field in the interior of the input grid, but border voxels // need to be adjusted manually to match neighboring collider velocities. 
CorrectCollisionVelocityOp<VectorGridType> op(parms); if (parms.colliderType == CT_BBOX) { op.correctVelocity(BBoxConstAccessor(parms)); } else { parms.colliderGrid->apply<hvdb::VolumeGridTypes>(op); } } return parms.outputState.success; } /// @brief Main solver routine in the case of no collider or a bounding box collider template<typename VectorGridType, typename BoundaryOpType> inline bool removeDivergence(SolverParms& parms, const BoundaryOpType& boundaryOp) { return removeDivergenceWithColliderGrid<VectorGridType, VectorGridType>(parms, boundaryOp); } /// @brief Functor to invoke the solver with a collider velocity grid of arbitrary vector type template<typename VelocityGridType> struct ColliderDispatchOp { SolverParms* parms; bool success; explicit ColliderDispatchOp(SolverParms& parms_): parms(&parms_) , success(false) {} template<typename ColliderGridType> void operator()(const ColliderGridType&) { using ColliderAccessorType = GridConstAccessor<ColliderGridType>; ColliderBoundaryOp<VelocityGridType, ColliderAccessorType> boundaryOp(*parms); success = removeDivergenceWithColliderGrid<VelocityGridType, ColliderGridType>( *parms, boundaryOp); } }; // struct ColliderDispatchOp /// @brief Invoke the solver for collider inputs of various types (or no collider). template<typename VelocityGridType> inline bool processGrid(SolverParms& parms) { bool success = false; switch (parms.colliderType) { case CT_NONE: // No collider success = removeDivergence<VelocityGridType>( parms, openvdb::tools::poisson::DirichletBoundaryOp<double>()); break; case CT_BBOX: // If collider geometry was supplied, the faces of its bounding box // define solid obstacles. success = removeDivergence<VelocityGridType>(parms, ColliderBoundaryOp<VelocityGridType, BBoxConstAccessor>(parms)); break; case CT_STATIC: // If a static collider grid was supplied, its active voxels define solid obstacles. 
success = removeDivergenceWithColliderGrid<VelocityGridType, ColliderMaskGrid>( parms, ColliderBoundaryOp<VelocityGridType, ColliderMaskAccessor>(parms)); break; case CT_DYNAMIC: { // If a dynamic collider grid was supplied, its active values define // the velocities of solid obstacles. ColliderDispatchOp<VelocityGridType> op(parms); success = parms.colliderGrid->apply<hvdb::Vec3GridTypes>(op); if (success) success = op.success; break; } } return success; } /// @brief Return the given VDB primitive's name in the form "N (NAME)", /// where N is the primitive's index and NAME is the grid name. /// @todo Use the VdbPrimCIterator method once it is adopted into the HDK. inline UT_String getPrimitiveIndexAndName(const GU_PrimVDB* prim) { UT_String result(UT_String::ALWAYS_DEEP); if (prim != nullptr) { result.itoa(prim->getMapIndex()); UT_String name = prim->getGridName(); result += (" (" + name.toStdString() + ")").c_str(); } return result; } inline std::string joinNames(UT_StringArray& names, const char* lastSep = " and ", const char* sep = ", ") { names.sort(); UT_String joined; names.join(sep, lastSep, joined); return "VDB" + (((names.size() == 1) ? " " : "s ") + joined.toStdString()); } } // unnamed namespace //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Remove_Divergence::Cache::cookVDBSop( OP_Context& context) { try { const GU_Detail* colliderGeo = inputGeo(1); const fpreal time = context.getTime(); hvdb::Interrupter interrupter("Removing divergence"); SolverParms parms; parms.interrupter = &interrupter; parms.iterations = (!evalInt("useiterations", 0, time) ? DEFAULT_MAX_ITERATIONS : static_cast<int>(evalInt("iterations", 0, time))); parms.absoluteError = (!evalInt("usetolerance", 0, time) ? 
-1.0 : evalFloat("tolerance", 0, time)); parms.invertCollider = evalInt("invertcollider", 0, time); UT_String groupStr; evalString(groupStr, "group", 0, time); const GA_PrimitiveGroup* group = matchGroup(*gdp, groupStr.toStdString()); const bool outputPressure = evalInt("pressure", 0, time); const bool useCollider = evalInt("usecollider", 0, time); const auto colliderTypeStr = evalStdString("collidertype", time); UT_StringArray xformMismatchGridNames, nonuniformGridNames; // Retrieve either a collider grid or a collider bounding box // (or neither) from the reference input. if (useCollider && colliderGeo) { if (colliderTypeStr == "bbox") { // Use the bounding box of the reference geometry as a collider. UT_BoundingBox box; colliderGeo->getBBox(&box); parms.colliderBBox.min() = openvdb::Vec3d(box.xmin(), box.ymin(), box.zmin()); parms.colliderBBox.max() = openvdb::Vec3d(box.xmax(), box.ymax(), box.zmax()); parms.colliderType = CT_BBOX; } else { // Retrieve the collider grid. UT_String colliderStr; evalString(colliderStr, "collider", 0, time); const GA_PrimitiveGroup* colliderGroup = parsePrimitiveGroups( colliderStr.buffer(), GroupCreator(colliderGeo)); if (hvdb::VdbPrimCIterator colliderIt = hvdb::VdbPrimCIterator(colliderGeo, colliderGroup)) { if (colliderIt->getConstGrid().getGridClass() == openvdb::GRID_LEVEL_SET) { // If the collider grid is a level set, extract an interior mask from it. 
LevelSetMaskOp op; if (hvdb::GEOvdbApply<hvdb::NumericGridTypes>(**colliderIt, op)) { parms.colliderGrid = op.outputGrid; } } if (!parms.colliderGrid) { parms.colliderGrid = colliderIt->getConstGridPtr(); } if (parms.colliderGrid && !parms.colliderGrid->constTransform().hasUniformScale()) { nonuniformGridNames.append(getPrimitiveIndexAndName(*colliderIt)); } if (++colliderIt) { addWarning(SOP_MESSAGE, ("found multiple collider VDBs; using VDB " + getPrimitiveIndexAndName(*colliderIt).toStdString()).c_str()); } } if (!parms.colliderGrid) { if (colliderStr.isstring()) { addError(SOP_MESSAGE, ("collider \"" + colliderStr.toStdString() + "\" not found").c_str()); } else { addError(SOP_MESSAGE, "collider VDB not found"); } return error(); } if (parms.colliderGrid->empty()) { // An empty collider grid was found; ignore it. parms.colliderGrid.reset(); } if (parms.colliderGrid) { const bool isVec3Grid = (3 == UTvdbGetGridTupleSize(UTvdbGetGridType(*parms.colliderGrid))); if (isVec3Grid && (colliderTypeStr == "dynamic")) { // The collider grid is vector-valued. Its active values // are the velocities of moving obstacles. parms.colliderType = CT_DYNAMIC; } else { // The active voxels of the collider grid define stationary, // solid obstacles. Extract a topology mask of those voxels. parms.colliderType = CT_STATIC; ColliderMaskOp op; op.mask = ColliderMaskGrid::create(); parms.colliderGrid->apply<hvdb::AllGridTypes>(op); parms.colliderGrid = op.mask; } } } } int numGridsProcessed = 0; std::ostringstream infoStrm; // Main loop for (hvdb::VdbPrimIterator vdbIt(gdp, group); vdbIt; ++vdbIt) { if (interrupter.wasInterrupted()) break; const UT_VDBType velocityType = vdbIt->getStorageType(); if (velocityType == UT_VDB_VEC3F || velocityType == UT_VDB_VEC3D) { // Found a vector-valued input grid. 
++numGridsProcessed; vdbIt->makeGridUnique(); // ensure that the grid's tree is not shared parms.velocityGrid = vdbIt->getGridPtr(); const openvdb::math::Transform& xform = parms.velocityGrid->constTransform(); if (!xform.hasUniformScale()) { nonuniformGridNames.append(getPrimitiveIndexAndName(*vdbIt)); } if (parms.colliderGrid && (parms.colliderGrid->constTransform() != xform)) { // The velocity and collider grid transforms need to match. xformMismatchGridNames.append(getPrimitiveIndexAndName(*vdbIt)); } // Remove divergence. bool success = false; if (velocityType == UT_VDB_VEC3F) { success = processGrid<openvdb::Vec3SGrid>(parms); } else if (velocityType == UT_VDB_VEC3D) { success = processGrid<openvdb::Vec3DGrid>(parms); } if (!success) { std::ostringstream errStrm; errStrm << "solver failed to converge for VDB " << getPrimitiveIndexAndName(*vdbIt).c_str() << " with error " << parms.outputState.absoluteError; addWarning(SOP_MESSAGE, errStrm.str().c_str()); } else { if (outputPressure && parms.pressureGrid) { hvdb::createVdbPrimitive(*gdp, parms.pressureGrid); } if (numGridsProcessed > 1) infoStrm << "\n"; infoStrm << "solver converged for VDB " << getPrimitiveIndexAndName(*vdbIt).c_str() << " in " << parms.outputState.iterations << " iteration" << (parms.outputState.iterations == 1 ? "" : "s") << " with error " << parms.outputState.absoluteError; } } parms.velocityGrid.reset(); } if (!interrupter.wasInterrupted()) { // Report various issues. if (numGridsProcessed == 0) { addWarning(SOP_MESSAGE, "found no floating-point vector VDBs"); } else { if (nonuniformGridNames.size() > 0) { const std::string names = joinNames(nonuniformGridNames); addWarning(SOP_MESSAGE, ((names + ((nonuniformGridNames.size() == 1) ? 
" has" : " have")) + " nonuniform voxels and should be resampled").c_str()); } if (xformMismatchGridNames.size() > 0) { const std::string names = joinNames(xformMismatchGridNames, " or "); addWarning(SOP_MESSAGE, ("vector field and collider transforms don't match for " + names).c_str()); } const std::string info = infoStrm.str(); if (!info.empty()) { addMessage(SOP_MESSAGE, info.c_str()); } } } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
40,862
C++
38.905273
100
0.616661
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/UT_VDBUtils.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /* * Copyright (c) Side Effects Software Inc. * * Produced by: * Side Effects Software Inc * 477 Richmond Street West * Toronto, Ontario * Canada M5V 3E7 * 416-504-9876 * * NAME: UT_VDBUtils.h (UT Library, C++) * * COMMENTS: */ #include "UT_VDBUtils.h" namespace openvdb_houdini { // empty }
427
C++
16.833333
48
0.622951
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Remap.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Remap.cc /// /// @author FX R&D OpenVDB team #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/math/Math.h> // Tolerance and isApproxEqual #include <openvdb/tools/ValueTransformer.h> #include <UT/UT_Ramp.h> #include <GU/GU_Detail.h> #include <PRM/PRM_Parm.h> #include <tbb/blocked_range.h> #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <algorithm> #include <cmath> #include <map> #include <string> #include <sstream> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; //////////////////////////////////////// // Local Utility Methods namespace { template<typename T> inline T minValue(const T a, const T b) { return std::min(a, b); } template<typename T> inline T maxValue(const T a, const T b) { return std::max(a, b); } template<typename T> inline openvdb::math::Vec3<T> minValue(const openvdb::math::Vec3<T>& a, const openvdb::math::Vec3<T>& b) { return openvdb::math::minComponent(a, b); } template<typename T> inline openvdb::math::Vec3<T> maxValue(const openvdb::math::Vec3<T>& a, const openvdb::math::Vec3<T>& b) { return openvdb::math::maxComponent(a, b); } template<typename T> inline T minComponent(const T s) { return s; } template<typename T> inline T maxComponent(const T s) { return s; } template<typename T> inline T minComponent(const openvdb::math::Vec3<T>& v) { return minValue(v[0], minValue(v[1], v[2])); } template<typename T> inline T maxComponent(const openvdb::math::Vec3<T>& v) { return maxValue(v[0], maxValue(v[1], v[2])); } //////////////////////////////////////// template<typename NodeType> struct NodeMinMax { using ValueType = typename NodeType::ValueType; NodeMinMax(const std::vector<const NodeType*>& nodes, ValueType background) : mNodes(&nodes[0]), mBackground(background), mMin(background), mMax(background) 
{} NodeMinMax(NodeMinMax& other, tbb::split) : mNodes(other.mNodes), mBackground(other.mBackground), mMin(mBackground), mMax(mBackground) {} void join(NodeMinMax& other) { mMin = minValue(other.mMin, mMin); mMax = maxValue(other.mMax, mMax); } void operator()(const tbb::blocked_range<size_t>& range) { ValueType minTmp(mMin), maxTmp(mMax); for (size_t n = range.begin(), N = range.end(); n < N; ++n) { const NodeType& node = *mNodes[n]; for (typename NodeType::ValueAllCIter it = node.cbeginValueAll(); it; ++it) { if (node.isChildMaskOff(it.pos())) { const ValueType val = *it; minTmp = minValue(minTmp, val); maxTmp = maxValue(maxTmp, val); } } } mMin = minValue(minTmp, mMin); mMax = maxValue(maxTmp, mMax); } NodeType const * const * const mNodes; ValueType mBackground, mMin, mMax; }; template<typename NodeType> struct Deactivate { using ValueType = typename NodeType::ValueType; Deactivate(std::vector<NodeType*>& nodes, ValueType background) : mNodes(&nodes[0]), mBackground(background) {} void operator()(const tbb::blocked_range<size_t>& range) const { const ValueType background(mBackground), delta = openvdb::math::Tolerance<ValueType>::value(); for (size_t n = range.begin(), N = range.end(); n < N; ++n) { for (typename NodeType::ValueOnIter it = mNodes[n]->beginValueOn(); it; ++it) { if (openvdb::math::isApproxEqual(background, *it, delta)) { it.setValueOff(); } } } } NodeType * const * const mNodes; ValueType mBackground; }; template<typename TreeType> void evalMinMax(const TreeType& tree, typename TreeType::ValueType& minVal, typename TreeType::ValueType& maxVal) { minVal = tree.background(); maxVal = tree.background(); { // eval voxels using LeafNodeType = typename TreeType::LeafNodeType; std::vector<const LeafNodeType*> nodes; tree.getNodes(nodes); NodeMinMax<LeafNodeType> op(nodes, tree.background()); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, nodes.size()), op); minVal = minValue(minVal, op.mMin); maxVal = maxValue(maxVal, op.mMax); } { // eval first 
tiles using RootNodeType = typename TreeType::RootNodeType; using NodeChainType = typename RootNodeType::NodeChainType; using InternalNodeType = typename NodeChainType::template Get<1>; std::vector<const InternalNodeType*> nodes; tree.getNodes(nodes); NodeMinMax<InternalNodeType> op(nodes, tree.background()); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, nodes.size()), op); minVal = minValue(minVal, op.mMin); maxVal = maxValue(maxVal, op.mMax); } { // eval remaining tiles typename TreeType::ValueType minTmp(minVal), maxTmp(maxVal); typename TreeType::ValueAllCIter it(tree); it.setMaxDepth(TreeType::ValueAllCIter::LEAF_DEPTH - 2); for ( ; it; ++it) { const typename TreeType::ValueType val = *it; minTmp = minValue(minTmp, val); maxTmp = maxValue(maxTmp, val); } minVal = minValue(minVal, minTmp); maxVal = maxValue(maxVal, maxTmp); } } template<typename TreeType> void deactivateBackgroundValues(TreeType& tree) { { // eval voxels using LeafNodeType = typename TreeType::LeafNodeType; std::vector<LeafNodeType*> nodes; tree.getNodes(nodes); Deactivate<LeafNodeType> op(nodes, tree.background()); tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()), op); } { // eval first tiles using RootNodeType = typename TreeType::RootNodeType; using NodeChainType = typename RootNodeType::NodeChainType; using InternalNodeType = typename NodeChainType::template Get<1>; std::vector<InternalNodeType*> nodes; tree.getNodes(nodes); Deactivate<InternalNodeType> op(nodes, tree.background()); tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()), op); } { // eval remaining tiles using ValueType = typename TreeType::ValueType; const ValueType background(tree.background()), delta = openvdb::math::Tolerance<ValueType>::value(); typename TreeType::ValueOnIter it(tree); it.setMaxDepth(TreeType::ValueAllCIter::LEAF_DEPTH - 2); for ( ; it; ++it) { if (openvdb::math::isApproxEqual(background, *it, delta)) { it.setValueOff(); } } } } //////////////////////////////////////// struct 
RemapGridValues { enum Extrapolation { CLAMP, PRESERVE, EXTRAPOLATE }; RemapGridValues(Extrapolation belowExt, Extrapolation aboveExt, UT_Ramp& ramp, const fpreal inMin, const fpreal inMax, const fpreal outMin, const fpreal outMax, bool deactivate, UT_ErrorManager* errorManager = nullptr) : mBelowExtrapolation(belowExt) , mAboveExtrapolation(aboveExt) , mRamp(&ramp) , mErrorManager(errorManager) , mPrimitiveIndex(0) , mPrimitiveName() , mInfo("Remapped grids: (first range shows actual min/max values)\n") , mInMin(inMin) , mInMax(inMax) , mOutMin(outMin) , mOutMax(outMax) , mDeactivate(deactivate) { mRamp->ensureRampIsBuilt(); } ~RemapGridValues() { if (mErrorManager) { mErrorManager->addMessage(SOP_OPTYPE_NAME, SOP_MESSAGE, mInfo.c_str()); } } void setPrimitiveIndex(int i) { mPrimitiveIndex = i; } void setPrimitiveName(const std::string& name) { mPrimitiveName = name; } template<typename GridType> void operator()(GridType& grid) { using ValueType = typename GridType::ValueType; using LeafNodeType = typename GridType::TreeType::LeafNodeType; std::vector<LeafNodeType*> leafnodes; grid.tree().getNodes(leafnodes); ValueType inputMin, inputMax; evalMinMax(grid.tree(), inputMin, inputMax); ValueTransform<GridType> op(*mRamp, leafnodes, mBelowExtrapolation, mAboveExtrapolation, mInMin, mInMax, mOutMin, mOutMax); // update voxels tbb::parallel_for(tbb::blocked_range<size_t>(0, leafnodes.size()), op); // update tiles typename GridType::ValueAllIter it = grid.beginValueAll(); it.setMaxDepth(GridType::ValueAllIter::LEAF_DEPTH - 1); openvdb::tools::foreach(it, op, true); // update background value grid.tree().root().setBackground(op.map(grid.background()), /*updateChildNodes=*/false); grid.setGridClass(openvdb::GRID_UNKNOWN); ValueType outputMin, outputMax; evalMinMax(grid.tree(), outputMin, outputMax); size_t activeVoxelDelta = size_t(grid.tree().activeVoxelCount()); if (mDeactivate) { deactivateBackgroundValues(grid.tree()); activeVoxelDelta -= 
size_t(grid.tree().activeVoxelCount()); } { // log std::stringstream msg; msg << " (" << mPrimitiveIndex << ") '" << mPrimitiveName << "'" << " [" << minComponent(inputMin) << ", " << maxComponent(inputMax) << "]" << " -> [" << minComponent(outputMin) << ", " << maxComponent(outputMax) << "]"; if (mDeactivate && activeVoxelDelta > 0) { msg << ", deactivated " << activeVoxelDelta << " voxels."; } msg << "\n"; mInfo += msg.str(); } } private: template<typename GridType> struct ValueTransform { using LeafNodeType = typename GridType::TreeType::LeafNodeType; ValueTransform(const UT_Ramp& utramp, std::vector<LeafNodeType*>& leafnodes, Extrapolation belowExt, Extrapolation aboveExt, const fpreal inMin, const fpreal inMax, const fpreal outMin, const fpreal outMax) : ramp(&utramp) , nodes(&leafnodes[0]) , belowExtrapolation(belowExt) , aboveExtrapolation(aboveExt) , xMin(inMin) , xScale((inMax - inMin)) , yMin(outMin) , yScale((outMax - outMin)) { xScale = std::abs(xScale) > fpreal(0.0) ? fpreal(1.0) / xScale : fpreal(0.0); } inline void operator()(const tbb::blocked_range<size_t>& range) const { for (size_t n = range.begin(), N = range.end(); n < N; ++n) { typename GridType::ValueType * data = nodes[n]->buffer().data(); for (size_t i = 0, I = LeafNodeType::SIZE; i < I; ++i) { data[i] = map(data[i]); } } } inline void operator()(const typename GridType::ValueAllIter& it) const { it.setValue(map(*it)); } template<typename T> inline T map(const T s) const { fpreal pos = (fpreal(s) - xMin) * xScale; if (pos < fpreal(0.0)) { // below (normalized) minimum if (belowExtrapolation == PRESERVE) return s; if (belowExtrapolation == EXTRAPOLATE) return T((pos * xScale) * yScale); pos = std::max(pos, fpreal(0.0)); // clamp } if (pos > fpreal(1.0)) { // above (normalized) maximum if (aboveExtrapolation == PRESERVE) return s; if (aboveExtrapolation == EXTRAPOLATE) return T((pos * xScale) * yScale); pos = std::min(pos, fpreal(1.0)); //clamp } float values[4] = { 0.0f }; 
ramp->rampLookup(pos, values); return T(yMin + (values[0] * yScale)); } template<typename T> inline openvdb::math::Vec3<T> map(const openvdb::math::Vec3<T>& v) const { openvdb::math::Vec3<T> out; out[0] = map(v[0]); out[1] = map(v[1]); out[2] = map(v[2]); return out; } UT_Ramp const * const ramp; LeafNodeType * const * const nodes; const Extrapolation belowExtrapolation, aboveExtrapolation; fpreal xMin, xScale, yMin, yScale; }; // struct ValueTransform ////////// Extrapolation mBelowExtrapolation, mAboveExtrapolation; UT_Ramp * const mRamp; UT_ErrorManager * const mErrorManager; int mPrimitiveIndex; std::string mPrimitiveName, mInfo; const fpreal mInMin, mInMax, mOutMin, mOutMax; const bool mDeactivate; }; // struct RemapGridValues } // unnamed namespace //////////////////////////////////////// // SOP Implementation struct SOP_OpenVDB_Remap: public hvdb::SOP_NodeVDB { SOP_OpenVDB_Remap(OP_Network*, const char* name, OP_Operator*); static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int sortInputRange(); int sortOutputRange(); class Cache: public SOP_VDBCacheOptions { public: void evalRamp(UT_Ramp&, fpreal time); protected: OP_ERROR cookVDBSop(OP_Context&) override; }; // class Cache }; int inputRangeCB(void*, int, float, const PRM_Template*); int outputRangeCB(void*, int, float, const PRM_Template*); int inputRangeCB(void* data, int /*idx*/, float /*time*/, const PRM_Template*) { SOP_OpenVDB_Remap* sop = static_cast<SOP_OpenVDB_Remap*>(data); if (sop == nullptr) return 0; return sop->sortInputRange(); } int outputRangeCB(void* data, int /*idx*/, float /*time*/, const PRM_Template*) { SOP_OpenVDB_Remap* sop = static_cast<SOP_OpenVDB_Remap*>(data); if (sop == nullptr) return 0; return sop->sortOutputRange(); } int SOP_OpenVDB_Remap::sortInputRange() { const fpreal inMin = evalFloat("inrange", 0, 0); const fpreal inMax = evalFloat("inrange", 1, 0); if (inMin > inMax) { setFloat("inrange", 0, 0, inMax); setFloat("inrange", 1, 0, inMin); } return 1; } 
int SOP_OpenVDB_Remap::sortOutputRange() { const fpreal outMin = evalFloat("outrange", 0, 0); const fpreal outMax = evalFloat("outrange", 1, 0); if (outMin > outMax) { setFloat("outrange", 0, 0, outMax); setFloat("outrange", 1, 0, outMin); } return 1; } void SOP_OpenVDB_Remap::Cache::evalRamp(UT_Ramp& ramp, fpreal time) { const auto rampStr = evalStdString("function", time); UT_IStream strm(rampStr.c_str(), rampStr.size(), UT_ISTREAM_ASCII); ramp.load(strm); } void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenu) .setTooltip("Specify a subset of the input grids.") .setDocumentation( "A subset of the input VDBs to be processed" " (see [specifying volumes|/model/volumes#group])")); { // Extrapolation char const * const items[] = { "clamp", "Clamp", "preserve", "Preserve", "extrapolate", "Extrapolate", nullptr }; parms.add(hutil::ParmFactory(PRM_ORD, "below", "Below Minimum") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setTooltip( "Specify how to handle input values below the input range minimum:" " either by clamping to the output minimum (Clamp)," " leaving out-of-range values intact (Preserve)," " or extrapolating linearly from the output minimum (Extrapolate).") .setDocumentation( "How to handle input values below the input range minimum\n\n" "Clamp:\n" " Clamp values to the output minimum.\n" "Preserve:\n" " Leave out-of-range values intact.\n" "Extrapolate:\n" " Extrapolate values linearly from the output minimum.\n")); parms.add(hutil::ParmFactory(PRM_ORD, "above", "Above Maximum") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setTooltip( "Specify how to handle input values above the input range maximum:" " either by clamping to the input maximum (Clamp)," " leaving out-of-range values intact (Preserve)," " or extrapolating linearly from the input 
maximum (Extrapolate).") .setDocumentation( "How to handle output values above the input range maximum\n\n" "Clamp:\n" " Clamp values to the input maximum.\n" "Preserve:\n" " Leave out-of-range values intact.\n" "Extrapolate:\n" " Extrapolate values linearly from the input maximum.\n")); } std::vector<fpreal> defaultRange; defaultRange.push_back(fpreal(0.0)); defaultRange.push_back(fpreal(1.0)); parms.add(hutil::ParmFactory(PRM_FLT_J, "inrange", "Input Range") .setDefault(defaultRange) .setVectorSize(2) .setTooltip("Input min/max value range") .setCallbackFunc(&inputRangeCB)); parms.add(hutil::ParmFactory(PRM_FLT_J, "outrange", "Output Range") .setDefault(defaultRange) .setVectorSize(2) .setTooltip("Output min/max value range") .setCallbackFunc(&outputRangeCB)); { std::map<std::string, std::string> rampSpare; rampSpare[PRM_SpareData::getFloatRampDefaultToken()] = "1pos ( 0.0 ) 1value ( 0.0 ) 1interp ( linear ) " "2pos ( 1.0 ) 2value ( 1.0 ) 2interp ( linear )"; rampSpare[PRM_SpareData::getRampShowControlsDefaultToken()] = "0"; parms.add(hutil::ParmFactory(PRM_MULTITYPE_RAMP_FLT, "function", "Transfer Function") .setDefault(PRMtwoDefaults) .setSpareData(rampSpare) .setTooltip("X Axis: 0 = input range minimum, 1 = input range maximum.\n" "Y Axis: 0 = output range minimum, 1 = output range maximum.\n") .setDocumentation( "Map values through a transfer function where _x_ = 0 corresponds to" " the input range minimum, _x_ = 1 to the input range maximum," " _y_ = 0 to the output range minimum, and _y_ = 1 to the" " output range maximum.")); } parms.add(hutil::ParmFactory(PRM_TOGGLE, "deactivate", "Deactivate Background Voxels") .setTooltip("Deactivate voxels with values equal to the remapped background value.")); hvdb::OpenVDBOpFactory("VDB Remap", SOP_OpenVDB_Remap::factory, parms, *table) .setNativeName("") .addInput("VDB Grids") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Remap::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: 
vdb\n\ \n\ \"\"\"Perform a remapping of the voxel values in a VDB volume.\"\"\"\n\ \n\ @overview\n\ \n\ This node remaps voxel values to a new range, optionally through\n\ a user-specified transfer function.\n\ \n\ @related\n\ - [Node:sop/volumevop]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } OP_Node* SOP_OpenVDB_Remap::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Remap(net, name, op); } SOP_OpenVDB_Remap::SOP_OpenVDB_Remap(OP_Network* net, const char* name, OP_Operator* op) : hvdb::SOP_NodeVDB(net, name, op) { } OP_ERROR SOP_OpenVDB_Remap::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); hvdb::Interrupter boss("Remapping values"); const fpreal inMin = evalFloat("inrange", 0, time); const fpreal inMax = evalFloat("inrange", 1, time); const fpreal outMin = evalFloat("outrange", 0, time); const fpreal outMax = evalFloat("outrange", 1, time); const bool deactivate = bool(evalInt("deactivate", 0, time)); RemapGridValues::Extrapolation belowExtrapolation = RemapGridValues::CLAMP; RemapGridValues::Extrapolation aboveExtrapolation = RemapGridValues::CLAMP; auto extrapolation = evalInt("below", 0, time); if (extrapolation == 1) belowExtrapolation = RemapGridValues::PRESERVE; else if (extrapolation == 2) belowExtrapolation = RemapGridValues::EXTRAPOLATE; extrapolation = evalInt("above", 0, time); if (extrapolation == 1) aboveExtrapolation = RemapGridValues::PRESERVE; else if (extrapolation == 2) aboveExtrapolation = RemapGridValues::EXTRAPOLATE; const GA_PrimitiveGroup* group = matchGroup(*gdp, evalStdString("group", time)); size_t vdbPrimCount = 0; UT_Ramp ramp; evalRamp(ramp, time); RemapGridValues remap(belowExtrapolation, aboveExtrapolation, ramp, inMin, inMax, outMin, outMax, deactivate, UTgetErrorManager()); for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { if (boss.wasInterrupted()) break; 
remap.setPrimitiveName(it.getPrimitiveName().toStdString()); remap.setPrimitiveIndex(int(it.getIndex())); hvdb::GEOvdbApply<hvdb::NumericGridTypes::Append<hvdb::Vec3GridTypes>>(**it, remap); GU_PrimVDB* vdbPrim = *it; const GEO_VolumeOptions& visOps = vdbPrim->getVisOptions(); vdbPrim->setVisualization(GEO_VOLUMEVIS_SMOKE , visOps.myIso, visOps.myDensity); ++vdbPrimCount; } if (vdbPrimCount == 0) { addWarning(SOP_MESSAGE, "Did not find any VDBs."); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
22,179
C++
31.285298
100
0.603048
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/GeometryUtil.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file GeometryUtil.h /// @author FX R&D Simulation team /// @brief Utility methods and tools for geometry processing #ifndef OPENVDB_HOUDINI_GEOMETRY_UTIL_HAS_BEEN_INCLUDED #define OPENVDB_HOUDINI_GEOMETRY_UTIL_HAS_BEEN_INCLUDED #include <openvdb/openvdb.h> #include <openvdb/tools/MeshToVolume.h> // for openvdb::tools::MeshToVoxelEdgeData #include <openvdb/tree/LeafManager.h> #include <openvdb/util/Util.h> // for openvdb::util::COORD_OFFSETS #include <GU/GU_Detail.h> #include <algorithm> // for std::max/min() #include <memory> #include <string> #include <vector> class GA_SplittableRange; class OBJ_Camera; class OP_Context; class OP_Node; #ifdef SESI_OPENVDB #ifdef OPENVDB_HOUDINI_API #undef OPENVDB_HOUDINI_API #define OPENVDB_HOUDINI_API #endif #endif namespace openvdb_houdini { class Interrupter; /// Add geometry to the given detail to indicate the extents of a frustum transform. OPENVDB_HOUDINI_API void drawFrustum(GU_Detail&, const openvdb::math::Transform&, const UT_Vector3* boxColor, const UT_Vector3* tickColor, bool shaded, bool drawTicks = true); /// Construct a frustum transform from a Houdini camera. OPENVDB_HOUDINI_API openvdb::math::Transform::Ptr frustumTransformFromCamera( OP_Node&, OP_Context&, OBJ_Camera&, float offset, float nearPlaneDist, float farPlaneDist, float voxelDepthSize = 1.0, int voxelCountX = 100); //////////////////////////////////////// /// @brief Return @c true if the point at the given offset is referenced /// by primitives from a certain primitive group. OPENVDB_HOUDINI_API bool pointInPrimGroup(GA_Offset ptnOffset, GU_Detail&, const GA_PrimitiveGroup&); //////////////////////////////////////// /// @brief Convert geometry to quads and triangles. 
/// @return a pointer to a new GU_Detail object if the geometry was /// converted or subdivided, otherwise a null pointer OPENVDB_HOUDINI_API std::unique_ptr<GU_Detail> convertGeometry(const GU_Detail&, std::string& warning, Interrupter*); //////////////////////////////////////// /// TBB body object for threaded world to voxel space transformation and copy of points class OPENVDB_HOUDINI_API TransformOp { public: TransformOp(GU_Detail const * const gdp, const openvdb::math::Transform& transform, std::vector<openvdb::Vec3s>& pointList); void operator()(const GA_SplittableRange&) const; private: GU_Detail const * const mGdp; const openvdb::math::Transform& mTransform; std::vector<openvdb::Vec3s>* const mPointList; }; //////////////////////////////////////// /// @brief TBB body object for threaded primitive copy /// @details Produces a primitive-vertex index list. class OPENVDB_HOUDINI_API PrimCpyOp { public: PrimCpyOp(GU_Detail const * const gdp, std::vector<openvdb::Vec4I>& primList); void operator()(const GA_SplittableRange&) const; private: GU_Detail const * const mGdp; std::vector<openvdb::Vec4I>* const mPrimList; }; //////////////////////////////////////// /// @brief TBB body object for threaded vertex normal generation /// @details Averages face normals from all similarly oriented primitives, /// that share the same vertex-point, to maintain sharp features. 
class OPENVDB_HOUDINI_API VertexNormalOp { public: VertexNormalOp(GU_Detail&, const GA_PrimitiveGroup* interiorPrims=nullptr, float angle=0.7f); void operator()(const GA_SplittableRange&) const; private: bool isInteriorPrim(GA_Offset primOffset) const { return mInteriorPrims && mInteriorPrims->containsIndex( mDetail.primitiveIndex(primOffset)); } const GU_Detail& mDetail; const GA_PrimitiveGroup* mInteriorPrims; GA_RWHandleV3 mNormalHandle; const float mAngle; }; //////////////////////////////////////// /// TBB body object for threaded sharp feature construction class OPENVDB_HOUDINI_API SharpenFeaturesOp { public: using EdgeData = openvdb::tools::MeshToVoxelEdgeData; SharpenFeaturesOp(GU_Detail& meshGeo, const GU_Detail& refGeo, EdgeData& edgeData, const openvdb::math::Transform& xform, const GA_PrimitiveGroup* surfacePrims = nullptr, const openvdb::BoolTree* mask = nullptr); void operator()(const GA_SplittableRange&) const; private: GU_Detail& mMeshGeo; const GU_Detail& mRefGeo; EdgeData& mEdgeData; const openvdb::math::Transform& mXForm; const GA_PrimitiveGroup* mSurfacePrims; const openvdb::BoolTree* mMaskTree; }; //////////////////////////////////////// /// TBB body object for threaded sharp feature construction template<typename IndexTreeType, typename BoolTreeType> class GenAdaptivityMaskOp { public: using BoolLeafManager = openvdb::tree::LeafManager<BoolTreeType>; GenAdaptivityMaskOp(const GU_Detail& refGeo, const IndexTreeType& indexTree, BoolLeafManager&, float edgetolerance = 0.0); void run(bool threaded = true); void operator()(const tbb::blocked_range<size_t>&) const; private: const GU_Detail& mRefGeo; const IndexTreeType& mIndexTree; BoolLeafManager& mLeafs; float mEdgeTolerance; }; template<typename IndexTreeType, typename BoolTreeType> GenAdaptivityMaskOp<IndexTreeType, BoolTreeType>::GenAdaptivityMaskOp(const GU_Detail& refGeo, const IndexTreeType& indexTree, BoolLeafManager& leafMgr, float edgetolerance) : mRefGeo(refGeo) , mIndexTree(indexTree) , 
mLeafs(leafMgr) , mEdgeTolerance(edgetolerance) { mEdgeTolerance = std::max(0.0f, mEdgeTolerance); mEdgeTolerance = std::min(1.0f, mEdgeTolerance); } template<typename IndexTreeType, typename BoolTreeType> void GenAdaptivityMaskOp<IndexTreeType, BoolTreeType>::run(bool threaded) { if (threaded) { tbb::parallel_for(mLeafs.getRange(), *this); } else { (*this)(mLeafs.getRange()); } } template<typename IndexTreeType, typename BoolTreeType> void GenAdaptivityMaskOp<IndexTreeType, BoolTreeType>::operator()( const tbb::blocked_range<size_t>& range) const { using IndexAccessorType = typename openvdb::tree::ValueAccessor<const IndexTreeType>; IndexAccessorType idxAcc(mIndexTree); UT_Vector3 tmpN, normal; GA_Offset primOffset; int tmpIdx; openvdb::Coord ijk, nijk; typename BoolTreeType::LeafNodeType::ValueOnIter iter; for (size_t n = range.begin(); n < range.end(); ++n) { iter = mLeafs.leaf(n).beginValueOn(); for (; iter; ++iter) { ijk = iter.getCoord(); bool edgeVoxel = false; int idx = idxAcc.getValue(ijk); primOffset = mRefGeo.primitiveOffset(idx); normal = mRefGeo.getGEOPrimitive(primOffset)->computeNormal(); for (size_t i = 0; i < 18; ++i) { nijk = ijk + openvdb::util::COORD_OFFSETS[i]; if (idxAcc.probeValue(nijk, tmpIdx) && tmpIdx != idx) { primOffset = mRefGeo.primitiveOffset(tmpIdx); tmpN = mRefGeo.getGEOPrimitive(primOffset)->computeNormal(); if (normal.dot(tmpN) < mEdgeTolerance) { edgeVoxel = true; break; } } } if (!edgeVoxel) iter.setValueOff(); } } } } // namespace openvdb_houdini //////////////////////////////////////// #endif // OPENVDB_HOUDINI_GEOMETRY_UTIL_HAS_BEEN_INCLUDED
7,516
C
26.636029
97
0.66711
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Vector_Split.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Vector_Split.cc /// /// @author FX R&D OpenVDB team /// /// @brief Split vector grids into component scalar grids. #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <UT/UT_Interrupt.h> #include <set> #include <sstream> #include <stdexcept> #include <string> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; class SOP_OpenVDB_Vector_Split: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Vector_Split(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Vector_Split() override = default; static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; }; void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; // Input vector grid group name parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip( "Specify a subset of the input VDB grids to be split.\n" "Vector-valued grids will be split into component scalar grids;\n" "all other grids will be unchanged.") .setDocumentation( "A subset of the input VDBs to be split" " (see [specifying volumes|/model/volumes#group])\n\n" "Vector-valued VDBs are split into component scalar VDBs;" " VDBs of other types are passed through unchanged.")); // Toggle to keep/remove source grids parms.add( hutil::ParmFactory(PRM_TOGGLE, "remove_sources", "Remove Source VDBs") .setDefault(PRMoneDefaults) .setTooltip("Remove vector grids that have been split.") .setDocumentation("If enabled, delete vector grids that have been split.")); // Toggle to copy inactive values in addition to active values parms.add( hutil::ParmFactory(PRM_TOGGLE, "copyinactive", "Copy Inactive Values") .setDefault(PRMzeroDefaults) .setTooltip( "If enabled, split the values of 
both active and inactive voxels.\n" "If disabled, split the values of active voxels only.")); #ifndef SESI_OPENVDB // Verbosity toggle parms.add(hutil::ParmFactory(PRM_TOGGLE, "verbose", "Verbose") .setDocumentation("If enabled, print debugging information to the terminal.")); #endif // Register this operator. hvdb::OpenVDBOpFactory("VDB Vector Split", SOP_OpenVDB_Vector_Split::factory, parms, *table) .addInput("Vector VDBs to split into scalar VDBs") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Vector_Split::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Split a vector VDB primitive into three scalar VDB primitives.\"\"\"\n\ \n\ @overview\n\ \n\ This node will create three new scalar primitives named `<<input>>.x`,\n\ `<<input>>.y`, and `<<input>>.z`.\n\ \n\ TIP:\n\ To reverse the split (i.e., to merge three scalar VDBs into a vector VDB),\n\ use the [OpenVDB Vector Merge node|Node:sop/DW_OpenVDBVectorMerge]\n\ and set the groups to `@name=*.x`, `@name=*.y`, and `@name=*.z`.\n\ \n\ @related\n\ - [OpenVDB Vector Merge|Node:sop/DW_OpenVDBVectorMerge]\n\ - [Node:sop/vdbvectorsplit]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } OP_Node* SOP_OpenVDB_Vector_Split::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Vector_Split(net, name, op); } SOP_OpenVDB_Vector_Split::SOP_OpenVDB_Vector_Split(OP_Network* net, const char* name, OP_Operator* op): SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// namespace { class VectorGridSplitter { private: const GEO_PrimVDB& mInVdb; hvdb::GridPtr mXGrid, mYGrid, mZGrid; bool mCopyInactiveValues; public: VectorGridSplitter(const GEO_PrimVDB& _vdb, bool _inactive): mInVdb(_vdb), mCopyInactiveValues(_inactive) {} const hvdb::GridPtr& getXGrid() { return mXGrid; } const hvdb::GridPtr& getYGrid() { return mYGrid; } const hvdb::GridPtr& getZGrid() { return 
mZGrid; } template<typename VecGridT> void operator()(const VecGridT& vecGrid) { const std::string gridName = mInVdb.getGridName(); using VecT = typename VecGridT::ValueType; using ScalarTreeT = typename VecGridT::TreeType::template ValueConverter<typename VecT::value_type>::Type; using ScalarGridT = typename openvdb::Grid<ScalarTreeT>; using ScalarGridPtr = typename ScalarGridT::Ptr; const VecT bkgd = vecGrid.background(); // Construct the output scalar grids, with background values taken from // the components of the input vector grid's background value. ScalarGridPtr xGrid = ScalarGridT::create(bkgd.x()), yGrid = ScalarGridT::create(bkgd.y()), zGrid = ScalarGridT::create(bkgd.z()); mXGrid = xGrid; mYGrid = yGrid; mZGrid = zGrid; // The output scalar grids share the input vector grid's transform. if (openvdb::math::Transform::Ptr xform = vecGrid.transform().copy()) { xGrid->setTransform(xform); yGrid->setTransform(xform); zGrid->setTransform(xform); } // Use accessors for fast sequential voxel access. typename ScalarGridT::Accessor xAccessor = xGrid->getAccessor(), yAccessor = yGrid->getAccessor(), zAccessor = zGrid->getAccessor(); // For each tile or voxel value in the input vector tree, // set a corresponding value in each of the output scalar trees. 
openvdb::CoordBBox bbox; if (mCopyInactiveValues) { for (typename VecGridT::ValueAllCIter it = vecGrid.cbeginValueAll(); it; ++it) { if (!it.getBoundingBox(bbox)) continue; const VecT& val = it.getValue(); const bool active = it.isValueOn(); if (it.isTileValue()) { xGrid->fill(bbox, val.x(), active); yGrid->fill(bbox, val.y(), active); zGrid->fill(bbox, val.z(), active); } else { // it.isVoxelValue() xAccessor.setValue(bbox.min(), val.x()); yAccessor.setValue(bbox.min(), val.y()); zAccessor.setValue(bbox.min(), val.z()); if (!active) { xAccessor.setValueOff(bbox.min()); yAccessor.setValueOff(bbox.min()); zAccessor.setValueOff(bbox.min()); } } } } else { for (typename VecGridT::ValueOnCIter it = vecGrid.cbeginValueOn(); it; ++it) { if (!it.getBoundingBox(bbox)) continue; const VecT& val = it.getValue(); if (it.isTileValue()) { xGrid->fill(bbox, val.x()); yGrid->fill(bbox, val.y()); zGrid->fill(bbox, val.z()); } else { // it.isVoxelValue() xAccessor.setValueOn(bbox.min(), val.x()); yAccessor.setValueOn(bbox.min(), val.y()); zAccessor.setValueOn(bbox.min(), val.z()); } } } } }; // class VectorGridSplitter } // unnamed namespace //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Vector_Split::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); const bool copyInactiveValues = evalInt("copyinactive", 0, time); const bool removeSourceGrids = evalInt("remove_sources", 0, time); #ifndef SESI_OPENVDB const bool verbose = evalInt("verbose", 0, time); #else const bool verbose = false; #endif UT_AutoInterrupt progress("Splitting VDB grids"); using PrimVDBSet = std::set<GEO_PrimVDB*>; PrimVDBSet primsToRemove; // Get the group of grids to split. const GA_PrimitiveGroup* splitGroup = nullptr; { UT_String groupStr; evalString(groupStr, "group", 0, time); splitGroup = matchGroup(*gdp, groupStr.toStdString()); } // Iterate over VDB primitives in the selected group. 
for (hvdb::VdbPrimIterator it(gdp, splitGroup); it; ++it) { if (progress.wasInterrupted()) return error(); GU_PrimVDB* vdb = *it; const std::string gridName = vdb->getGridName(); VectorGridSplitter op(*vdb, copyInactiveValues); if (!hvdb::GEOvdbApply<hvdb::Vec3GridTypes>(*vdb, op)) { if (verbose && !gridName.empty()) { addWarning(SOP_MESSAGE, (gridName + " is not a vector grid").c_str()); } continue; } // Add the new scalar grids to the detail, copying attributes and // group membership from the input vector grid. const std::string xGridName = gridName.empty() ? "x" : gridName + ".x", yGridName = gridName.empty() ? "y" : gridName + ".y", zGridName = gridName.empty() ? "z" : gridName + ".z"; GU_PrimVDB::buildFromGrid(*gdp, op.getXGrid(), vdb, xGridName.c_str()); GU_PrimVDB::buildFromGrid(*gdp, op.getYGrid(), vdb, yGridName.c_str()); GU_PrimVDB::buildFromGrid(*gdp, op.getZGrid(), vdb, zGridName.c_str()); if (verbose) { std::ostringstream ostr; ostr << "Split "; if (!gridName.empty()) ostr << gridName << " "; ostr << "into " << xGridName << ", " << yGridName << " and " << zGridName; addMessage(SOP_MESSAGE, ostr.str().c_str()); } primsToRemove.insert(vdb); } if (removeSourceGrids) { // Remove vector grids that were split. for (PrimVDBSet::iterator i = primsToRemove.begin(), e = primsToRemove.end(); i != e; ++i) { gdp->destroyPrimitive(*(*i), /*andPoints=*/true); } } primsToRemove.clear(); } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
10,638
C++
33.654723
98
0.586764
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Platonic.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Platonic.cc /// @author FX R&D OpenVDB team #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <UT/UT_Interrupt.h> #include <openvdb/tools/LevelSetSphere.h> #include <openvdb/tools/LevelSetPlatonic.h> #include <openvdb/tools/LevelSetUtil.h> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; class SOP_OpenVDB_Platonic: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Platonic(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Platonic() override {} static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; protected: bool updateParmsFlags() override; }; void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; // Shapes parms.add(hutil::ParmFactory(PRM_ORD, "solidType", "Solid Type") .setTooltip("Select a sphere or one of the five platonic solids") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "sphere", "Sphere", "tetrahedron", "Tetrahedron", "cube", "Cube", "octahedron", "Octahedron", "dodecahedron", "Dodecahedron", "icosahedron", "Icosahedron" })); { // Grid Class const std::vector<std::string> items{ openvdb::GridBase::gridClassToString(openvdb::GRID_LEVEL_SET), // token openvdb::GridBase::gridClassToMenuName(openvdb::GRID_LEVEL_SET), // label "sdf", "Signed Distance Field" }; parms.add(hutil::ParmFactory(PRM_STRING, "gridclass", "Grid Class") .setDefault(openvdb::GridBase::gridClassToString(openvdb::GRID_LEVEL_SET)) .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setDocumentation("\ The type of volume to generate\n\ \n\ Level Set:\n\ Generate a narrow-band signed distance field level set, in which\n\ the values define positive and negative distances to the surface\n\ of the solid up to a certain band width.\n\ \n\ Signed 
Distance Field:\n\ Generate a narrow-band unsigned distance field level set, in which\n\ the values define positive distances to the surface of the solid\n\ up to a certain band width.\n")); } // Radius parms.add(hutil::ParmFactory(PRM_FLT_J, "scalarRadius", "Radius/Size") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_FREE, 10) .setTooltip("The size of the platonic solid or the radius of the sphere")); // Center parms.add(hutil::ParmFactory(PRM_XYZ_J, "center", "Center") .setVectorSize(3) .setDefault(PRMzeroDefaults) .setTooltip("The world-space center of the level set")); // Voxel size parms.add(hutil::ParmFactory(PRM_FLT_J, "voxelSize", "Voxel Size") .setDefault(0.1f) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_FREE, 10) .setTooltip("The size of a voxel in world units")); // Narrow-band half-width parms.add(hutil::ParmFactory(PRM_FLT_J, "halfWidth", "Half-Band Width") .setDefault(PRMthreeDefaults) .setRange(PRM_RANGE_RESTRICTED, 1.0, PRM_RANGE_UI, 10) .setTooltip( "Half the width of the narrow band in voxel units\n\n" "For proper operation, many nodes expect this to be at least three.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "fogVolume", "Convert to Fog Volume") .setTooltip("If enabled, generate a fog volume instead of a level set")); // Register this operator. 
hvdb::OpenVDBOpFactory("VDB Platonic", SOP_OpenVDB_Platonic::factory, parms, *table) .setNativeName("") .setVerb(SOP_NodeVerb::COOK_GENERATOR, []() { return new SOP_OpenVDB_Platonic::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Generate a platonic solid as a level set or a fog volume VDB.\"\"\"\n\ \n\ @overview\n\ \n\ This node generates a VDB representing a platonic solid as either a level set or fog volume.\n\ \n\ @related\n\ - [OpenVDB Create|Node:sop/DW_OpenVDBCreate]\n\ - [Node:sop/platonic]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } OP_Node* SOP_OpenVDB_Platonic::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Platonic(net, name, op); } SOP_OpenVDB_Platonic::SOP_OpenVDB_Platonic(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } bool SOP_OpenVDB_Platonic::updateParmsFlags() { bool changed = false; const bool sdfGrid = (evalStdString("gridclass", 0) == "sdf"); changed |= enableParm("halfWidth", sdfGrid); return changed; } OP_ERROR SOP_OpenVDB_Platonic::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); hvdb::Interrupter boss("Creating VDB platonic solid"); // Read GUI parameters and generate narrow-band level set of sphere const float radius = static_cast<float>(evalFloat("scalarRadius", 0, time)); const openvdb::Vec3f center = evalVec3f("center", time); const float voxelSize = static_cast<float>(evalFloat("voxelSize", 0, time)); const float halfWidth = ((evalStdString("gridclass", 0) != "sdf") ? 
3.0f : static_cast<float>(evalFloat("halfWidth", 0, time))); openvdb::FloatGrid::Ptr grid; switch (evalInt("solidType", 0, time)) { case 0://Sphere grid = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid, hvdb::Interrupter> (radius, center, voxelSize, halfWidth, &boss); break; case 1:// Tetrahedraon grid = openvdb::tools::createLevelSetTetrahedron<openvdb::FloatGrid, hvdb::Interrupter> (radius, center, voxelSize, halfWidth, &boss); break; case 2:// Cube grid = openvdb::tools::createLevelSetCube<openvdb::FloatGrid, hvdb::Interrupter> (radius, center, voxelSize, halfWidth, &boss); break; case 3:// Octahedron grid = openvdb::tools::createLevelSetOctahedron<openvdb::FloatGrid, hvdb::Interrupter> (radius, center, voxelSize, halfWidth, &boss); break; case 4:// Dodecahedron grid = openvdb::tools::createLevelSetDodecahedron<openvdb::FloatGrid, hvdb::Interrupter> (radius, center, voxelSize, halfWidth, &boss); break; case 5:// Icosahedron grid = openvdb::tools::createLevelSetIcosahedron<openvdb::FloatGrid, hvdb::Interrupter> (radius, center, voxelSize, halfWidth, &boss); break; default: addError(SOP_MESSAGE, "Illegal shape."); return error(); } // Fog volume conversion if (bool(evalInt("fogVolume", 0, time))) { openvdb::tools::sdfToFogVolume(*grid); } // Store the grid in a new VDB primitive and add the primitive to the output gdp hvdb::createVdbPrimitive(*gdp, grid, "PlatonicSolid"); } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
7,504
C++
32.959276
100
0.639126
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Occlusion_Mask.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Occlusion_Mask.cc /// /// @author FX R&D OpenVDB team /// /// @brief Masks the occluded regions behind objects in the camera frustum #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/GeometryUtil.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/tools/LevelSetUtil.h> #include <openvdb/tools/GridTransformer.h> #include <openvdb/tools/Morphology.h> #include <OBJ/OBJ_Camera.h> #include <cmath> // for std::floor() #include <stdexcept> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; class SOP_OpenVDB_Occlusion_Mask: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Occlusion_Mask(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) {} ~SOP_OpenVDB_Occlusion_Mask() override = default; static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); class Cache: public SOP_VDBCacheOptions { public: openvdb::math::Transform::Ptr frustum() const { return mFrustum; } protected: OP_ERROR cookVDBSop(OP_Context&) override; private: openvdb::math::Transform::Ptr mFrustum; }; // class Cache protected: void resolveObsoleteParms(PRM_ParmList*) override; OP_ERROR cookMyGuide1(OP_Context&) override; }; // class SOP_OpenVDB_Occlusion_Mask //////////////////////////////////////// OP_Node* SOP_OpenVDB_Occlusion_Mask::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Occlusion_Mask(net, name, op); } void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Specify a subset of the input VDB grids to be processed.") .setDocumentation( "A subset of the input VDBs to be processed" " (see [specifying volumes|/model/volumes#group])")); parms.add(hutil::ParmFactory(PRM_STRING, 
"camera", "Camera") .setTypeExtended(PRM_TYPE_DYNAMIC_PATH) .setSpareData(&PRM_SpareData::objCameraPath) .setTooltip("Reference camera path") .setDocumentation("The path to the camera (e.g., `/obj/cam1`)")); parms.add(hutil::ParmFactory(PRM_INT_J, "voxelcount", "Voxel Count") .setDefault(PRM100Defaults) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 200) .setTooltip("The desired width in voxels of the camera's near plane")); parms.add(hutil::ParmFactory(PRM_FLT_J, "voxeldepthsize", "Voxel Depth Size") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 5) .setTooltip("The depth of a voxel in world units (all voxels have equal depth)")); parms.add(hutil::ParmFactory(PRM_FLT_J, "depth", "Mask Depth") .setDefault(100) .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_UI, 1000.0) .setTooltip( "The desired depth of the mask in world units" " from the near plane to the far plane")); parms.add(hutil::ParmFactory(PRM_INT_J, "erode", "Erode") .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 10) .setDefault(PRMzeroDefaults) .setTooltip("The number of voxels by which to shrink the mask")); parms.add(hutil::ParmFactory(PRM_INT_J, "zoffset", "Z Offset") .setRange(PRM_RANGE_UI, -10, PRM_RANGE_UI, 10) .setDefault(PRMzeroDefaults) .setTooltip("The number of voxels by which to offset the near plane")); hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_INT_J, "voxelCount", "Voxel Count") .setDefault(PRM100Defaults)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "voxelDepthSize", "Voxel Depth Size") .setDefault(PRMoneDefaults)); hvdb::OpenVDBOpFactory("VDB Occlusion Mask", SOP_OpenVDB_Occlusion_Mask::factory, parms, *table) .addInput("VDBs") .setObsoleteParms(obsoleteParms) .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Occlusion_Mask::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Identify voxels of a VDB volume that are in shadow from a given camera.\"\"\"\n\ \n\ @overview\n\ 
\n\ This node outputs a VDB volume whose active voxels denote the voxels\n\ of an input volume inside a camera frustum that would be occluded\n\ when viewed through the camera.\n\ \n\ @related\n\ - [OpenVDB Clip|Node:sop/DW_OpenVDBClip]\n\ - [OpenVDB Create|Node:sop/DW_OpenVDBCreate]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } void SOP_OpenVDB_Occlusion_Mask::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; resolveRenamedParm(*obsoleteParms, "voxelCount", "voxelcount"); resolveRenamedParm(*obsoleteParms, "voxelDepthSize", "voxeldepthsize"); // Delegate to the base class. hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Occlusion_Mask::cookMyGuide1(OP_Context&) { myGuide1->clearAndDestroy(); openvdb::math::Transform::ConstPtr frustum; // Attempt to extract the frustum from our cache. if (auto* cache = dynamic_cast<SOP_OpenVDB_Occlusion_Mask::Cache*>(myNodeVerbCache)) { frustum = cache->frustum(); } if (frustum) { UT_Vector3 color(0.9f, 0.0f, 0.0f); hvdb::drawFrustum(*myGuide1, *frustum, &color, nullptr, false, false); } return error(); } //////////////////////////////////////// namespace { template<typename BoolTreeT> class VoxelShadow { public: using BoolLeafManagerT = openvdb::tree::LeafManager<const BoolTreeT>; ////////// VoxelShadow(const BoolLeafManagerT& leafs, int zMax, int offset); void run(bool threaded = true); BoolTreeT& tree() const { return *mNewTree; } ////////// VoxelShadow(VoxelShadow&, tbb::split); void operator()(const tbb::blocked_range<size_t>&); void join(const VoxelShadow& rhs) { mNewTree->merge(*rhs.mNewTree); mNewTree->prune(); } private: typename BoolTreeT::Ptr mNewTree; const BoolLeafManagerT* mLeafs; const int mOffset, mZMax; }; template<typename BoolTreeT> VoxelShadow<BoolTreeT>::VoxelShadow(const BoolLeafManagerT& leafs, int zMax, int offset) : mNewTree(new 
BoolTreeT(false)) , mLeafs(&leafs) , mOffset(offset) , mZMax(zMax) { } template<typename BoolTreeT> VoxelShadow<BoolTreeT>::VoxelShadow(VoxelShadow& rhs, tbb::split) : mNewTree(new BoolTreeT(false)) , mLeafs(rhs.mLeafs) , mOffset(rhs.mOffset) , mZMax(rhs.mZMax) { } template<typename BoolTreeT> void VoxelShadow<BoolTreeT>::run(bool threaded) { if (threaded) tbb::parallel_reduce(mLeafs->getRange(), *this); else (*this)(mLeafs->getRange()); } template<typename BoolTreeT> void VoxelShadow<BoolTreeT>::operator()(const tbb::blocked_range<size_t>& range) { typename BoolTreeT::LeafNodeType::ValueOnCIter it; openvdb::CoordBBox bbox; bbox.max()[2] = mZMax; for (size_t n = range.begin(); n != range.end(); ++n) { for (it = mLeafs->leaf(n).cbeginValueOn(); it; ++it) { bbox.min() = it.getCoord(); bbox.min()[2] += mOffset; bbox.max()[0] = bbox.min()[0]; bbox.max()[1] = bbox.min()[1]; mNewTree->fill(bbox, true, true); } mNewTree->prune(); } } struct BoolSampler { static const char* name() { return "bin"; } static int radius() { return 2; } static bool mipmap() { return false; } static bool consistent() { return true; } template<class TreeT> static bool sample(const TreeT& inTree, const openvdb::Vec3R& inCoord, typename TreeT::ValueType& result) { openvdb::Coord ijk; ijk[0] = int(std::floor(inCoord[0])); ijk[1] = int(std::floor(inCoord[1])); ijk[2] = int(std::floor(inCoord[2])); return inTree.probeValue(ijk, result); } }; struct ConstructShadow { ConstructShadow(const openvdb::math::Transform& frustum, int erode, int zoffset) : mGrid(openvdb::BoolGrid::create(false)) , mFrustum(frustum) , mErode(erode) , mZOffset(zoffset) { } template<typename GridType> void operator()(const GridType& grid) { using TreeType = typename GridType::TreeType; const TreeType& tree = grid.tree(); // Resample active tree topology into camera frustum space. 
openvdb::BoolGrid frustumMask(false); frustumMask.setTransform(mFrustum.copy()); { openvdb::BoolGrid topologyMask(false); topologyMask.setTransform(grid.transform().copy()); if (openvdb::GRID_LEVEL_SET == grid.getGridClass()) { openvdb::BoolGrid::Ptr tmpGrid = openvdb::tools::sdfInteriorMask(grid); topologyMask.tree().merge(tmpGrid->tree()); if (mErode > 3) { openvdb::tools::erodeVoxels(topologyMask.tree(), (mErode - 3)); } } else { topologyMask.tree().topologyUnion(tree); if (mErode > 0) { openvdb::tools::erodeVoxels(topologyMask.tree(), mErode); } } if (grid.transform().voxelSize()[0] < mFrustum.voxelSize()[0]) { openvdb::tools::resampleToMatch<openvdb::tools::PointSampler>( topologyMask, frustumMask); } else { openvdb::tools::resampleToMatch<BoolSampler>(topologyMask, frustumMask); } } // Create shadow volume mGrid = openvdb::BoolGrid::create(false); mGrid->setTransform(mFrustum.copy()); openvdb::BoolTree& shadowTree = mGrid->tree(); const openvdb::math::NonlinearFrustumMap& map = *mFrustum.map<openvdb::math::NonlinearFrustumMap>(); int zCoord = int(std::floor(map.getBBox().max()[2])); // Voxel shadows openvdb::tree::LeafManager<const openvdb::BoolTree> leafs(frustumMask.tree()); VoxelShadow<openvdb::BoolTree> shadowOp(leafs, zCoord, mZOffset); shadowOp.run(); shadowTree.merge(shadowOp.tree()); // Tile shadows openvdb::CoordBBox bbox; openvdb::BoolTree::ValueOnIter it(frustumMask.tree()); it.setMaxDepth(openvdb::BoolTree::ValueAllIter::LEAF_DEPTH - 1); for ( ; it; ++it) { it.getBoundingBox(bbox); bbox.min()[2] += mZOffset; bbox.max()[2] = zCoord; shadowTree.fill(bbox, true, true); } shadowTree.prune(); } openvdb::BoolGrid::Ptr& grid() { return mGrid; } private: openvdb::BoolGrid::Ptr mGrid; const openvdb::math::Transform mFrustum; const int mErode, mZOffset; }; } // unnamed namespace //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Occlusion_Mask::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); // Camera 
reference mFrustum.reset(); UT_String cameraPath; evalString(cameraPath, "camera", 0, time); cameraPath.harden(); if (cameraPath.isstring()) { OBJ_Node* camobj = cookparms()->getCwd()->findOBJNode(cameraPath); OP_Node* self = cookparms()->getCwd(); if (!camobj) { addError(SOP_MESSAGE, "Camera not found"); return error(); } OBJ_Camera* cam = camobj->castToOBJCamera(); if (!cam) { addError(SOP_MESSAGE, "Camera not found"); return error(); } // Register this->addExtraInput(cam, OP_INTEREST_DATA); const float nearPlane = static_cast<float>(cam->getNEAR(time)); const float farPlane = static_cast<float>(nearPlane + evalFloat("depth", 0, time)); const float voxelDepthSize = static_cast<float>(evalFloat("voxeldepthsize", 0, time)); const int voxelCount = static_cast<int>(evalInt("voxelcount", 0, time)); mFrustum = hvdb::frustumTransformFromCamera(*self, context, *cam, 0, nearPlane, farPlane, voxelDepthSize, voxelCount); } else { addError(SOP_MESSAGE, "No camera referenced."); return error(); } ConstructShadow shadowOp(*mFrustum, static_cast<int>(evalInt("erode", 0, time)), static_cast<int>(evalInt("zoffset", 0, time))); // Get the group of grids to surface. const GA_PrimitiveGroup* group = matchGroup(*gdp, evalStdString("group", time)); for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { hvdb::GEOvdbApply<hvdb::NumericGridTypes>(**it, shadowOp); // Replace the original VDB primitive with a new primitive that contains // the output grid and has the same attributes and group membership. if (GU_PrimVDB* prim = hvdb::replaceVdbPrimitive(*gdp, shadowOp.grid(), **it, true)) { // Visualize our bool grids as "smoke", not whatever the input // grid was, which can be a levelset. prim->setVisualization(GEO_VOLUMEVIS_SMOKE, prim->getVisIso(), prim->getVisDensity()); } } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
13,792
C++
28.535332
100
0.616734
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/GT_GEOPrimCollectVDB.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /* * Copyright (c) Side Effects Software Inc. * * Produced by: * Side Effects Software Inc * 123 Front Street West, Suite 1401 * Toronto, Ontario * Canada M5J 2M2 * 416-504-9876 * * NAME: GT_GEOPrimCollectVDB.h (GT Library, C++) * * COMMENTS: */ #include "GT_GEOPrimCollectVDB.h" #include "UT_VDBUtils.h" #include <GT/GT_DANumeric.h> #include <GT/GT_GEOPrimCollect.h> #include <GT/GT_GEOPrimitive.h> #include <GT/GT_Handles.h> #include <GT/GT_PrimCurveMesh.h> #include <GU/GU_DetailHandle.h> #include "GEO_PrimVDB.h" #include <UT/UT_ParallelUtil.h> #include <UT/UT_Vector3.h> #include <UT/UT_Version.h> #include <openvdb/openvdb.h> #include <openvdb/Grid.h> using namespace openvdb_houdini; void GT_GEOPrimCollectVDB::registerPrimitive(const GA_PrimitiveTypeId &id) { new GT_GEOPrimCollectVDB(id); } namespace { class gt_RefineVDB { public: gt_RefineVDB( const GU_Detail &gdp, const GT_GEOOffsetList &vdb_offsets) : myGdp(gdp) , myVDBOffsets(vdb_offsets) , myPos(new GT_Real32Array(0, 3, GT_TYPE_POINT)) , myPosHandle(myPos) { } gt_RefineVDB( const gt_RefineVDB &task, UT_Split) : myGdp(task.myGdp) , myVDBOffsets(task.myVDBOffsets) , myPos(new GT_Real32Array(0, 3, GT_TYPE_POINT)) , myPosHandle(myPos) { } enum { NPTS = 8 }; void appendBox(openvdb::Vec3s corners[NPTS]) { myVertexCounts.append(NPTS * 2); myPos->append(corners[0].asPointer()); // 0 myPos->append(corners[1].asPointer()); // 1 myPos->append(corners[2].asPointer()); // 2 myPos->append(corners[3].asPointer()); // 3 myPos->append(corners[0].asPointer()); // 4 myPos->append(corners[4].asPointer()); // 5 myPos->append(corners[5].asPointer()); // 6 myPos->append(corners[6].asPointer()); // 7 myPos->append(corners[7].asPointer()); // 8 myPos->append(corners[4].asPointer()); // 9 myPos->append(corners[5].asPointer()); // 10 myPos->append(corners[1].asPointer()); // 11 myPos->append(corners[2].asPointer()); // 12 
myPos->append(corners[6].asPointer()); // 13 myPos->append(corners[7].asPointer()); // 14 myPos->append(corners[3].asPointer()); // 15 } template <typename GridT> void processGrid(const GridT &grid, int /*dummy*/) { using namespace openvdb; typedef typename GridT::TreeType TreeT; typedef typename TreeT::LeafCIter LeafCIter; typedef typename TreeT::LeafNodeType LeafNodeType; const openvdb::math::Transform &xform = grid.transform(); bool appended = false; for (LeafCIter iter = grid.tree().cbeginLeaf(); iter; ++iter) { LeafNodeType const * const leaf = iter.getLeaf(); const Vec3d half(0.5); Vec3d bbox_pos[2]; /// Nodes are rendered as cell-centered (0.5 voxel dilated) /// AABBox in world space bbox_pos[0] = leaf->origin() - half; bbox_pos[1] = leaf->origin().offsetBy(leaf->dim() - 1) + half; Vec3s corners[NPTS]; Coord lut[NPTS] = { Coord(0, 0, 0), Coord(0, 0, 1), Coord(1, 0, 1), Coord(1, 0, 0), Coord(0, 1, 0), Coord(0, 1, 1), Coord(1, 1, 1), Coord(1, 1, 0), }; for (int i = 0; i < NPTS; i++) { Vec3d pt(bbox_pos[lut[i][0]].x(), bbox_pos[lut[i][1]].y(), bbox_pos[lut[i][2]].z()); corners[i] = xform.indexToWorld(pt); } appendBox(corners); appended = true; } if (!appended) { const int NPTS = 6; openvdb::Vec3s lines[NPTS]; lines[0].init(-0.5, 0.0, 0.0); lines[1].init( 0.5, 0.0, 0.0); lines[2].init( 0.0,-0.5, 0.0); lines[3].init( 0.0, 0.5, 0.0); lines[4].init( 0.0, 0.0,-0.5); lines[5].init( 0.0, 0.0, 0.5); for (int i = 0; i < NPTS; i++) lines[i] = xform.indexToWorld(lines[i]); for (int i = 0; i < NPTS; i += 2) { myVertexCounts.append(2); myPos->append(lines[i].asPointer()); myPos->append(lines[i+1].asPointer()); } } } void operator()(const UT_BlockedRange<exint> &range) { using namespace openvdb; for (exint i = range.begin(); i != range.end(); ++i) { const GEO_Primitive *prim = myGdp.getGEOPrimitive(myVDBOffsets(i)); const GEO_PrimVDB *vdb = static_cast<const GEO_PrimVDB *>(prim); UTvdbCallAllType(vdb->getStorageType(), processGrid, vdb->getGrid(), 0); } } void join(const 
gt_RefineVDB &task) { myPos->concat(*task.myPos); myVertexCounts.concat(task.myVertexCounts); } const GU_Detail & myGdp; const GT_GEOOffsetList & myVDBOffsets; GT_Real32Array * myPos; GT_DataArrayHandle myPosHandle; GT_GEOOffsetList myVertexCounts; }; } GT_GEOPrimCollectVDB::GT_GEOPrimCollectVDB(const GA_PrimitiveTypeId &id) : myId(id) { bind(myId); } GT_GEOPrimCollectVDB::~GT_GEOPrimCollectVDB() { } GT_GEOPrimCollectData * GT_GEOPrimCollectVDB::beginCollecting( const GT_GEODetailListHandle &, const GT_RefineParms *) const { return new GT_GEOPrimCollectOffsets(); } GT_PrimitiveHandle GT_GEOPrimCollectVDB::collect( const GT_GEODetailListHandle &/*geometry*/, const GEO_Primitive *const* prim_list, int /*nsegments*/, GT_GEOPrimCollectData *data) const { data->asPointer<GT_GEOPrimCollectOffsets>()->append(prim_list[0]); return GT_PrimitiveHandle(); } GT_PrimitiveHandle GT_GEOPrimCollectVDB::endCollecting( const GT_GEODetailListHandle &g, GT_GEOPrimCollectData *data) const { const GT_GEOPrimCollectOffsets & offsets = *(data->asPointer<GT_GEOPrimCollectOffsets>()); const GT_GEOOffsetList & prims = offsets.getPrimitives(); if (!prims.entries()) return GT_PrimitiveHandle(); GU_DetailHandleAutoReadLock gdl(g->getGeometry(0)); const GU_Detail* detail = gdl.getGdp(); gt_RefineVDB task(*detail, prims); UTparallelReduce(UT_BlockedRange<exint>(0, prims.entries()), task); GT_DataArrayHandle vertex_counts = task.myVertexCounts.allocateArray(); GT_AttributeListHandle vertices = GT_AttributeList::createAttributeList("P", task.myPos); return GT_PrimitiveHandle( new GT_PrimCurveMesh( GT_BASIS_LINEAR, vertex_counts, vertices.get(), GT_AttributeListHandle(), // uniform GT_AttributeListHandle(), // detail /*wrap*/false)); }
7,284
C++
27.457031
84
0.55821
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Advect_Points.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Advect_Points.cc /// /// @author FX R&D OpenVDB team /// /// @brief SOP to perform advection of points through a velocity field. #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/PointUtils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/tools/PointAdvect.h> #include <openvdb/points/PointAdvect.h> #include <GA/GA_PageIterator.h> #include <GU/GU_PrimPoly.h> #include <UT/UT_Interrupt.h> #include <UT/UT_UniquePtr.h> #include <hboost/algorithm/string/case_conv.hpp> #include <hboost/algorithm/string/trim.hpp> #include <algorithm> #include <memory> #include <stdexcept> #include <string> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; //////////////////////////////////////// // Utilities namespace { // Add new items to the *end* of this list, and update NUM_PROPAGATION_TYPES. enum PropagationType { PROPAGATION_TYPE_UNKNOWN = -1, PROPAGATION_TYPE_ADVECTION = 0, PROPAGATION_TYPE_PROJECTION, PROPAGATION_TYPE_CONSTRAINED_ADVECTION }; enum { NUM_PROPAGATION_TYPES = PROPAGATION_TYPE_CONSTRAINED_ADVECTION + 1 }; std::string propagationTypeToString(PropagationType pt) { std::string ret; switch (pt) { case PROPAGATION_TYPE_ADVECTION: ret = "advection"; break; case PROPAGATION_TYPE_PROJECTION: ret = "projection"; break; case PROPAGATION_TYPE_CONSTRAINED_ADVECTION: ret = "cadvection"; break; case PROPAGATION_TYPE_UNKNOWN: ret = "unknown"; break; } return ret; } std::string propagationTypeToMenuName(PropagationType pt) { std::string ret; switch (pt) { case PROPAGATION_TYPE_ADVECTION: ret = "Advection"; break; case PROPAGATION_TYPE_PROJECTION: ret = "Projection"; break; case PROPAGATION_TYPE_CONSTRAINED_ADVECTION: ret = "Constrained Advection"; break; case PROPAGATION_TYPE_UNKNOWN: ret = "Unknown"; break; } return ret; } PropagationType stringToPropagationType(const 
std::string& s) { PropagationType ret = PROPAGATION_TYPE_UNKNOWN; std::string str = s; hboost::trim(str); hboost::to_lower(str); if (str == propagationTypeToString(PROPAGATION_TYPE_ADVECTION)) { ret = PROPAGATION_TYPE_ADVECTION; } else if (str == propagationTypeToString(PROPAGATION_TYPE_PROJECTION)) { ret = PROPAGATION_TYPE_PROJECTION; } else if (str == propagationTypeToString(PROPAGATION_TYPE_CONSTRAINED_ADVECTION)) { ret = PROPAGATION_TYPE_CONSTRAINED_ADVECTION; } return ret; } // Add new items to the *end* of this list, and update NUM_INTEGRATION_TYPES. enum IntegrationType { INTEGRATION_TYPE_UNKNOWN = -1, INTEGRATION_TYPE_FWD_EULER = 0, INTEGRATION_TYPE_RK_2ND, INTEGRATION_TYPE_RK_3RD, INTEGRATION_TYPE_RK_4TH }; enum { NUM_INTEGRATION_TYPES = INTEGRATION_TYPE_RK_4TH + 1 }; std::string integrationTypeToString(IntegrationType it) { std::string ret; switch (it) { case INTEGRATION_TYPE_FWD_EULER: ret = "fwd euler"; break; case INTEGRATION_TYPE_RK_2ND: ret = "2nd rk"; break; case INTEGRATION_TYPE_RK_3RD: ret = "3rd rk"; break; case INTEGRATION_TYPE_RK_4TH: ret = "4th rk"; break; case INTEGRATION_TYPE_UNKNOWN: ret = "unknown"; break; } return ret; } std::string integrationTypeToMenuName(IntegrationType it) { std::string ret; switch (it) { case INTEGRATION_TYPE_FWD_EULER: ret = "Forward Euler"; break; case INTEGRATION_TYPE_RK_2ND: ret = "Second-Order Runge-Kutta"; break; case INTEGRATION_TYPE_RK_3RD: ret = "Third-Order Runge-Kutta"; break; case INTEGRATION_TYPE_RK_4TH: ret = "Fourth-Order Runge-Kutta"; break; case INTEGRATION_TYPE_UNKNOWN: ret = "Unknown"; break; } return ret; } IntegrationType stringToIntegrationType(const std::string& s) { IntegrationType ret = INTEGRATION_TYPE_UNKNOWN; std::string str = s; hboost::trim(str); hboost::to_lower(str); if (str == integrationTypeToString(INTEGRATION_TYPE_FWD_EULER)) { ret = INTEGRATION_TYPE_FWD_EULER; } else if (str == integrationTypeToString(INTEGRATION_TYPE_RK_2ND)) { ret = INTEGRATION_TYPE_RK_2ND; } else if (str == 
integrationTypeToString(INTEGRATION_TYPE_RK_3RD)) { ret = INTEGRATION_TYPE_RK_3RD; } else if (str == integrationTypeToString(INTEGRATION_TYPE_RK_4TH)) { ret = INTEGRATION_TYPE_RK_4TH; } return ret; } struct AdvectionParms { AdvectionParms(GU_Detail *outputGeo) : mOutputGeo(outputGeo) , mPointGeo(nullptr) , mPointGroup(nullptr) , mOffsetsToSkip() , mIncludeGroups() , mExcludeGroups() , mVelPrim(nullptr) , mCptPrim(nullptr) , mPropagationType(PROPAGATION_TYPE_ADVECTION) , mIntegrationType(INTEGRATION_TYPE_FWD_EULER) , mTimeStep(1.0) , mIterations(1) , mSteps(1) , mStaggered(false) , mStreamlines(false) { } GU_Detail* mOutputGeo; const GU_Detail* mPointGeo; const GA_PointGroup* mPointGroup; std::vector<GA_Offset> mOffsetsToSkip; std::vector<std::string> mIncludeGroups; std::vector<std::string> mExcludeGroups; const GU_PrimVDB *mVelPrim; const GU_PrimVDB *mCptPrim; PropagationType mPropagationType; IntegrationType mIntegrationType; double mTimeStep; int mIterations, mSteps; bool mStaggered, mStreamlines; }; /// @brief Creates a new line segment for each point in @c ptnGeo /// @note The lines will only have one node. void createNewLines(GU_Detail& geo, const GA_PointGroup* group) { GA_SplittableRange ptnRange(geo.getPointRange(group)); GA_Offset start, end, pt; for (GA_PageIterator pIt = ptnRange.beginPages(); !pIt.atEnd(); ++pIt) { for (GA_Iterator it(pIt.begin()); it.blockAdvance(start, end); ) { for (GA_Offset i = start; i < end; ++i) { pt = geo.appendPointOffset(); geo.setPos3(pt, geo.getPos3(i)); GU_PrimPoly& prim = *GU_PrimPoly::build(&geo, 0, GU_POLY_OPEN, 0); prim.appendVertex(pt); } } } } /// @brief Append a new node to each line. /// @note The numbers of lines and points have to match. 
void appendLineNodes(GU_Detail& geo, GA_Size firstline, const GU_Detail& ptnGeo) { GA_SplittableRange ptnRange(ptnGeo.getPointRange()); GA_Offset start, end, pt; GA_Size n = firstline, N = geo.getNumPrimitives(); for (GA_PageIterator pIt = ptnRange.beginPages(); !pIt.atEnd(); ++pIt) { for (GA_Iterator it(pIt.begin()); it.blockAdvance(start, end); ) { for (GA_Offset i = start; i < end; ++i) { pt = geo.appendPointOffset(); geo.setPos3(pt, ptnGeo.getPos3(i)); GA_Offset offset = geo.primitiveOffset(n); GU_PrimPoly& prim = *static_cast<GU_PrimPoly*>( geo.getPrimitiveList().get(offset)); prim.appendVertex(pt); if (++n == N) break; } if (n == N) break; } if (n == N) break; } } // Threaded closest point projection template<typename GridType> class ProjectionOp { using ProjectorType = openvdb::tools::ClosestPointProjector<GridType>; using VectorType = typename GridType::ValueType; using ElementType = typename VectorType::ValueType; public: ProjectionOp(const GridType& cptGrid, int cptIterations, GU_Detail& geo, const std::vector<GA_Offset>& offsetsToSkip, hvdb::Interrupter& boss) : mProjector(cptGrid, cptIterations) , mGeo(geo) , mOffsetsToSkip(offsetsToSkip) , mBoss(boss) { } void operator()(const GA_SplittableRange &range) const { GA_Offset start, end; UT_Vector3 p; VectorType w; for (GA_PageIterator pIt = range.beginPages(); !pIt.atEnd(); ++pIt) { if (mBoss.wasInterrupted()) return; for (GA_Iterator it(pIt.begin()); it.blockAdvance(start, end); ) { if (mBoss.wasInterrupted()) return; for (GA_Offset i = start; i < end; ++i) { // skip any offsets requested if (std::binary_search(mOffsetsToSkip.begin(), mOffsetsToSkip.end(), i)) { continue; } p = mGeo.getPos3(i); w[0] = ElementType(p[0]); w[1] = ElementType(p[1]); w[2] = ElementType(p[2]); mProjector.projectToConstraintSurface(w); p[0] = UT_Vector3::value_type(w[0]); p[1] = UT_Vector3::value_type(w[1]); p[2] = UT_Vector3::value_type(w[2]); mGeo.setPos3(i, p); } } } } private: ProjectorType mProjector; GU_Detail& mGeo; const 
std::vector<GA_Offset>& mOffsetsToSkip; hvdb::Interrupter& mBoss; }; class Projection { public: Projection(AdvectionParms& parms, hvdb::Interrupter& boss) : mParms(parms) , mBoss(boss) { } template<typename GridType> void operator()(const GridType& grid) { if (mBoss.wasInterrupted()) return; ProjectionOp<GridType> op( grid, mParms.mIterations, *mParms.mOutputGeo, mParms.mOffsetsToSkip, mBoss); UTparallelFor(GA_SplittableRange(mParms.mOutputGeo->getPointRange(mParms.mPointGroup)), op); } private: AdvectionParms& mParms; hvdb::Interrupter& mBoss; }; // Threaded point advection template<typename GridType, int IntegrationOrder, bool StaggeredVelocity, bool Constrained = false> class AdvectionOp { using IntegrationType = openvdb::tools::VelocityIntegrator<GridType, StaggeredVelocity>; using ProjectorType = openvdb::tools::ClosestPointProjector<GridType>; // Used for constrained advection using VectorType = typename GridType::ValueType; using ElementType = typename VectorType::ValueType; public: AdvectionOp(const GridType& velocityGrid, GU_Detail& geo, const std::vector<GA_Offset>& offsetsToSkip, hvdb::Interrupter& boss, double timeStep, GA_ROHandleF traillen, int steps) : mVelocityGrid(velocityGrid) , mCptGrid(nullptr) , mGeo(geo) , mOffsetsToSkip(offsetsToSkip) , mBoss(boss) , mTimeStep(timeStep) , mTrailLen(traillen) , mSteps(steps) , mCptIterations(0) { } AdvectionOp(const GridType& velocityGrid, const GridType& cptGrid, GU_Detail& geo, const std::vector<GA_Offset>& offsetsToSkip, hvdb::Interrupter& boss, double timeStep, int steps, int cptIterations) : mVelocityGrid(velocityGrid) , mCptGrid(&cptGrid) , mGeo(geo) , mOffsetsToSkip(offsetsToSkip) , mBoss(boss) , mTimeStep(timeStep) , mSteps(steps) , mCptIterations(cptIterations) { } void operator()(const GA_SplittableRange &range) const { GA_Offset start, end; UT_Vector3 p; VectorType w; IntegrationType integrator(mVelocityGrid); // Constrained-advection compiled out if Constrained == false UT_UniquePtr<ProjectorType> 
projector; if (Constrained && mCptGrid != nullptr) { projector.reset(new ProjectorType(*mCptGrid, mCptIterations)); } for (GA_PageIterator pIt = range.beginPages(); !pIt.atEnd(); ++pIt) { if (mBoss.wasInterrupted()) return; for (GA_Iterator it(pIt.begin()); it.blockAdvance(start, end); ) { if (mBoss.wasInterrupted()) return; for (GA_Offset i = start; i < end; ++i) { // skip any point offsets requested if (std::binary_search(mOffsetsToSkip.begin(), mOffsetsToSkip.end(), i)) { continue; } p = mGeo.getPos3(i); w[0] = ElementType(p[0]); w[1] = ElementType(p[1]); w[2] = ElementType(p[2]); ElementType timestep = static_cast<ElementType>(mTimeStep); if (mTrailLen.isValid()) { timestep *= static_cast<ElementType>(mTrailLen.get(i)); } for (int n = 0; n < mSteps; ++n) { integrator.template rungeKutta<IntegrationOrder, VectorType>(timestep, w); if (Constrained) projector->projectToConstraintSurface(w); } p[0] = UT_Vector3::value_type(w[0]); p[1] = UT_Vector3::value_type(w[1]); p[2] = UT_Vector3::value_type(w[2]); mGeo.setPos3(i, p); } } } } private: const GridType& mVelocityGrid; const GridType* mCptGrid; GU_Detail& mGeo; const std::vector<GA_Offset>& mOffsetsToSkip; hvdb::Interrupter& mBoss; double mTimeStep; GA_ROHandleF mTrailLen; const int mSteps, mCptIterations; }; class Advection { public: Advection(AdvectionParms& parms, hvdb::Interrupter& boss) : mParms(parms) , mBoss(boss) { } template<typename GridType, int IntegrationOrder, bool StaggeredVelocity> void advection(const GridType& velocityGrid) { if (mBoss.wasInterrupted()) return; if (!mParms.mStreamlines) { // Advect points GA_ROHandleF traillen_h(mParms.mOutputGeo, GA_ATTRIB_POINT, "traillen"); AdvectionOp<GridType, IntegrationOrder, StaggeredVelocity> op(velocityGrid, *mParms.mOutputGeo, mParms.mOffsetsToSkip, mBoss, mParms.mTimeStep, traillen_h, mParms.mSteps); UTparallelFor( GA_SplittableRange(mParms.mOutputGeo->getPointRange(mParms.mPointGroup)), op); } else { // Advect points and generate streamlines. 
GA_Index firstline = mParms.mOutputGeo->getNumPrimitives(); GU_Detail geo; geo.mergePoints(*mParms.mOutputGeo, mParms.mPointGroup); createNewLines(*mParms.mOutputGeo, mParms.mPointGroup); for (int n = 0; n < mParms.mSteps; ++n) { if (mBoss.wasInterrupted()) return; GA_ROHandleF traillen_h(&geo, GA_ATTRIB_POINT, "traillen"); AdvectionOp<GridType, IntegrationOrder, StaggeredVelocity> op(velocityGrid, geo, mParms.mOffsetsToSkip, mBoss, mParms.mTimeStep, traillen_h, 1); UTparallelFor(GA_SplittableRange(geo.getPointRange()), op); appendLineNodes(*mParms.mOutputGeo, firstline, geo); } } } template<typename GridType, int IntegrationOrder, bool StaggeredVelocity> void constrainedAdvection(const GridType& velocityGrid) { const GridType& cptGrid = static_cast<const GridType&>(mParms.mCptPrim->getGrid()); using AdvectionOp = AdvectionOp<GridType, IntegrationOrder, StaggeredVelocity, /*Constrained*/true>; if (mBoss.wasInterrupted()) return; if (!mParms.mStreamlines) { // Advect points AdvectionOp op(velocityGrid, cptGrid, *mParms.mOutputGeo, mParms.mOffsetsToSkip, mBoss, mParms.mTimeStep, mParms.mSteps, mParms.mIterations); UTparallelFor( GA_SplittableRange(mParms.mOutputGeo->getPointRange(mParms.mPointGroup)), op); } else { // Advect points and generate streamlines. 
GA_Index firstline = mParms.mOutputGeo->getNumPrimitives(); GU_Detail geo; geo.mergePoints(*mParms.mOutputGeo, mParms.mPointGroup); createNewLines(*mParms.mOutputGeo, mParms.mPointGroup); for (int n = 0; n < mParms.mSteps; ++n) { if (mBoss.wasInterrupted()) return; AdvectionOp op(velocityGrid, cptGrid, geo, mParms.mOffsetsToSkip, mBoss, mParms.mTimeStep, 1, mParms.mIterations); UTparallelFor(GA_SplittableRange(geo.getPointRange()), op); appendLineNodes(*mParms.mOutputGeo, firstline, geo); } } } // Resolves velocity representation and advection type template<typename GridType, int IntegrationOrder> void resolveAdvection(const GridType& velocityGrid) { if (mBoss.wasInterrupted()) return; if (mParms.mPropagationType == PROPAGATION_TYPE_ADVECTION) { if (!mParms.mStaggered) advection<GridType, IntegrationOrder, false>(velocityGrid); else advection<GridType, IntegrationOrder, true>(velocityGrid); } else if (mParms.mCptPrim != nullptr) { // constrained if (!mParms.mStaggered) { constrainedAdvection<GridType, IntegrationOrder, false>(velocityGrid); } else { constrainedAdvection<GridType, IntegrationOrder, true>(velocityGrid); } } } template<typename GridType> void operator()(const GridType& velocityGrid) { if (mBoss.wasInterrupted()) return; // Resolve integration order switch (mParms.mIntegrationType) { case INTEGRATION_TYPE_FWD_EULER: resolveAdvection<GridType, 1>(velocityGrid); break; case INTEGRATION_TYPE_RK_2ND: resolveAdvection<GridType, 2>(velocityGrid); break; case INTEGRATION_TYPE_RK_3RD: resolveAdvection<GridType, 3>(velocityGrid); break; case INTEGRATION_TYPE_RK_4TH: resolveAdvection<GridType, 4>(velocityGrid); break; case INTEGRATION_TYPE_UNKNOWN: break; } } private: AdvectionParms& mParms; hvdb::Interrupter& mBoss; }; template <typename PointDataGridT> class VDBPointsAdvection { public: VDBPointsAdvection(PointDataGridT& outputGrid, AdvectionParms& parms, hvdb::Interrupter& boss) : mOutputGrid(outputGrid) , mParms(parms) , mBoss(boss) { } template<typename 
GridType> void operator()(const GridType& velocityGrid) { if (mBoss.wasInterrupted()) return; // note that streamlines are not implemented for VDB Points if (mParms.mStreamlines) return; auto leaf = mOutputGrid.constTree().cbeginLeaf(); if (!leaf) return; openvdb::points::MultiGroupFilter filter( mParms.mIncludeGroups, mParms.mExcludeGroups, leaf->attributeSet()); openvdb::points::advectPoints(mOutputGrid, velocityGrid, mParms.mIntegrationType+1, mParms.mTimeStep, mParms.mSteps, filter); } private: PointDataGridT& mOutputGrid; AdvectionParms& mParms; hvdb::Interrupter& mBoss; }; } // namespace //////////////////////////////////////// // SOP Declaration class SOP_OpenVDB_Advect_Points: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Advect_Points(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Advect_Points() override {} static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned i ) const override { return (i > 0); } class Cache: public SOP_VDBCacheOptions { protected: OP_ERROR cookVDBSop(OP_Context&) override; bool evalAdvectionParms(OP_Context&, AdvectionParms&); }; // class Cache protected: void resolveObsoleteParms(PRM_ParmList*) override; bool updateParmsFlags() override; }; //////////////////////////////////////// // Build UI and register this operator void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; // Points to process parms.add(hutil::ParmFactory(PRM_STRING, "group", "Point Group") .setChoiceList(&SOP_Node::pointGroupMenu) .setTooltip("A subset of points in the first input to move using the velocity field.")); // VDB Points advection parms.add(hutil::ParmFactory(PRM_TOGGLE, "advectvdbpoints", "Advect VDB Points") .setDefault(PRMoneDefaults) .setTooltip("Enable/disable advection of VDB Points.") .setDocumentation( "If enabled, advect the points in a VDB Points grid, otherwise apply advection" " only to the Houdini point associated with the VDB primitive.\n\n" "The 
latter is faster to compute but updates the VDB transform only" " and not the relative positions of the points within the grid." " It is useful primarily when instancing multiple static VDB point sets" " onto a dynamically advected Houdini point set.")); parms.add(hutil::ParmFactory(PRM_STRING, "vdbgroup", "VDB Primitive Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("A subset of VDB Points primitives in the first input to move using the velocity field.")); parms.add(hutil::ParmFactory(PRM_STRING, "vdbpointsgroups", "VDB Points Groups") .setHelpText("Specify VDB Points groups to advect.") .setChoiceList(&hvdb::VDBPointsGroupMenuInput1)); // Velocity grid parms.add(hutil::ParmFactory(PRM_STRING, "velgroup", "Velocity VDB") .setChoiceList(&hutil::PrimGroupMenuInput2) .setTooltip("Velocity grid") .setDocumentation( "The name of a VDB primitive in the second input to use as" " the velocity field (see [specifying volumes|/model/volumes#group])\n\n" "This must be a vector-valued VDB primitive." 
" You can use the [Vector Merge node|Node:sop/DW_OpenVDBVectorMerge]" " to turn a `vel.[xyz]` triple into a single primitive.")); // Closest point grid parms.add(hutil::ParmFactory(PRM_STRING, "cptgroup", "Closest-Point VDB") .setChoiceList(&hutil::PrimGroupMenuInput3) .setTooltip("Vector grid that in each voxel stores the closest point on a surface.") .setDocumentation( "The name of a VDB primitive in the third input to use for" " the closest point values (see [specifying volumes|/model/volumes#group])")); // Propagation scheme { std::vector<std::string> items; for (int i = 0; i < NUM_PROPAGATION_TYPES; ++i) { PropagationType pt = PropagationType(i); items.push_back(propagationTypeToString(pt)); // token items.push_back(propagationTypeToMenuName(pt)); // label } parms.add(hutil::ParmFactory(PRM_STRING, "operation", "Operation") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setDefault(items[0]) .setTooltip( "Advection: Move the point along the velocity field.\n" "Projection: Move the point to the nearest surface point.\n" "Constrained Advection: Advect, then project to the nearest surface point.") .setDocumentation( "How to use the velocity field to move the points\n\n" "Advection:\n" " Move each point along the velocity field.\n" "Projection:\n" " Move each point to the nearest surface point using the closest point field.\n" "Constrained Advection:\n" " Move the along the velocity field, and then project using the" " closest point field. 
This forces the particles to remain on a surface.")); } // Integration scheme { std::vector<std::string> items; for (int i = 0; i < NUM_INTEGRATION_TYPES; ++i) { IntegrationType it = IntegrationType(i); items.push_back(integrationTypeToString(it)); // token items.push_back(integrationTypeToMenuName(it)); // label } parms.add(hutil::ParmFactory(PRM_STRING, "integration", "Integration") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setDefault(items[0]) .setTooltip("Lower order means faster performance, " "but the points will not follow the velocity field closely.") .setDocumentation("Algorithm to use to move the points\n\n" "Later options in the list are slower but better follow the velocity field.")); } // Closest point iterations parms.add(hutil::ParmFactory(PRM_INT_J, "iterations", "Iterations") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 10) .setTooltip("The interpolation step when sampling nearest points introduces\n" "error so that the result of a single sample may not lie exactly\n" "on the surface. Multiple iterations help minimize this error.") .setDocumentation( "Number of times to try projecting to the nearest point on the surface\n\n" "Projecting might not move exactly to the surface on the first try." " More iterations are slower but give more accurate projection.")); // Time step parms.add(hutil::ParmFactory(PRM_FLT, "timestep", "Timestep") .setDefault(1, "1.0/$FPS") .setRange(PRM_RANGE_UI, 0, PRM_RANGE_UI, 10) .setDocumentation( "Number of seconds of movement to apply to the input points\n\n" "The default is `1/$FPS` (one frame's worth of time)." 
" You can use negative values to move the points backwards through" " the velocity field.\n\n" "If the attribute `traillen` is present, it is multiplied by this" " time step allowing per-particle variation in trail length.")); // Steps parms.add(hutil::ParmFactory(PRM_INT_J, "steps", "Substeps") .setDefault(1) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 10) .setTooltip("Number of timesteps to take per frame.") .setDocumentation( "How many times to repeat the advection step\n\n" "This will produce a more accurate motion, especially if large" " time steps or high velocities are present.")); // Output streamlines parms.add(hutil::ParmFactory(PRM_TOGGLE, "outputstreamlines", "Output Streamlines") .setDefault(PRMzeroDefaults) .setTooltip("Output the particle path as line segments.") .setDocumentation( "Generate polylines instead of moving points.\n\n" "This is useful for visualizing the effect of the node." " It may also be useful for special effects (see also the" " [Trail SOP|Node:sop/trail]).")); // Obsolete parameters hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "staggered", "Staggered Velocities")); obsoleteParms.add(hutil::ParmFactory(PRM_SEPARATOR, "sep1", "Sep")); obsoleteParms.add(hutil::ParmFactory(PRM_SEPARATOR, "sep2", "Sep")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "ptnGroup", "Point Group")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "velGroup", "Velocity VDB")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "cptGroup", "Closest-Point VDB")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "propagation", "Operation") .setDefault("advection")); obsoleteParms.add(hutil::ParmFactory(PRM_INT_J, "cptIterations", "Iterations") .setDefault(PRMzeroDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT, "timeStep", "Time Step") .setDefault(1, "1.0/$FPS")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "outputStreamlines", "Output Streamlines") .setDefault(PRMzeroDefaults)); // Register this operator. 
hvdb::OpenVDBOpFactory("VDB Advect Points", SOP_OpenVDB_Advect_Points::factory, parms, *table) .setObsoleteParms(obsoleteParms) .addInput("Points to Advect") .addOptionalInput("Velocity VDB") .addOptionalInput("Closest Point VDB") .setVerb(SOP_NodeVerb::COOK_DUPLICATE, []() { return new SOP_OpenVDB_Advect_Points::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Move points in the input geometry along a VDB velocity field.\"\"\"\n\ \n\ @overview\n\ \n\ This node has different functions based on the value of the __Operation__ parameter.\n\ * Move geometry points according to a VDB velocity field.\n\ * Move points onto a surface using a VDB field storing the nearest surface point at each voxel.\n\ # Convert the \"sticky\" surface to a VDB SDF using the\n\ [OpenVDB From Polygons node|Node:sop/DW_OpenVDBFromPolygons].\n\ # Generate a \"nearest point\" VDB using the \n\ [OpenVDB Analysis node|Node:sop/DW_OpenVDBAnalysis].\n\ # Connect the points you want to stick, and the \"nearest point\" field,\n\ into this node.\n\ * Move geometry points according to a VDB velocity field _and_ stick them\n\ to a surface using a \"nearest point\" field (combine the first two operations).\n\ This lets you advect points through a velocity field while keeping them\n\ stuck to a surface.\n\ \n\ NOTE:\n\ The `traillen` float attribute can be used to control how far particles\n\ move on a per-particle basis.\n\ \n\ @animation Animating advection\n\ \n\ *This node is not a feedback loop*.\n\ It moves the points it finds in the input geometry. It _cannot_ modify\n\ the point locations over time. 
(That is, if you hook this node up to do advection\n\ and press play, the points will not animate.)\n\ \n\ To set up a feedback loop, where the advection at each frame affects\n\ the advected point positions from the previous frame, do one of the following:\n\ \n\ * Do the advection inside a [SOP Solver|Node:sop/solver].\n\ * Set __Substeps__ to `$F` and the __Time Step__ to `$T`\n\ \n\ This will cause the node to recalculate, _at every frame_, the path\n\ of every particle through _every previous frame_ to get the current one.\n\ This is obviously not very practical, however the calculations are fast\n\ so it may be useful as a quick \"hack\" to animate the advection\n\ for small numbers of particles.\n\ \n\ @inputs\n\ Points to Advect:\n\ The points to advect are copied from this input.\n\ Velocity VDB:\n\ The VDB that stores the velocity at each location\n\ Closest Point VDB:\n\ The VDB that stores the closest point to each location\n\ \n\ @related\n\ - [OpenVDB Advect|Node:sop/DW_OpenVDBAdvect]\n\ - [OpenVDB From Particles|Node:sop/DW_OpenVDBFromParticles]\n\ - [Node:sop/vdbadvectpoints]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } void SOP_OpenVDB_Advect_Points::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; resolveRenamedParm(*obsoleteParms, "cptGroup", "cptgroup"); resolveRenamedParm(*obsoleteParms, "cptIterations", "iterations"); resolveRenamedParm(*obsoleteParms, "outputStreamlines", "outputstreamlines"); resolveRenamedParm(*obsoleteParms, "propagation", "operation"); resolveRenamedParm(*obsoleteParms, "ptnGroup", "group"); resolveRenamedParm(*obsoleteParms, "timeStep", "timestep"); resolveRenamedParm(*obsoleteParms, "velGroup", "velgroup"); // Delegate to the base class. hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } // Enable/disable or show/hide parameters in the UI. 
bool SOP_OpenVDB_Advect_Points::updateParmsFlags() { bool changed = false; const auto op = stringToPropagationType(evalStdString("operation", 0)); const bool advectVdbPoints = (0 != evalInt("advectvdbpoints", 0, 0)); changed |= enableParm("iterations", op != PROPAGATION_TYPE_ADVECTION); changed |= enableParm("integration", op != PROPAGATION_TYPE_PROJECTION); changed |= enableParm("timestep", op != PROPAGATION_TYPE_PROJECTION); changed |= enableParm("steps", op != PROPAGATION_TYPE_PROJECTION); changed |= enableParm("outputstreamlines", op != PROPAGATION_TYPE_PROJECTION); changed |= enableParm("advectvdbpoints", op == PROPAGATION_TYPE_ADVECTION); changed |= enableParm("vdbgroup", (op == PROPAGATION_TYPE_ADVECTION) && advectVdbPoints); changed |= enableParm("vdbpointsgroups", (op == PROPAGATION_TYPE_ADVECTION) && advectVdbPoints); changed |= setVisibleState("iterations", getEnableState("iterations")); changed |= setVisibleState("integration", getEnableState("integration")); changed |= setVisibleState("timestep", getEnableState("timestep")); changed |= setVisibleState("steps", getEnableState("steps")); changed |= setVisibleState("outputstreamlines", getEnableState("outputstreamlines")); changed |= setVisibleState("advectvdbpoints", getEnableState("advectvdbpoints")); changed |= setVisibleState("vdbgroup", getEnableState("advectvdbpoints")); changed |= setVisibleState("vdbpointsgroups", getEnableState("advectvdbpoints")); return changed; } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Advect_Points::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Advect_Points(net, name, op); } SOP_OpenVDB_Advect_Points::SOP_OpenVDB_Advect_Points(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Advect_Points::Cache::cookVDBSop(OP_Context& context) { try { // Evaluate UI parameters const fpreal now = context.getTime(); AdvectionParms 
parms(gdp); if (!evalAdvectionParms(context, parms)) return error(); const bool advectVdbPoints = (0 != evalInt("advectvdbpoints", 0, now)); hvdb::Interrupter boss("Processing points"); if (advectVdbPoints) { // build a list of point offsets to skip during Houdini point advection for (hvdb::VdbPrimIterator vdbIt(gdp); vdbIt; ++vdbIt) { GU_PrimVDB* vdbPrim = *vdbIt; parms.mOffsetsToSkip.push_back(vdbPrim->getPointOffset(0)); } // ensure the offsets to skip are sorted to make lookups faster std::sort(parms.mOffsetsToSkip.begin(), parms.mOffsetsToSkip.end()); const std::string vdbGroupStr = evalStdString("vdbgroup", now); const GA_PrimitiveGroup* vdbGroup = matchGroup(*parms.mPointGeo, vdbGroupStr); for (hvdb::VdbPrimIterator vdbIt(gdp, vdbGroup); vdbIt; ++vdbIt) { GU_PrimVDB* vdbPrim = *vdbIt; // only process if grid is a PointDataGrid with leaves if (!openvdb::gridConstPtrCast<openvdb::points::PointDataGrid>( vdbPrim->getConstGridPtr())) continue; auto&& pointDataGrid = UTvdbGridCast<openvdb::points::PointDataGrid>(vdbPrim->getConstGrid()); auto leafIter = pointDataGrid.tree().cbeginLeaf(); if (!leafIter) continue; // deep copy the VDB tree if it is not already unique vdbPrim->makeGridUnique(); auto&& outputGrid = UTvdbGridCast<openvdb::points::PointDataGrid>(vdbPrim->getGrid()); switch (parms.mPropagationType) { case PROPAGATION_TYPE_ADVECTION: case PROPAGATION_TYPE_CONSTRAINED_ADVECTION: { VDBPointsAdvection<openvdb::points::PointDataGrid> advection( outputGrid, parms, boss); hvdb::GEOvdbApply<hvdb::Vec3GridTypes>(*parms.mVelPrim, advection); break; } case PROPAGATION_TYPE_PROJECTION: break; // not implemented case PROPAGATION_TYPE_UNKNOWN: break; } } } switch (parms.mPropagationType) { case PROPAGATION_TYPE_ADVECTION: case PROPAGATION_TYPE_CONSTRAINED_ADVECTION: { Advection advection(parms, boss); hvdb::GEOvdbApply<hvdb::Vec3GridTypes>(*parms.mVelPrim, advection); break; } case PROPAGATION_TYPE_PROJECTION: { Projection projection(parms, boss); 
hvdb::GEOvdbApply<hvdb::Vec3GridTypes>(*parms.mVelPrim, projection); break; } case PROPAGATION_TYPE_UNKNOWN: break; } if (boss.wasInterrupted()) addWarning(SOP_MESSAGE, "processing was interrupted"); boss.end(); } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); } //////////////////////////////////////// bool SOP_OpenVDB_Advect_Points::Cache::evalAdvectionParms( OP_Context& context, AdvectionParms& parms) { const fpreal now = context.getTime(); parms.mPointGeo = inputGeo(0); if (!parms.mPointGeo) { addError(SOP_MESSAGE, "Missing point input"); return false; } UT_String ptGroupStr; evalString(ptGroupStr, "group", 0, now); parms.mPointGroup = parsePointGroups(ptGroupStr, GroupCreator(gdp)); const bool advectVdbPoints = (0 != evalInt("advectvdbpoints", 0, now)); if (advectVdbPoints) { const std::string groups = evalStdString("vdbpointsgroups", now); // Get and parse the vdb points groups openvdb::points::AttributeSet::Descriptor::parseNames( parms.mIncludeGroups, parms.mExcludeGroups, groups); } if (!parms.mPointGroup && ptGroupStr.length() > 0) { addWarning(SOP_MESSAGE, "Point group not found"); return false; } parms.mPropagationType = stringToPropagationType(evalStdString("operation", now)); if (parms.mPropagationType == PROPAGATION_TYPE_UNKNOWN) { addError(SOP_MESSAGE, "Unknown propargation scheme"); return false; } if (parms.mPropagationType == PROPAGATION_TYPE_ADVECTION || parms.mPropagationType == PROPAGATION_TYPE_CONSTRAINED_ADVECTION) { const GU_Detail* velGeo = inputGeo(1); if (!velGeo) { addError(SOP_MESSAGE, "Missing velocity grid input"); return false; } const GA_PrimitiveGroup* velGroup = matchGroup(*velGeo, evalStdString("velgroup", now)); hvdb::VdbPrimCIterator it(velGeo, velGroup); parms.mVelPrim = *it; if (!parms.mVelPrim) { addError(SOP_MESSAGE, "Missing velocity grid"); return false; } if (parms.mVelPrim->getStorageType() != UT_VDB_VEC3F) { addError(SOP_MESSAGE, "Expected velocity grid to be of type Vec3f"); return 
false; } // Check if the velocity grid uses a staggered representation. parms.mStaggered = parms.mVelPrim->getGrid().getGridClass() == openvdb::GRID_STAGGERED; parms.mTimeStep = static_cast<float>(evalFloat("timestep", 0, now)); parms.mSteps = static_cast<int>(evalInt("steps", 0, now)); // The underlying code will accumulate, so to make it substeps // we need to divide out. parms.mTimeStep /= static_cast<float>(parms.mSteps); parms.mStreamlines = bool(evalInt("outputstreamlines", 0, now)); parms.mIntegrationType = stringToIntegrationType(evalStdString("integration", now)); if (parms.mIntegrationType == INTEGRATION_TYPE_UNKNOWN) { addError(SOP_MESSAGE, "Unknown integration scheme"); return false; } } if (parms.mPropagationType == PROPAGATION_TYPE_PROJECTION || parms.mPropagationType == PROPAGATION_TYPE_CONSTRAINED_ADVECTION) { const GU_Detail* cptGeo = inputGeo(2); if (!cptGeo) { addError(SOP_MESSAGE, "Missing closest point grid input"); return false; } const GA_PrimitiveGroup *cptGroup = matchGroup(*cptGeo, evalStdString("cptgroup", now)); hvdb::VdbPrimCIterator it(cptGeo, cptGroup); parms.mCptPrim = *it; if (!parms.mCptPrim) { addError(SOP_MESSAGE, "Missing closest point grid"); return false; } if (parms.mCptPrim->getStorageType() != UT_VDB_VEC3F) { addError(SOP_MESSAGE, "Expected closest point grid to be of type Vec3f"); return false; } parms.mIterations = static_cast<int>(evalInt("iterations", 0, now)); } return true; }
40,963
C++
34.559028
111
0.626956
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/VRAY_OpenVDB_Points.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file VRAY_OpenVDB_Points.cc /// /// @authors Dan Bailey, Richard Kwok /// /// @brief The Delayed Load Mantra Procedural for OpenVDB Points. #include <UT/UT_Version.h> #include <UT/UT_DSOVersion.h> #include <GU/GU_Detail.h> #include <OP/OP_OperatorTable.h> #include <UT/UT_BoundingBox.h> #include <UT/UT_Ramp.h> #include <VRAY/VRAY_Procedural.h> #include <VRAY/VRAY_ProceduralFactory.h> #include <openvdb/openvdb.h> #include <openvdb/io/File.h> #include <openvdb/openvdb.h> #include <openvdb/points/PointDataGrid.h> #include <openvdb/points/PointGroup.h> #include <openvdb_houdini/PointUtils.h> #include <algorithm> #include <sstream> #include <string> #include <vector> using namespace openvdb; using namespace openvdb::points; namespace hvdb = openvdb_houdini; // mantra renders points with a world-space radius of 0.05 by default static const float DEFAULT_PSCALE = 0.05f; class VRAY_OpenVDB_Points : public VRAY_Procedural { public: using GridVecPtr = std::vector<PointDataGrid::Ptr>; VRAY_OpenVDB_Points(); ~VRAY_OpenVDB_Points() override = default; const char* className() const override; int initialize(const UT_BoundingBox*) override; void getBoundingBox(UT_BoundingBox&) override; void render() override; private: UT_BoundingBox mBox; UT_StringHolder mFilename; std::vector<Name> mIncludeGroups; std::vector<Name> mExcludeGroups; UT_StringHolder mAttrStr; GridVecPtr mGridPtrs; float mPreBlur; float mPostBlur; bool mSpeedToColor; float mMaxSpeed; UT_Ramp mFunctionRamp; }; // class VRAY_OpenVDB_Points //////////////////////////////////////// template <typename PointDataTreeT> struct GenerateBBoxOp { using PointDataLeaf = typename PointDataTreeT::LeafNodeType; using LeafRangeT = typename tree::LeafManager<const PointDataTreeT>::LeafRange; GenerateBBoxOp( const math::Transform& transform, const std::vector<Name>& includeGroups, const std::vector<Name>& excludeGroups) : mTransform(transform) , 
mIncludeGroups(includeGroups) , mExcludeGroups(excludeGroups) { } GenerateBBoxOp(const GenerateBBoxOp& parent, tbb::split) : mTransform(parent.mTransform) , mBbox(parent.mBbox) , mIncludeGroups(parent.mIncludeGroups) , mExcludeGroups(parent.mExcludeGroups) { } void operator()(const LeafRangeT& range) { for (auto leafIter = range.begin(); leafIter; ++leafIter) { const AttributeSet::Descriptor& descriptor = leafIter->attributeSet().descriptor(); size_t pscaleIndex = descriptor.find("pscale"); if (pscaleIndex != AttributeSet::INVALID_POS) { std::string pscaleType = descriptor.type(pscaleIndex).first; if (pscaleType == typeNameAsString<float>()) { expandBBox<float>(*leafIter, pscaleIndex); } else if (pscaleType == typeNameAsString<half>()) { expandBBox<half>(*leafIter, pscaleIndex); } else { throw TypeError("Unsupported pscale type - " + pscaleType); } } else { // use default pscale value expandBBox<float>(*leafIter, pscaleIndex); } } } void join(GenerateBBoxOp& rhs) { mBbox.expand(rhs.mBbox); } template <typename PscaleType> void expandBBox(const PointDataLeaf& leaf, size_t pscaleIndex) { auto positionHandle = points::AttributeHandle<Vec3f>::create(leaf.constAttributeArray("P")); // expandBBox will not pick up a pscale handle unless // the attribute type matches the template type typename AttributeHandle<PscaleType>::Ptr pscaleHandle; if (pscaleIndex != AttributeSet::INVALID_POS) { if (leaf.attributeSet().descriptor().type(pscaleIndex).first == typeNameAsString<PscaleType>()) { pscaleHandle = AttributeHandle<PscaleType>::create(leaf.constAttributeArray(pscaleIndex)); } } // uniform value is in world space bool pscaleIsUniform = true; PscaleType uniformPscale(DEFAULT_PSCALE); if (pscaleHandle) { pscaleIsUniform = pscaleHandle->isUniform(); uniformPscale = pscaleHandle->get(0); } // combine the bounds of every point on this leaf into an index-space bbox if (!mIncludeGroups.empty() || !mExcludeGroups.empty()) { points::MultiGroupFilter filter(mIncludeGroups, 
mExcludeGroups, leaf.attributeSet()); auto iter = leaf.beginIndexOn(filter); for (; iter; ++iter) { double pscale = double(pscaleIsUniform ? uniformPscale : pscaleHandle->get(*iter)); // pscale needs to be transformed to index space Vec3d radius = mTransform.worldToIndex(Vec3d(pscale)); Vec3d position = iter.getCoord().asVec3d() + positionHandle->get(*iter); mBbox.expand(position - radius); mBbox.expand(position + radius); } } else { auto iter = leaf.beginIndexOn(); for (; iter; ++iter) { double pscale = double(pscaleIsUniform ? uniformPscale : pscaleHandle->get(*iter)); // pscale needs to be transformed to index space Vec3d radius = mTransform.worldToIndex(Vec3d(pscale)); Vec3d position = iter.getCoord().asVec3d() + positionHandle->get(*iter); mBbox.expand(position - radius); mBbox.expand(position + radius); } } } ///////////// const math::Transform& mTransform; BBoxd mBbox; const std::vector<Name>& mIncludeGroups; const std::vector<Name>& mExcludeGroups; }; // GenerateBBoxOp ////////////////////////////////////// template <typename PointDataTreeT> struct PopulateColorFromVelocityOp { using LeafNode = typename PointDataTreeT::LeafNodeType; using IndexOnIter = typename LeafNode::IndexOnIter; using LeafManagerT = typename tree::LeafManager<PointDataTreeT>; using LeafRangeT = typename LeafManagerT::LeafRange; using MultiGroupFilter = points::MultiGroupFilter; PopulateColorFromVelocityOp( const size_t colorIndex, const size_t velocityIndex, const UT_Ramp& ramp, const float maxSpeed, const std::vector<Name>& includeGroups, const std::vector<Name>& excludeGroups) : mColorIndex(colorIndex) , mVelocityIndex(velocityIndex) , mRamp(ramp) , mMaxSpeed(maxSpeed) , mIncludeGroups(includeGroups) , mExcludeGroups(excludeGroups) { } Vec3f getColorFromRamp(const Vec3f& velocity) const{ float proportionalSpeed = (mMaxSpeed == 0.0f ? 
0.0f : velocity.length()/mMaxSpeed); if (proportionalSpeed > 1.0f) proportionalSpeed = 1.0f; if (proportionalSpeed < 0.0f) proportionalSpeed = 0.0f; float rampVal[4]; mRamp.rampLookup(proportionalSpeed, rampVal); return Vec3f(rampVal[0], rampVal[1], rampVal[2]); } void operator()(LeafRangeT& range) const{ for (auto leafIter = range.begin(); leafIter; ++leafIter) { auto& leaf = *leafIter; auto colorHandle = points::AttributeWriteHandle<Vec3f>::create(leaf.attributeArray(mColorIndex)); auto velocityHandle = points::AttributeHandle<Vec3f>::create(leaf.constAttributeArray(mVelocityIndex)); const bool uniform = velocityHandle->isUniform(); const Vec3f uniformColor = getColorFromRamp(velocityHandle->get(0)); MultiGroupFilter filter(mIncludeGroups, mExcludeGroups, leaf.attributeSet()); if (filter.state() == points::index::ALL) { for (auto iter = leaf.beginIndexOn(); iter; ++iter) { Vec3f color = uniform ? uniformColor : getColorFromRamp(velocityHandle->get(*iter)); colorHandle->set(*iter, color); } } else { for (auto iter = leaf.beginIndexOn(filter); iter; ++iter) { Vec3f color = uniform ? 
uniformColor : getColorFromRamp(velocityHandle->get(*iter)); colorHandle->set(*iter, color); } } } } ////////////////////////////////////////////// const size_t mColorIndex; const size_t mVelocityIndex; const UT_Ramp& mRamp; const float mMaxSpeed; const std::vector<Name>& mIncludeGroups; const std::vector<Name>& mExcludeGroups; }; //////////////////////////////////////////// namespace { template <typename PointDataGridT> inline BBoxd getBoundingBox( const std::vector<typename PointDataGridT::Ptr>& gridPtrs, const std::vector<Name>& includeGroups, const std::vector<Name>& excludeGroups) { using PointDataTree = typename PointDataGridT::TreeType; BBoxd worldBounds; for (const auto& grid : gridPtrs) { tree::LeafManager<const PointDataTree> leafManager(grid->tree()); // size and combine the boxes for each leaf in the tree via a reduction GenerateBBoxOp<PointDataTree> generateBbox(grid->transform(), includeGroups, excludeGroups); tbb::parallel_reduce(leafManager.leafRange(), generateBbox); if (generateBbox.mBbox.empty()) continue; // all the bounds must be unioned in world space BBoxd gridBounds = grid->transform().indexToWorld(generateBbox.mBbox); worldBounds.expand(gridBounds); } return worldBounds; } } // namespace static VRAY_ProceduralArg theArgs[] = { VRAY_ProceduralArg("file", "string", ""), VRAY_ProceduralArg("streamdata", "int", "1"), VRAY_ProceduralArg("groupmask", "string", ""), VRAY_ProceduralArg("attrmask", "string", ""), VRAY_ProceduralArg("speedtocolor", "int", "0"), VRAY_ProceduralArg("maxspeed", "real", "1.0"), VRAY_ProceduralArg("ramp", "string", ""), VRAY_ProceduralArg() }; class ProcDef : public VRAY_ProceduralFactory::ProcDefinition { public: ProcDef() : VRAY_ProceduralFactory::ProcDefinition("openvdb_points") { } virtual VRAY_Procedural *create() const { return new VRAY_OpenVDB_Points(); } virtual VRAY_ProceduralArg *arguments() const { return theArgs; } }; void registerProcedural(VRAY_ProceduralFactory *factory) { factory->insert(new ProcDef); } 
VRAY_OpenVDB_Points::VRAY_OpenVDB_Points() { openvdb::initialize(); } const char * VRAY_OpenVDB_Points::className() const { return "VRAY_OpenVDB_Points"; } int VRAY_OpenVDB_Points::initialize(const UT_BoundingBox *) { struct Local { static GridVecPtr loadGrids(const std::string& filename, const bool stream) { GridVecPtr grids; // save the grids so that we only read the file once try { io::File file(filename); file.open(); for (auto iter=file.beginName(), endIter=file.endName(); iter != endIter; ++iter) { GridBase::Ptr baseGrid = file.readGridMetadata(*iter); if (baseGrid->isType<points::PointDataGrid>()) { auto grid = StaticPtrCast<points::PointDataGrid>(file.readGrid(*iter)); assert(grid); if (stream) { // enable streaming mode to auto-collapse attributes // on read for improved memory efficiency points::setStreamingMode(grid->tree(), /*on=*/true); } grids.push_back(grid); } } file.close(); } catch (const IoError& e) { OPENVDB_LOG_ERROR(e.what() << " (" << filename << ")"); } return grids; } }; import("file", mFilename); int streamData; import("streamdata", &streamData, 1); import("attrmask", mAttrStr); float fps; import("global:fps", &fps, 1); float shutter[2]; import("camera:shutter", shutter, 2); int velocityBlur; import("object:velocityblur", &velocityBlur, 1); mPreBlur = velocityBlur ? -shutter[0]/fps : 0; mPostBlur = velocityBlur ? 
shutter[1]/fps : 0; int speedToColorInt = 0; import("speedtocolor", &speedToColorInt, 1); mSpeedToColor = bool(speedToColorInt); // if speed-to-color is enabled we need to build a ramp object if (mSpeedToColor) { import("maxspeed", &mMaxSpeed, 1); UT_StringHolder rampStr; import("ramp", rampStr); std::stringstream rampStream(rampStr.toStdString()); std::istream_iterator<float> begin(rampStream); std::istream_iterator<float> end; std::vector<float> rampVals(begin, end); for (size_t n = 4, N = rampVals.size(); n < N; n += 5) { const int basis = static_cast<int>(rampVals[n]); mFunctionRamp.addNode(rampVals[n-4], UT_FRGBA(rampVals[n-3], rampVals[n-2], rampVals[n-1], 1.0f), static_cast<UT_SPLINE_BASIS>(basis)); } } mGridPtrs = Local::loadGrids(mFilename.toStdString(), streamData ? true : false); // extract which groups to include and exclude UT_StringHolder groupStr; import("groupmask", groupStr); AttributeSet::Descriptor::parseNames(mIncludeGroups, mExcludeGroups, groupStr.toStdString()); // get openvdb bounds and convert to houdini bounds BBoxd vdbBox = ::getBoundingBox<PointDataGrid>(mGridPtrs, mIncludeGroups, mExcludeGroups); mBox.setBounds( static_cast<float>(vdbBox.min().x()), static_cast<float>(vdbBox.min().y()), static_cast<float>(vdbBox.min().z()), static_cast<float>(vdbBox.max().x()), static_cast<float>(vdbBox.max().y()), static_cast<float>(vdbBox.max().z())); // if streaming the data, re-open the file now that the bounding box has been computed if (streamData) { mGridPtrs = Local::loadGrids(mFilename.toStdString(), true); } return 1; } void VRAY_OpenVDB_Points::getBoundingBox(UT_BoundingBox &box) { box = mBox; } void VRAY_OpenVDB_Points::render() { using PointDataTree = points::PointDataGrid::TreeType; using AttributeSet = points::AttributeSet; using Descriptor = AttributeSet::Descriptor; /// Allocate geometry and extract the GU_Detail VRAY_ProceduralGeo geo = createGeometry(); GU_Detail* gdp = geo.get(); // extract which attributes to include and exclude 
std::vector<Name> includeAttributes; std::vector<Name> excludeAttributes; AttributeSet::Descriptor::parseNames( includeAttributes, excludeAttributes, mAttrStr.toStdString()); // if nothing was included or excluded: "all attributes" is implied with an empty vector // if nothing was included but something was explicitly excluded: // add all attributes but then remove the excluded // if something was included: // add only explicitly included attributes and then removed any excluded if (includeAttributes.empty() && !excludeAttributes.empty()) { // add all attributes for (const auto& grid : mGridPtrs) { auto leafIter = grid->tree().cbeginLeaf(); if (!leafIter) continue; const AttributeSet& attributeSet = leafIter->attributeSet(); const Descriptor& descriptor = attributeSet.descriptor(); const Descriptor::NameToPosMap& nameToPosMap = descriptor.map(); for (const auto& namePos : nameToPosMap) { includeAttributes.push_back(namePos.first); } } } // sort, and then remove any duplicates std::sort(includeAttributes.begin(), includeAttributes.end()); std::sort(excludeAttributes.begin(), excludeAttributes.end()); includeAttributes.erase( std::unique(includeAttributes.begin(), includeAttributes.end()), includeAttributes.end()); excludeAttributes.erase( std::unique(excludeAttributes.begin(), excludeAttributes.end()), excludeAttributes.end()); // make a vector (validAttributes) of all elements that are in includeAttributes // but are NOT in excludeAttributes std::vector<Name> validAttributes(includeAttributes.size()); auto pastEndIter = std::set_difference(includeAttributes.begin(), includeAttributes.end(), excludeAttributes.begin(), excludeAttributes.end(), validAttributes.begin()); validAttributes.resize(pastEndIter - validAttributes.begin()); // if any of the grids are going to add a pscale, set the default here if (std::binary_search(validAttributes.begin(), validAttributes.end(), "pscale")) { gdp->addTuple(GA_STORE_REAL32, GA_ATTRIB_POINT, "pscale", 1, 
GA_Defaults(DEFAULT_PSCALE)); } // map speed to color if requested if (mSpeedToColor) { for (const auto& grid : mGridPtrs) { PointDataTree& tree = grid->tree(); auto leafIter = tree.beginLeaf(); if (!leafIter) continue; size_t velocityIndex = leafIter->attributeSet().find("v"); if (velocityIndex != AttributeSet::INVALID_POS) { const std::string velocityType = leafIter->attributeSet().descriptor().type(velocityIndex).first; // keep existing Cd attribute only if it is a supported type (float or half) size_t colorIndex = leafIter->attributeSet().find("Cd"); std::string colorType = ""; if (colorIndex != AttributeSet::INVALID_POS) { colorType = leafIter->attributeSet().descriptor().type(colorIndex).first; if (colorType != typeNameAsString<Vec3f>() && colorType != typeNameAsString<Vec3H>()) { dropAttribute(tree, "Cd"); colorIndex = AttributeSet::INVALID_POS; } } // create new Cd attribute if one did not previously exist if (colorIndex == AttributeSet::INVALID_POS) { openvdb::points::appendAttribute<Vec3f, FixedPointCodec<false, UnitRange>>( tree, "Cd"); colorIndex = leafIter->attributeSet().find("Cd"); } tree::LeafManager<PointDataTree> leafManager(tree); PopulateColorFromVelocityOp<PointDataTree> populateColor(colorIndex, velocityIndex, mFunctionRamp, mMaxSpeed, mIncludeGroups, mExcludeGroups); tbb::parallel_for(leafManager.leafRange(), populateColor); } } } for (const auto& grid : mGridPtrs) { hvdb::convertPointDataGridToHoudini( *gdp, *grid, validAttributes, mIncludeGroups, mExcludeGroups); } geo.addVelocityBlur(mPreBlur, mPostBlur); // Create a geometry object in mantra VRAY_ProceduralChildPtr obj = createChild(); obj->addGeometry(geo); // Override the renderpoints setting to always enable points only rendering int one = 1; obj->changeSetting("renderpoints", 1, &one); }
20,152
C++
33.273809
100
0.599444
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Noise.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Noise.cc /// /// @author FX R&D OpenVDB team /// /// @brief Applies noise to level sets represented by VDBs. The noise can /// optionally be masked by another level set #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/math/Operators.h> #include <openvdb/math/Stencils.h> #include <openvdb/tools/Interpolation.h> // for box sampler #include <UT/UT_PNoise.h> #include <UT/UT_Interrupt.h> #include <sstream> #include <stdexcept> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; namespace cvdb = openvdb; namespace { // anon namespace struct FractalBoltzmannGenerator { FractalBoltzmannGenerator(float freq, float amp, int octaves, float gain, float lacunarity, float roughness, int mode): mOctaves(octaves), mNoiseMode(mode), mFreq(freq), mAmp(amp), mGain(gain), mLacunarity(lacunarity), mRoughness(roughness) {} // produce the noise as float float noise(cvdb::Vec3R point, float freqMult = 1.0f) const { float signal; float result = 0.0f; float curamp = mAmp; float curfreq = mFreq * freqMult; for (int n = 0; n <= mOctaves; n++) { point = (point * curfreq); // convert to float for UT_PNoise float location[3] = { float(point[0]), float(point[1]), float(point[2]) }; // generate noise in the [-1,1] range signal = 2.0f*UT_PNoise::noise3D(location) - 1.0f; if (mNoiseMode > 0) { signal = cvdb::math::Pow(cvdb::math::Abs(signal), mGain); } result += (signal * curamp); curfreq = mLacunarity; curamp *= mRoughness; } if (mNoiseMode == 1) { result = -result; } return result; } private: // member data int mOctaves; int mNoiseMode; float mFreq; float mAmp; float mGain; float mLacunarity; float mRoughness; }; struct NoiseSettings { NoiseSettings() : mMaskMode(0), mOffset(0.0), mThreshold(0.0), mFallOff(0.0), mNOffset(cvdb::Vec3R(0.0, 0.0, 0.0)) {} int mMaskMode; float mOffset, mThreshold, 
mFallOff; cvdb::Vec3R mNOffset; }; } // anon namespace //////////////////////////////////////// class SOP_OpenVDB_Noise: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Noise(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Noise() override {} static OP_Node* factory(OP_Network*, const char*, OP_Operator*); int isRefInput(unsigned input) const override { return (input == 1); } class Cache: public SOP_VDBCacheOptions { protected: OP_ERROR cookVDBSop(OP_Context&) override; private: // Process the given grid and return the output grid. // Can be applied to FloatGrid or DoubleGrid // this contains the majority of the work template<typename GridType> void applyNoise(hvdb::Grid& grid, const FractalBoltzmannGenerator&, const NoiseSettings&, const hvdb::Grid* maskGrid) const; }; // class Cache protected: bool updateParmsFlags() override; }; //////////////////////////////////////// void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; // Define a string-valued group name pattern parameter and add it to the list. 
parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Specify a subset of the input VDB grids to be processed.") .setDocumentation( "A subset of the input VDBs to be processed" " (see [specifying volumes|/model/volumes#group])")); // amplitude parms.add(hutil::ParmFactory(PRM_FLT_J, "amp", "Amplitude") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_FREE, 10.0) .setTooltip("The amplitude of the noise")); // frequency parms.add(hutil::ParmFactory(PRM_FLT_J, "freq", "Frequency") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_FREE, 1.0) .setTooltip("The frequency of the noise")); // Octaves parms.add(hutil::ParmFactory(PRM_INT_J, "oct", "Octaves") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_FREE, 10) .setTooltip("The number of octaves for the noise")); // Lacunarity parms.add(hutil::ParmFactory(PRM_FLT_J, "lac", "Lacunarity") .setDefault(PRMtwoDefaults) .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_FREE, 10.0) .setTooltip("The lacunarity of the noise")); // Gain parms.add(hutil::ParmFactory(PRM_FLT_J, "gain", "Gain") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_FREE, 1.0) .setTooltip("The gain of the noise")); // Roughness parms.add(hutil::ParmFactory(PRM_FLT_J, "rough", "Roughness") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_RESTRICTED, 10.0) .setTooltip("The roughness of the noise")); // SurfaceOffset parms.add(hutil::ParmFactory(PRM_FLT_J, "soff", "Surface Offset") .setDefault(PRMzeroDefaults) .setTooltip("An offset from the isosurface of the level set at which to apply the noise")); // Noise Offset parms.add(hutil::ParmFactory(PRM_XYZ_J, "noff", "Noise Offset") .setVectorSize(3) .setDefault(PRMzeroDefaults) .setTooltip("An offset for the noise in world units")); // Noise Mode parms.add(hutil::ParmFactory(PRM_ORD, "mode", "Noise Mode") 
.setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "straight", "Straight", "abs", "Absolute", "invabs", "Inverse Absolute" }) .setTooltip("The noise mode: either Straight, Absolute, or Inverse Absolute")); // Mask { parms.add(hutil::ParmFactory(PRM_HEADING, "maskHeading", "Mask")); // Group parms.add(hutil::ParmFactory(PRM_STRING, "maskGroup", "Mask Group") .setChoiceList(&hutil::PrimGroupMenuInput2) .setDocumentation( "A scalar VDB from the second input to be used as a mask" " (see [specifying volumes|/model/volumes#group])")); // Use mask parms.add(hutil::ParmFactory(PRM_ORD, "mask", "Mask") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "maskless", "No noise if mask < threshold", "maskgreater", "No noise if mask > threshold", "maskgreaternml", "No noise if mask > threshold & normals align", "maskisfreqmult", "Use mask as frequency multiplier" }) .setTooltip("How to interpret the mask") .setDocumentation("\ How to interpret the mask\n\ \n\ No noise if mask < threshold:\n\ Don't add noise to a voxel if the mask value at that voxel\n\ is less than the __Mask Threshold__.\n\ No noise if mask > threshold:\n\ Don't add noise to a voxel if the mask value at that voxel\n\ is greater than the __Mask Threshold__.\n\ No noise if mask > threshold & normals align:\n\ Don't add noise to a voxel if the mask value at that voxel\n\ is greater than the __Mask Threshold__ and the surface normal\n\ of the level set at that voxel aligns with the gradient of the mask.\n\ Use mask as frequency multiplier:\n\ Add noise to every voxel, but multiply the noise frequency by the mask.\n")); // mask threshold parms.add(hutil::ParmFactory(PRM_FLT_J, "thres", "Mask Threshold") .setDefault(PRMzeroDefaults) .setTooltip("The threshold value for mask comparisons")); // Fall off parms.add(hutil::ParmFactory(PRM_FLT_J, "fall", "Falloff") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_FREE, 10.0) .setTooltip("A falloff 
value for the threshold")); // } // Register this operator. hvdb::OpenVDBOpFactory("VDB Noise", SOP_OpenVDB_Noise::factory, parms, *table) .setNativeName("") .addInput("VDB grids to noise") .addOptionalInput("Optional VDB grid to use as mask") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Noise::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Add noise to VDB level sets.\"\"\"\n\ \n\ @overview\n\ \n\ Using a fractal Boltzmann generator, this node adds surface noise\n\ to VDB level set volumes.\n\ An optional mask grid can be provided to control the amount of noise per voxel.\n\ \n\ @related\n\ - [Node:sop/cloudnoise]\n\ - [Node:sop/volumevop]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Noise::factory(OP_Network* net, const char* name, OP_Operator *op) { return new SOP_OpenVDB_Noise(net, name, op); } SOP_OpenVDB_Noise::SOP_OpenVDB_Noise(OP_Network* net, const char* name, OP_Operator* op): SOP_NodeVDB(net, name, op) { UT_PNoise::initNoise(); } //////////////////////////////////////// bool SOP_OpenVDB_Noise::updateParmsFlags() { bool changed = false; const GU_Detail* refGdp = this->getInputLastGeo(1, /*time=*/0.0); const bool hasSecondInput = (refGdp != nullptr); changed |= enableParm("maskGroup", hasSecondInput); changed |= enableParm("mask", hasSecondInput); changed |= enableParm("thres", hasSecondInput); changed |= enableParm("fall", hasSecondInput); return changed; } //////////////////////////////////////// template<typename GridType> void SOP_OpenVDB_Noise::Cache::applyNoise( hvdb::Grid& grid, const FractalBoltzmannGenerator& fbGenerator, const NoiseSettings& settings, const hvdb::Grid* mask) const { // Use second order finite difference. 
using Gradient = cvdb::math::Gradient<cvdb::math::GenericMap, cvdb::math::CD_2ND>; using CPT = cvdb::math::CPT<cvdb::math::GenericMap, cvdb::math::CD_2ND>; using StencilType = cvdb::math::SecondOrderDenseStencil<GridType>; using TreeType = typename GridType::TreeType; using Vec3Type = cvdb::math::Vec3<typename TreeType::ValueType>; // Down cast the generic pointer to the output grid. GridType& outGrid = UTvdbGridCast<GridType>(grid); const cvdb::math::Transform& xform = grid.transform(); // Create a stencil. StencilType stencil(outGrid); // uses its own grid accessor // scratch variables typename GridType::ValueType result; // result - use mask as frequency multiplier cvdb::Vec3R voxelPt; // voxel coordinates cvdb::Vec3R worldPt; // world coordinates float noise, alpha; // The use of the GenericMap is a performance compromise // because the GenericMap holdds a base class pointer. // This should be optimized by resolving the acutal map type cvdb::math::GenericMap map(grid); if (!mask) { alpha = 1.0f; for (typename GridType::ValueOnIter v = outGrid.beginValueOn(); v; ++v) { stencil.moveTo(v); worldPt = xform.indexToWorld(CPT::result(map, stencil) + settings.mNOffset); noise = fbGenerator.noise(worldPt); v.setValue(*v + alpha * (noise - settings.mOffset)); } return; } // Down cast the generic pointer to the mask grid. const GridType* maskGrid = UTvdbGridCast<GridType>(mask); const cvdb::math::Transform& maskXform = mask->transform(); switch (settings.mMaskMode) { case 0: //No noise if mask < threshold { for (typename GridType::ValueOnIter v = outGrid.beginValueOn(); v; ++v) { cvdb::Coord ijk = v.getCoord(); stencil.moveTo(ijk); // in voxel units worldPt = xform.indexToWorld(ijk); voxelPt = maskXform.worldToIndex(worldPt); cvdb::tools::BoxSampler::sample<TreeType>(maskGrid->tree(), voxelPt, result); // apply threshold if (result < settings.mThreshold) { continue; //next voxel } alpha = static_cast<float>(result >= settings.mThreshold + settings.mFallOff ? 
1.0f : (result - settings.mThreshold) / settings.mFallOff); worldPt = xform.indexToWorld(CPT::result(map, stencil) + settings.mNOffset); noise = fbGenerator.noise(worldPt); v.setValue(*v + alpha * (noise - settings.mOffset)); } } break; case 1: //No noise if mask > threshold { for (typename GridType::ValueOnIter v = outGrid.beginValueOn(); v; ++v) { cvdb::Coord ijk = v.getCoord(); stencil.moveTo(ijk); // in voxel units worldPt = xform.indexToWorld(ijk); voxelPt = maskXform.worldToIndex(worldPt); cvdb::tools::BoxSampler::sample<TreeType>(maskGrid->tree(), voxelPt, result); // apply threshold if (result > settings.mThreshold) { continue; //next voxel } alpha = static_cast<float>(result <= settings.mThreshold - settings.mFallOff ? 1.0f : (settings.mThreshold - result) / settings.mFallOff); worldPt = xform.indexToWorld(CPT::result(map, stencil) + settings.mNOffset); noise = fbGenerator.noise(worldPt); v.setValue(*v + alpha * (noise - settings.mOffset)); } } break; case 2: //No noise if mask < threshold & normals align { StencilType maskStencil(*maskGrid); for (typename GridType::ValueOnIter v = outGrid.beginValueOn(); v; ++v) { cvdb::Coord ijk = v.getCoord(); stencil.moveTo(ijk); // in voxel units worldPt = xform.indexToWorld(ijk); voxelPt = maskXform.worldToIndex(worldPt); cvdb::tools::BoxSampler::sample<TreeType>(maskGrid->tree(), voxelPt, result); // for the gradient of the maskGrid cvdb::Coord mask_ijk( static_cast<int>(voxelPt[0]), static_cast<int>(voxelPt[1]), static_cast<int>(voxelPt[2])); maskStencil.moveTo(mask_ijk); // normal alignment Vec3Type grid_grad = Gradient::result(map, stencil); Vec3Type mask_grad = Gradient::result(map, maskStencil); const double c = cvdb::math::Abs(grid_grad.dot(mask_grad)); if (result > settings.mThreshold && c > 0.9) continue;//next voxel alpha = static_cast<float>(result <= settings.mThreshold - settings.mFallOff ? 
1.0f : (settings.mThreshold - result) / settings.mFallOff); worldPt = xform.indexToWorld(CPT::result(map, stencil) + settings.mNOffset); noise = fbGenerator.noise(worldPt); v.setValue(*v + alpha * (noise - settings.mOffset)); } } break; case 3: //Use mask as frequency multiplier { alpha = 1.0f; for (typename GridType::ValueOnIter v = outGrid.beginValueOn(); v; ++v) { cvdb::Coord ijk = v.getCoord(); stencil.moveTo(ijk); // in voxel units worldPt = xform.indexToWorld(ijk); voxelPt = maskXform.worldToIndex(worldPt); cvdb::tools::BoxSampler::sample<TreeType>(maskGrid->tree(), voxelPt, result); worldPt = xform.indexToWorld(CPT::result(map, stencil) + settings.mNOffset); // Use result of sample as frequency multiplier. noise = fbGenerator.noise(worldPt, static_cast<float>(result)); v.setValue(*v + alpha * (noise - settings.mOffset)); } } break; default: // should never get here throw std::runtime_error("internal error in mode selection"); }// end switch } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Noise::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); // Evaluate the FractalBoltzmann noise parameters from UI FractalBoltzmannGenerator fbGenerator( static_cast<float>(evalFloat("freq", 0, time)), static_cast<float>(evalFloat("amp", 0, time)), static_cast<int>(evalInt("oct", 0, time)), static_cast<float>(evalFloat("gain", 0, time)), static_cast<float>(evalFloat("lac", 0, time)), static_cast<float>(evalFloat("rough", 0, time)), static_cast<int>(evalInt("mode", 0, time))); NoiseSettings settings; // evaluate parameter for blending noise settings.mOffset = static_cast<float>(evalFloat("soff", 0, time)); settings.mNOffset = cvdb::Vec3R( evalFloat("noff", 0, time), evalFloat("noff", 1, time), evalFloat("noff", 2, time)); // Mask const openvdb::GridBase* maskGrid = nullptr; if (const GU_Detail* refGdp = inputGeo(1)) { const GA_PrimitiveGroup* maskGroup = matchGroup(*refGdp, evalStdString("maskGroup", time)); 
hvdb::VdbPrimCIterator gridIter(refGdp, maskGroup); if (gridIter) { settings.mMaskMode = static_cast<int>(evalInt("mask", 0, time)); settings.mThreshold = static_cast<float>(evalFloat("thres", 0, time)); settings.mFallOff = static_cast<float>(evalFloat("fall", 0, time)); maskGrid = &((*gridIter)->getGrid()); ++gridIter; } if (gridIter) { addWarning(SOP_MESSAGE, "Found more than one grid in the mask group; the first grid will be used."); } } // Do the work.. UT_AutoInterrupt progress("OpenVDB LS Noise"); // Get the group of grids to process. const GA_PrimitiveGroup* group = matchGroup(*gdp, evalStdString("group", time)); // For each VDB primitive in the selected group. for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { if (progress.wasInterrupted()) { throw std::runtime_error("processing was interrupted"); } GU_PrimVDB* vdbPrim = *it; if (vdbPrim->getStorageType() == UT_VDB_FLOAT) { vdbPrim->makeGridUnique(); applyNoise<cvdb::ScalarGrid>(vdbPrim->getGrid(), fbGenerator, settings, maskGrid); } else if (vdbPrim->getStorageType() == UT_VDB_DOUBLE) { vdbPrim->makeGridUnique(); applyNoise<cvdb::DoubleGrid>(vdbPrim->getGrid(), fbGenerator, settings, maskGrid); } else { std::stringstream ss; ss << "VDB primitive " << it.getPrimitiveNameOrIndex() << " was skipped because it is not a scalar grid."; addWarning(SOP_MESSAGE, ss.str().c_str()); continue; } } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
19,663
C++
33.80354
99
0.595331
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Points_Group.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file SOP_OpenVDB_Points_Group.cc /// /// @author Dan Bailey /// /// @brief Add and remove point groups. #include <openvdb/openvdb.h> #include <openvdb/points/PointDataGrid.h> #include <openvdb/points/PointGroup.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb_houdini/PointUtils.h> #include <openvdb_houdini/Utils.h> #include <houdini_utils/geometry.h> #include <houdini_utils/ParmFactory.h> #include <algorithm> // for std::find() #include <limits> #include <random> #include <stdexcept> #include <string> #include <vector> using namespace openvdb; using namespace openvdb::points; using namespace openvdb::math; namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; namespace { struct GroupParms { // global parms bool mEnable = false; std::string mGroupName = ""; const GA_PrimitiveGroup * mGroup = nullptr; // operation flags bool mOpGroup = false; bool mOpLeaf = false; bool mOpHashI = false; bool mOpHashL = false; bool mOpBBox = false; bool mOpLS = false; // group parms std::vector<std::string> mIncludeGroups; std::vector<std::string> mExcludeGroups; // number parms bool mCountMode = false; bool mHashMode = false; float mPercent = 0.0f; long mCount = 0L; std::string mHashAttribute = ""; size_t mHashAttributeIndex = openvdb::points::AttributeSet::INVALID_POS; // bbox parms openvdb::BBoxd mBBox; // level set parms openvdb::FloatGrid::ConstPtr mLevelSetGrid = FloatGrid::create(0); float mSDFMin = 0.0f; float mSDFMax = 0.0f; // viewport parms bool mEnableViewport = false; bool mAddViewport = false; std::string mViewportGroupName = ""; // drop groups bool mDropAllGroups = false; std::vector<std::string> mDropIncludeGroups; std::vector<std::string> mDropExcludeGroups; }; } // namespace //////////////////////////////////////// class SOP_OpenVDB_Points_Group: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Points_Group(OP_Network*, const char* name, OP_Operator*); 
~SOP_OpenVDB_Points_Group() override = default; static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned i) const override { return (i > 0); } class Cache: public SOP_VDBCacheOptions { public: OP_ERROR evalGroupParms(OP_Context&, GroupParms&); OP_ERROR evalGridGroupParms(const PointDataGrid&, OP_Context&, GroupParms&); void performGroupFiltering(PointDataGrid&, const GroupParms&); void setViewportMetadata(PointDataGrid&, const GroupParms&); void removeViewportMetadata(PointDataGrid&); protected: OP_ERROR cookVDBSop(OP_Context&) override; }; // class Cache protected: bool updateParmsFlags() override; void resolveObsoleteParms(PRM_ParmList*) override; }; // class SOP_OpenVDB_Points_Group //////////////////////////////////////// static PRM_Default negPointOneDefault(-0.1); static PRM_Default fiveThousandDefault(5000); // Build UI and register this operator. void newSopOperator(OP_OperatorTable* table) { openvdb::initialize(); if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Specify a subset of the input VDB grids to be loaded.") .setDocumentation( "A subset of the input VDB Points primitives to be processed" " (see [specifying volumes|/model/volumes#group])")); parms.add(hutil::ParmFactory(PRM_STRING, "vdbpointsgroup", "VDB Points Group") .setChoiceList(&hvdb::VDBPointsGroupMenuInput1) .setTooltip( "Create a new VDB points group as a subset of an existing VDB points group(s).")); parms.beginSwitcher("tabMenu1"); parms.addFolder("Create"); // Toggle to enable creation parms.add(hutil::ParmFactory(PRM_TOGGLE, "enablecreate", "Enable") .setDefault(PRMoneDefaults) .setTooltip("Enable creation of the group.")); parms.add(hutil::ParmFactory(PRM_STRING, "groupname", "Group Name") .setDefault(0, ::strdup("group1")) .setTooltip("The name of the internal group to create")); parms.beginSwitcher("tabMenu2"); 
parms.addFolder("Number"); parms.add(hutil::ParmFactory(PRM_TOGGLE, "enablenumber", "Enable") .setDefault(PRMzeroDefaults) .setTooltip("Enable filtering by number.")); parms.add(hutil::ParmFactory(PRM_ORD, "numbermode", "Mode") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "percentage", "Percentage", "total", "Total" }) .setTooltip("Specify how to filter out a subset of the points inside the VDB Points.")); parms.add(hutil::ParmFactory(PRM_FLT, "pointpercent", "Percent") .setDefault(PRMtenDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_RESTRICTED, 100) .setTooltip("The percentage of points to include in the group")); parms.add(hutil::ParmFactory(PRM_INT, "pointcount", "Count") .setDefault(&fiveThousandDefault) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 1000000) .setTooltip("The total number of points to include in the group")); parms.add(hutil::ParmFactory(PRM_TOGGLE_J, "enablepercentattribute", "") .setDefault(PRMzeroDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN)); parms.add(hutil::ParmFactory(PRM_STRING, "percentattribute", "Attribute Seed") .setDefault(0, ::strdup("id")) .setTooltip("The point attribute to use as a seed for percent filtering")); parms.endSwitcher(); parms.beginSwitcher("tabMenu3"); parms.addFolder("Bounding"); parms.add(hutil::ParmFactory(PRM_TOGGLE, "enableboundingbox", "Enable") .setDefault(PRMzeroDefaults) .setTooltip("Enable filtering by bounding box.")); parms.add(hutil::ParmFactory(PRM_ORD, "boundingmode", "Mode") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "boundingbox", "Bounding Box", "boundingobject", "Bounding Object" })); parms.add(hutil::ParmFactory(PRM_STRING, "boundingname", "Name") .setChoiceList(&hutil::PrimGroupMenuInput2) .setTooltip("The name of the bounding geometry")); parms.add(hutil::ParmFactory(PRM_XYZ, "size", "Size") .setDefault(PRMoneDefaults) .setVectorSize(3) .setTooltip("The size of the bounding box")); parms.add(hutil::ParmFactory(PRM_XYZ, "center", "Center") .setVectorSize(3) .setTooltip("The 
center of the bounding box")); parms.endSwitcher(); parms.beginSwitcher("tabMenu4"); parms.addFolder("SDF"); parms.add(hutil::ParmFactory(PRM_TOGGLE, "enablesdf", "Enable") .setDefault(PRMzeroDefaults) .setTooltip("Enable filtering by SDF.")); parms.add(hutil::ParmFactory(PRM_STRING, "sdfname", "Name") .setChoiceList(&hutil::PrimGroupMenuInput2) .setTooltip("The name of the SDF")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "enablesdfmin", "Enable") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip("Enable SDF minimum.")); parms.add(hutil::ParmFactory(PRM_FLT, "sdfmin", "SDF Minimum") .setDefault(&negPointOneDefault) .setRange(PRM_RANGE_UI, -1, PRM_RANGE_UI, 1) .setTooltip("SDF minimum value")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "enablesdfmax", "Enable") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip("Enable SDF maximum.")); parms.add(hutil::ParmFactory(PRM_FLT, "sdfmax", "SDF Maximum") .setDefault(PRMpointOneDefaults) .setRange(PRM_RANGE_UI, -1, PRM_RANGE_UI, 1) .setTooltip("SDF maximum value")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "sdfinvert", "Invert") .setDefault(PRMzeroDefaults) .setTooltip("Invert SDF minimum and maximum.")); parms.endSwitcher(); parms.addFolder("Delete"); parms.add(hutil::ParmFactory(PRM_STRING, "deletegroups", "Point Groups") .setDefault(0, "") .setHelpText( "A space-delimited list of groups to delete.\n\n" "This will delete the selected groups but will not delete" " the points contained in them.") .setChoiceList(&hvdb::VDBPointsGroupMenuInput1)); parms.addFolder("Viewport"); parms.add(hutil::ParmFactory(PRM_TOGGLE, "enableviewport", "Enable") .setDefault(PRMzeroDefaults) .setTooltip("Toggle viewport group.") .setDocumentation( "Enable the viewport group.\n\n" "This allows one to specify a subset of points to be displayed in the viewport.\n" "This minimizes the data transfer to the viewport without removing the data.\n\n" "NOTE:\n" " Only one group can be tagged as a 
viewport group.\n")); parms.add(hutil::ParmFactory(PRM_ORD, "viewportoperation", "Operation") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "addviewportgroup", "Add Viewport Group", "removeviewportgroup", "Remove Viewport Group" }) .setTooltip("Specify whether to add or remove the viewport group.")); parms.add(hutil::ParmFactory(PRM_STRING, "viewportgroupname", "Name") .setDefault("chs(\"groupname\")", CH_OLD_EXPRESSION) .setTooltip("Display only this group in the viewport.")); parms.endSwitcher(); hutil::ParmList obsoleteParms; obsoleteParms.add(houdini_utils::ParmFactory(PRM_LABEL, "spacer1", "")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "enablelevelset", "Enable") .setDefault(PRMzeroDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "levelsetname", "Name")); ////////// // Register this operator. hvdb::OpenVDBOpFactory("VDB Points Group", SOP_OpenVDB_Points_Group::factory, parms, *table) .addInput("VDB Points") .addOptionalInput("Optional bounding geometry or level set") .setObsoleteParms(obsoleteParms) .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Points_Group::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Manipulate the internal groups of a VDB Points primitive.\"\"\"\n\ \n\ @overview\n\ \n\ This node acts like the [Node:sop/group] node, but for the points inside\n\ a VDB Points primitive.\n\ It can create and manipulate the primitive's internal groups.\n\ Generated groups can be used to selectively unpack a subset of the points\n\ with an [OpenVDB Points Convert node|Node:sop/DW_OpenVDBPointsConvert].\n\ \n\ @related\n\ - [OpenVDB Points Convert|Node:sop/DW_OpenVDBPointsConvert]\n\ - [OpenVDB Points Delete|Node:sop/DW_OpenVDBPointsDelete]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } void SOP_OpenVDB_Points_Group::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; 
resolveRenamedParm(*obsoleteParms, "enablelevelset", "enablesdf"); resolveRenamedParm(*obsoleteParms, "levelsetname", "sdfname"); // Delegate to the base class. hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } bool SOP_OpenVDB_Points_Group::updateParmsFlags() { bool changed = false; const bool creation = evalInt("enablecreate", 0, 0) != 0; const bool number = evalInt("enablenumber", 0, 0) != 0; const bool total = evalInt("numbermode", 0, 0) == 1; const bool percentattribute = evalInt("enablepercentattribute", 0, 0) != 0; const bool bounding = evalInt("enableboundingbox", 0, 0) != 0; const bool boundingobject = evalInt("boundingmode", 0, 0) == 1; const bool viewport = evalInt("enableviewport", 0, 0) != 0; const bool levelset = evalInt("enablesdf", 0, 0); const bool sdfmin = evalInt("enablesdfmin", 0, 0); const bool sdfmax = evalInt("enablesdfmax", 0, 0); const bool viewportadd = evalInt("viewportoperation", 0, 0) == 0; changed |= enableParm("vdbpointsgroup", creation); changed |= enableParm("groupname", creation); changed |= enableParm("enablenumber", creation); changed |= enableParm("numbermode", creation && number); changed |= enableParm("pointpercent", creation && number && !total); changed |= enableParm("pointcount", creation && number && total); changed |= enableParm("enablepercentattribute", creation && number && !total); changed |= enableParm("percentattribute", creation && number && percentattribute && !total); changed |= enableParm("enableboundingbox", creation); changed |= enableParm("boundingmode", creation && bounding); changed |= enableParm("boundingname", creation && bounding && boundingobject); changed |= enableParm("size", creation && bounding && !boundingobject); changed |= enableParm("center", creation && bounding && !boundingobject); changed |= enableParm("viewportoperation", viewport); changed |= enableParm("viewportgroupname", viewport && viewportadd); changed |= enableParm("sdfname", levelset); changed |= enableParm("enablesdfmin", 
levelset); changed |= enableParm("enablesdfmax", levelset); changed |= enableParm("sdfmin", levelset && sdfmin); changed |= enableParm("sdfmax", levelset && sdfmax); changed |= enableParm("sdfinvert", levelset && sdfmin && sdfmax); return changed; } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Points_Group::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Points_Group(net, name, op); } SOP_OpenVDB_Points_Group::SOP_OpenVDB_Points_Group(OP_Network* net, const char* name, OP_Operator* op) : hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Points_Group::Cache::cookVDBSop(OP_Context& context) { try { // Evaluate UI parameters GroupParms parms; if (evalGroupParms(context, parms) >= UT_ERROR_ABORT) return error(); UT_AutoInterrupt progress("Processing Points Group"); hvdb::VdbPrimIterator vdbIt(gdp, parms.mGroup); for (; vdbIt; ++vdbIt) { if (progress.wasInterrupted()) { throw std::runtime_error("processing was interrupted"); } GU_PrimVDB* vdbPrim = *vdbIt; // only process if grid is a PointDataGrid with leaves if(!openvdb::gridConstPtrCast<PointDataGrid>(vdbPrim->getConstGridPtr())) continue; auto&& pointDataGrid = UTvdbGridCast<PointDataGrid>(vdbPrim->getConstGrid()); auto leafIter = pointDataGrid.tree().cbeginLeaf(); if (!leafIter) continue; // Set viewport metadata if no group being created // (copy grid first to ensure metadata is deep copied) if (!parms.mEnable) { if (parms.mEnableViewport) { auto&& outputGrid = UTvdbGridCast<PointDataGrid>(vdbPrim->getGrid()); if (parms.mAddViewport) { setViewportMetadata(outputGrid, parms); } else { removeViewportMetadata(outputGrid); } } } const AttributeSet::Descriptor& descriptor = leafIter->attributeSet().descriptor(); std::vector<std::string> groupsToDrop; bool hasGroupsToDrop = !descriptor.groupMap().empty(); if (hasGroupsToDrop) { // exclude groups mode if (!parms.mDropExcludeGroups.empty()) { // if any groups are to be 
excluded, ignore those to be included // and rebuild them for (const auto& it: descriptor.groupMap()) { if (std::find( parms.mDropExcludeGroups.begin(), parms.mDropExcludeGroups.end(), it.first) == parms.mDropExcludeGroups.end()) { groupsToDrop.push_back(it.first); } } } else if (!parms.mDropAllGroups) { // if any groups are to be included, intersect them with groups that exist for (const auto& groupName : parms.mDropIncludeGroups) { if (descriptor.hasGroup(groupName)) { groupsToDrop.push_back(groupName); } } } } if (hasGroupsToDrop) hasGroupsToDrop = parms.mDropAllGroups || !groupsToDrop.empty(); // If we are not creating groups and there are no groups to drop (due to an empty list or because none of // the chosen ones were actually present), we can continue the loop early here if(!parms.mEnable && !hasGroupsToDrop) { continue; } // Evaluate grid-specific UI parameters if (evalGridGroupParms(pointDataGrid, context, parms) >= UT_ERROR_ABORT) return error(); // deep copy the VDB tree if it is not already unique vdbPrim->makeGridUnique(); auto&& outputGrid = UTvdbGridCast<PointDataGrid>(vdbPrim->getGrid()); // filter and create the point group in the grid if (parms.mEnable) { performGroupFiltering(outputGrid, parms); } // drop groups if (parms.mDropAllGroups) { dropGroups(outputGrid.tree()); } else if (!groupsToDrop.empty()) { dropGroups(outputGrid.tree(), groupsToDrop); } // attach group viewport metadata to the grid if (parms.mEnableViewport) { if (parms.mAddViewport) setViewportMetadata(outputGrid, parms); else removeViewportMetadata(outputGrid); } } return error(); } catch (const std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Points_Group::Cache::evalGroupParms( OP_Context& context, GroupParms& parms) { const fpreal time = context.getTime(); // evaluate filter mode const bool number = evalInt("enablenumber", 0, time); const bool countMode = evalInt("numbermode", 0, time) == 
1; const bool percentAttribute = evalInt("enablepercentattribute", 0, time); const bool bounding = evalInt("enableboundingbox", 0, time); const bool boundingObject = evalInt("boundingmode", 0, time) == 1; const bool levelSet = evalInt("enablesdf", 0, time); parms.mCountMode = countMode; parms.mHashMode = number && !countMode && percentAttribute; parms.mOpLeaf = number; parms.mOpBBox = bounding; parms.mOpLS = levelSet; // Get the grids to group. parms.mGroup = matchGroup(*gdp, evalStdString("group", time)); hvdb::VdbPrimIterator vdbIt(gdp, parms.mGroup); // Handle no vdbs if (!vdbIt) { addError(SOP_MESSAGE, "No VDBs found."); return error(); } // Get and parse the vdb points groups AttributeSet::Descriptor::parseNames( parms.mIncludeGroups, parms.mExcludeGroups, evalStdString("vdbpointsgroup", time)); if (parms.mIncludeGroups.size() > 0 || parms.mExcludeGroups.size() > 0) { parms.mOpGroup = true; } // reference geometry const GU_Detail* refGdp = inputGeo(1); // group creation parms.mEnable = evalInt("enablecreate", 0, time); std::string groupName = evalStdString("groupname", time); if (groupName.empty()) { addWarning(SOP_MESSAGE, "Cannot create a group with an empty name, changing to _"); groupName = "_"; } else if (!AttributeSet::Descriptor::validName(groupName)) { addError(SOP_MESSAGE, ("Group name contains invalid characters - " + groupName).c_str()); return error(); } parms.mGroupName = groupName; // number if (number) { parms.mPercent = static_cast<float>(evalFloat("pointpercent", 0, time)); parms.mCount = evalInt("pointcount", 0, time); parms.mHashAttribute = evalStdString("percentattribute", time); } // bounds if (bounding) { if (boundingObject) { if (!refGdp) { addError(SOP_MESSAGE, "Missing second input"); return error(); } // retrieve bounding object const GA_PrimitiveGroup* boundsGroup = parsePrimitiveGroups( evalStdString("boundingname", time).c_str(), GroupCreator(refGdp)); // compute bounds of bounding object UT_BoundingBox box; box.initBounds(); if 
(boundsGroup) { GA_Range range = refGdp->getPrimitiveRange(boundsGroup); refGdp->enlargeBoundingBox(box, range); } else { refGdp->getBBox(&box); } parms.mBBox.min()[0] = box.xmin(); parms.mBBox.min()[1] = box.ymin(); parms.mBBox.min()[2] = box.zmin(); parms.mBBox.max()[0] = box.xmax(); parms.mBBox.max()[1] = box.ymax(); parms.mBBox.max()[2] = box.zmax(); } else { // store bounding box openvdb::BBoxd::ValueType size( evalFloat("size", 0, time), evalFloat("size", 1, time), evalFloat("size", 2, time)); openvdb::BBoxd::ValueType center( evalFloat("center", 0, time), evalFloat("center", 1, time), evalFloat("center", 2, time)); parms.mBBox = openvdb::BBoxd(center - size/2, center + size/2); } } // level set if (levelSet) { if (!refGdp) { addError(SOP_MESSAGE, "Missing second input"); return error(); } // retrieve level set grid const GA_PrimitiveGroup* levelSetGroup = parsePrimitiveGroups( evalStdString("sdfname", time).c_str(), GroupCreator(refGdp)); for (hvdb::VdbPrimCIterator vdbRefIt(refGdp, levelSetGroup); vdbRefIt; ++vdbRefIt) { if (vdbRefIt->getStorageType() == UT_VDB_FLOAT && vdbRefIt->getGrid().getGridClass() == openvdb::GRID_LEVEL_SET) { parms.mLevelSetGrid = gridConstPtrCast<FloatGrid>((*vdbRefIt)->getConstGridPtr()); break; } } if (!parms.mLevelSetGrid) { addError(SOP_MESSAGE, "Second input has no float VDB level set"); return error(); } bool enableSDFMin = evalInt("enablesdfmin", 0, time); bool enableSDFMax = evalInt("enablesdfmax", 0, time); float sdfMin = enableSDFMin ? static_cast<float>(evalFloat("sdfmin", 0, time)) : -std::numeric_limits<float>::max(); float sdfMax = enableSDFMax ? 
static_cast<float>(evalFloat("sdfmax", 0, time)) : std::numeric_limits<float>::max(); // check level set min and max values if ((enableSDFMin || enableSDFMax) && sdfMin > sdfMax) { addWarning(SOP_MESSAGE, "SDF minimum is greater than SDF maximum," " suggest using the invert toggle instead"); } const float background = parms.mLevelSetGrid->background(); if (enableSDFMin && sdfMin < -background) { addWarning(SOP_MESSAGE, "SDF minimum value is less than the background value of the level set"); } if (enableSDFMax && sdfMax > background) { addWarning(SOP_MESSAGE, "SDF maximum value is greater than the background value of the level set"); } const bool sdfInvert = evalInt("sdfinvert", 0, time); parms.mSDFMin = sdfInvert ? -sdfMin : sdfMin; parms.mSDFMax = sdfInvert ? -sdfMax : sdfMax; } // viewport parms.mEnableViewport = evalInt("enableviewport", 0, time); parms.mAddViewport = evalInt("viewportoperation", 0, time) == 0; std::string viewportGroupName = evalStdString("viewportgroupname", time); if (viewportGroupName == "") { addWarning(SOP_MESSAGE, "Cannot create a viewport group with an empty name, changing to _"); viewportGroupName = "_"; } parms.mViewportGroupName = viewportGroupName; // group deletion AttributeSet::Descriptor::parseNames(parms.mDropIncludeGroups, parms.mDropExcludeGroups, parms.mDropAllGroups, evalStdString("deletegroups", time)); if (parms.mDropAllGroups) { // include groups only apply if not also deleting all groups parms.mDropIncludeGroups.clear(); // if exclude groups is not empty, don't delete all groups if (!parms.mDropExcludeGroups.empty()) { parms.mDropAllGroups = false; } } else { // exclude groups only apply if also deleting all groups parms.mDropExcludeGroups.clear(); } return error(); } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Points_Group::Cache::evalGridGroupParms( const PointDataGrid& grid, OP_Context&, GroupParms& parms) { auto leafIter = grid.tree().cbeginLeaf(); if (!leafIter) return error(); const 
AttributeSet::Descriptor& descriptor = leafIter->attributeSet().descriptor(); // check new group doesn't already exist if (parms.mEnable) { if (descriptor.hasGroup(parms.mGroupName)) { addError(SOP_MESSAGE, ("Cannot create duplicate group - " + parms.mGroupName).c_str()); return error(); } // group if (parms.mOpGroup) { for (const std::string& name : parms.mIncludeGroups) { if (!descriptor.hasGroup(name)) { addError(SOP_MESSAGE, ("Unable to find VDB Points group - " + name).c_str()); return error(); } } for (const std::string& name : parms.mExcludeGroups) { if (!descriptor.hasGroup(name)) { addError(SOP_MESSAGE, ("Unable to find VDB Points group - " + name).c_str()); return error(); } } } // number if (parms.mHashMode) { // retrieve percent attribute type (if it exists) const size_t index = descriptor.find(parms.mHashAttribute); if (index == AttributeSet::INVALID_POS) { addError(SOP_MESSAGE, ("Unable to find attribute - " + parms.mHashAttribute).c_str()); return error(); } parms.mHashAttributeIndex = index; const std::string attributeType = descriptor.valueType(index); if (attributeType == "int32") parms.mOpHashI = true; else if (attributeType == "int64") parms.mOpHashL = true; else { addError(SOP_MESSAGE, ("Unsupported attribute type for percent attribute filtering - " + attributeType).c_str()); return error(); } } } return error(); } //////////////////////////////////////// void SOP_OpenVDB_Points_Group::Cache::performGroupFiltering( PointDataGrid& outputGrid, const GroupParms& parms) { // filter typedefs using HashIFilter = AttributeHashFilter<std::mt19937, int>; using HashLFilter = AttributeHashFilter<std::mt19937_64, long>; using LeafFilter = RandomLeafFilter<PointDataGrid::TreeType, std::mt19937>; using LSFilter = LevelSetFilter<FloatGrid>; // composite typedefs (a combination of the above five filters) // the group filter is always included because it's cheap to execute using GroupHashI = BinaryFilter<MultiGroupFilter, HashIFilter>; using GroupHashL = 
BinaryFilter<MultiGroupFilter, HashLFilter>; using GroupLeaf = BinaryFilter<MultiGroupFilter, LeafFilter>; using GroupLS = BinaryFilter<MultiGroupFilter, LSFilter>; using GroupBBox = BinaryFilter<MultiGroupFilter, BBoxFilter>; using LSHashI = BinaryFilter<LSFilter, HashIFilter>; using LSHashL = BinaryFilter<LSFilter, HashLFilter>; using LSLeaf = BinaryFilter<LSFilter, LeafFilter>; using GroupBBoxHashI = BinaryFilter<GroupBBox, HashIFilter>; using GroupBBoxHashL = BinaryFilter<GroupBBox, HashLFilter>; using GroupBBoxLS = BinaryFilter<GroupBBox, LSFilter>; using GroupBBoxLeaf = BinaryFilter<GroupBBox, LeafFilter>; using GroupLSHashI = BinaryFilter<GroupLS, HashIFilter>; using GroupLSHashL = BinaryFilter<GroupLS, HashLFilter>; using GroupLSLeaf = BinaryFilter<GroupLS, LeafFilter>; using GroupBBoxLSHashI = BinaryFilter<GroupBBox, LSHashI>; using GroupBBoxLSHashL = BinaryFilter<GroupBBox, LSHashL>; using GroupBBoxLSLeaf = BinaryFilter<GroupBBox, LSLeaf>; // grid data PointDataTree& tree = outputGrid.tree(); if (!tree.beginLeaf()) { return; } openvdb::math::Transform& transform = outputGrid.transform(); const std::string groupName = parms.mGroupName; auto targetPoints = static_cast<int>(parms.mCount); if (parms.mOpLeaf && !parms.mCountMode) { targetPoints = int(math::Round( (parms.mPercent * static_cast<double>(pointCount(tree))) / 100.0)); } const AttributeSet& attributeSet = tree.beginLeaf()->attributeSet(); // build filter data MultiGroupFilter groupFilter(parms.mIncludeGroups, parms.mExcludeGroups, attributeSet); BBoxFilter bboxFilter(transform, parms.mBBox); HashIFilter hashIFilter(parms.mHashAttributeIndex, parms.mPercent); HashLFilter hashLFilter(parms.mHashAttributeIndex, parms.mPercent); LeafFilter leafFilter(tree, targetPoints); LSFilter lsFilter(*parms.mLevelSetGrid, transform, parms.mSDFMin, parms.mSDFMax); // build composite filter data GroupHashI groupHashIFilter(groupFilter, hashIFilter); GroupHashL groupHashLFilter(groupFilter, hashLFilter); GroupLeaf 
groupLeafFilter(groupFilter, leafFilter); GroupLS groupLSFilter(groupFilter, lsFilter); GroupBBox groupBBoxFilter(groupFilter, bboxFilter); LSHashI lsHashIFilter(lsFilter, hashIFilter); LSHashL lsHashLFilter(lsFilter, hashLFilter); LSLeaf lsLeafFilter(lsFilter, leafFilter); GroupBBoxHashI groupBBoxHashIFilter(groupBBoxFilter, hashIFilter); GroupBBoxHashL groupBBoxHashLFilter(groupBBoxFilter, hashLFilter); GroupBBoxLS groupBBoxLSFilter(groupBBoxFilter, lsFilter); GroupBBoxLeaf groupBBoxLeafFilter(groupBBoxFilter, leafFilter); GroupLSHashI groupLSHashIFilter(groupLSFilter, hashIFilter); GroupLSHashL groupLSHashLFilter(groupLSFilter, hashLFilter); GroupLSLeaf groupLSLeafFilter(groupLSFilter, leafFilter); GroupBBoxLSHashI groupBBoxLSHashIFilter(groupBBoxFilter, lsHashIFilter); GroupBBoxLSHashL groupBBoxLSHashLFilter(groupBBoxFilter, lsHashLFilter); GroupBBoxLSLeaf groupBBoxLSLeafFilter(groupBBoxFilter, lsLeafFilter); // append the group appendGroup(tree, groupName); // perform group filtering const GroupParms& p = parms; if (p.mOpBBox && p.mOpLS && p.mOpHashI) { setGroupByFilter(tree, groupName, groupBBoxLSHashIFilter); } else if (p.mOpBBox && p.mOpLS && p.mOpHashL) { setGroupByFilter(tree, groupName, groupBBoxLSHashLFilter); } else if (p.mOpBBox && p.mOpLS && p.mOpLeaf) { setGroupByFilter(tree, groupName, groupBBoxLSLeafFilter); } else if (p.mOpBBox && p.mOpHashI) { setGroupByFilter(tree, groupName, groupBBoxHashIFilter); } else if (p.mOpBBox && p.mOpHashL) { setGroupByFilter(tree, groupName, groupBBoxHashLFilter); } else if (p.mOpBBox && p.mOpLeaf) { setGroupByFilter(tree, groupName, groupBBoxLeafFilter); } else if (p.mOpBBox && p.mOpLS) { setGroupByFilter(tree, groupName, groupBBoxLSFilter); } else if (p.mOpLS && p.mOpHashI) { setGroupByFilter(tree, groupName, groupLSHashIFilter); } else if (p.mOpLS && p.mOpHashL) { setGroupByFilter(tree, groupName, groupLSHashLFilter); } else if (p.mOpLS && p.mOpLeaf) { setGroupByFilter(tree, groupName, groupLSLeafFilter); } else 
if (p.mOpBBox) { setGroupByFilter(tree, groupName, groupBBoxFilter); } else if (p.mOpLS) { setGroupByFilter(tree, groupName, groupLSFilter); } else if (p.mOpHashI) { setGroupByFilter(tree, groupName, groupHashIFilter); } else if (p.mOpHashL) { setGroupByFilter(tree, groupName, groupHashLFilter); } else if (p.mOpLeaf) { setGroupByFilter(tree, groupName, groupLeafFilter); } else if (p.mOpGroup) { setGroupByFilter(tree, groupName, groupFilter); } else { setGroup<PointDataTree>(tree, groupName); } } //////////////////////////////////////// void SOP_OpenVDB_Points_Group::Cache::setViewportMetadata( PointDataGrid& outputGrid, const GroupParms& parms) { outputGrid.insertMeta(openvdb_houdini::META_GROUP_VIEWPORT, StringMetadata(parms.mViewportGroupName)); } void SOP_OpenVDB_Points_Group::Cache::removeViewportMetadata( PointDataGrid& outputGrid) { outputGrid.removeMeta(openvdb_houdini::META_GROUP_VIEWPORT); }
34,440
C++
35.215563
117
0.61806
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Sort_Points.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Sort_Points.cc /// /// @author Mihai Alden #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb_houdini/GU_VDBPointTools.h> #include <openvdb/tools/PointPartitioner.h> #include <GA/GA_AttributeFilter.h> #include <GA/GA_ElementWrangler.h> #include <GA/GA_PageIterator.h> #include <GA/GA_SplittableRange.h> #include <GU/GU_Detail.h> #include <PRM/PRM_Parm.h> #include <UT/UT_UniquePtr.h> #include <tbb/blocked_range.h> #include <tbb/parallel_for.h> #include <memory> #include <stdexcept> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; //////////////////////////////////////// // Local Utility Methods namespace { struct CopyElements { CopyElements(GA_PointWrangler& wrangler, const GA_Offset* offsetArray) : mWrangler(&wrangler), mOffsetArray(offsetArray) { } void operator()(const GA_SplittableRange& range) const { GA_Offset start, end; for (GA_PageIterator pageIt = range.beginPages(); !pageIt.atEnd(); ++pageIt) { for (GA_Iterator blockIt(pageIt.begin()); blockIt.blockAdvance(start, end); ) { for (GA_Offset i = start; i < end; ++i) { mWrangler->copyAttributeValues(i, mOffsetArray[i]); } } } } GA_PointWrangler * const mWrangler; GA_Offset const * const mOffsetArray; }; // struct CopyElements struct SetOffsets { using PointPartitioner = openvdb::tools::UInt32PointPartitioner; SetOffsets(const GU_Detail& srcGeo, const PointPartitioner& partitioner, GA_Offset* offsetArray) : mSrcGeo(&srcGeo), mPartitioner(&partitioner), mOffsetArray(offsetArray) { } void operator()(const tbb::blocked_range<size_t>& range) const { size_t idx = 0; for (size_t n = 0, N = range.begin(); n != N; ++n) { idx += mPartitioner->indices(n).size(); // increment to start index } for (size_t n = range.begin(), N = range.end(); n != N; ++n) { for (PointPartitioner::IndexIterator it = mPartitioner->indices(n); it; ++it) { mOffsetArray[idx++] 
= mSrcGeo->pointOffset(*it); } } } GU_Detail const * const mSrcGeo; PointPartitioner const * const mPartitioner; GA_Offset * const mOffsetArray; }; // struct SetOffsets } // unnamed namespace //////////////////////////////////////// // SOP Implementation struct SOP_OpenVDB_Sort_Points: public hvdb::SOP_NodeVDB { SOP_OpenVDB_Sort_Points(OP_Network*, const char* name, OP_Operator*); static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; }; void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "pointgroup", "Point Group") .setChoiceList(&SOP_Node::pointGroupMenu) .setTooltip("A group of points to rasterize.")); parms.add(hutil::ParmFactory(PRM_FLT_J, "binsize", "Bin Size") .setDefault(PRMpointOneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 5) .setTooltip("The size (length of a side) of the cubic bin, in world units.")); hvdb::OpenVDBOpFactory("VDB Sort Points", SOP_OpenVDB_Sort_Points::factory, parms, *table) .setNativeName("") .addInput("points") .setVerb(SOP_NodeVerb::COOK_GENERATOR, []() { return new SOP_OpenVDB_Sort_Points::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Reorder points into spatially-organized bins.\"\"\"\n\ \n\ @overview\n\ \n\ This node reorders Houdini points so that they are sorted into\n\ three-dimensional spatial bins.\n\ By increasing CPU cache locality of point data, sorting can improve the\n\ performance of algorithms such as rasterization that rely on neighbor access.\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } OP_Node* SOP_OpenVDB_Sort_Points::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Sort_Points(net, name, op); } SOP_OpenVDB_Sort_Points::SOP_OpenVDB_Sort_Points(OP_Network* net, const 
char* name, OP_Operator* op) : hvdb::SOP_NodeVDB(net, name, op) { } OP_ERROR SOP_OpenVDB_Sort_Points::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); const GU_Detail* srcGeo = inputGeo(0); UT_UniquePtr<GA_Offset[]> srcOffsetArray; size_t numPoints = 0; { // partition points and construct ordered offset list const GA_PointGroup* pointGroup = parsePointGroups( evalStdString("pointgroup", time).c_str(), GroupCreator(srcGeo)); const fpreal voxelSize = evalFloat("binsize", 0, time); const openvdb::math::Transform::Ptr transform = openvdb::math::Transform::createLinearTransform(voxelSize); GU_VDBPointList<openvdb::Vec3s> points(*srcGeo, pointGroup); openvdb::tools::UInt32PointPartitioner partitioner; partitioner.construct(points, *transform, /*voxel order=*/true); numPoints = points.size(); srcOffsetArray.reset(new GA_Offset[numPoints]); tbb::parallel_for(tbb::blocked_range<size_t>(0, partitioner.size()), SetOffsets(*srcGeo, partitioner, srcOffsetArray.get())); } // order point attributes gdp->appendPointBlock(numPoints); gdp->cloneMissingAttributes(*srcGeo, GA_ATTRIB_POINT, GA_AttributeFilter::selectPublic()); GA_PointWrangler ptWrangler(*gdp, *srcGeo, GA_PointWrangler::INCLUDE_P); UTparallelFor(GA_SplittableRange(gdp->getPointRange()), CopyElements(ptWrangler, srcOffsetArray.get())); } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
6,179
C++
29.294118
100
0.641528
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Densify.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Densify.cc /// /// @author FX R&D OpenVDB team /// /// @brief SOP to replace active tiles with active voxels in OpenVDB grids #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <UT/UT_Interrupt.h> #include <stdexcept> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; class SOP_OpenVDB_Densify: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Densify(OP_Network*, const char* name, OP_Operator*); static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; }; //////////////////////////////////////// // Build UI and register this operator. void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Specify a subset of the input VDBs to be densified.") .setDocumentation( "A subset of the input VDBs to be densified" " (see [specifying volumes|/model/volumes#group])")); hvdb::OpenVDBOpFactory("VDB Densify", SOP_OpenVDB_Densify::factory, parms, *table) .setNativeName("") .addInput("VDBs to densify") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Densify::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Densify sparse VDB volumes.\"\"\"\n\ \n\ @overview\n\ \n\ This node replaces active\n\ [tiles|http://www.openvdb.org/documentation/doxygen/overview.html#secSparsity]\n\ in VDB [trees|http://www.openvdb.org/documentation/doxygen/overview.html#secTree]\n\ with dense, leaf-level voxels.\n\ This is useful for subsequent processing with nodes like [Node:sop/volumevop]\n\ that operate only on leaf voxels.\n\ \n\ WARNING:\n\ Densifying a sparse VDB can significantly increase its memory 
footprint.\n\ \n\ @related\n\ - [OpenVDB Fill|Node:sop/DW_OpenVDBFill]\n\ - [OpenVDB Prune|Node:sop/DW_OpenVDBPrune]\n\ - [Node:sop/vdbactivate]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Densify::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Densify(net, name, op); } SOP_OpenVDB_Densify::SOP_OpenVDB_Densify(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// namespace { struct DensifyOp { DensifyOp() {} template<typename GridT> void operator()(GridT& grid) const { grid.tree().voxelizeActiveTiles(/*threaded=*/true); } }; } OP_ERROR SOP_OpenVDB_Densify::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); // Get the group of grids to process. const GA_PrimitiveGroup* group = this->matchGroup(*gdp, evalStdString("group", time)); // Construct a functor to process grids of arbitrary type. const DensifyOp densifyOp; UT_AutoInterrupt progress("Densifying VDBs"); // Process each VDB primitive that belongs to the selected group. for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { if (progress.wasInterrupted()) { throw std::runtime_error("processing was interrupted"); } hvdb::GEOvdbApply<hvdb::VolumeGridTypes>(**it, densifyOp); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
3,850
C++
25.376712
94
0.648571
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_From_Polygons.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_From_Polygons.cc /// /// @author FX R&D OpenVDB team /// /// @brief Converts a closed mesh of trinagles and/or quads into different VDB volumes. /// The supported volumes are: Signed distance field / level-set, closest primitive grid /// and grids with different mesh attributes (closest UVW, Normal etc.) #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/AttributeTransferUtil.h> #include <openvdb_houdini/GeometryUtil.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/tools/MeshToVolume.h> #include <openvdb/tools/LevelSetUtil.h> #include <openvdb/util/Util.h> #include <CH/CH_Manager.h> #include <PRM/PRM_Parm.h> #include <PRM/PRM_SharedFunc.h> #include <algorithm> // for std::max() #include <sstream> #include <stdexcept> #include <string> #include <limits> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; namespace { enum AttributeClass { POINT_ATTR, VERTEX_ATTR, PRIMITIVE_ATTR }; inline bool evalAttrType(const UT_String& attrStr, UT_String& attrName, int& attrClass) { std::string str = attrStr.toStdString(); const size_t idx = str.find_first_of('.'); if (idx == std::string::npos) return false; attrName = str.substr(idx + 1, str.size() - 1); str = str.substr(0, 2); if (str == "po") attrClass = POINT_ATTR; else if (str == "ve") attrClass = VERTEX_ATTR; else if (str == "pr") attrClass = PRIMITIVE_ATTR; else return false; return true; } inline int lookupAttrInput(const PRM_SpareData* spare) { const char *istring; if (!spare) return 0; istring = spare->getValue("sop_input"); return istring ? 
atoi(istring) : 0; } inline void sopBuildAttrMenu(void* data, PRM_Name* menuEntries, int themenusize, const PRM_SpareData* spare, const PRM_Parm*) { if (data == nullptr || menuEntries == nullptr || spare == nullptr) return; size_t menuIdx = 0; menuEntries[menuIdx].setToken("point.v"); menuEntries[menuIdx++].setLabel("point.v"); SOP_Node* sop = CAST_SOPNODE(static_cast<OP_Node*>(data)); if (sop == nullptr) { // terminate and quit menuEntries[menuIdx].setToken(0); menuEntries[menuIdx].setLabel(0); return; } int inputIndex = lookupAttrInput(spare); const GU_Detail* gdp = sop->getInputLastGeo(inputIndex, CHgetEvalTime()); size_t menuEnd(themenusize - 2); if (gdp) { // point attribute names GA_AttributeDict::iterator iter = gdp->pointAttribs().begin(GA_SCOPE_PUBLIC); if(!iter.atEnd() && menuIdx != menuEnd) { if (menuIdx > 0) { menuEntries[menuIdx].setToken(PRM_Name::mySeparator); menuEntries[menuIdx++].setLabel(PRM_Name::mySeparator); } for (; !iter.atEnd() && menuIdx != menuEnd; ++iter) { std::ostringstream token; token << "point." << (*iter)->getName(); menuEntries[menuIdx].setToken(token.str().c_str()); menuEntries[menuIdx++].setLabel(token.str().c_str()); } } // vertex attribute names iter = gdp->vertexAttribs().begin(GA_SCOPE_PUBLIC); if(!iter.atEnd() && menuIdx != menuEnd) { if (menuIdx > 0) { menuEntries[menuIdx].setToken(PRM_Name::mySeparator); menuEntries[menuIdx++].setLabel(PRM_Name::mySeparator); } for (; !iter.atEnd() && menuIdx != menuEnd; ++iter) { std::ostringstream token; token << "vertex." 
<< (*iter)->getName(); menuEntries[menuIdx].setToken(token.str().c_str()); menuEntries[menuIdx++].setLabel(token.str().c_str()); } } // primitive attribute names iter = gdp->primitiveAttribs().begin(GA_SCOPE_PUBLIC); if(menuIdx != menuEnd) { if (menuIdx > 0) { menuEntries[menuIdx].setToken(PRM_Name::mySeparator); menuEntries[menuIdx++].setLabel(PRM_Name::mySeparator); } for (; !iter.atEnd() && menuIdx != menuEnd; ++iter) { std::ostringstream token; token << "primitive." << (*iter)->getName(); menuEntries[menuIdx].setToken(token.str().c_str()); menuEntries[menuIdx++].setLabel(token.str().c_str()); } // Special case menuEntries[menuIdx].setToken("primitive.primitive_list_index"); menuEntries[menuIdx++].setLabel("primitive.primitive_list_index"); } } // terminator menuEntries[menuIdx].setToken(0); menuEntries[menuIdx].setLabel(0); } const PRM_ChoiceList PrimAttrMenu( PRM_ChoiceListType(PRM_CHOICELIST_REPLACE), sopBuildAttrMenu); } // unnamed namespace //////////////////////////////////////// class SOP_OpenVDB_From_Polygons: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_From_Polygons(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_From_Polygons() override {} static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned i ) const override { return (i == 1); } int convertUnits(); class Cache: public SOP_VDBCacheOptions { public: float voxelSize() const { return mVoxelSize; } protected: OP_ERROR cookVDBSop(OP_Context&) override; private: int constructGenericAtttributeLists( hvdb::AttributeDetailList &pointAttributes, hvdb::AttributeDetailList &vertexAttributes, hvdb::AttributeDetailList &primitiveAttributes, const GU_Detail&, const openvdb::Int32Grid& closestPrimGrid, const float time); template <class ValueType> void addAttributeDetails( hvdb::AttributeDetailList &attributeList, const GA_Attribute *attribute, const GA_AIFTuple *tupleAIF, const int attrTupleSize, const openvdb::Int32Grid& closestPrimGrid, std::string& 
customName, int vecType = -1); void transferAttributes( hvdb::AttributeDetailList &pointAttributes, hvdb::AttributeDetailList &vertexAttributes, hvdb::AttributeDetailList &primitiveAttributes, const openvdb::Int32Grid&, openvdb::math::Transform::Ptr& transform, const GU_Detail&); float mVoxelSize = 0.1f; }; // class Cache protected: bool updateParmsFlags() override; void resolveObsoleteParms(PRM_ParmList*) override; }; //////////////////////////////////////// namespace { // Callback to convert from voxel to world space units int convertUnitsCB(void* data, int /*idx*/, float /*time*/, const PRM_Template*) { SOP_OpenVDB_From_Polygons* sop = static_cast<SOP_OpenVDB_From_Polygons*>(data); if (sop == nullptr) return 0; return sop->convertUnits(); } } // unnamed namespace //////////////////////////////////////// // Build UI and register this operator. void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; ////////// // Output grids // distance field parms.add(hutil::ParmFactory(PRM_TOGGLE, "builddistance", "") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip("Enable / disable the level set output.") .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_STRING, "distancename", "Distance VDB") .setDefault("surface") .setTooltip( "Output a signed distance field VDB with the given name.\n\n" "An SDF stores the distance to the surface in each voxel." " If a voxel is inside the surface, the distance is negative.")); // fog volume parms.add(hutil::ParmFactory(PRM_TOGGLE, "buildfog", "") .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip("Enable / disable the fog volume output.") .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_STRING, "fogname", "Fog VDB") .setDefault("density") .setTooltip( "Output a fog volume VDB with the given name.\n\n" "Voxels inside the surface have value one, and voxels outside" " have value zero. 
Within a narrow band centered on the surface," " voxel values vary linearly from zero to one.\n\n" "Turn on __Fill Interior__ to create a solid VDB" " (from an airtight surface) instead of a narrow band.")); ////////// // Conversion settings parms.add(hutil::ParmFactory(PRM_HEADING, "conversionheading", "Conversion settings")); parms.add(hutil::ParmFactory(PRM_STRING, "group", "Reference VDB") .setChoiceList(&hutil::PrimGroupMenuInput2) .setTooltip( "Give the output VDB the same orientation and voxel size as the selected VDB," " and match the narrow band width if the reference VDB is a level set.") .setDocumentation( "Give the output VDB the same orientation and voxel size as" " the selected VDB (see [specifying volumes|/model/volumes#group])" " and match the narrow band width if the reference VDB is a level set.")); // Voxel size or voxel count menu parms.add(hutil::ParmFactory(PRM_STRING, "sizeorcount", "Voxel") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "worldVoxelSize", "Size in World Units", "countX", "Count Along X Axis", "countY", "Count Along Y Axis", "countZ", "Count Along Z Axis", "countLongest", "Count Along Longest Axis" }) .setDefault("worldVoxelSize") .setTooltip( "How to specify the voxel size: either in world units or as" " a voxel count along one axis")); parms.add(hutil::ParmFactory(PRM_FLT_J, "voxelsize", "Voxel Size") .setDefault(PRMpointOneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 5) .setTooltip( "The desired voxel size in world units\n\n" "Surface features smaller than this will not be represented in the output VDB.")); parms.add(hutil::ParmFactory(PRM_INT_J, "voxelcount", "Voxel Count") .setDefault(100) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 500) .setTooltip( "The desired voxel count along one axis\n\n" "The resulting voxel count might be off by one voxel" " due to roundoff errors during the conversion process.")); // Narrow-band width { parms.add(hutil::ParmFactory(PRM_TOGGLE, "useworldspaceunits", "Use World 
Space Units for Narrow Band") .setCallbackFunc(&convertUnitsCB) .setTooltip( "If enabled, specify the narrow band width in world units," " otherwise in voxels.")); // voxel space units parms.add(hutil::ParmFactory(PRM_INT_J, "exteriorbandvoxels", "Exterior Band Voxels") .setDefault(PRMthreeDefaults) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 10) .setTooltip( "The width of the exterior (distance >= 0) portion of the narrow band\n" "Many level set operations require a minimum of three voxels.") .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_INT_J, "interiorbandvoxels", "Interior Band Voxels") .setDefault(PRMthreeDefaults) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 10) .setTooltip( "The width of the interior (distance < 0) portion of the narrow band\n" "Many level set operations require a minimum of three voxels.") .setDocumentation(nullptr)); // world space units parms.add(hutil::ParmFactory(PRM_FLT_J, "exteriorband", "Exterior Band") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 10) .setTooltip("The width of the exterior (distance >= 0) portion of the narrow band") .setDocumentation( "The width of the exterior (_distance_ => 0) portion of the narrow band")); parms.add(hutil::ParmFactory(PRM_FLT_J, "interiorband", "Interior Band") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 10) .setTooltip("The width of the interior (distance < 0) portion of the narrow band") .setDocumentation( "The width of the interior (_distance_ < 0) portion of the narrow band")); // } // Options parms.add(hutil::ParmFactory(PRM_TOGGLE, "fillinterior", "Fill Interior") .setTooltip( "Extract signed distances for all interior voxels.\n\n" "This operation densifies the interior of the model." 
" It requires a closed, watertight surface.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "unsigneddist", "Unsigned Distance Field") .setTooltip( "Generate an unsigned distance field.\n" "This operation will work on any surface, whether or not it is closed or watertight.") .setDocumentation( "Generate an unsigned distance field.\n\n" "This operation will work on any surface, whether or not" " it is closed or watertight. It is similar to the Minimum" " function of the [Node:sop/isooffset] node.")); ////////// // Mesh attribute transfer {Point, Vertex & Primitive} parms.add(hutil::ParmFactory(PRM_HEADING, "transferheading", "Attribute Transfer")); hutil::ParmList attrParms; // Attribute name attrParms.add(hutil::ParmFactory(PRM_STRING, "attribute#", "Attribute") .setChoiceList(&PrimAttrMenu) .setSpareData(&SOP_Node::theFirstInput) .setTooltip( "A point, vertex, or primitive attribute from which to create a VDB\n\n" "Supports integer and floating point attributes of arbitrary" " precision and tuple size.")); attrParms.add(hutil::ParmFactory(PRM_STRING, "attributeGridName#", "VDB Name") .setTooltip("The name for this VDB primitive (leave blank to use the attribute's name)")); // Vec type menu { std::vector<std::string> items; for (int i = 0; i < openvdb::NUM_VEC_TYPES ; ++i) { items.push_back(openvdb::GridBase::vecTypeToString(openvdb::VecType(i))); items.push_back(openvdb::GridBase::vecTypeExamples(openvdb::VecType(i))); } attrParms.add(hutil::ParmFactory(PRM_ORD, "vecType#", "Vector Type") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setTooltip("How vector values should be interpreted")); } // Add multi parm parms.add(hutil::ParmFactory(PRM_MULTITYPE_LIST, "attrList", "Surface Attributes") .setMultiparms(attrParms) .setDefault(PRMzeroDefaults) .setTooltip( "Generate additional VDB primitives that store the values of" " primitive (face), point, or vertex attributes.") .setDocumentation( "Generate additional VDB primitives that store 
the values of primitive" " (face), point, or vertex [attributes|/model/attributes].\n\n" "Only voxels in the narrow band around the surface will be set.")); ////////// // Obsolete parameters hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "optionsHeading", "")); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "otherHeading", "")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "verbose", "")); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "attrHeading", "")); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "isoOffset", "")); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "gradientWidth", "")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "customGradientWidth", "")); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "sdfHeading", "")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "addSdfGridName", "")); // fix obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "sdfGridName", "")); // fix obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "outputClosestPrimGrid", "")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "closestPrimGridName", "")); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "transformHeading", "")); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "outputHeading", "")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "hermiteData", "")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "hermiteDataGridName", "")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "matchlevelset", "")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "distanceField", "") .setDefault(PRMoneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "distanceFieldGridName", "") .setDefault("surface")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "fogVolume", "")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "fogVolumeGridName", "") .setDefault("density")); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "conversionHeading", "")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "sizeOrCount", "") 
.setDefault("worldVoxelSize")); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "voxelSize", "") .setDefault(PRMpointOneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_INT_J, "voxelCount", "").setDefault(100)); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "worldSpaceUnits", "")); obsoleteParms.add(hutil::ParmFactory(PRM_INT_J, "exteriorBandWidth", "") .setDefault(PRMthreeDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_INT_J, "interiorBandWidth", "") .setDefault(PRMthreeDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "exteriorBandWidthWS", "") .setDefault(PRMoneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "interiorBandWidthWS", "") .setDefault(PRMoneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "fillInterior", "")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "unsignedDist", "")); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "transferHeading", "")); //obsoleteParms.add(hutil::ParmFactory(PRM_MULTITYPE_LIST, "attrList", "") // .setDefault(PRMzeroDefaults)); ///< @todo crashes in OP_Node::createObsoleteParmList() /// @todo obsoleteAttrParms ////////// // Register this operator. 
hvdb::OpenVDBOpFactory("VDB from Polygons", SOP_OpenVDB_From_Polygons::factory, parms, *table) #ifndef SESI_OPENVDB .setInternalName("DW_OpenVDBFromPolygons") #endif .addInput("Polygons to Convert") .addOptionalInput("Optional Reference VDB (for transform matching)") .setObsoleteParms(obsoleteParms) .setVerb(SOP_NodeVerb::COOK_GENERATOR, []() { return new SOP_OpenVDB_From_Polygons::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Convert polygonal surfaces and/or surface attributes into VDB volumes.\"\"\"\n\ \n\ @overview\n\ \n\ This node can create signed or unsigned distance fields\n\ and/or density fields (\"fog volumes\") from polygonal surfaces.\n\ \n\ When you create a fog volume you can choose either to fill the band of voxels\n\ on the surface or (if you have an airtight surface) to fill the interior\n\ of the surface (see the __Fill interior__ parameter).\n\ \n\ Since the resulting VDB volumes store only the voxels near the surface,\n\ they can have a much a higher effective resolution than a traditional volume\n\ created with [Node:sop/isooffset].\n\ \n\ You can connect a VDB to the second input to automatically use that VDB's\n\ orientation and voxel size (see the __Reference VDB__ parameter).\n\ \n\ NOTE:\n\ The input geometry must be a quad or triangle mesh.\n\ This node will convert the input surface into such a mesh if necessary.\n\ \n\ @inputs\n\ \n\ Polygonal mesh to convert:\n\ The polygonal surface to convert.\n\ Optional reference VDB:\n\ If connected, give the output VDB the same orientation and voxel size\n\ as a VDB from this input.\n\ \n\ @related\n\ - [OpenVDB Create|Node:sop/DW_OpenVDBCreate]\n\ - [OpenVDB From Particles|Node:sop/DW_OpenVDBFromParticles]\n\ - [Node:sop/isooffset]\n\ - [Node:sop/vdbfrompolygons]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } //////////////////////////////////////// OP_Node* 
SOP_OpenVDB_From_Polygons::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_From_Polygons(net, name, op); } SOP_OpenVDB_From_Polygons::SOP_OpenVDB_From_Polygons(OP_Network* net, const char* name, OP_Operator* op) : hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// int SOP_OpenVDB_From_Polygons::convertUnits() { const bool toWSUnits = static_cast<bool>(evalInt("useworldspaceunits", 0, 0)); float width; float voxSize = 0.1f; // Attempt to extract the voxel size from our cache. if (const auto* cache = dynamic_cast<SOP_OpenVDB_From_Polygons::Cache*>(myNodeVerbCache)) { voxSize = cache->voxelSize(); } if (toWSUnits) { width = static_cast<float>(evalInt("exteriorbandvoxels", 0, 0)); setFloat("exteriorband", 0, 0, width * voxSize); width = static_cast<float>(evalInt("interiorbandvoxels", 0, 0)); setFloat("interiorband", 0, 0, width * voxSize); return 1; } width = static_cast<float>(evalFloat("exteriorband", 0, 0)); int voxelWidth = std::max(static_cast<int>(width / voxSize), 1); setInt("exteriorbandvoxels", 0, 0, voxelWidth); width = static_cast<float>(evalFloat("interiorband", 0, 0)); voxelWidth = std::max(static_cast<int>(width / voxSize), 1); setInt("interiorbandvoxels", 0, 0, voxelWidth); return 1; } //////////////////////////////////////// void SOP_OpenVDB_From_Polygons::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; //resolveRenamedParm(*obsoleteParms, "attrList", "numattrib"); resolveRenamedParm(*obsoleteParms, "distanceField", "builddistance"); resolveRenamedParm(*obsoleteParms, "distanceFieldGridName", "distancename"); resolveRenamedParm(*obsoleteParms, "fogVolume", "buildfog"); resolveRenamedParm(*obsoleteParms, "fogVolumeGridName", "fogname"); resolveRenamedParm(*obsoleteParms, "sizeOrCount", "sizeorcount"); resolveRenamedParm(*obsoleteParms, "voxelSize", "voxelsize"); resolveRenamedParm(*obsoleteParms, "voxelCount", "voxelcount"); resolveRenamedParm(*obsoleteParms, 
"worldSpaceUnits", "useworldspaceunits"); resolveRenamedParm(*obsoleteParms, "exteriorBandWidth", "exteriorbandvoxels"); resolveRenamedParm(*obsoleteParms, "interiorBandWidth", "interiorbandvoxels"); resolveRenamedParm(*obsoleteParms, "exteriorBandWidthWS", "exteriorband"); resolveRenamedParm(*obsoleteParms, "interiorBandWidthWS", "interiorband"); resolveRenamedParm(*obsoleteParms, "fillInterior", "fillinterior"); resolveRenamedParm(*obsoleteParms, "unsignedDist", "unsigneddist"); // Delegate to the base class. hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } //////////////////////////////////////// // Enable or disable parameters in the UI. bool SOP_OpenVDB_From_Polygons::updateParmsFlags() { bool changed = false; const fpreal time = 0; // No point using CHgetTime as that is unstable. int refexists = (nInputs() == 2); // Transform changed |= enableParm("group", refexists); // Conversion const bool wsUnits = bool(evalInt("useworldspaceunits", 0, time)); const bool fillInterior = bool(evalInt("fillinterior", 0, time)); const bool unsignedDist = bool(evalInt("unsigneddist", 0, time)); // Voxel size or voxel count menu const bool countMenu = (evalStdString("sizeorcount", time) != "worldVoxelSize"); changed |= setVisibleState("voxelsize", !countMenu); changed |= setVisibleState("voxelcount", countMenu); changed |= enableParm("voxelsize", !countMenu && !refexists); changed |= enableParm("voxelcount", countMenu && !refexists); changed |= enableParm("interiorbandvoxels", !wsUnits && !fillInterior && !unsignedDist); changed |= enableParm("exteriorband", wsUnits && !fillInterior && !unsignedDist); changed |= enableParm("exteriorbandvoxels", !wsUnits); changed |= enableParm("exteriorband", wsUnits); changed |= setVisibleState("interiorbandvoxels", !wsUnits); changed |= setVisibleState("exteriorbandvoxels", !wsUnits); changed |= setVisibleState("interiorband", wsUnits); changed |= setVisibleState("exteriorband", wsUnits); changed |= enableParm("fillinterior", 
!unsignedDist); // Output changed |= enableParm("distancename", bool(evalInt("builddistance", 0, time))); changed |= enableParm("fogname", bool(evalInt("buildfog", 0, time)) && !unsignedDist); changed |= enableParm("buildfog", !unsignedDist); // enable / diable vector type menu UT_String attrStr, attrName; GA_ROAttributeRef attrRef; int attrClass = POINT_ATTR; const GU_Detail* meshGdp = this->getInputLastGeo(0, time); for (int i = 1, N = static_cast<int>(evalInt("attrList", 0, time)); i <= N; ++i) { bool isVector = true; if (meshGdp) { isVector = false; evalStringInst("attribute#", &i, attrStr, 0, time); if (attrStr.length() != 0 && evalAttrType(attrStr, attrName, attrClass)) { if (attrClass == POINT_ATTR) { attrRef = meshGdp->findPointAttribute(attrName); } else if (attrClass == VERTEX_ATTR) { attrRef = meshGdp->findVertexAttribute(attrName); } else if (attrClass == PRIMITIVE_ATTR) { attrRef = meshGdp->findPrimitiveAttribute(attrName); } if (attrRef.isValid()) { const GA_Attribute *attr = attrRef.getAttribute(); if (attr) { const GA_TypeInfo typeInfo = attr->getTypeInfo(); isVector = (typeInfo == GA_TYPE_HPOINT || typeInfo == GA_TYPE_POINT || typeInfo == GA_TYPE_VECTOR || typeInfo == GA_TYPE_NORMAL); if (!isVector) { const GA_AIFTuple *tupleAIF = attr->getAIFTuple(); if (tupleAIF) isVector = tupleAIF->getTupleSize(attr) == 3; } } } } } changed |= enableParmInst("vectype#", &i, isVector); changed |= setVisibleStateInst("vectype#", &i, isVector); } return changed; } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_From_Polygons::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); hvdb::Interrupter boss("Converting geometry to volume"); ////////// // Validate the input const GU_Detail* inputGdp = inputGeo(0); if (!inputGdp || !inputGdp->getNumPrimitives()) { addWarning(SOP_MESSAGE, "No mesh to convert"); // We still create the grids as later workflow // may be able to handle an empty grid. 
} // Validate geometry std::string warningStr; auto geoPtr = hvdb::convertGeometry(*inputGdp, warningStr, &boss); if (geoPtr) { inputGdp = geoPtr.get(); if (!warningStr.empty()) addWarning(SOP_MESSAGE, warningStr.c_str()); } ////////// // Evaluate the UI parameters. const bool outputDistanceField = bool(evalInt("builddistance", 0, time)); const bool unsignedDistanceFieldConversion = bool(evalInt("unsigneddist", 0, time)); const bool outputFogVolumeGrid = bool(evalInt("buildfog", 0, time)); const bool outputAttributeGrid = bool(evalInt("attrList", 0, time) > 0); if (!outputDistanceField && !outputFogVolumeGrid && !outputAttributeGrid) { addWarning(SOP_MESSAGE, "No output selected"); return error(); } openvdb::math::Transform::Ptr transform; float inBand = std::numeric_limits<float>::max(), exBand = 0.0; const GU_Detail* refGdp = inputGeo(1); bool secondinput = refGdp != nullptr; if (secondinput) { // Get the first grid's transform const GA_PrimitiveGroup *refGroup = matchGroup(*refGdp, evalStdString("group", time)); hvdb::VdbPrimCIterator gridIter(refGdp, refGroup); if (gridIter) { transform = (*gridIter)->getGrid().transform().copy(); mVoxelSize = static_cast<float>(transform->voxelSize()[0]); ++gridIter; } else { addError(SOP_MESSAGE, "Could not find a reference grid"); return error(); } } else {// derive the voxel size and define output grid's transform UT_String str; evalString(str, "sizeorcount", 0, time); if ( str == "worldVoxelSize" ) { mVoxelSize = static_cast<float>(evalFloat("voxelsize", 0, time)); } else { const float dim = static_cast<float>(evalInt("voxelcount", 0, time)); UT_BoundingBox bbox; inputGdp->getCachedBounds(bbox); const float size = str == "countX" ? bbox.xsize() : str == "countY" ? bbox.ysize() : str == "countZ" ? 
bbox.ysize() : bbox.sizeMax(); if ( evalInt("useworldspaceunits", 0, time) ) { const float w = static_cast<float>(evalFloat("exteriorband", 0, time)); mVoxelSize = (size + 2.0f*w)/dim; } else { const float w = static_cast<float>(evalInt("exteriorbandvoxels", 0, time)); mVoxelSize = size/std::max(1.0f, dim - 2.0f*w); } } // Create a new transform transform = openvdb::math::Transform::createLinearTransform(mVoxelSize); } if (mVoxelSize < 1e-5) { std::ostringstream ostr; ostr << "The voxel size ("<< mVoxelSize << ") is too small."; addError(SOP_MESSAGE, ostr.str().c_str()); return error(); } // Set the narrow-band parameters { const bool wsUnits = static_cast<bool>(evalInt("useworldspaceunits", 0, time)); if (wsUnits) { exBand = static_cast<float>(evalFloat("exteriorband", 0, time) / mVoxelSize); } else { exBand = static_cast<float>(evalInt("exteriorbandvoxels", 0, time)); } if (!bool(evalInt("fillinterior", 0, time))) { if (wsUnits) { inBand = static_cast<float>(evalFloat("interiorband", 0, time) / mVoxelSize); } else { inBand = static_cast<float>(evalInt("interiorbandvoxels", 0, time)); } } } ////////// // Copy the input mesh and transform to local grid space. std::vector<openvdb::Vec3s> pointList; std::vector<openvdb::Vec4I> primList; if (!boss.wasInterrupted()) { pointList.resize(inputGdp->getNumPoints()); primList.resize(inputGdp->getNumPrimitives()); UTparallelFor(GA_SplittableRange(inputGdp->getPointRange()), hvdb::TransformOp(inputGdp, *transform, pointList)); UTparallelFor(GA_SplittableRange(inputGdp->getPrimitiveRange()), hvdb::PrimCpyOp(inputGdp, primList)); } ////////// // Mesh to volume conversion openvdb::tools::QuadAndTriangleDataAdapter<openvdb::Vec3s, openvdb::Vec4I> mesh(pointList, primList); int conversionFlags = unsignedDistanceFieldConversion ? 
openvdb::tools::UNSIGNED_DISTANCE_FIELD : 0; openvdb::Int32Grid::Ptr primitiveIndexGrid; if (outputAttributeGrid) { primitiveIndexGrid.reset(new openvdb::Int32Grid(0)); } openvdb::FloatGrid::Ptr grid = openvdb::tools::meshToVolume<openvdb::FloatGrid>( boss, mesh, *transform, exBand, inBand, conversionFlags, primitiveIndexGrid.get()); ////////// // Output // Distance field / level set if (!boss.wasInterrupted() && outputDistanceField) { hvdb::createVdbPrimitive(*gdp, grid, evalStdString("distancename", time).c_str()); } // Fog volume if (!boss.wasInterrupted() && outputFogVolumeGrid && !unsignedDistanceFieldConversion) { // If no level set grid is exported the original level set // grid is modified in place. openvdb::FloatGrid::Ptr outputGrid; if (outputDistanceField) { outputGrid = grid->deepCopy(); } else { outputGrid = grid; } openvdb::tools::sdfToFogVolume(*outputGrid); hvdb::createVdbPrimitive(*gdp, outputGrid, evalStdString("fogname", time).c_str()); } // Transfer mesh attributes if (!boss.wasInterrupted() && outputAttributeGrid) { hvdb::AttributeDetailList pointAttributes; hvdb::AttributeDetailList vertexAttributes; hvdb::AttributeDetailList primitiveAttributes; int closestPrimIndexInstance = constructGenericAtttributeLists(pointAttributes, vertexAttributes, primitiveAttributes, *inputGdp, *primitiveIndexGrid, float(time)); transferAttributes(pointAttributes, vertexAttributes, primitiveAttributes, *primitiveIndexGrid, transform, *inputGdp); // Export the closest prim idx grid. 
if (!boss.wasInterrupted() && closestPrimIndexInstance > -1) { UT_String gridNameStr; evalStringInst("attributeGridName#", &closestPrimIndexInstance, gridNameStr, 0, time); if (gridNameStr.length() == 0) gridNameStr = "primitive_list_index"; hvdb::createVdbPrimitive( *gdp, primitiveIndexGrid, gridNameStr.toStdString().c_str()); } } if (boss.wasInterrupted()) { addWarning(SOP_MESSAGE, "Process was interrupted"); } boss.end(); } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); } //////////////////////////////////////// // Helper method constructs the attribute detail lists int SOP_OpenVDB_From_Polygons::Cache::constructGenericAtttributeLists( hvdb::AttributeDetailList &pointAttributes, hvdb::AttributeDetailList &vertexAttributes, hvdb::AttributeDetailList &primitiveAttributes, const GU_Detail& meshGdp, const openvdb::Int32Grid& closestPrimGrid, const float time) { UT_String attrStr, attrName; GA_ROAttributeRef attrRef; GA_Range range; int attrClass = POINT_ATTR; int closestPrimIndexInstance = -1; // for each selected attribute for (int i = 1, N = static_cast<int>(evalInt("attrList", 0, time)); i <= N; ++i) { evalStringInst("attribute#", &i, attrStr, 0, time); if (attrStr.length() == 0) continue; if (!evalAttrType(attrStr, attrName, attrClass)) { std::ostringstream ostr; ostr << "Skipped attribute with unrecognized class {point/vertex/prim}: "<< attrStr; addWarning(SOP_MESSAGE, ostr.str().c_str()); continue; } hvdb::AttributeDetailList* attributeList = nullptr; if (attrClass == POINT_ATTR) { attrRef = meshGdp.findPointAttribute(attrName); attributeList = &pointAttributes; } else if (attrClass == VERTEX_ATTR) { attrRef = meshGdp.findVertexAttribute(attrName); attributeList = &vertexAttributes; } else if (attrClass == PRIMITIVE_ATTR) { if (attrName == "primitive_list_index") { // The closest prim idx grid is a special case, // the converter has already generated it for us. 
closestPrimIndexInstance = i; continue; } attrRef = meshGdp.findPrimitiveAttribute(attrName); attributeList = &primitiveAttributes; } if (attrName.length() == 0 || !attrRef.isValid()) { std::ostringstream ostr; ostr << "Skipped unrecognized attribute: "<< attrName; addWarning(SOP_MESSAGE, ostr.str().c_str()); continue; } evalStringInst("attributeGridName#", &i, attrStr, 0, time); std::string customName = attrStr.toStdString(); int vecType = static_cast<int>(evalIntInst("vecType#", &i, 0, time)); const GA_Attribute *attr = attrRef.getAttribute(); if (!attr) { std::ostringstream ostr; ostr << "Skipped unrecognized attribute type for: "<< attrName; addWarning(SOP_MESSAGE, ostr.str().c_str()); continue; } const GA_AIFTuple *tupleAIF = attr->getAIFTuple(); if (!tupleAIF) { std::ostringstream ostr; ostr << "Skipped unrecognized attribute type for: "<< attrName; addWarning(SOP_MESSAGE, ostr.str().c_str()); continue; } const GA_Storage attrStorage = tupleAIF->getStorage(attr); const int attrTupleSize = tupleAIF->getTupleSize(attr); const GA_TypeInfo typeInfo = attr->getTypeInfo(); const bool interpertAsVector = (typeInfo == GA_TYPE_HPOINT || typeInfo == GA_TYPE_POINT || typeInfo == GA_TYPE_VECTOR || typeInfo == GA_TYPE_NORMAL); switch (attrStorage) { case GA_STORE_INT16: case GA_STORE_INT32: if (interpertAsVector || attrTupleSize == 3) { addAttributeDetails<openvdb::Vec3i>(*attributeList, attr, tupleAIF, attrTupleSize, closestPrimGrid, customName, vecType); } else { addAttributeDetails<openvdb::Int32>(*attributeList, attr, tupleAIF, attrTupleSize, closestPrimGrid, customName); } break; case GA_STORE_INT64: addAttributeDetails<openvdb::Int64> (*attributeList, attr, tupleAIF, attrTupleSize, closestPrimGrid, customName); break; case GA_STORE_REAL16: case GA_STORE_REAL32: if (interpertAsVector || attrTupleSize == 3) { addAttributeDetails<openvdb::Vec3s>(*attributeList, attr, tupleAIF, attrTupleSize, closestPrimGrid, customName, vecType); } else { 
addAttributeDetails<float>(*attributeList, attr, tupleAIF, attrTupleSize, closestPrimGrid, customName); } break; case GA_STORE_REAL64: if (interpertAsVector || attrTupleSize == 3) { addAttributeDetails<openvdb::Vec3d>(*attributeList, attr, tupleAIF, attrTupleSize, closestPrimGrid, customName, vecType); } else { addAttributeDetails<double>(*attributeList, attr, tupleAIF, attrTupleSize, closestPrimGrid, customName); } break; default: addWarning(SOP_MESSAGE, "Skipped unrecognized attribute type"); break; } } return closestPrimIndexInstance; } //////////////////////////////////////// template<class ValueType> void SOP_OpenVDB_From_Polygons::Cache::addAttributeDetails( hvdb::AttributeDetailList &attributeList, const GA_Attribute *attribute, const GA_AIFTuple *tupleAIF, const int attrTupleSize, const openvdb::Int32Grid& closestPrimGrid, std::string& customName, int vecType) { // Defines a new type of a tree having the same hierarchy as the incoming // Int32Grid's tree but potentially a different value type. using TreeType = typename openvdb::Int32Grid::TreeType::ValueConverter<ValueType>::Type; using GridType = typename openvdb::Grid<TreeType>; if (vecType != -1) { // Vector grid // Get the attribute's default value. ValueType defValue = hvdb::evalAttrDefault<ValueType>(tupleAIF->getDefaults(attribute), 0); // Construct a new tree that matches the closestPrimGrid's active voxel topology. typename TreeType::Ptr tree( new TreeType(closestPrimGrid.tree(), defValue, openvdb::TopologyCopy())); typename GridType::Ptr grid(GridType::create(tree)); grid->setVectorType(openvdb::VecType(vecType)); attributeList.push_back(hvdb::AttributeDetailBase::Ptr( new hvdb::AttributeDetail<GridType>(grid, attribute, tupleAIF, 0, true))); if (customName.size() > 0) { attributeList[attributeList.size()-1]->name() = customName; } } else { for (int c = 0; c < attrTupleSize; ++c) { // Get the attribute's default value. 
ValueType defValue = hvdb::evalAttrDefault<ValueType>(tupleAIF->getDefaults(attribute), c); // Construct a new tree that matches the closestPrimGrid's active voxel topology. typename TreeType::Ptr tree( new TreeType(closestPrimGrid.tree(), defValue, openvdb::TopologyCopy())); typename GridType::Ptr grid(GridType::create(tree)); attributeList.push_back(hvdb::AttributeDetailBase::Ptr( new hvdb::AttributeDetail<GridType>(grid, attribute, tupleAIF, c))); if (customName.size() > 0) { std::ostringstream name; name << customName; if(attrTupleSize != 1) name << "_" << c; attributeList[attributeList.size()-1]->name() = name.str(); } } } } //////////////////////////////////////// void SOP_OpenVDB_From_Polygons::Cache::transferAttributes( hvdb::AttributeDetailList &pointAttributes, hvdb::AttributeDetailList &vertexAttributes, hvdb::AttributeDetailList &primitiveAttributes, const openvdb::Int32Grid& closestPrimGrid, openvdb::math::Transform::Ptr& transform, const GU_Detail& meshGdp) { // Threaded attribute transfer. hvdb::MeshAttrTransfer transferOp(pointAttributes, vertexAttributes, primitiveAttributes, closestPrimGrid, *transform, meshGdp); transferOp.runParallel(); // Construct and add VDB primitives to the gdp for (size_t i = 0, N = pointAttributes.size(); i < N; ++i) { hvdb::AttributeDetailBase::Ptr& attrDetail = pointAttributes[i]; attrDetail->grid()->setTransform(transform); hvdb::createVdbPrimitive(*gdp, attrDetail->grid(), attrDetail->name().c_str()); } for (size_t i = 0, N = vertexAttributes.size(); i < N; ++i) { hvdb::AttributeDetailBase::Ptr& attrDetail = vertexAttributes[i]; attrDetail->grid()->setTransform(transform); hvdb::createVdbPrimitive(*gdp, attrDetail->grid(), attrDetail->name().c_str()); } for (size_t i = 0, N = primitiveAttributes.size(); i < N; ++i) { hvdb::AttributeDetailBase::Ptr& attrDetail = primitiveAttributes[i]; attrDetail->grid()->setTransform(transform); hvdb::createVdbPrimitive(*gdp, attrDetail->grid(), attrDetail->name().c_str()); } }
44,385
C++
36.174204
100
0.613518
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Visualize.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Visualize.cc /// /// @author FX R&D OpenVDB team /// /// @brief Visualize VDB grids and their tree topology #include <houdini_utils/ParmFactory.h> #include <houdini_utils/geometry.h> #include <openvdb_houdini/GeometryUtil.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/tools/PointIndexGrid.h> #include <openvdb/points/PointDataGrid.h> #ifdef DWA_OPENVDB #include <openvdb_houdini/DW_VDBUtils.h> #endif #include <GA/GA_Handle.h> #include <GA/GA_Types.h> #include <GU/GU_ConvertParms.h> #include <GU/GU_Detail.h> #include <GU/GU_PolyReduce.h> #include <GU/GU_Surfacer.h> #include <PRM/PRM_Parm.h> #include <UT/UT_Interrupt.h> #include <UT/UT_VectorTypes.h> // for UT_Vector3i #include <UT/UT_UniquePtr.h> #include <algorithm> #include <stdexcept> #include <string> #include <type_traits> #include <vector> namespace { template <typename T> struct IsGridTypeIntegral : std::conditional_t< std::is_integral<T>::value || std::is_same<T,openvdb::PointIndex32>::value || std::is_same<T,openvdb::PointIndex64>::value || std::is_same<T,openvdb::PointDataIndex32>::value || std::is_same<T,openvdb::PointDataIndex64>::value , std::true_type , std::false_type> { }; template <typename T> struct IsGridTypeArithmetic : std::conditional_t< IsGridTypeIntegral<T>::value || std::is_floating_point<T>::value , std::true_type , std::false_type> { }; } // anonymous namespace namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; // HAVE_SURFACING_PARM is disabled in H12.5 #ifdef SESI_OPENVDB #define HAVE_SURFACING_PARM 0 #else #define HAVE_SURFACING_PARM 1 #endif enum RenderStyle { STYLE_NONE = 0, STYLE_POINTS, STYLE_WIRE_BOX, STYLE_SOLID_BOX }; enum MeshMode { MESH_NONE = 0, MESH_OPENVDB, MESH_HOUDINI }; class SOP_OpenVDB_Visualize: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Visualize(OP_Network*, const char* name, OP_Operator*); 
~SOP_OpenVDB_Visualize() override = default; static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned i) const override { return (i == 1); } static UT_Vector3 colorLevel(int level) { return sColors[std::max(3-level,0)]; } static const UT_Vector3& colorSign(bool negative) { return sColors[negative ? 5 : 4]; } class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; protected: bool updateParmsFlags() override; void resolveObsoleteParms(PRM_ParmList*) override; private: static const UT_Vector3 sColors[]; }; // Same color scheme as the VDB TOG paper. const UT_Vector3 SOP_OpenVDB_Visualize::sColors[] = { UT_Vector3(0.045f, 0.045f, 0.045f), // 0. Root UT_Vector3(0.0432f, 0.33f, 0.0411023f), // 1. First internal node level UT_Vector3(0.871f, 0.394f, 0.01916f), // 2. Intermediate internal node levels UT_Vector3(0.00608299f, 0.279541f, 0.625f), // 3. Leaf level UT_Vector3(0.523f, 0.0325175f, 0.0325175f), // 4. Value >= ZeroVal (for voxels or tiles) UT_Vector3(0.92f, 0.92f, 0.92f) // 5. 
Value < ZeroVal (for voxels or tiles) }; //////////////////////////////////////// void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; // Group pattern parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Specify a subset of the input VDBs to be processed.") .setDocumentation( "The VDBs to be visualized (see [specifying volumes|/model/volumes#group])")); #if HAVE_SURFACING_PARM // Surfacing parms.add(hutil::ParmFactory(PRM_HEADING, "surfacing", "Surfacing")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "drawsurface", "") .setTypeExtended(PRM_TYPE_TOGGLE_JOIN)); { // Meshing scheme char const * const items[] = { "openvdb", "OpenVDB Mesher", "houdini", "Houdini Surfacer", nullptr }; parms.add(hutil::ParmFactory(PRM_ORD, "mesher", "Mesher") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setTooltip("Select a meshing scheme.") .setDocumentation("The meshing scheme to be used to visualize scalar volumes")); } parms.add(hutil::ParmFactory(PRM_FLT_J, "adaptivity", "Adaptivity") .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_RESTRICTED, 1.0) .setDocumentation( "How closely to match the surface\n\n" "Higher adaptivity allows for more variation in polygon size," " so that fewer polygons are used to represent the surface.")); //parms.add(hutil::ParmFactory(PRM_TOGGLE, "computeNormals", "Compute Point Normals")); parms.add(hutil::ParmFactory(PRM_FLT_J, "isoValue", "Isovalue") .setRange(PRM_RANGE_FREE, -2.0, PRM_RANGE_FREE, 2.0) .setDocumentation("The isovalue of the surface to be meshed")); parms.add( hutil::ParmFactory(PRM_RGB_J, "surfaceColor", "Surface Color") .setDefault(std::vector<PRM_Default>(3, PRM_Default(0.84))) // RGB = (0.84, 0.84, 0.84) .setVectorSize(3) .setDocumentation("The color of the surface mesh")); // Tree Topology parms.add(hutil::ParmFactory(PRM_HEADING,"treeTopology", "Tree Topology")); #endif // HAVE_SURFACING_PARM 
parms.add(hutil::ParmFactory(PRM_TOGGLE, "addcolor", "Color") .setDefault(PRMoneDefaults) .setTooltip("Specify whether to draw in color.") .setDocumentation( "Specify whether to generate geometry with the `Cd` color attribute.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "previewfrustum", "Frustum") .setTooltip( "Specify whether to draw the camera frustums\n" "of VDBs with frustum transforms.") .setDocumentation( "For VDBs with [frustum transforms|http://www.openvdb.org/documentation/" "doxygen/transformsAndMaps.html#sFrustumTransforms]," " generate geometry representing the frustum bounding box.")); #ifdef DWA_OPENVDB parms.add(hutil::ParmFactory(PRM_TOGGLE, "previewroi", "Region of Interest") .setDocumentation( "If enabled, generate geometry representing the region of interest" " (for VDBs with ROI metadata).")); #endif char const * const boxItems[] = { "wirebox", "Wireframe Boxes", "box", "Solid Boxes", nullptr }; char const * const pointAndBoxItems[] = { "points", "Points", "wirebox", "Wireframe Boxes", "box", "Solid Boxes", nullptr }; parms.add(hutil::ParmFactory(PRM_TOGGLE, "drawleafnodes", "") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN)); parms.add(hutil::ParmFactory(PRM_STRING, "leafstyle", "Leaf Nodes") .setChoiceListItems(PRM_CHOICELIST_SINGLE, pointAndBoxItems) .setDefault("wirebox") .setDocumentation( "Specify whether to render the leaf nodes of VDB trees" " as wireframe boxes, as solid boxes, or as a single point" " in the middle of each node.\n\n" "If __Color__ is enabled, leaf nodes will be blue.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "drawinternalnodes", "") .setTypeExtended(PRM_TYPE_TOGGLE_JOIN)); parms.add(hutil::ParmFactory(PRM_STRING, "internalstyle", "Internal Nodes") .setChoiceListItems(PRM_CHOICELIST_SINGLE, boxItems) .setDefault("wirebox") .setDocumentation( "Specify whether to render the internal nodes of VDB trees" " as wireframe boxes or as solid boxes.\n\n" "If __Color__ is enabled, the lowest-level internal 
nodes will be green" " and higher-level internal nodes will be orange.")); // Active tiles parms.add(hutil::ParmFactory(PRM_TOGGLE, "drawtiles", "") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN)); parms.add(hutil::ParmFactory(PRM_STRING, "tilestyle", "Active Tiles") .setChoiceListItems(PRM_CHOICELIST_SINGLE, pointAndBoxItems) .setDefault("wirebox") .setDocumentation( "Specify whether to render the active tiles of VDB trees" " as wireframe boxes, as solid boxes, or as a single point" " in the middle of each tile.\n\n" "If __Color__ is enabled, negative-valued tiles will be white" " and nonnegative tiles will be red.")); // Active voxels parms.add(hutil::ParmFactory(PRM_TOGGLE, "drawvoxels", "") .setTypeExtended(PRM_TYPE_TOGGLE_JOIN)); parms.add(hutil::ParmFactory(PRM_ORD, "voxelstyle", "Active Voxels") .setChoiceListItems(PRM_CHOICELIST_SINGLE, pointAndBoxItems) .setDefault("points") .setDocumentation( "Specify whether to render the active voxels of VDB trees" " as wireframe boxes, as solid boxes, or as a single point" " in the middle of each voxel.\n\n" "If __Color__ is enabled, negative-valued voxels will be white" " and nonnegative voxels will be red.\n\n" "WARNING:\n" " Rendering active voxels as boxes can generate large amounts of geometry.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "ignorestaggered", "Ignore Staggered Vectors") .setTooltip("Draw staggered vectors as if they were collocated.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "addindexcoord", "Points with Index Coordinates") .setTooltip("Add a voxel/tile index coordinate attribute to points.") .setDocumentation( "For voxels, tiles, and leaf nodes rendered as points, add an attribute to" " the points that gives the coordinates of the points in the VDB's [index space|" "http://www.openvdb.org/documentation/doxygen/overview.html#secSpaceAndTrans].")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "addvalue", "Points with Values") .setTooltip("Add a voxel/tile value attribute to 
points.") .setDocumentation( "For voxels and tiles rendered as points, add an attribute to the points" " that gives the voxel and tile values.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "usegridname", "Name Point Attributes After VDBs") .setTooltip( "If enabled, use the VDB name as the attribute name when\n" "displaying points with values.\n" "If disabled or if a VDB has no name, use either \"vdb_int\",\n" "\"vdb_float\" or \"vdb_vec3f\" as the attribute name.") .setDocumentation( "If enabled, name the attribute added by __Points with Values__ after" " the VDB primitive. If disabled or if a VDB has no name, name the point" " attribute according to its type: `vdb_int`, `vdb_float`, `vdb_vec3f`, etc.")); // Obsolete parameters hutil::ParmList obsoleteParms; obsoleteParms.beginSwitcher("tabMenu"); obsoleteParms.addFolder("Tree Topology"); obsoleteParms.endSwitcher(); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING,"renderOptions", "Render Options")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "extractMesh", "Extract Mesh")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "computeNormals", "Compute Point Normals")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "reverse", "Reverse Faces")); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "optionsHeading", "Options")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "verbose", "Verbose")); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "Other", "Other")); { char const * const items[] = { "none", "Disabled", "opevdb", "OpenVDB Mesher", // note the misspelling "houdini", "Houdini Surfacer", nullptr }; obsoleteParms.add(hutil::ParmFactory(PRM_ORD, "meshing", "Meshing") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items)); } { char const * const items[] = { "none", "Disabled", "leaf", "Leaf Nodes and Active Tiles", "nonconst", "Leaf and Internal Nodes", nullptr }; obsoleteParms.add(hutil::ParmFactory(PRM_ORD, "nodes", "Tree Nodes") .setDefault(PRMoneDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, 
items)); } { char const * const items[] = { "none", "Disabled", "points", "Points", "pvalue", "Points with Values", "wirebox", "Wireframe Box", "box", "Solid Box", nullptr }; obsoleteParms.add(hutil::ParmFactory(PRM_ORD, "tiles", "Active Constant Tiles") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items)); obsoleteParms.add(hutil::ParmFactory(PRM_ORD, "voxels", "Active Voxels") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items)); } obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "previewFrustum", "Frustum")); obsoleteParms.add(hutil::ParmFactory(PRM_ORD, "leafmode", "Leaf Nodes") .setChoiceListItems(PRM_CHOICELIST_SINGLE, boxItems)); obsoleteParms.add(hutil::ParmFactory(PRM_ORD, "internalmode", "Internal Nodes") .setChoiceListItems(PRM_CHOICELIST_SINGLE, boxItems)); obsoleteParms.add(hutil::ParmFactory(PRM_ORD, "tilemode", "Active Tiles") .setDefault(PRMoneDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, pointAndBoxItems)); obsoleteParms.add(hutil::ParmFactory(PRM_ORD, "voxelmode", "Active Voxels") .setChoiceListItems(PRM_CHOICELIST_SINGLE, pointAndBoxItems)); #ifndef DWA_OPENVDB // We probably need this to share hip files. obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "previewroi", "")); #endif // Register this operator. 
hvdb::OpenVDBOpFactory("VDB Visualize Tree", SOP_OpenVDB_Visualize::factory, parms, *table) #ifndef SESI_OPENVDB .setInternalName("DW_OpenVDBVisualize") #endif .setObsoleteParms(obsoleteParms) .addInput("Input with VDBs to visualize") .setVerb(SOP_NodeVerb::COOK_GENERATOR, []() { return new SOP_OpenVDB_Visualize::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Generate geometry to visualize the internal\n\ [tree structure|http://www.openvdb.org/documentation/doxygen/overview.html#secTree]\n\ of a VDB volume.\"\"\"\n\ \n\ @overview\n\ \n\ This node can be a useful troubleshooting tool.\n\ Among other things, it allows one to evaluate the\n\ [sparseness|http://www.openvdb.org/documentation/doxygen/overview.html#secSparsity]\n\ of VDB volumes as well as to examine their extents and the values of individual voxels.\n\ \n\ @related\n\ - [Node:sop/vdbvisualizetree]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Visualize::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Visualize(net, name, op); } SOP_OpenVDB_Visualize::SOP_OpenVDB_Visualize(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// void SOP_OpenVDB_Visualize::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; const fpreal time = 0.0; // The "extractMesh" toggle switched Houdini surfacing on or off. PRM_Parm* parm = obsoleteParms->getParmPtr("extractMesh"); if (parm && !parm->isFactoryDefault()) { const bool extractMesh = obsoleteParms->evalInt("extractMesh", 0, time); setInt("drawsurface", 0, time, extractMesh); if (extractMesh) setString(UT_String("houdini"), CH_STRING_LITERAL, "mesher", 0, time); } // The "meshing" menu included a "Disabled" option, which is now handled with a toggle. 
parm = obsoleteParms->getParmPtr("meshing"); if (parm && !parm->isFactoryDefault()) { // 0: disabled, 1: OpenVDB mesher, 2: Houdini surfacer const int meshing = obsoleteParms->evalInt("meshing", 0, time); setInt("drawsurface", 0, time, meshing > 0); if (meshing) { setString(UT_String(meshing == 2 ? "houdini" : "openvdb"), CH_STRING_LITERAL, "mesher", 0, time); } } // The "nodes", "tiles" and "voxels" menus all included "Disabled" options, // which are now handled with toggles. The old scheme also had two conflicting // ways to enable display of tiles, which the following attempts to reconcile. const UT_String pointStr("points"), boxStr("box"), wireStr("wirebox"); parm = obsoleteParms->getParmPtr("nodes"); if (parm && !parm->isFactoryDefault()) { // 0: disabled, 1: leaf nodes and active tiles, 2: leaf and internal nodes const int mode = obsoleteParms->evalInt("nodes", 0, time); // Enable leaf nodes if the old mode was not "Disabled". setInt("drawleafnodes", 0, time, mode > 0); // Set the leaf style to wire box, but only if leaf nodes are displayed. if (mode > 0) setString(wireStr, CH_STRING_LITERAL, "leafstyle", 0, time); // Enable internal nodes if the old mode was "Leaf and Internal Nodes". setInt("drawinternalnodes", 0, time, mode == 2); // Set the internal node style to wire box, but only if internal nodes are displayed. if (mode == 2) { setString(wireStr, CH_STRING_LITERAL, "internalstyle", 0, time); } // Disable tiles if the old mode was not "Leaf Nodes and Active Tiles". setInt("drawtiles", 0, time, mode == 1); if (mode == 1) { // Display tiles as wire boxes if the old mode was "Leaf Nodes and Active Tiles". // (This setting took precedence over the tile mode, below.) 
setString(wireStr, CH_STRING_LITERAL, "tilestyle", 0, time); } } parm = obsoleteParms->getParmPtr("tiles"); if (parm && !parm->isFactoryDefault()) { // 0: disabled, 1: points, 2: points with values, 3: wire boxes, 4: solid boxes const int mode = obsoleteParms->evalInt("tiles", 0, time); if (mode > 0) setInt("drawtiles", 0, time, true); switch (mode) { case 1: setString(pointStr, CH_STRING_LITERAL, "tilestyle", 0, time); setInt("addvalue", 0, time, false); break; case 2: setString(pointStr, CH_STRING_LITERAL, "tilestyle", 0, time); setInt("addvalue", 0, time, true); break; case 3: setString(wireStr, CH_STRING_LITERAL, "tilestyle", 0, time); break; case 4: setString(boxStr, CH_STRING_LITERAL, "tilestyle", 0, time); break; } } parm = obsoleteParms->getParmPtr("voxels"); if (parm && !parm->isFactoryDefault()) { // 0: disabled, 1: points, 2: points with values, 3: wire boxes, 4: solid boxes const int mode = obsoleteParms->evalInt("voxels", 0, time); setInt("drawvoxels", 0, time, mode > 0); switch (mode) { case 1: setString(pointStr, CH_STRING_LITERAL, "voxelstyle", 0, time); setInt("addvalue", 0, time, false); break; case 2: setString(pointStr, CH_STRING_LITERAL, "voxelstyle", 0, time); setInt("addvalue", 0, time, true); break; case 3: setString(wireStr, CH_STRING_LITERAL, "voxelstyle", 0, time); break; case 4: setString(boxStr, CH_STRING_LITERAL, "voxelstyle", 0, time); break; } } for (const auto* name: {"leaf", "internal"}) { const auto oldName = std::string(name) + "mode"; const auto newName = std::string(name) + "style"; parm = obsoleteParms->getParmPtr(oldName.c_str()); if (parm && !parm->isFactoryDefault()) { const int mode = obsoleteParms->evalInt(oldName.c_str(), 0, time); setString(mode == 0 ? 
wireStr : boxStr, CH_STRING_LITERAL, newName.c_str(), 0, time); } } for (const auto* name: {"tile", "voxel"}) { const auto oldName = std::string(name) + "mode"; const auto newName = std::string(name) + "style"; parm = obsoleteParms->getParmPtr(oldName.c_str()); if (parm && !parm->isFactoryDefault()) { switch (obsoleteParms->evalInt(oldName.c_str(), 0, time)) { case 0: setString(pointStr, CH_STRING_LITERAL, newName.c_str(), 0, time); break; case 1: setString(wireStr, CH_STRING_LITERAL, newName.c_str(), 0, time); break; case 2: setString(boxStr, CH_STRING_LITERAL, newName.c_str(), 0, time); break; } } } resolveRenamedParm(*obsoleteParms, "previewFrustum", "previewfrustum"); // Delegate to the base class. hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } // Update UI parm display bool SOP_OpenVDB_Visualize::updateParmsFlags() { bool changed = false; const fpreal time = 0.0; #if HAVE_SURFACING_PARM const bool extractMesh = bool(evalInt("drawsurface", 0, time)); changed |= enableParm("mesher", extractMesh); //changed += enableParm("computeNormals", extractMesh); changed |= enableParm("adaptivity", extractMesh); changed |= enableParm("surfaceColor", extractMesh); changed |= enableParm("isoValue", extractMesh); #endif const std::string leafMode = evalStdString("leafstyle", time), tileMode = evalStdString("tilestyle", time), voxelMode = evalStdString("voxelstyle", time); const bool drawLeafNodes = bool(evalInt("drawleafnodes", 0, time)), drawVoxels = bool(evalInt("drawvoxels", 0, time)), drawTiles = bool(evalInt("drawtiles", 0, time)), drawPoints = (drawTiles && tileMode == "points") || (drawVoxels && voxelMode == "points"); changed |= enableParm("leafstyle", drawLeafNodes); changed |= enableParm("internalstyle", bool(evalInt("drawinternalnodes", 0, time))); changed |= enableParm("tilestyle", drawTiles); changed |= enableParm("voxelstyle", drawVoxels); changed |= enableParm("ignorestaggered", drawVoxels); changed |= enableParm("addvalue", drawPoints); changed |= 
enableParm("addindexcoord", drawPoints || (drawLeafNodes && leafMode == "points")); changed |= enableParm("usegridname", drawPoints); return changed; } //////////////////////////////////////// template<typename OpType> inline RenderStyle evalRenderStyle(OpType& op, const char* toggleName, const char* modeName, fpreal time) { RenderStyle style = STYLE_NONE; if (op.evalInt(toggleName, 0, time)) { const std::string mode = op.evalStdString(modeName, time); if (mode == "points") { style = STYLE_POINTS; } else if (mode == "wirebox") { style = STYLE_WIRE_BOX; } else if (mode == "box") { style = STYLE_SOLID_BOX; } } return style; } //////////////////////////////////////// inline void createBox(GU_Detail& geo, const openvdb::math::Transform& xform, const openvdb::CoordBBox& bbox, const UT_Vector3* color = nullptr, bool solid = false) { struct Local { static inline UT_Vector3 Vec3dToUTV3(const openvdb::Vec3d& v) { return UT_Vector3(float(v.x()), float(v.y()), float(v.z())); } }; UT_Vector3 corners[8]; #if 1 // Nodes are rendered as cell-centered (0.5 voxel dilated) AABBox in world space const openvdb::Vec3d min(bbox.min().x()-0.5, bbox.min().y()-0.5, bbox.min().z()-0.5); const openvdb::Vec3d max(bbox.max().x()+0.5, bbox.max().y()+0.5, bbox.max().z()+0.5); #else // Render as node-centered (used for debugging) const openvdb::Vec3d min(bbox.min().x(), bbox.min().y(), bbox.min().z()); const openvdb::Vec3d max(bbox.max().x()+1.0, bbox.max().y()+1.0, bbox.max().z()+1.0); #endif openvdb::Vec3d ptn = xform.indexToWorld(min); corners[0] = Local::Vec3dToUTV3(ptn); ptn = openvdb::Vec3d(min.x(), min.y(), max.z()); ptn = xform.indexToWorld(ptn); corners[1] = Local::Vec3dToUTV3(ptn); ptn = openvdb::Vec3d(max.x(), min.y(), max.z()); ptn = xform.indexToWorld(ptn); corners[2] = Local::Vec3dToUTV3(ptn); ptn = openvdb::Vec3d(max.x(), min.y(), min.z()); ptn = xform.indexToWorld(ptn); corners[3] = Local::Vec3dToUTV3(ptn); ptn = openvdb::Vec3d(min.x(), max.y(), min.z()); ptn = 
xform.indexToWorld(ptn); corners[4] = Local::Vec3dToUTV3(ptn); ptn = openvdb::Vec3d(min.x(), max.y(), max.z()); ptn = xform.indexToWorld(ptn); corners[5] = Local::Vec3dToUTV3(ptn); ptn = xform.indexToWorld(max); corners[6] = Local::Vec3dToUTV3(ptn); ptn = openvdb::Vec3d(max.x(), max.y(), min.z()); ptn = xform.indexToWorld(ptn); corners[7] = Local::Vec3dToUTV3(ptn); hutil::createBox(geo, corners, color, solid); } //////////////////////////////////////// struct TreeParms { RenderStyle internalStyle = STYLE_NONE; RenderStyle tileStyle = STYLE_NONE; RenderStyle leafStyle = STYLE_NONE; RenderStyle voxelStyle = STYLE_NONE; bool addColor = true; bool ignoreStaggeredVectors = false; bool addValue = false; bool addIndexCoord = false; bool useGridName = false; }; class TreeVisualizer { public: TreeVisualizer(GU_Detail&, const TreeParms&, hvdb::Interrupter* = nullptr); template<typename GridType> void operator()(const GridType&); private: /// @param pos position in index coordinates GA_Offset createPoint(const openvdb::Vec3d& pos); GA_Offset createPoint(const openvdb::CoordBBox&, const UT_Vector3& color); template<typename ValType> typename std::enable_if<IsGridTypeIntegral<ValType>::value>::type addPoint(const openvdb::CoordBBox&, const UT_Vector3& color, ValType s, bool); template<typename ValType> typename std::enable_if<std::is_floating_point<ValType>::value>::type addPoint(const openvdb::CoordBBox&, const UT_Vector3& color, ValType s, bool); template<typename ValType> typename std::enable_if<!IsGridTypeArithmetic<ValType>::value>::type addPoint(const openvdb::CoordBBox&, const UT_Vector3& color, ValType v, bool staggered); void addPoint(const openvdb::CoordBBox&, const UT_Vector3& color, bool staggered); void addBox(const openvdb::CoordBBox&, const UT_Vector3& color, bool solid); bool wasInterrupted(int percent = -1) const { return mInterrupter && mInterrupter->wasInterrupted(percent); } TreeParms mParms; GU_Detail* mGeo; hvdb::Interrupter* mInterrupter; const 
openvdb::math::Transform* mXform; GA_RWHandleF mFloatHandle; GA_RWHandleI mInt32Handle; GA_RWHandleV3 mVec3fHandle; GA_RWHandleV3 mCdHandle; GA_RWHandleT<UT_Vector3i> mIndexCoordHandle; }; //////////////////////////////////////// TreeVisualizer::TreeVisualizer(GU_Detail& geo, const TreeParms& parms, hvdb::Interrupter* interrupter) : mParms(parms) , mGeo(&geo) , mInterrupter(interrupter) , mXform(nullptr) { } template<typename GridType> void TreeVisualizer::operator()(const GridType& grid) { using TreeType = typename GridType::TreeType; mXform = &grid.transform(); const bool staggered = !mParms.ignoreStaggeredVectors && (grid.getGridClass() == openvdb::GRID_STAGGERED); //{ // Create point attributes. if (mParms.addColor) { mCdHandle.bind(mGeo->findDiffuseAttribute(GA_ATTRIB_POINT)); if (!mCdHandle.isValid()) { mCdHandle.bind(mGeo->addDiffuseAttribute(GA_ATTRIB_POINT)); } } if (mParms.addIndexCoord && ((mParms.tileStyle == STYLE_POINTS) || (mParms.voxelStyle == STYLE_POINTS) || (mParms.leafStyle == STYLE_POINTS))) { const UT_String attrName = "vdb_ijk"; GA_RWAttributeRef attribHandle = mGeo->findIntTuple(GA_ATTRIB_POINT, attrName, 3); if (!attribHandle.isValid()) { attribHandle = mGeo->addIntTuple(GA_ATTRIB_POINT, attrName, 3, GA_Defaults(0)); } mIndexCoordHandle = attribHandle.getAttribute(); UT_String varName = attrName; varName.toUpper(); mGeo->addVariableName(attrName, varName); } if (mParms.addValue && ((mParms.tileStyle == STYLE_POINTS) || (mParms.voxelStyle == STYLE_POINTS))) { const std::string valueType = grid.valueType(); UT_String attrName; if (mParms.useGridName) { attrName = grid.getName(); attrName.forceValidVariableName(); } if (valueType == openvdb::typeNameAsString<float>() || valueType == openvdb::typeNameAsString<double>()) { if (!attrName.isstring()) attrName = "vdb_float"; UT_String varName = attrName; varName.toUpper(); GA_RWAttributeRef attribHandle = mGeo->findFloatTuple(GA_ATTRIB_POINT, attrName, 1); if (!attribHandle.isValid()) { 
attribHandle = mGeo->addFloatTuple( GA_ATTRIB_POINT, attrName, 1, GA_Defaults(0)); } mFloatHandle = attribHandle.getAttribute(); mGeo->addVariableName(attrName, varName); } else if (valueType == openvdb::typeNameAsString<int32_t>() || valueType == openvdb::typeNameAsString<int64_t>() || valueType == openvdb::typeNameAsString<bool>()) { if (!attrName.isstring()) attrName = "vdb_int"; UT_String varName = attrName; varName.toUpper(); GA_RWAttributeRef attribHandle = mGeo->findIntTuple(GA_ATTRIB_POINT, attrName, 1); if (!attribHandle.isValid()) { attribHandle = mGeo->addIntTuple( GA_ATTRIB_POINT, attrName, 1, GA_Defaults(0)); } mInt32Handle = attribHandle.getAttribute(); mGeo->addVariableName(attrName, varName); } else if (valueType == openvdb::typeNameAsString<openvdb::Vec3s>() || valueType == openvdb::typeNameAsString<openvdb::Vec3d>()) { if (!attrName.isstring()) attrName = "vdb_vec3f"; UT_String varName = attrName; varName.toUpper(); GA_RWAttributeRef attribHandle = mGeo->findFloatTuple(GA_ATTRIB_POINT, attrName, 3); if (!attribHandle.isValid()) { attribHandle = mGeo->addFloatTuple( GA_ATTRIB_POINT, attrName, 3, GA_Defaults(0)); } mVec3fHandle = attribHandle.getAttribute(); mGeo->addVariableName(attrName, varName); } else { throw std::runtime_error( "value attributes are not supported for values of type " + valueType); } } //} // Render nodes. if (mParms.internalStyle || mParms.leafStyle) { openvdb::CoordBBox bbox; for (typename TreeType::NodeCIter iter(grid.tree()); iter; ++iter) { if (iter.getDepth() == 0) continue; // don't draw the root node const bool isLeaf = (iter.getLevel() == 0); if (isLeaf && !mParms.leafStyle) continue; if (!isLeaf && !mParms.internalStyle) continue; const bool solid = (isLeaf ? 
mParms.leafStyle == STYLE_SOLID_BOX : mParms.internalStyle == STYLE_SOLID_BOX); const auto color = SOP_OpenVDB_Visualize::colorLevel(iter.getLevel()); iter.getBoundingBox(bbox); if (isLeaf && mParms.leafStyle == STYLE_POINTS) { addPoint(bbox, color, staggered); } else { addBox(bbox, color, solid); } } } if (!mParms.tileStyle && !mParms.voxelStyle) return; // Render tiles and voxels. openvdb::CoordBBox bbox; for (auto iter = grid.cbeginValueOn(); iter; ++iter) { if (wasInterrupted()) break; const int style = iter.isVoxelValue() ? mParms.voxelStyle : mParms.tileStyle; if (style == STYLE_NONE) continue; const bool negative = openvdb::math::isNegative(iter.getValue()); const UT_Vector3& color = SOP_OpenVDB_Visualize::colorSign(negative); iter.getBoundingBox(bbox); if (style == STYLE_POINTS) { if (mParms.addValue) { addPoint(bbox, color, iter.getValue(), staggered); } else { addPoint(bbox, color, staggered); } } else { addBox(bbox, color, style == STYLE_SOLID_BOX); } } } inline GA_Offset TreeVisualizer::createPoint(const openvdb::Vec3d& pos) { openvdb::Vec3d wpos = mXform->indexToWorld(pos); GA_Offset offset = mGeo->appendPointOffset(); mGeo->setPos3(offset, wpos[0], wpos[1], wpos[2]); if (mIndexCoordHandle.isValid()) { // Attach the (integer) index coordinates of the voxel at the given pos. 
openvdb::Coord idxPos = openvdb::Coord::floor(pos); mIndexCoordHandle.set(offset, UT_Vector3i(idxPos[0], idxPos[1], idxPos[2])); } return offset; } inline GA_Offset TreeVisualizer::createPoint(const openvdb::CoordBBox& bbox, const UT_Vector3& color) { openvdb::Vec3d pos = openvdb::Vec3d(0.5*(bbox.min().x()+bbox.max().x()), 0.5*(bbox.min().y()+bbox.max().y()), 0.5*(bbox.min().z()+bbox.max().z())); GA_Offset offset = createPoint(pos); if (mCdHandle.isValid()) mCdHandle.set(offset, color); return offset; } template<typename ValType> typename std::enable_if<IsGridTypeIntegral<ValType>::value>::type TreeVisualizer::addPoint(const openvdb::CoordBBox& bbox, const UT_Vector3& color, ValType s, bool) { mInt32Handle.set(createPoint(bbox, color), int(s)); } template<typename ValType> typename std::enable_if<std::is_floating_point<ValType>::value>::type TreeVisualizer::addPoint(const openvdb::CoordBBox& bbox, const UT_Vector3& color, ValType s, bool) { mFloatHandle.set(createPoint(bbox, color), float(s)); } template<typename ValType> typename std::enable_if<!IsGridTypeArithmetic<ValType>::value>::type TreeVisualizer::addPoint(const openvdb::CoordBBox& bbox, const UT_Vector3& color, ValType v, bool staggered) { if (!staggered) { mVec3fHandle.set(createPoint(bbox, color), UT_Vector3(float(v[0]), float(v[1]), float(v[2]))); } else { openvdb::Vec3d pos = openvdb::Vec3d(0.5*(bbox.min().x()+bbox.max().x()), 0.5*(bbox.min().y()+bbox.max().y()), 0.5*(bbox.min().z()+bbox.max().z())); pos[0] -= 0.5; // -x GA_Offset offset = createPoint(pos); if (mCdHandle.isValid()) mCdHandle.set(offset, UT_Vector3(1.0, 0.0, 0.0)); // r mVec3fHandle.set(offset, UT_Vector3(float(v[0]), 0.0, 0.0)); pos[0] += 0.5; pos[1] -= 0.5; // -y offset = createPoint(pos); if (mCdHandle.isValid()) mCdHandle.set(offset, UT_Vector3(0.0, 1.0, 0.0)); // g mVec3fHandle.set(offset, UT_Vector3(0.0, float(v[1]), 0.0)); pos[1] += 0.5; pos[2] -= 0.5; // -z offset = createPoint(pos); if (mCdHandle.isValid()) 
mCdHandle.set(offset, UT_Vector3(0.0, 0.0, 1.0)); // b mVec3fHandle.set(offset, UT_Vector3(0.0, 0.0, float(v[2]))); } } void TreeVisualizer::addPoint(const openvdb::CoordBBox& bbox, const UT_Vector3& color, bool staggered) { if (!staggered) { createPoint(bbox, color); } else { openvdb::Vec3d pos = openvdb::Vec3d(0.5*(bbox.min().x()+bbox.max().x()), 0.5*(bbox.min().y()+bbox.max().y()), 0.5*(bbox.min().z()+bbox.max().z())); pos[0] -= 0.5; // -x GA_Offset offset = createPoint(pos); if (mCdHandle.isValid()) mCdHandle.set(offset, color); pos[0] += 0.5; pos[1] -= 0.5; // -y offset = createPoint(pos); if (mCdHandle.isValid()) mCdHandle.set(offset, color); pos[1] += 0.5; pos[2] -= 0.5; // -z offset = createPoint(pos); if (mCdHandle.isValid()) mCdHandle.set(offset, color); } } void TreeVisualizer::addBox(const openvdb::CoordBBox& bbox, const UT_Vector3& color, bool solid) { createBox(*mGeo, *mXform, bbox, mParms.addColor ? &color : nullptr, solid); } //////////////////////////////////////// #if HAVE_SURFACING_PARM class GridSurfacer { public: GridSurfacer(GU_Detail& geo, float iso = 0.0, float adaptivityThreshold = 0.0, bool generateNormals = false, hvdb::Interrupter* interrupter = nullptr); template<typename GridType> void operator()(const GridType&); private: bool wasInterrupted(int percent = -1) const { return mInterrupter && mInterrupter->wasInterrupted(percent); } GU_Detail* mGeo; const float mIso, mAdaptivityThreshold; const bool mGenerateNormals; hvdb::Interrupter* mInterrupter; }; GridSurfacer::GridSurfacer(GU_Detail& geo, float iso, float adaptivityThreshold, bool generateNormals, hvdb::Interrupter* interrupter) : mGeo(&geo) , mIso(iso) , mAdaptivityThreshold(adaptivityThreshold) , mGenerateNormals(generateNormals) , mInterrupter(interrupter) { } template<typename GridType> void GridSurfacer::operator()(const GridType& grid) { using TreeType = typename GridType::TreeType; using LeafNodeType = typename TreeType::LeafNodeType; openvdb::CoordBBox bbox; // Gets min & 
max and checks if the grid is empty if (grid.tree().evalLeafBoundingBox(bbox)) { openvdb::Coord dim(bbox.max() - bbox.min()); GU_Detail tmpGeo; GU_Surfacer surfacer(tmpGeo, UT_Vector3(float(bbox.min().x()), float(bbox.min().y()), float(bbox.min().z())), UT_Vector3(float(dim[0]), float(dim[1]), float(dim[2])), dim[0], dim[1], dim[2], mGenerateNormals); typename GridType::ConstAccessor accessor = grid.getConstAccessor(); openvdb::Coord xyz; fpreal density[8]; // for each leaf.. for (typename TreeType::LeafCIter iter = grid.tree().cbeginLeaf(); iter; iter.next()) { if (wasInterrupted()) break; bool isLess = false, isMore = false; // for each active voxel.. typename LeafNodeType::ValueOnCIter it = iter.getLeaf()->cbeginValueOn(); for ( ; it; ++it) { xyz = it.getCoord(); // Sample values at each corner of the voxel for (unsigned int d = 0; d < 8; ++d) { openvdb::Coord valueCoord( xyz.x() + (d & 1), xyz.y() + ((d & 2) >> 1), xyz.z() + ((d & 4) >> 2)); // Houdini uses the inverse sign convention for level sets! density[d] = mIso - float(accessor.getValue(valueCoord)); density[d] <= 0.0f ? 
isLess = true : isMore = true; } // If there is a crossing, surface this voxel if (isLess && isMore) { surfacer.addCell( xyz.x() - bbox.min().x(), xyz.y() - bbox.min().y(), xyz.z() - bbox.min().z(), density, 0); } } // end active voxel traversal } // end leaf traversal if (wasInterrupted()) return; if (mAdaptivityThreshold > 1e-6) { GU_PolyReduceParms parms; parms.percentage = static_cast<float>(100.0 * (1.0 - std::min(mAdaptivityThreshold, 0.99f))); parms.usepercent = 1; tmpGeo.polyReduce(parms); } // world space transform for (GA_Iterator it(tmpGeo.getPointRange()); !it.atEnd(); it.advance()) { GA_Offset ptOffset = it.getOffset(); UT_Vector3 pos = tmpGeo.getPos3(ptOffset); openvdb::Vec3d vPos(pos.x(), pos.y(), pos.z()); openvdb::Vec3d wPos = grid.indexToWorld(vPos); tmpGeo.setPos3(ptOffset, UT_Vector3( static_cast<float>(wPos.x()), static_cast<float>(wPos.y()), static_cast<float>(wPos.z()))); } mGeo->merge(tmpGeo); } } #endif // HAVE_SURFACING_PARM //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Visualize::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); hvdb::Interrupter boss("Visualizing VDBs"); const GU_Detail* refGdp = inputGeo(0); if (refGdp == nullptr) return error(); // Get the group of grids to visualize. const GA_PrimitiveGroup* group = matchGroup(*refGdp, evalStdString("group", time)); // Evaluate the UI parameters. MeshMode meshing = MESH_NONE; #if HAVE_SURFACING_PARM if (evalInt("drawsurface", 0, time)) { std::string s = evalStdString("mesher", time); meshing = (s == "houdini") ? 
MESH_HOUDINI : MESH_OPENVDB; } const double adaptivity = evalFloat("adaptivity", 0, time); const double iso = double(evalFloat("isoValue", 0, time)); #endif TreeParms treeParms; treeParms.internalStyle = evalRenderStyle(*this, "drawinternalnodes", "internalstyle", time); treeParms.tileStyle = evalRenderStyle(*this, "drawtiles", "tilestyle", time); treeParms.leafStyle = evalRenderStyle(*this, "drawleafnodes", "leafstyle", time); treeParms.voxelStyle = evalRenderStyle(*this, "drawvoxels", "voxelstyle", time); treeParms.addColor = bool(evalInt("addcolor", 0, time)); treeParms.addValue = bool(evalInt("addvalue", 0, time)); treeParms.addIndexCoord = bool(evalInt("addindexcoord", 0, time)); treeParms.useGridName = bool(evalInt("usegridname", 0, time)); treeParms.ignoreStaggeredVectors = bool(evalInt("ignorestaggered", 0, time)); const bool drawTree = (treeParms.internalStyle || treeParms.tileStyle || treeParms.leafStyle || treeParms.voxelStyle); const bool showFrustum = bool(evalInt("previewfrustum", 0, time)); #ifdef DWA_OPENVDB const bool showROI = bool(evalInt("previewroi", 0, time)); #else const bool showROI = false; #endif #if HAVE_SURFACING_PARM if (meshing != MESH_NONE) { fpreal values[3] = { evalFloat("surfaceColor", 0, time), evalFloat("surfaceColor", 1, time), evalFloat("surfaceColor", 2, time)}; GA_Defaults color; color.set(values, 3); gdp->addFloatTuple(GA_ATTRIB_POINT, "Cd", 3, color); } // mesh using OpenVDB mesher if (meshing == MESH_OPENVDB) { GU_ConvertParms parms; parms.setToType(GEO_PrimTypeCompat::GEOPRIMPOLY); parms.myOffset = static_cast<float>(iso); parms.preserveGroups = false; UT_UniquePtr<GA_PrimitiveGroup> groupDeleter; if (!group) { parms.primGroup = nullptr; } else { // parms.primGroup might be modified, so make a copy. 
parms.primGroup = new GA_PrimitiveGroup(*refGdp); groupDeleter.reset(parms.primGroup); parms.primGroup->copyMembership(*group); } GU_PrimVDB::convertVDBs(*gdp, *refGdp, parms, adaptivity, /*keep_original*/true); } #endif // HAVE_SURFACING_PARM if (!boss.wasInterrupted() && (meshing == MESH_HOUDINI || drawTree || showFrustum || showROI)) { // for each VDB primitive... for (hvdb::VdbPrimCIterator it(refGdp, group); it; ++it) { if (boss.wasInterrupted()) break; const GU_PrimVDB *vdb = *it; #if HAVE_SURFACING_PARM // mesh using houdini surfacer if (meshing == MESH_HOUDINI) { GridSurfacer surfacer(*gdp, static_cast<float>(iso), static_cast<float>(adaptivity), false, &boss); hvdb::GEOvdbApply<hvdb::NumericGridTypes>(*vdb, surfacer); } #endif // draw tree topology if (drawTree) { TreeVisualizer draw(*gdp, treeParms, &boss); hvdb::GEOvdbApply<hvdb::AllGridTypes>(*vdb, draw); } if (showFrustum) { UT_Vector3 box_color(0.6f, 0.6f, 0.6f); UT_Vector3 tick_color(0.0f, 0.0f, 0.0f); hvdb::drawFrustum(*gdp, vdb->getGrid().transform(), &box_color, &tick_color, /*shaded*/true); } #ifdef DWA_OPENVDB if (showROI) { const openvdb::GridBase& grid = vdb->getConstGrid(); openvdb::Vec3IMetadata::ConstPtr metaMin = grid.getMetadata<openvdb::Vec3IMetadata>( openvdb::Name(openvdb_houdini::METADATA_ROI_MIN)); openvdb::Vec3IMetadata::ConstPtr metaMax = grid.getMetadata<openvdb::Vec3IMetadata>( openvdb::Name(openvdb_houdini::METADATA_ROI_MAX)); if (metaMin && metaMax) { const UT_Vector3 roiColor(1.0, 0.0, 0.0); openvdb::CoordBBox roi( openvdb::Coord(metaMin->value()), openvdb::Coord(metaMax->value())); createBox(*gdp, grid.transform(), roi, &roiColor); } } #endif } } if (boss.wasInterrupted()) { addWarning(SOP_MESSAGE, "Process was interrupted"); } boss.end(); } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
46,858
C++
35.551482
98
0.599898
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_NodeVDB.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_NodeVDB.cc /// @author FX R&D OpenVDB team #include "SOP_NodeVDB.h" #include <houdini_utils/geometry.h> #include <openvdb/points/PointDataGrid.h> #include "PointUtils.h" #include "Utils.h" #include "GEO_PrimVDB.h" #include "GU_PrimVDB.h" #include <GU/GU_Detail.h> #include <GU/GU_PrimPoly.h> #include <OP/OP_NodeInfoParms.h> #include <PRM/PRM_Parm.h> #include <PRM/PRM_Type.h> #include <SOP/SOP_Cache.h> // for stealable #include <SYS/SYS_Version.h> #include <UT/UT_InfoTree.h> #include <UT/UT_SharedPtr.h> #include <tbb/mutex.h> #include <algorithm> #include <cctype> // std::tolower #include <iostream> #include <map> #include <memory> #include <mutex> // std::call_once #include <sstream> #include <stdexcept> /// Enables custom UT_InfoTree data from SOP_NodeVDB::fillInfoTreeNodeSpecific() /// which is used to populate the mako templates in Houdini 16 and greater. /// The templates are used to provide MMB information on Houdini primitives and /// are installed as part of the Houdini toolkit $HH/config/NodeInfoTemplates. /// This code has since been absorbed by SideFX, but we continue to keep /// it around to demonstrate how to extend the templates in Houdini. Note /// that the current implementation is a close duplicate of the data populated /// by Houdini, so this will clash with native Houdini names. The templates /// may also change in future Houdini versions, so do not expect this to /// produce valid results out the box. /// /// For users wishing to customize the .mako files, you can use python to /// inspect the current mako structure. /// /// @code /// infoTree = hou.node('/obj/geo1/vdbfrompolygons1').infoTree() /// sopInfo = infoTree.branches()['SOP Info'] /// sparseInfo = sopInfo.branches()['Sparse Volumes'] /// @endcode /// /// These mako branches are the paths that are populated by UT_InfoTree. 
The /// mako files responsible for producing VDB specific data are geometry.mako, /// called by sop.mako. /// //#define OPENVDB_CUSTOM_MAKO namespace { const std::string& getOpHidePolicy() { static std::string sOpHidePolicy; static std::once_flag once; std::call_once(once, []() { const char* opHidePolicy = std::getenv("OPENVDB_OPHIDE_POLICY"); #ifdef OPENVDB_OPHIDE_POLICY if (opHidePolicy == nullptr) { opHidePolicy = OPENVDB_PREPROC_STRINGIFY(OPENVDB_OPHIDE_POLICY); } #endif if (opHidePolicy != nullptr) { std::string opHidePolicyStr(opHidePolicy); // to lower-case std::transform(opHidePolicyStr.begin(), opHidePolicyStr.end(), opHidePolicyStr.begin(), [](unsigned char c) { return std::tolower(c); }); sOpHidePolicy = opHidePolicy; } else { sOpHidePolicy = ""; } }); return sOpHidePolicy; } } // anonymous namespace namespace openvdb_houdini { namespace node_info_text { using Mutex = tbb::mutex; using Lock = Mutex::scoped_lock; // map of function callbacks to grid types using ApplyGridSpecificInfoTextMap = std::map<openvdb::Name, ApplyGridSpecificInfoText>; struct LockedInfoTextRegistry { LockedInfoTextRegistry() {} ~LockedInfoTextRegistry() {} Mutex mMutex; ApplyGridSpecificInfoTextMap mApplyGridSpecificInfoTextMap; }; // Declare this at file scope to ensure thread-safe initialization static Mutex theInitInfoTextRegistryMutex; // Global function for accessing the regsitry static LockedInfoTextRegistry* getInfoTextRegistry() { Lock lock(theInitInfoTextRegistryMutex); static LockedInfoTextRegistry *registry = nullptr; if (registry == nullptr) { #if defined(__ICC) __pragma(warning(disable:1711)) // disable ICC "assignment to static variable" warnings #endif registry = new LockedInfoTextRegistry(); #if defined(__ICC) __pragma(warning(default:1711)) #endif } return registry; } void registerGridSpecificInfoText(const std::string&, ApplyGridSpecificInfoText); ApplyGridSpecificInfoText getGridSpecificInfoText(const std::string&); void registerGridSpecificInfoText(const 
std::string& gridType, ApplyGridSpecificInfoText callback) { LockedInfoTextRegistry *registry = getInfoTextRegistry(); Lock lock(registry->mMutex); if(registry->mApplyGridSpecificInfoTextMap.find(gridType) != registry->mApplyGridSpecificInfoTextMap.end()) return; registry->mApplyGridSpecificInfoTextMap[gridType] = callback; } /// @brief Return a pointer to a grid information function, or @c nullptr /// if no specific function has been registered for the given grid type. /// @note The defaultNodeSpecificInfoText() method is always returned prior to Houdini 14. ApplyGridSpecificInfoText getGridSpecificInfoText(const std::string& gridType) { LockedInfoTextRegistry *registry = getInfoTextRegistry(); Lock lock(registry->mMutex); const ApplyGridSpecificInfoTextMap::const_iterator iter = registry->mApplyGridSpecificInfoTextMap.find(gridType); if (iter == registry->mApplyGridSpecificInfoTextMap.end() || iter->second == nullptr) { return nullptr; // Native prim info is sufficient } return iter->second; } } // namespace node_info_text //////////////////////////////////////// SOP_NodeVDB::SOP_NodeVDB(OP_Network* net, const char* name, OP_Operator* op): SOP_Node(net, name, op) { #ifndef SESI_OPENVDB // Initialize the OpenVDB library openvdb::initialize(); // Forward OpenVDB log messages to the UT_ErrorManager (for all SOPs). startLogForwarding(SOP_OPTYPE_ID); #endif // Register grid-specific info text for Point Data Grids node_info_text::registerGridSpecificInfoText<openvdb::points::PointDataGrid>( &pointDataGridSpecificInfoText); // Set the flag to draw guide geometry mySopFlags.setNeedGuide1(true); } //////////////////////////////////////// const GA_PrimitiveGroup* SOP_NodeVDB::matchGroup(GU_Detail& aGdp, const std::string& pattern) { /// @internal Presumably, when a group name pattern matches multiple groups, /// a new group must be created that is the union of the matching groups, /// and therefore the detail must be non-const. 
Since inputGeo() returns /// a const detail, we can't match groups in input details; however, /// we usually copy input 0 to the output detail, so we can in effect /// match groups from input 0 by matching them in the output instead. const GA_PrimitiveGroup* group = nullptr; if (!pattern.empty()) { // If a pattern was provided, try to match it. group = parsePrimitiveGroups(pattern.c_str(), GroupCreator(&aGdp, false)); if (!group) { // Report an error if the pattern didn't match. throw std::runtime_error(("Invalid group (" + pattern + ")").c_str()); } } return group; } const GA_PrimitiveGroup* SOP_NodeVDB::matchGroup(const GU_Detail& aGdp, const std::string& pattern) { const GA_PrimitiveGroup* group = nullptr; if (!pattern.empty()) { // If a pattern was provided, try to match it. group = parsePrimitiveGroups(pattern.c_str(), GroupCreator(&aGdp)); if (!group) { // Report an error if the pattern didn't match. throw std::runtime_error(("Invalid group (" + pattern + ")").c_str()); } } return group; } //////////////////////////////////////// void SOP_NodeVDB::fillInfoTreeNodeSpecific(UT_InfoTree& tree, const OP_NodeInfoTreeParms& parms) { SOP_Node::fillInfoTreeNodeSpecific(tree, parms); // Add the OpenVDB library version number to this node's // extended operator information. 
if (UT_InfoTree* child = tree.addChildMap("OpenVDB")) { child->addProperties("OpenVDB Version", openvdb::getLibraryAbiVersionString()); } #ifdef OPENVDB_CUSTOM_MAKO UT_StringArray sparseVolumeTreePath({"SOP Info", "Sparse Volumes"}); if (UT_InfoTree* sparseVolumes = tree.getDescendentPtr(sparseVolumeTreePath)) { if (UT_InfoTree* info = sparseVolumes->addChildBranch("OpenVDB Points")) { OP_Context context(parms.getTime()); GU_DetailHandle gdHandle = getCookedGeoHandle(context); if (gdHandle.isNull()) return; GU_DetailHandleAutoReadLock gdLock(gdHandle); const GU_Detail* tmpGdp = gdLock.getGdp(); if (!tmpGdp) return; info->addColumnHeading("Point Count"); info->addColumnHeading("Point Groups"); info->addColumnHeading("Point Attributes"); for (VdbPrimCIterator it(tmpGdp); it; ++it) { const openvdb::GridBase::ConstPtr grid = it->getConstGridPtr(); if (!grid) continue; if (!grid->isType<openvdb::points::PointDataGrid>()) continue; const openvdb::points::PointDataGrid& points = *openvdb::gridConstPtrCast<openvdb::points::PointDataGrid>(grid); std::string countStr, groupStr, attributeStr; collectPointInfo(points, countStr, groupStr, attributeStr); ut_PropertyRow* row = info->addProperties(); row->append(countStr); row->append(groupStr); row->append(attributeStr); } } } #endif } void SOP_NodeVDB::getNodeSpecificInfoText(OP_Context &context, OP_NodeInfoParms &parms) { SOP_Node::getNodeSpecificInfoText(context, parms); #ifdef SESI_OPENVDB // Nothing needed since we will report it as part of native prim info #else // Get a handle to the geometry. GU_DetailHandle gd_handle = getCookedGeoHandle(context); // Check if we have a valid detail handle. if(gd_handle.isNull()) return; // Lock it for reading. GU_DetailHandleAutoReadLock gd_lock(gd_handle); // Finally, get at the actual GU_Detail. 
const GU_Detail* tmp_gdp = gd_lock.getGdp(); std::ostringstream infoStr; unsigned gridn = 0; for (VdbPrimCIterator it(tmp_gdp); it; ++it) { const openvdb::GridBase& grid = it->getGrid(); node_info_text::ApplyGridSpecificInfoText callback = node_info_text::getGridSpecificInfoText(grid.type()); if (callback) { // Note, the output string stream for every new grid is initialized with // its index and houdini primitive name prior to executing the callback const UT_String gridName = it.getPrimitiveName(); infoStr << " (" << it.getIndex() << ")"; if(gridName.isstring()) infoStr << " name: '" << gridName << "',"; (*callback)(infoStr, grid); infoStr<<"\n"; ++gridn; } } if (gridn > 0) { std::ostringstream headStr; headStr << gridn << " Custom VDB grid" << (gridn == 1 ? "" : "s") << "\n"; parms.append(headStr.str().c_str()); parms.append(infoStr.str().c_str()); } #endif } //////////////////////////////////////// OP_ERROR SOP_NodeVDB::duplicateSourceStealable(const unsigned index, OP_Context& context, GU_Detail **pgdp, GU_DetailHandle& gdh, bool clean) { OPENVDB_NO_DEPRECATION_WARNING_BEGIN // traverse upstream nodes, if unload is not possible, duplicate the source if (!isSourceStealable(index, context)) { duplicateSource(index, context, *pgdp, clean); unlockInput(index); return error(); } OPENVDB_NO_DEPRECATION_WARNING_END // get the input GU_Detail handle and unlock the inputs GU_DetailHandle inputgdh = inputGeoHandle(index); unlockInput(index); SOP_Node *input = CAST_SOPNODE(getInput(index)); if (!input) { addError(SOP_MESSAGE, "Invalid input SOP Node when attempting to unload."); return error(); } // explicitly unload the data from the input SOP const bool unloadSuccessful = input->unloadData(); // check if we only have one reference const bool soleReference = (inputgdh.getRefCount() == 1); // if the unload was unsuccessful or the reference count is not one, we fall back to // explicitly copying the input onto the gdp if (!(unloadSuccessful && soleReference)) { const 
GU_Detail *src = inputgdh.readLock(); UT_ASSERT(src); if (src) (*pgdp)->copy(*src); inputgdh.unlock(src); return error(); } // release our old write lock on gdp (setup by cookMe()) gdh.unlock(*pgdp); // point to the input's old gdp and setup a write lock gdh = inputgdh; *pgdp = gdh.writeLock(); return error(); } bool SOP_NodeVDB::isSourceStealable(const unsigned index, OP_Context& context) const { struct Local { static inline OP_Node* nextStealableInput( const unsigned idx, const fpreal now, const OP_Node* node) { OP_Node* input = node->getInput(idx); while (input) { OP_Node* passThrough = input->getPassThroughNode(now); if (!passThrough) break; input = passThrough; } return input; } }; // struct Local const fpreal now = context.getTime(); for (OP_Node* node = Local::nextStealableInput(index, now, this); node != nullptr; node = Local::nextStealableInput(index, now, node)) { // cont'd if it is a SOP_NULL. std::string opname = node->getName().toStdString().substr(0, 4); if (opname == "null") continue; // if the SOP is a cache SOP we don't want to try and alter its data without a deep copy if (dynamic_cast<SOP_Cache*>(node)) return false; if (node->getUnload() != 0) return true; else return false; } return false; } OP_ERROR SOP_NodeVDB::duplicateSourceStealable(const unsigned index, OP_Context& context) { OPENVDB_NO_DEPRECATION_WARNING_BEGIN auto error = this->duplicateSourceStealable(index, context, &gdp, myGdpHandle, true); OPENVDB_NO_DEPRECATION_WARNING_END return error; } //////////////////////////////////////// const SOP_NodeVerb* SOP_NodeVDB::cookVerb() const { if (const auto* verb = SOP_NodeVerb::lookupVerb(getOperator()->getName())) { return verb; ///< @todo consider caching this } return SOP_Node::cookVerb(); } OP_ERROR SOP_NodeVDB::cookMySop(OP_Context& context) { if (cookVerb()) { return cookMyselfAsVerb(context); } return cookVDBSop(context); } //////////////////////////////////////// namespace { void createEmptyGridGlyph(GU_Detail& gdp, GridCRef grid) { 
openvdb::Vec3R lines[6]; lines[0].init(-0.5, 0.0, 0.0); lines[1].init( 0.5, 0.0, 0.0); lines[2].init( 0.0,-0.5, 0.0); lines[3].init( 0.0, 0.5, 0.0); lines[4].init( 0.0, 0.0,-0.5); lines[5].init( 0.0, 0.0, 0.5); const openvdb::math::Transform &xform = grid.transform(); lines[0] = xform.indexToWorld(lines[0]); lines[1] = xform.indexToWorld(lines[1]); lines[2] = xform.indexToWorld(lines[2]); lines[3] = xform.indexToWorld(lines[3]); lines[4] = xform.indexToWorld(lines[4]); lines[5] = xform.indexToWorld(lines[5]); UT_SharedPtr<GU_Detail> tmpGDP(new GU_Detail); UT_Vector3 color(0.1f, 1.0f, 0.1f); tmpGDP->addFloatTuple(GA_ATTRIB_POINT, "Cd", 3, GA_Defaults(color.data(), 3)); GU_PrimPoly *poly; for (int i = 0; i < 6; i += 2) { poly = GU_PrimPoly::build(&*tmpGDP, 2, GU_POLY_OPEN); tmpGDP->setPos3(poly->getPointOffset(i % 2), UT_Vector3(float(lines[i][0]), float(lines[i][1]), float(lines[i][2]))); tmpGDP->setPos3(poly->getPointOffset(i % 2 + 1), UT_Vector3(float(lines[i + 1][0]), float(lines[i + 1][1]), float(lines[i + 1][2]))); } gdp.merge(*tmpGDP); } } // unnamed namespace OP_ERROR SOP_NodeVDB::cookMyGuide1(OP_Context& context) { #ifndef SESI_OPENVDB myGuide1->clearAndDestroy(); UT_Vector3 color(0.1f, 0.1f, 1.0f); UT_Vector3 corners[8]; // For each VDB primitive (with a non-null grid pointer) in the group... 
for (VdbPrimIterator it(gdp); it; ++it) { if (evalGridBBox(it->getGrid(), corners, /*expandHalfVoxel=*/true)) { houdini_utils::createBox(*myGuide1, corners, &color); } else { createEmptyGridGlyph(*myGuide1, it->getGrid()); } } #endif return SOP_Node::cookMyGuide1(context); } //////////////////////////////////////// openvdb::Vec3f SOP_NodeVDB::evalVec3f(const char *name, fpreal time) const { return openvdb::Vec3f(float(evalFloat(name, 0, time)), float(evalFloat(name, 1, time)), float(evalFloat(name, 2, time))); } openvdb::Vec3R SOP_NodeVDB::evalVec3R(const char *name, fpreal time) const { return openvdb::Vec3R(evalFloat(name, 0, time), evalFloat(name, 1, time), evalFloat(name, 2, time)); } openvdb::Vec3i SOP_NodeVDB::evalVec3i(const char *name, fpreal time) const { using ValueT = openvdb::Vec3i::value_type; return openvdb::Vec3i(static_cast<ValueT>(evalInt(name, 0, time)), static_cast<ValueT>(evalInt(name, 1, time)), static_cast<ValueT>(evalInt(name, 2, time))); } openvdb::Vec2R SOP_NodeVDB::evalVec2R(const char *name, fpreal time) const { return openvdb::Vec2R(evalFloat(name, 0, time), evalFloat(name, 1, time)); } openvdb::Vec2i SOP_NodeVDB::evalVec2i(const char *name, fpreal time) const { using ValueT = openvdb::Vec2i::value_type; return openvdb::Vec2i(static_cast<ValueT>(evalInt(name, 0, time)), static_cast<ValueT>(evalInt(name, 1, time))); } std::string SOP_NodeVDB::evalStdString(const char* name, fpreal time, int index) const { UT_String str; evalString(str, name, index, time); return str.toStdString(); } //////////////////////////////////////// void SOP_NodeVDB::resolveRenamedParm(PRM_ParmList& obsoleteParms, const char* oldName, const char* newName) { PRM_Parm* parm = obsoleteParms.getParmPtr(oldName); if (parm && !parm->isFactoryDefault()) { if (this->hasParm(newName)) { this->getParm(newName).copyParm(*parm); } } } //////////////////////////////////////// namespace { /// @brief Default OpPolicy for OpenVDB operator types class DefaultOpenVDBOpPolicy: public 
houdini_utils::OpPolicy { public: std::string getValidName(const std::string& english) { UT_String s(english); // Remove non-alphanumeric characters from the name. s.forceValidVariableName(); std::string name = s.toStdString(); // Remove spaces and underscores. name.erase(std::remove(name.begin(), name.end(), ' '), name.end()); name.erase(std::remove(name.begin(), name.end(), '_'), name.end()); return name; } std::string getLowercaseName(const std::string& english) { UT_String s(english); // Lowercase s.toLower(); return s.toStdString(); } /// @brief OpenVDB operators of each flavor (SOP, POP, etc.) share /// an icon named "SOP_OpenVDB", "POP_OpenVDB", etc. std::string getIconName(const houdini_utils::OpFactory& factory) override { return factory.flavorString() + "_OpenVDB"; } /// @brief Return the name of the equivalent native operator as shipped with Houdini. /// @details An empty string indicates that there is no equivalent native operator. virtual std::string getNativeName(const houdini_utils::OpFactory&) { return ""; } }; /// @brief SideFX OpPolicy for OpenVDB operator types class SESIOpenVDBOpPolicy: public DefaultOpenVDBOpPolicy { public: std::string getName(const houdini_utils::OpFactory&, const std::string& english) override { return this->getLowercaseName(this->getValidName(english)); } std::string getTabSubMenuPath(const houdini_utils::OpFactory&) override { return "VDB"; } }; /// @brief ASWF OpPolicy for OpenVDB operator types class ASWFOpenVDBOpPolicy: public DefaultOpenVDBOpPolicy { public: std::string getName(const houdini_utils::OpFactory&, const std::string& english) override { return "DW_Open" + this->getValidName(english); } std::string getLabelName(const houdini_utils::OpFactory& factory) override { return factory.english(); } std::string getFirstName(const houdini_utils::OpFactory& factory) override { return this->getLowercaseName(this->getValidName(this->getLabelName(factory))); } std::string getNativeName(const houdini_utils::OpFactory& 
factory) override { return this->getLowercaseName(this->getValidName(factory.english())); } std::string getTabSubMenuPath(const houdini_utils::OpFactory&) override { return "VDB/ASWF"; } }; #ifdef SESI_OPENVDB using OpenVDBOpPolicy = SESIOpenVDBOpPolicy; #else using OpenVDBOpPolicy = ASWFOpenVDBOpPolicy; #endif // SESI_OPENVDB } // unnamed namespace OpenVDBOpFactory::OpenVDBOpFactory( const std::string& english, OP_Constructor ctor, houdini_utils::ParmList& parms, OP_OperatorTable& table, houdini_utils::OpFactory::OpFlavor flavor): houdini_utils::OpFactory(OpenVDBOpPolicy(), english, ctor, parms, table, flavor) { setNativeName(OpenVDBOpPolicy().getNativeName(*this)); std::stringstream ss; ss << "vdb" << OPENVDB_LIBRARY_VERSION_STRING << " "; ss << "houdini" << SYS_Version::full(); // Define an operator version of the format "vdb6.1.0 houdini18.0.222", // which can be returned by OP_OperatorDW::getVersion() and used to // handle compatibility between specific versions of VDB or Houdini addSpareData({{"operatorversion", ss.str()}}); } OpenVDBOpFactory& OpenVDBOpFactory::setNativeName(const std::string& name) { // SideFX nodes have no native equivalent. #ifndef SESI_OPENVDB addSpareData({{"nativename", name}}); // if native name was previously defined and is present // in the hidden table then remove it regardless of policy if (!mNativeName.empty() && this->table().isOpHidden(mNativeName.c_str())) { this->table().delOpHidden(mNativeName.c_str()); } mNativeName = name; if (!name.empty()) { const std::string& opHidePolicy = getOpHidePolicy(); if (opHidePolicy == "aswf") { // set this SOP to be hidden (if a native equivalent exists) this->setInvisible(); } else if (opHidePolicy == "native") { // mark the native equivalent SOP to be hidden this->table().addOpHidden(name.c_str()); } } #endif return *this; } } // namespace openvdb_houdini
22,919
C++
28.727626
96
0.641084
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Prune.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file SOP_OpenVDB_Prune.cc
///
/// @author FX R&D OpenVDB team
///
/// @brief SOP to prune tree branches from OpenVDB grids

#include <houdini_utils/ParmFactory.h>
#include <openvdb/tools/Prune.h>
#include <openvdb_houdini/Utils.h>
#include <openvdb_houdini/SOP_NodeVDB.h>
#include <UT/UT_Interrupt.h>
#include <stdexcept>
#include <string>

namespace hvdb = openvdb_houdini;
namespace hutil = houdini_utils;


/// @brief SOP node that collapses constant regions of VDB trees into tiles,
/// reducing memory use without changing grid topology.
/// @details Cooking is delegated to the nested Cache class (the verb-based
/// cook path), which processes the input VDBs in place.
class SOP_OpenVDB_Prune: public hvdb::SOP_NodeVDB
{
public:
    SOP_OpenVDB_Prune(OP_Network*, const char* name, OP_Operator*);
    ~SOP_OpenVDB_Prune() override {}

    /// Factory callback used by Houdini to instantiate this node type.
    static OP_Node* factory(OP_Network*, const char* name, OP_Operator*);

    /// Verb-based cook implementation; cooks the gdp in place.
    class Cache: public SOP_VDBCacheOptions
    {
        OP_ERROR cookVDBSop(OP_Context&) override;
    };

protected:
    // Enables/disables the tolerance parameter depending on the mode.
    bool updateParmsFlags() override;
};


////////////////////////////////////////


// Build UI and register this operator.
void
newSopOperator(OP_OperatorTable* table)
{
    // Houdini may call this with a null table during shutdown; nothing to do.
    if (table == nullptr) return;

    hutil::ParmList parms;

    // Optional group pattern restricting which input VDBs get pruned.
    parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group")
        .setChoiceList(&hutil::PrimGroupMenuInput1)
        .setTooltip("Specify a subset of the input VDBs to be pruned.")
        .setDocumentation(
            "A subset of the input VDBs to be pruned"
            " (see [specifying volumes|/model/volumes#group])"));

    // Pruning strategy: by value similarity, inactivity, or level-set sign.
    parms.add(hutil::ParmFactory(PRM_STRING, "mode", "Mode")
        .setDefault("value")
        .setChoiceListItems(PRM_CHOICELIST_SINGLE, {
            "value", "Value",
            "inactive", "Inactive",
            "levelset", "Level Set"
        })
        .setTooltip(
            "Value:\n"
            " Collapse regions in which all voxels have the same\n"
            " value and active state into tiles with those values\n"
            " and active states.\n"
            "Inactive:\n"
            " Collapse regions in which all voxels are inactive\n"
            " into inactive background tiles.\n"
            "Level Set:\n"
            " Collapse regions in which all voxels are inactive\n"
            " into inactive tiles with either the inside or\n"
            " the outside background value, depending on\n"
            " the signs of the voxel values.\n"));

    // Value-equality threshold; only meaningful in "value" mode
    // (see updateParmsFlags()).
    parms.add(hutil::ParmFactory(PRM_FLT_J, "tolerance", "Tolerance")
        .setDefault(PRMzeroDefaults)
        .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 1)
        .setTooltip(
            "Voxel values are considered equal if they differ\n"
            "by less than the specified threshold."));

    // Register the operator; COOK_INPLACE means the cache cooks the
    // input detail directly rather than copying it.
    hvdb::OpenVDBOpFactory("VDB Prune", SOP_OpenVDB_Prune::factory, parms, *table)
        .setNativeName("")
        .addInput("Grids to process")
        .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Prune::Cache; })
        .setDocumentation("\
#icon: COMMON/openvdb\n\
#tags: vdb\n\
\n\
\"\"\"Reduce the memory footprint of VDB volumes.\"\"\"\n\
\n\
@overview\n\
\n\
This node prunes branches of VDB\n\
[trees|http://www.openvdb.org/documentation/doxygen/overview.html#secTree]\n\
where all voxels have the same or similar values.\n\
This can help to reduce the memory footprint of a VDB, without changing its topology.\n\
With a suitably high tolerance, pruning can function as a simple\n\
form of lossy compression.\n\
\n\
@related\n\
- [OpenVDB Densify|Node:sop/DW_OpenVDBDensify]\n\
- [Node:sop/vdbactivate]\n\
\n\
@examples\n\
\n\
See [openvdb.org|http://www.openvdb.org/download/] for source code\n\
and usage examples.\n");
}


////////////////////////////////////////


OP_Node*
SOP_OpenVDB_Prune::factory(OP_Network* net, const char* name, OP_Operator* op)
{
    return new SOP_OpenVDB_Prune(net, name, op);
}


SOP_OpenVDB_Prune::SOP_OpenVDB_Prune(OP_Network* net, const char* name, OP_Operator* op):
    hvdb::SOP_NodeVDB(net, name, op)
{
}


////////////////////////////////////////


// Enable/disable or show/hide parameters in the UI.
bool SOP_OpenVDB_Prune::updateParmsFlags() { bool changed = false; changed |= enableParm("tolerance", evalStdString("mode", 0) == "value"); return changed; } //////////////////////////////////////// namespace { struct PruneOp { PruneOp(const std::string m, fpreal tol = 0.0): mode(m), pruneTolerance(tol) {} template<typename GridT> void operator()(GridT& grid) const { using ValueT = typename GridT::ValueType; if (mode == "value") { OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN const ValueT tolerance(openvdb::zeroVal<ValueT>() + pruneTolerance); OPENVDB_NO_TYPE_CONVERSION_WARNING_END openvdb::tools::prune(grid.tree(), tolerance); } else if (mode == "inactive") { openvdb::tools::pruneInactive(grid.tree()); } else if (mode == "levelset") { openvdb::tools::pruneLevelSet(grid.tree()); } } std::string mode; fpreal pruneTolerance; }; } OP_ERROR SOP_OpenVDB_Prune::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); // Get the group of grids to process. const GA_PrimitiveGroup* group = this->matchGroup(*gdp, evalStdString("group", time)); // Get other UI parameters. const fpreal tolerance = evalFloat("tolerance", 0, time); // Construct a functor to process grids of arbitrary type. const PruneOp pruneOp(evalStdString("mode", time), tolerance); UT_AutoInterrupt progress("Pruning OpenVDB grids"); // Process each VDB primitive that belongs to the selected group. for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { if (progress.wasInterrupted()) { throw std::runtime_error("processing was interrupted"); } hvdb::GEOvdbApply<hvdb::VolumeGridTypes>(**it, pruneOp); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
6,046
C++
28.354369
94
0.618095
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/GU_VDBPointTools.h
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file GU_VDBPointTools.h
/// @author FX R&D OpenVDB team
///
/// @brief Collection of PointIndexGrid helpers for Houdini

#ifndef GU_VDBPOINTTOOLS_H_HAS_BEEN_INCLUDED
#define GU_VDBPOINTTOOLS_H_HAS_BEEN_INCLUDED

#if defined(SESI_OPENVDB)
#include "GU_Detail.h"
#include "GU_DetailHandle.h"
#include "GU_PackedContext.h"
#include "GU_PackedFragment.h"
#include "GU_PackedGeometry.h"
#include "GU_PrimPacked.h"
#else
#include <GU/GU_Detail.h>
#include <GU/GU_DetailHandle.h>
#include <GU/GU_PackedContext.h>
#include <GU/GU_PackedFragment.h>
#include <GU/GU_PackedGeometry.h>
#include <GU/GU_PrimPacked.h>
#endif

#include <GA/GA_ElementGroup.h>
#include <UT/UT_SharedPtr.h>
#include <UT/UT_VectorTypes.h>

#include <openvdb/Platform.h>
#include <openvdb/tools/PointIndexGrid.h>
#include <openvdb/tools/ParticleAtlas.h>
#include <openvdb/tools/PointsToMask.h>

#include <vector>


/// @brief Houdini point attribute wrapper
/// @details Adapts a GU_Detail's point attributes (P, and optionally v and
/// pscale) to the array interface expected by OpenVDB's point tools
/// (getPos/getVelocity/getRadius indexed access). If a point group is given,
/// only points in that group are exposed, via a prebuilt offset table.
template<typename VectorType>
struct GU_VDBPointList
{
    using Ptr = UT_SharedPtr<GU_VDBPointList>;
    using ConstPtr = UT_SharedPtr<const GU_VDBPointList>;

    using PosType = VectorType;
    using ScalarType = typename PosType::value_type;

    GU_VDBPointList(const GU_Detail& detail, const GA_PointGroup* group = nullptr)
        : mPositionHandle(detail.getP())
        , mVelocityHandle()
        , mRadiusHandle()
        , mIndexMap(&detail.getP()->getIndexMap())
        , mOffsets()
        , mSize(mIndexMap->indexSize())
    {
        // Choose an index->offset strategy up front and store it as a
        // member-function pointer, so per-point access has no branching.
        if (group) {
            // Group case: flatten the group's offsets into a lookup table.
            mSize = group->entries();
            mOffsets.reserve(mSize);

            GA_Offset start, end;
            GA_Range range(*group);
            for (GA_Iterator it = range.begin(); it.blockAdvance(start, end); ) {
                for (GA_Offset off = start; off < end; ++off) {
                    mOffsets.push_back(off);
                }
            }

            getOffset = &GU_VDBPointList::offsetFromGroupMap;
        } else if (mIndexMap->isTrivialMap()) {
            // Trivial map: index == offset, so a plain cast suffices.
            getOffset = &GU_VDBPointList::offsetFromIndexCast;
        } else {
            // General case: go through the detail's index map.
            getOffset = &GU_VDBPointList::offsetFromGeoMap;
        }

        // Bind optional attributes

        GA_ROAttributeRef velRef = detail.findFloatTuple(GA_ATTRIB_POINT, GEO_STD_ATTRIB_VELOCITY, 3);
        if (velRef.isValid()) {
            mVelocityHandle.bind(velRef.getAttribute());
        }

        GA_ROAttributeRef radRef = detail.findFloatTuple(GA_ATTRIB_POINT, GEO_STD_ATTRIB_PSCALE);
        if (radRef.isValid()) {
            mRadiusHandle.bind(radRef.getAttribute());
        }
    }

    /// Heap-allocating factory returning a shared pointer.
    static Ptr
    create(const GU_Detail& detail, const GA_PointGroup* group = nullptr)
    {
        return Ptr(new GU_VDBPointList(detail, group));
    }

    /// Number of points exposed (group size if a group was given).
    size_t size() const { return mSize; }

    bool hasVelocity() const { return mVelocityHandle.isValid(); }
    bool hasRadius() const { return mRadiusHandle.isValid(); }

    // Index access methods

    void getPos(size_t n, PosType& xyz) const {
        getPosFromOffset((this->*getOffset)(n), xyz);
    }

    void getVelocity(size_t n, PosType& v) const {
        getVelocityFromOffset((this->*getOffset)(n), v);
    }

    void getRadius(size_t n, ScalarType& r) const {
        getRadiusFromOffset((this->*getOffset)(n), r);
    }

    // Offset access methods

    /// Translate a list index into the underlying GA_Offset.
    GA_Offset offsetFromIndex(size_t n) const {
        return (this->*getOffset)(n);
    }

    void getPosFromOffset(const GA_Offset offset, PosType& xyz) const {
        const UT_Vector3 data = mPositionHandle.get(offset);
        xyz[0] = ScalarType(data[0]);
        xyz[1] = ScalarType(data[1]);
        xyz[2] = ScalarType(data[2]);
    }

    void getVelocityFromOffset(const GA_Offset offset, PosType& v) const {
        const UT_Vector3 data = mVelocityHandle.get(offset);
        v[0] = ScalarType(data[0]);
        v[1] = ScalarType(data[1]);
        v[2] = ScalarType(data[2]);
    }

    void getRadiusFromOffset(const GA_Offset offset, ScalarType& r) const {
        r = ScalarType(mRadiusHandle.get(offset));
    }

private:
    // Disallow copying
    GU_VDBPointList(const GU_VDBPointList&);
    GU_VDBPointList& operator=(const GU_VDBPointList&);

    // Strategy selected in the constructor (group table / cast / geo map).
    GA_Offset (GU_VDBPointList::* getOffset)(const size_t) const;

    GA_Offset offsetFromGeoMap(const size_t n) const {
        return mIndexMap->offsetFromIndex(GA_Index(n));
    }

    GA_Offset offsetFromGroupMap(const size_t n) const {
        return mOffsets[n];
    }

    GA_Offset offsetFromIndexCast(const size_t n) const {
        return GA_Offset(n);
    }

    GA_ROHandleV3 mPositionHandle, mVelocityHandle;
    GA_ROHandleF mRadiusHandle;
    GA_IndexMap const * const mIndexMap;
    std::vector<GA_Offset> mOffsets;    // only populated in the group case
    size_t mSize;
}; // GU_VDBPointList


////////////////////////////////////////


// PointIndexGrid utility methods


namespace GU_VDBPointToolsInternal {

/// @brief LeafManager functor that rewrites each leaf's point indices
/// in place as Houdini geometry offsets.
template<typename PointArrayType>
struct IndexToOffsetOp {
    IndexToOffsetOp(const PointArrayType& points): mPointList(&points) {}

    template <typename LeafT>
    void operator()(LeafT &leaf, size_t /*leafIndex*/) const {
        typename LeafT::IndexArray& indices = leaf.indices();
        for (size_t n = 0, N = indices.size(); n < N; ++n) {
            indices[n] = static_cast<typename LeafT::ValueType::IntType>(
                mPointList->offsetFromIndex(GA_Index{indices[n]}));
        }
    }
    PointArrayType const * const mPointList;
};


/// @brief TBB parallel_reduce body that builds a MaskGrid from the points
/// of packed primitives, unpacking each primitive as needed.
struct PackedMaskConstructor {
    PackedMaskConstructor(const std::vector<const GA_Primitive*>& prims,
        const openvdb::math::Transform& xform)
        : mPrims(prims.empty() ? nullptr : &prims.front())
        , mXForm(xform)
        , mMaskGrid(new openvdb::MaskGrid(false))
    {
        mMaskGrid->setTransform(mXForm.copy());
    }

    // Splitting constructor required by tbb::parallel_reduce.
    PackedMaskConstructor(PackedMaskConstructor& rhs, tbb::split)
        : mPrims(rhs.mPrims)
        , mXForm(rhs.mXForm)
        , mMaskGrid(new openvdb::MaskGrid(false))
    {
        mMaskGrid->setTransform(mXForm.copy());
    }

    openvdb::MaskGrid::Ptr getMaskGrid() { return mMaskGrid; }

    // Merge another range's partial result into this one.
    void join(PackedMaskConstructor& rhs) {
        mMaskGrid->tree().topologyUnion(rhs.mMaskGrid->tree());
    }

    void operator()(const tbb::blocked_range<size_t>& range)
    {
        GU_PackedContext packedcontext;

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            const GA_Primitive *prim = mPrims[n];
            if (!prim || !GU_PrimPacked::isPackedPrimitive(*prim)) continue;

            const GU_PrimPacked * pprim = static_cast<const GU_PrimPacked*>(prim);

            GU_Detail tmpdetail;
            const GU_Detail *detailtouse;

            GU_DetailHandleAutoReadLock readlock(pprim->getPackedDetail(packedcontext));

            UT_Matrix4D mat;
            pprim->getFullTransform4(mat);
            if (mat.isIdentity() && readlock.isValid() && readlock.getGdp()) {
                // Identity transform: read the packed detail directly.
                detailtouse = readlock.getGdp();
            } else {
                // Non-identity transform (or no readable detail):
                // unpack into a temporary detail first.
                pprim->unpackWithContext(tmpdetail, packedcontext);
                detailtouse = &tmpdetail;
            }

            GU_VDBPointList<openvdb::Vec3R> points(*detailtouse);
            openvdb::MaskGrid::Ptr grid = openvdb::tools::createPointMask(points, mXForm);
            mMaskGrid->tree().topologyUnion(grid->tree());
        }
    }

private:
    GA_Primitive const * const * const mPrims;
    openvdb::math::Transform mXForm;
    openvdb::MaskGrid::Ptr mMaskGrid;
}; // struct PackedMaskConstructor


/// @brief Collect pointers to all packed primitives in @a detail.
inline void
getPackedPrimitiveOffsets(const GU_Detail& detail, std::vector<const GA_Primitive*>& primitives)
{
    const GA_Size numPacked = GU_PrimPacked::countPackedPrimitives(detail);

    primitives.clear();
    primitives.reserve(size_t(numPacked));

    if (numPacked != GA_Size(0)) {
        GA_Offset start, end;
        GA_Range range = detail.getPrimitiveRange();
        const GA_PrimitiveList& primList = detail.getPrimitiveList();

        for (GA_Iterator it = range.begin(); it.blockAdvance(start, end); ) {
            for (GA_Offset off = start; off < end; ++off) {
                const GA_Primitive *prim = primList.get(off);
                if (prim && GU_PrimPacked::isPackedPrimitive(*prim)) {
                    primitives.push_back(prim);
                }
            }
        }
    }
}

} // namespace GU_VDBPointToolsInternal


////////////////////////////////////////


/// @brief Utility method to construct a GU_VDBPointList.
/// @details The GU_VDBPointList is compatible with the PointIndexGrid and ParticleAtlas structures.
inline GU_VDBPointList<openvdb::Vec3s>::Ptr
GUvdbCreatePointList(const GU_Detail& detail, const GA_PointGroup* pointGroup = nullptr)
{
    return GU_VDBPointList<openvdb::Vec3s>::create(detail, pointGroup);
}


/// @brief Utility method to change point indices into Houdini geometry offsets.
/// @note PointIndexGrid's that store Houdini geometry offsets are not
/// safe to write to disk, offsets are not guaranteed to be immutable
/// under defragmentation operations or I/O.
template<typename PointIndexTreeType, typename PointArrayType>
inline void
GUvdbConvertIndexToOffset(PointIndexTreeType& tree, const PointArrayType& points)
{
    // Visit every leaf in parallel and rewrite its indices as offsets.
    openvdb::tree::LeafManager<PointIndexTreeType> leafnodes(tree);
    leafnodes.foreach(GU_VDBPointToolsInternal::IndexToOffsetOp<PointArrayType>(points));
}


/// @brief Utility method to construct a PointIndexGrid.
/// @details The PointIndexGrid supports fast spatial queries for points.
inline openvdb::tools::PointIndexGrid::Ptr
GUvdbCreatePointIndexGrid(
    const openvdb::math::Transform& xform,
    const GU_Detail& detail,
    const GA_PointGroup* pointGroup = nullptr)
{
    GU_VDBPointList<openvdb::Vec3s> points(detail, pointGroup);
    return openvdb::tools::createPointIndexGrid<openvdb::tools::PointIndexGrid>(points, xform);
}


/// @brief Utility method to construct a PointIndexGrid.
/// @details The PointIndexGrid supports fast spatial queries for points.
template<typename PointArrayType>
inline openvdb::tools::PointIndexGrid::Ptr
GUvdbCreatePointIndexGrid(const openvdb::math::Transform& xform, const PointArrayType& points)
{
    return openvdb::tools::createPointIndexGrid<openvdb::tools::PointIndexGrid>(points, xform);
}


/// @brief Utility method to construct a ParticleAtlas.
/// @details The ParticleAtlas supports fast spatial queries for particles.
/// @note Returns an empty (unconstructed) atlas if the particle array
///       has no radius attribute.
template<typename ParticleArrayType>
inline openvdb::tools::ParticleIndexAtlas::Ptr
GUvdbCreateParticleAtlas(const double minVoxelSize, const ParticleArrayType& particles)
{
    using ParticleIndexAtlas = openvdb::tools::ParticleIndexAtlas;
    ParticleIndexAtlas::Ptr atlas(new ParticleIndexAtlas());

    if (particles.hasRadius()) {
        atlas->construct(particles, minVoxelSize);
    }

    return atlas;
}


/// @brief Utility method to construct a boolean PointMaskGrid
/// @details This method supports packed points.
inline openvdb::MaskGrid::Ptr
GUvdbCreatePointMaskGrid(
    const openvdb::math::Transform& xform,
    const GU_Detail& detail,
    const GA_PointGroup* pointGroup = nullptr)
{
    // Packed primitives (if any) take precedence; they are processed
    // in parallel via PackedMaskConstructor.
    std::vector<const GA_Primitive*> packed;
    GU_VDBPointToolsInternal::getPackedPrimitiveOffsets(detail, packed);

    if (!packed.empty()) {
        GU_VDBPointToolsInternal::PackedMaskConstructor op(packed, xform);
        tbb::parallel_reduce(tbb::blocked_range<size_t>(0, packed.size()), op);
        return op.getMaskGrid();
    }

    // Otherwise mask the detail's (optionally grouped) points directly.
    GU_VDBPointList<openvdb::Vec3R> points(detail, pointGroup);
    return openvdb::tools::createPointMask(points, xform);
}


/// @brief Utility method to construct a PointIndexGrid that stores
/// Houdini geometry offsets.
///
/// @note PointIndexGrid's that store Houdini geometry offsets are not
/// safe to write to disk, offsets are not guaranteed to be immutable
/// under defragmentation operations or I/O.
inline openvdb::tools::PointIndexGrid::Ptr
GUvdbCreatePointOffsetGrid(
    const openvdb::math::Transform& xform,
    const GU_Detail& detail,
    const GA_PointGroup* pointGroup = nullptr)
{
    GU_VDBPointList<openvdb::Vec3s> points(detail, pointGroup);

    // Build an index grid first, then rewrite its indices as offsets.
    openvdb::tools::PointIndexGrid::Ptr grid =
        openvdb::tools::createPointIndexGrid<openvdb::tools::PointIndexGrid>(points, xform);

    GUvdbConvertIndexToOffset(grid->tree(), points);

    return grid;
}


#endif // GU_VDBPOINTTOOLS_H_HAS_BEEN_INCLUDED
12,546
C
31.337629
102
0.661326
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Scatter.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Scatter.cc /// /// @author FX R&D OpenVDB team /// /// @brief Scatter points on a VDB grid, either by fixed count or by /// global or local point density. #include <UT/UT_Assert.h> #include <UT/UT_ParallelUtil.h> // for UTparallelForLightItems() #include <GA/GA_SplittableRange.h> #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb_houdini/Utils.h> #include <openvdb/math/Math.h> #include <openvdb/math/Stencils.h> #include <openvdb/points/PointDelete.h> #include <openvdb/points/PointGroup.h> #include <openvdb/points/PointScatter.h> #include <openvdb/tools/GridOperators.h> // for tools::cpt() #include <openvdb/tools/Interpolation.h> // for tools::BoxSampler #include <openvdb/tools/LevelSetRebuild.h> #include <openvdb/tools/LevelSetUtil.h> #include <openvdb/tools/Morphology.h> // for tools::dilateVoxels() #include <openvdb/tools/PointScatter.h> #include <openvdb/tree/LeafManager.h> #include <hboost/algorithm/string/join.hpp> #include <iostream> #include <random> #include <stdexcept> #include <string> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; class SOP_OpenVDB_Scatter: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Scatter(OP_Network* net, const char* name, OP_Operator* op); ~SOP_OpenVDB_Scatter() override {} static OP_Node* factory(OP_Network*, const char*, OP_Operator*); class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; void syncNodeVersion(const char* oldVersion, const char*, bool*) override; protected: bool updateParmsFlags() override; void resolveObsoleteParms(PRM_ParmList*) override; }; //////////////////////////////////////// // Build UI and register this operator. 
void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Specify a subset of the input VDBs over which to scatter points.") .setDocumentation( "A subset of the input VDBs over which to scatter points" " (see [specifying volumes|/model/volumes#group])")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "keep", "Keep Original Geometry") .setDefault(PRMzeroDefaults) .setTooltip("If enabled, the incoming geometry will not be deleted.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "vdbpoints", "Scatter VDB Points") .setDefault(PRMzeroDefaults) .setTooltip("Generate VDB Points instead of Houdini Points.")); parms.add(hutil::ParmFactory(PRM_ORD, "outputname", "Output Name") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "keep", "Keep Original Name", "append", "Add Suffix", "replace", "Custom Name", }) .setTooltip( "Give the output VDB Points volumes the same names as the input VDBs,\n" "or add a suffix to the input name, or use a custom name.")); parms.add(hutil::ParmFactory(PRM_STRING, "customname", "Custom Name") .setDefault("points") .setTooltip("The suffix or custom name to be used")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "dogroup", "") .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setDefault(PRMzeroDefaults)); parms.add(hutil::ParmFactory(PRM_STRING, "sgroup", "Scatter Group") .setDefault(0, "scatter") .setTooltip("If enabled, add scattered points to the group with this name.")); parms.add(hutil::ParmFactory(PRM_INT_J, "seed", "Random Seed") .setDefault(PRMzeroDefaults) .setTooltip("The random number seed")); parms.add(hutil::ParmFactory(PRM_FLT_J, "spread", "Spread") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_RESTRICTED, 1.0) .setTooltip( "How far each point may be displaced from the center of its voxel or tile,\n" "as a fraction of the voxel or tile size\n\n" "A 
value of zero means that the point is placed exactly at the center." " A value of one means that the point can be placed randomly anywhere" " inside the voxel or tile.\n\n" "When the __SDF Domain__ is an __Isosurface__, a value of zero means that the point" " is placed exactly on the isosurface, and a value of one means that the point" " can be placed randomly anywhere within one voxel of the isosurface.\n" "Frustum grids are currently not properly supported, however.")); parms.add(hutil::ParmFactory(PRM_ORD, "pointmode", "Mode") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "count", "Point Total", "density", "Point Density", "pointspervoxel", "Points Per Voxel", }) .setTooltip( "How to determine the number of points to scatter\n\n" "Point Total:\n" " Specify a fixed, total point count.\n" "Point Density:\n" " Specify the number of points per unit volume.\n" "Points Per Voxel:\n" " Specify the number of points per voxel.")); parms.add(hutil::ParmFactory(PRM_INT_J, "count", "Point Total") .setDefault(5000) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 10000) .setTooltip("The total number of points to scatter")); parms.add(hutil::ParmFactory(PRM_FLT_J , "ppv", "Points Per Voxel") .setDefault(8) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 10) .setTooltip("The number of points per voxel")); parms.add(hutil::ParmFactory(PRM_FLT_LOG, "density", "Point Density") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 1000000) .setTooltip("The number of points per unit volume")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "multiply", "Scale Density by Voxel Values") .setDefault(PRMzeroDefaults) .setTooltip( "For scalar-valued VDBs other than signed distance fields," " use voxel values as local multipliers for point density.")); parms.add(hutil::ParmFactory(PRM_ORD, "poscompression", "Position Compression") .setDefault(PRMoneDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "none", "None", "int16", "16-bit Fixed Point", "int8", "8-bit Fixed 
Point" }) .setTooltip("The position attribute compression setting.") .setDocumentation( "The position can be stored relative to the center of the voxel.\n" "This means it does not require the full 32-bit float representation,\n" "but can be quantized to a smaller fixed-point value.")); parms.add(hutil::ParmFactory(PRM_STRING, "sdfdomain", "SDF Domain") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "interior", "Interior", "surface", "Isosurface", "band", "Narrow Band", }) .setDefault("band") .setTooltip( "For signed distance field VDBs, the region over which to scatter points\n\n" "Interior:\n" " Scatter points inside the specified isosurface.\n" "Isosurface:\n" " Scatter points only on the specified isosurface.\n" "Narrow Band:\n" " Scatter points only in the narrow band surrounding the zero crossing.")); parms.add(hutil::ParmFactory(PRM_FLT_J, "isovalue", "Isovalue") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_UI, -1.0, PRM_RANGE_UI, 1.0) .setTooltip("The voxel value that determines the isosurface") .setDocumentation( "The voxel value that determines the isosurface\n\n" "For fog volumes, use a value larger than zero.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "cliptoisosurface", "Clip to Isosurface") .setDefault(PRMzeroDefaults) .setTooltip("When scattering VDB Points, remove points outside the isosurface.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "verbose", "Verbose") .setDefault(PRMzeroDefaults) .setTooltip("Print the sequence of operations to the terminal.")); // Obsolete parameters hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_ORD| PRM_TYPE_JOIN_NEXT, "pointMode", "Point")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "interior", "Scatter Points Inside Level Sets") .setDefault(PRMzeroDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_SEPARATOR, "sep1", "")); obsoleteParms.add(hutil::ParmFactory(PRM_SEPARATOR, "sep2", "")); // Register the SOP. 
hvdb::OpenVDBOpFactory("VDB Scatter", SOP_OpenVDB_Scatter::factory, parms, *table) .setNativeName("") .setObsoleteParms(obsoleteParms) .addInput("VDBs on which points will be scattered") .setVerb(SOP_NodeVerb::COOK_GENERIC, []() { return new SOP_OpenVDB_Scatter::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Scatter Houdini or VDB points on a VDB volume.\"\"\"\n\ \n\ @overview\n\ \n\ This node scatters points randomly on or inside VDB volumes.\n\ The number of points generated can be specified either by fixed count\n\ or by global or local point density.\n\ \n\ Output can be in the form of either Houdini points or VDB Points volumes.\n\ In the latter case, a VDB Points volume is created for each source VDB,\n\ with the same transform and topology as the source.\n\ \n\ For signed distance field or fog volume VDBs, points can be scattered\n\ either throughout the interior of the volume or only on an isosurface.\n\ For level sets, an additional option is to scatter points only in the\n\ [narrow band|http://www.openvdb.org/documentation/doxygen/overview.html#secGrid]\n\ surrounding the zero crossing.\n\ For all other volumes, points are scattered in active voxels.\n\ \n\ @related\n\ - [Node:sop/scatter]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } void SOP_OpenVDB_Scatter::syncNodeVersion(const char* oldVersion, const char*, bool*) { // Since VDB 7.0.0, position compression is now set to 16-bit fixed point // by default. Detect if the VDB version that this node was created with // was earlier than 7.0.0 and revert back to null compression if so to // prevent potentially breaking older scenes. 
// VDB version string prior to 6.2.0 - "17.5.204" // VDB version string since 6.2.0 - "vdb6.2.0 houdini17.5.204" openvdb::Name oldVersionStr(oldVersion); bool disableCompression = false; size_t spacePos = oldVersionStr.find_first_of(' '); if (spacePos == std::string::npos) { // no space in VDB versions prior to 6.2.0 disableCompression = true; } else if (oldVersionStr.size() > 3 && oldVersionStr.substr(0,3) == "vdb") { std::string vdbVersion = oldVersionStr.substr(3,spacePos-3); // disable compression in VDB version 6.2.1 or earlier if (UT_String::compareVersionString(vdbVersion.c_str(), "6.2.1") <= 0) { disableCompression = true; } } if (disableCompression) { setInt("poscompression", 0, 0, 0); } } void SOP_OpenVDB_Scatter::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; PRM_Parm* parm = obsoleteParms->getParmPtr("interior"); if (parm && !parm->isFactoryDefault()) { // default was to scatter in the narrow band setString(UT_String("interior"), CH_STRING_LITERAL, "sdfdomain", 0, 0.0); } hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } bool SOP_OpenVDB_Scatter::updateParmsFlags() { bool changed = false; const fpreal time = 0; const auto vdbpoints = evalInt("vdbpoints", /*idx=*/0, time); const auto pmode = evalInt("pointmode", /*idx=*/0, time); const auto sdfdomain = evalStdString("sdfdomain", time); changed |= setVisibleState("count", (0 == pmode)); changed |= setVisibleState("density", (1 == pmode)); changed |= setVisibleState("multiply", (1 == pmode)); changed |= setVisibleState("ppv", (2 == pmode)); changed |= setVisibleState("name", (1 == vdbpoints)); changed |= setVisibleState("outputname", (1 == vdbpoints)); changed |= setVisibleState("customname", (1 == vdbpoints)); changed |= setVisibleState("cliptoisosurface", (1 == vdbpoints)); changed |= setVisibleState("poscompression", (1 == vdbpoints)); changed |= enableParm("customname", (0 != evalInt("outputname", 0, time))); changed |= enableParm("sgroup", (1 == 
evalInt("dogroup", 0, time))); changed |= enableParm("isovalue", (sdfdomain != "band")); changed |= enableParm("cliptoisosurface", (sdfdomain != "band")); return changed; } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Scatter::factory(OP_Network* net, const char* name, OP_Operator *op) { return new SOP_OpenVDB_Scatter(net, name, op); } SOP_OpenVDB_Scatter::SOP_OpenVDB_Scatter(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// // Simple wrapper class required by openvdb::tools::UniformPointScatter and // NonUniformPointScatter class PointAccessor { public: PointAccessor(GEO_Detail* gdp): mGdp(gdp) {} void add(const openvdb::Vec3R& pos) { const GA_Offset ptoff = mGdp->appendPointOffset(); mGdp->setPos3(ptoff, pos.x(), pos.y(), pos.z()); } protected: GEO_Detail* mGdp; }; //////////////////////////////////////// /// @brief Functor to translate points toward an isosurface class SnapPointsOp { public: using Sampler = openvdb::tools::BoxSampler; enum class PointType { kInvalid, kHoudini, kVDB }; // Constructor for Houdini points SnapPointsOp(GEO_Detail& detail, const GA_Range& range, float spread, float isovalue, bool rebuild, bool dilate, openvdb::BoolGrid::Ptr mask, hvdb::Interrupter* interrupter) : mPointType(range.isValid() && !range.empty() ? 
PointType::kHoudini : PointType::kInvalid) , mDetail(&detail) , mRange(range) , mSpread(spread) , mIsovalue(isovalue) , mRebuild(rebuild) , mDilate(dilate) , mMask(mask) , mBoss(interrupter) { } // Constructor for VDB points SnapPointsOp(openvdb::points::PointDataGrid& vdbpts, float spread, float isovalue, bool rebuild, bool dilate, openvdb::BoolGrid::Ptr mask, hvdb::Interrupter* interrupter) : mVdbPoints(&vdbpts) , mSpread(spread) , mIsovalue(isovalue) , mRebuild(rebuild) , mDilate(dilate) , mMask(mask) , mBoss(interrupter) { const auto leafIter = vdbpts.tree().cbeginLeaf(); const auto descriptor = leafIter->attributeSet().descriptor(); mAttrIdx = descriptor.find("P"); mPointType = (mAttrIdx != openvdb::points::AttributeSet::INVALID_POS) ? PointType::kVDB : PointType::kInvalid; } template<typename GridT> void operator()(const GridT& aGrid) { if (mPointType == PointType::kInvalid) return; const GridT* grid = &aGrid; // Replace the input grid with a rebuilt narrow-band level set, if requested // (typically because the isovalue is nonzero). typename GridT::Ptr sdf; if (mRebuild) { const float width = openvdb::LEVEL_SET_HALF_WIDTH; sdf = openvdb::tools::levelSetRebuild(*grid, mIsovalue, /*exterior=*/width, /*interior=*/width, /*xform=*/nullptr, mBoss); if (sdf) { grid = sdf.get(); mMask.reset(); // no need for a mask now that the input is a narrow-band level set } } // Compute the closest point transform of the SDF. const auto cpt = [&]() { if (!mMask) { return openvdb::tools::cpt(*grid, /*threaded=*/true, mBoss); } else { if (mDilate) { // Dilate the isosurface mask to produce a suitably large CPT mask, // to avoid unnecessary work in case the input is a dense SDF. 
const int iterations = static_cast<int>(openvdb::LEVEL_SET_HALF_WIDTH); openvdb::tools::dilateVoxels( mMask->tree(), iterations, openvdb::tools::NN_FACE_EDGE); } return openvdb::tools::cpt(*grid, *mMask, /*threaded=*/true, mBoss); } }(); const auto& xform = aGrid.transform(); if (mPointType == PointType::kHoudini) { // Translate Houdini points toward the isosurface. UTparallelForLightItems(GA_SplittableRange(mRange), [&](const GA_SplittableRange& r) { const auto cptAcc = cpt->getConstAccessor(); auto start = GA_Offset(GA_INVALID_OFFSET), end = GA_Offset(GA_INVALID_OFFSET); for (GA_Iterator it(r); it.blockAdvance(start, end); ) { if (mBoss && mBoss->wasInterrupted()) break; for (auto offset = start; offset < end; ++offset) { openvdb::Vec3d p{UTvdbConvert(mDetail->getPos3(offset))}; // Compute the closest surface point by linear interpolation. const auto surfaceP = Sampler::sample(cptAcc, xform.worldToIndex(p)); // Translate the input point toward the surface. p = surfaceP + mSpread * (p - surfaceP); // (1-spread)*surfaceP + spread*p mDetail->setPos3(offset, p.x(), p.y(), p.z()); } } }); } else /*if (mPointType == PointType::kVDB)*/ { // Translate VDB points toward the isosurface. using LeafMgr = openvdb::tree::LeafManager<openvdb::points::PointDataTree>; LeafMgr leafMgr(mVdbPoints->tree()); UTparallelForLightItems(leafMgr.leafRange(), [&](const LeafMgr::LeafRange& range) { const auto cptAcc = cpt->getConstAccessor(); for (auto leafIter = range.begin(); leafIter; ++leafIter) { if (mBoss && mBoss->wasInterrupted()) break; // Get a handle to this leaf node's point position array. auto& posArray = leafIter->attributeArray(mAttrIdx); openvdb::points::AttributeWriteHandle<openvdb::Vec3f> posHandle(posArray); // For each point in this leaf node... for (auto idxIter = leafIter->beginIndexOn(); idxIter; ++idxIter) { // The point position is in index space and is relative to // the center of the voxel. 
const auto idxCenter = idxIter.getCoord().asVec3d(); const auto idxP = posHandle.get(*idxIter) + idxCenter; // Compute the closest surface point by linear interpolation. const openvdb::Vec3f surfaceP(Sampler::sample(cptAcc, idxP)); // Translate the input point toward the surface. auto p = xform.indexToWorld(idxP); p = surfaceP + mSpread * (p - surfaceP); // (1-spread)*surfaceP + spread*p // Transform back to index space relative to the voxel center. posHandle.set(*idxIter, xform.worldToIndex(p) - idxCenter); } } }); } } private: PointType mPointType = PointType::kInvalid; openvdb::points::PointDataGrid* mVdbPoints = nullptr; // VDB points to be processed openvdb::Index64 mAttrIdx = openvdb::points::AttributeSet::INVALID_POS; GEO_Detail* mDetail = nullptr; // the detail containing Houdini points to be processed GA_Range mRange; // the range of points to be processed float mSpread = 1; // if 0, place points on the isosurface; if 1, don't move them float mIsovalue = 0; bool mRebuild = false; // if true, generate a new SDF from the input grid bool mDilate = false; // if true, dilate the isosurface mask openvdb::BoolGrid::Ptr mMask; // an optional isosurface mask hvdb::Interrupter* mBoss = nullptr; }; // class SnapPointsOp //////////////////////////////////////// struct BaseScatter { using NullCodec = openvdb::points::NullCodec; using FixedCodec16 = openvdb::points::FixedPointCodec<false>; using FixedCodec8 = openvdb::points::FixedPointCodec<true>; using PositionArray = openvdb::points::TypedAttributeArray<openvdb::Vec3f, NullCodec>; using PositionArray16 = openvdb::points::TypedAttributeArray<openvdb::Vec3f, FixedCodec16>; using PositionArray8 = openvdb::points::TypedAttributeArray<openvdb::Vec3f, FixedCodec8>; BaseScatter(const unsigned int seed, const float spread, hvdb::Interrupter* interrupter) : mPoints() , mSeed(seed) , mSpread(spread) , mInterrupter(interrupter) {} virtual ~BaseScatter() {} /// @brief Print information about the scattered points /// @parm name A 
name to insert into the printed info /// @parm os The output stream virtual void print(const std::string &name, std::ostream& os = std::cout) const { if (!mPoints) return; const openvdb::Index64 points = openvdb::points::pointCount(mPoints->tree()); const openvdb::Index64 voxels = mPoints->activeVoxelCount(); os << points << " points into " << voxels << " active voxels in \"" << name << "\" corresponding to " << (double(points) / double(voxels)) << " points per voxel." << std::endl; } inline openvdb::points::PointDataGrid::Ptr points() { UT_ASSERT(mPoints); return mPoints; } protected: openvdb::points::PointDataGrid::Ptr mPoints; const unsigned int mSeed; const float mSpread; hvdb::Interrupter* mInterrupter; }; // BaseScatter struct VDBUniformScatter : public BaseScatter { VDBUniformScatter(const openvdb::Index64 count, const unsigned int seed, const float spread, const int compression, hvdb::Interrupter* interrupter) : BaseScatter(seed, spread, interrupter) , mCount(count) , mCompression(compression) {} template <typename PositionT, typename GridT> inline void resolveCompression(const GridT& grid) { using namespace openvdb::points; using PointDataGridT = openvdb::Grid<typename TreeConverter<typename GridT::TreeType>::Type>; mPoints = openvdb::points::uniformPointScatter< GridT, std::mt19937, PositionT, PointDataGridT, hvdb::Interrupter>( grid, mCount, mSeed, mSpread, mInterrupter); } template <typename GridT> inline void operator()(const GridT& grid) { if (mCompression == 1) { this->resolveCompression<PositionArray16>(grid); } else if (mCompression == 2) { this->resolveCompression<PositionArray8>(grid); } else { this->resolveCompression<PositionArray>(grid); } } void print(const std::string &name, std::ostream& os = std::cout) const override { os << "Uniformly scattered "; BaseScatter::print(name, os); } const openvdb::Index64 mCount; const int mCompression; }; // VDBUniformScatter struct VDBDenseUniformScatter : public BaseScatter { VDBDenseUniformScatter(const 
float pointsPerVoxel, const unsigned int seed, const float spread, const int compression, hvdb::Interrupter* interrupter) : BaseScatter(seed, spread, interrupter) , mPointsPerVoxel(pointsPerVoxel) , mCompression(compression) {} template <typename PositionT, typename GridT> inline void resolveCompression(const GridT& grid) { using namespace openvdb::points; using PointDataGridT = openvdb::Grid<typename TreeConverter<typename GridT::TreeType>::Type>; mPoints = denseUniformPointScatter<GridT, std::mt19937, PositionT, PointDataGridT, hvdb::Interrupter>(grid, mPointsPerVoxel, mSeed, mSpread, mInterrupter); } template <typename GridT> inline void operator()(const GridT& grid) { if (mCompression == 1) { this->resolveCompression<PositionArray16>(grid); } else if (mCompression == 2) { this->resolveCompression<PositionArray8>(grid); } else { this->resolveCompression<PositionArray>(grid); } } void print(const std::string &name, std::ostream& os = std::cout) const override { os << "Dense uniformly scattered "; BaseScatter::print(name, os); } const float mPointsPerVoxel; const int mCompression; }; // VDBDenseUniformScatter struct VDBNonUniformScatter : public BaseScatter { VDBNonUniformScatter(const float pointsPerVoxel, const unsigned int seed, const float spread, const int compression, hvdb::Interrupter* interrupter) : BaseScatter(seed, spread, interrupter) , mPointsPerVoxel(pointsPerVoxel) , mCompression(compression) {} template <typename PositionT, typename GridT> inline void resolveCompression(const GridT& grid) { using namespace openvdb::points; using PointDataGridT = openvdb::Grid<typename TreeConverter<typename GridT::TreeType>::Type>; mPoints = nonUniformPointScatter<GridT, std::mt19937, PositionT, PointDataGridT, hvdb::Interrupter>(grid, mPointsPerVoxel, mSeed, mSpread, mInterrupter); } template <typename GridT> inline void operator()(const GridT& grid) { if (mCompression == 1) { this->resolveCompression<PositionArray16>(grid); } else if (mCompression == 2) { 
this->resolveCompression<PositionArray8>(grid); } else { this->resolveCompression<PositionArray>(grid); } } void print(const std::string &name, std::ostream& os = std::cout) const override { os << "Non-uniformly scattered "; BaseScatter::print(name, os); } const float mPointsPerVoxel; const int mCompression; }; // VDBNonUniformScatter template <typename SurfaceGridT> struct MarkPointsOutsideIso { using GroupIndex = openvdb::points::AttributeSet::Descriptor::GroupIndex; using LeafManagerT = openvdb::tree::LeafManager<openvdb::points::PointDataTree>; using PositionHandleT = openvdb::points::AttributeHandle<openvdb::Vec3f, openvdb::points::NullCodec>; using SurfaceValueT = typename SurfaceGridT::ValueType; MarkPointsOutsideIso(const SurfaceGridT& grid, const GroupIndex& deadIndex) : mGrid(grid) , mDeadIndex(deadIndex) {} void operator()(const LeafManagerT::LeafRange& range) const { openvdb::math::BoxStencil<const SurfaceGridT> stencil(mGrid); for (auto leaf = range.begin(); leaf; ++leaf) { PositionHandleT::Ptr positionHandle = PositionHandleT::create(leaf->constAttributeArray(0)); openvdb::points::GroupWriteHandle deadHandle = leaf->groupWriteHandle(mDeadIndex); for (auto voxel = leaf->cbeginValueOn(); voxel; ++voxel) { const openvdb::Coord& ijk = voxel.getCoord(); const openvdb::Vec3d vec = ijk.asVec3d(); for (auto iter = leaf->beginIndexVoxel(ijk); iter; ++iter) { const openvdb::Index index = *iter; const openvdb::Vec3d pos = openvdb::Vec3d(positionHandle->get(index)) + vec; stencil.moveTo(pos); if (stencil.interpolation(pos) > openvdb::zeroVal<SurfaceValueT>()) { deadHandle.set(index, true); } } } } } private: const SurfaceGridT& mGrid; const GroupIndex& mDeadIndex; }; // MarkPointsOutsideIso template<typename OpType> inline bool process(const UT_VDBType type, const openvdb::GridBase& grid, OpType& op, const std::string* name) { bool success = grid.apply<hvdb::AllGridTypes>(op); if (name) op.print(*name); return success; } // Extract an SDF interior mask in which 
to scatter points. inline openvdb::BoolGrid::Ptr extractInteriorMask(const openvdb::GridBase::ConstPtr grid, const float isovalue) { if (grid->isType<openvdb::FloatGrid>()) { return openvdb::tools::sdfInteriorMask( static_cast<const openvdb::FloatGrid&>(*grid), isovalue); } else if (grid->isType<openvdb::DoubleGrid>()) { return openvdb::tools::sdfInteriorMask( static_cast<const openvdb::DoubleGrid&>(*grid), isovalue); } return nullptr; } // Extract an SDF isosurface mask in which to scatter points. inline openvdb::BoolGrid::Ptr extractIsosurfaceMask(const openvdb::GridBase::ConstPtr grid, const float isovalue) { if (grid->isType<openvdb::FloatGrid>()) { return openvdb::tools::extractIsosurfaceMask( static_cast<const openvdb::FloatGrid&>(*grid), isovalue); } else if (grid->isType<openvdb::DoubleGrid>()) { return openvdb::tools::extractIsosurfaceMask( static_cast<const openvdb::DoubleGrid&>(*grid), double(isovalue)); } return nullptr; } // Remove VDB Points scattered outside of a level set inline void cullVDBPoints(openvdb::points::PointDataTree& tree, const openvdb::GridBase::ConstPtr grid) { const auto leaf = tree.cbeginLeaf(); if (leaf) { using GroupIndex = openvdb::points::AttributeSet::Descriptor::GroupIndex; openvdb::points::appendGroup(tree, "dead"); const GroupIndex idx = leaf->attributeSet().groupIndex("dead"); openvdb::tree::LeafManager<openvdb::points::PointDataTree> leafManager(tree); if (grid->isType<openvdb::FloatGrid>()) { const openvdb::FloatGrid& typedGrid = static_cast<const openvdb::FloatGrid&>(*grid); MarkPointsOutsideIso<openvdb::FloatGrid> mark(typedGrid, idx); tbb::parallel_for(leafManager.leafRange(), mark); } else if (grid->isType<openvdb::DoubleGrid>()) { const openvdb::DoubleGrid& typedGrid = static_cast<const openvdb::DoubleGrid&>(*grid); MarkPointsOutsideIso<openvdb::DoubleGrid> mark(typedGrid, idx); tbb::parallel_for(leafManager.leafRange(), mark); } openvdb::points::deleteFromGroup(tree, "dead"); } } 
//////////////////////////////////////// OP_ERROR SOP_OpenVDB_Scatter::Cache::cookVDBSop(OP_Context& context) { try { hvdb::Interrupter boss("Scattering points on VDBs"); const fpreal time = context.getTime(); const bool keepGrids = (0 != evalInt("keep", 0, time)); const auto* vdbgeo = inputGeo(0); if (keepGrids && vdbgeo) { gdp->replaceWith(*vdbgeo); } else { gdp->stashAll(); } const int seed = static_cast<int>(evalInt("seed", 0, time)); const auto theSpread = static_cast<float>(evalFloat("spread", 0, time)); const bool verbose = evalInt("verbose", 0, time) != 0; const openvdb::Index64 pointCount = evalInt("count", 0, time); const float ptsPerVox = static_cast<float>(evalFloat("ppv", 0, time)); const auto sdfdomain = evalStdString("sdfdomain", time); const float density = static_cast<float>(evalFloat("density", 0, time)); const bool multiplyDensity = evalInt("multiply", 0, time) != 0; const auto theIsovalue = static_cast<float>(evalFloat("isovalue", 0, time)); const int outputName = static_cast<int>(evalInt("outputname", 0, time)); const std::string customName = evalStdString("customname", time); // Get the group of grids to process. const GA_PrimitiveGroup* group = matchGroup(*vdbgeo, evalStdString("group", time)); // Choose a fast random generator with a long period. Drawback here for // mt11213b is that it requires 352*sizeof(uint32) bytes. using RandGen = std::mersenne_twister_engine<uint32_t, 32, 351, 175, 19, 0xccab8ee7, 11, 0xffffffff, 7, 0x31b6ab00, 15, 0xffe50000, 17, 1812433253>; // mt11213b RandGen mtRand(seed); const auto pmode = evalInt("pointmode", 0, time); const bool vdbPoints = evalInt("vdbpoints", 0, time) == 1; const bool clipPoints = vdbPoints && bool(evalInt("cliptoisosurface", 0, time)); const int posCompression = vdbPoints ? 
static_cast<int>(evalInt("poscompression", 0, time)) : 0; const bool snapPointsToSurface = ((sdfdomain == "surface") && !openvdb::math::isApproxEqual(theSpread, 1.0f)); // If the domain is the isosurface, set the spread to 1 while generating points // so that each point ends up snapping to a unique point on the surface. const float spread = (snapPointsToSurface ? 1.f : theSpread); std::vector<std::string> emptyGrids; std::vector<openvdb::points::PointDataGrid::Ptr> pointGrids; PointAccessor pointAccessor(gdp); const GA_Offset firstOffset = gdp->getNumPointOffsets(); // Process each VDB primitive (with a non-null grid pointer) // that belongs to the selected group. for (hvdb::VdbPrimCIterator primIter(vdbgeo, group); primIter; ++primIter) { // Retrieve a read-only grid pointer. UT_VDBType gridType = primIter->getStorageType(); openvdb::GridBase::ConstPtr grid = primIter->getConstGridPtr(); const std::string gridName = primIter.getPrimitiveName().toStdString(); if (grid->empty()) { emptyGrids.push_back(gridName); continue; } const std::string* const name = verbose ? &gridName : nullptr; const openvdb::GridClass gridClass = grid->getGridClass(); const bool isSignedDistance = (gridClass == openvdb::GRID_LEVEL_SET); bool performCull = false; const auto isovalue = (gridClass != openvdb::GRID_FOG_VOLUME) ? theIsovalue : openvdb::math::Clamp(theIsovalue, openvdb::math::Tolerance<float>::value(), 1.f); openvdb::BoolGrid::Ptr mask; if (sdfdomain != "band") { auto iso = isovalue; if (clipPoints) { const openvdb::Vec3d voxelSize = grid->voxelSize(); const double maxVoxelSize = openvdb::math::Max(voxelSize.x(), voxelSize.y(), voxelSize.z()); iso += static_cast<float>(maxVoxelSize / 2.0); performCull = true; } if (sdfdomain == "interior") { if (isSignedDistance) { // If the input is an SDF, compute a mask of its interior. // (Fog volumes are their own interior masks.) 
mask = extractInteriorMask(grid, iso); } } else if (sdfdomain == "surface") { mask = extractIsosurfaceMask(grid, iso); } if (mask) { grid = mask; gridType = UT_VDB_BOOL; } } std::string vdbName; if (vdbPoints) { if (outputName == 0) vdbName = gridName; else if (outputName == 1) vdbName = gridName + customName; else vdbName = customName; } openvdb::points::PointDataGrid::Ptr pointGrid; const auto postprocessVDBPoints = [&](BaseScatter& scatter, bool cull) { pointGrid = scatter.points(); if (cull) { cullVDBPoints(pointGrid->tree(), primIter->getConstGridPtr()); } pointGrid->setName(vdbName); pointGrids.push_back(pointGrid); if (verbose) scatter.print(gridName); }; using DenseScatterer = openvdb::tools::DenseUniformPointScatter< PointAccessor, RandGen, hvdb::Interrupter>; using NonuniformScatterer = openvdb::tools::NonUniformPointScatter< PointAccessor, RandGen, hvdb::Interrupter>; using UniformScatterer = openvdb::tools::UniformPointScatter< PointAccessor, RandGen, hvdb::Interrupter>; const GA_Offset startOffset = gdp->getNumPointOffsets(); switch (pmode) { case 0: // fixed point count if (vdbPoints) { // VDB points VDBUniformScatter scatter(pointCount, seed, spread, posCompression, &boss); if (process(gridType, *grid, scatter, name)) { postprocessVDBPoints(scatter, performCull); } } else { // Houdini points UniformScatterer scatter(pointAccessor, pointCount, mtRand, spread, &boss); process(gridType, *grid, scatter, name); } break; case 1: // points per unit volume if (multiplyDensity && !isSignedDistance) { // local density if (vdbPoints) { // VDB points const auto dim = grid->transform().voxelSize(); VDBNonUniformScatter scatter(static_cast<float>(density * dim.product()), seed, spread, posCompression, &boss); if (!grid->apply<hvdb::NumericGridTypes>(scatter)) { throw std::runtime_error( "Only scalar grids support voxel scaling of density"); } postprocessVDBPoints(scatter, /*cull=*/false); } else { // Houdini points NonuniformScatterer scatter(pointAccessor, 
density, mtRand, spread, &boss); if (!grid->apply<hvdb::NumericGridTypes>(scatter)) { throw std::runtime_error( "Only scalar grids support voxel scaling of density"); } if (verbose) scatter.print(gridName); } } else { // global density if (vdbPoints) { // VDB points const auto dim = grid->transform().voxelSize(); const auto totalPointCount = openvdb::Index64( density * dim.product() * double(grid->activeVoxelCount())); VDBUniformScatter scatter( totalPointCount, seed, spread, posCompression, &boss); if (process(gridType, *grid, scatter, name)) { postprocessVDBPoints(scatter, performCull); } } else { // Houdini points UniformScatterer scatter(pointAccessor, density, mtRand, spread, &boss); process(gridType, *grid, scatter, name); } } break; case 2: // points per voxel if (vdbPoints) { // VDB points VDBDenseUniformScatter scatter( ptsPerVox, seed, spread, posCompression, &boss); if (process(gridType, *grid, scatter, name)) { postprocessVDBPoints(scatter, performCull); } } else { // Houdini points DenseScatterer scatter(pointAccessor, ptsPerVox, mtRand, spread, &boss); process(gridType, *grid, scatter, name); } break; default: throw std::runtime_error( "Expected 0, 1 or 2 for \"pointmode\", got " + std::to_string(pmode)); } // switch pmode if (snapPointsToSurface) { // Dilate the mask if it is a single-voxel-wide isosurface mask. const bool dilate = (mask && (sdfdomain == "surface")); // Generate a new SDF if the input is a fog volume or if the isovalue is nonzero. const bool rebuild = (!isSignedDistance || !openvdb::math::isApproxZero(isovalue)); if (!vdbPoints) { const GA_Range range(gdp->getPointMap(),startOffset,gdp->getNumPointOffsets()); // Use the original spread value to control how close to the surface points lie. 
SnapPointsOp op{*gdp, range, theSpread, isovalue, rebuild, dilate, mask, &boss}; hvdb::GEOvdbApply<hvdb::RealGridTypes>(**primIter, op); // process the original input grid } else if (vdbPoints && pointGrid) { SnapPointsOp op{*pointGrid, theSpread, isovalue, rebuild, dilate, mask, &boss}; hvdb::GEOvdbApply<hvdb::RealGridTypes>(**primIter, op); } } } // for each grid if (!emptyGrids.empty()) { std::string s = "The following grids were empty: " + hboost::algorithm::join(emptyGrids, ", "); addWarning(SOP_MESSAGE, s.c_str()); } // add points to a group if requested if (1 == evalInt("dogroup", 0, time)) { const std::string groupName = evalStdString("sgroup", time); GA_PointGroup* ptgroup = gdp->newPointGroup(groupName.c_str()); // add the scattered points to this group const GA_Offset lastOffset = gdp->getNumPointOffsets(); ptgroup->addRange(GA_Range(gdp->getPointMap(), firstOffset, lastOffset)); for (auto& pointGrid: pointGrids) { openvdb::points::appendGroup(pointGrid->tree(), groupName); openvdb::points::setGroup(pointGrid->tree(), groupName); } } for (auto& pointGrid: pointGrids) { hvdb::createVdbPrimitive(*gdp, pointGrid, pointGrid->getName().c_str()); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
43,299
C++
39.810556
138
0.599621
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/pythonrc.py
# Copyright Contributors to the OpenVDB Project # SPDX-License-Identifier: MPL-2.0 # Startup script to set the visibility of (and otherwise customize) # open-source (ASWF) OpenVDB nodes and their native Houdini equivalents # # To be installed as <dir>/python2.7libs/pythonrc.py, # where <dir> is a path in $HOUDINI_PATH. import hou import os # Construct a mapping from ASWF SOP names to names of equivalent # native Houdini SOPs. sopcategory = hou.sopNodeTypeCategory() namemap = {} for name, sop in sopcategory.nodeTypes().items(): try: nativename = sop.spareData('nativename') if nativename: namemap[name] = nativename except AttributeError: pass # Print the list of correspondences. #from pprint import pprint #pprint(namemap) # Determine which VDB SOPs should be visible in the Tab menu: # - If $OPENVDB_OPHIDE_POLICY is set to 'aswf', hide AWSF SOPs for which # a native Houdini equivalent exists. # - If $OPENVDB_OPHIDE_POLICY is set to 'native', hide native Houdini SOPs # for which an ASWF equivalent exists. # - Otherwise, show both the ASWF and the native SOPs. names = [] ophide = os.getenv('OPENVDB_OPHIDE_POLICY', 'none').strip().lower() if ophide == 'aswf': names = namemap.keys() elif ophide == 'native': names = namemap.values() for name in names: sop = sopcategory.nodeType(name) if sop: sop.setHidden(True) # Customize SOP visibility with code like the following: # # # Hide the ASWF Clip SOP. # sopcategory.nodeType('DW_OpenVDBClip').setHidden(True) # # # Show the native VDB Clip SOP. # sopcategory.nodeType('vdbclip').setHidden(False) # # # Hide all ASWF advection SOPs for which a native equivalent exists. # for name in namemap.keys(): # if 'Advect' in name: # sopcategory.nodeType(name).setHidden(True)
1,853
Python
28.903225
74
0.697248
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Filter_Level_Set.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file SOP_OpenVDB_Filter_Level_Set.cc /// /// @author FX R&D OpenVDB team /// /// @brief Performs various types of level set deformations with /// interface tracking. These unrestricted deformations include /// surface smoothing (e.g., Laplacian flow), filtering (e.g., mean /// value) and morphological operations (e.g., morphological opening). /// All these operations can optionally be masked with another grid that /// acts as an alpha-mask. /// /// @note Works with level set grids of floating point type (float/double). #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/tools/LevelSetFilter.h> #include <OP/OP_AutoLockInputs.h> #include <UT/UT_Interrupt.h> #include <hboost/algorithm/string/case_conv.hpp> #include <hboost/algorithm/string/trim.hpp> #include <algorithm> #include <iostream> #include <stdexcept> #include <string> #include <vector> #undef DWA_DEBUG_MODE //#define DWA_DEBUG_MODE namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; //////////////////////////////////////// // Utilities namespace { // Add new items to the *end* of this list, and update NUM_OPERATOR_TYPES. enum OperatorType { OP_TYPE_RENORM = 0, OP_TYPE_RESHAPE, OP_TYPE_SMOOTH, OP_TYPE_RESIZE }; enum { NUM_OPERATOR_TYPES = OP_TYPE_RESIZE + 1 }; // Add new items to the *end* of this list, and update NUM_FILTER_TYPES. 
enum FilterType { FILTER_TYPE_NONE = -1, FILTER_TYPE_RENORMALIZE = 0, FILTER_TYPE_MEAN_VALUE, FILTER_TYPE_MEDIAN_VALUE, FILTER_TYPE_MEAN_CURVATURE, FILTER_TYPE_LAPLACIAN_FLOW, FILTER_TYPE_DILATE, FILTER_TYPE_ERODE, FILTER_TYPE_OPEN, FILTER_TYPE_CLOSE, FILTER_TYPE_TRACK, FILTER_TYPE_GAUSSIAN, FILTER_TYPE_RESIZE }; enum { NUM_FILTER_TYPES = FILTER_TYPE_RESIZE + 1 }; std::string filterTypeToString(FilterType filter) { std::string ret; switch (filter) { case FILTER_TYPE_NONE: ret = "none"; break; case FILTER_TYPE_RENORMALIZE: ret = "renormalize"; break; case FILTER_TYPE_RESIZE: ret = "resize narrow band"; break; case FILTER_TYPE_GAUSSIAN: ret = "gaussian"; break; case FILTER_TYPE_DILATE: ret = "dilate"; break; case FILTER_TYPE_ERODE: ret = "erode"; break; case FILTER_TYPE_OPEN: ret = "open"; break; case FILTER_TYPE_CLOSE: ret = "close"; break; case FILTER_TYPE_TRACK: ret = "track"; break; #ifndef SESI_OPENVDB case FILTER_TYPE_MEAN_VALUE: ret = "mean value"; break; case FILTER_TYPE_MEDIAN_VALUE: ret = "median value"; break; case FILTER_TYPE_MEAN_CURVATURE: ret = "mean curvature"; break; case FILTER_TYPE_LAPLACIAN_FLOW: ret = "laplacian flow"; break; #else case FILTER_TYPE_MEAN_VALUE: ret = "meanvalue"; break; case FILTER_TYPE_MEDIAN_VALUE: ret = "medianvalue"; break; case FILTER_TYPE_MEAN_CURVATURE: ret = "meancurvature"; break; case FILTER_TYPE_LAPLACIAN_FLOW: ret = "laplacianflow"; break; #endif } return ret; } std::string filterTypeToMenuName(FilterType filter) { std::string ret; switch (filter) { case FILTER_TYPE_NONE: ret = "None"; break; case FILTER_TYPE_RENORMALIZE: ret = "Renormalize"; break; case FILTER_TYPE_RESIZE: ret = "Resize Narrow Band"; break; case FILTER_TYPE_MEAN_VALUE: ret = "Mean Value"; break; case FILTER_TYPE_GAUSSIAN: ret = "Gaussian"; break; case FILTER_TYPE_MEDIAN_VALUE: ret = "Median Value"; break; case FILTER_TYPE_MEAN_CURVATURE: ret = "Mean Curvature Flow"; break; case FILTER_TYPE_LAPLACIAN_FLOW: ret = "Laplacian Flow"; break; case 
FILTER_TYPE_DILATE: ret = "Dilate"; break; case FILTER_TYPE_ERODE: ret = "Erode"; break; case FILTER_TYPE_OPEN: ret = "Open"; break; case FILTER_TYPE_CLOSE: ret = "Close"; break; case FILTER_TYPE_TRACK: ret = "Track Narrow Band"; break; } return ret; } FilterType stringToFilterType(const std::string& s) { FilterType ret = FILTER_TYPE_NONE; std::string str = s; hboost::trim(str); hboost::to_lower(str); if (str == filterTypeToString(FILTER_TYPE_RENORMALIZE)) { ret = FILTER_TYPE_RENORMALIZE; } else if (str == filterTypeToString(FILTER_TYPE_RESIZE)) { ret = FILTER_TYPE_RESIZE; } else if (str == filterTypeToString(FILTER_TYPE_MEAN_VALUE)) { ret = FILTER_TYPE_MEAN_VALUE; } else if (str == filterTypeToString(FILTER_TYPE_GAUSSIAN)) { ret = FILTER_TYPE_GAUSSIAN; } else if (str == filterTypeToString(FILTER_TYPE_MEDIAN_VALUE)) { ret = FILTER_TYPE_MEDIAN_VALUE; } else if (str == filterTypeToString(FILTER_TYPE_MEAN_CURVATURE)) { ret = FILTER_TYPE_MEAN_CURVATURE; } else if (str == filterTypeToString(FILTER_TYPE_LAPLACIAN_FLOW)) { ret = FILTER_TYPE_LAPLACIAN_FLOW; } else if (str == filterTypeToString(FILTER_TYPE_DILATE)) { ret = FILTER_TYPE_DILATE; } else if (str == filterTypeToString(FILTER_TYPE_ERODE)) { ret = FILTER_TYPE_ERODE; } else if (str == filterTypeToString(FILTER_TYPE_OPEN)) { ret = FILTER_TYPE_OPEN; } else if (str == filterTypeToString(FILTER_TYPE_CLOSE)) { ret = FILTER_TYPE_CLOSE; } else if (str == filterTypeToString(FILTER_TYPE_TRACK)) { ret = FILTER_TYPE_TRACK; } return ret; } // Add new items to the *end* of this list, and update NUM_ACCURACY_TYPES. 
enum Accuracy { ACCURACY_UPWIND_FIRST = 0, ACCURACY_UPWIND_SECOND, ACCURACY_UPWIND_THIRD, ACCURACY_WENO, ACCURACY_HJ_WENO }; enum { NUM_ACCURACY_TYPES = ACCURACY_HJ_WENO + 1 }; std::string accuracyToString(Accuracy ac) { std::string ret; switch (ac) { case ACCURACY_UPWIND_FIRST: ret = "upwind first"; break; case ACCURACY_UPWIND_SECOND: ret = "upwind second"; break; case ACCURACY_UPWIND_THIRD: ret = "upwind third"; break; case ACCURACY_WENO: ret = "weno"; break; case ACCURACY_HJ_WENO: ret = "hj weno"; break; } return ret; } std::string accuracyToMenuName(Accuracy ac) { std::string ret; switch (ac) { case ACCURACY_UPWIND_FIRST: ret = "First-order upwinding"; break; case ACCURACY_UPWIND_SECOND: ret = "Second-order upwinding"; break; case ACCURACY_UPWIND_THIRD: ret = "Third-order upwinding"; break; case ACCURACY_WENO: ret = "Fifth-order WENO"; break; case ACCURACY_HJ_WENO: ret = "Fifth-order HJ-WENO"; break; } return ret; } Accuracy stringToAccuracy(const std::string& s) { Accuracy ret = ACCURACY_UPWIND_FIRST; std::string str = s; hboost::trim(str); hboost::to_lower(str); if (str == accuracyToString(ACCURACY_UPWIND_SECOND)) { ret = ACCURACY_UPWIND_SECOND; } else if (str == accuracyToString(ACCURACY_UPWIND_THIRD)) { ret = ACCURACY_UPWIND_THIRD; } else if (str == accuracyToString(ACCURACY_WENO)) { ret = ACCURACY_WENO; } else if (str == accuracyToString(ACCURACY_HJ_WENO)) { ret = ACCURACY_HJ_WENO; } return ret; } void buildFilterMenu(std::vector<std::string>& items, OperatorType op) { items.clear(); if (OP_TYPE_SMOOTH == op) { items.push_back(filterTypeToString(FILTER_TYPE_MEAN_VALUE)); items.push_back(filterTypeToMenuName(FILTER_TYPE_MEAN_VALUE)); items.push_back(filterTypeToString(FILTER_TYPE_GAUSSIAN)); items.push_back(filterTypeToMenuName(FILTER_TYPE_GAUSSIAN)); items.push_back(filterTypeToString(FILTER_TYPE_MEDIAN_VALUE)); items.push_back(filterTypeToMenuName(FILTER_TYPE_MEDIAN_VALUE)); items.push_back(filterTypeToString(FILTER_TYPE_MEAN_CURVATURE)); 
items.push_back(filterTypeToMenuName(FILTER_TYPE_MEAN_CURVATURE)); items.push_back(filterTypeToString(FILTER_TYPE_LAPLACIAN_FLOW)); items.push_back(filterTypeToMenuName(FILTER_TYPE_LAPLACIAN_FLOW)); } else if (OP_TYPE_RESHAPE == op) { items.push_back(filterTypeToString(FILTER_TYPE_DILATE)); items.push_back(filterTypeToMenuName(FILTER_TYPE_DILATE)); items.push_back(filterTypeToString(FILTER_TYPE_ERODE)); items.push_back(filterTypeToMenuName(FILTER_TYPE_ERODE)); items.push_back(filterTypeToString(FILTER_TYPE_OPEN)); items.push_back(filterTypeToMenuName(FILTER_TYPE_OPEN)); items.push_back(filterTypeToString(FILTER_TYPE_CLOSE)); items.push_back(filterTypeToMenuName(FILTER_TYPE_CLOSE)); #ifdef DWA_DEBUG_MODE items.push_back(filterTypeToString(FILTER_TYPE_TRACK)); items.push_back(filterTypeToMenuName(FILTER_TYPE_TRACK)); #endif } } struct FilterParms { using TrimMode = openvdb::tools::lstrack::TrimMode; std::string mGroup; std::string mMaskName; bool mSecondInputConnected = false; FilterType mFilterType = FILTER_TYPE_NONE; int mIterations = 0; int mHalfWidth = 3; int mStencilWidth = 0; float mVoxelOffset = 0.0f; float mHalfWidthWorld = 0.1f; float mStencilWidthWorld = 0.1f; bool mWorldUnits = false; float mMinMask = 0; float mMaxMask = 1; bool mInvertMask = false; Accuracy mAccuracy = ACCURACY_UPWIND_FIRST; TrimMode mTrimMode = TrimMode::kAll; bool mMaskInputNode = false; }; } // namespace //////////////////////////////////////// // SOP Declaration class SOP_OpenVDB_Filter_Level_Set: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Filter_Level_Set(OP_Network*, const char* name, OP_Operator*, OperatorType); ~SOP_OpenVDB_Filter_Level_Set() override {} static OP_Node* factoryRenormalize(OP_Network*, const char* name, OP_Operator*); static OP_Node* factorySmooth(OP_Network*, const char* name, OP_Operator*); static OP_Node* factoryReshape(OP_Network*, const char* name, OP_Operator*); static OP_Node* factoryNarrowBand(OP_Network*, const char* name, OP_Operator*); int 
isRefInput(unsigned input) const override { return (input == 1); } protected: bool updateParmsFlags() override; void resolveObsoleteParms(PRM_ParmList*) override; public: class Cache: public SOP_VDBCacheOptions { public: Cache(OperatorType op): mOpType{op} {} protected: OP_ERROR cookVDBSop(OP_Context&) override; private: using BossT = hvdb::Interrupter; OP_ERROR evalFilterParms(OP_Context&, FilterParms&); template<typename GridT> bool applyFilters(GU_PrimVDB*, std::vector<FilterParms>&, BossT&, OP_Context&, GU_Detail&, bool verbose); template<typename FilterT> void filterGrid(OP_Context&, FilterT&, const FilterParms&, BossT&, bool verbose); template<typename FilterT> void offset(const FilterParms&, FilterT&, const float offset, bool verbose, const typename FilterT::MaskType* mask = nullptr); template<typename FilterT> void mean(const FilterParms&, FilterT&, BossT&, bool verbose, const typename FilterT::MaskType* mask = nullptr); template<typename FilterT> void gaussian(const FilterParms&, FilterT&, BossT&, bool verbose, const typename FilterT::MaskType* mask = nullptr); template<typename FilterT> void median(const FilterParms&, FilterT&, BossT&, bool verbose, const typename FilterT::MaskType* mask = nullptr); template<typename FilterT> void meanCurvature(const FilterParms&, FilterT&, BossT&, bool verbose, const typename FilterT::MaskType* mask = nullptr); template<typename FilterT> void laplacian(const FilterParms&, FilterT&, BossT&, bool verbose, const typename FilterT::MaskType* mask = nullptr); template<typename FilterT> void renormalize(const FilterParms&, FilterT&, BossT&, bool verbose = false); template<typename FilterT> void resizeNarrowBand(const FilterParms&, FilterT&, BossT&, bool verbose = false); template<typename FilterT> void track(const FilterParms&, FilterT&, BossT&, bool verbose); private: const OperatorType mOpType; }; private: const OperatorType mOpType; };//SOP_OpenVDB_Filter_Level_Set //////////////////////////////////////// // Build UI void 
newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; for (int n = 0; n < NUM_OPERATOR_TYPES; ++n) { OperatorType op = OperatorType(n); hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Specify a subset of the input VDB grids to be processed.") .setDocumentation( "A subset of the input VDBs to be processed" " (see [specifying volumes|/model/volumes#group])")); if (OP_TYPE_RENORM != op && OP_TYPE_RESIZE != op) { // Filter menu parms.add(hutil::ParmFactory(PRM_TOGGLE, "mask", "") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip("Enable / disable the mask.")); parms.add(hutil::ParmFactory(PRM_STRING, "maskname", "Alpha Mask") .setChoiceList(&hutil::PrimGroupMenuInput2) .setTooltip("Optional VDB used for alpha masking. Assumes values 0->1.") .setDocumentation( "If enabled, operate on the input VDBs using the given VDB" " from the second input as an alpha mask.\n\n" "The mask VDB is assumed to be scalar, with values between zero and one." " Where the mask is zero, no processing occurs. Where the mask is one," " the operation is applied at full strength. 
For intermediate mask values," " the strength varies linearly.")); std::vector<std::string> items; buildFilterMenu(items, op); parms.add(hutil::ParmFactory(PRM_STRING, "operation", "Operation") .setDefault(items[0]) .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setTooltip("The operation to be applied")); } parms.add(hutil::ParmFactory(PRM_TOGGLE, "useworldspaceunits", "Use World Space Units") .setTooltip("If enabled, use world-space units, otherwise use voxels.")); parms.add(hutil::ParmFactory(PRM_INT_J, "radius", "Filter Voxel Radius") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 5) .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_FLT_J, "radiusworld", "Filter Radius") .setDefault(0.1) .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 10) .setDocumentation("The desired radius of the filter")); parms.add(hutil::ParmFactory(PRM_INT_J, "iterations", "Iterations") .setDefault(PRMfourDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 10) .setTooltip("The number of times to apply the operation")); parms.add(hutil::ParmFactory(PRM_INT_J, "halfwidth", "Half Width") .setDefault(PRMthreeDefaults) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 10) .setTooltip( "Half the width of the narrow band, in voxels\n\n" "(Many level set operations require this to be a minimum of three voxels.)")); parms.add(hutil::ParmFactory(PRM_FLT_J, "halfwidthworld", "Half Width") .setDefault(0.1) .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 10) .setTooltip("Half the width of the narrow band, in world units") .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_FLT_J, "voxeloffset", "Offset") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_UI, 10.0) .setTooltip( "The distance in voxels by which to offset the level set surface" " along its normals")); { std::vector<std::string> items; for (int i = 0; i < NUM_ACCURACY_TYPES; ++i) { Accuracy ac = Accuracy(i); #ifndef DWA_DEBUG_MODE // Exclude some 
of the menu options if (ac == ACCURACY_UPWIND_THIRD || ac == ACCURACY_WENO) continue; #endif items.push_back(accuracyToString(ac)); // token items.push_back(accuracyToMenuName(ac)); // label } parms.add(hutil::ParmFactory(PRM_STRING, "accuracy", "Renorm Accuracy") .setDefault(items[0]) .setChoiceListItems(PRM_CHOICELIST_SINGLE, items)); } parms.add(hutil::ParmFactory(PRM_TOGGLE, "invert", "Invert Alpha Mask") .setTooltip("Invert the optional alpha mask, mapping 0 to 1 and 1 to 0.")); parms.add(hutil::ParmFactory(PRM_FLT_J, "minmask", "Min Mask Cutoff") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_UI, 0.0, PRM_RANGE_UI, 1.0) .setTooltip("Threshold below which voxel values in the mask map to zero")); parms.add(hutil::ParmFactory(PRM_FLT_J, "maxmask", "Max Mask Cutoff") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_UI, 0.0, PRM_RANGE_UI, 1.0) .setTooltip("Threshold above which voxel values in the mask map to one")); parms.add(hutil::ParmFactory(PRM_STRING, "trim", "Trim") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "none", "None", "interior", "Interior", "exterior", "Exterior", "all", "All", }) .setDefault("all") .setTooltip("Set voxels that lie outside the narrow band to the background value.") .setDocumentation( "Optionally set interior, exterior, or all voxels that lie outside" " the narrow band to the background value.\n\n" "Trimming reduces memory usage, but it also reduces dense SDFs\n" "to narrow-band level sets.")); #ifndef SESI_OPENVDB parms.add(hutil::ParmFactory(PRM_TOGGLE, "verbose", "Verbose") .setTooltip("If enabled, print the sequence of operations to the terminal.")); #endif // Obsolete parameters hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_SEPARATOR, "sep1", "Sep")); obsoleteParms.add(hutil::ParmFactory(PRM_SEPARATOR, "sep2", "Sep")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "worldSpaceUnits", "")); obsoleteParms.add(hutil::ParmFactory(PRM_INT_J, "stencilWidth", "Filter Voxel Radius") 
.setDefault(PRMoneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "stencilWidthWorld", "").setDefault(0.1)); obsoleteParms.add(hutil::ParmFactory(PRM_INT_J, "halfWidth", "Half-Width") .setDefault(PRMthreeDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "halfWidthWorld", "").setDefault(0.1)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "voxelOffset", "Offset") .setDefault(PRMoneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "minMask", "").setDefault(PRMzeroDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "maxMask", "").setDefault(PRMoneDefaults)); auto cacheAllocator = [op]() { return new SOP_OpenVDB_Filter_Level_Set::Cache{op}; }; // Register operator if (OP_TYPE_RENORM == op) { hvdb::OpenVDBOpFactory("VDB Renormalize SDF", SOP_OpenVDB_Filter_Level_Set::factoryRenormalize, parms, *table) #ifndef SESI_OPENVDB .setInternalName("DW_OpenVDBRenormalizeLevelSet") #endif .setObsoleteParms(obsoleteParms) .addInput("Input with VDB grids to process") .setVerb(SOP_NodeVerb::COOK_INPLACE, cacheAllocator) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Repair level sets represented by VDB volumes.\"\"\"\n\ \n\ @overview\n\ \n\ Certain operations on a level set volume can cause the signed distances\n\ to its zero crossing to become invalid.\n\ This node iteratively adjusts voxel values to restore proper distances.\n\ \n\ NOTE:\n\ If the level set departs significantly from a proper signed distance field,\n\ it might be necessary to rebuild it completely.\n\ That can be done with the\ [OpenVDB Rebuild Level Set node|Node:sop/DW_OpenVDBRebuildLevelSet],\n\ which converts an input level set to polygons and then back to a level set.\n\ \n\ @related\n\ - [OpenVDB Offset Level Set|Node:sop/DW_OpenVDBOffsetLevelSet]\n\ - [OpenVDB Rebuild Level Set|Node:sop/DW_OpenVDBRebuildLevelSet]\n\ - [OpenVDB Smooth Level Set|Node:sop/DW_OpenVDBSmoothLevelSet]\n\ \n\ @examples\n\ \n\ See 
[openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } else if (OP_TYPE_RESHAPE == op) { hvdb::OpenVDBOpFactory("VDB Reshape SDF", SOP_OpenVDB_Filter_Level_Set::factoryReshape, parms, *table) #ifndef SESI_OPENVDB .setInternalName("DW_OpenVDBOffsetLevelSet") #endif .setObsoleteParms(obsoleteParms) .addInput("Input with VDBs to process") .addOptionalInput("Optional VDB Alpha Mask") .setVerb(SOP_NodeVerb::COOK_INPLACE, cacheAllocator) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Offset level sets represented by VDB volumes.\"\"\"\n\ \n\ @overview\n\ \n\ This node changes the shape of a level set by moving the surface in or out\n\ along its normals.\n\ Unlike just adding an offset to a signed distance field, this node properly\n\ updates the active voxels to account for the transformation.\n\ \n\ @related\n\ - [OpenVDB Renormalize Level Set|Node:sop/DW_OpenVDBRenormalizeLevelSet]\n\ - [OpenVDB Smooth Level Set|Node:sop/DW_OpenVDBSmoothLevelSet]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } else if (OP_TYPE_SMOOTH == op) { hvdb::OpenVDBOpFactory("VDB Smooth SDF", SOP_OpenVDB_Filter_Level_Set::factorySmooth, parms, *table) #ifndef SESI_OPENVDB .setInternalName("DW_OpenVDBSmoothLevelSet") #endif .setObsoleteParms(obsoleteParms) .addInput("Input with VDBs to process") .addOptionalInput("Optional VDB Alpha Mask") .setVerb(SOP_NodeVerb::COOK_INPLACE, cacheAllocator) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Smooth the surface of a level set represented by a VDB volume.\"\"\"\n\ \n\ @overview\n\ \n\ This node applies a simulated flow operation, moving the surface of a\n\ signed distance field according to some local property.\n\ \n\ For example, if you move along the normal by an amount dependent on the curvature,\n\ you will flatten out dimples and hills and leave flat areas unchanged.\n\ \n\ Unlike the [OpenVDB 
Filter|Node:sop/DW_OpenVDBFilter] node,\n\ this node ensures that the level set remains a valid signed distance field.\n\ \n\ @related\n\ - [OpenVDB Filter|Node:sop/DW_OpenVDBFilter]\n\ - [OpenVDB Offset Level Set|Node:sop/DW_OpenVDBOffsetLevelSet]\n\ - [OpenVDB Renormalize Level Set|Node:sop/DW_OpenVDBRenormalizeLevelSet]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } else if (OP_TYPE_RESIZE == op) { hvdb::OpenVDBOpFactory("VDB Activate SDF", SOP_OpenVDB_Filter_Level_Set::factoryNarrowBand, parms, *table) #ifndef SESI_OPENVDB .setInternalName("DW_OpenVDBResizeNarrowBand") #endif .setObsoleteParms(obsoleteParms) .addInput("Input with VDBs to process") .setVerb(SOP_NodeVerb::COOK_INPLACE, cacheAllocator) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Change the width of the narrow band of a VDB signed distance field.\"\"\"\n\ \n\ @overview\n\ \n\ This node adjusts the width of the narrow band of a signed distance field\n\ represented by a VDB volume.\n\ \n\ @related\n\ - [OpenVDB Offset Level Set|Node:sop/DW_OpenVDBOffsetLevelSet]\n\ - [OpenVDB Rebuild Level Set|Node:sop/DW_OpenVDBRebuildLevelSet]\n\ - [OpenVDB Renormalize Level Set|Node:sop/DW_OpenVDBRenormalizeLevelSet]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } } } void SOP_OpenVDB_Filter_Level_Set::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; resolveRenamedParm(*obsoleteParms, "halfWidth", "halfwidth"); resolveRenamedParm(*obsoleteParms, "halfWidthWorld", "halfwidthworld"); resolveRenamedParm(*obsoleteParms, "maxMask", "maxmask"); resolveRenamedParm(*obsoleteParms, "minMask", "minmask"); resolveRenamedParm(*obsoleteParms, "stencilWidth", "radius"); resolveRenamedParm(*obsoleteParms, "stencilWidthWorld", "radiusworld"); resolveRenamedParm(*obsoleteParms, "voxelOffset", "voxeloffset"); 
resolveRenamedParm(*obsoleteParms, "worldSpaceUnits", "useworldspaceunits"); // Delegate to the base class. hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } //////////////////////////////////////// // Operator registration OP_Node* SOP_OpenVDB_Filter_Level_Set::factoryRenormalize( OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Filter_Level_Set(net, name, op, OP_TYPE_RENORM); } OP_Node* SOP_OpenVDB_Filter_Level_Set::factoryReshape( OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Filter_Level_Set(net, name, op, OP_TYPE_RESHAPE); } OP_Node* SOP_OpenVDB_Filter_Level_Set::factorySmooth( OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Filter_Level_Set(net, name, op, OP_TYPE_SMOOTH); } OP_Node* SOP_OpenVDB_Filter_Level_Set::factoryNarrowBand( OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Filter_Level_Set(net, name, op, OP_TYPE_RESIZE); } SOP_OpenVDB_Filter_Level_Set::SOP_OpenVDB_Filter_Level_Set( OP_Network* net, const char* name, OP_Operator* op, OperatorType opType) : hvdb::SOP_NodeVDB(net, name, op) , mOpType(opType) { } //////////////////////////////////////// // Disable UI Parms. 
bool SOP_OpenVDB_Filter_Level_Set::updateParmsFlags() { bool changed = false, stencil = false; const bool renorm = mOpType == OP_TYPE_RENORM; const bool smooth = mOpType == OP_TYPE_SMOOTH; const bool reshape = mOpType == OP_TYPE_RESHAPE; const bool resize = mOpType == OP_TYPE_RESIZE; if (renorm || resize) { changed |= setVisibleState("invert", false); changed |= setVisibleState("minmask",false); changed |= setVisibleState("maxmask",false); } else { const FilterType operation = stringToFilterType(evalStdString("operation", 0)); stencil = operation == FILTER_TYPE_MEAN_VALUE || operation == FILTER_TYPE_GAUSSIAN || operation == FILTER_TYPE_MEDIAN_VALUE; const bool hasMask = (this->nInputs() == 2); changed |= enableParm("mask", hasMask); const bool useMask = hasMask && bool(evalInt("mask", 0, 0)); changed |= enableParm("invert", useMask); changed |= enableParm("minmask", useMask); changed |= enableParm("maxmask", useMask); changed |= enableParm("maskname", useMask); } const bool worldUnits = bool(evalInt("useworldspaceunits", 0, 0)); changed |= setVisibleState("halfwidth", resize && !worldUnits); changed |= setVisibleState("halfwidthworld", resize && worldUnits); changed |= enableParm("iterations", smooth || renorm); changed |= enableParm("radius", stencil && !worldUnits); changed |= enableParm("radiusworld", stencil && worldUnits); changed |= setVisibleState("radius", getEnableState("radius")); changed |= setVisibleState("radiusworld", getEnableState("radiusworld")); changed |= setVisibleState("iterations", getEnableState("iterations")); changed |= setVisibleState("useworldspaceunits", !renorm); changed |= setVisibleState("voxeloffset", reshape); return changed; } //////////////////////////////////////// // Cook OP_ERROR SOP_OpenVDB_Filter_Level_Set::Cache::cookVDBSop( OP_Context& context) { try { BossT boss("Processing level sets"); const fpreal time = context.getTime(); #ifndef SESI_OPENVDB const bool verbose = bool(evalInt("verbose", 0, time)); #else const bool 
verbose = false; #endif // Collect filter parameters starting from the topmost node. std::vector<FilterParms> filterParms; filterParms.resize(1); evalFilterParms(context, filterParms[0]); // Filter grids const GA_PrimitiveGroup* group = matchGroup(*gdp, evalStdString("group", time)); for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { // Check grid class const openvdb::GridClass gridClass = it->getGrid().getGridClass(); if (gridClass != openvdb::GRID_LEVEL_SET) { std::string s = it.getPrimitiveNameOrIndex().toStdString(); s = "VDB primitive " + s + " was skipped because it is not a level-set grid."; addWarning(SOP_MESSAGE, s.c_str()); continue; } // Appply filters bool wasFiltered = applyFilters<openvdb::FloatGrid>( *it, filterParms, boss, context, *gdp, verbose); if (boss.wasInterrupted()) break; if (!wasFiltered) { wasFiltered = applyFilters<openvdb::DoubleGrid>( *it, filterParms, boss, context, *gdp, verbose); } if (boss.wasInterrupted()) break; if (!wasFiltered) { std::string msg = "VDB primitive " + it.getPrimitiveNameOrIndex().toStdString() + " is not of floating point type."; addWarning(SOP_MESSAGE, msg.c_str()); continue; } if (boss.wasInterrupted()) break; } if (boss.wasInterrupted()) addWarning(SOP_MESSAGE, "processing was interrupted"); boss.end(); } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Filter_Level_Set::Cache::evalFilterParms( OP_Context& context, FilterParms& parms) { fpreal now = context.getTime(); parms.mIterations = static_cast<int>(evalInt("iterations", 0, now)); parms.mHalfWidth = static_cast<int>(evalInt("halfwidth", 0, now)); parms.mHalfWidthWorld = float(evalFloat("halfwidthworld", 0, now)); parms.mStencilWidth = static_cast<int>(evalInt("radius", 0, now)); parms.mStencilWidthWorld = float(evalFloat("radiusworld", 0, now)); parms.mVoxelOffset = static_cast<float>(evalFloat("voxeloffset", 0, now)); parms.mMinMask = 
static_cast<float>(evalFloat("minmask", 0, now)); parms.mMaxMask = static_cast<float>(evalFloat("maxmask", 0, now)); parms.mInvertMask = bool(evalInt("invert", 0, now)); parms.mWorldUnits = bool(evalInt("useworldspaceunits", 0, now)); parms.mAccuracy = stringToAccuracy(evalStdString("accuracy", now)); parms.mGroup = evalStdString("group", now); { const auto trimMode = evalStdString("trim", now); if (trimMode == "none") { parms.mTrimMode = FilterParms::TrimMode::kNone; } else if (trimMode == "interior") { parms.mTrimMode = FilterParms::TrimMode::kInterior; } else if (trimMode == "exterior") { parms.mTrimMode = FilterParms::TrimMode::kExterior; } else if (trimMode == "all") { parms.mTrimMode = FilterParms::TrimMode::kAll; } else { addError(SOP_MESSAGE, ("Expected \"none\", \"interior\", \"exterior\" or \"all\" for \"trim\", got \"" + trimMode + "\".").c_str()); } } if (OP_TYPE_RENORM == mOpType) { parms.mFilterType = FILTER_TYPE_RENORMALIZE; } else if (OP_TYPE_RESIZE == mOpType) { parms.mFilterType = FILTER_TYPE_RESIZE; } else { parms.mFilterType = stringToFilterType(evalStdString("operation", now)); } if (OP_TYPE_SMOOTH == mOpType || OP_TYPE_RESHAPE == mOpType) { if (evalInt("mask", 0, now)) { parms.mMaskInputNode = hasInput(1); parms.mMaskName = evalStdString("maskname", now); } } return error(); } //////////////////////////////////////// // Filter callers template<typename GridT> bool SOP_OpenVDB_Filter_Level_Set::Cache::applyFilters( GU_PrimVDB* vdbPrim, std::vector<FilterParms>& filterParms, BossT& boss, OP_Context& context, GU_Detail&, bool verbose) { vdbPrim->makeGridUnique(); typename GridT::Ptr grid = openvdb::gridPtrCast<GridT>(vdbPrim->getGridPtr()); if (!grid) return false; using ValueT = typename GridT::ValueType; using MaskT = openvdb::FloatGrid; using FilterT = openvdb::tools::LevelSetFilter<GridT, MaskT, BossT>; const float voxelSize = static_cast<float>(grid->voxelSize()[0]); FilterT filter(*grid, &boss); 
filter.setTemporalScheme(openvdb::math::TVD_RK1); if (grid->background() < ValueT(openvdb::LEVEL_SET_HALF_WIDTH * voxelSize)) { std::string msg = "VDB primitive '" + std::string(vdbPrim->getGridName()) + "' has a narrow band width that is less than 3 voxel units. "; addWarning(SOP_MESSAGE, msg.c_str()); } for (size_t n = 0, N = filterParms.size(); n < N; ++n) { const GA_PrimitiveGroup *group = matchGroup(*gdp, filterParms[n].mGroup); // Skip this node if it doesn't operate on this primitive if (group && !group->containsOffset(vdbPrim->getMapOffset())) continue; filterGrid(context, filter, filterParms[n], boss, verbose); if (boss.wasInterrupted()) break; } return true; } template<typename FilterT> void SOP_OpenVDB_Filter_Level_Set::Cache::filterGrid( OP_Context& /*context*/, FilterT& filter, const FilterParms& parms, BossT& boss, bool verbose) { // Alpha-masking using MaskT = typename FilterT::MaskType; typename MaskT::ConstPtr maskGrid; if (parms.mMaskInputNode) { const GU_Detail* maskGeo = inputGeo(1); if (maskGeo) { const GA_PrimitiveGroup* maskGroup = parsePrimitiveGroups(parms.mMaskName.c_str(), GroupCreator(maskGeo)); if (!maskGroup && !parms.mMaskName.empty()) { addWarning(SOP_MESSAGE, "Mask not found."); } else { hvdb::VdbPrimCIterator maskIt(maskGeo, maskGroup); if (maskIt) { if (maskIt->getStorageType() == UT_VDB_FLOAT) { maskGrid = openvdb::gridConstPtrCast<MaskT>(maskIt->getGridPtr()); } else { addWarning(SOP_MESSAGE, "The mask grid has to be a FloatGrid."); } } else { addWarning(SOP_MESSAGE, "The mask input is empty."); } } } filter.setMaskRange(parms.mMinMask, parms.mMaxMask); filter.invertMask(parms.mInvertMask); } filter.setTrimming(parms.mTrimMode); switch (parms.mAccuracy) { case ACCURACY_UPWIND_FIRST: filter.setSpatialScheme(openvdb::math::FIRST_BIAS); break; case ACCURACY_UPWIND_SECOND: filter.setSpatialScheme(openvdb::math::SECOND_BIAS); break; case ACCURACY_UPWIND_THIRD: filter.setSpatialScheme(openvdb::math::THIRD_BIAS); break; case 
ACCURACY_WENO: filter.setSpatialScheme(openvdb::math::WENO5_BIAS); break; case ACCURACY_HJ_WENO: filter.setSpatialScheme(openvdb::math::HJWENO5_BIAS); break; } const float voxelSize = float(filter.grid().voxelSize()[0]); const float ds = (parms.mWorldUnits ? 1.0f : voxelSize) * parms.mVoxelOffset; switch (parms.mFilterType) { case FILTER_TYPE_NONE: break; case FILTER_TYPE_RENORMALIZE: renormalize(parms, filter, boss, verbose); break; case FILTER_TYPE_RESIZE: resizeNarrowBand(parms, filter, boss, verbose); break; case FILTER_TYPE_MEAN_VALUE: mean(parms, filter, boss, verbose, maskGrid.get()); break; case FILTER_TYPE_GAUSSIAN: gaussian(parms, filter, boss, verbose, maskGrid.get()); break; case FILTER_TYPE_MEDIAN_VALUE: median(parms, filter, boss, verbose, maskGrid.get()); break; case FILTER_TYPE_MEAN_CURVATURE: meanCurvature(parms, filter, boss, verbose, maskGrid.get()); break; case FILTER_TYPE_LAPLACIAN_FLOW: laplacian(parms, filter, boss, verbose, maskGrid.get()); break; case FILTER_TYPE_TRACK: track(parms, filter, boss, verbose); break; case FILTER_TYPE_DILATE: offset(parms, filter, -ds, verbose, maskGrid.get()); break; case FILTER_TYPE_ERODE: offset(parms, filter, ds, verbose, maskGrid.get()); break; case FILTER_TYPE_OPEN: offset(parms, filter, ds, verbose, maskGrid.get()); offset(parms, filter, -ds, verbose, maskGrid.get()); break; case FILTER_TYPE_CLOSE: offset(parms, filter, -ds, verbose, maskGrid.get()); offset(parms, filter, ds, verbose, maskGrid.get()); break; } } //////////////////////////////////////// // Filter operations template<typename FilterT> inline void SOP_OpenVDB_Filter_Level_Set::Cache::offset( const FilterParms&, FilterT& filter, const float offset, bool verbose, const typename FilterT::MaskType* mask) { if (verbose) { std::cout << "Morphological " << (offset>0 ? 
"erosion" : "dilation") << " by the offset " << offset << std::endl; } filter.offset(offset, mask); } template<typename FilterT> void SOP_OpenVDB_Filter_Level_Set::Cache::mean( const FilterParms& parms, FilterT& filter, BossT& boss, bool verbose, const typename FilterT::MaskType* mask) { const double voxelScale = 1.0 / filter.grid().voxelSize()[0]; for (int n = 0, N = parms.mIterations; n < N && !boss.wasInterrupted(); ++n) { int radius = parms.mStencilWidth; if (parms.mWorldUnits) { double voxelRadius = double(parms.mStencilWidthWorld) * voxelScale; radius = std::max(1, int(voxelRadius)); } if (verbose) { std::cout << "Mean filter of radius " << radius << std::endl; } filter.mean(radius, mask); } } template<typename FilterT> void SOP_OpenVDB_Filter_Level_Set::Cache::gaussian( const FilterParms& parms, FilterT& filter, BossT& boss, bool verbose, const typename FilterT::MaskType* mask) { const double voxelScale = 1.0 / filter.grid().voxelSize()[0]; for (int n = 0, N = parms.mIterations; n < N && !boss.wasInterrupted(); ++n) { int radius = parms.mStencilWidth; if (parms.mWorldUnits) { double voxelRadius = double(parms.mStencilWidthWorld) * voxelScale; radius = std::max(1, int(voxelRadius)); } if (verbose) { std::cout << "Gaussian filter of radius " << radius << std::endl; } filter.gaussian(radius, mask); } } template<typename FilterT> void SOP_OpenVDB_Filter_Level_Set::Cache::median( const FilterParms& parms, FilterT& filter, BossT& boss, bool verbose, const typename FilterT::MaskType* mask) { const double voxelScale = 1.0 / filter.grid().voxelSize()[0]; for (int n = 0, N = parms.mIterations; n < N && !boss.wasInterrupted(); ++n) { int radius = parms.mStencilWidth; if (parms.mWorldUnits) { double voxelRadius = double(parms.mStencilWidthWorld) * voxelScale; radius = std::max(1, int(voxelRadius)); } if (verbose) { std::cout << "Median filter of radius " << radius << std::endl; } filter.median(radius, mask); } } template<typename FilterT> void 
SOP_OpenVDB_Filter_Level_Set::Cache::meanCurvature( const FilterParms& parms, FilterT& filter, BossT& boss, bool verbose, const typename FilterT::MaskType* mask) { for (int n = 0, N = parms.mIterations; n < N && !boss.wasInterrupted(); ++n) { if (verbose) std::cout << "Mean-curvature flow" << (n+1) << std::endl; filter.meanCurvature(mask); } } template<typename FilterT> void SOP_OpenVDB_Filter_Level_Set::Cache::laplacian( const FilterParms& parms, FilterT& filter, BossT& boss, bool verbose, const typename FilterT::MaskType* mask) { for (int n = 0, N = parms.mIterations; n < N && !boss.wasInterrupted(); ++n) { if (verbose) std::cout << "Laplacian flow" << (n+1) << std::endl; filter.laplacian(mask); } } template<typename FilterT> inline void SOP_OpenVDB_Filter_Level_Set::Cache::renormalize( const FilterParms& parms, FilterT& filter, BossT&, bool verbose) { // We will restore the old state since it is important to level set tracking const typename FilterT::State s = filter.getState(); filter.setNormCount(parms.mIterations); filter.setTemporalScheme(openvdb::math::TVD_RK3); if (verbose) std::cout << "Renormalize #" << parms.mIterations << std::endl; filter.normalize(); filter.prune(); filter.setState(s); } template<typename FilterT> inline void SOP_OpenVDB_Filter_Level_Set::Cache::resizeNarrowBand( const FilterParms& parms, FilterT& filter, BossT&, bool /*verbose*/) { // The filter is a statemachine so we will restore the old // state since it is important to subsequent level set tracking const typename FilterT::State s = filter.getState(); filter.setNormCount(1); // only one normalization per iteration int width = parms.mHalfWidth; if (parms.mWorldUnits) { double voxelWidth = double(parms.mHalfWidthWorld) / filter.grid().voxelSize()[0]; width = std::max(1, int(voxelWidth)); } filter.resize(width); filter.prune(); filter.setState(s); } template<typename FilterT> inline void SOP_OpenVDB_Filter_Level_Set::Cache::track( const FilterParms& parms, FilterT& filter, BossT& 
boss, bool verbose) { for (int n = 0, N = parms.mIterations; n < N && !boss.wasInterrupted(); ++n) { if (verbose) std::cout << "Tracking #" << (n+1) << std::endl; filter.track(); } }
44,435
C++
33.715625
100
0.617329
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Rasterize_Points.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Rasterize_Points.cc /// /// @author Mihai Alden /// /// @brief Rasterize points into density and attribute grids. /// /// @note This SOP has a accompanying creation script that adds a default VOP /// subnetwork and UI parameters for cloud and velocity field modeling. /// See the creation script file header for installation details. #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/GU_VDBPointTools.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb_houdini/Utils.h> #include <openvdb/tools/GridTransformer.h> #include <openvdb/tools/PointIndexGrid.h> #include <openvdb/tools/Prune.h> #include <CH/CH_Manager.h> #include <CVEX/CVEX_Context.h> #include <CVEX/CVEX_Value.h> #include <GA/GA_Handle.h> #include <GA/GA_PageIterator.h> #include <GA/GA_Types.h> #include <GU/GU_Detail.h> #include <GU/GU_SopResolver.h> #include <OP/OP_Caller.h> #include <OP/OP_Channels.h> #include <OP/OP_Director.h> #include <OP/OP_NodeInfoParms.h> #include <OP/OP_Operator.h> #include <OP/OP_OperatorTable.h> #include <OP/OP_VexFunction.h> #include <PRM/PRM_Parm.h> #include <UT/UT_Interrupt.h> #include <UT/UT_SharedPtr.h> #include <UT/UT_UniquePtr.h> #include <UT/UT_WorkArgs.h> #include <VEX/VEX_Error.h> #include <VOP/VOP_CodeCompilerArgs.h> #include <VOP/VOP_CodeGenerator.h> #include <VOP/VOP_ExportedParmsManager.h> #include <VOP/VOP_LanguageContextTypeList.h> #include <tbb/atomic.h> #include <tbb/blocked_range.h> #include <tbb/enumerable_thread_specific.h> #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <tbb/task_group.h> #include <hboost/algorithm/string/classification.hpp> // is_any_of #include <hboost/algorithm/string/join.hpp> #include <hboost/algorithm/string/split.hpp> #include <algorithm> // std::sort #include <cmath> // trigonometric functions #include <memory> #include <set> #include <sstream> #include <string> #include <vector> 
namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; //////////////////////////////////////// // Local Utility Methods namespace { inline hvdb::GridCPtr getMaskVDB(const GU_Detail * geoPt, const GA_PrimitiveGroup *group = nullptr) { if (geoPt) { hvdb::VdbPrimCIterator vdbIt(geoPt, group); if (vdbIt) { return (*vdbIt)->getConstGridPtr(); } } return hvdb::GridCPtr(); } inline UT_SharedPtr<openvdb::BBoxd> getMaskGeoBBox(const GU_Detail * geoPt) { if (geoPt) { UT_BoundingBox box; geoPt->getBBox(&box); UT_SharedPtr<openvdb::BBoxd> bbox(new openvdb::BBoxd()); bbox->min()[0] = box.xmin(); bbox->min()[1] = box.ymin(); bbox->min()[2] = box.zmin(); bbox->max()[0] = box.xmax(); bbox->max()[1] = box.ymax(); bbox->max()[2] = box.zmax(); return bbox; } return UT_SharedPtr<openvdb::BBoxd>(); } struct BoolSampler { static const char* name() { return "bin"; } static int radius() { return 2; } static bool mipmap() { return false; } static bool consistent() { return true; } template<class TreeT> static bool sample(const TreeT& inTree, const openvdb::Vec3R& inCoord, typename TreeT::ValueType& result) { openvdb::Coord ijk; ijk[0] = int(std::floor(inCoord[0])); ijk[1] = int(std::floor(inCoord[1])); ijk[2] = int(std::floor(inCoord[2])); return inTree.probeValue(ijk, result); } }; // struct BoolSampler /// z-coordinate comparison operator for std::sort. template<typename LeafNodeType> struct CompZCoord { bool operator()(const LeafNodeType* lhs, const LeafNodeType* rhs) const { return lhs->origin().z() < rhs->origin().z(); } }; // struct CompZCoord /// returns the world space voxel size for the given frustum depth. 
inline openvdb::Vec3d computeFrustumVoxelSize(int zDepth, const openvdb::math::Transform& xform) { using MapType = openvdb::math::NonlinearFrustumMap; MapType::ConstPtr map = xform.map<MapType>(); if (map) { const openvdb::BBoxd& box = map->getBBox(); openvdb::CoordBBox bbox( openvdb::Coord::floor(box.min()), openvdb::Coord::ceil(box.max())); double nearPlaneX = 0.5 * double(bbox.min().x() + bbox.max().x()); double nearPlaneY = 0.5 * double(bbox.min().y() + bbox.max().y()); zDepth = std::max(zDepth, bbox.min().z()); openvdb::Vec3d xyz(nearPlaneX, nearPlaneY, double(zDepth)); return xform.voxelSize(xyz); } return xform.voxelSize(); } inline double linearBlend(double a, double b, double w) { return a * w + b * (1.0 - w); } /// Inactivates the region defined by @a bbox in @a mask. template <typename MaskTreeType> inline void bboxClip(MaskTreeType& mask, const openvdb::BBoxd& bbox, bool invertMask, const openvdb::math::Transform& maskXform, const openvdb::math::Transform* srcXform = nullptr) { using ValueType = typename MaskTreeType::ValueType; const ValueType offVal = ValueType(0); const ValueType onVal = ValueType(1); if (!srcXform) { openvdb::Vec3d minIS, maxIS; openvdb::math::calculateBounds(maskXform, bbox.min(), bbox.max(), minIS, maxIS); openvdb::CoordBBox clipRegion; clipRegion.min()[0] = int(std::floor(minIS[0])); clipRegion.min()[1] = int(std::floor(minIS[1])); clipRegion.min()[2] = int(std::floor(minIS[2])); clipRegion.max()[0] = int(std::floor(maxIS[0])); clipRegion.max()[1] = int(std::floor(maxIS[1])); clipRegion.max()[2] = int(std::floor(maxIS[2])); MaskTreeType clipMask(offVal); clipMask.fill(clipRegion, onVal, true); if (invertMask) { mask.topologyDifference(clipMask); } else { mask.topologyIntersection(clipMask); } } else { openvdb::Vec3d minIS, maxIS; openvdb::math::calculateBounds(*srcXform, bbox.min(), bbox.max(), minIS, maxIS); openvdb::CoordBBox clipRegion; clipRegion.min()[0] = int(std::floor(minIS[0])); clipRegion.min()[1] = 
int(std::floor(minIS[1])); clipRegion.min()[2] = int(std::floor(minIS[2])); clipRegion.max()[0] = int(std::floor(maxIS[0])); clipRegion.max()[1] = int(std::floor(maxIS[1])); clipRegion.max()[2] = int(std::floor(maxIS[2])); using MaskGridType = openvdb::Grid<MaskTreeType>; MaskGridType srcClipMask(offVal); srcClipMask.setTransform(srcXform->copy()); srcClipMask.tree().fill(clipRegion, onVal, true); MaskGridType dstClipMask(offVal); dstClipMask.setTransform(maskXform.copy()); hvdb::Interrupter interrupter; openvdb::tools::resampleToMatch<BoolSampler>(srcClipMask, dstClipMask, interrupter); if (invertMask) { mask.topologyDifference(dstClipMask.tree()); } else { mask.topologyIntersection(dstClipMask.tree()); } } } template <typename MaskTreeType> struct GridTopologyClipOp { GridTopologyClipOp( MaskTreeType& mask, const openvdb::math::Transform& maskXform, bool invertMask) : mMask(&mask), mMaskXform(&maskXform), mInvertMask(invertMask) { } /// Inactivates the region defined by @a grid in mMask. 
template<typename GridType> void operator()(const GridType& grid) { using MaskGridType = openvdb::Grid<MaskTreeType>; using ValueType = typename MaskTreeType::ValueType; const ValueType offVal = ValueType(0); MaskGridType srcClipMask(offVal); srcClipMask.setTransform(grid.transform().copy()); srcClipMask.tree().topologyUnion(grid.tree()); MaskGridType dstClipMask(offVal); dstClipMask.setTransform(mMaskXform->copy()); hvdb::Interrupter interrupter; openvdb::tools::resampleToMatch<BoolSampler>(srcClipMask, dstClipMask, interrupter); if (mInvertMask) { mMask->topologyDifference(dstClipMask.tree()); } else { mMask->topologyIntersection(dstClipMask.tree()); } } private: MaskTreeType * const mMask; openvdb::math::Transform const * const mMaskXform; bool mInvertMask; }; // struct GridTopologyClipOp //////////////////////////////////////// ///@brief Utility structure that caches commonly used point attributes struct PointCache { using Ptr = UT_SharedPtr<PointCache>; using PosType = openvdb::Vec3s; using ScalarType = PosType::value_type; PointCache(const GU_Detail& detail, const float radiusScale, const GA_PointGroup* group = nullptr) : mIndexMap(&detail.getP()->getIndexMap()) , mSize(mIndexMap->indexSize()) , mOffsets() , mRadius() , mPos() { if (group) { mSize = group->entries(); mOffsets.reset(new GA_Offset[mSize]); GA_Offset start, end; GA_Offset* offset = mOffsets.get(); GA_Range range(*group); for (GA_Iterator it = range.begin(); it.blockAdvance(start, end); ) { for (GA_Offset off = start; off < end; ++off, ++offset) { *offset = off; } } mRadius.reset(new float[mSize]); mPos.reset(new openvdb::Vec3s[mSize]); tbb::parallel_for(tbb::blocked_range<size_t>(0, mSize), IFOCachePointGroupData(mOffsets, detail, mRadius, mPos, radiusScale)); getOffset = &PointCache::offsetFromGroupMap; } else if (mIndexMap->isTrivialMap()) { getOffset = &PointCache::offsetFromIndexCast; } else { getOffset = &PointCache::offsetFromGeoMap; } if (!group) { mRadius.reset(new float[mSize]); 
mPos.reset(new openvdb::Vec3s[mSize]); UTparallelFor(GA_SplittableRange(detail.getPointRange(group)), IFOCachePointData(detail, mRadius, mPos, radiusScale)); } } PointCache(const PointCache& rhs, const std::vector<unsigned>& indices) : mIndexMap(rhs.mIndexMap) , mSize(indices.size()) , mOffsets() , mRadius() , mPos() { mOffsets.reset(new GA_Offset[mSize]); mRadius.reset(new float[mSize]); mPos.reset(new openvdb::Vec3s[mSize]); tbb::parallel_for(tbb::blocked_range<size_t>(0, mSize), IFOCopyPointData(indices, mOffsets, mRadius, mPos, rhs)); getOffset = &PointCache::offsetFromGroupMap; } size_t size() const { return mSize; } const float& radius(size_t n) const { return mRadius[n]; } const openvdb::Vec3s& pos(size_t n) const { return mPos[n]; } void getPos(size_t n, openvdb::Vec3s& xyz) const { xyz = mPos[n]; } GA_Offset offsetFromIndex(size_t n) const { return (this->*getOffset)(n); } const float* radiusData() const { return mRadius.get(); } const openvdb::Vec3s* posData() const { return mPos.get(); } float evalMaxRadius() const { IFOEvalMaxRadius op(mRadius.get()); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, mSize), op); return op.result; } float evalMinRadius() const { IFOEvalMinRadius op(mRadius.get()); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, mSize), op); return op.result; } private: // Disallow copying PointCache(const PointCache&); PointCache& operator=(const PointCache&); GA_Offset (PointCache::* getOffset)(const size_t) const; GA_Offset offsetFromGeoMap(const size_t n) const { return mIndexMap->offsetFromIndex(GA_Index(n)); } GA_Offset offsetFromGroupMap(const size_t n) const { return mOffsets[n]; } GA_Offset offsetFromIndexCast(const size_t n) const { return GA_Offset(n); } ////////// // Internal TBB function objects struct IFOCopyPointData { IFOCopyPointData( const std::vector<unsigned>& indices, UT_UniquePtr<GA_Offset[]>& offsets, UT_UniquePtr<float[]>& radius, UT_UniquePtr<openvdb::Vec3s[]>& pos, const PointCache& PointCache) : 
mIndices(&indices[0]) , mOffsets(offsets.get()) , mRadiusData(radius.get()) , mPosData(pos.get()) , mPointCache(&PointCache) { } void operator()(const tbb::blocked_range<size_t>& range) const { const float* radiusData = mPointCache->radiusData(); const openvdb::Vec3s* posData = mPointCache->posData(); for (size_t n = range.begin(), N = range.end(); n != N; ++n) { const size_t idx = size_t(mIndices[n]); mOffsets[n] = mPointCache->offsetFromIndex(idx); mRadiusData[n] = radiusData[idx]; mPosData[n] = posData[idx]; } } unsigned const * const mIndices; GA_Offset * const mOffsets; float * const mRadiusData; openvdb::Vec3s * const mPosData; PointCache const * const mPointCache; }; // struct IFOCopyPointData struct IFOCachePointData { IFOCachePointData(const GU_Detail& detail, UT_UniquePtr<float[]>& radius, UT_UniquePtr<openvdb::Vec3s[]>& pos, float radiusScale = 1.0) : mDetail(&detail) , mRadiusData(radius.get()) , mPosData(pos.get()) , mRadiusScale(radiusScale) { } void operator()(const GA_SplittableRange& range) const { GA_Offset start, end; UT_Vector3 xyz; GA_ROHandleV3 posHandle(mDetail->getP()); GA_ROHandleF scaleHandle; GA_ROAttributeRef aRef = mDetail->findFloatTuple(GA_ATTRIB_POINT, GEO_STD_ATTRIB_PSCALE); bool hasScale = false; if (aRef.isValid()) { hasScale = true; scaleHandle.bind(aRef.getAttribute()); } const float scale = mRadiusScale; for (GA_PageIterator pageIt = range.beginPages(); !pageIt.atEnd(); ++pageIt) { for (GA_Iterator blockIt(pageIt.begin()); blockIt.blockAdvance(start, end); ) { for (GA_Offset i = start; i < end; ++i) { const GA_Index idx = mDetail->pointIndex(i); mRadiusData[idx] = hasScale ? 
scaleHandle.get(i) * scale : scale; xyz = posHandle.get(i); openvdb::Vec3s& p = mPosData[idx]; p[0] = xyz[0]; p[1] = xyz[1]; p[2] = xyz[2]; } } } } GU_Detail const * const mDetail; float * const mRadiusData; openvdb::Vec3s * const mPosData; float const mRadiusScale; }; // struct IFOCachePointData struct IFOCachePointGroupData { IFOCachePointGroupData(const UT_UniquePtr<GA_Offset[]>& offsets, const GU_Detail& detail, UT_UniquePtr<float[]>& radius, UT_UniquePtr<openvdb::Vec3s[]>& pos, float radiusScale = 1.0) : mOffsets(offsets.get()) , mDetail(&detail) , mRadiusData(radius.get()) , mPosData(pos.get()) , mRadiusScale(radiusScale) { } void operator()(const tbb::blocked_range<size_t>& range) const { GA_ROHandleV3 posHandle(mDetail->getP()); bool hasScale = false; GA_ROHandleF scaleHandle; GA_ROAttributeRef aRef = mDetail->findFloatTuple(GA_ATTRIB_POINT, GEO_STD_ATTRIB_PSCALE); if (aRef.isValid()) { hasScale = true; scaleHandle.bind(aRef.getAttribute()); } const float scale = mRadiusScale; UT_Vector3 xyz; for (size_t n = range.begin(), N = range.end(); n != N; ++n) { const GA_Offset offset = mOffsets[n]; mRadiusData[n] = hasScale ? 
scaleHandle.get(offset) * scale : scale; xyz = posHandle.get(offset); openvdb::Vec3s& p = mPosData[n]; p[0] = xyz[0]; p[1] = xyz[1]; p[2] = xyz[2]; } } GA_Offset const * const mOffsets; GU_Detail const * const mDetail; float * const mRadiusData; openvdb::Vec3s * const mPosData; float const mRadiusScale; }; // struct IFOCachePointGroupData struct IFOEvalMaxRadius { IFOEvalMaxRadius(float const * const radiusArray) : mRadiusArray(radiusArray), result(-std::numeric_limits<float>::max()) {} IFOEvalMaxRadius(IFOEvalMaxRadius& rhs, tbb::split) // thread safe copy constructor : mRadiusArray(rhs.mRadiusArray), result(-std::numeric_limits<float>::max()){} void operator()(const tbb::blocked_range<size_t>& range) { for (size_t n = range.begin(), N = range.end(); n != N; ++n) { result = std::max(mRadiusArray[n], result); } } void join(const IFOEvalMaxRadius& rhs) { result = std::max(rhs.result, result); } float const * const mRadiusArray; float result; }; // struct IFOEvalMaxRadius struct IFOEvalMinRadius { IFOEvalMinRadius(float const * const radiusArray) : mRadiusArray(radiusArray), result(std::numeric_limits<float>::max()) {} IFOEvalMinRadius(IFOEvalMinRadius& rhs, tbb::split) // thread safe copy constructor : mRadiusArray(rhs.mRadiusArray), result(std::numeric_limits<float>::max()){} void operator()(const tbb::blocked_range<size_t>& range) { for (size_t n = range.begin(), N = range.end(); n != N; ++n) { result = std::min(mRadiusArray[n], result); } } void join(const IFOEvalMinRadius& rhs) { result = std::min(rhs.result, result); } float const * const mRadiusArray; float result; }; // struct IFOEvalMinRadius ////////// GA_IndexMap const * const mIndexMap; size_t mSize; UT_UniquePtr<GA_Offset[]> mOffsets; UT_UniquePtr<float[]> mRadius; UT_UniquePtr<openvdb::Vec3s[]> mPos; }; // struct PointCache ///@brief Radius based partitioning of points into multiple @c openvdb::tools::PointIndexGrid /// acceleration structures. Improves spatial query time for points with varying radius. 
struct PointIndexGridCollection { using PointIndexGrid = openvdb::tools::PointIndexGrid; using PointIndexTree = PointIndexGrid::TreeType; using PointIndexLeafNode = PointIndexTree::LeafNodeType; using BoolTreeType = PointIndexTree::ValueConverter<bool>::Type; PointIndexGridCollection(const GU_Detail& detail, const float radiusScale, const float minVoxelSize, const GA_PointGroup* group = nullptr, hvdb::Interrupter* interrupter = nullptr) : mPointCacheArray() , mIdxGridArray(), mMinRadiusArray(), mMaxRadiusArray() { mPointCacheArray.push_back(PointCache::Ptr(new PointCache(detail, radiusScale, group))); std::vector<double> voxelSizeList; voxelSizeList.push_back(std::max(minVoxelSize, mPointCacheArray.back()->evalMinRadius())); for (size_t n = 0; n < 50; ++n) { if (interrupter && interrupter->wasInterrupted()) break; PointCache& pointCache = *mPointCacheArray.back(); const float maxRadius = pointCache.evalMaxRadius(); const float limit = float(voxelSizeList.back() * (n < 40 ? 2.0 : 8.0)); if (!(maxRadius > limit)) { break; } std::vector<unsigned> lhsIdx, rhsIdx; float minRadius = maxRadius; const float* radiusData = pointCache.radiusData(); for (unsigned i = 0, I = unsigned(pointCache.size()); i < I; ++i) { if (radiusData[i] > limit) { rhsIdx.push_back(i); minRadius = std::min(minRadius, radiusData[i]); } else lhsIdx.push_back(i); } voxelSizeList.push_back(minRadius); PointCache::Ptr lhsPointCache(new PointCache(pointCache, lhsIdx)); PointCache::Ptr rhsPointCache(new PointCache(pointCache, rhsIdx)); mPointCacheArray.back() = lhsPointCache; mPointCacheArray.push_back(rhsPointCache); } const size_t collectionSize = mPointCacheArray.size(); mIdxGridArray.resize(collectionSize); mMinRadiusArray.reset(new float[collectionSize]); mMaxRadiusArray.reset(new float[collectionSize]); tbb::task_group tasks; for (size_t n = 0; n < collectionSize; ++n) { if (interrupter && interrupter->wasInterrupted()) break; tasks.run(IFOCreateAuxiliaryData(mIdxGridArray[n], *mPointCacheArray[n], 
voxelSizeList[n], mMinRadiusArray[n], mMaxRadiusArray[n])); } tasks.wait(); } ////////// size_t size() const { return mPointCacheArray.size(); } float minRadius(size_t n) const { return mMinRadiusArray[n]; } float maxRadius(size_t n) const { return mMaxRadiusArray[n]; } float maxRadius() const { float maxradius = mMaxRadiusArray[0]; for (size_t n = 0, N = mPointCacheArray.size(); n < N; ++n) { maxradius = std::max(maxradius, mMaxRadiusArray[n]); } return maxradius; } const PointCache& pointCache(size_t n) const { return *mPointCacheArray[n]; } const PointIndexGrid& idxGrid(size_t n) const { return *mIdxGridArray[n]; } private: // Disallow copying PointIndexGridCollection(const PointIndexGridCollection&); PointIndexGridCollection& operator=(const PointIndexGridCollection&); ////////// // Internal TBB function objects struct IFOCreateAuxiliaryData { IFOCreateAuxiliaryData(PointIndexGrid::Ptr& idxGridPt, PointCache& points, double voxelSize, float& minRadius, float& maxRadius) : mIdxGrid(&idxGridPt), mPointCache(&points), mVoxelSize(voxelSize) , mMinRadius(&minRadius), mMaxRadius(&maxRadius) {} void operator()() const { const openvdb::math::Transform::Ptr transform = openvdb::math::Transform::createLinearTransform(mVoxelSize); *mIdxGrid = openvdb::tools::createPointIndexGrid<PointIndexGrid>(*mPointCache, *transform); *mMinRadius = mPointCache->evalMinRadius(); *mMaxRadius = mPointCache->evalMaxRadius(); } PointIndexGrid::Ptr * const mIdxGrid; PointCache const * const mPointCache; double const mVoxelSize; float * const mMinRadius; float * const mMaxRadius; }; // struct IFOCreateAuxiliaryData ////////// std::vector<PointCache::Ptr> mPointCacheArray; std::vector<PointIndexGrid::Ptr> mIdxGridArray; UT_UniquePtr<float[]> mMinRadiusArray, mMaxRadiusArray; }; // struct PointIndexGridCollection ///@brief TBB function object to construct a @c BoolTree region of interest /// mask for the gather based rasterization step. 
struct ConstructCandidateVoxelMask
{
    using PosType = PointCache::PosType;
    using ScalarType = PosType::value_type;

    using PointIndexTree = openvdb::tools::PointIndexGrid::TreeType;
    using PointIndexLeafNode = PointIndexTree::LeafNodeType;
    using PointIndexType = PointIndexLeafNode::ValueType;

    using BoolTreeType = PointIndexTree::ValueConverter<bool>::Type;
    using BoolLeafNode = BoolTreeType::LeafNodeType;

    /////

    /// @param maskTree             output mask; the final reduction target.
    /// @param points               cached point positions and radii.
    /// @param pointIndexLeafNodes  leafnodes of the point index tree to process.
    /// @param xform                transform of the output volume.
    /// @param clipBox              optional index-space region of interest.
    /// @param interrupter          optional interrupter.
    ConstructCandidateVoxelMask(BoolTreeType& maskTree, const PointCache& points,
        const std::vector<const PointIndexLeafNode*>& pointIndexLeafNodes,
        const openvdb::math::Transform& xform,
        const openvdb::CoordBBox * clipBox = nullptr,
        hvdb::Interrupter* interrupter = nullptr)
        : mMaskTree(false)
        , mMaskTreePt(&maskTree)
        , mMaskAccessor(*mMaskTreePt)
        , mPoints(&points)
        , mPointIndexNodes(&pointIndexLeafNodes.front())
        , mXform(xform)
        , mClipBox(clipBox)
        , mInterrupter(interrupter)
    {
    }

    /// Thread safe copy constructor
    /// @note The split copy writes into its own local mMaskTree; results are
    ///       merged back in join().
    ConstructCandidateVoxelMask(ConstructCandidateVoxelMask& rhs, tbb::split)
        : mMaskTree(false)
        , mMaskTreePt(&mMaskTree)
        , mMaskAccessor(*mMaskTreePt)
        , mPoints(rhs.mPoints)
        , mPointIndexNodes(rhs.mPointIndexNodes)
        , mXform(rhs.mXform)
        , mClipBox(rhs.mClipBox)
        , mInterrupter(rhs.mInterrupter)
    {
    }

    /// For each active point-index voxel, activate the mask region covered by
    /// the world-space bounding box of its points (inflated by their radii).
    void operator()(const tbb::blocked_range<size_t>& range) {

        openvdb::CoordBBox box;
        PosType pos, bboxMin, bboxMax, pMin, pMax;
        ScalarType radius(0.0);

        const PointIndexType *pointIdxPt = nullptr, *endIdxPt = nullptr;

        std::vector<PointIndexType> largeParticleIndices;
        double leafnodeSize = mXform.voxelSize()[0] * double(PointIndexLeafNode::DIM);

        const bool isTransformLinear = mXform.isLinear();

        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

            if (this->wasInterrupted()) {
                tbb::task::self().cancel_group_execution();
                break;
            }

            const PointIndexLeafNode& node = *mPointIndexNodes[n];

            for (PointIndexLeafNode::ValueOnCIter it = node.cbeginValueOn(); it; ++it) {

                node.getIndices(it.pos(), pointIdxPt, endIdxPt);

                // Reset the accumulated world-space bounds for this voxel.
                bboxMin[0] = std::numeric_limits<ScalarType>::max();
                bboxMin[1] = std::numeric_limits<ScalarType>::max();
                bboxMin[2] = std::numeric_limits<ScalarType>::max();
                bboxMax[0] = -bboxMin[0];
                bboxMax[1] = -bboxMin[1];
                bboxMax[2] = -bboxMin[2];

                bool regionIsValid = false;

                while (pointIdxPt < endIdxPt) {

                    radius = mPoints->radius(*pointIdxPt);

                    // Particles wider than a leafnode are handled separately
                    // (radial activation) to avoid hugely overestimated boxes.
                    if (isTransformLinear && radius > leafnodeSize) {
                        largeParticleIndices.push_back(*pointIdxPt);
                    } else {
                        pos = mPoints->pos(*pointIdxPt);

                        pMin[0] = pos[0] - radius;
                        pMin[1] = pos[1] - radius;
                        pMin[2] = pos[2] - radius;
                        pMax[0] = pos[0] + radius;
                        pMax[1] = pos[1] + radius;
                        pMax[2] = pos[2] + radius;

                        bboxMin[0] = std::min(bboxMin[0], pMin[0]);
                        bboxMin[1] = std::min(bboxMin[1], pMin[1]);
                        bboxMin[2] = std::min(bboxMin[2], pMin[2]);
                        bboxMax[0] = std::max(bboxMax[0], pMax[0]);
                        bboxMax[1] = std::max(bboxMax[1], pMax[1]);
                        bboxMax[2] = std::max(bboxMax[2], pMax[2]);

                        regionIsValid = true;
                    }

                    ++pointIdxPt;
                }

                if (regionIsValid) {

                    if (isTransformLinear) {
                        box.min() = mXform.worldToIndexCellCentered(bboxMin);
                        box.max() = mXform.worldToIndexCellCentered(bboxMax);
                    } else {
                        // Nonlinear (frustum) transform: bound the warped box.
                        openvdb::math::Vec3d ijkMin, ijkMax;
                        openvdb::math::calculateBounds(mXform, bboxMin, bboxMax, ijkMin, ijkMax);

                        box.min() = openvdb::Coord::round(ijkMin);
                        box.max() = openvdb::Coord::round(ijkMax);
                    }

                    if (mClipBox) {
                        if (mClipBox->hasOverlap(box)) {
                            // Intersect bbox with the region of interest.
                            box.min() = openvdb::Coord::maxComponent(box.min(), mClipBox->min());
                            box.max() = openvdb::Coord::minComponent(box.max(), mClipBox->max());
                            activateRegion(box);
                        }
                    } else {
                        activateRegion(box);
                    }
                }
            }
        }

        // Deferred handling of particles wider than a leafnode: activate an
        // (approximately spherical) region per particle.
        for (size_t n = 0, N = largeParticleIndices.size(); n != N; ++n) {

            radius = mPoints->radius(largeParticleIndices[n]);
            pos = mPoints->pos(largeParticleIndices[n]);

            bboxMin[0] = std::numeric_limits<ScalarType>::max();
            bboxMin[1] = std::numeric_limits<ScalarType>::max();
            bboxMin[2] = std::numeric_limits<ScalarType>::max();
            bboxMax[0] = -bboxMin[0];
            bboxMax[1] = -bboxMin[1];
            bboxMax[2] = -bboxMin[2];

            pMin[0] = pos[0] - radius;
            pMin[1] = pos[1] - radius;
            pMin[2] = pos[2] - radius;
            pMax[0] = pos[0] + radius;
            pMax[1] = pos[1] + radius;
            pMax[2] = pos[2] + radius;

            bboxMin[0] = std::min(bboxMin[0], pMin[0]);
            bboxMin[1] = std::min(bboxMin[1], pMin[1]);
            bboxMin[2] = std::min(bboxMin[2], pMin[2]);
            bboxMax[0] = std::max(bboxMax[0], pMax[0]);
            bboxMax[1] = std::max(bboxMax[1], pMax[1]);
            bboxMax[2] = std::max(bboxMax[2], pMax[2]);

            box.min() = mXform.worldToIndexCellCentered(bboxMin);
            box.max() = mXform.worldToIndexCellCentered(bboxMax);

            if (mClipBox) {
                if (mClipBox->hasOverlap(box)) {
                    // Intersect bbox with the region of interest.
                    box.min() = openvdb::Coord::maxComponent(box.min(), mClipBox->min());
                    box.max() = openvdb::Coord::minComponent(box.max(), mClipBox->max());
                    activateRegion(box);
                }
            } else {
                activateRadialRegion(box);
            }
        }
    }

    /// Merge @a rhs's mask into this one: unique leafnodes are stolen
    /// (re-parented) wholesale; overlapping leafnodes are combined with a
    /// parallel topology union.
    void join(ConstructCandidateVoxelMask& rhs) {

        std::vector<BoolLeafNode*> rhsLeafNodes, overlappingLeafNodes;

        rhsLeafNodes.reserve(rhs.mMaskTreePt->leafCount());
        rhs.mMaskTreePt->getNodes(rhsLeafNodes);

        // Steal unique leafnodes

        openvdb::tree::ValueAccessor<BoolTreeType> lhsAcc(*mMaskTreePt);
        openvdb::tree::ValueAccessor<BoolTreeType> rhsAcc(*rhs.mMaskTreePt);

        using BoolRootNodeType = BoolTreeType::RootNodeType;
        using BoolNodeChainType = BoolRootNodeType::NodeChainType;
        using BoolInternalNodeType = BoolNodeChainType::Get<1>;

        for (size_t n = 0, N = rhsLeafNodes.size(); n < N; ++n) {
            const openvdb::Coord& ijk = rhsLeafNodes[n]->origin();
            if (!lhsAcc.probeLeaf(ijk)) {

                // add node to lhs tree
                lhsAcc.addLeaf(rhsLeafNodes[n]);

                // remove leaf node from rhs tree (ownership moved to lhs above)
                BoolInternalNodeType* internalNode = rhsAcc.probeNode<BoolInternalNodeType>(ijk);

                if (internalNode) {
                    internalNode->stealNode<BoolLeafNode>(ijk, false, false);
                } else {
                    rhs.mMaskTreePt->stealNode<BoolLeafNode>(ijk, false, false);
                }

            } else {
                overlappingLeafNodes.push_back(rhsLeafNodes[n]);
            }
        }

        // Combine overlapping leaf nodes
        tbb::parallel_for(tbb::blocked_range<size_t>(0, overlappingLeafNodes.size()),
            IFOTopologyUnion(*mMaskTreePt, &overlappingLeafNodes[0]));
    }

private:

    bool wasInterrupted() const { return mInterrupter && mInterrupter->wasInterrupted(); }

    // just a rough estimate, but more accurate than activateRegion(...) for large spheres.
    void activateRadialRegion(const openvdb::CoordBBox& bbox)
    {
        using LeafNodeType = BoolTreeType::LeafNodeType;

        const openvdb::Vec3d center = bbox.getCenter();
        const double radius = double(bbox.dim()[0]) * 0.5;

        // inscribed box: fully inside the sphere, so its leafnodes can be
        // activated wholesale without a distance test.

        const double iRadius = radius * double(1.0 / std::sqrt(3.0));
        openvdb::CoordBBox ibox(
            openvdb::Coord::round(openvdb::Vec3d(
                center[0] - iRadius, center[1] - iRadius, center[2] - iRadius)),
            openvdb::Coord::round(openvdb::Vec3d(
                center[0] + iRadius, center[1] + iRadius, center[2] + iRadius)));

        // snap to leafnode origins
        ibox.min() &= ~(LeafNodeType::DIM - 1);
        ibox.max() &= ~(LeafNodeType::DIM - 1);

        openvdb::Coord ijk(0);

        for (ijk[0] = ibox.min()[0]; ijk[0] <= ibox.max()[0]; ijk[0] += LeafNodeType::DIM) {
            for (ijk[1] = ibox.min()[1]; ijk[1] <= ibox.max()[1]; ijk[1] += LeafNodeType::DIM) {
                for (ijk[2] = ibox.min()[2]; ijk[2] <= ibox.max()[2]; ijk[2] += LeafNodeType::DIM) {
                    mMaskAccessor.touchLeaf(ijk)->setValuesOn();
                }
            }
        }

        const openvdb::Coord leafMin = bbox.min() & ~(LeafNodeType::DIM - 1);
        const openvdb::Coord leafMax = bbox.max() & ~(LeafNodeType::DIM - 1);

        openvdb::Vec3d xyz;
        const double leafNodeRadius = double(LeafNodeType::DIM) * std::sqrt(3.0) * 0.5;

        double distSqr = radius + leafNodeRadius;
        distSqr *= distSqr;

        // For leafnodes outside the inscribed box, keep only those whose
        // center lies within the (padded) sphere radius.
        for (ijk[0] = leafMin[0]; ijk[0] <= leafMax[0]; ijk[0] += LeafNodeType::DIM) {
            for (ijk[1] = leafMin[1]; ijk[1] <= leafMax[1]; ijk[1] += LeafNodeType::DIM) {
                for (ijk[2] = leafMin[2]; ijk[2] <= leafMax[2]; ijk[2] += LeafNodeType::DIM) {

                    if (!ibox.isInside(ijk)) {

                        xyz[0] = double(ijk[0]);
                        xyz[1] = double(ijk[1]);
                        xyz[2] = double(ijk[2]);
                        xyz += double(LeafNodeType::DIM - 1) * 0.5;
                        xyz -= center;

                        if (!(xyz.lengthSqr() > distSqr)) {
                            activateLeafNodeRegion(bbox, *mMaskAccessor.touchLeaf(ijk));
                        }
                    }
                }
            }
        }
    }

    /// Activate every voxel of @a bbox across all overlapped leafnodes.
    void activateRegion(const openvdb::CoordBBox& bbox)
    {
        using LeafNodeType = BoolTreeType::LeafNodeType;
        const openvdb::Coord leafMin = bbox.min() & ~(LeafNodeType::DIM - 1);
        const openvdb::Coord leafMax = bbox.max() & ~(LeafNodeType::DIM - 1);
        openvdb::Coord ijk(0);

        for (ijk[0] = leafMin[0]; ijk[0] <= leafMax[0]; ijk[0] += LeafNodeType::DIM) {
            for (ijk[1] = leafMin[1]; ijk[1] <= leafMax[1]; ijk[1] += LeafNodeType::DIM) {
                for (ijk[2] = leafMin[2]; ijk[2] <= leafMax[2]; ijk[2] += LeafNodeType::DIM) {
                    activateLeafNodeRegion(bbox, *mMaskAccessor.touchLeaf(ijk));
                }
            }
        }
    }

    /// Activate the intersection of @a bbox with @a node's voxels.
    template <typename LeafNodeType>
    void activateLeafNodeRegion(const openvdb::CoordBBox& bbox, LeafNodeType& node) const
    {
        const openvdb::Coord& origin = node.origin();
        openvdb::Coord ijk = origin;
        ijk.offset(LeafNodeType::DIM - 1);

        if (bbox.isInside(origin) && bbox.isInside(ijk)) {
            // Leafnode fully covered.
            node.setValuesOn();
        } else if (!node.isValueMaskOn()) {
            const openvdb::Coord ijkMin = openvdb::Coord::maxComponent(bbox.min(), origin);
            const openvdb::Coord ijkMax = openvdb::Coord::minComponent(bbox.max(), ijk);

            // Set voxels by linear offset (x-major, as per LeafNode layout).
            openvdb::Index xPos(0), yPos(0);

            for (ijk[0] = ijkMin[0]; ijk[0] <= ijkMax[0]; ++ijk[0]) {
                xPos = (ijk[0] & (LeafNodeType::DIM - 1u)) << (2 * LeafNodeType::LOG2DIM);
                for (ijk[1] = ijkMin[1]; ijk[1] <= ijkMax[1]; ++ijk[1]) {
                    yPos = xPos + ((ijk[1] & (LeafNodeType::DIM - 1u)) << LeafNodeType::LOG2DIM);
                    for (ijk[2] = ijkMin[2]; ijk[2] <= ijkMax[2]; ++ijk[2]) {
                        node.setValueOn(yPos + (ijk[2] & (LeafNodeType::DIM - 1u)));
                    }
                }
            }
        }
    }

    //////////

    // Internal TBB function objects

    /// Parallel topology union of the given (overlapping) leafnodes into mTree.
    struct IFOTopologyUnion {
        IFOTopologyUnion(BoolTreeType& tree, BoolLeafNode ** nodes)
            : mTree(&tree), mNodes(nodes) { }

        void operator()(const tbb::blocked_range<size_t>& range) const {
            openvdb::tree::ValueAccessor<BoolTreeType> acc(*mTree);
            for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
                acc.probeLeaf(mNodes[n]->origin())->topologyUnion(*mNodes[n]);
            }
        }

        BoolTreeType         * const mTree;
        BoolLeafNode * const * const mNodes;
    }; // struct IFOTopologyUnion

    //////////

    BoolTreeType                               mMaskTree;     // local tree for split copies
    BoolTreeType                       * const mMaskTreePt;   // tree this instance writes to
    openvdb::tree::ValueAccessor<BoolTreeType> mMaskAccessor;

    PointCache               const *   const   mPoints;
    PointIndexLeafNode const * const * const   mPointIndexNodes;
    openvdb::math::Transform const             mXform;
    openvdb::CoordBBox       const *   const   mClipBox;
    hvdb::Interrupter              *   const   mInterrupter;
}; // struct ConstructCandidateVoxelMask


/// Inactivates candidate leafnodes that have no particle overlap.
/// (The ConstructCandidateVoxelMask scheme is overestimating the region
/// of interest when frustum transforms are used, this culls the regions.)
template <typename MaskLeafNodeType>
struct CullFrustumLeafNodes
{
    CullFrustumLeafNodes(
        const PointIndexGridCollection& idxGridCollection,
        std::vector<MaskLeafNodeType*>& nodes,
        const openvdb::math::Transform& xform)
        : mIdxGridCollection(&idxGridCollection)
        , mNodes(nodes.empty() ? nullptr : &nodes.front())
        , mXform(xform)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        using PointIndexTree = openvdb::tools::PointIndexGrid::TreeType;
        using IndexTreeAccessor = openvdb::tree::ValueAccessor<const PointIndexTree>;
        using IndexTreeAccessorPtr = UT_SharedPtr<IndexTreeAccessor>;

        // One accessor per radius subset of the collection.
        UT_UniquePtr<IndexTreeAccessorPtr[]> accessorList(
            new IndexTreeAccessorPtr[mIdxGridCollection->size()]);

        for (size_t i = 0; i < mIdxGridCollection->size(); ++i) {
            const PointIndexTree& tree = mIdxGridCollection->idxGrid(i).tree();
            accessorList[i].reset(new IndexTreeAccessor(tree));
        }

        openvdb::tools::PointIndexIterator<PointIndexTree> pointIndexIter;

        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

            MaskLeafNodeType& maskNode = *mNodes[n];
            if (maskNode.isEmpty()) continue;

            // World-space bounds of this mask leafnode.
            const openvdb::CoordBBox nodeBounds = getInclusiveNodeBounds(maskNode);

            const openvdb::Vec3d tmpMin = mXform.indexToWorld(nodeBounds.min());
            const openvdb::Vec3d tmpMax = mXform.indexToWorld(nodeBounds.max());

            const openvdb::Vec3d bMin = openvdb::math::minComponent(tmpMin, tmpMax);
            const openvdb::Vec3d bMax = openvdb::math::maxComponent(tmpMin, tmpMax);

            bool hasOverlap = false;

            for (size_t i = 0; i < mIdxGridCollection->size(); ++i) {

                // NB: 'sarchRadius' is a historical misspelling of 'searchRadius'.
                const double sarchRadius = double(mIdxGridCollection->maxRadius(i));

                const openvdb::math::Transform& idxGridTransform =
                    mIdxGridCollection->idxGrid(i).transform();

                // Pad by the subset's max radius so touching particles count.
                const openvdb::CoordBBox searchRegion(
                    idxGridTransform.worldToIndexCellCentered(bMin - sarchRadius),
                    idxGridTransform.worldToIndexCellCentered(bMax + sarchRadius));

                pointIndexIter.searchAndUpdate(searchRegion, *accessorList[i]);

                if (pointIndexIter) {
                    hasOverlap = true;
                    break;
                }
            }

            if (!hasOverlap) {
                maskNode.setValuesOff();
            }
        }
    }

private:
    /// Inclusive index-space bounds of @a node (origin to origin + DIM - 1).
    template <typename NodeType>
    static inline openvdb::CoordBBox getInclusiveNodeBounds(const NodeType& node)
    {
        const openvdb::Coord& origin = node.origin();
        return openvdb::CoordBBox(origin, origin.offsetBy(NodeType::DIM - 1));
    }

    PointIndexGridCollection const * const mIdxGridCollection;
    MaskLeafNodeType             * * const mNodes;
    openvdb::math::Transform               mXform;
}; // struct CullFrustumLeafNodes


////////////////////////////////////////


///@brief Constructs a region of interest mask for the gather based rasterization.
inline void
maskRegionOfInterest(PointIndexGridCollection::BoolTreeType& mask,
    const PointIndexGridCollection& idxGridCollection,
    const openvdb::math::Transform& volumeTransform,
    bool clipToFrustum = false,
    hvdb::Interrupter* interrupter = nullptr)
{
    using BoolLeafNodeType = PointIndexGridCollection::BoolTreeType::LeafNodeType;

    // For frustum transforms, optionally clip candidates to the frustum bbox.
    UT_SharedPtr<openvdb::CoordBBox> frustumClipBox;

    if (clipToFrustum && !volumeTransform.isLinear()) {
        using MapType = openvdb::math::NonlinearFrustumMap;
        MapType::ConstPtr map = volumeTransform.map<MapType>();
        if (map) {
            const openvdb::BBoxd& bbox = map->getBBox();
            frustumClipBox.reset(new openvdb::CoordBBox(
                openvdb::Coord::floor(bbox.min()), openvdb::Coord::ceil(bbox.max())));
        }
    }

    for (size_t n = 0; n < idxGridCollection.size(); ++n) {

        if (interrupter && interrupter->wasInterrupted()) break;

        const PointCache& pointCache = idxGridCollection.pointCache(n);
        const PointIndexGridCollection::PointIndexGrid& idxGrid = idxGridCollection.idxGrid(n);

        const double voxelSize = idxGrid.transform().voxelSize()[0];

        PointIndexGridCollection::PointIndexGrid::Ptr regionPointGridPtr; // optionally used
        const PointIndexGridCollection::PointIndexTree* regionPointIndexTree = &idxGrid.tree();

        const double maxPointRadius = idxGridCollection.maxRadius(n);

        // If particles are large relative to the index grid's voxels, re-index
        // at a coarser (radius-sized) resolution for this pass.
        if (maxPointRadius * 1.5 > voxelSize) {
            const openvdb::math::Transform::Ptr xform =
                openvdb::math::Transform::createLinearTransform(maxPointRadius);

            regionPointGridPtr =
                openvdb::tools::createPointIndexGrid<PointIndexGridCollection::PointIndexGrid>(
                    pointCache, *xform);

            regionPointIndexTree = &regionPointGridPtr->tree();
        }

        std::vector<const PointIndexGridCollection::PointIndexLeafNode*> pointIndexLeafNodes;

        pointIndexLeafNodes.reserve(regionPointIndexTree->leafCount());
        regionPointIndexTree->getNodes(pointIndexLeafNodes);

        ConstructCandidateVoxelMask op(mask, pointCache,
            pointIndexLeafNodes, volumeTransform, frustumClipBox.get(), interrupter);

        tbb::parallel_reduce(tbb::blocked_range<size_t>(0, pointIndexLeafNodes.size()), op);
    }

    if (interrupter && interrupter->wasInterrupted()) return;

    if (!volumeTransform.isLinear()) {
        // Frustum transforms overestimate the candidate region; cull empty nodes.
        std::vector<BoolLeafNodeType*> maskNodes;
        mask.getNodes(maskNodes);

        tbb::parallel_for(tbb::blocked_range<size_t>(0, maskNodes.size()),
            CullFrustumLeafNodes<BoolLeafNodeType>(idxGridCollection, maskNodes, volumeTransform));

        openvdb::tools::pruneInactive(mask);
    }
}


/// TBB body: sets every active voxel of the given nodes to a fixed value.
template<typename NodeType>
struct FillActiveValues
{
    using ValueType = typename NodeType::ValueType;

    FillActiveValues(std::vector<NodeType*>& nodes, ValueType val)
        : mNodes(nodes.empty() ? nullptr : &nodes.front()), mValue(val)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            NodeType& node = *mNodes[n];
            for (typename NodeType::ValueOnIter it = node.beginValueOn(); it; ++it) {
                it.setValue(mValue);
            }
        }
    }

    NodeType  * const * const mNodes;
    ValueType           const mValue;
}; // struct FillActiveValues


/// Fills the @a bbox region with leafnode level tiles.
/// (Partially overlapped leafnode tiles are included)
template<typename TreeAccessorType>
inline void
fillWithLeafLevelTiles(TreeAccessorType& treeAcc, const openvdb::CoordBBox& bbox)
{
    using LeafNodeType = typename TreeAccessorType::TreeType::LeafNodeType;

    // Snap bounds to leafnode origins.
    openvdb::Coord imin = bbox.min() & ~(LeafNodeType::DIM - 1);
    openvdb::Coord imax = bbox.max() & ~(LeafNodeType::DIM - 1);

    openvdb::Coord ijk(0);

    for (ijk[0] = imin[0]; ijk[0] <= imax[0]; ijk[0] += LeafNodeType::DIM) {
        for (ijk[1] = imin[1]; ijk[1] <= imax[1]; ijk[1] += LeafNodeType::DIM) {
            for (ijk[2] = imin[2]; ijk[2] <= imax[2]; ijk[2] += LeafNodeType::DIM) {
                treeAcc.addTile(LeafNodeType::LEVEL+1, ijk, true, true);
            }
        }
    }
}


/// Transforms the input @a bbox from an axis-aligned box in @a srcTransform
/// world space to an axis-aligned box in @a targetTransform world space.
/// @details All eight corners are mapped individually and the result is their
///          expanded bound, so the output stays conservative for transforms
///          that rotate or warp the box.
openvdb::CoordBBox
remapBBox(openvdb::CoordBBox& bbox, const openvdb::math::Transform& srcTransform,
    const openvdb::math::Transform& targetTransform)
{
    openvdb::CoordBBox output;

    // corner 1
    openvdb::Coord ijk = bbox.min();
    output.expand(targetTransform.worldToIndexNodeCentered(srcTransform.indexToWorld(ijk)));

    // corner 2
    ijk = openvdb::Coord(bbox.min().x(), bbox.min().y(), bbox.max().z());
    output.expand(targetTransform.worldToIndexNodeCentered(srcTransform.indexToWorld(ijk)));

    // corner 3
    ijk = openvdb::Coord(bbox.max().x(), bbox.min().y(), bbox.max().z());
    output.expand(targetTransform.worldToIndexNodeCentered(srcTransform.indexToWorld(ijk)));

    // corner 4
    ijk = openvdb::Coord(bbox.max().x(), bbox.min().y(), bbox.min().z());
    output.expand(targetTransform.worldToIndexNodeCentered(srcTransform.indexToWorld(ijk)));

    // corner 5
    ijk = openvdb::Coord(bbox.min().x(), bbox.max().y(), bbox.min().z());
    output.expand(targetTransform.worldToIndexNodeCentered(srcTransform.indexToWorld(ijk)));

    // corner 6
    ijk = openvdb::Coord(bbox.min().x(), bbox.max().y(), bbox.max().z());
    output.expand(targetTransform.worldToIndexNodeCentered(srcTransform.indexToWorld(ijk)));

    // corner 7
    ijk = bbox.max();
    output.expand(targetTransform.worldToIndexNodeCentered(srcTransform.indexToWorld(ijk)));

    // corner 8
    ijk = openvdb::Coord(bbox.max().x(), bbox.max().y(), bbox.min().z());
    output.expand(targetTransform.worldToIndexNodeCentered(srcTransform.indexToWorld(ijk)));

    return output;
}


////////////////////////////////////////

// Type traits

/// Maps a grid value type to its Houdini counterpart (scalar specialization).
template<typename T>
struct ValueTypeTraits {
    static const bool IsVec = false;
    static const int TupleSize = 1;
    using ScalarType = T;
    using HoudiniType = T;

    static void convert(T& lhs, const HoudiniType rhs) {
        lhs = rhs;
    }
};

/// Vec3 specialization: converts from UT_Vector3T component-wise.
template<typename T>
struct ValueTypeTraits<openvdb::math::Vec3<T> > {
    static const bool IsVec = true;
    static const int TupleSize = 3;
    using ScalarType = T;
    using HoudiniType = UT_Vector3T<T>;

    static void convert(openvdb::math::Vec3<T>& lhs, const HoudiniType& rhs) {
        lhs[0] = rhs[0];
        lhs[1] = rhs[1];
        lhs[2] = rhs[2];
    }
};


////////////////////////////////////////


///@brief Composites point attributes into voxel data using a weighted average approach.
template<typename _ValueType>
struct WeightedAverageOp
{
    enum { LOG2DIM = openvdb::tools::PointIndexTree::LeafNodeType::LOG2DIM };

    using Ptr = UT_SharedPtr<WeightedAverageOp>;
    using ConstPtr = UT_SharedPtr<const WeightedAverageOp>;

    using ValueType = _ValueType;
    using LeafNodeType = openvdb::tree::LeafNode<ValueType, LOG2DIM>;
    using ScalarType = typename ValueTypeTraits<ValueType>::ScalarType;
    using HoudiniType = typename ValueTypeTraits<ValueType>::HoudiniType;

    /////

    /// @param attrib  Houdini point attribute to sample from.
    /// @param nodes   shared output node buffer (owned by the Attribute wrapper).
    WeightedAverageOp(const GA_Attribute& attrib, UT_UniquePtr<LeafNodeType*[]>& nodes)
        : mHandle(&attrib), mNodes(nodes.get()), mNode(nullptr), mNodeVoxelData(nullptr)
        , mNodeOffset(0), mValue(ScalarType(0.0)), mVaryingDataBuffer(nullptr), mVaryingData(false)
    {
    }

    ~WeightedAverageOp() { if (mNode) delete mNode; }

    const char* getName() const { return mHandle.getAttribute()->getName(); }

    /// Start accumulating into a (possibly recycled) leafnode at @a origin that
    /// will be published to slot @a nodeOffset of the output buffer.
    void beginNodeProcessing(const openvdb::Coord& origin, size_t nodeOffset)
    {
        mVaryingData = false;
        mNodeOffset = nodeOffset;
        if (mNode) mNode->setOrigin(origin);
        else mNode = new LeafNodeType(origin, openvdb::zeroVal<ValueType>());
        mNodeVoxelData = const_cast<ValueType*>(&mNode->getValue(0));//mNode->buffer().data();
    }

    /// Cache the attribute value of the point at @a pointOffset.
    void updateValue(const GA_Offset pointOffset) {
        const HoudiniType val = mHandle.get(pointOffset);
        ValueTypeTraits<ValueType>::convert(mValue, val);
    }

    /// Accumulate (weight, voxel-offset) samples into the current node, using
    /// either the cached point value or the per-sample varying buffer.
    void updateVoxelData(const std::vector<std::pair<float, openvdb::Index> >& densitySamples) {

        using DensitySample = std::pair<float, openvdb::Index>;

        for (size_t n = 0, N = densitySamples.size(); n < N; ++n) {

            const DensitySample& sample = densitySamples[n];

            ValueType& value = mNodeVoxelData[sample.second];

            if (mVaryingData) {
                ValueTypeTraits<ValueType>::convert(mValue, mVaryingDataBuffer[n]);
            }

            value += mValue * sample.first;
        }
    }

    /// Finish the node: normalize by the per-voxel weights, copy the mask
    /// topology from @a maskNode, and publish the node to the output buffer.
    template<typename LeafNodeT>
    void endNodeProcessing(const LeafNodeT& maskNode, float *voxelWeightArray)
    {
        mNode->topologyUnion(maskNode);

        ValueType* values = const_cast<ValueType*>(&mNode->getValue(0));

        for (size_t n = 0; n < LeafNodeType::SIZE; ++n) {
            values[n] *= voxelWeightArray[n];
        }

        mNodes[mNodeOffset] = mNode;
        mNode = nullptr; // ownership transferred to the output buffer
    }

    /// Request per-sample varying values; subsequent updateVoxelData() calls
    /// read from the returned buffer instead of the cached point value.
    HoudiniType* varyingData()
    {
        mVaryingData = true;

        if (!mVaryingDataBuffer) {
            mVaryingDataBuffer.reset(new HoudiniType[LeafNodeType::SIZE]);
        }

        return mVaryingDataBuffer.get();
    }

private:
    GA_ROHandleT<HoudiniType>   mHandle;
    LeafNodeType ** const       mNodes;
    LeafNodeType *              mNode;
    ValueType *                 mNodeVoxelData;
    size_t                      mNodeOffset;
    ValueType                   mValue;
    UT_UniquePtr<HoudiniType[]> mVaryingDataBuffer;
    bool                        mVaryingData;
}; // struct WeightedAverageOp


///@brief Composites point density into voxel data.
template<typename _ValueType>
struct DensityOp
{
    enum { LOG2DIM = openvdb::tools::PointIndexTree::LeafNodeType::LOG2DIM };

    using Ptr = UT_SharedPtr<DensityOp>;
    using ConstPtr = UT_SharedPtr<const DensityOp>;

    using ValueType = _ValueType;
    using LeafNodeType = openvdb::tree::LeafNode<ValueType, LOG2DIM>;

    /////

    /// @param attrib  point position attribute (bound via GA_ROHandleV3).
    /// @param nodes   shared output node buffer (owned by the Attribute wrapper).
    DensityOp(const GA_Attribute& attrib, UT_UniquePtr<LeafNodeType*[]>& nodes)
        : mPosHandle(&attrib), mNodes(nodes.get()), mNode(nullptr), mNodeOffset(0)
    {
    }

    ~DensityOp() { delete mNode; }

    /// Start accumulating into a (possibly recycled) leafnode at @a origin.
    void beginNodeProcessing(const openvdb::Coord& origin, size_t nodeOffset)
    {
        mNodeOffset = nodeOffset;
        if (mNode) mNode->setOrigin(origin);
        else mNode = new LeafNodeType(origin, openvdb::zeroVal<ValueType>());
    }

    /// Direct access to the current node's voxel buffer.
    ValueType* data() {
        return const_cast<ValueType*>(&mNode->getValue(0)); /*mNode->buffer().data();*/
    }

    /// Copy the mask topology from @a maskNode and publish the node.
    template<typename LeafNodeT>
    void endNodeProcessing(const LeafNodeT& maskNode)
    {
        mNode->topologyUnion(maskNode);
        mNodes[mNodeOffset] = mNode;
        mNode = nullptr; // ownership transferred to the output buffer
    }

private:
    GA_ROHandleV3        mPosHandle;
    LeafNodeType * * const mNodes;
    LeafNodeType *       mNode;
    size_t               mNodeOffset;
}; // struct DensityOp


/// Returns @c true if @a detail has a point float-tuple attribute @a name with
/// the tuple size required by @c ValueType.
template<typename ValueType>
inline bool
isValidAttribute(const std::string& name, const GU_Detail& detail)
{
    GA_ROAttributeRef ref =
        detail.findFloatTuple(GA_ATTRIB_POINT, name.c_str(), ValueTypeTraits<ValueType>::TupleSize);

    return ref.isValid();
}


///@brief Wrapper object for Houdini point attributes.
template<typename _ValueType, typename _OperatorType = WeightedAverageOp<_ValueType> >
struct Attribute
{
    enum { LOG2DIM = openvdb::tools::PointIndexTree::LeafNodeType::LOG2DIM };

    using Ptr = UT_SharedPtr<Attribute>;
    using ConstPtr = UT_SharedPtr<const Attribute>;

    using OperatorType = _OperatorType;
    using ValueType = _ValueType;
    using LeafNodeType = openvdb::tree::LeafNode<ValueType, LOG2DIM>;
    using BoolLeafNodeType = openvdb::tree::LeafNode<bool, LOG2DIM>;

    using Transform = openvdb::math::Transform;
    using PointIndexTreeType = openvdb::tools::PointIndexTree;
    using TreeType = typename PointIndexTreeType::template ValueConverter<ValueType>::Type;
    using GridType = typename openvdb::Grid<TreeType>;

    /////

    /// Factory: binds the named point attribute (P maps to a grid named
    /// "density") and returns a null pointer when the attribute is missing
    /// or has the wrong tuple size.
    static Ptr create(const std::string& name, const GU_Detail& detail,
        const Transform& transform)
    {
        GA_ROAttributeRef ref;
        std::string gridName;

        if (name == std::string(GEO_STD_ATTRIB_POSITION)) {
            ref = detail.getP();
            gridName = "density";
        } else {
            ref = detail.findFloatTuple(
                GA_ATTRIB_POINT, name.c_str(), ValueTypeTraits<ValueType>::TupleSize);
            gridName = name;
        }

        if (ref.isValid()) {
            return Ptr(new Attribute<ValueType, OperatorType>(
                *ref.getAttribute(), gridName, transform));
        }

        return Ptr();
    }

    /// Per-thread accessor that writes into this attribute's node buffer.
    typename OperatorType::Ptr getAccessor()
    {
        return typename OperatorType::Ptr(new OperatorType(*mAttrib, mNodes));
    }

    /// (Re)allocate the node buffer for @a nodeCount slots, discarding any
    /// nodes left over from a previous pass.
    void initNodeBuffer(size_t nodeCount)
    {
        clearNodes();

        if (nodeCount > mNodeCount) {
            mNodes.reset(new LeafNodeType*[nodeCount]);
            for (size_t n = 0; n < nodeCount; ++n) mNodes[n] = nullptr;
        }

        mNodeCount = nodeCount;
    }

    /// Move all populated nodes from the working buffer to the output list.
    void cacheNodes()
    {
        mOutputNodes.reserve(std::max(mOutputNodes.size() + 1, mNodeCount));

        for (size_t n = 0; n < mNodeCount; ++n) {
            if (mNodes[n]) {
                mOutputNodes.push_back(mNodes[n]);
                mNodes[n] = nullptr;
            }
        }
    }

    /// Frustum variant of cacheNodes(): assembles the working nodes into a
    /// linear-transform grid, resamples it into this attribute's (frustum)
    /// transform, then steals the resampled leafnodes that match the given
    /// mask nodes into the output list.
    void cacheFrustumNodes(std::vector<const BoolLeafNodeType*>& nodes, double voxelSize)
    {
        mOutputNodes.reserve(std::max(mOutputNodes.size() + 1, mNodeCount));

        typename GridType::Ptr grid = GridType::create();

        IFOPopulateTree op(grid->tree(), mNodes.get());
        tbb::parallel_reduce(tbb::blocked_range<size_t>(0, mNodeCount), op);

        grid->setTransform(openvdb::math::Transform::createLinearTransform(voxelSize));

        typename GridType::Ptr frustumGrid = GridType::create();
        frustumGrid->setTransform(mTransform.copy());

        hvdb::Interrupter interrupter;
        openvdb::tools::resampleToMatch<openvdb::tools::BoxSampler>(
            *grid, *frustumGrid, interrupter);

        TreeType& frustumTree = frustumGrid->tree();

        for (size_t n = 0, N = nodes.size(); n < N; ++n) {

            const BoolLeafNodeType& maskNode = *nodes[n];

            LeafNodeType * node = frustumTree.template stealNode<LeafNodeType>(
                maskNode.origin(), frustumTree.background(), false);

            if (node) {

                // Deactivate voxels outside the mask.
                if (node->getValueMask() != maskNode.getValueMask()) {
                    typename BoolLeafNodeType::ValueOffCIter it = maskNode.cbeginValueOff();
                    for (; it; ++it) {
                        node->setValueOff(it.pos(), frustumTree.background());
                    }
                }

                if (node->isEmpty()) {
                    delete node;
                } else {
                    mOutputNodes.push_back(node);
                }
            }
        } // end node loop
    }

    /// Assemble the cached output nodes into a grid (named and classed
    /// according to the attribute) and append it to @a outputGrids.
    /// Ownership of the nodes is transferred to the grid.
    void exportGrid(std::vector<openvdb::GridBase::Ptr>& outputGrids)
    {
        typename GridType::Ptr grid = GridType::create();

        IFOPopulateTree op(grid->tree(), mOutputNodes.empty() ? nullptr : &mOutputNodes.front());
        tbb::parallel_reduce(tbb::blocked_range<size_t>(0, mOutputNodes.size()), op);

        if (!grid->tree().empty()) {

            grid->setTransform(mTransform.copy());
            grid->setName(mName);

            if (mName == std::string("density")) {
                grid->setGridClass(openvdb::GRID_FOG_VOLUME);
            }

            // Tag standard vector attributes with the matching vector semantics.
            if (ValueTypeTraits<ValueType>::IsVec) {
                if (mName == std::string(GEO_STD_ATTRIB_VELOCITY)) {
                    grid->setVectorType(openvdb::VEC_CONTRAVARIANT_RELATIVE);
                } else if (mName == std::string(GEO_STD_ATTRIB_NORMAL)) {
                    grid->setVectorType(openvdb::VEC_COVARIANT_NORMALIZE);
                } else if (mName == std::string(GEO_STD_ATTRIB_POSITION)) {
                    grid->setVectorType(openvdb::VEC_CONTRAVARIANT_ABSOLUTE);
                }
            }

            outputGrids.push_back(grid);
        }

        mOutputNodes.clear();
    }

    ~Attribute()
    {
        clearNodes();

        for (size_t n = 0, N = mOutputNodes.size(); n < N; ++n) {
            if (mOutputNodes[n] != nullptr) delete mOutputNodes[n];
        }
    }

private:
    /// Delete any nodes still held in the working buffer.
    void clearNodes()
    {
        for (size_t n = 0; n < mNodeCount; ++n) {
            if (mNodes[n] != nullptr) delete mNodes[n];
        }
    }

    Attribute(const GA_Attribute& attrib, const std::string& name, const Transform& transform)
        : mAttrib(&attrib), mName(name), mNodeCount(0), mNodes(nullptr)
        , mOutputNodes(), mTransform(transform)
    {
    }

    //////////

    // Internal TBB function objects

    /// Parallel-reduce body that moves nodes from a flat buffer into a tree
    /// (nulling out the buffer slots), then merges split-copy trees in join().
    struct IFOPopulateTree {
        IFOPopulateTree(TreeType& tree, LeafNodeType ** nodes)
            : mTree(), mAccessor(tree), mNodes(nodes) {}

        IFOPopulateTree(IFOPopulateTree& rhs, tbb::split) // Thread safe copy constructor
            : mTree(rhs.mAccessor.tree().background()), mAccessor(mTree), mNodes(rhs.mNodes)
        {
        }

        void operator()(const tbb::blocked_range<size_t>& range) {
            for (size_t n = range.begin(), N = range.end(); n != N; ++n) {
                if (mNodes[n]) {
                    mAccessor.addLeaf(mNodes[n]);
                    mNodes[n] = nullptr;
                }
            }
        }

        void join(const IFOPopulateTree& rhs) {
            mAccessor.tree().merge(rhs.mAccessor.tree());
        }

        TreeType mTree;
        openvdb::tree::ValueAccessor<TreeType> mAccessor;
        LeafNodeType * * const mNodes;
    }; // struct IFOPopulateTree

    //////////

    GA_Attribute const * const    mAttrib;
    const std::string             mName;
    size_t                        mNodeCount;
    UT_UniquePtr<LeafNodeType*[]> mNodes;
    std::vector<LeafNodeType*>    mOutputNodes;
    openvdb::math::Transform      mTransform;
}; // struct Attribute


// Attribute utility methods


/// Initialize the node buffer of a single attribute, if present.
template<typename AttributeType>
inline void
initializeAttributeBuffers(UT_SharedPtr<AttributeType>& attr, size_t nodeCount)
{
    if (attr) attr->initNodeBuffer(nodeCount);
}

/// Initialize the node buffers of a list of attributes.
template<typename AttributeType>
inline void
initializeAttributeBuffers(std::vector<UT_SharedPtr<AttributeType> >& attr, size_t nodeCount)
{
    for (size_t n = 0, N = attr.size(); n < N; ++n) {
        attr[n]->initNodeBuffer(nodeCount);
    }
}

/// Cache the populated nodes of a single attribute, if present.
template<typename AttributeType>
inline void
cacheAttributeBuffers(UT_SharedPtr<AttributeType>& attr)
{
    if (attr) attr->cacheNodes();
}

/// Cache the populated nodes of a list of attributes.
template<typename AttributeType>
inline void
cacheAttributeBuffers(std::vector<UT_SharedPtr<AttributeType> >& attr)
{
    for (size_t n = 0, N = attr.size(); n < N; ++n) {
        attr[n]->cacheNodes();
    }
}

/// Frustum-resampling variant of cacheAttributeBuffers() for one attribute.
template<typename AttributeType, typename LeafNodeType>
inline void
cacheFrustumAttributeBuffers(UT_SharedPtr<AttributeType>& attr,
    std::vector<const LeafNodeType*>& nodes, double voxelSize)
{
    if (attr) attr->cacheFrustumNodes(nodes, voxelSize);
}

/// Frustum-resampling variant of cacheAttributeBuffers() for a list.
template<typename AttributeType, typename LeafNodeType>
inline void
cacheFrustumAttributeBuffers(std::vector<UT_SharedPtr<AttributeType> >& attr,
    std::vector<const LeafNodeType*>& nodes, double voxelSize)
{
    for (size_t n = 0, N = attr.size(); n < N; ++n) {
        attr[n]->cacheFrustumNodes(nodes, voxelSize);
    }
}

/// Export the grid of a single attribute, if present.
template<typename AttributeType>
inline void
exportAttributeGrid(UT_SharedPtr<AttributeType>& attr,
    std::vector<openvdb::GridBase::Ptr>& outputGrids)
{
    if (attr) attr->exportGrid(outputGrids);
}

/// Export the grids of a list of attributes.
template<typename AttributeType>
inline void
exportAttributeGrid(std::vector<UT_SharedPtr<AttributeType> >& attr,
    std::vector<openvdb::GridBase::Ptr>& outputGrids)
{
    for (size_t n = 0, N = attr.size(); n < N; ++n) {
        attr[n]->exportGrid(outputGrids);
    }
}


////////////////////////////////////////

// VEX Utilities
/// @brief Per-thread CVEX program instance.
/// @details Declares the rasterizer's CVEX inputs (varying voxel positions
///     plus uniform time/particle parameters), loads the program from the
///     parsed argument list, and owns scratch buffers sized for
///     @a maxArraySize voxels.
struct VEXProgram {

    using Ptr = UT_SharedPtr<VEXProgram>;

    VEXProgram(OP_Caller& opcaller, const UT_WorkArgs& vexArgs, fpreal time, size_t maxArraySize,
        GU_VexGeoInputs& geoinputs)
        : mCVEX()
        , mRunData()
        , mMaxArraySize(maxArraySize)
        , mWorldCoordBuffer()
        , mNoiseBuffer()
        , mVEXLoaded(false)
        , mIsTimeDependant(false)
    {
        mRunData.setOpCaller(&opcaller);
        mRunData.setTime(time);
        mRunData.setGeoInputs(&geoinputs);

        // array attributes
        mCVEX.addInput("voxelpos", CVEX_TYPE_VECTOR3, true);
        mCVEX.addInput("Time", CVEX_TYPE_FLOAT, false);
        mCVEX.addInput("TimeInc", CVEX_TYPE_FLOAT, false);
        mCVEX.addInput("Frame", CVEX_TYPE_FLOAT, false);

        // uniform attributes
        mCVEX.addInput("voxelsize", CVEX_TYPE_FLOAT, false);
        mCVEX.addInput("pcenter", CVEX_TYPE_VECTOR3, false);
        mCVEX.addInput("pradius", CVEX_TYPE_FLOAT, false);
        mCVEX.addInput("pindex", CVEX_TYPE_INTEGER, false);

        mVEXLoaded = mCVEX.load(vexArgs.getArgc(), vexArgs.getArgv());
    }

    /// Execute the program over @a arraySize elements (no-op if load failed)
    /// and record whether the run turned out to be time dependent.
    void run(size_t arraySize) {
        if (mVEXLoaded) {
            mRunData.setTimeDependent(false);
            mCVEX.run(int(arraySize), false, &mRunData);
            mIsTimeDependant = mRunData.isTimeDependent();
        }
    }

    bool isTimeDependant() const { return mIsTimeDependant; }

    /// Look up a declared input; returns nullptr if the program is not loaded.
    CVEX_Value* findInput(const char *name, CVEX_Type type) {
        return mVEXLoaded ? mCVEX.findInput(name, type) : nullptr;
    }

    /// Look up a program output; returns nullptr if the program is not loaded.
    CVEX_Value* findOutput(const char *name, CVEX_Type type) {
        return mVEXLoaded ? mCVEX.findOutput(name, type) : nullptr;
    }

    //////////

    // Array buffers (lazily allocated, reused across runs)

    UT_Vector3* getWorldCoordBuffer() {
        if (!mWorldCoordBuffer) mWorldCoordBuffer.reset(new UT_Vector3[mMaxArraySize]);
        return mWorldCoordBuffer.get();
    }

    fpreal32* getNoiseBuffer() {
        if (!mNoiseBuffer) mNoiseBuffer.reset(new fpreal32[mMaxArraySize]);
        return mNoiseBuffer.get();
    }

private:
    CVEX_Context mCVEX;
    CVEX_RunData mRunData;
    const size_t mMaxArraySize;
    UT_UniquePtr<UT_Vector3[]> mWorldCoordBuffer;
    UT_UniquePtr<fpreal32[]> mNoiseBuffer;
    bool mVEXLoaded, mIsTimeDependant;
}; // struct VEXProgram


/// @brief Shared VEX execution context.
/// @details Holds the parsed script, time settings and geometry inputs, and
///     hands out one lazily constructed VEXProgram per thread.
struct VEXContext {

    VEXContext(OP_Caller& opcaller, const UT_String& script, size_t maxArraySize)
        : mThreadLocalTable()
        , mCaller(&opcaller)
        , mVexScript(script)
        , mVexArgs()
        , mMaxArraySize(maxArraySize)
        , mTime(0.0f)
        , mTimeInc(0.0f)
        , mFrame(0.0f)
        , mIsTimeDependant()
        , mVexInputs()
    {
        mIsTimeDependant = 0;
        mVexScript.parse(mVexArgs);
    }

    void setTime(fpreal time, fpreal timeinc, fpreal frame) {
        mTime = time; mTimeInc = timeinc; mFrame = frame;
    }

    void setInput(int idx, const GU_Detail *geo) {
        mVexInputs.setInput(idx, geo);
    }

    /// Returns this thread's VEXProgram, constructing it on first use.
    /// (NB: "Theread" is a historical misspelling kept for API compatibility.)
    VEXProgram& getThereadLocalVEXProgram() {
        VEXProgram::Ptr& ptr = mThreadLocalTable.local();
        if (!ptr) {
            ptr.reset(new VEXProgram(*mCaller, mVexArgs, mTime, mMaxArraySize, mVexInputs));
        }
        return *ptr;
    }

    fpreal time() const { return mTime; }
    fpreal timeInc() const { return mTimeInc; }
    fpreal frame() const { return mFrame; }

    void setTimeDependantFlag() { mIsTimeDependant = 1; }
    bool isTimeDependant() const { return mIsTimeDependant == 1; }

private:
    tbb::enumerable_thread_specific<VEXProgram::Ptr> mThreadLocalTable;
    OP_Caller* mCaller;
    UT_String mVexScript;
    UT_WorkArgs mVexArgs;
    const size_t mMaxArraySize;
    fpreal mTime, mTimeInc, mFrame;
    tbb::atomic<int> mIsTimeDependant; // atomic: may be set from worker threads
    GU_VexGeoInputs mVexInputs;
};


////////////////////////////////////////


///@brief Gather based point rasterization.
/// @brief  TBB body that rasterizes particles into a set of leaf-node buffers.
///
/// For each region-of-interest mask leaf node in the given range, gathers all
/// points whose radius overlaps the node (via the point-index grids), computes
/// a per-voxel density falloff, optionally modulates the samples with a VEX
/// shader, and composites the results into density/attribute buffers.
struct RasterizePoints
{
    using PosType = PointCache::PosType;
    using ScalarType = PosType::value_type;

    using PointIndexTree = openvdb::tools::PointIndexGrid::TreeType;
    using PointIndexLeafNode = PointIndexTree::LeafNodeType;
    using PointIndexType = PointIndexLeafNode::ValueType;

    // Mask leaf nodes share the point-index leaf dimensions so linear voxel
    // offsets are interchangeable between the two trees.
    using BoolLeafNodeType = openvdb::tree::LeafNode<bool, PointIndexLeafNode::LOG2DIM>;

    using DensityAttribute = Attribute<float, DensityOp<float> >;
    using Vec3sAttribute = Attribute<openvdb::Vec3s>;
    using FloatAttribute = Attribute<float>;

    // (density weight, linear voxel offset within the leaf node)
    using DensitySample = std::pair<float, openvdb::Index>;

    enum DensityTreatment { ACCUMULATE = 0, MAXIMUM, MINIMUM };

    /////

    /// @param detail               source point geometry
    /// @param idxGridCollection    point-index acceleration grids (one per radius band)
    /// @param regionMaskLeafNodes  leaf nodes of the region-of-interest mask
    /// @param volumeXform          index space of the output volume
    /// @param treatment            how overlapping densities are composited
    /// @param densityScale         global density multiplier
    /// @param solidRatio           fraction of the radius at full density
    /// @param interrupter          optional cooperative-cancellation hook
    RasterizePoints(const GU_Detail& detail,
        const PointIndexGridCollection& idxGridCollection,
        std::vector<const BoolLeafNodeType*>& regionMaskLeafNodes,
        const openvdb::math::Transform& volumeXform,
        DensityTreatment treatment,
        const float densityScale = 1.0,
        const float solidRatio = 0.0,
        hvdb::Interrupter* interrupter = nullptr)
        : mDetail(&detail)
        , mIdxGridCollection(&idxGridCollection)
        , mRegionMaskNodes(&regionMaskLeafNodes.front())
        , mInterrupter(interrupter)
        , mDensityAttribute(nullptr)
        , mVectorAttributes(nullptr)
        , mFloatAttributes(nullptr)
        , mVEXContext(nullptr)
        , mVolumeXform(volumeXform)
        , mDensityScale(densityScale)
        , mSolidRatio(solidRatio)
        , mDensityTreatment(treatment)
    {
    }

    // Optional outputs; null/empty means "do not transfer".
    void setDensityAttribute(DensityAttribute * a) { mDensityAttribute = a; }

    void setVectorAttributes(std::vector<Vec3sAttribute::Ptr>& v) {
        if(!v.empty()) mVectorAttributes = &v;
    }

    void setFloatAttributes(std::vector<FloatAttribute::Ptr>& v) {
        if (!v.empty()) mFloatAttributes = &v;
    }

    void setVEXContext(VEXContext * v) { mVEXContext = v; }

    /////

    /// Thread safe copy constructor
    RasterizePoints(const RasterizePoints& rhs)
        : mDetail(rhs.mDetail)
        , mIdxGridCollection(rhs.mIdxGridCollection)
        , mRegionMaskNodes(rhs.mRegionMaskNodes)
        , mInterrupter(rhs.mInterrupter)
        , mDensityAttribute(rhs.mDensityAttribute)
        , mVectorAttributes(rhs.mVectorAttributes)
        , mFloatAttributes(rhs.mFloatAttributes)
        , mVEXContext(rhs.mVEXContext)
        , mVolumeXform(rhs.mVolumeXform)
        , mDensityScale(rhs.mDensityScale)
        , mSolidRatio(rhs.mSolidRatio)
        , mDensityTreatment(rhs.mDensityTreatment)
    {
    }

    /// TBB body: rasterize the mask leaf nodes in @a range.
    void operator()(const tbb::blocked_range<size_t>& range) const {

        // Setup attribute operators
        DensityAttribute::OperatorType::Ptr densityAttribute;
        if (mDensityAttribute) densityAttribute = mDensityAttribute->getAccessor();

        std::vector<Vec3sAttribute::OperatorType::Ptr> vecAttributes;
        if (mVectorAttributes && !mVectorAttributes->empty()) {
            vecAttributes.reserve(mVectorAttributes->size());
            for (size_t n = 0, N = mVectorAttributes->size(); n < N; ++n) {
                vecAttributes.push_back((*mVectorAttributes)[n]->getAccessor());
            }
        }

        std::vector<FloatAttribute::OperatorType::Ptr> floatAttributes;
        if (mFloatAttributes && !mFloatAttributes->empty()) {
            floatAttributes.reserve(mFloatAttributes->size());
            for (size_t n = 0, N = mFloatAttributes->size(); n < N; ++n) {
                floatAttributes.push_back((*mFloatAttributes)[n]->getAccessor());
            }
        }

        const bool transferAttributes = !vecAttributes.empty() || !floatAttributes.empty();

        // Bind optional density attribute
        GA_ROHandleF densityHandle;
        GA_ROAttributeRef densityRef = mDetail->findFloatTuple(GA_ATTRIB_POINT, "density", 1);
        if (densityRef.isValid()) densityHandle.bind(densityRef.getAttribute());

        openvdb::tools::PointIndexIterator<PointIndexTree> pointIndexIter;

        using IndexTreeAccessor = openvdb::tree::ValueAccessor<const PointIndexTree>;
        using IndexTreeAccessorPtr = UT_SharedPtr<IndexTreeAccessor>;

        // One cached tree accessor per point-index grid.
        UT_UniquePtr<IndexTreeAccessorPtr[]> accessorList(
            new IndexTreeAccessorPtr[mIdxGridCollection->size()]);

        for (size_t i = 0; i < mIdxGridCollection->size(); ++i) {
            const PointIndexTree& tree = mIdxGridCollection->idxGrid(i).tree();
            accessorList[i].reset(new IndexTreeAccessor(tree));
        }

        // scratch space
        UT_UniquePtr<float[]> voxelWeightArray(new float[BoolLeafNodeType::SIZE]);
        std::vector<DensitySample> densitySamples;
        densitySamples.reserve(BoolLeafNodeType::SIZE);

        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

            if (this->wasInterrupted()) {
                tbb::task::self().cancel_group_execution();
                break;
            }

            const BoolLeafNodeType& maskNode = *mRegionMaskNodes[n];
            if (maskNode.isEmpty()) continue;

            const openvdb::Coord& origin = maskNode.origin();

            if (transferAttributes) {
                memset(voxelWeightArray.get(), 0, BoolLeafNodeType::SIZE * sizeof(float));
            }

            if (densityAttribute) {
                densityAttribute->beginNodeProcessing(origin, n);
            }

            for (size_t i = 0, I = vecAttributes.size(); i < I; ++i) {
                vecAttributes[i]->beginNodeProcessing(origin, n);
            }

            for (size_t i = 0, I = floatAttributes.size(); i < I; ++i) {
                floatAttributes[i]->beginNodeProcessing(origin, n);
            }

            const openvdb::CoordBBox nodeBoundingBox(
                origin, origin.offsetBy(BoolLeafNodeType::DIM - 1));

            // World-space bounds of this leaf node, used to build per-grid
            // search regions below.
            const openvdb::Vec3d bMin = mVolumeXform.indexToWorld(nodeBoundingBox.min());
            const openvdb::Vec3d bMax = mVolumeXform.indexToWorld(nodeBoundingBox.max());

            bool transferData = false;

            // Gather contributions from every point-index grid, padding the
            // search region by that grid's maximum point radius.
            for (size_t i = 0; i < mIdxGridCollection->size(); ++i) {

                if (this->wasInterrupted()) break;

                const double sarchRadius = double(mIdxGridCollection->maxRadius(i));
                const PointCache& pointCache = mIdxGridCollection->pointCache(i);
                const openvdb::math::Transform& idxGridTransform =
                    mIdxGridCollection->idxGrid(i).transform();

                const openvdb::CoordBBox searchRegion(
                    idxGridTransform.worldToIndexCellCentered(bMin - sarchRadius),
                    idxGridTransform.worldToIndexCellCentered(bMax + sarchRadius));

                pointIndexIter.searchAndUpdate(searchRegion, *accessorList[i]);

                transferData |= gatherDensityAndAttributes(densityHandle, pointIndexIter,
                    sarchRadius, pointCache, nodeBoundingBox, densityAttribute, vecAttributes,
                    floatAttributes, voxelWeightArray, densitySamples);
            }

            if (transferData && !this->wasInterrupted()) {

                if (densityAttribute) densityAttribute->endNodeProcessing(maskNode);

                if (transferAttributes) {
                    // Convert accumulated weights into normalization factors.
                    for (size_t nn = 0; nn < BoolLeafNodeType::SIZE; ++nn) {
                        const float weight = (voxelWeightArray[nn] > 0.0f) ?
                            (1.0f / voxelWeightArray[nn]) : 0.0f;
                        // Subnormal input values are nonzero but can result in infinite weights.
                        voxelWeightArray[nn] = openvdb::math::isFinite(weight) ? weight : 0.0f;
                    }

                    for (size_t i = 0, I = vecAttributes.size(); i < I; ++i) {
                        vecAttributes[i]->endNodeProcessing(maskNode, voxelWeightArray.get());
                    }

                    for (size_t i = 0, I = floatAttributes.size(); i < I; ++i) {
                        floatAttributes[i]->endNodeProcessing(maskNode, voxelWeightArray.get());
                    }
                }
            }

        } // end node loop

    } // operator::()

private:
    bool wasInterrupted() const { return mInterrupter && mInterrupter->wasInterrupted(); }

    /// Rasterize the points visited by @a pointIndexIter into the current
    /// leaf node's buffers.  Returns true if any nonzero density was produced.
    bool gatherDensityAndAttributes(
        GA_ROHandleF& densityHandle,
        openvdb::tools::PointIndexIterator<PointIndexTree>& pointIndexIter,
        double sarchRadius,
        const PointCache& pointCache,
        const openvdb::CoordBBox& nodeBoundingBox,
        DensityAttribute::OperatorType::Ptr& densityAttribute,
        std::vector<Vec3sAttribute::OperatorType::Ptr>& vecAttributes,
        std::vector<FloatAttribute::OperatorType::Ptr>& floatAttributes,
        UT_UniquePtr<float[]>& voxelWeightArray,
        std::vector<DensitySample>& densitySamples) const
    {
        const bool hasPointDensity = densityHandle.isValid();
        const bool transferVec3sAttributes = !vecAttributes.empty();
        const bool transferFloatAttributes = !floatAttributes.empty();
        const bool transferAttributes = transferVec3sAttributes || transferFloatAttributes;

        bool hasNonzeroDensityValues = false;

        VEXProgram * cvex = mVEXContext ? &mVEXContext->getThereadLocalVEXProgram() : nullptr;

        ScalarType * const densityData = densityAttribute ? densityAttribute->data() : nullptr;
        const bool exportDensity = densityData != nullptr;

        const float * pointRadiusData = pointCache.radiusData();
        const openvdb::Vec3s * pointPosData = pointCache.posData();

        const double dx = mVolumeXform.voxelSize()[0];
        const double dxSqr = dx * dx;

        openvdb::Coord ijk, pMin, pMax;
        PosType xyz;

        for (; pointIndexIter; ++pointIndexIter) {

            if (this->wasInterrupted()) break;

            // Get attribute values for the given point offset
            const GA_Offset pointOffset = pointCache.offsetFromIndex(*pointIndexIter);

            if (transferVec3sAttributes) {
                for (size_t i = 0, I = vecAttributes.size(); i < I; ++i) {
                    vecAttributes[i]->updateValue(pointOffset);
                }
            }

            if (transferFloatAttributes) {
                for (size_t i = 0, I = floatAttributes.size(); i < I; ++i) {
                    floatAttributes[i]->updateValue(pointOffset);
                }
            }

            // Compute point properties
            xyz = pointPosData[*pointIndexIter];
            openvdb::Vec3d localPos = mVolumeXform.worldToIndex(xyz);

            ScalarType radius = pointRadiusData[*pointIndexIter];

            const float radiusSqr = radius * radius;

            const ScalarType densityScale =
                mDensityScale * (hasPointDensity ? densityHandle.get(pointOffset) : 1.0f);

            // Inner "solid" core gets full density; density falls off linearly
            // over the residual (radius - solidRadius) shell.
            const ScalarType solidRadius = std::min(radius * mSolidRatio, radius);
            const ScalarType residualRadius = std::max(ScalarType(0.0), radius - solidRadius);

            const ScalarType invResidualRadius =
                residualRadius > 0.0f ? 1.0f / residualRadius : 0.0f;

            openvdb::Index xPos(0), yPos(0), pos(0);
            double xSqr, ySqr, zSqr;

            densitySamples.clear();

            // Intersect (point + radius) bbox with leafnode bbox to
            // define the overlapping voxel region.
            pMin = mVolumeXform.worldToIndexCellCentered(xyz - sarchRadius);
            pMax = mVolumeXform.worldToIndexCellCentered(xyz + sarchRadius);

            pMin = openvdb::Coord::maxComponent(nodeBoundingBox.min(), pMin);
            pMax = openvdb::Coord::minComponent(nodeBoundingBox.max(), pMax);

            for (ijk[0] = pMin[0]; ijk[0] <= pMax[0]; ++ijk[0]) {

                if (this->wasInterrupted()) break;

                // Linear leaf-buffer offset built from the voxel's local
                // (masked) coordinates, one axis per loop level.
                xPos = (ijk[0] & (BoolLeafNodeType::DIM - 1u)) << (2 * BoolLeafNodeType::LOG2DIM);
                xSqr = localPos[0] - double(ijk[0]);
                xSqr *= xSqr;

                for (ijk[1] = pMin[1]; ijk[1] <= pMax[1]; ++ijk[1]) {

                    yPos = xPos + ((ijk[1] & (BoolLeafNodeType::DIM - 1u))
                        << BoolLeafNodeType::LOG2DIM);
                    ySqr = localPos[1] - double(ijk[1]);
                    ySqr *= ySqr;

                    for (ijk[2] = pMin[2]; ijk[2] <= pMax[2]; ++ijk[2]) {

                        pos = yPos + (ijk[2] & (BoolLeafNodeType::DIM - 1u));
                        zSqr = localPos[2] - double(ijk[2]);
                        zSqr *= zSqr;

                        // Squared world-space distance from the point center.
                        const float distSqr = float( (xSqr + ySqr + zSqr) * dxSqr );

                        if (distSqr < radiusSqr) {
                            const float dist = std::sqrt(distSqr) - solidRadius;

                            const float weight = dist > 0.0f ?
                                densityScale * (1.0f - invResidualRadius * dist) : densityScale;

                            if (weight > 0.0f) {
                                densitySamples.push_back(DensitySample(weight, pos));
                            }
                        }
                    }
                }
            } // end overlapping voxel region loop

            hasNonzeroDensityValues |= !densitySamples.empty();

            // Apply VEX shader program to density samples
            if (cvex && !densitySamples.empty()) {
                hasNonzeroDensityValues |= executeVEXShader(*cvex, densitySamples,
                    exportDensity, vecAttributes, floatAttributes, nodeBoundingBox.min(),
                    xyz, radius, pointOffset);
            }

            // Transfer density data to leafnode buffer
            if (densityData && mDensityTreatment == MAXIMUM) { // max
                for (size_t n = 0, N = densitySamples.size(); n < N; ++n) {
                    const DensitySample& sample = densitySamples[n];
                    ScalarType& value = densityData[sample.second];
                    value = std::max(value, sample.first);
                }
            } else if (densityData && mDensityTreatment == ACCUMULATE) { // add
                for (size_t n = 0, N = densitySamples.size(); n < N; ++n) {
                    const DensitySample& sample = densitySamples[n];
                    densityData[sample.second] += sample.first;
                }
            } else if (densityData && mDensityTreatment == MINIMUM) { // min
                for (size_t n = 0, N = densitySamples.size(); n < N; ++n) {
                    const DensitySample& sample = densitySamples[n];
                    ScalarType& value = densityData[sample.second];
                    value = std::min(value, sample.first);
                }
            }

            // Transfer attribute data to leafnode buffers
            if (transferAttributes && hasNonzeroDensityValues) {

                // Accumulate density weights; normalized in endNodeProcessing.
                for (size_t n = 0, N = densitySamples.size(); n < N; ++n) {
                    const DensitySample& sample = densitySamples[n];
                    voxelWeightArray[sample.second] += sample.first;
                }

                for (size_t i = 0, I = vecAttributes.size(); i < I; ++i) {
                    vecAttributes[i]->updateVoxelData(densitySamples);
                }

                for (size_t i = 0, I = floatAttributes.size(); i < I; ++i) {
                    floatAttributes[i]->updateVoxelData(densitySamples);
                }
            }
        } // end point loop

        return hasNonzeroDensityValues;
    } // end gatherDensityAndAttributes method

    /// Bind the current point's samples to the VEX program, run it, and scale
    /// the density samples by the shader's "output" channel (if exported).
    /// Returns true if any scaled density remains nonzero.
    bool executeVEXShader(VEXProgram& cvex,
        std::vector<DensitySample>& densitySamples,
        bool exportDensity,
        std::vector<Vec3sAttribute::OperatorType::Ptr>& vecAttributes,
        std::vector<FloatAttribute::OperatorType::Ptr>& floatAttributes,
        const openvdb::Coord& nodeOrigin,
        const PosType& point,
        ScalarType radius,
        GA_Offset pointOffset) const
    {
        bool timeDependantVEX = false;
        const int numValues = int(densitySamples.size());

        // Varying input: world-space position of every sampled voxel.
        if (CVEX_Value* val = cvex.findInput("voxelpos", CVEX_TYPE_VECTOR3)) {
            UT_Vector3* data = cvex.getWorldCoordBuffer();

            openvdb::Coord coord;
            openvdb::Vec3d ws;

            for (int n = 0; n < numValues; ++n) {
                coord = BoolLeafNodeType::offsetToLocalCoord(densitySamples[n].second);
                coord += nodeOrigin;
                ws = mVolumeXform.indexToWorld(coord);

                UT_Vector3& pointRef = data[n];
                pointRef[0] = float(ws[0]);
                pointRef[1] = float(ws[1]);
                pointRef[2] = float(ws[2]);
            }
            val->setTypedData(data, numValues);
        }

        // Uniform inputs describing the current particle and volume.
        UT_Vector3 particleCenter(point[0], point[1], point[2]);
        if (CVEX_Value* val = cvex.findInput("pcenter", CVEX_TYPE_VECTOR3)) {
            val->setTypedData(&particleCenter, 1);
        }

        fpreal32 particleRadius(radius);
        if (CVEX_Value* val = cvex.findInput("pradius", CVEX_TYPE_FLOAT)) {
            val->setTypedData(&particleRadius, 1);
        }

        int particleIndex = 0;
        if (CVEX_Value* val = cvex.findInput("pindex", CVEX_TYPE_INTEGER)) {
            particleIndex = int(mDetail->pointIndex(pointOffset));
            val->setTypedData(&particleIndex, 1);
        }

        fpreal32 voxelSize(1.0f);
        if (CVEX_Value* val = cvex.findInput("voxelsize", CVEX_TYPE_FLOAT)) {
            voxelSize = fpreal32(mVolumeXform.voxelSize()[0]);
            val->setTypedData(&voxelSize, 1);
        }

        // Reading any of the time channels marks the shader time dependent.
        fpreal32 time = fpreal32(mVEXContext->time());
        if (CVEX_Value* val = cvex.findInput("Time", CVEX_TYPE_FLOAT)) {
            val->setTypedData(&time, 1);
            timeDependantVEX = true;
        }

        fpreal32 timeInc = fpreal32(mVEXContext->timeInc());
        if (CVEX_Value* val = cvex.findInput("TimeInc", CVEX_TYPE_FLOAT)) {
            val->setTypedData(&timeInc, 1);
            timeDependantVEX = true;
        }

        fpreal32 frame = fpreal32(mVEXContext->frame());
        if (CVEX_Value* val = cvex.findInput("Frame", CVEX_TYPE_FLOAT)) {
            val->setTypedData(&frame, 1);
            timeDependantVEX = true;
        }

        bool hasNonzeroDensityValues = false, runProcess = false;

        fpreal32* densityScales = nullptr;

        // Density multipliers come back through the shader's "output" channel.
        if (exportDensity) {
            if (CVEX_Value* val = cvex.findOutput("output", CVEX_TYPE_FLOAT)) {
                runProcess = true;
                densityScales = cvex.getNoiseBuffer();
                for (int n = 0; n < numValues; ++n) densityScales[n] = 1.0f;
                val->setTypedData(densityScales, numValues);
            }
        }

        for (size_t i = 0, I = vecAttributes.size(); i < I; ++i) {
            if (CVEX_Value* val = cvex.findOutput(vecAttributes[i]->getName(),
                    CVEX_TYPE_VECTOR3)) {
                runProcess = true;
                val->setTypedData(vecAttributes[i]->varyingData(), numValues);
            }
        }

        for (size_t i = 0, I = floatAttributes.size(); i < I; ++i) {
            if (CVEX_Value* val = cvex.findOutput(floatAttributes[i]->getName(),
                    CVEX_TYPE_FLOAT)) {
                runProcess = true;
                val->setTypedData(floatAttributes[i]->varyingData(), numValues);
            }
        }

        if (runProcess) {

            cvex.run(numValues);

            timeDependantVEX |= cvex.isTimeDependant();

            if (densityScales) {
                for (int n = 0; n < numValues; ++n) {
                    densitySamples[n].first *= densityScales[n];
                    hasNonzeroDensityValues |= densitySamples[n].first > 0.0f;
                }
            }
        }

        if (timeDependantVEX) mVEXContext->setTimeDependantFlag();

        return hasNonzeroDensityValues;
    } // end executeVEXShader method

    //////////

    GU_Detail const * const mDetail;
    PointIndexGridCollection const * const mIdxGridCollection;
    BoolLeafNodeType const * const * const mRegionMaskNodes;
    hvdb::Interrupter * const mInterrupter;
    DensityAttribute * mDensityAttribute;
    std::vector<Vec3sAttribute::Ptr> * mVectorAttributes;
    std::vector<FloatAttribute::Ptr> * mFloatAttributes;
    VEXContext * mVEXContext;
    openvdb::math::Transform const mVolumeXform;
    ScalarType const mDensityScale, mSolidRatio;
    DensityTreatment const mDensityTreatment;
}; // struct RasterizePoints


////////////////////////////////////////


/// Collection of rasterization settings
struct RasterizationSettings
{
    RasterizationSettings(const GU_Detail& geo, const GA_PointGroup* group, hvdb::Interrupter& in)
        : createDensity(true)
        , clipToFrustum(true)
        , invertMask(false)
        , exportPointMask(false)
        , densityScale(1.0f)
        , particleScale(1.0f)
        , solidRatio(0.0f)
        , treatment(RasterizePoints::MAXIMUM)
        , transform(openvdb::math::Transform::createLinearTransform(0.1))
        , pointsGeo(&geo)
        , pointGroup(group)
        , interrupter(&in)
        , vexContext(nullptr)
        , scalarAttributeNames()
        , vectorAttributeNames()
        , maskGrid()
        , maskBBox()
        , frustumQuality(0.0f)
    {
    }

    inline bool wasInterrupted() { return interrupter->wasInterrupted(); }

    // the input value is clipped to [0, 1] range.
    void setFrustumQuality(float val) {
        frustumQuality = std::max(std::min(val, 1.0f), 0.0f);
    }

    float getFrustumQuality() const { return frustumQuality; }

    bool createDensity, clipToFrustum, invertMask, exportPointMask;
    float densityScale, particleScale, solidRatio;
    RasterizePoints::DensityTreatment treatment;
    openvdb::math::Transform::Ptr transform;
    GU_Detail const * const pointsGeo;
    GA_PointGroup const * const pointGroup;
    hvdb::Interrupter * const interrupter;
    VEXContext * vexContext;
    std::vector<std::string> scalarAttributeNames;
    std::vector<std::string> vectorAttributeNames;
    hvdb::GridCPtr maskGrid;
    UT_SharedPtr<openvdb::BBoxd> maskBBox;

private:
    float frustumQuality; // clipped to [0, 1] via setFrustumQuality()
}; // RasterizationSettings


/// Culls the @a mask region using a user supplied bbox or another grids active voxel topology.
/// @brief  Clip @a mask by the settings' bbox (if any) or by the mask grid's
///   active topology.  For frustum transforms the bbox is remapped through an
///   intermediate linear transform blended by the frustum-quality weight.
inline void
applyClippingMask(PointIndexGridCollection::BoolTreeType& mask, RasterizationSettings& settings)
{
    if (settings.maskBBox) {

        if (settings.transform->isLinear()) {
            bboxClip(mask, *settings.maskBBox, settings.invertMask, *settings.transform);

        } else {
            openvdb::CoordBBox maskBBox;
            mask.evalActiveVoxelBoundingBox(maskBBox);

            // Blend between near-plane and local (deep) voxel size according
            // to the requested frustum quality.
            openvdb::Vec3d locVoxelSize = computeFrustumVoxelSize(
                maskBBox.min().z(), *settings.transform);

            openvdb::Vec3d nearPlaneVoxelSize = settings.transform->voxelSize();

            const double weight = double(settings.getFrustumQuality());
            double voxelSize = linearBlend(nearPlaneVoxelSize.x(), locVoxelSize.x(), weight);

            openvdb::math::Transform::Ptr xform =
                openvdb::math::Transform::createLinearTransform(voxelSize);

            bboxClip(mask, *settings.maskBBox, settings.invertMask,
                *settings.transform, xform.get());
        }

    } else if (settings.maskGrid) {
        GridTopologyClipOp<PointIndexGridCollection::BoolTreeType>
            op(mask, *settings.transform, settings.invertMask);
        settings.maskGrid->apply<hvdb::AllGridTypes>(op);
    }
}


/// @brief  Top-level rasterization driver.
///
/// Builds the point-index acceleration grids, creates the requested density /
/// point attributes, computes and clips the region-of-interest mask, then
/// rasterizes either directly (linear transform) or in depth-sorted subregions
/// re-sampled at blended voxel sizes (frustum transform).  Finished grids are
/// appended to @a outputGrids.
inline void
rasterize(RasterizationSettings& settings, std::vector<openvdb::GridBase::Ptr>& outputGrids)
{
    using BoolTreeType = PointIndexGridCollection::BoolTreeType;
    using BoolLeafNodeType = BoolTreeType::LeafNodeType;

    using DensityAttributeType = Attribute<float, DensityOp<float> >;
    using Vec3sAttribute = Attribute<openvdb::Vec3s>;
    using FloatAttribute = Attribute<float>;

    //////////

    float partitioningVoxelSize = float(std::max(
        settings.transform->voxelSize().x(), settings.transform->voxelSize().z()));

    PointIndexGridCollection idxGridCollection(*settings.pointsGeo,
        settings.particleScale, partitioningVoxelSize, settings.pointGroup,
        settings.interrupter);

    // setup selected point attributes

    DensityAttributeType::Ptr densityAttribute;

    if (settings.createDensity) {
        densityAttribute = DensityAttributeType::create(
            GEO_STD_ATTRIB_POSITION, *settings.pointsGeo, *settings.transform);
    }

    std::vector<Vec3sAttribute::Ptr> vectorAttributes;
    vectorAttributes.reserve(settings.vectorAttributeNames.size());

    for (size_t n = 0; n < settings.vectorAttributeNames.size(); ++n) {
        Vec3sAttribute::Ptr a = Vec3sAttribute::create(
            settings.vectorAttributeNames[n], *settings.pointsGeo, *settings.transform);
        if (a) vectorAttributes.push_back(a);
    }

    std::vector<FloatAttribute::Ptr> scalarAttributes;
    scalarAttributes.reserve(settings.scalarAttributeNames.size());

    for (size_t n = 0; n < settings.scalarAttributeNames.size(); ++n) {
        FloatAttribute::Ptr a = FloatAttribute::create(
            settings.scalarAttributeNames[n], *settings.pointsGeo, *settings.transform);
        if (a) scalarAttributes.push_back(a);
    }

    const bool doTransfer = (densityAttribute || !vectorAttributes.empty()
        || !scalarAttributes.empty());
    if (!(doTransfer || settings.exportPointMask) || settings.wasInterrupted()) return;

    // create region of interest mask

    BoolTreeType roiMask;

    maskRegionOfInterest(roiMask, idxGridCollection, *settings.transform,
        settings.clipToFrustum, settings.interrupter);

    applyClippingMask(roiMask, settings);

    if (settings.exportPointMask) {

        using BoolGridType = openvdb::Grid<BoolTreeType>;
        BoolGridType::Ptr exportMask = BoolGridType::create();

        exportMask->setTransform(settings.transform->copy());
        exportMask->setName("pointmask");

        exportMask->tree().topologyUnion(roiMask);

        std::vector<BoolLeafNodeType*> maskNodes;
        exportMask->tree().getNodes(maskNodes);

        // Set all active mask voxels to 'true' in parallel.
        tbb::parallel_for(tbb::blocked_range<size_t>(0, maskNodes.size()),
            FillActiveValues<BoolLeafNodeType>(maskNodes, true));

        outputGrids.push_back(exportMask);
    }

    if (!doTransfer || settings.wasInterrupted()) return;

    std::vector<const BoolLeafNodeType*> maskNodes;
    roiMask.getNodes(maskNodes);

    if (settings.transform->isLinear()) { // ortho grid rasterization

        initializeAttributeBuffers(densityAttribute, maskNodes.size());
        initializeAttributeBuffers(vectorAttributes, maskNodes.size());
        initializeAttributeBuffers(scalarAttributes, maskNodes.size());

        RasterizePoints op(
            *settings.pointsGeo,
            idxGridCollection,
            maskNodes,
            *settings.transform,
            settings.treatment,
            settings.densityScale,
            settings.solidRatio,
            settings.interrupter);

        op.setDensityAttribute(densityAttribute.get());
        op.setVectorAttributes(vectorAttributes);
        op.setFloatAttributes(scalarAttributes);
        op.setVEXContext(settings.vexContext);

        tbb::parallel_for(tbb::blocked_range<size_t>(0, maskNodes.size()), op);

        cacheAttributeBuffers(densityAttribute);
        cacheAttributeBuffers(vectorAttributes);
        cacheAttributeBuffers(scalarAttributes);

    } else { // frustum grid rasterization

        openvdb::Vec3d nearPlaneVoxelSize = settings.transform->voxelSize();

        const double sizeWeight = double(settings.getFrustumQuality());

        // Process leaf nodes in depth order so nodes of similar frustum voxel
        // size can be batched into one linear-transform subregion.
        std::sort(maskNodes.begin(), maskNodes.end(), CompZCoord<BoolLeafNodeType>());

        size_t nodeIdx = 0;
        while (nodeIdx < maskNodes.size()) {

            int progress = int(double(nodeIdx) / double(maskNodes.size()) * 100.0);
            if (settings.interrupter->wasInterrupted(progress)) {
                return;
            }

            int zCoord = maskNodes[nodeIdx]->origin().z();

            openvdb::Vec3d locVoxelSize = computeFrustumVoxelSize(zCoord, *settings.transform);

            double voxelSize = linearBlend(nearPlaneVoxelSize.x(), locVoxelSize.x(), sizeWeight);

            openvdb::math::Transform::Ptr xform =
                openvdb::math::Transform::createLinearTransform(voxelSize);

            std::vector<const BoolLeafNodeType*> frustumNodeQueue;

            // create subregion mask

            BoolTreeType subregionMask(false);
            openvdb::tree::ValueAccessor<BoolTreeType> maskAcc(subregionMask);

            // Consume nodes until the frustum voxel size doubles or the
            // subregion grows too large.
            for (; nodeIdx < maskNodes.size(); ++nodeIdx) {

                if (settings.interrupter->wasInterrupted(progress)) {
                    return;
                }

                const BoolLeafNodeType& node = *maskNodes[nodeIdx];

                openvdb::Vec3d tmpVoxelSize =
                    computeFrustumVoxelSize(node.origin().z(), *settings.transform);

                if (tmpVoxelSize.x() > (locVoxelSize.x() * 2.0)) break;

                openvdb::CoordBBox bbox;
                node.evalActiveBoundingBox(bbox);

                // Remap the node's bounds into the subregion's linear index
                // space, padded by one voxel.
                bbox = remapBBox(bbox, *settings.transform, *xform);
                bbox.expand(1);

                fillWithLeafLevelTiles(maskAcc, bbox);

                frustumNodeQueue.push_back(maskNodes[nodeIdx]);

                if (subregionMask.activeTileCount() >= 1000000) break;
            }

            if (subregionMask.empty()) continue;

            subregionMask.voxelizeActiveTiles();

            std::vector<const BoolLeafNodeType*> subregionNodes;
            subregionMask.getNodes(subregionNodes);

            size_t subregionNodeCount = subregionNodes.size();

            if (subregionNodeCount == 0) continue;

            // do subregion rasterization

            initializeAttributeBuffers(densityAttribute, subregionNodeCount);
            initializeAttributeBuffers(vectorAttributes, subregionNodeCount);
            initializeAttributeBuffers(scalarAttributes, subregionNodeCount);

            RasterizePoints op(
                *settings.pointsGeo,
                idxGridCollection,
                subregionNodes,
                *xform,
                settings.treatment,
                settings.densityScale,
                settings.solidRatio,
                settings.interrupter);

            op.setDensityAttribute(densityAttribute.get());
            op.setVectorAttributes(vectorAttributes);
            op.setFloatAttributes(scalarAttributes);
            op.setVEXContext(settings.vexContext);

            tbb::parallel_for(tbb::blocked_range<size_t>(0, subregionNodeCount), op);

            // Resample the linear subregion buffers back into the frustum grid.
            cacheFrustumAttributeBuffers(densityAttribute, frustumNodeQueue, voxelSize);
            cacheFrustumAttributeBuffers(vectorAttributes, frustumNodeQueue, voxelSize);
            cacheFrustumAttributeBuffers(scalarAttributes, frustumNodeQueue, voxelSize);
        }
    }

    if (!settings.wasInterrupted()) {
        exportAttributeGrid(densityAttribute, outputGrids);
        exportAttributeGrid(vectorAttributes, outputGrids);
        exportAttributeGrid(scalarAttributes, outputGrids);
    }
}


////////////////////////////////////////


/// Populates the @a scalarAttribNames and @a vectorAttribNames lists.
/// @brief  Split the user-supplied attribute-name string and sort each name
///   into the scalar (1-tuple float) or vector (3-tuple float) list.
/// @param attributeNames           comma/space separated attribute names
/// @param geo                      point geometry to validate the names against
/// @param scalarAttribNames        receives names of float point attributes
/// @param vectorAttribNames        receives names of vector3 point attributes
/// @param createVelocityAttribute  if true, "v" is always considered
/// @param log                      optional sink for warnings about skipped names
inline void
getAttributeNames(
    const std::string& attributeNames,
    const GU_Detail& geo,
    std::vector<std::string>& scalarAttribNames,
    std::vector<std::string>& vectorAttribNames,
    bool createVelocityAttribute,
    UT_ErrorManager* log = nullptr)
{
    if (attributeNames.empty() && !createVelocityAttribute) {
        return;
    }

    std::vector<std::string> allNames;
    hboost::algorithm::split(allNames, attributeNames, hboost::is_any_of(", "));

    // De-duplicate names before validation.
    std::set<std::string> uniqueNames(allNames.begin(), allNames.end());

    if (createVelocityAttribute) {
        uniqueNames.insert("v");
    }

    std::vector<std::string> skipped;

    for (const std::string& name: uniqueNames) {
        GA_ROAttributeRef attr = geo.findFloatTuple(GA_ATTRIB_POINT, name.c_str(), 1, 1);
        if (attr.isValid()) {
            scalarAttribNames.push_back(name);
        } else {
            attr = geo.findFloatTuple(GA_ATTRIB_POINT, name.c_str(), 3);
            if (attr.isValid()) {
                vectorAttribNames.push_back(name);
            } else {
                skipped.push_back(name);
            }
        }
    }

    if (!skipped.empty() && log) {
        log->addWarning(SOP_OPTYPE_NAME, SOP_MESSAGE, ("Unable to rasterize attribute(s): " +
            hboost::algorithm::join(skipped, ", ")).c_str());
        log->addWarning(SOP_OPTYPE_NAME, SOP_MESSAGE, "Only supporting point-rate attributes "
            "of scalar or vector type with floating-point values.");
    }
}


/// Returns a null pointer if geoPt is null or if no reference vdb is found.
inline openvdb::math::Transform::Ptr
getReferenceTransform(const GU_Detail* geoPt, const GA_PrimitiveGroup* group = nullptr,
    UT_ErrorManager* log = nullptr)
{
    if (geoPt) {
        hvdb::VdbPrimCIterator vdbIt(geoPt, group);
        if (vdbIt) {
            // Copy the first matching VDB primitive's transform.
            return (*vdbIt)->getGrid().transform().copy();
        } else if (log) {
            log->addWarning(SOP_OPTYPE_NAME, SOP_MESSAGE, "Could not find a reference VDB grid");
        }
    }

    return openvdb::math::Transform::Ptr();
}


////////////////////////////////////////


/// Returns the "sop_input" index stored in @a spare, or 0 when unset.
inline int
lookupAttrInput(const PRM_SpareData* spare)
{
    const char *istring;
    if (!spare) return 0;
    istring = spare->getValue("sop_input");
    return istring ? atoi(istring) : 0;
}


/// @brief  PRM_ChoiceList callback that fills the attribute menu with the
///   input geometry's float (tuple size 1) and vector (tuple size 3) point
///   attribute names — scalars first, then a separator, then vectors.
///   The "density" attribute is excluded (it has a dedicated toggle).
inline void
populateMeshMenu(void* data, PRM_Name* choicenames, int themenusize,
    const PRM_SpareData* spare, const PRM_Parm*)
{
    choicenames[0].setToken(0);
    choicenames[0].setLabel(0);

    SOP_Node* sop = CAST_SOPNODE(static_cast<OP_Node*>(data));
    if (sop == nullptr) return;

    size_t count = 0;

    try {
        const int inputIndex = lookupAttrInput(spare);
        const GU_Detail* gdp = sop->getInputLastGeo(inputIndex, CHgetEvalTime());

        if (gdp) {
            GA_AttributeDict::iterator iter = gdp->pointAttribs().begin(GA_SCOPE_PUBLIC);
            size_t maxSize(themenusize - 1); // reserve one slot for the terminator

            std::vector<std::string> scalarNames, vectorNames;
            scalarNames.reserve(gdp->pointAttribs().entries(GA_SCOPE_PUBLIC));
            vectorNames.reserve(scalarNames.capacity());

            for (; !iter.atEnd(); ++iter) {
                GA_Attribute const * const attrib = iter.attrib();

                if (attrib->getStorageClass() == GA_STORECLASS_FLOAT) {

                    const int tupleSize = attrib->getTupleSize();
                    const UT_StringHolder& attribName = attrib->getName();

                    if (tupleSize == 1) scalarNames.push_back(attribName.buffer());
                    else if (tupleSize == 3) vectorNames.push_back(attribName.buffer());
                }
            }

            std::sort(scalarNames.begin(), scalarNames.end());

            for (size_t n = 0, N = scalarNames.size(); n < N && count < maxSize; ++n) {
                const char * str = scalarNames[n].c_str();
                // "density" is handled by its own parameter, not this menu.
                if (std::strcmp(str, "density") != 0) {
                    choicenames[count].setToken(str);
                    choicenames[count++].setLabel(str);
                }
            }

            if (!scalarNames.empty() && !vectorNames.empty() && count < maxSize) {
                choicenames[count].setToken(PRM_Name::mySeparator);
                choicenames[count++].setLabel(PRM_Name::mySeparator);
            }

            std::sort(vectorNames.begin(), vectorNames.end());

            for (size_t n = 0, N = vectorNames.size(); n < N && count < maxSize; ++n) {
                choicenames[count].setToken(vectorNames[n].c_str());
                choicenames[count++].setLabel(vectorNames[n].c_str());
            }
        }
    } catch (...) {} // menu population is best-effort; never propagate

    // Terminate the list.
    choicenames[count].setToken(0);
    choicenames[count].setLabel(0);
}

} // unnamed namespace


////////////////////////////////////////

// SOP Implementation

/// @brief  SOP that rasterizes points into VDB density/attribute volumes and
///   can embed a VOP network compiled to a CVEX program for per-voxel shading.
struct SOP_OpenVDB_Rasterize_Points: public hvdb::SOP_NodeVDB {

    SOP_OpenVDB_Rasterize_Points(OP_Network*, const char* name, OP_Operator*);

    static OP_Node* factory(OP_Network*, const char* name, OP_Operator*);

    // Overriding these are what allow us to contain VOPs

    using hvdb::SOP_NodeVDB::evalVariableValue;

    bool evalVariableValue(UT_String &value, int index, int thread) override;

    OP_OperatorFilter* getOperatorFilter() override {
        return mCodeGenerator.getOperatorFilter();
    }

    const char* getChildType() const override { return VOP_OPTYPE_NAME; }
    OP_OpTypeId getChildTypeID() const override { return VOP_OPTYPE_ID; }

    VOP_CodeGenerator* getVopCodeGenerator() override { return &mCodeGenerator; }

    void opChanged(OP_EventType reason, void *data = 0) override;

    bool hasVexShaderParameter(const char* name) override {
        return mCodeGenerator.hasShaderParameter(name);
    }

    // All inputs except the first (points) are reference inputs.
    int isRefInput(unsigned i) const override { return i > 0; }

protected:
    OP_ERROR cookVDBSop(OP_Context&) override;
    bool updateParmsFlags() override;

    /// VOP and VEX functions
    void finishedLoadingNetwork(bool is_child_call = false) override;
    void addNode(OP_Node *node, int notify = 1, int explicitly = 1) override;

    void ensureSpareParmsAreUpdatedSubclass() override;

    VOP_CodeGenerator mCodeGenerator;
    int mInitialParmNum;
}; // struct SOP_OpenVDB_Rasterize_Points


////////////////////////////////////////

// VEX related methods

bool
SOP_OpenVDB_Rasterize_Points::evalVariableValue(UT_String &value, int index, int thread)
{
    // Let the VOP code generator resolve its own variables first.
    if (mCodeGenerator.getVariableString(index, value)) return true;
    // else delegate to base class
    return SOP_Node::evalVariableValue(value, index, thread);
}

void
SOP_OpenVDB_Rasterize_Points::opChanged(OP_EventType reason, void *data)
{
    // Bracket the change notification so the code generator can coalesce
    // updates triggered by this event.
    int update_id = mCodeGenerator.beginUpdate();
    SOP_Node::opChanged(reason, data);
    mCodeGenerator.ownerChanged(reason, data);
mCodeGenerator.endUpdate(update_id); } void SOP_OpenVDB_Rasterize_Points::finishedLoadingNetwork(bool is_child_call) { mCodeGenerator.ownerFinishedLoadingNetwork(); SOP_Node::finishedLoadingNetwork(is_child_call); } void SOP_OpenVDB_Rasterize_Points::addNode(OP_Node *node, int notify, int explicitly) { mCodeGenerator.beforeAddNode(node); SOP_Node::addNode(node, notify, explicitly); mCodeGenerator.afterAddNode(node); } void SOP_OpenVDB_Rasterize_Points::ensureSpareParmsAreUpdatedSubclass() { // Check if the spare parameter templates are out-of-date. if (getVopCodeGenerator() && eventMicroNode(OP_SPAREPARM_MODIFIED).requiresUpdate(0.0)) { // Call into the code generator to update the spare parameter templates. getVopCodeGenerator()->exportedParmsManager()->updateOwnerSpareParmLayout(); } } //////////////////////////////////////// void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "pointgroup", "Point Group") .setChoiceList(&SOP_Node::pointGroupMenu) .setTooltip("A group of points to rasterize.")); parms.add(hutil::ParmFactory(PRM_STRING, "transformvdb", "Transform VDB") .setChoiceList(&hutil::PrimGroupMenuInput2) .setTooltip("VDB grid that defines the output transform")); parms.add(hutil::ParmFactory(PRM_STRING, "maskvdb", "Mask VDB") .setChoiceList(&hutil::PrimGroupMenuInput3) .setTooltip("VDB grid whose active topology defines what region to rasterize into")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "invertmask", "Invert Mask") .setDefault(PRMzeroDefaults) .setTooltip("Toggle to rasterize in the region outside the mask.")); parms.add(hutil::ParmFactory(PRM_FLT_J, "voxelsize", "Voxel Size") .setDefault(PRMpointOneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 5) .setTooltip("Uniform voxel edge length in world units. 
" "Decrease the voxel size to increase the volume resolution.")); parms.add(hutil::ParmFactory(PRM_FLT_J, "frustumquality", "Frustum Quality") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_RESTRICTED, 10) .setTooltip( "1 is the standard quality and is fast. 10 is the highest quality;" " everything is rasterized at the frustum near-plane resolution " " and filtered down to frustum resolution, which can be slow.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "cliptofrustum", "Clip to Frustum") .setDefault(PRMoneDefaults) .setTooltip( "When using a frustum transform, only rasterize data inside the frustum region.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "createdensity", "Create Density Volume") .setDefault(PRMoneDefaults) .setTooltip("Toggle to enable or disable the density volume generation. " "Attribute volumes are still constructed as usual.")); { // density compositing char const * const items[] = { "add", "Add", "max", "Maximum", nullptr }; parms.add(hutil::ParmFactory(PRM_ORD, "compositing", "Density Merge") .setDefault(PRMoneDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setTooltip("How to blend point densities in the density volume") .setDocumentation( "How to blend point densities in the density volume\n\n" "Add:\n" " Sum densities at each voxel.\n" "Maximum:\n" " Choose the maximum density at each voxel.\n")); } parms.add(hutil::ParmFactory(PRM_FLT_J, "densityscale", "Density Scale") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 10) .setTooltip("Scale the density attribute by this amount." " If the density attribute is missing, use a value of one as the reference.") .setDocumentation("Scale the `density` attribute by this amount." 
" If the `density` attribute is missing, use a value of one as the reference.")); parms.add(hutil::ParmFactory(PRM_FLT_J, "particlescale", "Particle Scale") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 10) .setTooltip( "Scale the pscale point attribute, which defines the world-space" " particle radius, by this amount. If the pscale attribute is missing," " use a value of one.") .setDocumentation( "Scale the `pscale` point attribute, which defines the world-space" " particle radius, by this amount. If the `pscale` attribute is missing," " use a value of one.")); parms.add(hutil::ParmFactory(PRM_FLT_J, "solidratio", "Solid Ratio") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_RESTRICTED, 1) .setTooltip("Specify the amount of the particle that gets full density. " "0 means only the very center of the particle will have full density. " "1 means the entire particle out to the radius will have full density.")); parms.add(hutil::ParmFactory(PRM_STRING, "attributes", "Attributes") .setChoiceList(new PRM_ChoiceList(PRM_CHOICELIST_TOGGLE, populateMeshMenu)) .setTooltip( "Specify a list of floating-point or vector point attributes" " to be rasterized using weighted-average blending.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "exportpointmask", "Export Point Mask") .setDefault(PRMzeroDefaults) .setDocumentation( "If enabled, output the point mask used in the rasterization operation.")); ///// parms.add(hutil::ParmFactory(PRM_HEADING,"noiseheading", "")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "modeling", "Enable VEX Processing") .setDefault(PRMzeroDefaults) .setTooltip("Use the contained VOP network to define a VEX procedure that " "determines density and attribute values.")); hvdb::OpenVDBOpFactory("VDB Rasterize Points", SOP_OpenVDB_Rasterize_Points::factory, parms, *table) .setNativeName("") .setOperatorTable(VOP_TABLE_NAME) .setLocalVariables(VOP_CodeGenerator::theLocalVariables) .addInput("Points to 
rasterize") .addOptionalInput("Optional VDB grid that defines the output transform.") .addOptionalInput("Optional VDB or bounding box mask.") .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Rasterize points into density and attribute volumes.\"\"\"\n\ \n\ @overview\n\ \n\ This node rasterizes points into density and attribute grids.\n\ It has an accompanying creation script that adds a default VOP subnetwork\n\ and UI parameters for cloud and velocity field modeling.\n\ \n\ @related\n\ - [Node:sop/cloud]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } bool SOP_OpenVDB_Rasterize_Points::updateParmsFlags() { bool changed = false; const bool refexists = getInput(1) != nullptr; changed |= enableParm("voxelsize", !refexists); changed |= enableParm("transformvdb", refexists); changed |= enableParm("cliptofrustum", refexists); changed |= enableParm("frustumquality", refexists); const bool maskexists = getInput(2) != nullptr; changed |= enableParm("maskvdb", maskexists); changed |= enableParm("invertmask", maskexists); changed |= enableParm("compositing", bool(evalInt("createdensity", 0, 0))); const bool createDensity = evalInt("createdensity", 0, 0) != 0; const bool transferAttributes = !evalStdString("attributes", 0).empty(); const bool enableVEX = createDensity || transferAttributes; changed |= enableParm("modeling", enableVEX); /*const bool proceduralModeling = evalInt("modeling", 0, 0) != 0 && enableVEX; for (int i = mInitialParmNum; i < this->getParmList()->getEntries(); ++i) { changed |= enableParm(i, proceduralModeling); }*/ return changed; } OP_Node* SOP_OpenVDB_Rasterize_Points::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Rasterize_Points(net, name, op); } SOP_OpenVDB_Rasterize_Points::SOP_OpenVDB_Rasterize_Points(OP_Network* net, const char* name, OP_Operator* op) : hvdb::SOP_NodeVDB(net, name, op) , mCodeGenerator(this, new 
VOP_LanguageContextTypeList(VOP_LANGUAGE_VEX, VOP_CVEX_SHADER), 1, 1) , mInitialParmNum(this->getParmList()->getEntries()) { } OP_ERROR SOP_OpenVDB_Rasterize_Points::cookVDBSop(OP_Context& context) { try { hutil::ScopedInputLock lock(*this, context); gdp->clearAndDestroy(); UT_ErrorManager* log = UTgetErrorManager(); // Get UI parameters const fpreal time = context.getTime(); const fpreal samplesPerSec = OPgetDirector()->getChannelManager()->getSamplesPerSec(); const fpreal timeinc = samplesPerSec > 0.0f ? 1.0f / samplesPerSec : 0.0f; const fpreal frame = OPgetDirector()->getChannelManager()->getSample(time); const fpreal densityScale = evalFloat("densityscale", 0, time); const fpreal particleScale = evalFloat("particlescale", 0, time); const fpreal solidRatio = evalFloat("solidratio", 0, time); const exint compositing = evalInt("compositing", 0, time); float voxelSize = float(evalFloat("voxelsize", 0, time)); const bool clipToFrustum = evalInt("cliptofrustum", 0, time) == 1; const bool invertMask = evalInt("invertmask", 0, time) == 1; // Remap the frustum quality control from [1, 10] range to [0, 1] range. // (The [0, 1] range is perceived poorly in the UI. Base quality 0 looks bad.) 
const fpreal frustumQuality = (evalFloat("frustumquality", 0, time) - 1.0) / 9.0; const GU_Detail* pointsGeo = inputGeo(0); const GA_PointGroup* pointGroup = parsePointGroups( evalStdString("pointgroup", time).c_str(), GroupCreator(pointsGeo)); const GU_Detail* refGeo = inputGeo(1); const GA_PrimitiveGroup* refGroup = nullptr; if (refGeo) { refGroup = parsePrimitiveGroups( evalStdString("transformvdb", time).c_str(), GroupCreator(refGeo)); } const GU_Detail* maskGeo = inputGeo(2); const GA_PrimitiveGroup* maskGroup = nullptr; bool expectingVDBMask = false; if (maskGeo) { const auto groupStr = evalStdString("maskvdb", time); if (!groupStr.empty()) { expectingVDBMask = true; } maskGroup = parsePrimitiveGroups(groupStr.c_str(), GroupCreator(maskGeo)); } const bool exportPointMask = 0 != evalInt("exportpointmask", 0, time); const bool createDensity = 0 != evalInt("createdensity", 0, time); const bool applyVEX = evalInt("modeling", 0, time); const bool createVelocityAttribute = applyVEX && hasParm("process_velocity") && evalInt("process_velocity", 0, time) == 1; // Get selected point attribute names std::vector<std::string> scalarAttribNames; std::vector<std::string> vectorAttribNames; getAttributeNames(evalStdString("attributes", time), *pointsGeo, scalarAttribNames, vectorAttribNames, createVelocityAttribute, log); if (exportPointMask || createDensity || !scalarAttribNames.empty() || !vectorAttribNames.empty()) { hvdb::Interrupter boss("Rasterizing points"); // Set rasterization settings openvdb::math::Transform::Ptr xform = getReferenceTransform(refGeo, refGroup, log); if (xform) voxelSize = float(xform->voxelSize().x()); UT_SharedPtr<openvdb::BBoxd> maskBBox; hvdb::GridCPtr maskGrid = getMaskVDB(maskGeo, maskGroup); if (!maskGrid) { if (expectingVDBMask) { addWarning(SOP_MESSAGE, "VDB mask not found."); } else { maskBBox = getMaskGeoBBox(maskGeo); } } RasterizationSettings settings(*pointsGeo, pointGroup, boss); settings.createDensity = createDensity; 
settings.exportPointMask = exportPointMask; settings.densityScale = float(densityScale); settings.particleScale = float(particleScale); settings.solidRatio = float(solidRatio); settings.transform = xform ? xform : openvdb::math::Transform::createLinearTransform(double(voxelSize)); settings.vectorAttributeNames.swap(vectorAttribNames); settings.scalarAttributeNames.swap(scalarAttribNames); settings.treatment = compositing == 0 ? RasterizePoints::ACCUMULATE : RasterizePoints::MAXIMUM; settings.setFrustumQuality(float(frustumQuality)); settings.clipToFrustum = clipToFrustum; settings.maskBBox = maskBBox; settings.maskGrid = maskGrid; settings.invertMask = invertMask; // Setup VEX context OP_Caller caller(this, context.getContextOptionsStack(), context.getContextOptions()); UT_SharedPtr<VEXContext> vexContextPtr; if (applyVEX) { UT_String shoppath = "", script = "op:"; getFullPath(shoppath); script += shoppath; buildVexCommand(script, getSpareParmTemplates(), time); vexContextPtr.reset(new VEXContext(caller, script, PointIndexGridCollection::BoolTreeType::LeafNodeType::SIZE)); vexContextPtr->setTime(time, timeinc, frame); vexContextPtr->setInput(0, pointsGeo); } settings.vexContext = vexContextPtr.get(); // Rasterize attributes and export VDB grids std::vector<openvdb::GridBase::Ptr> outputGrids; rasterize(settings, outputGrids); if (vexContextPtr && vexContextPtr->isTimeDependant()) { OP_Node::flags().setTimeDep(true); } for (size_t n = 0, N = outputGrids.size(); n < N && !boss.wasInterrupted(); ++n) { hvdb::createVdbPrimitive(*gdp, outputGrids[n]); } } else { addWarning(SOP_MESSAGE, "No output selected"); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
118,964
C++
34.186335
100
0.600442
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Sample_Points.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Sample_Points.cc /// /// @author FX R&D OpenVDB team /// /// @brief Samples OpenVDB grid values as attributes on spatially located particles. /// Currently the grid values can be scalar (float, double) or vec3 (float, double) /// but the attributes on the particles are single precision scalar or vec3 #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/PointUtils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/tools/Interpolation.h> // for box sampler #include <openvdb/points/PointCount.h> #include <openvdb/points/PointSample.h> #include <openvdb/points/IndexFilter.h> // for MultiGroupFilter #include <tbb/tick_count.h> // for timing #include <tbb/task.h> // for cancel #include <UT/UT_Interrupt.h> #include <GA/GA_PageHandle.h> #include <GA/GA_PageIterator.h> #include <hboost/algorithm/string/join.hpp> #include <algorithm> #include <iostream> #include <map> #include <memory> #include <set> #include <sstream> #include <stdexcept> #include <string> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; namespace cvdb = openvdb; class SOP_OpenVDB_Sample_Points: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Sample_Points(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Sample_Points() override {} static OP_Node* factory(OP_Network*, const char*, OP_Operator*); // The VDB port holds read-only VDBs. int isRefInput(unsigned input) const override { return (input == 1); } class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; }; //////////////////////////////////////// // Build UI and register this operator. 
void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput2) .setTooltip("A subset of the input VDBs to sample") .setDocumentation("A subset of the input VDBs to sample" " (see [specifying volumes|/model/volumes#group])")); parms.add(hutil::ParmFactory(PRM_STRING, "vdbpointsgroups", "VDB Points Groups") .setTooltip("Subsets of VDB points to sample onto") .setDocumentation( "Subsets of VDB points to sample onto\n\n" "See [Node:sop/vdbpointsgroup] for details on grouping VDB points.\n\n" "This parameter has no effect if there are no input VDB point data primitives.") .setChoiceList(&hvdb::VDBPointsGroupMenuInput1)); parms.add(hutil::ParmFactory(PRM_TOGGLE, "renamevel", "Rename Vel to V") .setDefault(PRMzeroDefaults) .setDocumentation("If an input VDB's name is \"`vel`\", name the point attribute \"`v`\".") .setTooltip("If an input VDB's name is \"vel\", name the point attribute \"v\".")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "attributeexists", "Report Existing Attributes") .setDefault(PRMzeroDefaults) .setTooltip("Display a warning if a point attribute being sampled into already exists.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "verbose", "Verbose") .setDefault(PRMzeroDefaults) .setTooltip("Print the sequence of operations to the terminal.")); // Obsolete parameters hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "threaded", "Multi-threading")); obsoleteParms.add(hutil::ParmFactory(PRM_SEPARATOR, "sep2", "Separator")); // Register the SOP hvdb::OpenVDBOpFactory("VDB Sample Points", SOP_OpenVDB_Sample_Points::factory, parms, *table) .setNativeName("") .setObsoleteParms(obsoleteParms) .addInput("Points") .addInput("VDBs") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Sample_Points::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Sample VDB 
voxel values onto points.\"\"\"\n\ \n\ @overview\n\ \n\ This node samples VDB voxel values into point attributes, where the points\n\ may be either standard Houdini points or points stored in VDB point data grids.\n\ Currently, the voxel values can be single- or double-precision scalars or vectors,\n\ but the attributes on the points will be single-precision only.\n\ \n\ Point attributes are given the same names as the VDBs from which they are sampled.\n\ \n\ @related\n\ - [OpenVDB From Particles|Node:sop/DW_OpenVDBFromParticles]\n\ - [Node:sop/vdbfromparticles]\n\ - [Node:sop/convertvdbpoints]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Sample_Points::factory(OP_Network* net, const char* name, OP_Operator *op) { return new SOP_OpenVDB_Sample_Points(net, name, op); } SOP_OpenVDB_Sample_Points::SOP_OpenVDB_Sample_Points( OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// namespace { using StringSet = std::set<std::string>; using StringVec = std::vector<std::string>; using AttrNameMap = std::map<std::string /*gridName*/, StringSet /*attrNames*/>; using PointGridPtrVec = std::vector<openvdb::points::PointDataGrid::Ptr>; struct VDBPointsSampler { VDBPointsSampler(PointGridPtrVec& points, const StringVec& includeGroups, const StringVec& excludeGroups, AttrNameMap& existingAttrs) : mPointGrids(points) , mIncludeGroups(includeGroups) , mExcludeGroups(excludeGroups) , mExistingAttrs(existingAttrs) {} template <typename GridType> inline void pointSample(const hvdb::Grid& sourceGrid, const std::string& attributeName, hvdb::Interrupter* interrupter) { warnOnExisting(attributeName); const GridType& grid = UTvdbGridCast<GridType>(sourceGrid); for (auto& pointGrid : mPointGrids) { auto leaf = pointGrid->tree().cbeginLeaf(); if (!leaf) continue; 
cvdb::points::MultiGroupFilter filter( mIncludeGroups, mExcludeGroups, leaf->attributeSet()); cvdb::points::pointSample(*pointGrid, grid, attributeName, filter, interrupter); } } template <typename GridType> inline void boxSample(const hvdb::Grid& sourceGrid, const std::string& attributeName, hvdb::Interrupter* interrupter) { warnOnExisting(attributeName); const GridType& grid = UTvdbGridCast<GridType>(sourceGrid); for (auto& pointGrid : mPointGrids) { auto leaf = pointGrid->tree().cbeginLeaf(); if (!leaf) continue; cvdb::points::MultiGroupFilter filter( mIncludeGroups, mExcludeGroups, leaf->attributeSet()); cvdb::points::boxSample(*pointGrid, grid, attributeName, filter, interrupter); } } private: inline void warnOnExisting(const std::string& attributeName) const { for (const auto& pointGrid : mPointGrids) { assert(pointGrid); const auto leaf = pointGrid->tree().cbeginLeaf(); if (!leaf) continue; if (leaf->hasAttribute(attributeName)) { mExistingAttrs[pointGrid->getName()].insert(attributeName); } } } const PointGridPtrVec& mPointGrids; const StringVec& mIncludeGroups; const StringVec& mExcludeGroups; AttrNameMap& mExistingAttrs; }; template <bool staggered = false> struct BoxSampler { template <class Accessor> static bool sample(const Accessor& in, const cvdb::Vec3R& inCoord, typename Accessor::ValueType& result) { return cvdb::tools::BoxSampler::sample<Accessor>(in, inCoord, result); } }; template<> struct BoxSampler<true> { template <class Accessor> static bool sample(const Accessor& in, const cvdb::Vec3R& inCoord, typename Accessor::ValueType& result) { return cvdb::tools::StaggeredBoxSampler::sample<Accessor>(in, inCoord, result); } }; template <bool staggered = false> struct NearestNeighborSampler { template <class Accessor> static bool sample(const Accessor& in, const cvdb::Vec3R& inCoord, typename Accessor::ValueType& result) { return cvdb::tools::PointSampler::sample<Accessor>(in, inCoord, result); } }; template<> struct NearestNeighborSampler<true> { 
template <class Accessor> static bool sample(const Accessor& in, const cvdb::Vec3R& inCoord, typename Accessor::ValueType& result) { return cvdb::tools::StaggeredPointSampler::sample<Accessor>(in, inCoord, result); } }; template< typename GridType, typename GA_RWPageHandleType, bool staggered = false, bool NearestNeighbor = false> class PointSampler { public: using Accessor = typename GridType::ConstAccessor; // constructor. from grid and GU_Detail* PointSampler(const hvdb::Grid& grid, const bool threaded, GU_Detail* gdp, GA_RWAttributeRef& handle, hvdb::Interrupter* interrupter): mGrid(grid), mThreaded(threaded), mGdp(gdp), mAttribPageHandle(handle.getAttribute()), mInterrupter(interrupter) { } // constructor. from other PointSampler(const PointSampler<GridType, GA_RWPageHandleType, staggered>& other): mGrid(other.mGrid), mThreaded(other.mThreaded), mGdp(other.mGdp), mAttribPageHandle(other.mAttribPageHandle), mInterrupter(other.mInterrupter) { } void sample() { mInterrupter->start(); if (mThreaded) { // multi-threaded UTparallelFor(GA_SplittableRange(mGdp->getPointRange()), *this); } else { // single-threaded (*this)(GA_SplittableRange(mGdp->getPointRange())); } mInterrupter->end(); } // only the supported versions don't throw void operator() (const GA_SplittableRange& range) const { if (mInterrupter->wasInterrupted()) { tbb::task::self().cancel_group_execution(); } const GridType& grid = UTvdbGridCast<GridType>(mGrid); // task local grid accessor Accessor accessor = grid.getAccessor(); // sample scalar data onto points typename GridType::ValueType value; cvdb::Vec3R point; GA_ROPageHandleV3 p_ph(mGdp->getP()); GA_RWPageHandleType v_ph = mAttribPageHandle; if(!v_ph.isValid()) { throw std::runtime_error("new attribute not valid"); } // iterate over pages in the range for (GA_PageIterator pit = range.beginPages(); !pit.atEnd(); ++pit) { GA_Offset start; GA_Offset end; // per-page setup p_ph.setPage(*pit); v_ph.setPage(*pit); // iterate over elements in the page for 
(GA_Iterator it(pit.begin()); it.blockAdvance(start, end); ) { for (GA_Offset offset = start; offset < end; ++offset ) { // get the pos. UT_Vector3 pos = p_ph.get(offset); // find the interpolated value point = mGrid.worldToIndex(cvdb::Vec3R(pos[0], pos[1], pos[2])); if (NearestNeighbor) { NearestNeighborSampler<staggered>::template sample<Accessor>( accessor, point, value); } else { BoxSampler<staggered>::template sample<Accessor>(accessor, point, value); } // set the value v_ph.value(offset) = translateValue(value); } } } } template<typename T> inline static float translateValue(const T& vdb_value) { return static_cast<float>(vdb_value); } inline static UT_Vector3 translateValue(cvdb::Vec3f& vdb_value) { return UT_Vector3(vdb_value[0], vdb_value[1], vdb_value[2]); } inline static UT_Vector3 translateValue(cvdb::Vec3d& vdb_value) { return UT_Vector3( static_cast<float>(vdb_value[0]), static_cast<float>(vdb_value[1]), static_cast<float>(vdb_value[2])); } private: // member data const hvdb::Grid& mGrid; bool mThreaded; GU_Detail* mGdp; GA_RWPageHandleType mAttribPageHandle; hvdb::Interrupter* mInterrupter; }; // class PointSampler } // anonymous namespace //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Sample_Points::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); GU_Detail* aGdp = gdp; // where the points live const GU_Detail* bGdp = inputGeo(1, context); // where the grids live // extract UI data const bool verbose = evalInt("verbose", 0, time) != 0; const bool threaded = true; /*evalInt("threaded", 0, time);*/ // total number of points in vdb grids - this is used when verbose execution is // requested, but will otherwise remain 0 size_t nVDBPoints = 0; StringVec includeGroups, excludeGroups; UT_String vdbPointsGroups; // obtain names of vdb point groups to include / exclude evalString(vdbPointsGroups, "vdbpointsgroups", 0, time); cvdb::points::AttributeSet::Descriptor::parseNames(includeGroups, excludeGroups, 
vdbPointsGroups.toStdString()); // extract VDB points grids PointGridPtrVec pointGrids; for (openvdb_houdini::VdbPrimIterator it(gdp); it; ++it) { GU_PrimVDB* vdb = *it; if (!vdb || !vdb->getConstGridPtr()->isType<cvdb::points::PointDataGrid>()) continue; vdb->makeGridUnique(); cvdb::GridBase::Ptr grid = vdb->getGridPtr(); cvdb::points::PointDataGrid::Ptr pointDataGrid = cvdb::gridPtrCast<cvdb::points::PointDataGrid>(grid); if (verbose) { if (auto leaf = pointDataGrid->tree().cbeginLeaf()) { cvdb::points::MultiGroupFilter filter(includeGroups, excludeGroups, leaf->attributeSet()); nVDBPoints += cvdb::points::pointCount<cvdb::points::PointDataTree, cvdb::points::MultiGroupFilter>(pointDataGrid->tree(), filter); } } pointGrids.emplace_back(pointDataGrid); } const GA_Size nPoints = aGdp->getNumPoints(); // sanity checks - warn if there are no points on first input port. Note that // each VDB primitive should have a single point associated with it so that we could // theoretically only check if nPoints == 0, but we explictly check for 0 pointGrids // for the sake of clarity if (nPoints == 0 && pointGrids.empty()) { const std::string msg = "Input 1 contains no points."; addWarning(SOP_MESSAGE, msg.c_str()); if (verbose) std::cout << msg << std::endl; } // Get the group of grids to process const GA_PrimitiveGroup* group = matchGroup(*bGdp, evalStdString("group", time)); // These lists are used to keep track of names of already-existing point attributes. 
StringSet existingPointAttrs; AttrNameMap existingVdbPointAttrs; VDBPointsSampler vdbPointsSampler(pointGrids, includeGroups, excludeGroups, existingVdbPointAttrs); // scratch variables used in the loop GA_Defaults defaultFloat(0.0), defaultInt(0); int numScalarGrids = 0; int numVectorGrids = 0; int numUnnamedGrids = 0; // start time tbb::tick_count time_start = tbb::tick_count::now(); UT_AutoInterrupt progress("Sampling from VDB grids"); for (hvdb::VdbPrimCIterator it(bGdp, group); it; ++it) { if (progress.wasInterrupted()) { throw std::runtime_error("was interrupted"); } const GU_PrimVDB* vdb = *it; UT_VDBType gridType = vdb->getStorageType(); const hvdb::Grid& grid = vdb->getGrid(); std::string gridName = it.getPrimitiveName().toStdString(); if (gridName.empty()) { std::stringstream ss; ss << "VDB_" << numUnnamedGrids++; gridName = ss.str(); } // remove any dot "." characters, attribute names can't contain this. std::replace(gridName.begin(), gridName.end(), '.', '_'); std::string attributeName; if (gridName == "vel" && evalInt("renamevel", 0, time)) { attributeName = "v"; } else { attributeName = gridName; } //convert gridName to uppercase so we can use it as a local variable name std::string attributeVariableName = attributeName; std::transform(attributeVariableName.begin(), attributeVariableName.end(), attributeVariableName.begin(), ::toupper); if (gridType == UT_VDB_FLOAT || gridType == UT_VDB_DOUBLE) { // a grid that holds a scalar field (as either float or double type) // count numScalarGrids++; //find or create float attribute GA_RWAttributeRef attribHandle = aGdp->findFloatTuple(GA_ATTRIB_POINT, attributeName.c_str(), 1); if (!attribHandle.isValid()) { attribHandle = aGdp->addFloatTuple( GA_ATTRIB_POINT, attributeName.c_str(), 1, defaultFloat); } else { existingPointAttrs.insert(attributeName); } aGdp->addVariableName(attributeName.c_str(), attributeVariableName.c_str()); // user feedback if (verbose) { std::cout << "Sampling grid " << gridName << " of 
type " << grid.valueType() << std::endl; } hvdb::Interrupter scalarInterrupt("Sampling from VDB floating-type grids"); // do the sampling if (gridType == UT_VDB_FLOAT) { // float scalar PointSampler<cvdb::FloatGrid, GA_RWPageHandleF> theSampler( grid, threaded, aGdp, attribHandle, &scalarInterrupt); theSampler.sample(); vdbPointsSampler.boxSample<cvdb::FloatGrid>( grid, attributeName, &scalarInterrupt); } else { // double scalar PointSampler<cvdb::DoubleGrid, GA_RWPageHandleF> theSampler( grid, threaded, aGdp, attribHandle, &scalarInterrupt); theSampler.sample(); vdbPointsSampler.boxSample<cvdb::DoubleGrid>( grid, attributeName, &scalarInterrupt); } } else if (gridType == UT_VDB_INT32 || gridType == UT_VDB_INT64) { numScalarGrids++; //find or create integer attribute GA_RWAttributeRef attribHandle = aGdp->findIntTuple(GA_ATTRIB_POINT, attributeName.c_str(), 1); if (!attribHandle.isValid()) { attribHandle = aGdp->addIntTuple(GA_ATTRIB_POINT, attributeName.c_str(), 1, defaultInt); } else { existingPointAttrs.insert(attributeName); } aGdp->addVariableName(attributeName.c_str(), attributeVariableName.c_str()); // user feedback if (verbose) { std::cout << "Sampling grid " << gridName << " of type " << grid.valueType() << std::endl; } hvdb::Interrupter scalarInterrupt("Sampling from VDB integer-type grids"); if (gridType == UT_VDB_INT32) { PointSampler<cvdb::Int32Grid, GA_RWPageHandleF, false, true> theSampler(grid, threaded, aGdp, attribHandle, &scalarInterrupt); theSampler.sample(); vdbPointsSampler.pointSample<cvdb::Int32Grid>( grid, attributeName, &scalarInterrupt); } else { PointSampler<cvdb::Int64Grid, GA_RWPageHandleF, false, true> theSampler(grid, threaded, aGdp, attribHandle, &scalarInterrupt); theSampler.sample(); vdbPointsSampler.pointSample<cvdb::Int64Grid>( grid, attributeName, &scalarInterrupt); } } else if (gridType == UT_VDB_VEC3F || gridType == UT_VDB_VEC3D) { // a grid that holds Vec3 data (as either float or double) // count numVectorGrids++; // find 
or create create vector attribute GA_RWAttributeRef attribHandle = aGdp->findFloatTuple(GA_ATTRIB_POINT, attributeName.c_str(), 3); if (!attribHandle.isValid()) { attribHandle = aGdp->addFloatTuple( GA_ATTRIB_POINT, attributeName.c_str(), 3, defaultFloat); } else { existingPointAttrs.insert(attributeName); } aGdp->addVariableName(attributeName.c_str(), attributeVariableName.c_str()); std::unique_ptr<hvdb::Interrupter> interrupter; // user feedback if (grid.getGridClass() != cvdb::GRID_STAGGERED) { // regular (non-staggered) vec3 grid if (verbose) { std::cout << "Sampling grid " << gridName << " of type " << grid.valueType() << std::endl; } interrupter.reset(new hvdb::Interrupter("Sampling from VDB vector-type grids")); // do the sampling if (gridType == UT_VDB_VEC3F) { // Vec3f PointSampler<cvdb::Vec3fGrid, GA_RWPageHandleV3> theSampler( grid, threaded, aGdp, attribHandle, interrupter.get()); theSampler.sample(); } else { // Vec3d PointSampler<cvdb::Vec3dGrid, GA_RWPageHandleV3> theSampler( grid, threaded, aGdp, attribHandle, interrupter.get()); theSampler.sample(); } } else { // staggered grid case if (verbose) { std::cout << "Sampling staggered grid " << gridName << " of type " << grid.valueType() << std::endl; } interrupter.reset(new hvdb::Interrupter( "Sampling from VDB vector-type staggered grids")); // do the sampling if (grid.isType<cvdb::Vec3fGrid>()) { // Vec3f PointSampler<cvdb::Vec3fGrid, GA_RWPageHandleV3, true> theSampler( grid, threaded, aGdp, attribHandle, interrupter.get()); theSampler.sample(); } else { // Vec3d PointSampler<cvdb::Vec3dGrid, GA_RWPageHandleV3, true> theSampler( grid, threaded, aGdp, attribHandle, interrupter.get()); theSampler.sample(); } } // staggered vector sampling is handled within the core library for vdb points if (gridType == UT_VDB_VEC3F) { vdbPointsSampler.boxSample<cvdb::Vec3fGrid>( grid, attributeName, interrupter.get()); } else { vdbPointsSampler.boxSample<cvdb::Vec3dGrid>( grid, attributeName, interrupter.get()); } } 
else { addWarning(SOP_MESSAGE, ("Skipped VDB \"" + gridName + "\" of unsupported type " + grid.valueType()).c_str()); } }//end iter if (0 != evalInt("attributeexists", 0, time)) { // Report existing Houdini point attributes. existingPointAttrs.erase(""); if (existingPointAttrs.size() == 1) { addWarning(SOP_MESSAGE, ("Point attribute \"" + *existingPointAttrs.begin() + "\" already exists").c_str()); } else if (!existingPointAttrs.empty()) { const StringVec attrNames(existingPointAttrs.begin(), existingPointAttrs.end()); const std::string s = "These point attributes already exist: " + hboost::algorithm::join(attrNames, ", "); addWarning(SOP_MESSAGE, s.c_str()); } // Report existing VDB Points attributes and the grids in which they appear. for (auto& attrs: existingVdbPointAttrs) { auto& attrSet = attrs.second; attrSet.erase(""); if (attrSet.size() == 1) { addWarning(SOP_MESSAGE, ("Attribute \"" + *attrSet.begin() + "\" already exists in VDB point data grid \"" + attrs.first + "\".").c_str()); } else if (!attrSet.empty()) { const StringVec attrNames(attrSet.begin(), attrSet.end()); const std::string s = "These attributes already exist in VDB point data grid \"" + attrs.first + "\": " + hboost::algorithm::join(attrNames, ", "); addWarning(SOP_MESSAGE, s.c_str()); } } } // timing: end time tbb::tick_count time_end = tbb::tick_count::now(); if (verbose) { std::cout << "Sampling " << nPoints + nVDBPoints << " points in " << numVectorGrids << " vector grid" << (numVectorGrids == 1 ? "" : "s") << " and " << numScalarGrids << " scalar grid" << (numScalarGrids == 1 ? "" : "s") << " took " << (time_end - time_start).seconds() << " seconds\n " << (threaded ? "threaded" : "non-threaded") << std::endl; } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
27,270
C++
36.511692
100
0.56729
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Ray.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file SOP_OpenVDB_Ray.cc
///
/// @author FX R&D OpenVDB team
///
/// @brief Performs geometry projections using level set ray intersections or closest point queries.

#include <houdini_utils/ParmFactory.h>
#include <openvdb_houdini/SOP_NodeVDB.h>
#include <openvdb_houdini/Utils.h>
#include <openvdb/tools/RayIntersector.h>
#include <openvdb/tools/VolumeToSpheres.h> // for ClosestSurfacePoint
#include <GA/GA_PageHandle.h>
#include <GA/GA_SplittableRange.h>
#include <GU/GU_Detail.h>
#include <PRM/PRM_Parm.h>
#include <UT/UT_Interrupt.h>
#include <hboost/algorithm/string/join.hpp>
#include <limits>
#include <stdexcept>
#include <string>
#include <vector>

namespace hvdb = openvdb_houdini;
namespace hutil = houdini_utils;


////////////////////////////////////////


/// @brief SOP that projects input points (input 0) onto level set VDBs
/// (input 1), either by firing rays along point normals or by snapping
/// each point to its closest surface point.
class SOP_OpenVDB_Ray: public hvdb::SOP_NodeVDB
{
public:
    SOP_OpenVDB_Ray(OP_Network*, const char* name, OP_Operator*);
    ~SOP_OpenVDB_Ray() override {}

    static OP_Node* factory(OP_Network*, const char* name, OP_Operator*);

    // All inputs except the point input (index 0) are reference inputs.
    int isRefInput(unsigned i) const override { return (i > 0); }

    class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; };

protected:
    bool updateParmsFlags() override;
};


////////////////////////////////////////


/// @brief Register this SOP and build its parameter interface.
void
newSopOperator(OP_OperatorTable* table)
{
    if (table == nullptr) return;

    hutil::ParmList parms;

    parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group")
        .setChoiceList(&hutil::PrimGroupMenuInput1)
        .setTooltip("Specify a subset of the input VDB grids to process.")
        .setDocumentation(
            "A subset of VDBs to process (see [specifying volumes|/model/volumes#group])"));

    // Method
    parms.add(hutil::ParmFactory(PRM_ORD, "method", "Method")
        .setDefault(PRMzeroDefaults)
        .setTooltip("Projection method")
        .setChoiceListItems(PRM_CHOICELIST_SINGLE, {
            "rayintersection", "Ray Intersection",
            "closestpoint", "Closest Surface Point"
        }));

    parms.add(hutil::ParmFactory(PRM_FLT_J, "isovalue", "Isovalue")
        .setDefault(PRMzeroDefaults)
        .setRange(PRM_RANGE_UI, -1.0, PRM_RANGE_UI, 1.0)
        .setTooltip(
            "The voxel value that defines the surface\n\n"
            "Zero works for signed distance fields, while fog volumes require"
            " a larger positive value (0.5 is a good initial guess)."));

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "dotrans", "Transform")
        .setDefault(PRMoneDefaults)
        .setTooltip("If enabled, transform the intersected geometry."));

    parms.add(hutil::ParmFactory(PRM_FLT_J, "scale", "Scale")
        .setDefault(PRMoneDefaults)
        .setRange(PRM_RANGE_UI, 0, PRM_RANGE_UI, 1)
        .setTooltip("Specify the amount by which to scale the intersected geometry."));

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "putdist", "Store Distances")
        .setTooltip(
            "Create a point attribute giving the distance to the"
            " collision surface or to the closest surface point."));

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "lookfar", "Intersect Farthest Surface")
        .setTooltip("Use the farthest intersection point instead of the closest."));

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "reverserays", "Reverse Rays")
        .setTooltip("Make rays fire in the direction opposite to the normals."));

    parms.add(hutil::ParmFactory(PRM_FLT_J, "bias", "Bias")
        .setDefault(PRMzeroDefaults)
        .setRange(PRM_RANGE_UI, 0, PRM_RANGE_UI, 1)
        .setTooltip("Offset the starting position of the rays."));

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "creategroup", "Create Ray Hit Group")
        .setTypeExtended(PRM_TYPE_TOGGLE_JOIN)
        .setTooltip("If enabled, create a point group to hold all successful intersections"));

    parms.add(hutil::ParmFactory(PRM_STRING, "hitgrp", "Ray Hit Group")
        .setDefault("rayHitGroup")
        .setTooltip("Point group name"));

    //////

    hvdb::OpenVDBOpFactory("VDB Ray", SOP_OpenVDB_Ray::factory, parms, *table)
        .addInput("points")
        .addInput("level set grids")
        .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Ray::Cache; })
        .setDocumentation("\
#icon: COMMON/openvdb\n\
#tags: vdb\n\
\n\
\"\"\"Project geometry onto a level set VDB volume.\"\"\"\n\
\n\
@overview\n\
\n\
This node performs geometry projections using level set ray intersections\n\
or closest point queries.\n\
\n\
@examples\n\
\n\
See [openvdb.org|http://www.openvdb.org/download/] for source code\n\
and usage examples.\n");
}


OP_Node*
SOP_OpenVDB_Ray::factory(OP_Network* net, const char* name, OP_Operator* op)
{
    return new SOP_OpenVDB_Ray(net, name, op);
}


SOP_OpenVDB_Ray::SOP_OpenVDB_Ray(OP_Network* net, const char* name, OP_Operator* op):
    hvdb::SOP_NodeVDB(net, name, op)
{
}


/// @brief Enable/disable parameters depending on the selected method:
/// ray-specific toggles are active only for "Ray Intersection",
/// the isovalue only for "Closest Surface Point".
bool
SOP_OpenVDB_Ray::updateParmsFlags()
{
    bool changed = false;

    bool rayintersection = evalInt("method", 0, 0) == 0;

    changed |= enableParm("isovalue", !rayintersection);

    changed |= enableParm("lookfar", rayintersection);
    changed |= enableParm("reverserays", rayintersection);
    changed |= enableParm("creategroup", rayintersection);
    changed |= enableParm("bias", rayintersection);

    changed |= enableParm("scale", bool(evalInt("dotrans", 0, 0)));

    bool creategroup = evalInt("creategroup", 0, 0);
    changed |= enableParm("hitgrp", creategroup && rayintersection);

    return changed;
}


////////////////////////////////////////


/// @brief TBB body that fires a ray from each point along its normal and
/// records the world-space hit position and distance.
///
/// Intended for use with UTparallelFor over a GA_SplittableRange of points.
/// The output arrays (@a positions, @a distances, @a intersections) are
/// indexed by point index; each point is visited by exactly one task.
template<typename GridType>
class IntersectPoints
{
public:
    /// @param gdp            detail holding the points to project
    /// @param pointNormals   per-point ray directions, indexed by point index
    /// @param grid           level set to intersect against
    /// @param positions      output hit (or pass-through) positions
    /// @param distances      in/out best hit distance per point
    /// @param intersections  per-point hit flags (nonzero once a ray hits)
    /// @param keepMaxDist    keep the farthest hit instead of the nearest
    /// @param reverseRays    fire rays opposite to the normals
    /// @param scale          scale applied to the hit distance when updating positions
    /// @param bias           offset of the ray origin along the ray direction
    IntersectPoints(
        const GU_Detail& gdp,
        const UT_Vector3Array& pointNormals,
        const GridType& grid,
        UT_Vector3Array& positions,
        UT_FloatArray& distances,
        std::vector<char>& intersections,
        bool keepMaxDist = false,
        bool reverseRays = false,
        double scale = 1.0,
        double bias = 0.0)
    : mGdp(gdp)
    , mPointNormals(pointNormals)
    , mIntersector(grid)
    , mPositions(positions)
    , mDistances(distances)
    , mIntersections(intersections)
    , mKeepMaxDist(keepMaxDist)
    , mReverseRays(reverseRays)
    , mScale(scale)
    , mBias(bias)
    {
    }

    void operator()(const GA_SplittableRange &range) const
    {
        GA_Offset start, end;
        GA_Index pointIndex;
        using RayT = openvdb::math::Ray<double>;
        openvdb::Vec3d eye, dir, intersection;

        // Hoist the "is scaling/biasing a no-op" checks out of the loop.
        const bool doScaling = !openvdb::math::isApproxEqual(mScale, 1.0);
        const bool offsetRay = !openvdb::math::isApproxEqual(mBias, 0.0);

        GA_ROPageHandleV3 points(mGdp.getP());

        // Iterate over blocks
        for (GA_Iterator it(range); it.blockAdvance(start, end); ) {
            points.setPage(start);

            // Point Offsets
            for (GA_Offset pointOffset = start; pointOffset < end; ++pointOffset) {

                const UT_Vector3& pos = points.value(pointOffset);
                eye[0] = double(pos.x());
                eye[1] = double(pos.y());
                eye[2] = double(pos.z());

                pointIndex = mGdp.pointIndex(pointOffset);

                const UT_Vector3& normal = mPointNormals(pointIndex);
                dir[0] = double(normal.x());
                dir[1] = double(normal.y());
                dir[2] = double(normal.z());
                if (mReverseRays) dir = -dir;

                RayT ray((offsetRay ? (eye + dir * mBias) : eye), dir);

                if (!mIntersector.intersectsWS(ray, intersection)) {
                    // Miss: keep the original position unless a previous grid
                    // already produced a hit for this point.
                    if (!mIntersections[pointIndex]) mPositions(pointIndex) = pos;
                    continue;
                }

                float distance = float((intersection - eye).length());

                // Keep either the nearest or the farthest hit, depending on mode.
                if ((!mKeepMaxDist && mDistances(pointIndex) > distance) ||
                    (mKeepMaxDist && mDistances(pointIndex) < distance)) {

                    mDistances(pointIndex) = distance;

                    UT_Vector3& position = mPositions(pointIndex);

                    if (doScaling) intersection = eye + dir * mScale * double(distance);

                    position.x() = float(intersection[0]);
                    position.y() = float(intersection[1]);
                    position.z() = float(intersection[2]);
                }

                mIntersections[pointIndex] = 1;
            }
        }
    }

private:
    const GU_Detail& mGdp;
    const UT_Vector3Array& mPointNormals;
    openvdb::tools::LevelSetRayIntersector<GridType> mIntersector;
    UT_Vector3Array& mPositions;
    UT_FloatArray& mDistances;
    std::vector<char>& mIntersections;
    const bool mKeepMaxDist, mReverseRays;
    const double mScale, mBias;
};


/// @brief Snap every point of @a gdp to its closest point on the
/// @a isovalue surface of @a grid, keeping only results that improve on
/// the distances already stored in @a distances.
///
/// @param positions  if non-null, updated in place with the snapped positions
/// @param boss       interrupter checked by the closest-point search
template<typename GridT, typename InterrupterT>
inline void
closestPoints(const GridT& grid, float isovalue, const GU_Detail& gdp,
    UT_FloatArray& distances, UT_Vector3Array* positions, InterrupterT& boss)
{
    std::vector<openvdb::Vec3R> tmpPoints(distances.entries());

    GA_ROHandleV3 points = GA_ROHandleV3(gdp.getP());

    // Copy point positions into the format expected by ClosestSurfacePoint.
    for (size_t n = 0, N = tmpPoints.size(); n < N; ++n) {
        const UT_Vector3 pos = points.get(gdp.pointOffset(n));
        tmpPoints[n][0] = pos.x();
        tmpPoints[n][1] = pos.y();
        tmpPoints[n][2] = pos.z();
    }

    std::vector<float> tmpDistances;

    const bool transformPoints = (positions != nullptr);

    auto closestPoint = openvdb::tools::ClosestSurfacePoint<GridT>::create(grid, isovalue, &boss);
    if (!closestPoint) return;

    // searchAndReplace() overwrites tmpPoints with the surface points;
    // search() only computes distances.
    if (transformPoints) closestPoint->searchAndReplace(tmpPoints, tmpDistances);
    else closestPoint->search(tmpPoints, tmpDistances);

    // Merge: keep the closer result per point across multiple grids.
    for (size_t n = 0, N = tmpDistances.size(); n < N; ++n) {
        if (tmpDistances[n] < distances(n)) {
            distances(n) = tmpDistances[n];
            if (transformPoints) {
                UT_Vector3& pos = (*positions)(n);
                pos.x() = float(tmpPoints[n].x());
                pos.y() = float(tmpPoints[n].y());
                pos.z() = float(tmpPoints[n].z());
            }
        }
    }
}


/// @brief TBB body that moves each projected position along the direction
/// from the original point, scaling the stored distance by a constant factor.
/// Used to apply the "Scale" parameter to closest-point results.
class ScalePositions
{
public:
    ScalePositions(
        const GU_Detail& gdp,
        UT_Vector3Array& positions,
        UT_FloatArray& distances,
        double scale = 1.0)
    : mGdp(gdp)
    , mPositions(positions)
    , mDistances(distances)
    , mScale(scale)
    {
    }

    void operator()(const GA_SplittableRange &range) const
    {
        GA_Offset start, end;
        GA_Index pointIndex;
        UT_Vector3 dir;

        GA_ROPageHandleV3 points(mGdp.getP());

        // Iterate over blocks
        for (GA_Iterator it(range); it.blockAdvance(start, end); ) {
            points.setPage(start);

            // Point Offsets
            for (GA_Offset pointOffset = start; pointOffset < end; ++pointOffset) {
                pointIndex = mGdp.pointIndex(pointOffset);

                const UT_Vector3& point = points.value(pointOffset);
                UT_Vector3& pos = mPositions(pointIndex);

                // Rebuild the projection direction from original -> projected
                // position, then re-apply the (scaled) distance along it.
                dir = pos - point;
                dir.normalize();

                pos = point + dir * mDistances(pointIndex) * mScale;
            }
        }
    }

private:
    const GU_Detail& mGdp;
    UT_Vector3Array& mPositions;
    UT_FloatArray& mDistances;
    const double mScale;
};


////////////////////////////////////////


/// @brief Cook: project the input points onto every selected level set VDB,
/// then optionally move the points, store distances, and group the hits.
OP_ERROR
SOP_OpenVDB_Ray::Cache::cookVDBSop(OP_Context& context)
{
    try {
        const fpreal time = context.getTime();

        hvdb::Interrupter boss("Computing VDB ray intersections");

        const GU_Detail* vdbGeo = inputGeo(1);
        if (vdbGeo == nullptr) return error();

        // Get the group of grids to surface.
        const GA_PrimitiveGroup* group = matchGroup(*vdbGeo, evalStdString("group", time));

        hvdb::VdbPrimCIterator vdbIt(vdbGeo, group);
        if (!vdbIt) {
            addWarning(SOP_MESSAGE, "No VDB grids found.");
            return error();
        }

        // Eval attributes
        const bool keepMaxDist = bool(evalInt("lookfar", 0, time));
        const bool reverseRays = bool(evalInt("reverserays", 0, time));
        const bool rayIntersection = evalInt("method", 0, time) == 0;
        const double scale = double(evalFloat("scale", 0, time));
        const double bias = double(evalFloat("bias", 0, time));
        const float isovalue = float(evalFloat("isovalue", 0, time));

        // Use the existing "N" attribute as ray directions, or compute normals.
        UT_Vector3Array pointNormals;
        GA_ROAttributeRef attributeRef = gdp->findPointAttribute("N");
        if (attributeRef.isValid()) {
            gdp->getAttributeAsArray(
                attributeRef.getAttribute(), gdp->getPointRange(), pointNormals);
        } else {
            gdp->normal(pointNormals, /*use_internaln=*/false);
        }

        const size_t numPoints = gdp->getNumPoints();

        UT_Vector3Array positions(numPoints);

        std::vector<char> intersections(numPoints);

        const double limit = std::numeric_limits<double>::max();

        // Seed distances so the first hit always wins: -max when looking for
        // the farthest intersection, +max otherwise.
        UT_FloatArray distances;
        distances.appendMultiple(
            float((keepMaxDist && rayIntersection) ? -limit : limit), numPoints);

        std::vector<std::string> skippedGrids;

        for (; vdbIt; ++vdbIt) {
            if (boss.wasInterrupted()) break;

            // Only float level sets are supported; anything else is reported.
            if (vdbIt->getGrid().getGridClass() == openvdb::GRID_LEVEL_SET &&
                vdbIt->getGrid().type() == openvdb::FloatGrid::gridType()) {

                openvdb::FloatGrid::ConstPtr gridPtr =
                    openvdb::gridConstPtrCast<openvdb::FloatGrid>(vdbIt->getGridPtr());

                if (rayIntersection) {
                    IntersectPoints<openvdb::FloatGrid> op(
                        *gdp, pointNormals, *gridPtr, positions, distances,
                        intersections, keepMaxDist, reverseRays, scale, bias);
                    UTparallelFor(GA_SplittableRange(gdp->getPointRange()), op);
                } else {
                    closestPoints(*gridPtr, isovalue, *gdp, distances, &positions, boss);
                }

            } else {
                skippedGrids.push_back(vdbIt.getPrimitiveNameOrIndex().toStdString());
                continue;
            }
        }

        if (bool(evalInt("dotrans", 0, time))) { // update point positions

            // Closest-point results are scaled after the fact;
            // ray results were already scaled inside IntersectPoints.
            if (!rayIntersection && !openvdb::math::isApproxEqual(scale, 1.0)) {
                UTparallelFor(GA_SplittableRange(gdp->getPointRange()),
                    ScalePositions(*gdp, positions, distances, scale));
            }

            gdp->setPos3FromArray(gdp->getPointRange(), positions);
        }

        if (bool(evalInt("putdist", 0, time))) { // add distance attribute

            GA_RWAttributeRef aRef = gdp->findPointAttribute("dist");
            if (!aRef.isValid()) {
                aRef = gdp->addFloatTuple(GA_ATTRIB_POINT, "dist", 1, GA_Defaults(0.0));
            }
            gdp->setAttributeFromArray(aRef.getAttribute(), gdp->getPointRange(), distances);
        }

        if (rayIntersection && bool(evalInt("creategroup", 0, time))) { // group intersecting points

            const auto groupStr = evalStdString("hitgrp", time);
            if (!groupStr.empty()) {

                GA_PointGroup *pointGroup = gdp->findPointGroup(groupStr.c_str());
                if (!pointGroup) pointGroup = gdp->newPointGroup(groupStr.c_str());

                for (size_t n = 0; n < numPoints; ++n) {
                    if (intersections[n]) pointGroup->addIndex(n);
                }
            }
        }

        if (!skippedGrids.empty()) {
            std::string s = "Only level set grids are supported, the following "
                "were skipped: '" + hboost::algorithm::join(skippedGrids, ", ") + "'.";
            addWarning(SOP_MESSAGE, s.c_str());
        }

        if (boss.wasInterrupted()) {
            addWarning(SOP_MESSAGE, "Process was interrupted");
        }

        boss.end();

    } catch (std::exception& e) {
        addError(SOP_MESSAGE, e.what());
    }
    return error();
}
16,126
C++
30.436647
100
0.595932
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/AttributeTransferUtil.h
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file AttributeTransferUtil.h
/// @author FX R&D Simulation team
/// @brief Utility methods used by the From/To Polygons and From Particles SOPs

#ifndef OPENVDB_HOUDINI_ATTRIBUTE_TRANSFER_UTIL_HAS_BEEN_INCLUDED
#define OPENVDB_HOUDINI_ATTRIBUTE_TRANSFER_UTIL_HAS_BEEN_INCLUDED

#include "Utils.h"

#include <openvdb/openvdb.h>
#include <openvdb/math/Proximity.h>
#include <openvdb/util/Util.h>

#include <GA/GA_PageIterator.h>
#include <GA/GA_SplittableRange.h>
#include <GEO/GEO_PrimPolySoup.h>
#include <SYS/SYS_Types.h>

#include <algorithm> // for std::sort()
#include <cmath> // for std::floor()
#include <limits>
#include <memory>
#include <set>
#include <sstream>
#include <string>
#include <type_traits>
#include <vector>

namespace openvdb_houdini {

////////////////////////////////////////

/// Get OpenVDB specific value, by calling GA_AIFTuple::get()
/// with appropriate arguments.
///
/// The generic version reads through an fpreal64 intermediate; the
/// specializations below use the Houdini storage type that matches the
/// requested OpenVDB type, avoiding unnecessary conversions.
template <typename ValueType> inline ValueType
evalAttr(const GA_Attribute* atr, const GA_AIFTuple* aif,
    GA_Offset off, int idx)
{
    fpreal64 value;
    aif->get(atr, off, value, idx);
    return ValueType(value);
}

template <> inline float
evalAttr<float>(const GA_Attribute* atr, const GA_AIFTuple* aif,
    GA_Offset off, int idx)
{
    fpreal32 value;
    aif->get(atr, off, value, idx);
    return float(value);
}

template <> inline openvdb::Int32
evalAttr<openvdb::Int32>(const GA_Attribute* atr, const GA_AIFTuple* aif,
    GA_Offset off, int idx)
{
    int32 value;
    aif->get(atr, off, value, idx);
    return openvdb::Int32(value);
}

template <> inline openvdb::Int64
evalAttr<openvdb::Int64>(const GA_Attribute* atr, const GA_AIFTuple* aif,
    GA_Offset off, int idx)
{
    int64 value;
    aif->get(atr, off, value, idx);
    return openvdb::Int64(value);
}

// Vector specializations ignore the tuple index and read all three components.

template <> inline openvdb::Vec3i
evalAttr<openvdb::Vec3i>(const GA_Attribute* atr, const GA_AIFTuple* aif,
    GA_Offset off, int)
{
    openvdb::Vec3i vec;
    int32 comp;

    aif->get(atr, off, comp, 0);
    vec[0] = openvdb::Int32(comp);

    aif->get(atr, off, comp, 1);
    vec[1] = openvdb::Int32(comp);

    aif->get(atr, off, comp, 2);
    vec[2] = openvdb::Int32(comp);

    return vec;
}

template <> inline openvdb::Vec3s
evalAttr<openvdb::Vec3s>(const GA_Attribute* atr, const GA_AIFTuple* aif,
    GA_Offset off, int)
{
    openvdb::Vec3s vec;
    fpreal32 comp;

    aif->get(atr, off, comp, 0);
    vec[0] = float(comp);

    aif->get(atr, off, comp, 1);
    vec[1] = float(comp);

    aif->get(atr, off, comp, 2);
    vec[2] = float(comp);

    return vec;
}

template <> inline openvdb::Vec3d
evalAttr<openvdb::Vec3d>(const GA_Attribute* atr, const GA_AIFTuple* aif,
    GA_Offset off, int)
{
    openvdb::Vec3d vec;
    fpreal64 comp;

    aif->get(atr, off, comp, 0);
    vec[0] = double(comp);

    aif->get(atr, off, comp, 1);
    vec[1] = double(comp);

    aif->get(atr, off, comp, 2);
    vec[2] = double(comp);

    return vec;
}


////////////////////////////////////////

/// Combine different value types.
///
/// Floating-point types are blended with barycentric weights @a w;
/// integral and integer-vector types cannot be meaningfully interpolated,
/// so those specializations pick the value with the largest weight.
template <typename ValueType> inline ValueType
combine(const ValueType& v0, const ValueType& v1, const ValueType& v2,
    const openvdb::Vec3d& w)
{
    return ValueType(v0 * w[0] + v1 * w[1] + v2 * w[2]);
}

template <> inline openvdb::Int32
combine(const openvdb::Int32& v0, const openvdb::Int32& v1,
    const openvdb::Int32& v2, const openvdb::Vec3d& w)
{
    if (w[2] > w[0] && w[2] > w[1]) return v2;
    if (w[1] > w[0] && w[1] > w[2]) return v1;
    return v0;
}

template <> inline openvdb::Int64
combine(const openvdb::Int64& v0, const openvdb::Int64& v1,
    const openvdb::Int64& v2, const openvdb::Vec3d& w)
{
    if (w[2] > w[0] && w[2] > w[1]) return v2;
    if (w[1] > w[0] && w[1] > w[2]) return v1;
    return v0;
}

template <> inline openvdb::Vec3i
combine(const openvdb::Vec3i& v0, const openvdb::Vec3i& v1,
    const openvdb::Vec3i& v2, const openvdb::Vec3d& w)
{
    if (w[2] > w[0] && w[2] > w[1]) return v2;
    if (w[1] > w[0] && w[1] > w[2]) return v1;
    return v0;
}

template <> inline openvdb::Vec3s
combine(const openvdb::Vec3s& v0, const openvdb::Vec3s& v1,
    const openvdb::Vec3s& v2, const openvdb::Vec3d& w)
{
    openvdb::Vec3s vec;

    vec[0] = float(v0[0] * w[0] + v1[0] * w[1] + v2[0] * w[2]);
    vec[1] = float(v0[1] * w[0] + v1[1] * w[1] + v2[1] * w[2]);
    vec[2] = float(v0[2] * w[0] + v1[2] * w[1] + v2[2] * w[2]);

    return vec;
}

template <> inline openvdb::Vec3d
combine(const openvdb::Vec3d& v0, const openvdb::Vec3d& v1,
    const openvdb::Vec3d& v2, const openvdb::Vec3d& w)
{
    openvdb::Vec3d vec;

    vec[0] = v0[0] * w[0] + v1[0] * w[1] + v2[0] * w[2];
    vec[1] = v0[1] * w[0] + v1[1] * w[1] + v2[1] * w[2];
    vec[2] = v0[2] * w[0] + v1[2] * w[1] + v2[2] * w[2];

    return vec;
}


////////////////////////////////////////

/// @brief Get an OpenVDB-specific value by evaluating GA_Default::get()
/// with appropriate arguments.
template <typename ValueType> inline ValueType
evalAttrDefault(const GA_Defaults& defaults, int idx)
{
    fpreal64 value;
    defaults.get(idx, value);
    return ValueType(value);
}

template <> inline float
evalAttrDefault<float>(const GA_Defaults& defaults, int /*idx*/)
{
    fpreal32 value;
    defaults.get(0, value);
    return float(value);
}

template <> inline openvdb::Int32
evalAttrDefault<openvdb::Int32>(const GA_Defaults& defaults, int idx)
{
    int32 value;
    defaults.get(idx, value);
    return openvdb::Int32(value);
}

template <> inline openvdb::Int64
evalAttrDefault<openvdb::Int64>(const GA_Defaults& defaults, int idx)
{
    int64 value;
    defaults.get(idx, value);
    return openvdb::Int64(value);
}

template <> inline openvdb::Vec3i
evalAttrDefault<openvdb::Vec3i>(const GA_Defaults& defaults, int)
{
    openvdb::Vec3i vec;
    int32 value;

    defaults.get(0, value);
    vec[0] = openvdb::Int32(value);

    defaults.get(1, value);
    vec[1] = openvdb::Int32(value);

    defaults.get(2, value);
    vec[2] = openvdb::Int32(value);

    return vec;
}

template <> inline openvdb::Vec3s
evalAttrDefault<openvdb::Vec3s>(const GA_Defaults& defaults, int)
{
    openvdb::Vec3s vec;
    fpreal32 value;

    defaults.get(0, value);
    vec[0] = float(value);

    defaults.get(1, value);
    vec[1] = float(value);

    defaults.get(2, value);
    vec[2] = float(value);

    return vec;
}

template <> inline openvdb::Vec3d
evalAttrDefault<openvdb::Vec3d>(const GA_Defaults& defaults, int)
{
    openvdb::Vec3d vec;
    fpreal64 value;

    defaults.get(0, value);
    vec[0] = double(value);

    defaults.get(1, value);
    vec[1] = double(value);

    defaults.get(2, value);
    vec[2] = double(value);

    return vec;
}

template <> inline openvdb::math::Quat<float>
evalAttrDefault<openvdb::math::Quat<float>>(const GA_Defaults& defaults, int)
{
    openvdb::math::Quat<float> quat;
    fpreal32 value;

    for (int i = 0; i < 4; i++) {
        defaults.get(i, value);
        quat[i] = float(value);
    }

    return quat;
}

template <> inline openvdb::math::Quat<double>
evalAttrDefault<openvdb::math::Quat<double>>(const GA_Defaults& defaults, int)
{
    openvdb::math::Quat<double> quat;
    fpreal64 value;

    for (int i = 0; i < 4; i++) {
        defaults.get(i, value);
        quat[i] = double(value);
    }

    return quat;
}

template <> inline openvdb::math::Mat3<float>
evalAttrDefault<openvdb::math::Mat3<float>>(const GA_Defaults& defaults, int)
{
    openvdb::math::Mat3<float> mat;
    fpreal64 value;
    float* data = mat.asPointer();

    for (int i = 0; i < 9; i++) {
        defaults.get(i, value);
        data[i] = float(value);
    }

    return mat;
}

template <> inline openvdb::math::Mat3<double>
evalAttrDefault<openvdb::math::Mat3<double>>(const GA_Defaults& defaults, int)
{
    openvdb::math::Mat3<double> mat;
    fpreal64 value;
    double* data = mat.asPointer();

    for (int i = 0; i < 9; i++) {
        defaults.get(i, value);
        data[i] = double(value);
    }

    return mat;
}

template <> inline openvdb::math::Mat4<float>
evalAttrDefault<openvdb::math::Mat4<float>>(const GA_Defaults& defaults, int)
{
    openvdb::math::Mat4<float> mat;
    fpreal64 value;
    float* data = mat.asPointer();

    for (int i = 0; i < 16; i++) {
        defaults.get(i, value);
        data[i] = float(value);
    }

    return mat;
}

template <> inline openvdb::math::Mat4<double>
evalAttrDefault<openvdb::math::Mat4<double>>(const GA_Defaults& defaults, int)
{
    openvdb::math::Mat4<double> mat;
    fpreal64 value;
    double* data = mat.asPointer();

    for (int i = 0; i < 16; i++) {
        defaults.get(i, value);
        data[i] = double(value);
    }

    return mat;
}


////////////////////////////////////////


/// @brief Type-erased interface for writing a Houdini attribute value
/// into a voxel of an OpenVDB grid. Concrete instances pair one grid with
/// one attribute tuple component (see AttributeDetail below).
class AttributeDetailBase
{
public:
    using Ptr = std::shared_ptr<AttributeDetailBase>;

    virtual ~AttributeDetailBase() = default;

    AttributeDetailBase(const AttributeDetailBase&) = default;
    AttributeDetailBase& operator=(const AttributeDetailBase&) = default;

    /// Set the voxel at @a ijk from three attribute values blended with
    /// barycentric @a weights (used for polygon transfer).
    virtual void set(const openvdb::Coord& ijk, const GA_Offset (&offsets)[3],
        const openvdb::Vec3d& weights) = 0;

    /// Set the voxel at @a ijk from a single attribute value.
    virtual void set(const openvdb::Coord& ijk, GA_Offset offset) = 0;

    virtual openvdb::GridBase::Ptr& grid() = 0;
    virtual std::string& name() = 0;

    /// Clone this object (each TBB thread needs its own grid accessor).
    virtual AttributeDetailBase::Ptr copy() = 0;

protected:
    AttributeDetailBase() {}
};


using AttributeDetailList = std::vector<AttributeDetailBase::Ptr>;


////////////////////////////////////////


/// @brief Typed implementation of AttributeDetailBase that writes into a
/// grid of type @a VDBGridType via a cached value accessor.
template <class VDBGridType>
class AttributeDetail: public AttributeDetailBase
{
public:
    using ValueType = typename VDBGridType::ValueType;

    /// @param grid        destination grid (must actually be a VDBGridType)
    /// @param attribute   source Houdini attribute
    /// @param tupleAIF    tuple interface of @a attribute
    /// @param tupleIndex  component of the tuple to transfer
    /// @param isVector    if true, the whole vector is transferred and the
    ///                    grid name is not suffixed with the tuple index
    AttributeDetail(
        openvdb::GridBase::Ptr grid,
        const GA_Attribute* attribute,
        const GA_AIFTuple* tupleAIF,
        const int tupleIndex,
        const bool isVector = false);

    void set(const openvdb::Coord& ijk, const GA_Offset (&offsets)[3],
        const openvdb::Vec3d& weights) override;

    void set(const openvdb::Coord& ijk, GA_Offset offset) override;

    openvdb::GridBase::Ptr& grid() override { return mGrid; }
    std::string& name() override { return mName; }

    AttributeDetailBase::Ptr copy() override;

protected:
    AttributeDetail();

private:
    openvdb::GridBase::Ptr mGrid;
    typename VDBGridType::Accessor mAccessor;

    const GA_Attribute* mAttribute;
    const GA_AIFTuple* mTupleAIF;
    const int mTupleIndex;
    std::string mName;
};


template <class VDBGridType>
AttributeDetail<VDBGridType>::AttributeDetail():
    mAttribute(nullptr),
    mTupleAIF(nullptr),
    mTupleIndex(0)
{
}


template <class VDBGridType>
AttributeDetail<VDBGridType>::AttributeDetail(
    openvdb::GridBase::Ptr grid,
    const GA_Attribute* attribute,
    const GA_AIFTuple* tupleAIF,
    const int tupleIndex,
    const bool isVector):
    mGrid(grid),
    mAccessor(openvdb::GridBase::grid<VDBGridType>(mGrid)->getAccessor()),
    mAttribute(attribute),
    mTupleAIF(tupleAIF),
    mTupleIndex(tupleIndex)
{
    std::ostringstream name;
    name << mAttribute->getName();

    // Scalar components of a multi-component attribute get an index suffix
    // (e.g. "Cd_0") so each component maps to a distinctly-named grid.
    const int tupleSize = mTupleAIF->getTupleSize(mAttribute);
    if(!isVector && tupleSize != 1) {
        name << "_" << mTupleIndex;
    }

    mName = name.str();
}


template <class VDBGridType>
void
AttributeDetail<VDBGridType>::set(const openvdb::Coord& ijk,
    const GA_Offset (&offsets)[3], const openvdb::Vec3d& weights)
{
    ValueType v0 = evalAttr<ValueType>(
        mAttribute, mTupleAIF, offsets[0], mTupleIndex);
    ValueType v1 = evalAttr<ValueType>(
        mAttribute, mTupleAIF, offsets[1], mTupleIndex);
    ValueType v2 = evalAttr<ValueType>(
        mAttribute, mTupleAIF, offsets[2], mTupleIndex);

    mAccessor.setValue(ijk, combine<ValueType>(v0, v1, v2, weights));
}


template <class VDBGridType>
void
AttributeDetail<VDBGridType>::set(const openvdb::Coord& ijk, GA_Offset offset)
{
    mAccessor.setValue(ijk,
        evalAttr<ValueType>(mAttribute, mTupleAIF, offset, mTupleIndex));
}


template <class VDBGridType>
AttributeDetailBase::Ptr
AttributeDetail<VDBGridType>::copy()
{
    return AttributeDetailBase::Ptr(new AttributeDetail<VDBGridType>(*this));
}


////////////////////////////////////////

// TBB object to transfer mesh attributes.
// Only quads and/or triangles are supported
// NOTE: This class has all code in the header and so it cannot have OPENVDB_HOUDINI_API.
/// @brief TBB functor that copies point, vertex, and primitive attributes
/// from a mesh onto the active voxels of attribute grids.
///
/// For each active voxel of the closest-primitive index grid, the closest
/// point on the referenced triangle/quad is found and its barycentric
/// weights are used to blend point and vertex attributes; primitive
/// attributes are copied directly. Only quads and/or triangles are supported.
class MeshAttrTransfer
{
public:
    using IterRange = openvdb::tree::IteratorRange<openvdb::Int32Tree::LeafCIter>;

    inline MeshAttrTransfer(
        AttributeDetailList &pointAttributes,
        AttributeDetailList &vertexAttributes,
        AttributeDetailList &primitiveAttributes,
        const openvdb::Int32Grid& closestPrimGrid,
        const openvdb::math::Transform& transform,
        const GU_Detail& meshGdp);

    inline MeshAttrTransfer(const MeshAttrTransfer &other);

    inline ~MeshAttrTransfer() {}

    /// Main calls
    inline void runParallel();
    inline void runSerial();

    inline void operator()(IterRange &range) const;

private:
    AttributeDetailList mPointAttributes, mVertexAttributes, mPrimitiveAttributes;
    const openvdb::Int32Grid& mClosestPrimGrid;

    const openvdb::math::Transform& mTransform;

    const GA_Detail &mMeshGdp;
};


MeshAttrTransfer::MeshAttrTransfer(
    AttributeDetailList& pointAttributes,
    AttributeDetailList& vertexAttributes,
    AttributeDetailList& primitiveAttributes,
    const openvdb::Int32Grid& closestPrimGrid,
    const openvdb::math::Transform& transform,
    const GU_Detail& meshGdp):
    mPointAttributes(pointAttributes),
    mVertexAttributes(vertexAttributes),
    mPrimitiveAttributes(primitiveAttributes),
    mClosestPrimGrid(closestPrimGrid),
    mTransform(transform),
    mMeshGdp(meshGdp)
{
}


MeshAttrTransfer::MeshAttrTransfer(const MeshAttrTransfer &other):
    mPointAttributes(other.mPointAttributes.size()),
    mVertexAttributes(other.mVertexAttributes.size()),
    mPrimitiveAttributes(other.mPrimitiveAttributes.size()),
    mClosestPrimGrid(other.mClosestPrimGrid),
    mTransform(other.mTransform),
    mMeshGdp(other.mMeshGdp)
{
    // Deep-copy the AttributeDetail arrays, to construct unique tree
    // accessors per thread.
    for (size_t i = 0, N = other.mPointAttributes.size(); i < N; ++i) {
        mPointAttributes[i] = other.mPointAttributes[i]->copy();
    }

    for (size_t i = 0, N = other.mVertexAttributes.size(); i < N; ++i) {
        mVertexAttributes[i] = other.mVertexAttributes[i]->copy();
    }

    for (size_t i = 0, N = other.mPrimitiveAttributes.size(); i < N; ++i) {
        mPrimitiveAttributes[i] = other.mPrimitiveAttributes[i]->copy();
    }
}


void
MeshAttrTransfer::runParallel()
{
    IterRange range(mClosestPrimGrid.tree().beginLeaf());
    tbb::parallel_for(range, *this);
}

void
MeshAttrTransfer::runSerial()
{
    IterRange range(mClosestPrimGrid.tree().beginLeaf());
    (*this)(range);
}


void
MeshAttrTransfer::operator()(IterRange &range) const
{
    openvdb::Int32Tree::LeafNodeType::ValueOnCIter iter;
    openvdb::Coord ijk;

    const bool ptnAttrTransfer = mPointAttributes.size() > 0;
    const bool vtxAttrTransfer = mVertexAttributes.size() > 0;

    GA_Offset vtxOffsetList[4], ptnOffsetList[4], vtxOffsets[3], ptnOffsets[3], prmOffset;

    openvdb::Vec3d ptnList[4], xyz, cpt, cpt2, uvw, uvw2;

    for ( ; range; ++range) {
        iter = range.iterator()->beginValueOn();
        for ( ; iter; ++iter) {

            ijk = iter.getCoord();

            // The voxel value is the index of the closest primitive.
            const GA_Index prmIndex = iter.getValue();
            prmOffset = mMeshGdp.primitiveOffset(prmIndex);

            // Transfer primitive attributes
            for (size_t i = 0, N = mPrimitiveAttributes.size(); i < N; ++i) {
                mPrimitiveAttributes[i]->set(ijk, prmOffset);
            }

            if (!ptnAttrTransfer && !vtxAttrTransfer) continue;

            // Transfer vertex and point attributes
            const GA_Primitive * primRef = mMeshGdp.getPrimitiveList().get(prmOffset);

            const GA_Size vtxn = primRef->getVertexCount();

            // Get vertex and point offsets
            for (GA_Size vtx = 0; vtx < vtxn; ++vtx) {
                const GA_Offset vtxoff = primRef->getVertexOffset(vtx);
                ptnOffsetList[vtx] = mMeshGdp.vertexPoint(vtxoff);
                vtxOffsetList[vtx] = vtxoff;

                UT_Vector3 p = mMeshGdp.getPos3(ptnOffsetList[vtx]);
                ptnList[vtx][0] = double(p[0]);
                ptnList[vtx][1] = double(p[1]);
                ptnList[vtx][2] = double(p[2]);
            }

            xyz = mTransform.indexToWorld(ijk);

            // Compute barycentric coordinates
            cpt = closestPointOnTriangleToPoint(
                ptnList[0], ptnList[2], ptnList[1], xyz, uvw);

            vtxOffsets[0] = vtxOffsetList[0]; // cpt offsets
            ptnOffsets[0] = ptnOffsetList[0];
            vtxOffsets[1] = vtxOffsetList[2];
            ptnOffsets[1] = ptnOffsetList[2];
            vtxOffsets[2] = vtxOffsetList[1];
            ptnOffsets[2] = ptnOffsetList[1];

            if (4 == vtxn) {
                // Quad: test the second triangle and keep whichever closest
                // point is nearer to the voxel center.
                cpt2 = closestPointOnTriangleToPoint(
                    ptnList[0], ptnList[3], ptnList[2], xyz, uvw2);

                if ((cpt2 - xyz).lengthSqr() < (cpt - xyz).lengthSqr()) {
                    uvw = uvw2;
                    vtxOffsets[1] = vtxOffsetList[3];
                    ptnOffsets[1] = ptnOffsetList[3];
                    vtxOffsets[2] = vtxOffsetList[2];
                    ptnOffsets[2] = ptnOffsetList[2];
                }
            }

            // Transfer vertex attributes
            for (size_t i = 0, N = mVertexAttributes.size(); i < N; ++i) {
                mVertexAttributes[i]->set(ijk, vtxOffsets, uvw);
            }

            // Transfer point attributes
            for (size_t i = 0, N = mPointAttributes.size(); i < N; ++i) {
                mPointAttributes[i]->set(ijk, ptnOffsets, uvw);
            }

        } // end sparse voxel iteration.

    } // end leaf-node iteration
}


////////////////////////////////////////

// TBB object to transfer point attributes.
// Only quads and/or triangles are supported
// NOTE: This class has all code in the header and so it cannot have OPENVDB_HOUDINI_API.
class PointAttrTransfer { public: using IterRange = openvdb::tree::IteratorRange<openvdb::Int32Tree::LeafCIter>; inline PointAttrTransfer( AttributeDetailList &pointAttributes, const openvdb::Int32Grid& closestPtnIdxGrid, const GU_Detail& ptGeop); inline PointAttrTransfer(const PointAttrTransfer &other); inline ~PointAttrTransfer() {} /// Main calls inline void runParallel(); inline void runSerial(); inline void operator()(IterRange &range) const; private: AttributeDetailList mPointAttributes; const openvdb::Int32Grid& mClosestPtnIdxGrid; const GA_Detail &mPtGeo; }; PointAttrTransfer::PointAttrTransfer( AttributeDetailList& pointAttributes, const openvdb::Int32Grid& closestPtnIdxGrid, const GU_Detail& ptGeop): mPointAttributes(pointAttributes), mClosestPtnIdxGrid(closestPtnIdxGrid), mPtGeo(ptGeop) { } PointAttrTransfer::PointAttrTransfer(const PointAttrTransfer &other): mPointAttributes(other.mPointAttributes.size()), mClosestPtnIdxGrid(other.mClosestPtnIdxGrid), mPtGeo(other.mPtGeo) { // Deep-copy the AttributeDetail arrays, to construct unique tree // accessors per thread. for (size_t i = 0, N = other.mPointAttributes.size(); i < N; ++i) { mPointAttributes[i] = other.mPointAttributes[i]->copy(); } } void PointAttrTransfer::runParallel() { IterRange range(mClosestPtnIdxGrid.tree().beginLeaf()); tbb::parallel_for(range, *this); } void PointAttrTransfer::runSerial() { IterRange range(mClosestPtnIdxGrid.tree().beginLeaf()); (*this)(range); } void PointAttrTransfer::operator()(IterRange &range) const { openvdb::Int32Tree::LeafNodeType::ValueOnCIter iter; openvdb::Coord ijk; for ( ; range; ++range) { iter = range.iterator()->beginValueOn(); for ( ; iter; ++iter) { ijk = iter.getCoord(); const GA_Index pointIndex = iter.getValue(); const GA_Offset pointOffset = mPtGeo.pointOffset(pointIndex); // Transfer point attributes for (size_t i = 0, N = mPointAttributes.size(); i < N; ++i) { mPointAttributes[i]->set(ijk, pointOffset); } } // end sparse voxel iteration. 
} // end leaf-node iteration } //////////////////////////////////////// // Mesh to Mesh Attribute Transfer Utils struct AttributeCopyBase { using Ptr = std::shared_ptr<AttributeCopyBase>; virtual ~AttributeCopyBase() {} virtual void copy(GA_Offset /*source*/, GA_Offset /*target*/) = 0; virtual void copy(GA_Offset&, GA_Offset&, GA_Offset&, GA_Offset /*target*/, const openvdb::Vec3d& /*uvw*/) = 0; protected: AttributeCopyBase() {} }; template<class ValueType> struct AttributeCopy: public AttributeCopyBase { public: AttributeCopy(const GA_Attribute& sourceAttr, GA_Attribute& targetAttr) : mSourceAttr(sourceAttr) , mTargetAttr(targetAttr) , mAIFTuple(*mSourceAttr.getAIFTuple()) , mTupleSize(mAIFTuple.getTupleSize(&mSourceAttr)) { } void copy(GA_Offset source, GA_Offset target) override { ValueType data; for (int i = 0; i < mTupleSize; ++i) { mAIFTuple.get(&mSourceAttr, source, data, i); mAIFTuple.set(&mTargetAttr, target, data, i); } } void copy(GA_Offset& v0, GA_Offset& v1, GA_Offset& v2, GA_Offset target, const openvdb::Vec3d& uvw) override { doCopy<ValueType>(v0, v1, v2, target, uvw); } private: template<typename T> typename std::enable_if<std::is_integral<T>::value>::type doCopy(GA_Offset& v0, GA_Offset& v1, GA_Offset& v2, GA_Offset target, const openvdb::Vec3d& uvw) { GA_Offset source = v0; double min = uvw[0]; if (uvw[1] < min) { min = uvw[1]; source = v1; } if (uvw[2] < min) source = v2; ValueType data; for (int i = 0; i < mTupleSize; ++i) { mAIFTuple.get(&mSourceAttr, source, data, i); mAIFTuple.set(&mTargetAttr, target, data, i); } } template <typename T> typename std::enable_if<std::is_floating_point<T>::value>::type doCopy(GA_Offset& v0, GA_Offset& v1, GA_Offset& v2, GA_Offset target, const openvdb::Vec3d& uvw) { ValueType a, b, c; for (int i = 0; i < mTupleSize; ++i) { mAIFTuple.get(&mSourceAttr, v0, a, i); mAIFTuple.get(&mSourceAttr, v1, b, i); mAIFTuple.get(&mSourceAttr, v2, c, i); mAIFTuple.set(&mTargetAttr, target, a*uvw[0] + b*uvw[1] + c*uvw[2], i); } 
} const GA_Attribute& mSourceAttr; GA_Attribute& mTargetAttr; const GA_AIFTuple& mAIFTuple; int mTupleSize; }; struct StrAttributeCopy: public AttributeCopyBase { public: StrAttributeCopy(const GA_Attribute& sourceAttr, GA_Attribute& targetAttr) : mSourceAttr(sourceAttr) , mTargetAttr(targetAttr) , mAIF(*mSourceAttr.getAIFSharedStringTuple()) , mTupleSize(mAIF.getTupleSize(&mSourceAttr)) { } void copy(GA_Offset source, GA_Offset target) override { for (int i = 0; i < mTupleSize; ++i) { mAIF.setString(&mTargetAttr, target, mAIF.getString(&mSourceAttr, source, i), i); } } void copy(GA_Offset& v0, GA_Offset& v1, GA_Offset& v2, GA_Offset target, const openvdb::Vec3d& uvw) override { GA_Offset source = v0; double min = uvw[0]; if (uvw[1] < min) { min = uvw[1]; source = v1; } if (uvw[2] < min) source = v2; for (int i = 0; i < mTupleSize; ++i) { mAIF.setString(&mTargetAttr, target, mAIF.getString(&mSourceAttr, source, i), i); } } protected: const GA_Attribute& mSourceAttr; GA_Attribute& mTargetAttr; const GA_AIFSharedStringTuple& mAIF; int mTupleSize; }; //////////////////////////////////////// inline AttributeCopyBase::Ptr createAttributeCopier(const GA_Attribute& sourceAttr, GA_Attribute& targetAttr) { const GA_AIFTuple * aifTuple = sourceAttr.getAIFTuple(); AttributeCopyBase::Ptr attr; if (aifTuple) { const GA_Storage sourceStorage = aifTuple->getStorage(&sourceAttr); const GA_Storage targetStorage = aifTuple->getStorage(&targetAttr); const int sourceTupleSize = aifTuple->getTupleSize(&sourceAttr); const int targetTupleSize = aifTuple->getTupleSize(&targetAttr); if (sourceStorage == targetStorage && sourceTupleSize == targetTupleSize) { switch (sourceStorage) { case GA_STORE_INT16: case GA_STORE_INT32: attr = AttributeCopyBase::Ptr( new AttributeCopy<int32>(sourceAttr, targetAttr)); break; case GA_STORE_INT64: attr = AttributeCopyBase::Ptr( new AttributeCopy<int64>(sourceAttr, targetAttr)); break; case GA_STORE_REAL16: case GA_STORE_REAL32: attr = 
AttributeCopyBase::Ptr( new AttributeCopy<fpreal32>(sourceAttr, targetAttr)); break; case GA_STORE_REAL64: attr = AttributeCopyBase::Ptr( new AttributeCopy<fpreal64>(sourceAttr, targetAttr)); break; default: break; } } } else { const GA_AIFSharedStringTuple * aifString = sourceAttr.getAIFSharedStringTuple(); if (aifString) { attr = AttributeCopyBase::Ptr(new StrAttributeCopy(sourceAttr, targetAttr)); } } return attr; } //////////////////////////////////////// inline GA_Offset findClosestPrimitiveToPoint( const GU_Detail& geo, const std::set<GA_Index>& primitives, const openvdb::Vec3d& p, GA_Offset& vert0, GA_Offset& vert1, GA_Offset& vert2, openvdb::Vec3d& uvw) { std::set<GA_Index>::const_iterator it = primitives.begin(); GA_Offset primOffset = GA_INVALID_OFFSET; const GA_Primitive * primRef = nullptr; double minDist = std::numeric_limits<double>::max(); openvdb::Vec3d a, b, c, d, tmpUVW; UT_Vector3 tmpPoint; for (; it != primitives.end(); ++it) { const GA_Offset offset = geo.primitiveOffset(*it); primRef = geo.getPrimitiveList().get(offset); const GA_Size vertexCount = primRef->getVertexCount(); if (vertexCount == 3 || vertexCount == 4) { tmpPoint = geo.getPos3(primRef->getPointOffset(0)); a[0] = tmpPoint.x(); a[1] = tmpPoint.y(); a[2] = tmpPoint.z(); tmpPoint = geo.getPos3(primRef->getPointOffset(1)); b[0] = tmpPoint.x(); b[1] = tmpPoint.y(); b[2] = tmpPoint.z(); tmpPoint = geo.getPos3(primRef->getPointOffset(2)); c[0] = tmpPoint.x(); c[1] = tmpPoint.y(); c[2] = tmpPoint.z(); double tmpDist = (p - openvdb::math::closestPointOnTriangleToPoint(a, c, b, p, tmpUVW)).lengthSqr(); if (tmpDist < minDist) { minDist = tmpDist; primOffset = offset; uvw = tmpUVW; vert0 = primRef->getVertexOffset(0); vert1 = primRef->getVertexOffset(2); vert2 = primRef->getVertexOffset(1); } if (vertexCount == 4) { tmpPoint = geo.getPos3(primRef->getPointOffset(3)); d[0] = tmpPoint.x(); d[1] = tmpPoint.y(); d[2] = tmpPoint.z(); tmpDist = (p - openvdb::math::closestPointOnTriangleToPoint( a, 
d, c, p, tmpUVW)).lengthSqr(); if (tmpDist < minDist) { minDist = tmpDist; primOffset = offset; uvw = tmpUVW; vert0 = primRef->getVertexOffset(0); vert1 = primRef->getVertexOffset(3); vert2 = primRef->getVertexOffset(2); } } } } return primOffset; } // Faster for small primitive counts inline GA_Offset findClosestPrimitiveToPoint( const GU_Detail& geo, std::vector<GA_Index>& primitives, const openvdb::Vec3d& p, GA_Offset& vert0, GA_Offset& vert1, GA_Offset& vert2, openvdb::Vec3d& uvw) { GA_Offset primOffset = GA_INVALID_OFFSET; const GA_Primitive * primRef = nullptr; double minDist = std::numeric_limits<double>::max(); openvdb::Vec3d a, b, c, d, tmpUVW; UT_Vector3 tmpPoint; std::sort(primitives.begin(), primitives.end()); GA_Index lastPrim = -1; for (size_t n = 0, N = primitives.size(); n < N; ++n) { if (primitives[n] == lastPrim) continue; lastPrim = primitives[n]; const GA_Offset offset = geo.primitiveOffset(lastPrim); primRef = geo.getPrimitiveList().get(offset); const GA_Size vertexCount = primRef->getVertexCount(); if (vertexCount == 3 || vertexCount == 4) { tmpPoint = geo.getPos3(primRef->getPointOffset(0)); a[0] = tmpPoint.x(); a[1] = tmpPoint.y(); a[2] = tmpPoint.z(); tmpPoint = geo.getPos3(primRef->getPointOffset(1)); b[0] = tmpPoint.x(); b[1] = tmpPoint.y(); b[2] = tmpPoint.z(); tmpPoint = geo.getPos3(primRef->getPointOffset(2)); c[0] = tmpPoint.x(); c[1] = tmpPoint.y(); c[2] = tmpPoint.z(); double tmpDist = (p - openvdb::math::closestPointOnTriangleToPoint(a, c, b, p, tmpUVW)).lengthSqr(); if (tmpDist < minDist) { minDist = tmpDist; primOffset = offset; uvw = tmpUVW; vert0 = primRef->getVertexOffset(0); vert1 = primRef->getVertexOffset(2); vert2 = primRef->getVertexOffset(1); } if (vertexCount == 4) { tmpPoint = geo.getPos3(primRef->getPointOffset(3)); d[0] = tmpPoint.x(); d[1] = tmpPoint.y(); d[2] = tmpPoint.z(); tmpDist = (p - openvdb::math::closestPointOnTriangleToPoint( a, d, c, p, tmpUVW)).lengthSqr(); if (tmpDist < minDist) { minDist = tmpDist; 
primOffset = offset; uvw = tmpUVW; vert0 = primRef->getVertexOffset(0); vert1 = primRef->getVertexOffset(3); vert2 = primRef->getVertexOffset(2); } } } } return primOffset; } //////////////////////////////////////// template<class GridType> class TransferPrimitiveAttributesOp { public: using IndexT = typename GridType::ValueType; using IndexAccT = typename GridType::ConstAccessor; using AttrCopyPtrVec = std::vector<AttributeCopyBase::Ptr>; TransferPrimitiveAttributesOp( const GU_Detail& sourceGeo, GU_Detail& targetGeo, const GridType& indexGrid, AttrCopyPtrVec& primAttributes, AttrCopyPtrVec& vertAttributes) : mSourceGeo(sourceGeo) , mTargetGeo(targetGeo) , mIndexGrid(indexGrid) , mPrimAttributes(primAttributes) , mVertAttributes(vertAttributes) { } inline void operator()(const GA_SplittableRange&) const; private: inline void copyPrimAttrs(const GA_Primitive&, const UT_Vector3&, IndexAccT&) const; template<typename PrimT> inline void copyVertAttrs(const PrimT&, const UT_Vector3&, IndexAccT&) const; const GU_Detail& mSourceGeo; GU_Detail& mTargetGeo; const GridType& mIndexGrid; AttrCopyPtrVec& mPrimAttributes; AttrCopyPtrVec& mVertAttributes; }; template<class GridType> inline void TransferPrimitiveAttributesOp<GridType>::operator()(const GA_SplittableRange& range) const { if (mPrimAttributes.empty() && mVertAttributes.empty()) return; auto polyIdxAcc = mIndexGrid.getConstAccessor(); for (GA_PageIterator pageIt = range.beginPages(); !pageIt.atEnd(); ++pageIt) { auto start = GA_Offset(), end = GA_Offset(); for (GA_Iterator blockIt(pageIt.begin()); blockIt.blockAdvance(start, end); ) { for (auto targetOffset = start; targetOffset < end; ++targetOffset) { const auto* target = mTargetGeo.getPrimitiveList().get(targetOffset); if (!target) continue; const auto targetN = mTargetGeo.getGEOPrimitive(targetOffset)->computeNormal(); if (!mPrimAttributes.empty()) { // Transfer primitive attributes. 
copyPrimAttrs(*target, targetN, polyIdxAcc); } if (!mVertAttributes.empty()) { if (target->getTypeId() != GA_PRIMPOLYSOUP) { copyVertAttrs(*target, targetN, polyIdxAcc); } else { if (const auto* soup = UTverify_cast<const GEO_PrimPolySoup*>(target)) { // Iterate in parallel over the member polygons of a polygon soup. using SizeRange = UT_BlockedRange<GA_Size>; const auto processPolyRange = [&](const SizeRange& range) { auto threadLocalPolyIdxAcc = mIndexGrid.getConstAccessor(); for (GEO_PrimPolySoup::PolygonIterator it(*soup, range.begin()); !it.atEnd() && (it.polygon() < range.end()); ++it) { copyVertAttrs(it, it.computeNormal(), threadLocalPolyIdxAcc); } }; UTparallelFor(SizeRange(0, soup->getPolygonCount()), processPolyRange); } } } } } } } /// @brief Find the closest match to the target primitive from among the source primitives /// and copy primitive attributes from that primitive to the target primitive. /// @note This isn't a particularly useful operation when the target is a polygon soup, /// because the entire soup is a single primitive, whereas the source primitives /// are likely to be individual polygons. template<class GridType> inline void TransferPrimitiveAttributesOp<GridType>::copyPrimAttrs( const GA_Primitive& targetPrim, const UT_Vector3& targetNormal, IndexAccT& polyIdxAcc) const { const auto& transform = mIndexGrid.transform(); UT_Vector3 sourceN, targetN = targetNormal; const bool isPolySoup = (targetPrim.getTypeId() == GA_PRIMPOLYSOUP); // Compute avg. vertex position. openvdb::Vec3d pos(0, 0, 0); int count = static_cast<int>(targetPrim.getVertexCount()); for (int vtx = 0; vtx < count; ++vtx) { pos += UTvdbConvert(targetPrim.getPos3(vtx)); } if (count > 1) pos /= double(count); // Find closest source primitive to current avg. vertex position. 
const auto coord = openvdb::Coord::floor(transform.worldToIndex(pos)); std::vector<GA_Index> primitives(8), similarPrimitives(8); IndexT primIndex; openvdb::Coord ijk; for (int d = 0; d < 8; ++d) { ijk[0] = coord[0] + (((d & 0x02) >> 1) ^ (d & 0x01)); ijk[1] = coord[1] + ((d & 0x02) >> 1); ijk[2] = coord[2] + ((d & 0x04) >> 2); if (polyIdxAcc.probeValue(ijk, primIndex) && openvdb::Index32(primIndex) != openvdb::util::INVALID_IDX) { GA_Offset tmpOffset = mSourceGeo.primitiveOffset(primIndex); sourceN = mSourceGeo.getGEOPrimitive(tmpOffset)->computeNormal(); // Skip the normal test when the target is a polygon soup, because // the entire soup is a single primitive, whose normal is unlikely // to coincide with any of the source primitives. if (isPolySoup || sourceN.dot(targetN) > 0.5) { similarPrimitives.push_back(primIndex); } else { primitives.push_back(primIndex); } } } if (!primitives.empty() || !similarPrimitives.empty()) { GA_Offset source, v0, v1, v2; openvdb::Vec3d uvw; if (!similarPrimitives.empty()) { source = findClosestPrimitiveToPoint( mSourceGeo, similarPrimitives, pos, v0, v1, v2, uvw); } else { source = findClosestPrimitiveToPoint( mSourceGeo, primitives, pos, v0, v1, v2, uvw); } // Transfer attributes const auto targetOffset = targetPrim.getMapOffset(); for (size_t n = 0, N = mPrimAttributes.size(); n < N; ++n) { mPrimAttributes[n]->copy(source, targetOffset); } } } /// @brief Find the closest match to the target primitive from among the source primitives /// (using slightly different criteria than copyPrimAttrs()) and copy vertex attributes /// from that primitive's vertices to the target primitive's vertices. /// @note When the target is a polygon soup, @a targetPrim should be a /// @b GEO_PrimPolySoup::PolygonIterator that points to one of the member polygons of the soup. 
template<typename GridType> template<typename PrimT> inline void TransferPrimitiveAttributesOp<GridType>::copyVertAttrs( const PrimT& targetPrim, const UT_Vector3& targetNormal, IndexAccT& polyIdxAcc) const { const auto& transform = mIndexGrid.transform(); openvdb::Vec3d pos, uvw; openvdb::Coord ijk; UT_Vector3 sourceNormal; std::vector<GA_Index> primitives(8), similarPrimitives(8); for (GA_Size vtx = 0, vtxN = targetPrim.getVertexCount(); vtx < vtxN; ++vtx) { pos = UTvdbConvert(targetPrim.getPos3(vtx)); const auto coord = openvdb::Coord::floor(transform.worldToIndex(pos)); primitives.clear(); similarPrimitives.clear(); int primIndex; for (int d = 0; d < 8; ++d) { ijk[0] = coord[0] + (((d & 0x02) >> 1) ^ (d & 0x01)); ijk[1] = coord[1] + ((d & 0x02) >> 1); ijk[2] = coord[2] + ((d & 0x04) >> 2); if (polyIdxAcc.probeValue(ijk, primIndex) && (openvdb::Index32(primIndex) != openvdb::util::INVALID_IDX)) { GA_Offset tmpOffset = mSourceGeo.primitiveOffset(primIndex); sourceNormal = mSourceGeo.getGEOPrimitive(tmpOffset)->computeNormal(); if (sourceNormal.dot(targetNormal) > 0.5) { primitives.push_back(primIndex); } } } if (!primitives.empty() || !similarPrimitives.empty()) { GA_Offset v0, v1, v2; if (!similarPrimitives.empty()) { findClosestPrimitiveToPoint(mSourceGeo, similarPrimitives, pos, v0, v1, v2, uvw); } else { findClosestPrimitiveToPoint(mSourceGeo, primitives, pos, v0, v1, v2, uvw); } for (size_t n = 0, N = mVertAttributes.size(); n < N; ++n) { mVertAttributes[n]->copy(v0, v1, v2, targetPrim.getVertexOffset(vtx), uvw); } } } } //////////////////////////////////////// template<class GridType> class TransferPointAttributesOp { public: TransferPointAttributesOp( const GU_Detail& sourceGeo, GU_Detail& targetGeo, const GridType& indexGrid, std::vector<AttributeCopyBase::Ptr>& pointAttributes, const GA_PrimitiveGroup* surfacePrims = nullptr); void operator()(const GA_SplittableRange&) const; private: const GU_Detail& mSourceGeo; GU_Detail& mTargetGeo; const GridType& 
mIndexGrid; std::vector<AttributeCopyBase::Ptr>& mPointAttributes; const GA_PrimitiveGroup* mSurfacePrims; }; template<class GridType> TransferPointAttributesOp<GridType>::TransferPointAttributesOp( const GU_Detail& sourceGeo, GU_Detail& targetGeo, const GridType& indexGrid, std::vector<AttributeCopyBase::Ptr>& pointAttributes, const GA_PrimitiveGroup* surfacePrims) : mSourceGeo(sourceGeo) , mTargetGeo(targetGeo) , mIndexGrid(indexGrid) , mPointAttributes(pointAttributes) , mSurfacePrims(surfacePrims) { } template<class GridType> void TransferPointAttributesOp<GridType>::operator()(const GA_SplittableRange& range) const { using IndexT = typename GridType::ValueType; GA_Offset start, end, vtxOffset, primOffset, target, v0, v1, v2; typename GridType::ConstAccessor polyIdxAcc = mIndexGrid.getConstAccessor(); const openvdb::math::Transform& transform = mIndexGrid.transform(); openvdb::Vec3d pos, indexPos, uvw; std::vector<GA_Index> primitives(8); openvdb::Coord ijk, coord; for (GA_PageIterator pageIt = range.beginPages(); !pageIt.atEnd(); ++pageIt) { for (GA_Iterator blockIt(pageIt.begin()); blockIt.blockAdvance(start, end); ) { for (target = start; target < end; ++target) { vtxOffset = mTargetGeo.pointVertex(target); // Check if point is referenced by a surface primitive. 
if (mSurfacePrims) { bool surfacePrim = false; while (GAisValid(vtxOffset)) { primOffset = mTargetGeo.vertexPrimitive(vtxOffset); if (mSurfacePrims->containsIndex(mTargetGeo.primitiveIndex(primOffset))) { surfacePrim = true; break; } vtxOffset = mTargetGeo.vertexToNextVertex(vtxOffset); } if (!surfacePrim) continue; } const UT_Vector3 p = mTargetGeo.getPos3(target); pos[0] = p.x(); pos[1] = p.y(); pos[2] = p.z(); indexPos = transform.worldToIndex(pos); coord[0] = int(std::floor(indexPos[0])); coord[1] = int(std::floor(indexPos[1])); coord[2] = int(std::floor(indexPos[2])); primitives.clear(); IndexT primIndex; for (int d = 0; d < 8; ++d) { ijk[0] = coord[0] + (((d & 0x02) >> 1) ^ (d & 0x01)); ijk[1] = coord[1] + ((d & 0x02) >> 1); ijk[2] = coord[2] + ((d & 0x04) >> 2); if (polyIdxAcc.probeValue(ijk, primIndex) && openvdb::Index32(primIndex) != openvdb::util::INVALID_IDX) { primitives.push_back(primIndex); } } if (!primitives.empty()) { findClosestPrimitiveToPoint(mSourceGeo, primitives, pos, v0, v1, v2, uvw); v0 = mSourceGeo.vertexPoint(v0); v1 = mSourceGeo.vertexPoint(v1); v2 = mSourceGeo.vertexPoint(v2); for (size_t n = 0, N = mPointAttributes.size(); n < N; ++n) { mPointAttributes[n]->copy(v0, v1, v2, target, uvw); } } } } } } //////////////////////////////////////// template<class GridType> inline void transferPrimitiveAttributes( const GU_Detail& sourceGeo, GU_Detail& targetGeo, GridType& indexGrid, Interrupter& boss, const GA_PrimitiveGroup* primitives = nullptr) { // Match public primitive attributes GA_AttributeDict::iterator it = sourceGeo.primitiveAttribs().begin(GA_SCOPE_PUBLIC); if (indexGrid.activeVoxelCount() == 0) return; std::vector<AttributeCopyBase::Ptr> primAttributeList; // Primitive attributes for (; !it.atEnd(); ++it) { const GA_Attribute* sourceAttr = it.attrib(); if (nullptr == targetGeo.findPrimitiveAttribute(it.name())) { targetGeo.addPrimAttrib(sourceAttr); } GA_Attribute* targetAttr = targetGeo.findPrimitiveAttribute(it.name()); if 
(sourceAttr && targetAttr) { AttributeCopyBase::Ptr att = createAttributeCopier(*sourceAttr, *targetAttr); if(att) primAttributeList.push_back(att); } } if (boss.wasInterrupted()) return; std::vector<AttributeCopyBase::Ptr> vertAttributeList; it = sourceGeo.vertexAttribs().begin(GA_SCOPE_PUBLIC); // Vertex attributes for (; !it.atEnd(); ++it) { const GA_Attribute* sourceAttr = it.attrib(); if (nullptr == targetGeo.findVertexAttribute(it.name())) { targetGeo.addVertexAttrib(sourceAttr); } GA_Attribute* targetAttr = targetGeo.findVertexAttribute(it.name()); if (sourceAttr && targetAttr) { targetAttr->hardenAllPages(); AttributeCopyBase::Ptr att = createAttributeCopier(*sourceAttr, *targetAttr); if(att) vertAttributeList.push_back(att); } } if (!boss.wasInterrupted() && (!primAttributeList.empty() || !vertAttributeList.empty())) { UTparallelFor(GA_SplittableRange(targetGeo.getPrimitiveRange(primitives)), TransferPrimitiveAttributesOp<GridType>(sourceGeo, targetGeo, indexGrid, primAttributeList, vertAttributeList)); } if (!boss.wasInterrupted()) { std::vector<AttributeCopyBase::Ptr> pointAttributeList; it = sourceGeo.pointAttribs().begin(GA_SCOPE_PUBLIC); // Point attributes for (; !it.atEnd(); ++it) { if (std::string(it.name()) == "P") continue; // Ignore previous point positions. const GA_Attribute* sourceAttr = it.attrib(); if (nullptr == targetGeo.findPointAttribute(it.name())) { targetGeo.addPointAttrib(sourceAttr); } GA_Attribute* targetAttr = targetGeo.findPointAttribute(it.name()); if (sourceAttr && targetAttr) { AttributeCopyBase::Ptr att = createAttributeCopier(*sourceAttr, *targetAttr); if(att) pointAttributeList.push_back(att); } } if (!boss.wasInterrupted() && !pointAttributeList.empty()) { UTparallelFor(GA_SplittableRange(targetGeo.getPointRange()), TransferPointAttributesOp<GridType>(sourceGeo, targetGeo, indexGrid, pointAttributeList, primitives)); } } } } // namespace openvdb_houdini #endif // OPENVDB_HOUDINI_ATTRIBUTE_TRANSFER_UTIL_HAS_BEEN_INCLUDED
47,264
C
29.04768
100
0.597558
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Read.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Read.cc /// /// @author FX R&D OpenVDB team #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb_houdini/GEO_PrimVDB.h> #include <openvdb_houdini/GU_PrimVDB.h> #include <UT/UT_Interrupt.h> #include <cctype> #include <stdexcept> #include <string> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; class SOP_OpenVDB_Read: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Read(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Read() override {} void getDescriptiveParmName(UT_String& s) const override { s = "file_name"; } static void registerSop(OP_OperatorTable*); static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned input) const override { return (input == 0); } protected: OP_ERROR cookVDBSop(OP_Context&) override; bool updateParmsFlags() override; }; //////////////////////////////////////// namespace { // Populate a choice list with grid names read from a VDB file. void populateGridMenu(void* data, PRM_Name* choicenames, int listsize, const PRM_SpareData*, const PRM_Parm*) { choicenames[0].setToken(0); choicenames[0].setLabel(0); hvdb::SOP_NodeVDB* sop = static_cast<hvdb::SOP_NodeVDB*>(data); if (sop == nullptr) return; // Get the parameters from the GUI // The file name of the vdb we would like to load const auto file_name = sop->evalStdString("file_name", 0); // Keep track of how many names we have entered int count = 0; // Add the star token to the menu choicenames[0].setTokenAndLabel("*", "*"); ++count; try { // Open the file and read the header, but don't read in any grids. // An exception is thrown if the file is not a valid VDB file. openvdb::io::File file(file_name); file.open(); // Loop over the names of all of the grids in the file. 
for (openvdb::io::File::NameIterator nameIter = file.beginName(); nameIter != file.endName(); ++nameIter) { // Make sure we don't write more than the listsize, // and reserve a spot for the terminating 0. if (count > listsize - 2) break; std::string gridName = nameIter.gridName(), tokenName = gridName; // When a file contains multiple grids with the same name, the names are // distinguished with a trailing array index ("grid[0]", "grid[1]", etc.). // Escape such names as "grid\[0]", "grid\[1]", etc. to inhibit UT_String's // pattern matching. if (tokenName.back() == ']') { auto start = tokenName.find_last_of('['); if (start != std::string::npos && tokenName[start + 1] != ']') { for (auto i = start + 1; i < tokenName.size() - 1; ++i) { // Only digits should appear between the last '[' and the trailing ']'. if (!std::isdigit(tokenName[i])) { start = std::string::npos; break; } } if (start != std::string::npos) tokenName.replace(start, 1, "\\["); } } // Add the grid's name to the list. choicenames[count].setTokenAndLabel(tokenName.c_str(), gridName.c_str()); ++count; } file.close(); } catch (...) {} // Terminate the list. choicenames[count].setTokenAndLabel(nullptr, nullptr); } // Callback to trigger a file reload int reloadCB(void* data, int /*idx*/, float /*time*/, const PRM_Template*) { SOP_OpenVDB_Read* sop = static_cast<SOP_OpenVDB_Read*>(data); if (nullptr != sop) { sop->forceRecook(); return 1; // request a refresh of the parameter pane } return 0; // no refresh } } // unnamed namespace //////////////////////////////////////// // Build UI and register this operator. 
void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; // Metadata-only toggle parms.add(hutil::ParmFactory(PRM_TOGGLE, "metadata_only", "Read Metadata Only") .setDefault(PRMzeroDefaults) .setTooltip( "If enabled, output empty VDBs populated with their metadata and transforms only.")); // Clipping toggle parms.add(hutil::ParmFactory(PRM_TOGGLE, "clip", "Clip to Reference Bounds") .setDefault(PRMzeroDefaults) .setTooltip("Clip VDBs to the bounding box of the reference geometry.")); // Filename parms.add(hutil::ParmFactory(PRM_FILE, "file_name", "File Name") .setDefault(0, "./filename.vdb") .setTooltip("Select a VDB file.")); // Grid name mask parms.add(hutil::ParmFactory(PRM_STRING, "grids", "VDB(s)") .setDefault(0, "*") .setChoiceList(new PRM_ChoiceList(PRM_CHOICELIST_TOGGLE, populateGridMenu)) .setTooltip("VDB names separated by white space (wildcards allowed)") .setDocumentation( "VDB names separated by white space (wildcards allowed)\n\n" "NOTE:\n" " To distinguish between multiple VDBs with the same name,\n" " append an array index to the name: `density\\[0]`, `density\\[1]`, etc.\n" " Escape the index with a backslash to inhibit wildcard pattern matching.\n")); // Toggle to enable/disable grouping parms.add(hutil::ParmFactory(PRM_TOGGLE, "enable_grouping", "") .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setDefault(PRMoneDefaults) .setTooltip( "If enabled, create a group with the given name that comprises the selected VDBs.\n" "If disabled, do not group the selected VDBs.")); // Name for the output group parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setDefault(0, "import os.path\n" "return os.path.splitext(os.path.basename(ch('file_name')))[0]", CH_PYTHON_EXPRESSION) .setTooltip("Specify a name for this group of VDBs.")); // Missing Frame menu { char const * const items[] = { "error", "Report Error", "empty", "No Geometry", nullptr }; parms.add(hutil::ParmFactory(PRM_ORD, "missingframe", "Missing Frame") 
.setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setTooltip( "If the specified file does not exist on disk, either report an error" " (Report Error) or warn and continue (No Geometry).")); } // Reload button parms.add(hutil::ParmFactory(PRM_CALLBACK, "reload", "Reload File") .setCallbackFunc(&reloadCB) .setTooltip("Reread the VDB file.")); parms.add(hutil::ParmFactory(PRM_SEPARATOR, "sep1", "Sep")); // Delayed loading parms.add(hutil::ParmFactory(PRM_TOGGLE, "delayload", "Delay Loading") .setDefault(PRMoneDefaults) .setTooltip( "Don't allocate memory for or read voxel values until the values" " are actually accessed.\n\n" "Delayed loading can significantly lower memory usage, but\n" "note that viewport visualization of a volume usually requires\n" "the entire volume to be loaded into memory.")); // Localization file size slider parms.add(hutil::ParmFactory(PRM_FLT_J, "copylimit", "Copy If Smaller Than") .setTypeExtended(PRM_TYPE_JOIN_PAIR) .setDefault(0.5f) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 10) .setTooltip( "When delayed loading is enabled, a file must not be modified on disk before\n" "it has been fully read. For safety, files smaller than the given size (in GB)\n" "will be copied to a private, temporary location (either $OPENVDB_TEMP_DIR,\n" "$TMPDIR or a system default temp directory).") .setDocumentation( "When delayed loading is enabled, a file must not be modified on disk before" " it has been fully read. For safety, files smaller than the given size (in GB)" " will be copied to a private, temporary location (either `$OPENVDB_TEMP_DIR`," " `$TMPDIR` or a system default temp directory).")); parms.add(hutil::ParmFactory(PRM_LABEL, "copylimitlabel", "GB") .setDocumentation(nullptr)); // Register this operator. 
hvdb::OpenVDBOpFactory("VDB Read", SOP_OpenVDB_Read::factory, parms, *table) .setNativeName("") .addOptionalInput("Optional Bounding Geometry") .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Read a `.vdb` file from disk.\"\"\"\n\ \n\ @overview\n\ \n\ This node reads VDB volumes from a `.vdb` file.\n\ It is usually preferable to use Houdini's native [File|Node:sop/file] node,\n\ however unlike the native node, this node allows one to take advantage of\n\ delayed loading, meaning that only those portions of a volume that are\n\ actually accessed in a scene get loaded into memory.\n\ Delayed loading can significantly reduce memory usage when working\n\ with large volumes (but note that viewport visualization of a volume\n\ usually requires the entire volume to be loaded into memory).\n\ \n\ @related\n\ - [OpenVDB Write|Node:sop/DW_OpenVDBWrite]\n\ - [Node:sop/file]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Read::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Read(net, name, op); } SOP_OpenVDB_Read::SOP_OpenVDB_Read(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// // Disable parms in the UI. 
bool SOP_OpenVDB_Read::updateParmsFlags() { bool changed = false; float t = 0.0; changed |= enableParm("group", bool(evalInt("enable_grouping", 0, t))); const bool delayedLoad = evalInt("delayload", 0, t); changed |= enableParm("copylimit", delayedLoad); changed |= enableParm("copylimitlabel", delayedLoad); return changed; } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Read::cookVDBSop(OP_Context& context) { try { hutil::ScopedInputLock lock(*this, context); gdp->clearAndDestroy(); const fpreal t = context.getTime(); const bool readMetadataOnly = evalInt("metadata_only", 0, t), missingFrameIsError = (0 == evalInt("missingframe", 0, t)); // Get the file name string from the UI. const std::string filename = evalStdString("file_name", t); // Get the grid mask string. UT_String gridStr; evalString(gridStr, "grids", 0, t); // Get the group name string. UT_String groupStr; if (evalInt("enable_grouping", 0, t)) { evalString(groupStr, "group", 0, t); // If grouping is enabled but no group name was given, derive the group name // from the filename (e.g., "bar", given filename "/foo/bar.vdb"). 
/// @internal Currently this is done with an expression on the parameter, /// but it could alternatively be done as follows: //if (!groupStr.isstring()) { // groupStr = filename; // groupStr = UT_String(groupStr.fileName()); // groupStr = groupStr.pathUpToExtension(); //} } const bool delayedLoad = evalInt("delayload", 0, t); const openvdb::Index64 copyMaxBytes = openvdb::Index64(1.0e9 * evalFloat("copylimit", 0, t)); openvdb::BBoxd clipBBox; bool clip = evalInt("clip", 0, t); if (clip) { if (const GU_Detail* clipGeo = inputGeo(0)) { UT_BoundingBox box; clipGeo->getBBox(&box); clipBBox.min()[0] = box.xmin(); clipBBox.min()[1] = box.ymin(); clipBBox.min()[2] = box.zmin(); clipBBox.max()[0] = box.xmax(); clipBBox.max()[1] = box.ymax(); clipBBox.max()[2] = box.zmax(); } clip = clipBBox.isSorted(); } UT_AutoInterrupt progress(("Reading " + filename).c_str()); openvdb::io::File file(filename); openvdb::MetaMap::Ptr fileMetadata; try { // Open the VDB file, but don't read any grids yet. file.setCopyMaxBytes(copyMaxBytes); file.open(delayedLoad); // Read the file-level metadata. fileMetadata = file.getMetadata(); if (!fileMetadata) fileMetadata.reset(new openvdb::MetaMap); } catch (std::exception& e) { ///< @todo consider catching only openvdb::IoError std::string mesg; if (const char* s = e.what()) mesg = s; // Strip off the exception name from an openvdb::IoError. if (mesg.substr(0, 9) == "IoError: ") mesg = mesg.substr(9); if (missingFrameIsError) { addError(SOP_MESSAGE, mesg.c_str()); } else { addWarning(SOP_MESSAGE, mesg.c_str()); } return error(); } // Create a group for the grid primitives. GA_PrimitiveGroup* group = nullptr; if (groupStr.isstring()) { group = gdp->newPrimitiveGroup(groupStr.buffer()); } // Loop over all grids in the file. 
for (openvdb::io::File::NameIterator nameIter = file.beginName(); nameIter != file.endName(); ++nameIter) { if (progress.wasInterrupted()) throw std::runtime_error("Was Interrupted"); // Skip grids whose names don't match the user-supplied mask. const std::string& gridName = nameIter.gridName(); if (!UT_String(gridName).multiMatch(gridStr.buffer(), 1, " ")) continue; hvdb::GridPtr grid; if (readMetadataOnly) { grid = file.readGridMetadata(gridName); } else if (clip) { grid = file.readGrid(gridName, clipBBox); } else { grid = file.readGrid(gridName); } if (grid) { // Copy file-level metadata into the grid, then create (if necessary) // and set a primitive attribute for each metadata item. for (openvdb::MetaMap::ConstMetaIterator fileMetaIt = fileMetadata->beginMeta(), end = fileMetadata->endMeta(); fileMetaIt != end; ++fileMetaIt) { // Resolve file- and grid-level metadata name conflicts // in favor of the grid-level metadata. if (openvdb::Metadata::Ptr meta = fileMetaIt->second) { const std::string name = fileMetaIt->first; if (!(*grid)[name]) { grid->insertMeta(name, *meta); } } } // Add a new VDB primitive for this grid. // Note: this clears the grid's metadata. GEO_PrimVDB* vdb = hvdb::createVdbPrimitive(*gdp, grid); // Add the primitive to the group. if (group) group->add(vdb); } } file.close(); // If a group was created but no grids were added to it, delete the group. if (group && group->isEmpty()) gdp->destroyPrimitiveGroup(group); } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
15,956
C++
34.618303
97
0.583918
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Activate.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Activate.cc /// /// @author FX R&D OpenVDB team /// /// @brief Activate VDBs according to various rules // OpenVDB and Houdini use different relative directories, but SESI_OPENVDB // is not yet defined at this point. #if 1 #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #else #include "Utils.h" #include "ParmFactory.h" #include "SOP_NodeVDB.h" #endif #include <GU/GU_PrimVDB.h> #include <OP/OP_Node.h> #include <OP/OP_Operator.h> #include <OP/OP_OperatorTable.h> #include <PRM/PRM_Parm.h> #include <openvdb/openvdb.h> #include <openvdb/Types.h> #include <openvdb/tools/Morphology.h> #include <openvdb/tools/Prune.h> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; using namespace openvdb_houdini; enum REGIONTYPE_NAMES { REGIONTYPE_POSITION, REGIONTYPE_VOXEL, REGIONTYPE_EXPAND, REGIONTYPE_REFERENCE, REGIONTYPE_DEACTIVATE, REGIONTYPE_FILL }; enum OPERATION_NAMES { OPERATION_UNION, OPERATION_INTERSECT, OPERATION_SUBTRACT, OPERATION_COPY }; class SOP_VDBActivate : public hvdb::SOP_NodeVDB { public: const char *inputLabel(unsigned idx) const override; int isRefInput(unsigned i) const override; bool updateParmsFlags() override; static OP_Node *factory(OP_Network*, const char *, OP_Operator*); class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; openvdb::CoordBBox getIndexSpaceBounds(OP_Context &context, const GEO_PrimVDB &vdb); UT_BoundingBox getWorldBBox(fpreal t); protected: REGIONTYPE_NAMES REGIONTYPE(double t) { return (REGIONTYPE_NAMES) evalInt("regiontype", 0, t); } OPERATION_NAMES OPERATION(fpreal t) { return (OPERATION_NAMES) evalInt("operation", 0, t); } UT_Vector3D CENTER(fpreal t) { return UT_Vector3D(evalFloat("center", 0, t), evalFloat("center", 1, t), evalFloat("center", 2, t)); } UT_Vector3D SIZE(fpreal t) { return 
UT_Vector3D(evalFloat("size", 0, t), evalFloat("size", 1, t), evalFloat("size", 2, t)); } openvdb::Coord MINPOS(fpreal t) { return openvdb::Coord(evalVec3i("min", t)); } openvdb::Coord MAXPOS(fpreal t) { return openvdb::Coord(evalVec3i("max", t)); } }; protected: SOP_VDBActivate(OP_Network *net, const char *name, OP_Operator *entry); ~SOP_VDBActivate() override {} REGIONTYPE_NAMES REGIONTYPE(double t) { return (REGIONTYPE_NAMES) evalInt("regiontype", 0, t); } OPERATION_NAMES OPERATION(fpreal t) { return (OPERATION_NAMES) evalInt("operation", 0, t); } }; void #ifdef SESI_OPENVDB new_SOP_VDBActivate(OP_OperatorTable *table) #else newSopOperator(OP_OperatorTable *table) #endif { if (table == NULL) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Source Group") .setHelpText("Specify a subset of the input VDB grids to be processed.") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("The vdb primitives to change the active region in.") .setDocumentation("The vdb primitives to change the active region in.")); // Match OPERATION const char* operations[] = { "union", "Union", "intersect", "Intersect", "subtract", "A - B", "copy", "Copy", NULL }; parms.add(hutil::ParmFactory(PRM_ORD, "operation", "Operation") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, operations) .setTooltip("The vdb's current region is combined with the specified region in one of several ways.") .setDocumentation( R"(The vdb's current region is combined with the specified region in one of several ways. Union: All voxels that lie in the specified region will be activated. Other voxels will retain their original activation states. Intersect: Any voxel not in the specified region will be deactivated and set to the background value. A - B: Any voxel that is in the specified region will be deactivated and set to the background value. Copy: If a voxel is outside the specified region, it is set to inactive and the background value. 
If it is inside, it is marked as active.)")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "setvalue", "Write Value") .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setDefault(PRMoneDefaults)); parms.add(hutil::ParmFactory(PRM_FLT, "value", "Value") .setDefault(PRMoneDefaults) .setTooltip("In the Union and Copy modes, when voxels are marked active they can also be initialized to a constant value.") .setDocumentation( R"(In the Union and Copy modes, when voxels are marked active they can also be initialized to a constant value. This will be done to all voxels that are made active by the specification - including those that were already active. Thus, the Voxel Coordinats option will have the effect of setting a cube area to a constant value.)")); // Match REGIONTYPE parms.beginExclusiveSwitcher("regiontype", "Region Type"); parms.addFolder("Position"); /* This defines a cube in SOP space. Any voxel that touches this cube will be part of the selected region. */ parms.add(hutil::ParmFactory(PRM_XYZ, "center", "Center") .setVectorSize(3) .setDefault(PRMzeroDefaults) .setTooltip("This defines a cube in SOP space.") .setDocumentation( R"(This defines a cube in SOP space. Any voxel that touches this cube will be part of the selected region.)")); parms.add(hutil::ParmFactory(PRM_XYZ, "size", "Size") .setVectorSize(3) .setDefault(PRMzeroDefaults) .setTooltip("This defines a cube in SOP space.") .setDocumentation( R"(This defines a cube in SOP space. Any voxel that touches this cube will be part of the selected region.)")); parms.addFolder("Voxel"); /* Defines minimum and maximum values of a box in voxel-coordinates. This is an inclusive range, so includes the maximum voxel. */ parms.add(hutil::ParmFactory(PRM_XYZ, "min", "Min") .setVectorSize(3) .setDefault(PRMzeroDefaults) .setTooltip("Defines minimum and maximum values of a box in voxel-coordinates.") .setDocumentation( R"(Defines minimum values of a box in voxel-coordinates. 
This is an inclusive range, so includes the maximum voxel.)")); parms.add(hutil::ParmFactory(PRM_XYZ, "max", "Max") .setVectorSize(3) .setDefault(PRMzeroDefaults) .setTooltip("Defines minimum and maximum values of a box in voxel-coordinates.") .setDocumentation( R"(Defines maximum values of a box in voxel-coordinates. This is an inclusive range, so includes the maximum voxel.)")); parms.addFolder("Expand"); /* Expand the active area by the specified number of voxels. Does not support operation or setting of values. */ parms.add(hutil::ParmFactory(PRM_INT, "expand", "Voxels to Expand") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_FREE, -5, PRM_RANGE_FREE, 5) .setTooltip("Expand the active area by the specified number of voxels.") .setDocumentation( R"(Expand the active area by the specified number of voxels. Does not support operation or setting of values.)")); parms.addFolder("Reference"); /* Uses the second input to determine the selected region. */ parms.add(hutil::ParmFactory(PRM_STRING, "boundgroup", "Bound Group") .setChoiceList(&hutil::PrimGroupMenuInput2) .setTooltip("Which primitives of the second input contribute to the bounding box computation.") .setDocumentation( R"(Which primitives of the second input contribute to the bounding box computation.)")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "usevdb", "Activate Using VDBs") .setDefault(PRMzeroDefaults) .setTooltip("If turned on, only VDBs are used for activation.") .setDocumentation( R"(If turned on, only VDBs are used for activation. They will activate wherever they themselves are already active. This can be used to transfer the active region from one VDB to another, even if they are not aligned. If turned off, the bounding box of the chosen primitives are used instead and activated as if they were specified as World Positions.)")); parms.addFolder("Deactivate"); /* Any voxels that have the background value will be deactivated. 
This is useful for cleaning up the result of an operation that may have speculatively activated a large band of voxels, but may not have placed non-background values in all of them. For example, you may have a VDB Activate before a Volume VOP with Expand turned on to ensure you have room to displace the volume. Then when you are done, you can use one with Deactivate to free up the voxels you didn't need to use. */ parms.addFolder("Fill SDF"); /* Any voxels that are inside the SDF will be marked active. If they were previously inactive, they will be set to the negative-background values. Tiles will remain sparse in this process. */ parms.endSwitcher(); // Prune toggle parms.add(hutil::ParmFactory(PRM_TOGGLE, "prune", "Prune Tolerance") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip("This tolerance is used to detect constant regions and collapse them.") .setDocumentation( R"(After building the VDB grid there may be undetected constant tiles. This tolerance is used to detect constant regions and collapse them. Such areas that are within the background value will also be marked inactive.)")); // Pruning tolerance slider parms.add(hutil::ParmFactory( PRM_FLT_J, "tolerance", "Prune Tolerance") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 1) .setTooltip("This tolerance is used to detect constant regions and collapse them.") .setDocumentation( R"(After building the VDB grid there may be undetected constant tiles. This tolerance is used to detect constant regions and collapse them. 
Such areas that are within the background value will also be marked inactive.)")); hvdb::OpenVDBOpFactory("VDB Activate", SOP_VDBActivate::factory, parms, *table) .addInput("VDBs to Activate") .addOptionalInput("Bounds to Activate") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_VDBActivate::Cache; }) .setDocumentation( R"(#icon: COMMON/openvdb #tags: vdb = OpenVDB Activate = """Activates voxel regions of a VDB for further processing.""" [Include:volume_types] Many volume operations, such as Volume Mix and Volume VOP, only process active voxels in the sparse volume. This can be a problem if you know a certain area in space will evaluate to a non-zero value, but it is inactive in your original volume. The VDB Activate SOP provides tools for manipulating this active region. It can also fill the newly added regions to a constant value, useful for interactively determining what is changing. TIP: To see the current active region, you can use the VDB Visualize SOP and set it to Tree Nodes, Disabled; Active Constant Tiles, Wireframe Box; and Active Voxels, Wireframe Box. @related - [Node:sop/vdb] - [Node:sop/vdbactivatesdf] - [Node:sop/volumevop] - [Node:sop/volumemix] )"); } bool SOP_VDBActivate::updateParmsFlags() { bool has_bounds = (nInputs() > 1); REGIONTYPE_NAMES regiontype = REGIONTYPE(0.0f); OPERATION_NAMES operation = OPERATION(0.0f); bool regionusesvalue = (regiontype != REGIONTYPE_EXPAND) && (regiontype != REGIONTYPE_DEACTIVATE); bool operationusesvalue = (operation == OPERATION_UNION) || (operation == OPERATION_COPY); if (regiontype == REGIONTYPE_FILL) regionusesvalue = false; // Disable the region type switcher int changed = 0; changed += enableParm("boundgroup", has_bounds); changed += enableParm("usevdb", has_bounds); changed += enableParm("operation", regionusesvalue); // Only union supports writing values. 
changed += enableParm("setvalue", regionusesvalue && operationusesvalue); changed += enableParm("value", regionusesvalue && operationusesvalue && evalInt("setvalue", 0, 0.0)); changed += enableParm("tolerance", (evalInt("prune", 0, 0.0f) != 0)); return changed > 0; } SOP_VDBActivate::SOP_VDBActivate(OP_Network *net, const char *name, OP_Operator *entry) : SOP_NodeVDB(net, name, entry) {} OP_Node * SOP_VDBActivate::factory(OP_Network *net, const char *name, OP_Operator *entry) { return new SOP_VDBActivate(net, name, entry); } UT_BoundingBox SOP_VDBActivate::Cache::getWorldBBox(fpreal t) { UT_Vector3D center = CENTER(t); UT_Vector3D size = SIZE(t); return UT_BoundingBox(center - 0.5*size, center + 0.5*size); } // Get a bounding box around the world space bbox in index space static openvdb::CoordBBox sopSopToIndexBBox(UT_BoundingBoxD sop_bbox, const GEO_PrimVDB &vdb) { UT_Vector3D corners[8]; sop_bbox.getBBoxPoints(corners); openvdb::CoordBBox index_bbox; for (int i=0; i<8; i++) { int x, y, z; vdb.posToIndex(corners[i], x, y, z); openvdb::Coord coord(x,y,z); if (i == 0) index_bbox = openvdb::CoordBBox(coord, coord); else index_bbox.expand(openvdb::Coord(x, y, z)); } return index_bbox; } template <typename GridType> void sopDoPrune(GridType &grid, bool doprune, double tolerance) { typedef typename GridType::ValueType ValueT; // No matter what, axe inactive voxels. 
openvdb::tools::pruneInactive(grid.tree()); // Optionally prune live tiles if (doprune) { OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN const auto value = openvdb::zeroVal<ValueT>() + tolerance; OPENVDB_NO_TYPE_CONVERSION_WARNING_END grid.tree().prune(static_cast<ValueT>(value)); } } // The result of the union of active regions goes into grid_a template <typename GridType> static void sopDeactivate(GridType &grid, int dummy) { typename GridType::Accessor access = grid.getAccessor(); typedef typename GridType::ValueType ValueT; ValueT background = grid.background(); ValueT value; UT_Interrupt *boss = UTgetInterrupt(); for (typename GridType::ValueOnCIter iter = grid.cbeginValueOn(); iter; ++iter) { if (boss->opInterrupt()) break; openvdb::CoordBBox bbox = iter.getBoundingBox(); for (int k=bbox.min().z(); k<=bbox.max().z(); k++) { for (int j=bbox.min().y(); j<=bbox.max().y(); j++) { for (int i=bbox.min().x(); i<=bbox.max().x(); i++) { openvdb::Coord coord(i, j, k); // If it is on... if (access.probeValue(coord, value)) { if (value == background) { access.setValueOff(coord); } } } } } } } template <typename GridType> static void sopFillSDF(GridType &grid, int dummy) { typename GridType::Accessor access = grid.getAccessor(); typedef typename GridType::ValueType ValueT; ValueT value; UT_Interrupt *boss = UTgetInterrupt(); ValueT background = grid.background(); for (typename GridType::ValueOffCIter iter = grid.cbeginValueOff(); iter; ++iter) { if (boss->opInterrupt()) break; openvdb::CoordBBox bbox = iter.getBoundingBox(); // Assuming the SDF is at all well-formed, any crossing // of sign must have a crossing of inactive->active. openvdb::Coord coord(bbox.min().x(), bbox.min().y(), bbox.min().z()); // We do not care about the active state as it is hopefully inactive access.probeValue(coord, value); if (value < 0) { // Fill the region to negative background. 
grid.fill(bbox, -background, /*active=*/true); } } } template <typename GridType> static void sopDilateVoxels(GridType& grid, int count) { openvdb::tools::dilateVoxels(grid.tree(), count); } template <typename GridType> static void sopErodeVoxels(GridType& grid, int count) { openvdb::tools::erodeVoxels(grid.tree(), count); } // Based on mode the parameters imply, get an index space bounds for this vdb openvdb::CoordBBox SOP_VDBActivate::Cache::getIndexSpaceBounds(OP_Context &context, const GEO_PrimVDB &vdb) { fpreal t = context.getTime(); using namespace openvdb; CoordBBox index_bbox; // Get the bbox switch(REGIONTYPE(t)) { case REGIONTYPE_POSITION: // world index_bbox = sopSopToIndexBBox(getWorldBBox(t), vdb); break; case REGIONTYPE_VOXEL: // index index_bbox = CoordBBox(MINPOS(t), MAXPOS(t)); break; default: UT_ASSERT("Invalid region type" == nullptr); break; } return index_bbox; } GEO_PrimVDB::ActivateOperation sopXlateOperation(OPERATION_NAMES operation) { switch (operation) { case OPERATION_UNION: return GEO_PrimVDB::ACTIVATE_UNION; case OPERATION_INTERSECT: return GEO_PrimVDB::ACTIVATE_INTERSECT; case OPERATION_SUBTRACT: return GEO_PrimVDB::ACTIVATE_SUBTRACT; case OPERATION_COPY: return GEO_PrimVDB::ACTIVATE_COPY; } UT_ASSERT("Unhandled operation" == nullptr); return GEO_PrimVDB::ACTIVATE_UNION; } OP_ERROR SOP_VDBActivate::Cache::cookVDBSop(OP_Context &context) { using namespace openvdb; using namespace openvdb::math; try { fpreal t = context.getTime(); UT_Interrupt *boss = UTgetInterrupt(); // Get the group UT_String group_name; evalString(group_name, "group", 0, t); const GA_PrimitiveGroup* group = 0; if (group_name.isstring()) { bool success; group = gop.parseOrderedPrimitiveDetached((const char *) group_name, gdp, false, success); } // A group was specified but not found if (!group && group_name.isstring()) { addError(SOP_ERR_BADGROUP, group_name); return error(); } UT_AutoInterrupt progress("Activating VDB grids"); // For each primitive in the group, go 
through the primitives in the // second input's group and GEO_Primitive *prim; GA_FOR_ALL_GROUP_PRIMITIVES(gdp, group, prim) { if (!(prim->getPrimitiveId() & GEO_PrimTypeCompat::GEOPRIMVDB)) break; GEO_PrimVDB *vdb = UTverify_cast<GEO_PrimVDB *>(prim); vdb->makeGridUnique(); // Apply the operation for all VDB primitives on input 2 const GU_Detail *bounds_src = inputGeo(1, context); switch (REGIONTYPE(t)) { case REGIONTYPE_REFERENCE: // Second input! { if (bounds_src) { UT_String boundgroupname; evalString(boundgroupname, "boundgroup", 0, t); const GA_PrimitiveGroup *boundgroup = 0; if (boundgroupname.isstring()) { bool success; boundgroup = gop.parseOrderedPrimitiveDetached((const char *) boundgroupname, bounds_src, true, success); if (!success) addWarning(SOP_ERR_BADGROUP, boundgroupname); } if (evalInt("usevdb", 0, t)) { bool foundvdb = false; const GEO_Primitive *input_prim; GA_FOR_ALL_GROUP_PRIMITIVES(bounds_src, boundgroup, input_prim) { if (!(input_prim->getPrimitiveId() & GEO_PrimTypeCompat::GEOPRIMVDB)) break; const GEO_PrimVDB *input_vdb = UTverify_cast<const GEO_PrimVDB *>(input_prim); vdb->activateByVDB(input_vdb, sopXlateOperation(OPERATION(t)), evalInt("setvalue", 0, t), evalFloat("value", 0, t)); foundvdb = true; } if (!foundvdb) { addWarning(SOP_MESSAGE, "No VDB primitives found in second input"); } } else { // Activate by bounding box. 
UT_BoundingBox bbox; bounds_src->getBBox(&bbox, boundgroup); vdb->activateIndexBBox(sopSopToIndexBBox(bbox, *vdb), sopXlateOperation(OPERATION(t)), evalInt("setvalue", 0, t), evalFloat("value", 0, t)); } } else { addError(SOP_MESSAGE, "Not enough inputs."); } break; } case REGIONTYPE_POSITION: // World space case REGIONTYPE_VOXEL: // Coord Space { vdb->activateIndexBBox(getIndexSpaceBounds(context, *vdb), sopXlateOperation(OPERATION(t)), evalInt("setvalue", 0, t), evalFloat("value", 0, t)); break; } case REGIONTYPE_EXPAND: // Dilate { int dilation = static_cast<int>(evalInt("expand", 0, t)); if (dilation > 0) { if (boss->opInterrupt()) break; UTvdbCallAllTopology(vdb->getStorageType(), sopDilateVoxels, vdb->getGrid(), dilation); } if (dilation < 0) { if (boss->opInterrupt()) break; UTvdbCallAllTopology(vdb->getStorageType(), sopErodeVoxels, vdb->getGrid(), -dilation); } break; } case REGIONTYPE_DEACTIVATE: // Deactivate { if (boss->opInterrupt()) break; UTvdbCallAllTopology(vdb->getStorageType(), sopDeactivate, vdb->getGrid(), 1); break; } case REGIONTYPE_FILL: // Fill interior of SDF. { if (boss->opInterrupt()) break; UTvdbCallRealType(vdb->getStorageType(), sopFillSDF, vdb->getGrid(), 1); break; } } UTvdbCallAllTopology(vdb->getStorageType(), sopDoPrune, vdb->getGrid(), evalInt("prune", 0, t), evalFloat("tolerance", 0, t)); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); } const char * SOP_VDBActivate::inputLabel(unsigned index) const { switch (index) { case 0: return "VDBs to activate"; case 1: return "Region to activate"; } return NULL; } int SOP_VDBActivate::isRefInput(unsigned i) const { switch (i) { case 0: return false; case 1: return true; default: return true; } }
25,557
C++
33.168449
168
0.57722
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/geometry.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file geometry.cc /// @author FX R&D OpenVDB team #include "geometry.h" #include <GU/GU_PrimPoly.h> namespace houdini_utils { void createBox(GU_Detail& gdp, UT_Vector3 corners[8], const UT_Vector3* color, bool shaded, float alpha) { // Create points GA_Offset ptoff[8]; for (size_t i = 0; i < 8; ++i) { ptoff[i] = gdp.appendPointOffset(); gdp.setPos3(ptoff[i], corners[i].x(), corners[i].y(), corners[i].z()); } if (color != NULL) { GA_RWHandleV3 cd(gdp.addDiffuseAttribute(GA_ATTRIB_POINT)); for (size_t i = 0; i < 8; ++i) { cd.set(ptoff[i], *color); } } if (alpha < 0.99) { GA_RWHandleF A(gdp.addAlphaAttribute(GA_ATTRIB_POINT)); for (size_t i = 0; i < 8; ++i) { A.set(ptoff[i], alpha); } } GEO_PrimPoly *poly; if (shaded) { // Bottom poly = GU_PrimPoly::build(&gdp, 0); poly->appendVertex(ptoff[0]); poly->appendVertex(ptoff[1]); poly->appendVertex(ptoff[2]); poly->appendVertex(ptoff[3]); poly->close(); // Top poly = GU_PrimPoly::build(&gdp, 0); poly->appendVertex(ptoff[7]); poly->appendVertex(ptoff[6]); poly->appendVertex(ptoff[5]); poly->appendVertex(ptoff[4]); poly->close(); // Front poly = GU_PrimPoly::build(&gdp, 0); poly->appendVertex(ptoff[4]); poly->appendVertex(ptoff[5]); poly->appendVertex(ptoff[1]); poly->appendVertex(ptoff[0]); poly->close(); // Back poly = GU_PrimPoly::build(&gdp, 0); poly->appendVertex(ptoff[6]); poly->appendVertex(ptoff[7]); poly->appendVertex(ptoff[3]); poly->appendVertex(ptoff[2]); poly->close(); // Left poly = GU_PrimPoly::build(&gdp, 0); poly->appendVertex(ptoff[0]); poly->appendVertex(ptoff[3]); poly->appendVertex(ptoff[7]); poly->appendVertex(ptoff[4]); poly->close(); // Right poly = GU_PrimPoly::build(&gdp, 0); poly->appendVertex(ptoff[1]); poly->appendVertex(ptoff[5]); poly->appendVertex(ptoff[6]); poly->appendVertex(ptoff[2]); poly->close(); } else { // 12 Edges as one line poly = GU_PrimPoly::build(&gdp, 0, GU_POLY_OPEN); 
poly->appendVertex(ptoff[0]); poly->appendVertex(ptoff[1]); poly->appendVertex(ptoff[2]); poly->appendVertex(ptoff[3]); poly->appendVertex(ptoff[0]); poly->appendVertex(ptoff[4]); poly->appendVertex(ptoff[5]); poly->appendVertex(ptoff[6]); poly->appendVertex(ptoff[7]); poly->appendVertex(ptoff[4]); poly->appendVertex(ptoff[5]); poly->appendVertex(ptoff[1]); poly->appendVertex(ptoff[2]); poly->appendVertex(ptoff[6]); poly->appendVertex(ptoff[7]); poly->appendVertex(ptoff[3]); } } // createBox } // namespace houdini_utils
3,141
C++
26.805309
78
0.548551
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Fill.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Fill.cc /// /// @author FX R&D OpenVDB team #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <PRM/PRM_Parm.h> #include <UT/UT_Interrupt.h> #include <UT/UT_UniquePtr.h> #include <memory> #include <stdexcept> #include <string> #include <type_traits> namespace hutil = houdini_utils; namespace hvdb = openvdb_houdini; class SOP_OpenVDB_Fill: public hvdb::SOP_NodeVDB { public: enum Mode { MODE_INDEX = 0, MODE_WORLD, MODE_GEOM }; SOP_OpenVDB_Fill(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Fill() override; static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned input) const override { return (input == 1); } static Mode getMode(const std::string& modeStr) { if (modeStr == "index") return MODE_INDEX; if (modeStr == "world") return MODE_WORLD; if (modeStr == "geom") return MODE_GEOM; throw std::runtime_error{"unrecognized mode \"" + modeStr + "\""}; } class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; protected: bool updateParmsFlags() override; void resolveObsoleteParms(PRM_ParmList*) override; }; void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Specify a subset of the input VDBs to be processed.") .setDocumentation( "A subset of the input VDBs to be processed" " (see [specifying volumes|/model/volumes#group])")); parms.add(hutil::ParmFactory(PRM_STRING, "mode", "Bounds") .setDefault("index") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "index", "Min and Max in Index Space", "world", "Min and Max in World Space", "geom", "Reference Geometry" }) .setTooltip( "Index Space:\n" " Interpret the given min and max coordinates in index-space 
units.\n" "World Space:\n" " Interpret the given min and max coordinates in world-space units.\n" "Reference Geometry:\n" " Use the world-space bounds of the reference input geometry.") .setDocumentation( "How to specify the bounding box to be filled\n\n" "Index Space:\n" " Interpret the given min and max coordinates in" " [index-space|http://www.openvdb.org/documentation/doxygen/overview.html#subsecVoxSpace] units.\n" "World Space:\n" " Interpret the given min and max coordinates in" " [world-space|http://www.openvdb.org/documentation/doxygen/overview.html#subsecWorSpace] units.\n" "Reference Geometry:\n" " Use the world-space bounds of the reference input geometry.\n")); parms.add(hutil::ParmFactory(PRM_INT_XYZ, "min", "Min Coord") .setVectorSize(3) .setTooltip("The minimum coordinate of the bounding box to be filled")); parms.add(hutil::ParmFactory(PRM_INT_XYZ, "max", "Max Coord") .setVectorSize(3) .setTooltip("The maximum coordinate of the bounding box to be filled")); parms.add(hutil::ParmFactory(PRM_XYZ, "worldmin", "Min Coord") .setVectorSize(3) .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_XYZ, "worldmax", "Max Coord") .setVectorSize(3) .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_XYZ, "val", "Value").setVectorSize(3) .setTypeExtended(PRM_TYPE_JOIN_PAIR) .setTooltip( "The value with which to fill voxels\n" "(y and z are ignored when filling scalar grids)")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "active", "Active") .setDefault(PRMoneDefaults) .setTooltip("If enabled, activate voxels in the fill region, otherwise deactivate them.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "sparse", "Sparse") .setDefault(PRMoneDefaults) .setTooltip("If enabled, represent the filled region sparsely (if possible).")); hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "value", "Value")); hvdb::OpenVDBOpFactory("VDB Fill", SOP_OpenVDB_Fill::factory, parms, *table) .setNativeName("") 
.setObsoleteParms(obsoleteParms) .addInput("Input with VDB grids to operate on") .addOptionalInput("Optional bounding geometry") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Fill::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Fill and activate/deactivate regions of voxels within a VDB volume.\"\"\"\n\ \n\ @overview\n\ \n\ This node sets all voxels within an axis-aligned bounding box of a VDB volume\n\ to a given value and active state.\n\ By default, the operation uses a sparse voxel representation to reduce\n\ the memory footprint of the output volume.\n\ \n\ @related\n\ - [Node:sop/vdbactivate]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } void SOP_OpenVDB_Fill::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; PRM_Parm* parm = obsoleteParms->getParmPtr("value"); if (parm && !parm->isFactoryDefault()) { // Transfer the scalar value of the obsolete parameter "value" // to the new, vector-valued parameter "val". const fpreal val = obsoleteParms->evalFloat("value", 0, /*time=*/0.0); setFloat("val", 0, 0.0, val); setFloat("val", 1, 0.0, val); setFloat("val", 2, 0.0, val); } // Delegate to the base class. 
hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } bool SOP_OpenVDB_Fill::updateParmsFlags() { bool changed = false; const fpreal time = 0; //int refExists = (nInputs() == 2); Mode mode = MODE_INDEX; try { mode = getMode(evalStdString("mode", time)); } catch (std::runtime_error&) {} switch (mode) { case MODE_INDEX: changed |= enableParm("min", true); changed |= enableParm("max", true); changed |= setVisibleState("min", true); changed |= setVisibleState("max", true); changed |= setVisibleState("worldmin", false); changed |= setVisibleState("worldmax", false); break; case MODE_WORLD: changed |= enableParm("worldmin", true); changed |= enableParm("worldmax", true); changed |= setVisibleState("min", false); changed |= setVisibleState("max", false); changed |= setVisibleState("worldmin", true); changed |= setVisibleState("worldmax", true); break; case MODE_GEOM: changed |= enableParm("min", false); changed |= enableParm("max", false); changed |= enableParm("worldmin", false); changed |= enableParm("worldmax", false); break; } return changed; } OP_Node* SOP_OpenVDB_Fill::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Fill(net, name, op); } SOP_OpenVDB_Fill::SOP_OpenVDB_Fill(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } SOP_OpenVDB_Fill::~SOP_OpenVDB_Fill() { } namespace { // Convert a Vec3 value to a vector of another value type or to a scalar value // Overload for scalar types (discards all but the first vector component) template<typename ValueType> inline typename std::enable_if<!openvdb::VecTraits<ValueType>::IsVec, ValueType>::type convertValue(const openvdb::Vec3R& val) { return ValueType(val[0]); } // Overload for Vec2 types (not currently used) template<typename ValueType> inline typename std::enable_if<openvdb::VecTraits<ValueType>::IsVec && openvdb::VecTraits<ValueType>::Size == 2, ValueType>::type convertValue(const openvdb::Vec3R& val) { using ElemType = typename 
openvdb::VecTraits<ValueType>::ElementType; return ValueType(ElemType(val[0]), ElemType(val[1])); } // Overload for Vec3 types template<typename ValueType> inline typename std::enable_if<openvdb::VecTraits<ValueType>::IsVec && openvdb::VecTraits<ValueType>::Size == 3, ValueType>::type convertValue(const openvdb::Vec3R& val) { using ElemType = typename openvdb::VecTraits<ValueType>::ElementType; return ValueType(ElemType(val[0]), ElemType(val[1]), ElemType(val[2])); } // Overload for Vec4 types (not currently used) template<typename ValueType> inline typename std::enable_if<openvdb::VecTraits<ValueType>::IsVec && openvdb::VecTraits<ValueType>::Size == 4, ValueType>::type convertValue(const openvdb::Vec3R& val) { using ElemType = typename openvdb::VecTraits<ValueType>::ElementType; return ValueType(ElemType(val[0]), ElemType(val[1]), ElemType(val[2]), ElemType(1.0)); } //////////////////////////////////////// struct FillOp { const openvdb::CoordBBox indexBBox; const openvdb::BBoxd worldBBox; const openvdb::Vec3R value; const bool active, sparse; FillOp(const openvdb::CoordBBox& b, const openvdb::Vec3R& val, bool on, bool sparse_): indexBBox(b), value(val), active(on), sparse(sparse_) {} FillOp(const openvdb::BBoxd& b, const openvdb::Vec3R& val, bool on, bool sparse_): worldBBox(b), value(val), active(on), sparse(sparse_) {} template<typename GridT> void operator()(GridT& grid) const { openvdb::CoordBBox bbox = indexBBox; if (worldBBox) { openvdb::math::Vec3d imin, imax; openvdb::math::calculateBounds(grid.constTransform(), worldBBox.min(), worldBBox.max(), imin, imax); bbox.reset(openvdb::Coord::floor(imin), openvdb::Coord::ceil(imax)); } using ValueT = typename GridT::ValueType; if (sparse) { grid.sparseFill(bbox, convertValue<ValueT>(value), active); } else { grid.denseFill(bbox, convertValue<ValueT>(value), active); } } }; } // unnamed namespace OP_ERROR SOP_OpenVDB_Fill::Cache::cookVDBSop(OP_Context& context) { try { const fpreal t = context.getTime(); const 
GA_PrimitiveGroup* group = matchGroup(*gdp, evalStdString("group", t)); const openvdb::Vec3R value = evalVec3R("val", t); const bool active = evalInt("active", 0, t), sparse = evalInt("sparse", 0, t); UT_UniquePtr<const FillOp> fillOp; switch (SOP_OpenVDB_Fill::getMode(evalStdString("mode", t))) { case MODE_INDEX: { const openvdb::CoordBBox bbox( openvdb::Coord( static_cast<openvdb::Int32>(evalInt("min", 0, t)), static_cast<openvdb::Int32>(evalInt("min", 1, t)), static_cast<openvdb::Int32>(evalInt("min", 2, t))), openvdb::Coord( static_cast<openvdb::Int32>(evalInt("max", 0, t)), static_cast<openvdb::Int32>(evalInt("max", 1, t)), static_cast<openvdb::Int32>(evalInt("max", 2, t)))); fillOp.reset(new FillOp(bbox, value, active, sparse)); break; } case MODE_WORLD: { const openvdb::BBoxd bbox( openvdb::BBoxd::ValueType( evalFloat("worldmin", 0, t), evalFloat("worldmin", 1, t), evalFloat("worldmin", 2, t)), openvdb::BBoxd::ValueType( evalFloat("worldmax", 0, t), evalFloat("worldmax", 1, t), evalFloat("worldmax", 2, t))); fillOp.reset(new FillOp(bbox, value, active, sparse)); break; } case MODE_GEOM: { openvdb::BBoxd bbox; if (const GU_Detail* refGeo = inputGeo(1)) { UT_BoundingBox b; refGeo->getBBox(&b); if (!b.isValid()) { throw std::runtime_error("no reference geometry found"); } bbox.min()[0] = b.xmin(); bbox.min()[1] = b.ymin(); bbox.min()[2] = b.zmin(); bbox.max()[0] = b.xmax(); bbox.max()[1] = b.ymax(); bbox.max()[2] = b.zmax(); } else { throw std::runtime_error("reference input is unconnected"); } fillOp.reset(new FillOp(bbox, value, active, sparse)); break; } } UT_AutoInterrupt progress("Filling VDB grids"); for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { if (progress.wasInterrupted()) { throw std::runtime_error("processing was interrupted"); } hvdb::GEOvdbApply<hvdb::VolumeGridTypes>(**it, *fillOp); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
13,453
C++
32.80402
99
0.60113
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Filter.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Filter.cc /// /// @author FX R&D OpenVDB team /// /// @brief Filtering operations for non-level-set grids #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/tools/Filter.h> #include <OP/OP_AutoLockInputs.h> #include <UT/UT_Interrupt.h> #include <algorithm> #include <iostream> #include <sstream> #include <stdexcept> #include <string> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; namespace { // Operations should be numbered sequentially starting from 0. // When adding an item to the end of this list, be sure to update NUM_OPERATIONS. enum Operation { OP_MEAN = 0, OP_GAUSS, OP_MEDIAN, #ifndef SESI_OPENVDB OP_OFFSET, #endif NUM_OPERATIONS }; inline Operation intToOp(int i) { switch (i) { #ifndef SESI_OPENVDB case OP_OFFSET: return OP_OFFSET; #endif case OP_MEAN: return OP_MEAN; case OP_GAUSS: return OP_GAUSS; case OP_MEDIAN: return OP_MEDIAN; case NUM_OPERATIONS: break; } throw std::runtime_error{"unknown operation (" + std::to_string(i) + ")"}; } inline Operation stringToOp(const std::string& s) { if (s == "mean") return OP_MEAN; if (s == "gauss") return OP_GAUSS; if (s == "median") return OP_MEDIAN; #ifndef SESI_OPENVDB if (s == "offset") return OP_OFFSET; #endif throw std::runtime_error{"unknown operation \"" + s + "\""}; } inline std::string opToString(Operation op) { switch (op) { #ifndef SESI_OPENVDB case OP_OFFSET: return "offset"; #endif case OP_MEAN: return "mean"; case OP_GAUSS: return "gauss"; case OP_MEDIAN: return "median"; case NUM_OPERATIONS: break; } throw std::runtime_error{"unknown operation (" + std::to_string(int(op)) + ")"}; } inline std::string opToMenuName(Operation op) { switch (op) { #ifndef SESI_OPENVDB case OP_OFFSET: return "Offset"; #endif case OP_MEAN: return "Mean Value"; case OP_GAUSS: return "Gaussian"; case 
OP_MEDIAN: return "Median Value"; case NUM_OPERATIONS: break; } throw std::runtime_error{"unknown operation (" + std::to_string(int(op)) + ")"}; } struct FilterParms { FilterParms(Operation _op): op(_op) {} Operation op; int iterations = 1; int radius = 1; float worldRadius = 0.1f; float minMask = 0.0f; float maxMask = 0.0f; bool invertMask = false; bool useWorldRadius = false; const openvdb::FloatGrid* mask = nullptr; #ifndef SESI_OPENVDB float offset = 0.0f; bool verbose = false; #endif }; using FilterParmVec = std::vector<FilterParms>; } // anonymous namespace //////////////////////////////////////// class SOP_OpenVDB_Filter: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Filter(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Filter() override = default; static void registerSop(OP_OperatorTable*); static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned input) const override { return (input == 1); } protected: void resolveObsoleteParms(PRM_ParmList*) override; bool updateParmsFlags() override; public: class Cache: public SOP_VDBCacheOptions { protected: OP_ERROR cookVDBSop(OP_Context&) override; OP_ERROR evalFilterParms(OP_Context&, GU_Detail&, FilterParmVec&); }; // class Cache private: struct FilterOp; }; //////////////////////////////////////// OP_Node* SOP_OpenVDB_Filter::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Filter(net, name, op); } SOP_OpenVDB_Filter::SOP_OpenVDB_Filter(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } void newSopOperator(OP_OperatorTable* table) { SOP_OpenVDB_Filter::registerSop(table); } void SOP_OpenVDB_Filter::registerSop(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; // Input group parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Specify a subset of the input VDB grids to be processed.") 
.setDocumentation( "A subset of the input VDBs to be processed" " (see [specifying volumes|/model/volumes#group])")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "mask", "") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip("Enable / disable the mask.")); parms.add(hutil::ParmFactory(PRM_STRING, "maskname", "Alpha Mask") .setChoiceList(&hutil::PrimGroupMenuInput2) .setTooltip("Optional scalar VDB used for alpha masking\n\n" "Values are assumed to be between 0 and 1.")); // Menu of operations { std::vector<std::string> items; for (int i = 0; i < NUM_OPERATIONS; ++i) { const Operation op = intToOp(i); items.push_back(opToString(op)); // token items.push_back(opToMenuName(op)); // label } parms.add(hutil::ParmFactory(PRM_STRING, "operation", "Operation") .setDefault(opToString(OP_MEAN)) .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setTooltip("The operation to be applied to input volumes") .setDocumentation("\ The operation to be applied to input volumes\n\n\ Gaussian:\n\ Set the value of each active voxel to a Gaussian-weighted sum\n\ over the voxel's neighborhood.\n\n\ This is equivalent to a Gaussian blur.\n\ Mean Value:\n\ Set the value of each active voxel to the average value over\n\ the voxel's neighborhood.\n\n\ One iteration is equivalent to a box blur. 
For a cone blur,\n\ multiply the radius by 0.454545 and use two iterations.\n\ Median Value:\n\ Set the value of each active voxel to the median value over\n\ the voxel's neighborhood.\n\n\ This is useful for suppressing outlier values.\n\ Offset:\n\ Add a given offset to each active voxel's value.\n\ ")); } // Filter radius parms.add(hutil::ParmFactory(PRM_TOGGLE, "worldunits", "Use World Space Radius Units") .setTooltip( "If enabled, specify the filter neighborhood size in world units,\n" "otherwise specify the size in voxels.")); parms.add(hutil::ParmFactory(PRM_INT_J, "radius", "Filter Voxel Radius") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 5) .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_FLT_J, "worldradius", "Filter Radius") .setDefault(0.1) .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 10) .setTooltip("Half the width of a side of the cubic filter neighborhood")); // Number of iterations parms.add(hutil::ParmFactory(PRM_INT_J, "iterations", "Iterations") .setDefault(PRMfourDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 10) .setTooltip("The number of times to apply the operation")); #ifndef SESI_OPENVDB // Offset parms.add(hutil::ParmFactory(PRM_FLT_J, "offset", "Offset") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_UI, -10.0, PRM_RANGE_UI, 10.0) .setTooltip("When the operation is Offset, add this value to all active voxels.")); #endif //Invert mask. 
parms.add(hutil::ParmFactory(PRM_TOGGLE, "invert", "Invert Alpha Mask") .setTooltip("Invert the mask so that alpha value 0 maps to 1 and 1 to 0.")); // Min mask range parms.add(hutil::ParmFactory(PRM_FLT_J, "minmask", "Min Mask Cutoff") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_UI, 0.0, PRM_RANGE_UI, 1.0) .setTooltip("Threshold below which mask values are clamped to zero")); // Max mask range parms.add(hutil::ParmFactory(PRM_FLT_J, "maxmask", "Max Mask Cutoff") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_UI, 0.0, PRM_RANGE_UI, 1.0) .setTooltip("Threshold above which mask values are clamped to one")); #ifndef SESI_OPENVDB // Verbosity toggle. parms.add(hutil::ParmFactory(PRM_TOGGLE, "verbose", "Verbose") .setTooltip("Print the sequence of operations to the terminal.")); #endif // Obsolete parameters hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_SEPARATOR,"sep1", "")); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "minMask", "Min Mask Cutoff") .setDefault(PRMzeroDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "maxMask", "Max Mask Cutoff") .setDefault(PRMoneDefaults)); // Register this operator. 
hvdb::OpenVDBOpFactory("VDB Smooth", SOP_OpenVDB_Filter::factory, parms, *table) #ifndef SESI_OPENVDB .setInternalName("DW_OpenVDBFilter") .addAliasVerbatim("DW_OpenVDBSmooth") #endif .setObsoleteParms(obsoleteParms) .addInput("VDBs to Smooth") .addOptionalInput("Optional VDB Alpha Mask") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Filter::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Filters/smooths the values in a VDB volume.\"\"\"\n\ \n\ @overview\n\ \n\ This node assigns to each active voxel in a VDB volume a value,\n\ such as the mean or median, that is representative of the voxel's neighborhood,\n\ where the neighborhood is a cube centered on the voxel.\n\ This has the effect of reducing high-frequency content and suppressing noise.\n\ \n\ If the optional scalar mask volume is provided, the output value of\n\ each voxel is a linear blend between its input value and the neighborhood value.\n\ A mask value of zero leaves the input value unchanged.\n\ \n\ NOTE:\n\ To filter a level set, use the\n\ [OpenVDB Smooth Level Set|Node:sop/DW_OpenVDBSmoothLevelSet] node.\n\ \n\ @related\n\ - [OpenVDB Noise|Node:sop/DW_OpenVDBNoise]\n\ - [OpenVDB Smooth Level Set|Node:sop/DW_OpenVDBSmoothLevelSet]\n\ - [Node:sop/vdbsmooth]\n\ - [Node:sop/vdbsmoothsdf]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } void SOP_OpenVDB_Filter::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; resolveRenamedParm(*obsoleteParms, "minMask", "minmask"); resolveRenamedParm(*obsoleteParms, "maxMask", "maxmask"); hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } // Disable UI Parms. 
bool SOP_OpenVDB_Filter::updateParmsFlags() { bool changed = false, hasMask = (this->nInputs() == 2); changed |= enableParm("mask", hasMask); bool useMask = bool(evalInt("mask", 0, 0)) && hasMask; changed |= enableParm("invert", useMask); changed |= enableParm("minmask", useMask); changed |= enableParm("maxmask", useMask); changed |= enableParm("maskname", useMask); const bool worldUnits = bool(evalInt("worldunits", 0, 0)); Operation op = OP_MEAN; bool gotOp = false; try { op = stringToOp(evalStdString("operation", 0)); gotOp = true; } catch (std::runtime_error&) {} // Disable and hide unused parameters. if (gotOp) { bool enable = (op == OP_MEAN || op == OP_GAUSS || op == OP_MEDIAN); changed |= enableParm("iterations", enable); changed |= enableParm("radius", enable); changed |= enableParm("worldradius", enable); changed |= setVisibleState("iterations", enable); changed |= setVisibleState("worldunits", enable); changed |= setVisibleState("radius", enable && !worldUnits); changed |= setVisibleState("worldradius", enable && worldUnits); #ifndef SESI_OPENVDB enable = (op == OP_OFFSET); changed |= enableParm("offset", enable); changed |= setVisibleState("offset", enable); #endif } return changed; } //////////////////////////////////////// // Helper class for use with GridBase::apply() struct SOP_OpenVDB_Filter::FilterOp { FilterParmVec opSequence; hvdb::Interrupter* interrupt; template<typename GridT> void operator()(GridT& grid) { using ValueT = typename GridT::ValueType; using MaskT = openvdb::FloatGrid; openvdb::tools::Filter<GridT, MaskT, hvdb::Interrupter> filter(grid, interrupt); for (size_t i = 0, N = opSequence.size(); i < N; ++i) { if (interrupt && interrupt->wasInterrupted()) return; const FilterParms& parms = opSequence[i]; int radius = parms.radius; if (parms.useWorldRadius) { double voxelRadius = double(parms.worldRadius) / grid.voxelSize()[0]; radius = std::max(1, int(voxelRadius)); } filter.setMaskRange(parms.minMask, parms.maxMask); 
filter.invertMask(parms.invertMask); switch (parms.op) { #ifndef SESI_OPENVDB case OP_OFFSET: { const ValueT offset = static_cast<ValueT>(parms.offset); if (parms.verbose) std::cout << "Applying Offset by " << offset << std::endl; filter.offset(offset, parms.mask); } break; #endif case OP_MEAN: #ifndef SESI_OPENVDB if (parms.verbose) { std::cout << "Applying " << parms.iterations << " iterations of mean value" " filtering with a radius of " << radius << std::endl; } #endif filter.mean(radius, parms.iterations, parms.mask); break; case OP_GAUSS: #ifndef SESI_OPENVDB if (parms.verbose) { std::cout << "Applying " << parms.iterations << " iterations of gaussian" " filtering with a radius of " <<radius << std::endl; } #endif filter.gaussian(radius, parms.iterations, parms.mask); break; case OP_MEDIAN: #ifndef SESI_OPENVDB if (parms.verbose) { std::cout << "Applying " << parms.iterations << " iterations of median value" " filtering with a radius of " << radius << std::endl; } #endif filter.median(radius, parms.iterations, parms.mask); break; case NUM_OPERATIONS: break; } } } }; //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Filter::Cache::evalFilterParms( OP_Context& context, GU_Detail&, FilterParmVec& parmVec) { const fpreal now = context.getTime(); const Operation op = stringToOp(evalStdString("operation", 0)); FilterParms parms(op); parms.radius = static_cast<int>(evalInt("radius", 0, now)); parms.worldRadius = float(evalFloat("worldradius", 0, now)); parms.useWorldRadius = bool(evalInt("worldunits", 0, now)); parms.iterations = static_cast<int>(evalInt("iterations", 0, now)); #ifndef SESI_OPENVDB parms.offset = static_cast<float>(evalFloat("offset", 0, now)); parms.verbose = bool(evalInt("verbose", 0, now)); #endif openvdb::FloatGrid::ConstPtr maskGrid; if (this->nInputs() == 2 && evalInt("mask", 0, now)) { const GU_Detail* maskGeo = inputGeo(1); const auto maskName = evalStdString("maskname", now); if (maskGeo) { const GA_PrimitiveGroup* maskGroup = 
parsePrimitiveGroups(maskName.c_str(), GroupCreator(maskGeo)); if (!maskGroup && !maskName.empty()) { addWarning(SOP_MESSAGE, "Mask not found."); } else { hvdb::VdbPrimCIterator maskIt(maskGeo, maskGroup); if (maskIt) { if (maskIt->getStorageType() == UT_VDB_FLOAT) { maskGrid = openvdb::gridConstPtrCast<openvdb::FloatGrid>( maskIt->getGridPtr()); } else { addWarning(SOP_MESSAGE, "The mask grid has to be a FloatGrid."); } } else { addWarning(SOP_MESSAGE, "The mask input is empty."); } } } } parms.mask = maskGrid.get(); parms.minMask = static_cast<float>(evalFloat("minmask", 0, now)); parms.maxMask = static_cast<float>(evalFloat("maxmask", 0, now)); parms.invertMask = evalInt("invert", 0, now); parmVec.push_back(parms); return error(); } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Filter::Cache::cookVDBSop(OP_Context& context) { try { const fpreal now = context.getTime(); FilterOp filterOp; evalFilterParms(context, *gdp, filterOp.opSequence); // Get the group of grids to process. const GA_PrimitiveGroup* group = matchGroup(*gdp, evalStdString("group", now)); hvdb::Interrupter progress("Filtering VDB grids"); filterOp.interrupt = &progress; // Process each VDB primitive in the selected group. for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { if (progress.wasInterrupted()) { throw std::runtime_error("processing was interrupted"); } GU_PrimVDB* vdbPrim = *it; UT_String name = it.getPrimitiveNameOrIndex(); #ifndef SESI_OPENVDB if (evalInt("verbose", 0, now)) { std::cout << "\nFiltering \"" << name << "\"" << std::endl; } #endif if (!hvdb::GEOvdbApply<hvdb::VolumeGridTypes>(*vdbPrim, filterOp)) { std::stringstream ss; ss << "VDB grid " << name << " of type " << vdbPrim->getConstGrid().valueType() << " was skipped"; addWarning(SOP_MESSAGE, ss.str().c_str()); continue; } } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
18,230
C++
30.217466
97
0.614098
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/GR_PrimVDBPoints.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file GR_PrimVDBPoints.cc /// /// @author Dan Bailey, Nick Avramoussis /// /// @brief GR Render Hook and Primitive for VDB PointDataGrid #include <UT/UT_Version.h> #include <openvdb/Grid.h> #include <openvdb/Platform.h> #include <openvdb/Types.h> #include <openvdb/points/PointDataGrid.h> #include <openvdb/points/PointCount.h> #include <openvdb/points/PointConversion.h> #include <openvdb_houdini/PointUtils.h> #include <DM/DM_RenderTable.h> #include <GEO/GEO_PrimVDB.h> #include <GR/GR_Primitive.h> #include <GR/GR_Utils.h> // inViewFrustum #include <GT/GT_PrimitiveTypes.h> #include <GT/GT_PrimVDB.h> #include <GUI/GUI_PrimitiveHook.h> #include <RE/RE_Geometry.h> #include <RE/RE_Render.h> #include <RE/RE_ShaderHandle.h> #include <RE/RE_VertexArray.h> #include <UT/UT_DSOVersion.h> #include <UT/UT_UniquePtr.h> #include <tbb/mutex.h> #include <iostream> #include <limits> #include <sstream> #include <string> #include <utility> #include <vector> //////////////////////////////////////// static RE_ShaderHandle theMarkerDecorShader("decor/GL32/point_marker.prog"); static RE_ShaderHandle theNormalDecorShader("decor/GL32/point_normal.prog"); static RE_ShaderHandle theVelocityDecorShader("decor/GL32/user_point_vector3.prog"); static RE_ShaderHandle theLineShader("basic/GL32/wire_color.prog"); static RE_ShaderHandle thePixelShader("particle/GL32/pixel.prog"); static RE_ShaderHandle thePointShader("particle/GL32/point.prog"); /// @note An additional scale for velocity trails to accurately match /// the visualization of velocity for Houdini points #define VELOCITY_DECOR_SCALE -0.041f; namespace { /// @note The render hook guard should not be required.. // Declare this at file scope to ensure thread-safe initialization. 
tbb::mutex sRenderHookRegistryMutex; bool renderHookRegistered = false; } // anonymous namespace using namespace openvdb; using namespace openvdb::points; //////////////////////////////////////// /// Primitive Render Hook for VDB Points class GUI_PrimVDBPointsHook : public GUI_PrimitiveHook { public: GUI_PrimVDBPointsHook() : GUI_PrimitiveHook("DWA VDB Points") { } ~GUI_PrimVDBPointsHook() override = default; /// This is called when a new GR_Primitive is required for a VDB Points primitive. GR_Primitive* createPrimitive( const GT_PrimitiveHandle& gt_prim, const GEO_Primitive* geo_prim, const GR_RenderInfo* info, const char* cache_name, GR_PrimAcceptResult& processed) override; }; // class GUI_PrimVDBPointsHook /// Primitive object that is created by GUI_PrimVDBPointsHook whenever a /// VDB Points primitive is found. This object can be persistent between /// renders, though display flag changes or navigating though SOPs can cause /// it to be deleted and recreated later. class GR_PrimVDBPoints : public GR_Primitive { public: GR_PrimVDBPoints(const GR_RenderInfo *info, const char *cache_name, const GEO_Primitive* geo_prim); ~GR_PrimVDBPoints() override = default; const char *className() const override { return "GR_PrimVDBPoints"; } /// See if the tetra primitive can be consumed by this primitive. GR_PrimAcceptResult acceptPrimitive(GT_PrimitiveType, int geo_type, const GT_PrimitiveHandle&, const GEO_Primitive*) override; /// This should reset any lists of primitives. void resetPrimitives() override {} /// Called whenever the parent detail is changed, draw modes are changed, /// selection is changed, or certain volatile display options are changed /// (such as level of detail). void update(RE_Render*, const GT_PrimitiveHandle&, const GR_UpdateParms&) override; /// return true if the primitive is in or overlaps the view frustum. /// always returning true will effectively disable frustum culling. 
bool inViewFrustum(const UT_Matrix4D &objviewproj #if (UT_VERSION_INT >= 0x1105014e) // 17.5.334 or later , const UT_BoundingBoxD *bbox #endif ) override; /// Called whenever the primitive is required to render, which may be more /// than one time per viewport redraw (beauty, shadow passes, wireframe-over) /// It also may be called outside of a viewport redraw to do picking of the /// geometry. void render(RE_Render*, GR_RenderMode, GR_RenderFlags, GR_DrawParms) override; int renderPick(RE_Render*, const GR_DisplayOption*, unsigned int, GR_PickStyle, bool) override { return 0; } void renderDecoration(RE_Render*, GR_Decoration, const GR_DecorationParms&) override; protected: void computeCentroid(const openvdb::points::PointDataGrid& grid); void computeBbox(const openvdb::points::PointDataGrid& grid); void updatePosBuffer(RE_Render* r, const openvdb::points::PointDataGrid& grid, const RE_CacheVersion& version); void updateWireBuffer(RE_Render* r, const openvdb::points::PointDataGrid& grid, const RE_CacheVersion& version); bool updateVec3Buffer(RE_Render* r, const openvdb::points::PointDataGrid& grid, const std::string& attributeName, const std::string& bufferName, const RE_CacheVersion& version); bool updateVec3Buffer(RE_Render* r, const std::string& attributeName, const std::string& bufferName, const RE_CacheVersion& version); void removeBuffer(const std::string& name); private: UT_UniquePtr<RE_Geometry> myGeo; UT_UniquePtr<RE_Geometry> myWire; bool mDefaultPointColor = true; openvdb::Vec3f mCentroid{0, 0, 0}; openvdb::BBoxd mBbox; }; //////////////////////////////////////// void newRenderHook(DM_RenderTable* table) { tbb::mutex::scoped_lock lock(sRenderHookRegistryMutex); if (!renderHookRegistered) { static_cast<DM_RenderTable*>(table)->registerGTHook( new GUI_PrimVDBPointsHook(), GT_PRIM_VDB_VOLUME, /*hook_priority=*/1, GUI_HOOK_FLAG_AUGMENT_PRIM); OPENVDB_START_THREADSAFE_STATIC_WRITE renderHookRegistered = true; // mutex-protected 
OPENVDB_FINISH_THREADSAFE_STATIC_WRITE } } static inline bool grIsPointDataGrid(const GT_PrimitiveHandle& gt_prim) { if (gt_prim->getPrimitiveType() != GT_PRIM_VDB_VOLUME) return false; const GT_PrimVDB* gt_vdb = static_cast<const GT_PrimVDB*>(gt_prim.get()); const GEO_PrimVDB* gr_vdb = gt_vdb->getGeoPrimitive(); return (gr_vdb->getStorageType() == UT_VDB_POINTDATA); } GR_Primitive* GUI_PrimVDBPointsHook::createPrimitive( const GT_PrimitiveHandle& gt_prim, const GEO_Primitive* geo_prim, const GR_RenderInfo* info, const char* cache_name, GR_PrimAcceptResult& processed) { if (grIsPointDataGrid(gt_prim)) { processed = GR_PROCESSED; return new GR_PrimVDBPoints(info, cache_name, geo_prim); } processed = GR_NOT_PROCESSED; return nullptr; } //////////////////////////////////////// namespace { using StringPair = std::pair<std::string, std::string>; bool patchShader(RE_Render* r, RE_ShaderHandle& shader, RE_ShaderType type, const std::vector<StringPair>& stringReplacements, const std::vector<std::string>& stringInsertions = {}) { // check if the point shader has already been patched r->pushShader(); r->bindShader(shader); const RE_ShaderStage* const patchedShader = shader->getShader("pointOffset", type); if (patchedShader) { r->popShader(); return false; } // retrieve the shader source and version UT_String source; shader->getShaderSource(r, source, type); const int version = shader->getCodeVersion(); // patch the shader to replace the strings for (const auto& stringPair : stringReplacements) { source.substitute(stringPair.first.c_str(), stringPair.second.c_str(), /*all=*/true); } // patch the shader to insert the strings for (const auto& str: stringInsertions) { source.insert(0, str.c_str()); } // move the version up to the top of the file source.substitute("#version ", "// #version"); std::stringstream ss; ss << "#version " << version << "\n"; source.insert(0, ss.str().c_str()); // remove the existing shader and add the patched one shader->clearShaders(r, type); UT_String 
message; const bool success = shader->addShader(r, type, source, "pointOffset", version, &message); r->popShader(); if (!success) { if (type == RE_SHADER_VERTEX) std::cerr << "Vertex Shader ("; else if (type == RE_SHADER_GEOMETRY) std::cerr << "Geometry Shader ("; else if (type == RE_SHADER_FRAGMENT) std::cerr << "Fragment Shader ("; std::cerr << shader->getName(); std::cerr << ") Compile Failure: " << message.toStdString() << std::endl; } assert(success); return true; } void patchShaderVertexOffset(RE_Render* r, RE_ShaderHandle& shader) { // patch the shader to add a uniform offset to the position static const std::vector<StringPair> stringReplacements { StringPair("void main()", "uniform vec3 offset;\n\nvoid main()"), StringPair("vec4(P, 1.0)", "vec4(P + offset, 1.0)"), StringPair("vec4(P,1.0)", "vec4(P + offset, 1.0)") }; patchShader(r, shader, RE_SHADER_VERTEX, stringReplacements); } void patchShaderNoRedeclarations(RE_Render* r, RE_ShaderHandle& shader) { static const std::vector<StringPair> stringReplacements { StringPair("\t", " "), StringPair(" ", " "), StringPair(" ", " "), StringPair("uniform vec2 glH_DepthProject;", "//uniform vec2 glH_DepthProject;"), StringPair("uniform vec2 glH_ScreenSize", "//uniform vec2 glH_ScreenSize") }; static const std::vector<std::string> stringInsertions { "uniform vec2 glH_DepthProject;", "uniform vec2 glH_ScreenSize;" }; patchShader(r, shader, RE_SHADER_GEOMETRY, stringReplacements, stringInsertions); } } // namespace //////////////////////////////////////// GR_PrimVDBPoints::GR_PrimVDBPoints( const GR_RenderInfo *info, const char *cache_name, const GEO_Primitive*) : GR_Primitive(info, cache_name, GA_PrimCompat::TypeMask(0)) { } GR_PrimAcceptResult GR_PrimVDBPoints::acceptPrimitive(GT_PrimitiveType, int geo_type, const GT_PrimitiveHandle& gt_prim, const GEO_Primitive*) { if (geo_type == GT_PRIM_VDB_VOLUME && grIsPointDataGrid(gt_prim)) return GR_PROCESSED; return GR_NOT_PROCESSED; } namespace gr_primitive_internal { struct 
FillGPUBuffersLeafBoxes { FillGPUBuffersLeafBoxes(UT_Vector3H* buffer, const std::vector<openvdb::Coord>& coords, const openvdb::math::Transform& transform, const openvdb::Vec3f& positionOffset) : mBuffer(buffer) , mCoords(coords) , mTransform(transform) , mPositionOffset(positionOffset) { } void operator()(const tbb::blocked_range<size_t>& range) const { std::vector<UT_Vector3H> corners; corners.reserve(8); for (size_t n = range.begin(), N = range.end(); n != N; ++n) { const openvdb::Coord& origin = mCoords[n]; // define 8 corners corners.clear(); const openvdb::Vec3f pos000 = mTransform.indexToWorld(origin.asVec3d() + openvdb::Vec3f(0.0, 0.0, 0.0)) - mPositionOffset; corners.emplace_back(pos000.x(), pos000.y(), pos000.z()); const openvdb::Vec3f pos001 = mTransform.indexToWorld(origin.asVec3d() + openvdb::Vec3f(0.0, 0.0, 8.0)) - mPositionOffset; corners.emplace_back(pos001.x(), pos001.y(), pos001.z()); const openvdb::Vec3f pos010 = mTransform.indexToWorld(origin.asVec3d() + openvdb::Vec3f(0.0, 8.0, 0.0)) - mPositionOffset; corners.emplace_back(pos010.x(), pos010.y(), pos010.z()); const openvdb::Vec3f pos011 = mTransform.indexToWorld(origin.asVec3d() + openvdb::Vec3f(0.0, 8.0, 8.0)) - mPositionOffset; corners.emplace_back(pos011.x(), pos011.y(), pos011.z()); const openvdb::Vec3f pos100 = mTransform.indexToWorld(origin.asVec3d() + openvdb::Vec3f(8.0, 0.0, 0.0)) - mPositionOffset; corners.emplace_back(pos100.x(), pos100.y(), pos100.z()); const openvdb::Vec3f pos101 = mTransform.indexToWorld(origin.asVec3d() + openvdb::Vec3f(8.0, 0.0, 8.0)) - mPositionOffset; corners.emplace_back(pos101.x(), pos101.y(), pos101.z()); const openvdb::Vec3f pos110 = mTransform.indexToWorld(origin.asVec3d() + openvdb::Vec3f(8.0, 8.0, 0.0)) - mPositionOffset; corners.emplace_back(pos110.x(), pos110.y(), pos110.z()); const openvdb::Vec3f pos111 = mTransform.indexToWorld(origin.asVec3d() + openvdb::Vec3f(8.0, 8.0, 8.0)) - mPositionOffset; corners.emplace_back(pos111.x(), pos111.y(), 
pos111.z()); openvdb::Index64 offset = n*8*3; // Z axis mBuffer[offset++] = corners[0]; mBuffer[offset++] = corners[1]; mBuffer[offset++] = corners[2]; mBuffer[offset++] = corners[3]; mBuffer[offset++] = corners[4]; mBuffer[offset++] = corners[5]; mBuffer[offset++] = corners[6]; mBuffer[offset++] = corners[7]; // Y axis mBuffer[offset++] = corners[0]; mBuffer[offset++] = corners[2]; mBuffer[offset++] = corners[1]; mBuffer[offset++] = corners[3]; mBuffer[offset++] = corners[4]; mBuffer[offset++] = corners[6]; mBuffer[offset++] = corners[5]; mBuffer[offset++] = corners[7]; // X axis mBuffer[offset++] = corners[0]; mBuffer[offset++] = corners[4]; mBuffer[offset++] = corners[1]; mBuffer[offset++] = corners[5]; mBuffer[offset++] = corners[2]; mBuffer[offset++] = corners[6]; mBuffer[offset++] = corners[3]; mBuffer[offset++] = corners[7]; } } ////////// UT_Vector3H* mBuffer; const std::vector<openvdb::Coord>& mCoords; const openvdb::math::Transform& mTransform; const openvdb::Vec3f mPositionOffset; }; // class FillGPUBuffersLeafBoxes } // namespace gr_primitive_internal void GR_PrimVDBPoints::computeCentroid(const openvdb::points::PointDataGrid& grid) { // compute the leaf bounding box in index space openvdb::CoordBBox coordBBox; if (!grid.tree().evalLeafBoundingBox(coordBBox)) { mCentroid.init(0.0f, 0.0f, 0.0f); } else { // get the centroid and convert to world space mCentroid = openvdb::Vec3f(grid.transform().indexToWorld(coordBBox.getCenter())); } } void GR_PrimVDBPoints::computeBbox(const openvdb::points::PointDataGrid& grid) { // compute and store the world-space bounding box of the grid const CoordBBox bbox = grid.evalActiveVoxelBoundingBox(); const BBoxd bboxIndex(bbox.min().asVec3d(), bbox.max().asVec3d()); mBbox = bboxIndex.applyMap(*(grid.transform().baseMap())); } struct PositionAttribute { using ValueType = Vec3f; struct Handle { Handle(PositionAttribute& attribute) : mBuffer(attribute.mBuffer) , mPositionOffset(attribute.mPositionOffset) , 
mStride(attribute.mStride) { } void set(openvdb::Index offset, openvdb::Index /*stride*/, const ValueType& value) { const ValueType transformedValue = value - mPositionOffset; mBuffer[offset * mStride] = UT_Vector3F(transformedValue.x(), transformedValue.y(), transformedValue.z()); } private: UT_Vector3F* mBuffer; const ValueType& mPositionOffset; const Index mStride; }; // struct Handle PositionAttribute(UT_Vector3F* buffer, const ValueType& positionOffset, Index stride = 1) : mBuffer(buffer) , mPositionOffset(positionOffset) , mStride(stride) { } void expand() { } void compact() { } private: UT_Vector3F* mBuffer; const ValueType mPositionOffset; const Index mStride; }; // struct PositionAttribute template <typename T> struct VectorAttribute { using ValueType = T; struct Handle { Handle(VectorAttribute& attribute) : mBuffer(attribute.mBuffer) { } template <typename ValueType> void set(openvdb::Index offset, openvdb::Index /*stride*/, const openvdb::math::Vec3<ValueType>& value) { mBuffer[offset] = UT_Vector3H(float(value.x()), float(value.y()), float(value.z())); } private: UT_Vector3H* mBuffer; }; // struct Handle VectorAttribute(UT_Vector3H* buffer) : mBuffer(buffer) { } void expand() { } void compact() { } private: UT_Vector3H* mBuffer; }; // struct VectorAttribute void GR_PrimVDBPoints::updatePosBuffer(RE_Render* r, const openvdb::points::PointDataGrid& grid, const RE_CacheVersion& version) { const bool gl3 = (getRenderVersion() >= GR_RENDER_GL3); // Initialize the geometry with the proper name for the GL cache if (!myGeo) myGeo.reset(new RE_Geometry); myGeo->cacheBuffers(getCacheName()); using GridType = openvdb::points::PointDataGrid; using TreeType = GridType::TreeType; using AttributeSet = openvdb::points::AttributeSet; const TreeType& tree = grid.tree(); auto iter = tree.cbeginLeaf(); if (!iter) return; const AttributeSet::Descriptor& descriptor = iter->attributeSet().descriptor(); // check if group viewport is in use const 
openvdb::StringMetadata::ConstPtr s = grid.getMetadata<openvdb::StringMetadata>(openvdb_houdini::META_GROUP_VIEWPORT); const std::string groupName = s ? s->value() : ""; const bool useGroup = !groupName.empty() && descriptor.hasGroup(groupName); // count up total points ignoring any leaf nodes that are out of core int numPoints = 0; if (useGroup) { GroupFilter filter(groupName, iter->attributeSet()); numPoints = static_cast<int>(pointCount(tree, filter, /*inCoreOnly=*/true)); } else { NullFilter filter; numPoints = static_cast<int>(pointCount(tree, filter, /*inCoreOnly=*/true)); } if (numPoints == 0) return; // Initialize the number of points in the geometry. myGeo->setNumPoints(numPoints); const size_t positionIndex = descriptor.find("P"); // determine whether position exists if (positionIndex == AttributeSet::INVALID_POS) return; // fetch point position attribute, if its cache version matches, no upload is required. RE_VertexArray* posGeo = myGeo->findCachedAttrib(r, "P", RE_GPU_FLOAT32, 3, RE_ARRAY_POINT, true); if (posGeo->getCacheVersion() != version) { std::vector<Name> includeGroups, excludeGroups; if (useGroup) includeGroups.emplace_back(groupName); // @note We've tried using UT_Vector3H here but we get serious aliasing in // leaf nodes which are a small distance away from the origin of the VDB // primitive (~5-6 nodes away) MultiGroupFilter filter(includeGroups, excludeGroups, iter->attributeSet()); std::vector<Index64> offsets; pointOffsets(offsets, grid.tree(), filter, /*inCoreOnly=*/true); UT_UniquePtr<UT_Vector3F[]> pdata(new UT_Vector3F[numPoints]); PositionAttribute positionAttribute(pdata.get(), mCentroid); convertPointDataGridPosition(positionAttribute, grid, offsets, /*startOffset=*/ 0, filter, /*inCoreOnly=*/true); posGeo->setArray(r, pdata.get()); posGeo->setCacheVersion(version); } if (gl3) { // Extra constant inputs for the GL3 default shaders we are using. 
fpreal32 uv[2] = { 0.0, 0.0 }; fpreal32 alpha = 1.0; fpreal32 pnt = 0.0; UT_Matrix4F instance; instance.identity(); // TODO: point scale !? myGeo->createConstAttribute(r, "uv", RE_GPU_FLOAT32, 2, uv); myGeo->createConstAttribute(r, "Alpha", RE_GPU_FLOAT32, 1, &alpha); myGeo->createConstAttribute(r, "pointSelection", RE_GPU_FLOAT32, 1,&pnt); myGeo->createConstAttribute(r, "instmat", RE_GPU_MATRIX4, 1, instance.data()); } RE_PrimType primType = RE_PRIM_POINTS; myGeo->connectAllPrims(r, RE_GEO_WIRE_IDX, primType, nullptr, true); } void GR_PrimVDBPoints::updateWireBuffer(RE_Render *r, const openvdb::points::PointDataGrid& grid, const RE_CacheVersion& version) { const bool gl3 = (getRenderVersion() >= GR_RENDER_GL3); // Initialize the geometry with the proper name for the GL cache if (!myWire) myWire.reset(new RE_Geometry); myWire->cacheBuffers(getCacheName()); using GridType = openvdb::points::PointDataGrid; using TreeType = GridType::TreeType; using LeafNode = TreeType::LeafNodeType; const TreeType& tree = grid.tree(); if (tree.leafCount() == 0) return; // count up total points ignoring any leaf nodes that are out of core size_t outOfCoreLeaves = 0; for (auto iter = tree.cbeginLeaf(); iter; ++iter) { if (iter->buffer().isOutOfCore()) outOfCoreLeaves++; } if (outOfCoreLeaves == 0) return; // Initialize the number of points for the wireframe box per leaf. int numPoints = static_cast<int>(outOfCoreLeaves*8*3); myWire->setNumPoints(numPoints); // fetch wireframe position, if its cache version matches, no upload is required. 
RE_VertexArray* posWire = myWire->findCachedAttrib(r, "P", RE_GPU_FLOAT16, 3, RE_ARRAY_POINT, true); if (posWire->getCacheVersion() != version) { using gr_primitive_internal::FillGPUBuffersLeafBoxes; // fill the wire data UT_UniquePtr<UT_Vector3H[]> data(new UT_Vector3H[numPoints]); std::vector<openvdb::Coord> coords; for (auto iter = tree.cbeginLeaf(); iter; ++iter) { const LeafNode& leaf = *iter; // skip in-core leaf nodes (for use when delay loading VDBs) if (!leaf.buffer().isOutOfCore()) continue; coords.push_back(leaf.origin()); } FillGPUBuffersLeafBoxes fill(data.get(), coords, grid.transform(), mCentroid); const tbb::blocked_range<size_t> range(0, coords.size()); tbb::parallel_for(range, fill); posWire->setArray(r, data.get()); posWire->setCacheVersion(version); } if (gl3) { // Extra constant inputs for the GL3 default shaders we are using. fpreal32 uv[2] = { 0.0, 0.0 }; fpreal32 alpha = 1.0; fpreal32 pnt = 0.0; UT_Matrix4F instance; instance.identity(); myWire->createConstAttribute(r, "uv", RE_GPU_FLOAT32, 2, uv); myWire->createConstAttribute(r, "Alpha", RE_GPU_FLOAT32, 1, &alpha); myWire->createConstAttribute(r, "pointSelection", RE_GPU_FLOAT32, 1,&pnt); myWire->createConstAttribute(r, "instmat", RE_GPU_MATRIX4, 1, instance.data()); } myWire->connectAllPrims(r, RE_GEO_WIRE_IDX, RE_PRIM_LINES, nullptr, true); } void GR_PrimVDBPoints::update(RE_Render *r, const GT_PrimitiveHandle &primh, const GR_UpdateParms &p) { // patch the point shaders at run-time to add an offset (does nothing if already patched) patchShaderVertexOffset(r, theLineShader); patchShaderVertexOffset(r, thePixelShader); patchShaderVertexOffset(r, thePointShader); // patch the decor shaders at run-time to add an offset etc (does nothing if already patched) patchShaderVertexOffset(r, theNormalDecorShader); patchShaderVertexOffset(r, theVelocityDecorShader); patchShaderVertexOffset(r, theMarkerDecorShader); patchShaderNoRedeclarations(r, theMarkerDecorShader); // geometry itself changed. 
GR_GEO_TOPOLOGY changed indicates a large // change, such as changes in the point, primitive or vertex counts // GR_GEO_CHANGED indicates that some attribute data may have changed. if (p.reason & (GR_GEO_CHANGED | GR_GEO_TOPOLOGY_CHANGED)) { const GT_PrimVDB& gt_primVDB = static_cast<const GT_PrimVDB&>(*primh); const openvdb::GridBase* grid = const_cast<GT_PrimVDB&>((static_cast<const GT_PrimVDB&>(gt_primVDB))).getGrid(); const openvdb::points::PointDataGrid& pointDataGrid = static_cast<const openvdb::points::PointDataGrid&>(*grid); computeCentroid(pointDataGrid); computeBbox(pointDataGrid); updatePosBuffer(r, pointDataGrid, p.geo_version); updateWireBuffer(r, pointDataGrid, p.geo_version); mDefaultPointColor = !updateVec3Buffer(r, pointDataGrid, "Cd", "Cd", p.geo_version); } } bool GR_PrimVDBPoints::inViewFrustum(const UT_Matrix4D& objviewproj #if (UT_VERSION_INT >= 0x1105014e) // 17.5.334 or later , const UT_BoundingBoxD *passed_bbox #endif ) { const UT_BoundingBoxD bbox( mBbox.min().x(), mBbox.min().y(), mBbox.min().z(), mBbox.max().x(), mBbox.max().y(), mBbox.max().z()); #if (UT_VERSION_INT >= 0x1105014e) // 17.5.334 or later return GR_Utils::inViewFrustum(passed_bbox ? 
*passed_bbox :bbox, objviewproj); #else return GR_Utils::inViewFrustum(bbox, objviewproj); #endif } bool GR_PrimVDBPoints::updateVec3Buffer(RE_Render* r, const openvdb::points::PointDataGrid& grid, const std::string& attributeName, const std::string& bufferName, const RE_CacheVersion& version) { // Initialize the geometry with the proper name for the GL cache if (!myGeo) return false; using GridType = openvdb::points::PointDataGrid; using TreeType = GridType::TreeType; using AttributeSet = openvdb::points::AttributeSet; const TreeType& tree = grid.tree(); auto iter = tree.cbeginLeaf(); if (!iter) return false; const int numPoints = myGeo->getNumPoints(); if (numPoints == 0) return false; const AttributeSet::Descriptor& descriptor = iter->attributeSet().descriptor(); const size_t index = descriptor.find(attributeName); // early exit if attribute does not exist if (index == AttributeSet::INVALID_POS) return false; // fetch vector attribute, if its cache version matches, no upload is required. RE_VertexArray* bufferGeo = myGeo->findCachedAttrib(r, bufferName.c_str(), RE_GPU_FLOAT16, 3, RE_ARRAY_POINT, true); if (bufferGeo->getCacheVersion() != version) { UT_UniquePtr<UT_Vector3H[]> data(new UT_Vector3H[numPoints]); const openvdb::Name& type = descriptor.type(index).first; if (type == "vec3s") { // check if group viewport is in use const openvdb::StringMetadata::ConstPtr s = grid.getMetadata<openvdb::StringMetadata>(openvdb_houdini::META_GROUP_VIEWPORT); const std::string groupName = s ? 
s->value() : ""; const bool useGroup = !groupName.empty() && descriptor.hasGroup(groupName); std::vector<Name> includeGroups, excludeGroups; if (useGroup) includeGroups.emplace_back(groupName); MultiGroupFilter filter(includeGroups, excludeGroups, iter->attributeSet()); std::vector<Index64> offsets; pointOffsets(offsets, grid.tree(), filter, /*inCoreOnly=*/true); VectorAttribute<Vec3f> typedAttribute(data.get()); convertPointDataGridAttribute(typedAttribute, grid.tree(), offsets, /*startOffset=*/ 0, static_cast<unsigned>(index), /*stride=*/1, filter, /*inCoreOnly=*/true); } bufferGeo->setArray(r, data.get()); bufferGeo->setCacheVersion(version); } return true; } bool GR_PrimVDBPoints::updateVec3Buffer(RE_Render* r, const std::string& attributeName, const std::string& bufferName, const RE_CacheVersion& version) { const GT_PrimVDB& gt_primVDB = static_cast<const GT_PrimVDB&>(*getCachedGTPrimitive()); const openvdb::GridBase* grid = const_cast<GT_PrimVDB&>((static_cast<const GT_PrimVDB&>(gt_primVDB))).getGrid(); using PointDataGrid = openvdb::points::PointDataGrid; const PointDataGrid& pointDataGrid = static_cast<const PointDataGrid&>(*grid); return updateVec3Buffer(r, pointDataGrid, attributeName, bufferName, version); } void GR_PrimVDBPoints::removeBuffer(const std::string& name) { myGeo->clearAttribute(name.c_str()); } void GR_PrimVDBPoints::render(RE_Render *r, GR_RenderMode, GR_RenderFlags, GR_DrawParms dp) { if (!myGeo && !myWire) return; const bool gl3 = (getRenderVersion() >= GR_RENDER_GL3); if (!gl3) return; const GR_CommonDispOption& commonOpts = dp.opts->common(); // draw points if (myGeo) { const bool pointDisplay = commonOpts.particleDisplayType() == GR_PARTICLE_POINTS; RE_ShaderHandle* const shader = pointDisplay ? 
&thePointShader : &thePixelShader; // bind the shader r->pushShader(); r->bindShader(*shader); // bind the position offset UT_Vector3F positionOffset(mCentroid.x(), mCentroid.y(), mCentroid.z()); (*shader)->bindVector(r, "offset", positionOffset); // for default point colors, use white if dark viewport background, black otherwise if (mDefaultPointColor) { const bool darkBackground = (commonOpts.color(GR_BACKGROUND_COLOR) == UT_Color(0)); fpreal32 white[3] = { 0.6f, 0.6f, 0.5f }; fpreal32 black[3] = { 0.01f, 0.01f, 0.01f }; myGeo->createConstAttribute(r, "Cd", RE_GPU_FLOAT32, 3, (darkBackground ? white : black)); } if (pointDisplay) r->pushPointSize(commonOpts.pointSize()); myGeo->draw(r, RE_GEO_WIRE_IDX); if (pointDisplay) r->popPointSize(); r->popShader(); } // draw leaf bboxes if (myWire && myWire->getNumPoints() > 0) { // bind the shader r->pushShader(); r->bindShader(theLineShader); // bind the position offset UT_Vector3F positionOffset(mCentroid.x(), mCentroid.y(), mCentroid.z()); theLineShader->bindVector(r, "offset", positionOffset); fpreal32 constcol[3] = { 0.6f, 0.6f, 0.6f }; myWire->createConstAttribute(r, "Cd", RE_GPU_FLOAT32, 3, constcol); r->pushLineWidth(commonOpts.wireWidth()); myWire->draw(r, RE_GEO_WIRE_IDX); r->popLineWidth(); r->popShader(); } } void GR_PrimVDBPoints::renderDecoration(RE_Render* r, GR_Decoration decor, const GR_DecorationParms& p) { // just render native GR_Primitive decorations if position not available const RE_VertexArray* const position = myGeo->getAttribute("P"); if (!position) { GR_Primitive::renderDecoration(r, decor, p); return; } const GR_CommonDispOption& commonOpts = p.opts->common(); const RE_CacheVersion version = position->getCacheVersion(); // update normal buffer GR_Decoration normalMarkers[2] = {GR_POINT_NORMAL, GR_NO_DECORATION}; const bool normalMarkerChanged = standardMarkersChanged(*p.opts, normalMarkers, false); if (normalMarkerChanged) { const bool drawNormals = p.opts->drawPointNmls() && 
updateVec3Buffer(r, "N", "N", version); if (!drawNormals) { removeBuffer("N"); } } // update velocity buffer GR_Decoration velocityMarkers[2] = {GR_POINT_VELOCITY, GR_NO_DECORATION}; const bool velocityMarkerChanged = standardMarkersChanged(*p.opts, velocityMarkers, false); if (velocityMarkerChanged) { const bool drawVelocity = p.opts->drawPointVelocity() && updateVec3Buffer(r, "v", "V", version); if (!drawVelocity) { removeBuffer("V"); } } // setup shader and scale RE_ShaderHandle* shader = nullptr; float scale = 1.0f; UT_Color color; if (decor == GR_POINT_MARKER) { shader = &theMarkerDecorShader; scale = static_cast<float>(commonOpts.markerSize()); color = commonOpts.getColor(GR_POINT_COLOR); } else if (decor == GR_POINT_NORMAL) { if (static_cast<bool>(myGeo->getAttribute("N"))) { shader = &theNormalDecorShader; scale = commonOpts.normalScale(); color = commonOpts.getColor(GR_POINT_COLOR); // No normal enum, use GR_POINT_COLOR } } else if (decor == GR_POINT_VELOCITY) { if (static_cast<bool>(myGeo->getAttribute("V"))) { shader = &theVelocityDecorShader; scale = static_cast<float>(commonOpts.vectorScale()) * VELOCITY_DECOR_SCALE; color = commonOpts.getColor(GR_POINT_TRAIL_COLOR); } } else if (decor == GR_POINT_NUMBER || decor == GR_POINT_POSITION) { // not currently supported return; } if (shader) { // bind the shader r->pushShader(); r->bindShader(*shader); // enable alpha usage in the fragment shader r->pushBlendState(); r->blendAlpha(/*onoff=*/1); // bind the position offset const UT_Vector3F positionOffset(mCentroid.x(), mCentroid.y(), mCentroid.z()); (*shader)->bindVector(r, "offset", positionOffset); r->pushUniformColor(RE_UNIFORM_WIRE_COLOR, color); r->pushUniformData(RE_UNIFORM_DECORATION_SCALE, &scale); // render myGeo->draw(r, RE_GEO_WIRE_IDX); // pop uniforms, blend state and the shader r->popUniform(RE_UNIFORM_WIRE_COLOR); r->popUniform(RE_UNIFORM_DECORATION_SCALE); r->popBlendState(); r->popShader(); } else { // fall back on default rendering 
GR_Primitive::renderDecoration(r, decor, p); } }
34,753
C++
32.321189
134
0.630852
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/geometry.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file geoemetry.h /// @author FX R&D OpenVDB team /// /// @brief A collection of Houdini geometry related methods and helper functions. #ifndef HOUDINI_UTILS_GEOMETRY_HAS_BEEN_INCLUDED #define HOUDINI_UTILS_GEOMETRY_HAS_BEEN_INCLUDED #include <UT/UT_VectorTypes.h> #include <GU/GU_Detail.h> #if defined(PRODDEV_BUILD) || defined(DWREAL_IS_DOUBLE) || defined(SESI_OPENVDB) // OPENVDB_HOUDINI_API, which has no meaning in a DWA build environment but // must at least exist, is normally defined by including openvdb/Platform.h. // For DWA builds (i.e., if either PRODDEV_BUILD or DWREAL_IS_DOUBLE exists), // that introduces an unwanted and unnecessary library dependency. #ifndef OPENVDB_HOUDINI_API #define OPENVDB_HOUDINI_API #endif #else #include <openvdb/Platform.h> #endif namespace houdini_utils { /// @brief Add geometry to the given GU_Detail to create a box with the given corners. /// @param corners the eight corners of the box /// @param color an optional color for the added geometry /// @param shaded if false, generate a wireframe box; otherwise, generate a solid box /// @param alpha an optional opacity for the added geometry OPENVDB_HOUDINI_API void createBox(GU_Detail&, UT_Vector3 corners[8], const UT_Vector3* color = nullptr, bool shaded = false, float alpha = 1.0); } // namespace houdini_utils #endif // HOUDINI_UTILS_GEOMETRY_HAS_BEEN_INCLUDED
1,497
C
36.449999
87
0.739479
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/GeometryUtil.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file GeometryUtil.cc /// @author FX R&D Simulation team /// @brief Utility methods and tools for geometry processing #include "GeometryUtil.h" #include "Utils.h" #include <houdini_utils/geometry.h> // for createBox() #include <openvdb/tools/VolumeToMesh.h> #include <GA/GA_ElementWrangler.h> #include <GA/GA_PageIterator.h> #include <GA/GA_SplittableRange.h> #include <GA/GA_Types.h> #include <GU/GU_ConvertParms.h> #include <GU/GU_PrimPoly.h> #include <OBJ/OBJ_Camera.h> #include <UT/UT_BoundingBox.h> #include <UT/UT_String.h> #include <UT/UT_UniquePtr.h> #include <cmath> #include <stdexcept> #include <vector> namespace openvdb_houdini { void drawFrustum( GU_Detail& geo, const openvdb::math::Transform& transform, const UT_Vector3* boxColor, const UT_Vector3* tickColor, bool shaded, bool drawTicks) { if (transform.mapType() != openvdb::math::NonlinearFrustumMap::mapType()) { return; } const openvdb::math::NonlinearFrustumMap& frustum = *transform.map<openvdb::math::NonlinearFrustumMap>(); const openvdb::BBoxd bbox = frustum.getBBox(); UT_Vector3 corners[8]; struct Local{ static inline void floatVecFromDoubles(UT_Vector3& v, double x, double y, double z) { v[0] = static_cast<float>(x); v[1] = static_cast<float>(y); v[2] = static_cast<float>(z); } }; openvdb::Vec3d wp = frustum.applyMap(bbox.min()); Local::floatVecFromDoubles(corners[0], wp[0], wp[1], wp[2]); wp[0] = bbox.min()[0]; wp[1] = bbox.min()[1]; wp[2] = bbox.max()[2]; wp = frustum.applyMap(wp); Local::floatVecFromDoubles(corners[1], wp[0], wp[1], wp[2]); wp[0] = bbox.max()[0]; wp[1] = bbox.min()[1]; wp[2] = bbox.max()[2]; wp = frustum.applyMap(wp); Local::floatVecFromDoubles(corners[2], wp[0], wp[1], wp[2]); wp[0] = bbox.max()[0]; wp[1] = bbox.min()[1]; wp[2] = bbox.min()[2]; wp = frustum.applyMap(wp); Local::floatVecFromDoubles(corners[3], wp[0], wp[1], wp[2]); wp[0] = bbox.min()[0]; wp[1] = bbox.max()[1]; wp[2] = 
bbox.min()[2]; wp = frustum.applyMap(wp); Local::floatVecFromDoubles(corners[4], wp[0], wp[1], wp[2]); wp[0] = bbox.min()[0]; wp[1] = bbox.max()[1]; wp[2] = bbox.max()[2]; wp = frustum.applyMap(wp); Local::floatVecFromDoubles(corners[5], wp[0], wp[1], wp[2]); wp = frustum.applyMap(bbox.max()); Local::floatVecFromDoubles(corners[6], wp[0], wp[1], wp[2]); wp[0] = bbox.max()[0]; wp[1] = bbox.max()[1]; wp[2] = bbox.min()[2]; wp = frustum.applyMap(wp); Local::floatVecFromDoubles(corners[7], wp[0], wp[1], wp[2]); float alpha = shaded ? 0.3f : 1.0f; houdini_utils::createBox(geo, corners, boxColor, shaded, alpha); // Add voxel ticks if (drawTicks) { GA_RWHandleV3 cd; int count = 0; double length = 4, maxLength = (bbox.max()[1] - bbox.min()[1]); size_t total_count = 0; if (tickColor) { cd.bind(geo.addDiffuseAttribute(GA_ATTRIB_POINT)); } for (double z = bbox.min()[2] + 1, Z = bbox.max()[2]; z < Z; ++z) { GA_Offset v0 = geo.appendPointOffset(); GA_Offset v1 = geo.appendPointOffset(); if (tickColor) { cd.set(v0, *tickColor); cd.set(v1, *tickColor); } length = 4; ++count; if (count == 5) { length = 8; count = 0; } length = std::min(length, maxLength); wp[0] = bbox.max()[0]; wp[1] = bbox.max()[1]-length; wp[2] = z; wp = frustum.applyMap(wp); geo.setPos3(v0, wp[0], wp[1], wp[2]); wp[0] = bbox.max()[0]; wp[1] = bbox.max()[1]; wp[2] = z; wp = frustum.applyMap(wp); geo.setPos3(v1, wp[0], wp[1], wp[2]); GEO_PrimPoly& prim = *GU_PrimPoly::build(&geo, 0, GU_POLY_OPEN, 0); prim.appendVertex(v0); prim.appendVertex(v1); if (++total_count > 999) break; } count = 0; total_count = 0; maxLength = (bbox.max()[2] - bbox.min()[2]); for (double x = bbox.min()[0] + 1, X = bbox.max()[0]; x < X; ++x) { GA_Offset v0 = geo.appendPointOffset(); GA_Offset v1 = geo.appendPointOffset(); if (tickColor) { cd.set(v0, *tickColor); cd.set(v1, *tickColor); } length = 1; ++count; if (count == 5) { length = 2; count = 0; } length = std::min(length, maxLength); wp[0] = x; wp[1] = bbox.max()[1]; wp[2] = 
bbox.max()[2]-length; wp = frustum.applyMap(wp); geo.setPos3(v0, wp[0], wp[1], wp[2]); wp[0] = x; wp[1] = bbox.max()[1]; wp[2] = bbox.max()[2]; wp = frustum.applyMap(wp); geo.setPos3(v1, wp[0], wp[1], wp[2]); GEO_PrimPoly& prim = *GU_PrimPoly::build(&geo, 0, GU_POLY_OPEN, 0); prim.appendVertex(v0); prim.appendVertex(v1); if (++total_count > 999) break; } } } //////////////////////////////////////// openvdb::math::Transform::Ptr frustumTransformFromCamera( OP_Node& node, OP_Context& context, OBJ_Camera& cam, float offset, float nearPlaneDist, float farPlaneDist, float voxelDepthSize, int voxelCountX) { cam.addInterestOnCameraParms(&node); const fpreal time = context.getTime(); // Eval camera parms const fpreal camAspect = cam.ASPECT(time); const fpreal camFocal = cam.FOCAL(time); const fpreal camAperture = cam.APERTURE(time); const fpreal camXRes = cam.RESX(time); const fpreal camYRes = cam.RESY(time); nearPlaneDist += offset; farPlaneDist += offset; const fpreal depth = farPlaneDist - nearPlaneDist; const fpreal zoom = camAperture / camFocal; const fpreal aspectRatio = camYRes / (camXRes * camAspect); openvdb::Vec2d nearPlaneSize; nearPlaneSize.x() = nearPlaneDist * zoom; nearPlaneSize.y() = nearPlaneSize.x() * aspectRatio; openvdb::Vec2d farPlaneSize; farPlaneSize.x() = farPlaneDist * zoom; farPlaneSize.y() = farPlaneSize.x() * aspectRatio; // Create the linear map openvdb::math::Mat4d xform(openvdb::math::Mat4d::identity()); xform.setToTranslation(openvdb::Vec3d(0, 0, -(nearPlaneDist - offset))); /// this will be used to scale the frust to the correct size, and orient the /// into the frustum as the negative z-direction xform.preScale(openvdb::Vec3d(nearPlaneSize.x(), nearPlaneSize.x(), -nearPlaneSize.x())); openvdb::math::Mat4d camxform(openvdb::math::Mat4d::identity()); { UT_Matrix4 M; OBJ_Node *meobj = node.getCreator()->castToOBJNode(); if (meobj) { node.addExtraInput(meobj, OP_INTEREST_DATA); if (!cam.getRelativeTransform(*meobj, M, context)) { 
node.addTransformError(cam, "relative"); } } else { if (!static_cast<OP_Node*>(&cam)->getWorldTransform(M, context)) { node.addTransformError(cam, "world"); } } for (unsigned i = 0; i < 4; ++i) { for (unsigned j = 0; j < 4; ++j) { camxform(i,j) = M(i,j); } } } openvdb::math::MapBase::Ptr linearMap(openvdb::math::simplify( openvdb::math::AffineMap(xform * camxform).getAffineMap())); // Create the non linear map const int voxelCountY = int(std::ceil(float(voxelCountX) * aspectRatio)); const int voxelCountZ = int(std::ceil(depth / voxelDepthSize)); // the frustum will be the image of the coordinate in this bounding box openvdb::BBoxd bbox(openvdb::Vec3d(0, 0, 0), openvdb::Vec3d(voxelCountX, voxelCountY, voxelCountZ)); // define the taper const fpreal taper = nearPlaneSize.x() / farPlaneSize.x(); // note that the depth is scaled on the nearPlaneSize. // the linearMap will uniformly scale the frustum to the correct size // and rotate to align with the camera return openvdb::math::Transform::Ptr(new openvdb::math::Transform( openvdb::math::MapBase::Ptr(new openvdb::math::NonlinearFrustumMap( bbox, taper, depth/nearPlaneSize.x(), linearMap)))); } //////////////////////////////////////// bool pointInPrimGroup(GA_Offset ptnOffset, GU_Detail& geo, const GA_PrimitiveGroup& group) { bool surfacePrim = false; GA_Offset primOffset, vtxOffset = geo.pointVertex(ptnOffset); while (GAisValid(vtxOffset)) { primOffset = geo.vertexPrimitive(vtxOffset); if (group.containsIndex(geo.primitiveIndex(primOffset))) { surfacePrim = true; break; } vtxOffset = geo.vertexToNextVertex(vtxOffset); } return surfacePrim; } //////////////////////////////////////// std::unique_ptr<GU_Detail> convertGeometry(const GU_Detail& geometry, std::string& warning, Interrupter* boss) { const GU_Detail* geo = &geometry; std::unique_ptr<GU_Detail> geoPtr; const GEO_Primitive *prim; bool needconvert = false, needdivide = false, needclean = false; GA_FOR_ALL_PRIMITIVES(geo, prim) { if (boss && 
boss->wasInterrupted()) return geoPtr; if (prim->getTypeId() == GA_PRIMPOLY) { const GEO_PrimPoly *poly = static_cast<const GEO_PrimPoly*>(prim); if (poly->getVertexCount() > 4) needdivide = true; if (poly->getVertexCount() < 3) needclean = true; } else { needconvert = true; // Some conversions will break division requirements, // like polysoup -> polygon. needdivide = true; break; } } if (needconvert || needdivide || needclean) { geoPtr.reset(new GU_Detail()); geoPtr->duplicate(*geo); geo = geoPtr.get(); } if (boss && boss->wasInterrupted()) return geoPtr; if (needconvert) { GU_ConvertParms parms; parms.setFromType(GEO_PrimTypeCompat::GEOPRIMALL); parms.setToType(GEO_PrimTypeCompat::GEOPRIMPOLY); // We don't want interior tetrahedron faces as they will just // distract us and fill up the vdb. parms.setSharedFaces(false); geoPtr->convert(parms); } if (boss && boss->wasInterrupted()) return geoPtr; if (needdivide) { geoPtr->convex(4); } if (needclean || needdivide || needconvert) { // Final pass to delete anything illegal. // There could be little fligs left over that // we don't want confusing the mesher. 
GEO_Primitive *delprim; GA_PrimitiveGroup *delgrp = 0; GA_FOR_ALL_PRIMITIVES(geoPtr.get(), delprim) { if (boss && boss->wasInterrupted()) return geoPtr; bool kill = false; if (delprim->getPrimitiveId() & GEO_PrimTypeCompat::GEOPRIMPOLY) { GEO_PrimPoly *poly = static_cast<GEO_PrimPoly*>(delprim); if (poly->getVertexCount() > 4) kill = true; if (poly->getVertexCount() < 3) kill = true; } else { kill = true; } if (kill) { if (!delgrp) { delgrp = geoPtr->newPrimitiveGroup("__delete_group__", 1); } delgrp->add(delprim); } } if (delgrp) { geoPtr->deletePrimitives(*delgrp, 1); geoPtr->destroyPrimitiveGroup(delgrp); delgrp = 0; } } if (geoPtr && needconvert) { warning = "Geometry has been converted to quads and triangles."; } return geoPtr; } //////////////////////////////////////// TransformOp::TransformOp(GU_Detail const * const gdp, const openvdb::math::Transform& transform, std::vector<openvdb::Vec3s>& pointList) : mGdp(gdp) , mTransform(transform) , mPointList(&pointList) { } void TransformOp::operator()(const GA_SplittableRange &r) const { GA_Offset start, end; UT_Vector3 pos; openvdb::Vec3d ipos; // Iterate over pages in the range for (GA_PageIterator pit = r.beginPages(); !pit.atEnd(); ++pit) { // Iterate over block for (GA_Iterator it(pit.begin()); it.blockAdvance(start, end); ) { // Element for (GA_Offset i = start; i < end; ++i) { pos = mGdp->getPos3(i); ipos = mTransform.worldToIndex(openvdb::Vec3d(pos.x(), pos.y(), pos.z())); (*mPointList)[mGdp->pointIndex(i)] = openvdb::Vec3s(ipos); } } } } //////////////////////////////////////// PrimCpyOp::PrimCpyOp(GU_Detail const * const gdp, std::vector<openvdb::Vec4I>& primList) : mGdp(gdp) , mPrimList(&primList) { } void PrimCpyOp::operator()(const GA_SplittableRange &r) const { openvdb::Vec4I prim; GA_Offset start, end; // Iterate over pages in the range for (GA_PageIterator pit = r.beginPages(); !pit.atEnd(); ++pit) { // Iterate over the elements in the page. 
for (GA_Iterator it(pit.begin()); it.blockAdvance(start, end); ) { for (GA_Offset i = start; i < end; ++i) { const GA_Primitive* primRef = mGdp->getPrimitiveList().get(i); const GA_Size vtxn = primRef->getVertexCount(); if (primRef->getTypeId() == GEO_PRIMPOLY && (3 == vtxn || 4 == vtxn)) { for (int vtx = 0; vtx < int(vtxn); ++vtx) { prim[vtx] = static_cast<openvdb::Vec4I::ValueType>( primRef->getPointIndex(vtx)); } if (vtxn != 4) prim[3] = openvdb::util::INVALID_IDX; (*mPrimList)[mGdp->primitiveIndex(i)] = prim; } else { throw std::runtime_error( "Invalid geometry; only quads and triangles are supported."); } } } } } //////////////////////////////////////// VertexNormalOp::VertexNormalOp(GU_Detail& detail, const GA_PrimitiveGroup *interiorPrims, float angle) : mDetail(detail) , mInteriorPrims(interiorPrims) , mAngle(angle) { GA_RWAttributeRef attributeRef = detail.findFloatTuple(GA_ATTRIB_VERTEX, "N", 3); if (!attributeRef.isValid()) { attributeRef = detail.addFloatTuple( GA_ATTRIB_VERTEX, "N", 3, GA_Defaults(0)); } mNormalHandle = attributeRef.getAttribute(); } void VertexNormalOp::operator()(const GA_SplittableRange& range) const { GA_Offset start, end, primOffset; UT_Vector3 primN, avgN, tmpN; bool interiorPrim = false; const GA_Primitive* primRef = nullptr; for (GA_PageIterator pageIt = range.beginPages(); !pageIt.atEnd(); ++pageIt) { for (GA_Iterator blockIt(pageIt.begin()); blockIt.blockAdvance(start, end); ) { for (GA_Offset i = start; i < end; ++i) { primRef = mDetail.getPrimitiveList().get(i); primN = mDetail.getGEOPrimitive(i)->computeNormal(); interiorPrim = isInteriorPrim(i); for (GA_Size vtx = 0, vtxN = primRef->getVertexCount(); vtx < vtxN; ++vtx) { avgN = primN; const GA_Offset vtxoff = primRef->getVertexOffset(vtx); GA_Offset vtxOffset = mDetail.pointVertex(mDetail.vertexPoint(vtxoff)); while (GAisValid(vtxOffset)) { primOffset = mDetail.vertexPrimitive(vtxOffset); if (interiorPrim == isInteriorPrim(primOffset)) { tmpN = 
mDetail.getGEOPrimitive(primOffset)->computeNormal(); if (tmpN.dot(primN) > mAngle) avgN += tmpN; } vtxOffset = mDetail.vertexToNextVertex(vtxOffset); } avgN.normalize(); mNormalHandle.set(vtxoff, avgN); } } } } } //////////////////////////////////////// SharpenFeaturesOp::SharpenFeaturesOp( GU_Detail& meshGeo, const GU_Detail& refGeo, EdgeData& edgeData, const openvdb::math::Transform& xform, const GA_PrimitiveGroup * surfacePrims, const openvdb::BoolTree * mask) : mMeshGeo(meshGeo) , mRefGeo(refGeo) , mEdgeData(edgeData) , mXForm(xform) , mSurfacePrims(surfacePrims) , mMaskTree(mask) { } void SharpenFeaturesOp::operator()(const GA_SplittableRange& range) const { openvdb::tools::MeshToVoxelEdgeData::Accessor acc = mEdgeData.getAccessor(); using BoolAccessor = openvdb::tree::ValueAccessor<const openvdb::BoolTree>; UT_UniquePtr<BoolAccessor> maskAcc; if (mMaskTree) { maskAcc.reset(new BoolAccessor(*mMaskTree)); } GA_Offset start, end, ptnOffset, primOffset; UT_Vector3 tmpN, tmpP, avgP; UT_BoundingBoxD cell; openvdb::Vec3d pos, normal; openvdb::Coord ijk; std::vector<openvdb::Vec3d> points(12), normals(12); std::vector<openvdb::Index32> primitives(12); for (GA_PageIterator pageIt = range.beginPages(); !pageIt.atEnd(); ++pageIt) { for (GA_Iterator blockIt(pageIt.begin()); blockIt.blockAdvance(start, end); ) { for (ptnOffset = start; ptnOffset < end; ++ptnOffset) { // Check if this point is referenced by a surface primitive. 
if (mSurfacePrims && !pointInPrimGroup(ptnOffset, mMeshGeo, *mSurfacePrims)) continue; tmpP = mMeshGeo.getPos3(ptnOffset); pos[0] = tmpP.x(); pos[1] = tmpP.y(); pos[2] = tmpP.z(); pos = mXForm.worldToIndex(pos); ijk[0] = int(std::floor(pos[0])); ijk[1] = int(std::floor(pos[1])); ijk[2] = int(std::floor(pos[2])); if (maskAcc && !maskAcc->isValueOn(ijk)) continue; points.clear(); normals.clear(); primitives.clear(); // get voxel-edge intersections mEdgeData.getEdgeData(acc, ijk, points, primitives); avgP.assign(0.0, 0.0, 0.0); // get normal list for (size_t n = 0, N = points.size(); n < N; ++n) { avgP.x() = static_cast<float>(avgP.x() + points[n].x()); avgP.y() = static_cast<float>(avgP.y() + points[n].y()); avgP.z() = static_cast<float>(avgP.z() + points[n].z()); primOffset = mRefGeo.primitiveOffset(primitives[n]); tmpN = mRefGeo.getGEOPrimitive(primOffset)->computeNormal(); normal[0] = tmpN.x(); normal[1] = tmpN.y(); normal[2] = tmpN.z(); normals.push_back(normal); } // Calculate feature point position if (points.size() > 1) { pos = openvdb::tools::findFeaturePoint(points, normals); // Constrain points to stay inside their initial // coordinate cell. cell.setBounds(double(ijk[0]), double(ijk[1]), double(ijk[2]), double(ijk[0]+1), double(ijk[1]+1), double(ijk[2]+1)); cell.expandBounds(0.3, 0.3, 0.3); if (!cell.isInside(pos[0], pos[1], pos[2])) { UT_Vector3 org( static_cast<float>(pos[0]), static_cast<float>(pos[1]), static_cast<float>(pos[2])); avgP *= 1.f / float(points.size()); UT_Vector3 dir = avgP - org; dir.normalize(); double distance; if(cell.intersectRay(org, dir, 1E17F, &distance) > 0) { tmpP = org + dir * distance; pos[0] = tmpP.x(); pos[1] = tmpP.y(); pos[2] = tmpP.z(); } } pos = mXForm.indexToWorld(pos); tmpP.x() = static_cast<float>(pos[0]); tmpP.y() = static_cast<float>(pos[1]); tmpP.z() = static_cast<float>(pos[2]); mMeshGeo.setPos3(ptnOffset, tmpP); } } } } } } // namespace openvdb_houdini
21,036
C++
29.31268
93
0.535843
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/Utils.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file Utils.cc /// @author FX R&D Simulation team /// @brief Utility classes and functions for OpenVDB plugins #include "Utils.h" #include <houdini_utils/ParmFactory.h> #include "GEO_PrimVDB.h" #include <GU/GU_Detail.h> #include <UT/UT_String.h> #include <UT/UT_Version.h> #ifdef OPENVDB_USE_LOG4CPLUS #include <openvdb/util/logging.h> #include <UT/UT_ErrorManager.h> #include <CHOP/CHOP_Error.h> // for CHOP_ERROR_MESSAGE #include <DOP/DOP_Error.h> // for DOP_MESSAGE #if UT_VERSION_INT < 0x11050000 // earlier than 17.5.0 #include <POP/POP_Error.h> // for POP_MESSAGE #endif #include <ROP/ROP_Error.h> // for ROP_MESSAGE #include <VOP/VOP_Error.h> // for VOP_MESSAGE #include <VOPNET/VOPNET_Error.h> // for VOPNET_MESSAGE #include <string> #endif namespace openvdb_houdini { VdbPrimCIterator::VdbPrimCIterator(const GEO_Detail* gdp, const GA_PrimitiveGroup* group, FilterFunc filter): mIter(gdp ? new GA_GBPrimitiveIterator(*gdp, group) : nullptr), mFilter(filter) { // Ensure that, after construction, this iterator points to // a valid VDB primitive (if there is one). if (nullptr == getPrimitive()) advance(); } VdbPrimCIterator::VdbPrimCIterator(const GEO_Detail* gdp, GA_Range::safedeletions, const GA_PrimitiveGroup* group, FilterFunc filter): mIter(gdp ? new GA_GBPrimitiveIterator(*gdp, group, GA_Range::safedeletions()) : nullptr), mFilter(filter) { // Ensure that, after construction, this iterator points to // a valid VDB primitive (if there is one). if (nullptr == getPrimitive()) advance(); } VdbPrimCIterator::VdbPrimCIterator(const VdbPrimCIterator& other): mIter(other.mIter ? new GA_GBPrimitiveIterator(*other.mIter) : nullptr), mFilter(other.mFilter) { } VdbPrimCIterator& VdbPrimCIterator::operator=(const VdbPrimCIterator& other) { if (&other != this) { mIter.reset(other.mIter ? 
new GA_GBPrimitiveIterator(*other.mIter) : nullptr); mFilter = other.mFilter; } return *this; } void VdbPrimCIterator::advance() { if (mIter) { GA_GBPrimitiveIterator& iter = *mIter; for (++iter; iter.getPrimitive() != nullptr && getPrimitive() == nullptr; ++iter) {} } } const GU_PrimVDB* VdbPrimCIterator::getPrimitive() const { if (mIter) { if (GA_Primitive* prim = mIter->getPrimitive()) { const GA_PrimitiveTypeId primVdbTypeId = GA_PRIMVDB; if (prim->getTypeId() == primVdbTypeId) { GU_PrimVDB* vdb = UTverify_cast<GU_PrimVDB*>(prim); if (mFilter && !mFilter(*vdb)) return nullptr; return vdb; } } } return nullptr; } UT_String VdbPrimCIterator::getPrimitiveName(const UT_String& defaultName) const { // We must have ALWAYS_DEEP enabled on returned UT_String objects to avoid // having it deleted before the caller has a chance to use it. UT_String name(UT_String::ALWAYS_DEEP); if (const GU_PrimVDB* vdb = getPrimitive()) { name = vdb->getGridName(); if (!name.isstring()) name = defaultName; } return name; } UT_String VdbPrimCIterator::getPrimitiveNameOrIndex() const { UT_String name; name.itoa(this->getIndex()); return this->getPrimitiveName(/*defaultName=*/name); } UT_String VdbPrimCIterator::getPrimitiveIndexAndName(bool keepEmptyName) const { // We must have ALWAYS_DEEP enabled on returned UT_String objects to avoid // having it deleted before the caller has a chance to use it. 
UT_String result(UT_String::ALWAYS_DEEP); if (const GU_PrimVDB* vdb = getPrimitive()) { result.itoa(this->getIndex()); UT_String name = vdb->getGridName(); if (keepEmptyName || name.isstring()) { result += (" (" + name.toStdString() + ")").c_str(); } } return result; } //////////////////////////////////////// VdbPrimIterator::VdbPrimIterator(const VdbPrimIterator& other): VdbPrimCIterator(other) { } VdbPrimIterator& VdbPrimIterator::operator=(const VdbPrimIterator& other) { if (&other != this) VdbPrimCIterator::operator=(other); return *this; } //////////////////////////////////////// GU_PrimVDB* createVdbPrimitive(GU_Detail& gdp, GridPtr grid, const char* name) { return (!grid ? nullptr : GU_PrimVDB::buildFromGrid(gdp, grid, /*src=*/nullptr, name)); } GU_PrimVDB* replaceVdbPrimitive(GU_Detail& gdp, GridPtr grid, GEO_PrimVDB& src, const bool copyAttrs, const char* name) { GU_PrimVDB* vdb = nullptr; if (grid) { vdb = GU_PrimVDB::buildFromGrid(gdp, grid, (copyAttrs ? &src : nullptr), name); gdp.destroyPrimitive(src, /*andPoints=*/true); } return vdb; } //////////////////////////////////////// bool evalGridBBox(GridCRef grid, UT_Vector3 corners[8], bool expandHalfVoxel) { if (grid.activeVoxelCount() == 0) return false; openvdb::CoordBBox activeBBox = grid.evalActiveVoxelBoundingBox(); if (!activeBBox) return false; openvdb::BBoxd voxelBBox(activeBBox.min().asVec3d(), activeBBox.max().asVec3d()); if (expandHalfVoxel) { voxelBBox.min() -= openvdb::Vec3d(0.5); voxelBBox.max() += openvdb::Vec3d(0.5); } openvdb::Vec3R bbox[8]; bbox[0] = voxelBBox.min(); bbox[1].init(voxelBBox.min()[0], voxelBBox.min()[1], voxelBBox.max()[2]); bbox[2].init(voxelBBox.max()[0], voxelBBox.min()[1], voxelBBox.max()[2]); bbox[3].init(voxelBBox.max()[0], voxelBBox.min()[1], voxelBBox.min()[2]); bbox[4].init(voxelBBox.min()[0], voxelBBox.max()[1], voxelBBox.min()[2]); bbox[5].init(voxelBBox.min()[0], voxelBBox.max()[1], voxelBBox.max()[2]); bbox[6] = voxelBBox.max(); 
bbox[7].init(voxelBBox.max()[0], voxelBBox.max()[1], voxelBBox.min()[2]); const openvdb::math::Transform& xform = grid.transform(); bbox[0] = xform.indexToWorld(bbox[0]); bbox[1] = xform.indexToWorld(bbox[1]); bbox[2] = xform.indexToWorld(bbox[2]); bbox[3] = xform.indexToWorld(bbox[3]); bbox[4] = xform.indexToWorld(bbox[4]); bbox[5] = xform.indexToWorld(bbox[5]); bbox[6] = xform.indexToWorld(bbox[6]); bbox[7] = xform.indexToWorld(bbox[7]); for (size_t i = 0; i < 8; ++i) { corners[i].assign(float(bbox[i][0]), float(bbox[i][1]), float(bbox[i][2])); } return true; } //////////////////////////////////////// openvdb::CoordBBox makeCoordBBox(const UT_BoundingBox& b, const openvdb::math::Transform& t) { openvdb::Vec3d minWS, maxWS, minIS, maxIS; minWS[0] = double(b.xmin()); minWS[1] = double(b.ymin()); minWS[2] = double(b.zmin()); maxWS[0] = double(b.xmax()); maxWS[1] = double(b.ymax()); maxWS[2] = double(b.zmax()); openvdb::math::calculateBounds(t, minWS, maxWS, minIS, maxIS); openvdb::CoordBBox box; box.min() = openvdb::Coord::floor(minIS); box.max() = openvdb::Coord::ceil(maxIS); return box; } //////////////////////////////////////// #ifndef OPENVDB_USE_LOG4CPLUS void startLogForwarding(OP_OpTypeId) {} void stopLogForwarding(OP_OpTypeId) {} bool isLogForwarding(OP_OpTypeId) { return false; } #else namespace { namespace l4c = log4cplus; /// @brief log4cplus appender that directs log messages to UT_ErrorManager class HoudiniAppender: public l4c::Appender { public: /// @param opType SOP_OPTYPE_NAME, ROP_OPTYPE_NAME, etc. (see OP_Node.h) /// @param code SOP_MESSAGE, SOP_VEX_ERROR, ROP_MESSAGE, etc. /// (see SOP_Error.h, ROP_Error.h, etc.) 
HoudiniAppender(const char* opType, int code): mOpType(opType), mCode(code) {} ~HoudiniAppender() override { close(); destructorImpl(); // must be called by Appender subclasses } void append(const l4c::spi::InternalLoggingEvent& event) override { if (mClosed) return; auto* errMgr = UTgetErrorManager(); if (!errMgr || errMgr->isDisabled()) return; const l4c::LogLevel level = event.getLogLevel(); const std::string& msg = event.getMessage(); const std::string& file = event.getFile(); const int line = event.getLine(); const UT_SourceLocation loc{file.c_str(), line}, *locPtr = (file.empty() ? nullptr : &loc); UT_ErrorSeverity severity = UT_ERROR_NONE; switch (level) { case l4c::DEBUG_LOG_LEVEL: severity = UT_ERROR_MESSAGE; break; case l4c::INFO_LOG_LEVEL: severity = UT_ERROR_MESSAGE; break; case l4c::WARN_LOG_LEVEL: severity = UT_ERROR_WARNING; break; case l4c::ERROR_LOG_LEVEL: severity = UT_ERROR_ABORT; break; case l4c::FATAL_LOG_LEVEL: severity = UT_ERROR_FATAL; break; } errMgr->addGeneric(mOpType.c_str(), mCode, msg.c_str(), severity, locPtr); } void close() override { mClosed = true; } private: std::string mOpType = INVALID_OPTYPE_NAME; int mCode = 0; bool mClosed = false; }; inline l4c::tstring getAppenderName(const OP_TypeInfo& opInfo) { return LOG4CPLUS_STRING_TO_TSTRING( std::string{"HOUDINI_"} + static_cast<const char*>(opInfo.myOptypeName)); } /// @brief Return the error code for user-supplied messages in operators of the given type. 
inline int getGenericMessageCode(OP_OpTypeId opId) { switch (opId) { case CHOP_OPTYPE_ID: return CHOP_ERROR_MESSAGE; case DOP_OPTYPE_ID: return DOP_MESSAGE; #if UT_VERSION_INT < 0x11050000 // earlier than 17.5.0 case POP_OPTYPE_ID: return POP_MESSAGE; #endif case ROP_OPTYPE_ID: return ROP_MESSAGE; case SOP_OPTYPE_ID: return SOP_MESSAGE; case VOP_OPTYPE_ID: return VOP_MESSAGE; case VOPNET_OPTYPE_ID: return VOPNET_MESSAGE; default: break; } return 0; } inline void setLogForwarding(OP_OpTypeId opId, bool enable) { const auto* opInfo = OP_Node::getOpInfoFromOpTypeID(opId); if (!opInfo) return; const auto appenderName = getAppenderName(*opInfo); auto logger = openvdb::logging::internal::getLogger(); auto appender = logger.getAppender(appenderName); if (appender && !enable) { // If an appender for the given operator type exists, remove it. logger.removeAppender(appender); } else if (!appender && enable) { // If an appender for the given operator type doesn't already exist, create one. // Otherwise, do nothing: operators of the same type can share a single appender. appender = log4cplus::SharedAppenderPtr{ new HoudiniAppender{opInfo->myOptypeName, getGenericMessageCode(opId)}}; appender->setName(appenderName); // Don't forward debug or lower-level messages. appender->setThreshold(log4cplus::INFO_LOG_LEVEL); logger.addAppender(appender); } } } // anonymous namespace void startLogForwarding(OP_OpTypeId opId) { setLogForwarding(opId, true); } void stopLogForwarding(OP_OpTypeId opId) { setLogForwarding(opId, false); } bool isLogForwarding(OP_OpTypeId opId) { if (const auto* opInfo = OP_Node::getOpInfoFromOpTypeID(opId)) { return openvdb::logging::internal::getLogger().getAppender( getAppenderName(*opInfo)); } return false; } #endif // OPENVDB_USE_LOG4CPLUS } // namespace openvdb_houdini
11,380
C++
27.381546
94
0.641916
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Rebuild_Level_Set.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Rebuild_Level_Set.cc /// /// @author FX R&D OpenVDB team /// /// @brief Rebuild level sets or fog volumes. #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/tools/LevelSetRebuild.h> #include <UT/UT_Interrupt.h> #include <CH/CH_Manager.h> #include <PRM/PRM_Parm.h> #include <PRM/PRM_SharedFunc.h> #include <algorithm> #include <limits> #include <stdexcept> #include <string> #include <vector> #include <hboost/algorithm/string/join.hpp> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; //////////////////////////////////////// class SOP_OpenVDB_Rebuild_Level_Set: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Rebuild_Level_Set(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Rebuild_Level_Set() override {} static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; protected: bool updateParmsFlags() override; void resolveObsoleteParms(PRM_ParmList*) override; }; //////////////////////////////////////// // Build UI and register this operator. 
void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; ////////// // Conversion settings hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Specify a subset of the input VDB grids to be processed\n" "(scalar, floating-point grids only)") .setDocumentation( "A subset of the scalar, floating-point input VDBs to be processed" " (see [specifying volumes|/model/volumes#group])")); parms.add(hutil::ParmFactory(PRM_FLT_J, "isovalue", "Isovalue") .setRange(PRM_RANGE_UI, -1, PRM_RANGE_UI, 1) .setTooltip("The isovalue that defines the implicit surface")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "worldunits", "Use World Space Units") .setTooltip("If enabled, specify the width of the narrow band in world units.")); // Voxel unit narrow-band width { parms.add(hutil::ParmFactory(PRM_FLT_J, "exteriorBandWidth", "Exterior Band Voxels") .setDefault(PRMthreeDefaults) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 10) .setTooltip("Specify the width of the exterior (distance >= 0) portion of the narrow band. " "(3 voxel units is optimal for level set operations.)") .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_FLT_J, "interiorBandWidth", "Interior Band Voxels") .setDefault(PRMthreeDefaults) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 10) .setTooltip("Specify the width of the interior (distance < 0) portion of the narrow band. 
" "(3 voxel units is optimal for level set operations.)") .setDocumentation(nullptr)); // } // World unit narrow-band width { parms.add(hutil::ParmFactory(PRM_FLT_J, "exteriorBandWidthWS", "Exterior Band") .setDefault(0.1) .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 10) .setTooltip("Specify the width of the exterior (distance >= 0) portion of the narrow band.") .setDocumentation( "Specify the width of the exterior (_distance_ => 0) portion of the narrow band.")); parms.add(hutil::ParmFactory(PRM_FLT_J, "interiorBandWidthWS", "Interior Band") .setDefault(0.1) .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 10) .setTooltip("Specify the width of the interior (distance < 0) portion of the narrow band.") .setDocumentation( "Specify the width of the interior (_distance_ < 0) portion of the narrow band.")); // } parms.add(hutil::ParmFactory(PRM_TOGGLE, "fillinterior", "Fill Interior") .setTooltip( "If enabled, extract signed distances for all interior voxels.\n\n" "This operation densifies the interior of the surface and requires" " a closed, watertight surface.")); ////////// // Obsolete parameters hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "halfbandwidth", "Half-Band Width") .setDefault(PRMthreeDefaults)); ////////// // Register this operator. 
hvdb::OpenVDBOpFactory("VDB Rebuild SDF", SOP_OpenVDB_Rebuild_Level_Set::factory, parms, *table) .setNativeName("") #ifndef SESI_OPENVDB .setInternalName("DW_OpenVDBRebuildLevelSet") #endif .setObsoleteParms(obsoleteParms) .addInput("VDB grids to process") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Rebuild_Level_Set::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Repair level sets represented by VDB volumes.\"\"\"\n\ \n\ @overview\n\ \n\ Certain operations on a level set volume can cause the signed distances\n\ to its zero crossing to become invalid.\n\ This node restores proper distances by surfacing the level set with\n\ a polygon mesh and then converting the mesh back to a level set.\n\ As such, it can repair more badly damaged level sets than can the\n\ [OpenVDB Renormalize Level Set|Node:sop/DW_OpenVDBRenormalizeLevelSet] node.\n\ \n\ @related\n\ - [OpenVDB Offset Level Set|Node:sop/DW_OpenVDBOffsetLevelSet]\n\ - [OpenVDB Renormalize Level Set|Node:sop/DW_OpenVDBRenormalizeLevelSet]\n\ - [OpenVDB Smooth Level Set|Node:sop/DW_OpenVDBSmoothLevelSet]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Rebuild_Level_Set::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Rebuild_Level_Set(net, name, op); } SOP_OpenVDB_Rebuild_Level_Set::SOP_OpenVDB_Rebuild_Level_Set(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// void SOP_OpenVDB_Rebuild_Level_Set::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; const fpreal time = CHgetEvalTime(); PRM_Parm* parm = obsoleteParms->getParmPtr("halfbandwidth"); if (parm && !parm->isFactoryDefault()) { const fpreal voxelWidth = obsoleteParms->evalFloat("halfbandwidth", 0, time); 
setFloat("exteriorBandWidth", 0, time, voxelWidth); setFloat("interiorBandWidth", 0, time, voxelWidth); } // Delegate to the base class. hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } //////////////////////////////////////// // Enable/disable or show/hide parameters in the UI. bool SOP_OpenVDB_Rebuild_Level_Set::updateParmsFlags() { bool changed = false; const bool fillInterior = bool(evalInt("fillinterior", 0, 0)); changed |= enableParm("interiorBandWidth", !fillInterior); changed |= enableParm("interiorBandWidthWS", !fillInterior); const bool worldUnits = bool(evalInt("worldunits", 0, 0)); changed |= setVisibleState("interiorBandWidth", !worldUnits); changed |= setVisibleState("interiorBandWidthWS", worldUnits); changed |= setVisibleState("exteriorBandWidth", !worldUnits); changed |= setVisibleState("exteriorBandWidthWS", worldUnits); return changed; } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Rebuild_Level_Set::Cache::cookVDBSop( OP_Context& context) { try { const fpreal time = context.getTime(); // Get the group of grids to process. const GA_PrimitiveGroup* group = this->matchGroup(*gdp, evalStdString("group", time)); // Get other UI parameters. const bool fillInterior = bool(evalInt("fillinterior", 0, time)); const bool worldUnits = bool(evalInt("worldunits", 0, time)); float exBandWidthVoxels = float(evalFloat("exteriorBandWidth", 0, time)); float inBandWidthVoxels = fillInterior ? std::numeric_limits<float>::max() : float(evalFloat("interiorBandWidth", 0, time)); float exBandWidthWorld = float(evalFloat("exteriorBandWidthWS", 0, time)); float inBandWidthWorld = fillInterior ? std::numeric_limits<float>::max() : float(evalFloat("interiorBandWidthWS", 0, time)); const float iso = float(evalFloat("isovalue", 0, time)); hvdb::Interrupter boss("Rebuilding Level Set Grids"); std::vector<std::string> skippedGrids; // Process each VDB primitive that belongs to the selected group. 
for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { if (boss.wasInterrupted()) break; GU_PrimVDB* vdbPrim = *it; float exWidth = exBandWidthVoxels, inWidth = inBandWidthVoxels; if (worldUnits) { const float voxelSize = float(vdbPrim->getGrid().voxelSize()[0]); exWidth = exBandWidthWorld / voxelSize; if (!fillInterior) inWidth = inBandWidthWorld / voxelSize; if (exWidth < 1.0f || inWidth < 1.0f) { exWidth = std::max(exWidth, 1.0f); inWidth = std::max(inWidth, 1.0f); std::string s = it.getPrimitiveNameOrIndex().toStdString(); s += " - band width is smaller than one voxel."; addWarning(SOP_MESSAGE, s.c_str()); } } // Process floating point grids. if (vdbPrim->getStorageType() == UT_VDB_FLOAT) { openvdb::FloatGrid& grid = UTvdbGridCast<openvdb::FloatGrid>(vdbPrim->getGrid()); vdbPrim->setGrid(*openvdb::tools::levelSetRebuild( grid, iso, exWidth, inWidth, /*xform=*/nullptr, &boss)); const GEO_VolumeOptions& visOps = vdbPrim->getVisOptions(); vdbPrim->setVisualization(GEO_VOLUMEVIS_ISO, visOps.myIso, visOps.myDensity); } else if (vdbPrim->getStorageType() == UT_VDB_DOUBLE) { openvdb::DoubleGrid& grid = UTvdbGridCast<openvdb::DoubleGrid>(vdbPrim->getGrid()); vdbPrim->setGrid(*openvdb::tools::levelSetRebuild( grid, iso, exWidth, inWidth, /*xform=*/nullptr, &boss)); const GEO_VolumeOptions& visOps = vdbPrim->getVisOptions(); vdbPrim->setVisualization(GEO_VOLUMEVIS_ISO, visOps.myIso, visOps.myDensity); } else { skippedGrids.push_back(it.getPrimitiveNameOrIndex().toStdString()); } } if (!skippedGrids.empty()) { std::string s = "The following non-floating-point grids were skipped: " + hboost::algorithm::join(skippedGrids, ", ") + "."; addWarning(SOP_MESSAGE, s.c_str()); } if (boss.wasInterrupted()) { addWarning(SOP_MESSAGE, "Process was interrupted."); } boss.end(); } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
11,321
C++
33.10241
100
0.631305
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/PointUtils.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file PointUtils.h /// /// @authors Dan Bailey, Nick Avramoussis, Richard Kwok /// /// @brief Utility classes and functions for OpenVDB Points Houdini plugins #ifndef OPENVDB_HOUDINI_POINT_UTILS_HAS_BEEN_INCLUDED #define OPENVDB_HOUDINI_POINT_UTILS_HAS_BEEN_INCLUDED #include <openvdb/math/Vec3.h> #include <openvdb/Types.h> #include <openvdb/points/PointDataGrid.h> #include <GA/GA_Attribute.h> #include <GU/GU_Detail.h> #include <PRM/PRM_ChoiceList.h> #include <iosfwd> #include <map> #include <memory> #include <string> #include <vector> #ifdef SESI_OPENVDB #ifdef OPENVDB_HOUDINI_API #undef OPENVDB_HOUDINI_API #define OPENVDB_HOUDINI_API #endif #endif namespace openvdb_houdini { using OffsetList = std::vector<GA_Offset>; using OffsetListPtr = std::shared_ptr<OffsetList>; using OffsetPair = std::pair<GA_Offset, GA_Offset>; using OffsetPairList = std::vector<OffsetPair>; using OffsetPairListPtr = std::shared_ptr<OffsetPairList>; // note that the bool parameter here for toggling in-memory compression is now deprecated using AttributeInfoMap = std::map<openvdb::Name, std::pair<int, bool>>; using WarnFunc = std::function<void (const std::string&)>; /// Metadata name for viewport groups const std::string META_GROUP_VIEWPORT = "group_viewport"; /// Enum to store available compression types for point grids enum POINT_COMPRESSION_TYPE { COMPRESSION_NONE = 0, COMPRESSION_TRUNCATE, COMPRESSION_UNIT_VECTOR, COMPRESSION_UNIT_FIXED_POINT_8, COMPRESSION_UNIT_FIXED_POINT_16, }; // forward declaration class Interrupter; /// @brief Compute a voxel size from a Houdini detail /// /// @param detail GU_Detail to compute the voxel size from /// @param pointsPerVoxel the target number of points per voxel, must be positive and non-zero /// @param matrix voxel size will be computed using this transform /// @param decimalPlaces for readability, truncate voxel size to this number of decimals /// @param 
interrupter a Houdini interrupter OPENVDB_HOUDINI_API float computeVoxelSizeFromHoudini( const GU_Detail& detail, const openvdb::Index pointsPerVoxel, const openvdb::math::Mat4d& matrix, const openvdb::Index decimalPlaces, Interrupter& interrupter); /// @brief Convert a Houdini detail into a VDB Points grid /// /// @param detail GU_Detail to convert the points and attributes from /// @param compression position compression to use /// @param attributes a vector of VDB Points attributes to be included /// (empty vector defaults to all) /// @param transform transform to use for the new point grid /// @param warnings list of warnings to be added to the SOP OPENVDB_HOUDINI_API openvdb::points::PointDataGrid::Ptr convertHoudiniToPointDataGrid( const GU_Detail& detail, const int compression, const AttributeInfoMap& attributes, const openvdb::math::Transform& transform, const WarnFunc& warnings = [](const std::string&){}); /// @brief Convert a VDB Points grid into Houdini points and append them to a Houdini Detail /// /// @param detail GU_Detail to append the converted points and attributes to /// @param grid grid containing the points that will be converted /// @param attributes a vector of VDB Points attributes to be included /// (empty vector defaults to all) /// @param includeGroups a vector of VDB Points groups to be included /// (empty vector defaults to all) /// @param excludeGroups a vector of VDB Points groups to be excluded /// (empty vector defaults to none) /// @param inCoreOnly true if out-of-core leaf nodes are to be ignored OPENVDB_HOUDINI_API void convertPointDataGridToHoudini( GU_Detail& detail, const openvdb::points::PointDataGrid& grid, const std::vector<std::string>& attributes = {}, const std::vector<std::string>& includeGroups = {}, const std::vector<std::string>& excludeGroups = {}, const bool inCoreOnly = false); /// @brief Populate VDB Points grid metadata from Houdini detail attributes /// /// @param grid grid to be populated with metadata /// 
@param detail GU_Detail to extract the detail attributes from /// @param warnings list of warnings to be added to the SOP OPENVDB_HOUDINI_API void populateMetadataFromHoudini( openvdb::points::PointDataGrid& grid, const GU_Detail& detail, const WarnFunc& warnings = [](const std::string&){}); /// @brief Convert VDB Points grid metadata into Houdini detail attributes /// /// @param detail GU_Detail to add the Houdini detail attributes /// @param metaMap the metamap to create the Houdini detail attributes from /// @param warnings list of warnings to be added to the SOP OPENVDB_HOUDINI_API void convertMetadataToHoudini( GU_Detail& detail, const openvdb::MetaMap& metaMap, const WarnFunc& warnings = [](const std::string&){}); /// @brief Returns supported tuple sizes for conversion from GA_Attribute OPENVDB_HOUDINI_API int16_t attributeTupleSize(const GA_Attribute* const attribute); /// @brief Returns supported Storage types for conversion from GA_Attribute OPENVDB_HOUDINI_API GA_Storage attributeStorageType(const GA_Attribute* const attribute); /////////////////////////////////////// /// @brief If the given grid is a PointDataGrid, add node specific info text to /// the stream provided. This is used to populate the MMB window in Houdini /// versions 15 and earlier, as well as the Operator Information Window. OPENVDB_HOUDINI_API void pointDataGridSpecificInfoText(std::ostream&, const openvdb::GridBase&); /// @brief Populates string data with information about the provided OpenVDB /// Points grid. /// @param grid The OpenVDB Points grid to retrieve information from. /// @param countStr The total point count as a formatted integer. /// @param groupStr The list of comma separated groups (or "none" if no /// groups exist). Enclosed by parentheses. /// @param attributeStr The list of comma separated attributes (or "none" if /// no attributes exist). Enclosed by parentheses. 
/// Each attribute takes the form "name [type] [code] /// [stride]" where code and stride are added for non /// default attributes. OPENVDB_HOUDINI_API void collectPointInfo(const openvdb::points::PointDataGrid& grid, std::string& countStr, std::string& groupStr, std::string& attributeStr); /////////////////////////////////////// // VDB Points group name drop-down menu OPENVDB_HOUDINI_API extern const PRM_ChoiceList VDBPointsGroupMenuInput1; OPENVDB_HOUDINI_API extern const PRM_ChoiceList VDBPointsGroupMenuInput2; OPENVDB_HOUDINI_API extern const PRM_ChoiceList VDBPointsGroupMenuInput3; OPENVDB_HOUDINI_API extern const PRM_ChoiceList VDBPointsGroupMenuInput4; /// @note Use this if you have more than 4 inputs, otherwise use /// the input specific menus instead which automatically /// handle the appropriate spare data settings. OPENVDB_HOUDINI_API extern const PRM_ChoiceList VDBPointsGroupMenu; } // namespace openvdb_houdini #endif // OPENVDB_HOUDINI_POINT_UTILS_HAS_BEEN_INCLUDED
7,485
C
34.478673
97
0.702739
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/UT_VDBUtils.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /* * Copyright (c) Side Effects Software Inc. * * Produced by: * Side Effects Software Inc. * 123 Front Street West, Suite 1401 * Toronto, Ontario * Canada M5J 2M2 * 416-366-4607 */ #include <UT/UT_Version.h> #if !defined(SESI_OPENVDB) && !defined(SESI_OPENVDB_PRIM) #include <UT/UT_VDBUtils.h> #else #ifndef __HDK_UT_VDBUtils__ #define __HDK_UT_VDBUtils__ enum UT_VDBType { UT_VDB_INVALID, UT_VDB_FLOAT, UT_VDB_DOUBLE, UT_VDB_INT32, UT_VDB_INT64, UT_VDB_BOOL, UT_VDB_VEC3F, UT_VDB_VEC3D, UT_VDB_VEC3I, UT_VDB_POINTINDEX, UT_VDB_POINTDATA, }; #include <openvdb/openvdb.h> #include <openvdb/tools/PointIndexGrid.h> #include <openvdb/points/PointDataGrid.h> #include <UT/UT_Assert.h> #include <UT/UT_BoundingBox.h> #include <UT/UT_Matrix4.h> #include <UT/UT_Matrix3.h> #include <UT/UT_Matrix2.h> #include <SYS/SYS_Math.h> /// Calls openvdb::initialize() inline void UTvdbInitialize() { openvdb::initialize(); } /// Find the UT_VDBType from a grid inline UT_VDBType UTvdbGetGridType(const openvdb::GridBase &grid) { using namespace openvdb; using namespace openvdb::tools; using namespace openvdb::points; if (grid.isType<FloatGrid>()) return UT_VDB_FLOAT; if (grid.isType<DoubleGrid>()) return UT_VDB_DOUBLE; if (grid.isType<Int32Grid>()) return UT_VDB_INT32; if (grid.isType<Int64Grid>()) return UT_VDB_INT64; if (grid.isType<BoolGrid>()) return UT_VDB_BOOL; if (grid.isType<Vec3fGrid>()) return UT_VDB_VEC3F; if (grid.isType<Vec3dGrid>()) return UT_VDB_VEC3D; if (grid.isType<Vec3IGrid>()) return UT_VDB_VEC3I; if (grid.isType<Vec3IGrid>()) return UT_VDB_VEC3I; if (grid.isType<PointIndexGrid>()) return UT_VDB_POINTINDEX; if (grid.isType<PointDataGrid>()) return UT_VDB_POINTDATA; return UT_VDB_INVALID; } /// Return the string representation of a grid's underlying value type inline const char * UTvdbGetGridTypeString(const openvdb::GridBase &grid) { switch(UTvdbGetGridType(grid)) { case UT_VDB_FLOAT: return 
"float"; case UT_VDB_DOUBLE: return "double"; case UT_VDB_INT32: return "int32"; case UT_VDB_INT64: return "int64"; case UT_VDB_BOOL: return "bool"; case UT_VDB_VEC3F: return "Vec3f"; case UT_VDB_VEC3D: return "Vec3d"; case UT_VDB_VEC3I: return "Vec3i"; case UT_VDB_POINTINDEX: return "PointIndex"; case UT_VDB_POINTDATA: return "PointData"; default: return "invalid type"; } } /// Returns the tuple size of a grid given its value type. inline int UTvdbGetGridTupleSize(UT_VDBType type) { switch(type) { case UT_VDB_FLOAT: case UT_VDB_DOUBLE: case UT_VDB_INT32: case UT_VDB_INT64: case UT_VDB_BOOL: return 1; case UT_VDB_VEC3F: case UT_VDB_VEC3D: case UT_VDB_VEC3I: return 3; case UT_VDB_POINTINDEX: case UT_VDB_POINTDATA: case UT_VDB_INVALID: default: break; } return 0; } /// Returns the tuple size of a grid inline int UTvdbGetGridTupleSize(const openvdb::GridBase &grid) { return UTvdbGetGridTupleSize(UTvdbGetGridType(grid)); } /// Special plusEqual class to avoid bool warnings /// @{ template <typename T> struct UT_VDBMath { static void plusEqual(T &lhs, const T &rhs) { lhs += rhs; } }; template <> struct UT_VDBMath<bool> { static void plusEqual(bool &lhs, const bool &rhs) { lhs = lhs | rhs; } }; /// @} /// Helpers for downcasting to a specific grid type /// @{ template <typename GridType> inline const GridType * UTvdbGridCast(const openvdb::GridBase *grid) { return UTverify_cast<const GridType *>(grid); } template <typename GridType> inline GridType * UTvdbGridCast(openvdb::GridBase *grid) { return UTverify_cast<GridType *>(grid); } template <typename GridType> inline const GridType & UTvdbGridCast(const openvdb::GridBase &grid) { return *UTverify_cast<const GridType *>(&grid); } template <typename GridType> inline GridType & UTvdbGridCast(openvdb::GridBase &grid) { return *UTverify_cast<GridType *>(&grid); } template <typename GridType> inline typename GridType::ConstPtr UTvdbGridCast(openvdb::GridBase::ConstPtr grid) { return openvdb::gridConstPtrCast<GridType>(grid); } 
template <typename GridType> inline typename GridType::Ptr UTvdbGridCast(openvdb::GridBase::Ptr grid) { return openvdb::gridPtrCast<GridType>(grid); } /// @} //////////////////////////////////////// namespace UT_VDBUtils { // Helper function used internally by UTvdbProcessTypedGrid() // to instantiate a templated functor for a specific grid type // and then to call the functor with a grid of that type template<typename GridType, typename OpType, typename GridBaseType> inline void callTypedGrid(GridBaseType &grid, OpType& op) { op.template operator()<GridType>(UTvdbGridCast<GridType>(grid)); } } // namespace UT_VDBUtils //////////////////////////////////////// /// @brief Utility function that, given a generic grid pointer, /// calls a functor on the fully-resolved grid /// /// @par Example: /// @code /// using openvdb::Coord; /// using openvdb::CoordBBox; /// /// struct FillOp { /// const CoordBBox bbox; /// /// FillOp(const CoordBBox& b): bbox(b) {} /// /// template<typename GridT> /// void operator()(GridT& grid) const { /// using ValueT = typename GridT::ValueType; /// grid.fill(bbox, ValueT(1)); /// } /// }; /// /// GU_PrimVDB* vdb = ...; /// vdb->makeGridUnique(); /// CoordBBox bbox(Coord(0,0,0), Coord(10,10,10)); /// UTvdbProcessTypedGrid(vdb->getStorageType(), vdb->getGrid(), FillOp(bbox)); /// @endcode /// /// @return @c false if the grid type is unknown or unhandled. 
/// @{ #define UT_VDB_DECL_PROCESS_TYPED_GRID(GRID_BASE_T) \ template<typename OpType> \ inline bool \ UTvdbProcessTypedGrid(UT_VDBType grid_type, GRID_BASE_T grid, OpType& op) \ { \ using namespace openvdb; \ using namespace UT_VDBUtils; \ switch (grid_type) \ { \ case UT_VDB_FLOAT: callTypedGrid<FloatGrid>(grid, op); break; \ case UT_VDB_DOUBLE: callTypedGrid<DoubleGrid>(grid, op); break; \ case UT_VDB_INT32: callTypedGrid<Int32Grid>(grid, op); break; \ case UT_VDB_INT64: callTypedGrid<Int64Grid>(grid, op); break; \ case UT_VDB_VEC3F: callTypedGrid<Vec3SGrid>(grid, op); break; \ case UT_VDB_VEC3D: callTypedGrid<Vec3DGrid>(grid, op); break; \ case UT_VDB_VEC3I: callTypedGrid<Vec3IGrid>(grid, op); break; \ default: return false; \ } \ return true; \ } \ template<typename OpType> \ inline bool \ UTvdbProcessTypedGridTopology(UT_VDBType grid_type, GRID_BASE_T grid, OpType& op) \ { \ using namespace openvdb; \ using namespace UT_VDBUtils; \ switch (grid_type) \ { \ case UT_VDB_FLOAT: callTypedGrid<FloatGrid>(grid, op); break; \ case UT_VDB_DOUBLE: callTypedGrid<DoubleGrid>(grid, op); break; \ case UT_VDB_INT32: callTypedGrid<Int32Grid>(grid, op); break; \ case UT_VDB_INT64: callTypedGrid<Int64Grid>(grid, op); break; \ case UT_VDB_VEC3F: callTypedGrid<Vec3SGrid>(grid, op); break; \ case UT_VDB_VEC3D: callTypedGrid<Vec3DGrid>(grid, op); break; \ case UT_VDB_VEC3I: callTypedGrid<Vec3IGrid>(grid, op); break; \ case UT_VDB_BOOL: callTypedGrid<BoolGrid>(grid, op); break; \ default: return false; \ } \ return true; \ } \ template<typename OpType> \ inline bool \ UTvdbProcessTypedGridVec3(UT_VDBType grid_type, GRID_BASE_T grid, OpType& op) \ { \ using namespace openvdb; \ using namespace UT_VDBUtils; \ switch (grid_type) \ { \ case UT_VDB_VEC3F: callTypedGrid<Vec3SGrid>(grid, op); break; \ case UT_VDB_VEC3D: callTypedGrid<Vec3DGrid>(grid, op); break; \ case UT_VDB_VEC3I: callTypedGrid<Vec3IGrid>(grid, op); break; \ default: return false; \ } \ return true; \ } \ 
template<typename OpType> \ inline bool \ UTvdbProcessTypedGridScalar(UT_VDBType grid_type, GRID_BASE_T grid, OpType& op) \ { \ using namespace openvdb; \ using namespace UT_VDBUtils; \ switch (grid_type) \ { \ case UT_VDB_FLOAT: callTypedGrid<FloatGrid>(grid, op); break; \ case UT_VDB_DOUBLE: callTypedGrid<DoubleGrid>(grid, op); break; \ case UT_VDB_INT32: callTypedGrid<Int32Grid>(grid, op); break; \ case UT_VDB_INT64: callTypedGrid<Int64Grid>(grid, op); break; \ default: return false; \ } \ return true; \ } \ template<typename OpType> \ inline bool \ UTvdbProcessTypedGridReal(UT_VDBType grid_type, GRID_BASE_T grid, OpType& op) \ { \ using namespace openvdb; \ using namespace UT_VDBUtils; \ switch (grid_type) \ { \ case UT_VDB_FLOAT: callTypedGrid<FloatGrid>(grid, op); break; \ case UT_VDB_DOUBLE: callTypedGrid<DoubleGrid>(grid, op); break; \ default: return false; \ } \ return true; \ } \ template<typename OpType> \ inline bool \ UTvdbProcessTypedGridPoint(UT_VDBType grid_type, GRID_BASE_T grid, OpType& op) \ { \ using namespace openvdb; \ using namespace openvdb::tools; \ using namespace openvdb::points; \ using namespace UT_VDBUtils; \ switch (grid_type) \ { \ case UT_VDB_POINTINDEX: callTypedGrid<PointIndexGrid>(grid, op); break; \ case UT_VDB_POINTDATA: callTypedGrid<PointDataGrid>(grid, op); break; \ default: return false; \ } \ return true; \ } \ /**/ UT_VDB_DECL_PROCESS_TYPED_GRID(const openvdb::GridBase &) UT_VDB_DECL_PROCESS_TYPED_GRID(const openvdb::GridBase *) UT_VDB_DECL_PROCESS_TYPED_GRID(openvdb::GridBase::ConstPtr) UT_VDB_DECL_PROCESS_TYPED_GRID(openvdb::GridBase &) UT_VDB_DECL_PROCESS_TYPED_GRID(openvdb::GridBase *) UT_VDB_DECL_PROCESS_TYPED_GRID(openvdb::GridBase::Ptr) /// @} // Helper macro for UTvdbCall* macros, do not outside of this file! #define UT_VDB_CALL(GRIDT, RETURN, FNAME, GRIDBASE, ...) \ { \ RETURN FNAME <GRIDT> (UTvdbGridCast<GRIDT>(GRIDBASE), __VA_ARGS__ ); \ } \ /**/ //@{ /// Macro to invoke the correct type of grid. 
/// Use like: /// @code /// UTvdbCallScalarType(grid_type, myfunction, grid, parms) /// @endcode /// to invoke /// @code /// template <typename GridType> /// static void /// myfunction(const GridType &grid, parms) /// { } /// @endcode #define UTvdbCallRealType(TYPE, FNAME, GRIDBASE, ...) \ if (TYPE == UT_VDB_FLOAT) \ UT_VDB_CALL(openvdb::FloatGrid,(void),FNAME,GRIDBASE,__VA_ARGS__) \ else if (TYPE == UT_VDB_DOUBLE) \ UT_VDB_CALL(openvdb::DoubleGrid,(void),FNAME,GRIDBASE,__VA_ARGS__) \ /**/ #define UTvdbCallScalarType(TYPE, FNAME, GRIDBASE, ...) \ UTvdbCallRealType(TYPE, FNAME, GRIDBASE, __VA_ARGS__) \ else if (TYPE == UT_VDB_INT32) \ UT_VDB_CALL(openvdb::Int32Grid,(void),FNAME,GRIDBASE,__VA_ARGS__) \ else if (TYPE == UT_VDB_INT64) \ UT_VDB_CALL(openvdb::Int64Grid,(void),FNAME,GRIDBASE,__VA_ARGS__) \ /**/ #define UTvdbCallVec3Type(TYPE, FNAME, GRIDBASE, ...) \ if (TYPE == UT_VDB_VEC3F) \ UT_VDB_CALL(openvdb::Vec3fGrid,(void),FNAME,GRIDBASE,__VA_ARGS__) \ else if (TYPE == UT_VDB_VEC3D) \ UT_VDB_CALL(openvdb::Vec3dGrid,(void),FNAME,GRIDBASE,__VA_ARGS__) \ else if (TYPE == UT_VDB_VEC3I) \ UT_VDB_CALL(openvdb::Vec3IGrid,(void),FNAME,GRIDBASE,__VA_ARGS__) \ /**/ #define UTvdbCallPointType(TYPE, FNAME, GRIDBASE, ...) \ if (TYPE == UT_VDB_POINTINDEX) \ UT_VDB_CALL(openvdb::tools::PointIndexGrid,(void),FNAME,GRIDBASE,__VA_ARGS__) \ else if (TYPE == UT_VDB_POINTDATA) \ UT_VDB_CALL(openvdb::points::PointDataGrid,(void),FNAME,GRIDBASE,__VA_ARGS__) \ /**/ #define UTvdbCallBoolType(TYPE, FNAME, GRIDBASE, ...) \ if (TYPE == UT_VDB_BOOL) \ UT_VDB_CALL(openvdb::BoolGrid,(void),FNAME,GRIDBASE,__VA_ARGS__) \ /**/ #define UTvdbCallAllType(TYPE, FNAME, GRIDBASE, ...) \ UTvdbCallScalarType(TYPE, FNAME, GRIDBASE, __VA_ARGS__) \ else UTvdbCallVec3Type(TYPE, FNAME, GRIDBASE, __VA_ARGS__); \ /**/ #define UTvdbCallAllTopology(TYPE, FNAME, GRIDBASE, ...) 
\ UTvdbCallScalarType(TYPE, FNAME, GRIDBASE, __VA_ARGS__) \ else UTvdbCallVec3Type(TYPE, FNAME, GRIDBASE, __VA_ARGS__) \ else UTvdbCallBoolType(TYPE, FNAME, GRIDBASE, __VA_ARGS__) \ /**/ //@} //@{ /// Macro to invoke the correct type of grid. /// Use like: /// @code /// UTvdbReturnScalarType(grid_type, myfunction, grid, parms) /// @endcode /// to invoke /// @code /// return myfunction(grid, parms); /// @endcode /// via: /// @code /// template <typename GridType> /// static RESULT /// myfunction(const GridType &grid, parms) /// { } /// @endcode #define UTvdbReturnRealType(TYPE, FNAME, GRIDBASE, ...) \ if (TYPE == UT_VDB_FLOAT) \ UT_VDB_CALL(openvdb::FloatGrid,return,FNAME,GRIDBASE,__VA_ARGS__) \ else if (TYPE == UT_VDB_DOUBLE) \ UT_VDB_CALL(openvdb::DoubleGrid,return,FNAME,GRIDBASE,__VA_ARGS__) \ /**/ #define UTvdbReturnScalarType(TYPE, FNAME, GRIDBASE, ...) \ UTvdbReturnRealType(TYPE, FNAME, GRIDBASE, __VA_ARGS__) \ else if (TYPE == UT_VDB_INT32) \ UT_VDB_CALL(openvdb::Int32Grid,return,FNAME,GRIDBASE,__VA_ARGS__) \ else if (TYPE == UT_VDB_INT64) \ UT_VDB_CALL(openvdb::Int64Grid,return,FNAME,GRIDBASE,__VA_ARGS__) \ /**/ #define UTvdbReturnVec3Type(TYPE, FNAME, GRIDBASE, ...) \ if (TYPE == UT_VDB_VEC3F) \ UT_VDB_CALL(openvdb::Vec3fGrid,return,FNAME,GRIDBASE,__VA_ARGS__) \ else if (TYPE == UT_VDB_VEC3D) \ UT_VDB_CALL(openvdb::Vec3dGrid,return,FNAME,GRIDBASE,__VA_ARGS__) \ else if (TYPE == UT_VDB_VEC3I) \ UT_VDB_CALL(openvdb::Vec3IGrid,return,FNAME,GRIDBASE,__VA_ARGS__) \ /**/ #define UTvdbReturnPointType(TYPE, FNAME, GRIDBASE, ...) \ if (TYPE == UT_VDB_POINTINDEX) \ UT_VDB_CALL(openvdb::tools::PointIndexGrid,return,FNAME,GRIDBASE,__VA_ARGS__) \ else if (TYPE == UT_VDB_POINTDATA) \ UT_VDB_CALL(openvdb::points::PointDataGrid,return,FNAME,GRIDBASE,__VA_ARGS__) \ /**/ #define UTvdbReturnBoolType(TYPE, FNAME, GRIDBASE, ...) 
\ if (TYPE == UT_VDB_BOOL) \ UT_VDB_CALL(openvdb::BoolGrid,return,FNAME,GRIDBASE,__VA_ARGS__) \ /**/ #define UTvdbReturnAllType(TYPE, FNAME, GRIDBASE, ...) \ UTvdbReturnScalarType(TYPE, FNAME, GRIDBASE, __VA_ARGS__) \ else UTvdbReturnVec3Type(TYPE, FNAME, GRIDBASE, __VA_ARGS__); \ /**/ #define UTvdbReturnAllTopology(TYPE, FNAME, GRIDBASE, ...) \ UTvdbReturnScalarType(TYPE, FNAME, GRIDBASE, __VA_ARGS__) \ else UTvdbReturnVec3Type(TYPE, FNAME, GRIDBASE, __VA_ARGS__) \ else UTvdbReturnBoolType(TYPE, FNAME, GRIDBASE, __VA_ARGS__) \ /**/ //@} //////////////////////////////////////// /// Matrix conversion from openvdb to UT // @{ template <typename S> UT_Matrix4T<S> UTvdbConvert(const openvdb::math::Mat4<S> &src) { return UT_Matrix4T<S>(src(0,0), src(0,1), src(0,2), src(0,3), src(1,0), src(1,1), src(1,2), src(1,3), src(2,0), src(2,1), src(2,2), src(2,3), src(3,0), src(3,1), src(3,2), src(3,3)); } template <typename S> UT_Matrix3T<S> UTvdbConvert(const openvdb::math::Mat3<S> &src) { return UT_Matrix3T<S>(src(0,0), src(0,1), src(0,2), src(1,0), src(1,1), src(1,2), src(2,0), src(2,1), src(2,2)); } template <typename S> UT_Matrix2T<S> UTvdbConvert(const openvdb::math::Mat2<S> &src) { return UT_Matrix2T<S>(src(0,0), src(0,1), src(1,0), src(1,1)); } // @} /// Matrix conversion from UT to openvdb // @{ template <typename S> openvdb::math::Mat4<S> UTvdbConvert(const UT_Matrix4T<S> &src) { return openvdb::math::Mat4<S>(src(0,0), src(0,1), src(0,2), src(0,3), src(1,0), src(1,1), src(1,2), src(1,3), src(2,0), src(2,1), src(2,2), src(2,3), src(3,0), src(3,1), src(3,2), src(3,3)); } template <typename S> openvdb::math::Mat3<S> UTvdbConvert(const UT_Matrix3T<S> &src) { return openvdb::math::Mat3<S>(src(0,0), src(0,1), src(0,2), src(1,0), src(1,1), src(1,2), src(2,0), src(2,1), src(2,2)); } template <typename S> openvdb::math::Mat2<S> UTvdbConvert(const UT_Matrix2T<S> &src) { return openvdb::math::Mat2<S>(src(0,0), src(0,1), src(1,0), src(1,1)); } // @} /// Vector conversion from 
openvdb to UT // @{ template <typename S> UT_Vector4T<S> UTvdbConvert(const openvdb::math::Vec4<S> &src) { return UT_Vector4T<S>(src.asPointer()); } template <typename S> UT_Vector3T<S> UTvdbConvert(const openvdb::math::Vec3<S> &src) { return UT_Vector3T<S>(src.asPointer()); } template <typename S> UT_Vector2T<S> UTvdbConvert(const openvdb::math::Vec2<S> &src) { return UT_Vector2T<S>(src.asPointer()); } // @} /// Vector conversion from UT to openvdb // @{ template <typename S> openvdb::math::Vec4<S> UTvdbConvert(const UT_Vector4T<S> &src) { return openvdb::math::Vec4<S>(src.data()); } template <typename S> openvdb::math::Vec3<S> UTvdbConvert(const UT_Vector3T<S> &src) { return openvdb::math::Vec3<S>(src.data()); } template <typename S> openvdb::math::Vec2<S> UTvdbConvert(const UT_Vector2T<S> &src) { return openvdb::math::Vec2<S>(src.data()); } // @} /// Bounding box conversion from openvdb to UT inline UT_BoundingBoxD UTvdbConvert(const openvdb::CoordBBox &bbox) { return UT_BoundingBoxD(UTvdbConvert(bbox.getStart().asVec3d()), UTvdbConvert(bbox.getEnd().asVec3d())); } /// Bounding box conversion from openvdb to UT inline openvdb::math::CoordBBox UTvdbConvert(const UT_BoundingBoxI &bbox) { return openvdb::math::CoordBBox( openvdb::math::Coord(bbox.xmin(), bbox.ymin(), bbox.zmin()), openvdb::math::Coord(bbox.xmax(), bbox.ymax(), bbox.zmax())); } /// Utility method to construct a Transform that lines up with a /// cell-centered Houdini volume with specified origin and voxel size. inline openvdb::math::Transform::Ptr UTvdbCreateTransform(const UT_Vector3 &orig, const UT_Vector3 &voxsize) { // Transforms only valid for square voxels. UT_ASSERT(SYSalmostEqual(voxsize.minComponent(), voxsize.maxComponent())); fpreal vs = voxsize.maxComponent(); openvdb::math::Transform::Ptr xform = openvdb::math::Transform::createLinearTransform(vs); // Ensure voxel centers line up. 
xform->postTranslate(UTvdbConvert(orig) + vs / 2); return xform; } template <typename T> inline openvdb::math::Vec4<T> SYSabs(const openvdb::math::Vec4<T> &v1) { return openvdb::math::Vec4<T>( SYSabs(v1[0]), SYSabs(v1[1]), SYSabs(v1[2]), SYSabs(v1[3]) ); } template <typename T> inline openvdb::math::Vec3<T> SYSabs(const openvdb::math::Vec3<T> &v1) { return openvdb::math::Vec3<T>( SYSabs(v1[0]), SYSabs(v1[1]), SYSabs(v1[2]) ); } template <typename T> inline openvdb::math::Vec2<T> SYSabs(const openvdb::math::Vec2<T> &v1) { return openvdb::math::Vec2<T>( SYSabs(v1[0]), SYSabs(v1[1]) ); } template <typename T> inline openvdb::math::Vec4<T> SYSmin(const openvdb::math::Vec4<T> &v1, const openvdb::math::Vec4<T> &v2) { return openvdb::math::Vec4<T>( SYSmin(v1[0], v2[0]), SYSmin(v1[1], v2[1]), SYSmin(v1[2], v2[2]), SYSmin(v1[3], v2[3]) ); } template <typename T> inline openvdb::math::Vec4<T> SYSmax(const openvdb::math::Vec4<T> &v1, const openvdb::math::Vec4<T> &v2) { return openvdb::math::Vec4<T>( SYSmax(v1[0], v2[0]), SYSmax(v1[1], v2[1]), SYSmax(v1[2], v2[2]), SYSmax(v1[3], v2[3]) ); } template <typename T> inline openvdb::math::Vec4<T> SYSmin(const openvdb::math::Vec4<T> &v1, const openvdb::math::Vec4<T> &v2, const openvdb::math::Vec4<T> &v3) { return openvdb::math::Vec4<T>( SYSmin(v1[0], v2[0], v3[0]), SYSmin(v1[1], v2[1], v3[1]), SYSmin(v1[2], v2[2], v3[2]), SYSmin(v1[3], v2[3], v3[3]) ); } template <typename T> inline openvdb::math::Vec4<T> SYSmax(const openvdb::math::Vec4<T> &v1, const openvdb::math::Vec4<T> &v2, const openvdb::math::Vec4<T> &v3) { return openvdb::math::Vec4<T>( SYSmax(v1[0], v2[0], v3[0]), SYSmax(v1[1], v2[1], v3[1]), SYSmax(v1[2], v2[2], v3[2]), SYSmax(v1[3], v2[3], v3[3]) ); } template <typename T> inline openvdb::math::Vec3<T> SYSmin(const openvdb::math::Vec3<T> &v1, const openvdb::math::Vec3<T> &v2) { return openvdb::math::Vec3<T>( SYSmin(v1[0], v2[0]), SYSmin(v1[1], v2[1]), SYSmin(v1[2], v2[2]) ); } template <typename T> inline 
openvdb::math::Vec3<T> SYSmax(const openvdb::math::Vec3<T> &v1, const openvdb::math::Vec3<T> &v2) { return openvdb::math::Vec3<T>( SYSmax(v1[0], v2[0]), SYSmax(v1[1], v2[1]), SYSmax(v1[2], v2[2]) ); } template <typename T> inline openvdb::math::Vec3<T> SYSmin(const openvdb::math::Vec3<T> &v1, const openvdb::math::Vec3<T> &v2, const openvdb::math::Vec3<T> &v3) { return openvdb::math::Vec3<T>( SYSmin(v1[0], v2[0], v3[0]), SYSmin(v1[1], v2[1], v3[1]), SYSmin(v1[2], v2[2], v3[2]) ); } template <typename T> inline openvdb::math::Vec3<T> SYSmax(const openvdb::math::Vec3<T> &v1, const openvdb::math::Vec3<T> &v2, const openvdb::math::Vec3<T> &v3) { return openvdb::math::Vec3<T>( SYSmax(v1[0], v2[0], v3[0]), SYSmax(v1[1], v2[1], v3[1]), SYSmax(v1[2], v2[2], v3[2]) ); } template <typename T> inline openvdb::math::Vec2<T> SYSmin(const openvdb::math::Vec2<T> &v1, const openvdb::math::Vec2<T> &v2) { return openvdb::math::Vec2<T>( SYSmin(v1[0], v2[0]), SYSmin(v1[1], v2[1]) ); } template <typename T> inline openvdb::math::Vec2<T> SYSmax(const openvdb::math::Vec2<T> &v1, const openvdb::math::Vec2<T> &v2) { return openvdb::math::Vec2<T>( SYSmax(v1[0], v2[0]), SYSmax(v1[1], v2[1]) ); } template <typename T> inline openvdb::math::Vec2<T> SYSmin(const openvdb::math::Vec2<T> &v1, const openvdb::math::Vec2<T> &v2, const openvdb::math::Vec2<T> &v3) { return openvdb::math::Vec2<T>( SYSmin(v1[0], v2[0], v3[0]), SYSmin(v1[1], v2[1], v3[1]) ); } template <typename T> inline openvdb::math::Vec2<T> SYSmax(const openvdb::math::Vec2<T> &v1, const openvdb::math::Vec2<T> &v2, const openvdb::math::Vec2<T> &v3) { return openvdb::math::Vec2<T>( SYSmax(v1[0], v2[0], v3[0]), SYSmax(v1[1], v2[1], v3[1]) ); } #endif // __HDK_UT_VDBUtils__ #endif // SESI_OPENVDB || SESI_OPENVDB_PRIM
24,374
C
32.118206
140
0.587552
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/GU_PrimVDB.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /* * Copyright (c) Side Effects Software Inc. * * Produced by: * Side Effects Software Inc * 477 Richmond Street West * Toronto, Ontario * Canada M5V 3E7 * 416-504-9876 * * NAME: GU_PrimVDB.C ( GU Library, C++) * * COMMENTS: Definitions for utility functions of vdb. */ #include <UT/UT_Version.h> #if defined(SESI_OPENVDB) || defined(SESI_OPENVDB_PRIM) #include "GU_PrimVDB.h" #include "GT_GEOPrimCollectVDB.h" #include <GU/GU_ConvertParms.h> #include <GU/GU_PrimPoly.h> #include <GU/GU_PrimPolySoup.h> #include <GU/GU_PrimVolume.h> #include <GU/GU_RayIntersect.h> #include <GEO/GEO_AttributeHandleList.h> #include <GEO/GEO_Closure.h> #include <GEO/GEO_WorkVertexBuffer.h> #include <GA/GA_AIFTuple.h> #include <GA/GA_AttributeFilter.h> #include <GA/GA_ElementWrangler.h> #include <GA/GA_Handle.h> #include <GA/GA_PageHandle.h> #include <GA/GA_PageIterator.h> #include <GA/GA_SplittableRange.h> #include <UT/UT_Debug.h> #include <UT/UT_Interrupt.h> #include <UT/UT_Lock.h> #include <UT/UT_MemoryCounter.h> #include <UT/UT_ParallelUtil.h> #include <UT/UT_UniquePtr.h> #include <UT/UT_Singleton.h> #include <UT/UT_StopWatch.h> #include <SYS/SYS_Inline.h> #include <SYS/SYS_Types.h> #include <SYS/SYS_TypeTraits.h> #include <openvdb/tools/VolumeToMesh.h> #include <hboost/function.hpp> #include <openvdb/tools/SignedFloodFill.h> #include <algorithm> #include <vector> #include <stddef.h> #define TIMING_DEF \ UT_StopWatch timer; \ if (verbose) timer.start(); #define TIMING_LOG(msg) \ if (verbose) { \ printf(msg ": %f ms\n", 1000*timer.stop()); \ fflush(stdout); \ timer.start(); \ } GA_PrimitiveDefinition *GU_PrimVDB::theDefinition = 0; GU_PrimVDB* GU_PrimVDB::build(GU_Detail *gdp, bool append_points) { #ifndef SESI_OPENVDB // This is only necessary as a stop gap measure until we have the // registration code split out properly. 
if (!GU_PrimVDB::theDefinition) GU_PrimVDB::registerMyself(&GUgetFactory()); GU_PrimVDB* primvdb = (GU_PrimVDB *)gdp->appendPrimitive(GU_PrimVDB::theTypeId()); #else GU_PrimVDB* primvdb = UTverify_cast<GU_PrimVDB *>(gdp->appendPrimitive(GEO_PRIMVDB)); primvdb->assignVertex(gdp->appendVertex(), true); #endif if (append_points) { GEO_Primitive *prim = primvdb; const GA_Size npts = primvdb->getVertexCount(); GA_Offset startptoff = gdp->appendPointBlock(npts); for (GA_Size i = 0; i < npts; i++) { prim->setPointOffset(i, startptoff+i); } } return primvdb; } GU_PrimVDB* GU_PrimVDB::buildFromGridAdapter(GU_Detail& gdp, void* gridPtr, const GEO_PrimVDB* src, const char* name) { // gridPtr is assumed to point to an openvdb::vX_Y_Z::GridBase::Ptr, for // some version X.Y.Z of OpenVDB that may be newer than the one with which // libHoudiniGEO.so was built. This is safe provided that GridBase and // its member objects are ABI-compatible between the two OpenVDB versions. openvdb::GridBase::Ptr grid = *static_cast<openvdb::GridBase::Ptr*>(gridPtr); if (!grid) return nullptr; GU_PrimVDB* vdb = GU_PrimVDB::build(&gdp); if (vdb != nullptr) { if (src != nullptr) { // Copy the source primitive's attributes to this primitive, // then transfer those attributes to this grid's metadata. vdb->copyAttributesAndGroups(*src, /*copyGroups=*/true); GU_PrimVDB::createMetadataFromGridAttrs(*grid, *vdb, gdp); // Copy the source's visualization options. GEO_VolumeOptions visopt = src->getVisOptions(); vdb->setVisualization(visopt.myMode, visopt.myIso, visopt.myDensity, visopt.myLod); } // Ensure that certain metadata exists (grid name, grid class, etc.). if (name != nullptr) grid->setName(name); grid->removeMeta("value_type"); grid->insertMeta("value_type", openvdb::StringMetadata(grid->valueType())); // For each of the following, force any existing metadata's value to be // one of the supported values. Note the careful 3 statement sequences // so that it works with type mismatches. 
openvdb::GridClass grid_class = grid->getGridClass(); grid->removeMeta(openvdb::GridBase::META_GRID_CLASS); grid->setGridClass(grid_class); openvdb::VecType vec_type = grid->getVectorType(); grid->removeMeta(openvdb::GridBase::META_VECTOR_TYPE); grid->setVectorType(vec_type); bool is_in_world_space = grid->isInWorldSpace(); grid->removeMeta(openvdb::GridBase::META_IS_LOCAL_SPACE); grid->setIsInWorldSpace(is_in_world_space); bool save_as_half = grid->saveFloatAsHalf(); grid->removeMeta(openvdb::GridBase::META_SAVE_HALF_FLOAT); grid->setSaveFloatAsHalf(save_as_half); // Transfer the grid's metadata to primitive attributes. GU_PrimVDB::createGridAttrsFromMetadata(*vdb, *grid, gdp); vdb->setGrid(*grid); // If we had no source, have to set options to reasonable // defaults. if (src == nullptr) { if (grid->getGridClass() == openvdb::GRID_LEVEL_SET) { vdb->setVisualization(GEO_VOLUMEVIS_ISO, vdb->getVisIso(), vdb->getVisDensity(), vdb->getVisLod()); } else { vdb->setVisualization(GEO_VOLUMEVIS_SMOKE, vdb->getVisIso(), vdb->getVisDensity(), vdb->getVisLod()); } } } return vdb; } int64 GU_PrimVDB::getMemoryUsage() const { int64 mem = sizeof(*this); mem += GEO_PrimVDB::getBaseMemoryUsage(); return mem; } void GU_PrimVDB::countMemory(UT_MemoryCounter &counter) const { counter.countUnshared(sizeof(*this)); GEO_PrimVDB::countBaseMemory(counter); } namespace // anonymous { class gu_VolumeMax { public: gu_VolumeMax( const UT_VoxelArrayReadHandleF &vox, UT_AutoInterrupt &progress) : myVox(vox) , myProgress(progress) , myMax(std::numeric_limits<float>::min()) { } gu_VolumeMax(const gu_VolumeMax &other, UT_Split) : myVox(other.myVox) , myProgress(other.myProgress) // NOTE: other.myMax could be half written-to while this // constructor is being called, so don't use its // value here. Initialize myMax as in the main // constructor. 
, myMax(std::numeric_limits<float>::min()) { } void operator()(const UT_BlockedRange<int> &range) { uint8 bcnt = 0; for (int i = range.begin(); i != range.end(); ++i) { float min_value; float max_value; myVox->getLinearTile(i)->findMinMax(min_value, max_value); myMax = SYSmax(myMax, max_value); if (!bcnt++ && myProgress.wasInterrupted()) break; } } void join(const gu_VolumeMax &other) { myMax = std::max(myMax, other.myMax); } float findMax() { UTparallelReduce(UT_BlockedRange<int>(0, myVox->numTiles()), *this); return myMax; } private: const UT_VoxelArrayReadHandleF & myVox; UT_AutoInterrupt & myProgress; float myMax; }; class gu_ConvertToVDB { public: gu_ConvertToVDB( const UT_VoxelArrayReadHandleF &vox, float background, UT_AutoInterrupt &progress, bool activateInsideSDF ) : myVox(vox) , myGrid(openvdb::FloatGrid::create(background)) , myProgress(progress) , myActivateInsideSDF(activateInsideSDF) { } gu_ConvertToVDB(const gu_ConvertToVDB &other, UT_Split) : myVox(other.myVox) , myGrid(openvdb::FloatGrid::create(other.myGrid->background())) , myProgress(other.myProgress) , myActivateInsideSDF(other.myActivateInsideSDF) { } openvdb::FloatGrid::Ptr run() { using namespace openvdb; UTparallelReduce(UT_BlockedRange<int>(0, myVox->numTiles()), *this); // Check if the VDB grid can be made empty openvdb::Coord dim = myGrid->evalActiveVoxelDim(); if (dim[0] == 1 && dim[1] == 1 && dim[2] == 1) { openvdb::Coord ijk = myGrid->evalActiveVoxelBoundingBox().min(); float value = myGrid->tree().getValue(ijk); if (openvdb::math::isApproxEqual<float>(value, myGrid->background())) { myGrid->clear(); } } return myGrid; } void operator()(const UT_BlockedRange<int> &range) { using namespace openvdb; FloatGrid & grid = *myGrid.get(); const float background = grid.background(); const UT_VoxelArrayF & vox = *myVox; uint8 bcnt = 0; FloatGrid::Accessor acc = grid.getAccessor(); for (int i = range.begin(); i != range.end(); ++i) { const UT_VoxelTile<float> & tile = *vox.getLinearTile(i); 
Coord org; Coord dim; vox.linearTileToXYZ(i, org[0], org[1], org[2]); org[0] *= TILESIZE; org[1] *= TILESIZE; org[2] *= TILESIZE; dim[0] = tile.xres(); dim[1] = tile.yres(); dim[2] = tile.zres(); if (tile.isConstant()) { CoordBBox bbox(org, org + dim.offsetBy(-1)); float value = tile(0, 0, 0); if (!SYSisEqual(value, background) && (myActivateInsideSDF || !SYSisEqual(value, -background))) { grid.fill(bbox, value); } } else { openvdb::Coord ijk; for (ijk[2] = 0; ijk[2] < dim[2]; ++ijk[2]) { for (ijk[1] = 0; ijk[1] < dim[1]; ++ijk[1]) { for (ijk[0] = 0; ijk[0] < dim[0]; ++ijk[0]) { float value = tile(ijk[0], ijk[1], ijk[2]); if (!SYSisEqual(value, background) && (myActivateInsideSDF || !SYSisEqual(value, -background))) { Coord pos = ijk.offsetBy(org[0], org[1], org[2]); acc.setValue(pos, value); } } } } } if (!bcnt++ && myProgress.wasInterrupted()) break; } } void join(const gu_ConvertToVDB &other) { if (myProgress.wasInterrupted()) return; UT_IF_ASSERT(int old_count = myGrid->activeVoxelCount();) UT_IF_ASSERT(int other_count = other.myGrid->activeVoxelCount();) myGrid->merge(*other.myGrid); UT_ASSERT(myGrid->activeVoxelCount() == old_count + other_count); } private: const UT_VoxelArrayReadHandleF & myVox; openvdb::FloatGrid::Ptr myGrid; UT_AutoInterrupt & myProgress; bool myActivateInsideSDF; }; // class gu_ConvertToVDB } // namespace anonymous GU_PrimVDB * GU_PrimVDB::buildFromPrimVolume( GU_Detail &geo, const GEO_PrimVolume &vol, const char *name, const bool flood_sdf, const bool prune, const float tolerance, const bool activate_inside_sdf) { using namespace openvdb; UT_AutoInterrupt progress("Converting to VDB"); UT_VoxelArrayReadHandleF vox = vol.getVoxelHandle(); float background; if (vol.isSDF()) { gu_VolumeMax max_op(vox, progress); background = max_op.findMax(); if (progress.wasInterrupted()) return nullptr; } else { if (vol.getBorder() == GEO_VOLUMEBORDER_CONSTANT) background = vol.getBorderValue(); else background = 0.0; } // When flood-filling SDFs, the 
inactive interior voxels will be set to // -background. In that case we can avoid activating all inside voxels // that already have that value, maintaining the narrow band (if any) of the // original native volume. For non-SDF we always activate interior voxels. gu_ConvertToVDB converter(vox, background, progress, activate_inside_sdf || !flood_sdf || !vol.isSDF()); FloatGrid::Ptr grid = converter.run(); if (progress.wasInterrupted()) return nullptr; if (vol.isSDF()) grid->setGridClass(GridClass(GRID_LEVEL_SET)); else grid->setGridClass(GridClass(GRID_FOG_VOLUME)); if (prune) { grid->pruneGrid(tolerance); } if (flood_sdf && vol.isSDF()) { // only call signed flood fill on SDFs openvdb::tools::signedFloodFill(grid->tree()); } GU_PrimVDB *prim_vdb = buildFromGrid(geo, grid, nullptr, name); if (!prim_vdb) return nullptr; int rx, ry, rz; vol.getRes(rx, ry, rz); prim_vdb->setSpaceTransform(vol.getSpaceTransform(), UT_Vector3R(rx,ry,rz)); prim_vdb->setVisualization( vol.getVisualization(), vol.getVisIso(), vol.getVisDensity(), GEO_VOLUMEVISLOD_FULL); return prim_vdb; } // Copy the exclusive bbox [start,end) from myVox into acc static void guCopyVoxelBBox( const UT_VoxelArrayReadHandleF &vox, openvdb::FloatGrid::Accessor &acc, openvdb::Coord start, openvdb::Coord end) { openvdb::Coord c; for (c[0] = start[0] ; c[0] < end[0]; c[0]++) { for (c[1] = start[1] ; c[1] < end[1]; c[1]++) { for (c[2] = start[2] ; c[2] < end[2]; c[2]++) { float value = vox->getValue(c[0], c[1], c[2]); acc.setValueOnly(c, value); } } } } void GU_PrimVDB::expandBorderFromPrimVolume(const GEO_PrimVolume &vol, int pad) { using namespace openvdb; UT_AutoInterrupt progress("Add inactive VDB border"); const UT_VoxelArrayReadHandleF vox(vol.getVoxelHandle()); const Coord res(vox->getXRes(), vox->getYRes(), vox->getZRes()); GridBase & base = getGrid(); FloatGrid & grid = UTvdbGridCast<FloatGrid>(base); FloatGrid::Accessor acc = grid.getAccessor(); // For simplicity, we overdraw the edges and corners for (int 
axis = 0; axis < 3; axis++) { if (progress.wasInterrupted()) return; openvdb::Coord beg(-pad, -pad, -pad); openvdb::Coord end = res.offsetBy(+pad); beg[axis] = -pad; end[axis] = 0; guCopyVoxelBBox(vox, acc, beg, end); beg[axis] = res[axis]; end[axis] = res[axis] + pad; guCopyVoxelBBox(vox, acc, beg, end); } } // The following code is for HDK only #ifndef SESI_OPENVDB // Static callback for our factory. static GA_Primitive* gu_newPrimVDB(GA_Detail &detail, GA_Offset offset, const GA_PrimitiveDefinition &) { return new GU_PrimVDB(static_cast<GU_Detail *>(&detail), offset); } static GA_Primitive* gaPrimitiveMergeConstructor(const GA_MergeMap &map, GA_Detail &dest_detail, GA_Offset dest_offset, const GA_Primitive &src_prim) { return new GU_PrimVDB(map, dest_detail, dest_offset, static_cast<const GU_PrimVDB &>(src_prim)); } static UT_Lock theInitPrimDefLock; void GU_PrimVDB::registerMyself(GA_PrimitiveFactory *factory) { // Ignore double registration if (theDefinition) return; UT_Lock::Scope lock(theInitPrimDefLock); if (theDefinition) return; #if defined(__ICC) // Disable ICC "assignment to static variable" warning, // since the assignment to theDefinition is mutex-protected. 
__pragma(warning(disable:1711)); #endif theDefinition = factory->registerDefinition("VDB", gu_newPrimVDB, GA_FAMILY_NONE); #if defined(__ICC) __pragma(warning(default:1711)); #endif if (!theDefinition) { std::cerr << "WARNING: Unable to register custom GU_PrimVDB\n"; if (!factory->lookupDefinition("VDB")) { std::cerr << "WARNING: failed to register GU_PrimVDB\n"; } return; } theDefinition->setLabel("Sparse Volumes (VDBs)"); theDefinition->setHasLocalTransform(true); theDefinition->setMergeConstructor(&gaPrimitiveMergeConstructor); registerIntrinsics(*theDefinition); // Register the GT tesselation too (now we know what type id we have) openvdb_houdini::GT_GEOPrimCollectVDB::registerPrimitive(theDefinition->getId()); } #endif GEO_Primitive * GU_PrimVDB::convertToNewPrim( GEO_Detail &dst_geo, GU_ConvertParms &parms, fpreal adaptivity, bool split_disjoint_volumes, bool &success) const { GEO_Primitive * prim = nullptr; const GA_PrimCompat::TypeMask parmType = parms.toType(); success = false; if (parmType == GEO_PrimTypeCompat::GEOPRIMPOLY) { prim = convertToPoly(dst_geo, parms, adaptivity, /*polysoup*/false, success); } else if (parmType == GEO_PrimTypeCompat::GEOPRIMPOLYSOUP) { prim = convertToPoly(dst_geo, parms, adaptivity, /*polysoup*/true, success); } else if (parmType == GEO_PrimTypeCompat::GEOPRIMVOLUME) { prim = convertToPrimVolume(dst_geo, parms, split_disjoint_volumes); if (prim) success = true; } return prim; } GEO_Primitive * GU_PrimVDB::convertNew(GU_ConvertParms &parms) { bool success = false; return convertToNewPrim(*getParent(), parms, /*adaptivity*/0, /*sparse*/false, success); } static void guCopyMesh( GEO_Detail& detail, openvdb::tools::VolumeToMesh& mesher, bool buildpolysoup, bool verbose) { TIMING_DEF; const openvdb::tools::PointList& points = mesher.pointList(); openvdb::tools::PolygonPoolList& polygonPoolList = mesher.polygonPoolList(); // NOTE: Adaptive meshes consist of tringles and quads. 
// Construct the points GA_Size npoints = mesher.pointListSize(); GA_Offset startpt = detail.appendPointBlock(npoints); SYS_STATIC_ASSERT(sizeof(openvdb::tools::PointList::element_type) == sizeof(UT_Vector3)); GA_RWHandleV3 pthandle(detail.getP()); pthandle.setBlock(startpt, npoints, (UT_Vector3 *)points.get()); TIMING_LOG("Copy Points"); // Construct the array of polygon point numbers // NOTE: For quad meshes, the number of quads is about the number of points, // so the number of vertices is about 4*npoints GA_Size nquads = 0, ntris = 0; for (size_t n = 0, N = mesher.polygonPoolListSize(); n < N; ++n) { const openvdb::tools::PolygonPool& polygons = polygonPoolList[n]; nquads += polygons.numQuads(); ntris += polygons.numTriangles(); } TIMING_LOG("Count Quads and Tris"); // Don't create anything if nothing to create if (!ntris && !nquads) return; GA_Size nverts = nquads*4 + ntris*3; UT_IntArray verts(nverts, nverts); GA_Size iquad = 0; GA_Size itri = nquads*4; for (size_t n = 0, N = mesher.polygonPoolListSize(); n < N; ++n) { const openvdb::tools::PolygonPool& polygons = polygonPoolList[n]; // Copy quads for (size_t i = 0, I = polygons.numQuads(); i < I; ++i) { const openvdb::Vec4I& quad = polygons.quad(i); verts(iquad++) = quad[0]; verts(iquad++) = quad[1]; verts(iquad++) = quad[2]; verts(iquad++) = quad[3]; } // Copy triangles (adaptive mesh) for (size_t i = 0, I = polygons.numTriangles(); i < I; ++i) { const openvdb::Vec3I& triangle = polygons.triangle(i); verts(itri++) = triangle[0]; verts(itri++) = triangle[1]; verts(itri++) = triangle[2]; } } TIMING_LOG("Get Quad and Tri Verts"); GEO_PolyCounts sizelist; if (nquads) sizelist.append(4, nquads); if (ntris) sizelist.append(3, ntris); if (buildpolysoup) GU_PrimPolySoup::build(&detail, startpt, npoints, sizelist, verts.array()); else GU_PrimPoly::buildBlock(&detail, startpt, npoints, sizelist, verts.array()); TIMING_LOG("Build Polys"); } namespace { class gu_VDBNormalsParallel { public: 
gu_VDBNormalsParallel(GA_Attribute *p, GA_Attribute *n, const GU_PrimVDB &vdb)
        : myP(p)
        , myN(n)
        , myVDB(vdb)
    {}

    void operator()(const GA_SplittableRange &r) const
    {
        UT_Interrupt *boss = UTgetInterrupt();
        GA_ROPageHandleV3 positions(myP);
        GA_RWPageHandleV3 normals(myN);

        for (GA_PageIterator pit = r.beginPages(); !pit.atEnd(); ++pit) {
            if (boss->opInterrupt())
                break;

            const GA_Offset pagefirstoff = pit.getFirstOffsetInPage();
            positions.setPage(pagefirstoff);
            normals.setPage(pagefirstoff);

            GA_Offset start;
            GA_Offset end;
            for (GA_Iterator it = pit.begin(); it.blockAdvance(start, end); ) {
                // Evaluate normalized gradients for a contiguous block of
                // offsets in a single call.
                myVDB.evalGradients(&normals.value(start), 1,
                                    &positions.value(start), end - start,
                                    /*normalize*/true);
            }
        }
    }

private:
    GA_Attribute *const myP;
    GA_Attribute *const myN;
    const GU_PrimVDB &myVDB;
};

} // namespace

/// Convert this VDB primitive to polygons (or a polygon soup) appended to
/// dst_geo, meshing at the iso value parms.myOffset with the given
/// adaptivity. Sets success accordingly and returns the first new primitive,
/// or nullptr on interruption, meshing failure, or an empty result.
GEO_Primitive *
GU_PrimVDB::convertToPoly(
        GEO_Detail &dst_geo,
        GU_ConvertParms &parms,
        fpreal adaptivity,
        bool polysoup,
        bool &success) const
{
    using namespace openvdb;

    UT_AutoInterrupt progress("Convert VDB to Polygons");
    // Marks the ranges of points/primitives created below.
    GA_Detail::OffsetMarker marker(dst_geo);
    bool verbose = false;

    success = false;

    try {
        tools::VolumeToMesh mesher(parms.myOffset, adaptivity);
        UTvdbProcessTypedGridScalar(getStorageType(), getGrid(), mesher);
        if (progress.wasInterrupted())
            return nullptr;
        guCopyMesh(dst_geo, mesher, polysoup, verbose);
        if (progress.wasInterrupted())
            return nullptr;
    } catch (std::exception& /*e*/) {
        // Meshing failed (e.g. unsupported grid type): report no output.
        return nullptr;
    }

    GA_Range pointrange(marker.pointRange());
    GA_Range primitiverange(marker.primitiveRange());
    GUconvertCopySingleVertexPrimAttribsAndGroups(
            parms, *getParent(), getMapOffset(), dst_geo,
            primitiverange, pointrange);
    if (progress.wasInterrupted())
        return nullptr;

    // If there was already a point normal attribute, we should compute normals
    // to avoid getting zero default values for the new polygons.
GA_RWAttributeRef normal_ref = dst_geo.findNormalAttribute(GA_ATTRIB_POINT);
    if (normal_ref.isValid() && !pointrange.isEmpty()) {
        // Fill the normals of the new points in parallel from VDB gradients.
        UTparallelFor(GA_SplittableRange(pointrange),
                      gu_VDBNormalsParallel(dst_geo.getP(),
                                            normal_ref.getAttribute(), *this));
        if (progress.wasInterrupted())
            return nullptr;
    }

    // At this point, we have succeeded, marker.numPrimitives() might be 0 if
    // we had an empty VDB.
    success = true;
    if (primitiverange.isEmpty())
        return nullptr;
    return dst_geo.getGEOPrimitive(marker.primitiveBegin());
}

namespace {

// RAII guard that destroys the given (temporary) VDB primitive, along with
// its points, when the guard goes out of scope.
class gu_DestroyVDBPrimGuard
{
public:
    gu_DestroyVDBPrimGuard(GU_PrimVDB &vdb)
        : myVDB(vdb)
    {
    }

    ~gu_DestroyVDBPrimGuard()
    {
        myVDB.getDetail().destroyPrimitive(myVDB, /*and_points*/true);
    }

private:
    GU_PrimVDB &myVDB;
};

} // anonymous namespace

/// Convert a native Houdini volume primitive into a polygon soup in dst_geo
/// by building a temporary VDB from it and meshing that VDB at the volume's
/// display iso value. The temporary VDB is destroyed before returning.
/*static*/ void
GU_PrimVDB::convertPrimVolumeToPolySoup(
        GU_Detail &dst_geo,
        const GEO_PrimVolume &src_vol)
{
    using namespace openvdb;

    UT_AutoInterrupt progress("Convert to Polygons");

    // Temporary VDB built from the source volume; destroyed by the guard.
    GU_PrimVDB &vdb = *buildFromPrimVolume(
            dst_geo, src_vol, nullptr, /*flood*/false,
            /*prune*/true, /*tol*/0, /*activate_inside*/true);
    gu_DestroyVDBPrimGuard destroy_guard(vdb);

    if (progress.wasInterrupted())
        return;

    try {
        BoolGrid::Ptr mask;
        if (src_vol.getBorder() != GEO_VOLUMEBORDER_CONSTANT) {
            // Non-constant border: pad the VDB's border voxels and restrict
            // meshing to the original volume bounds with a surface mask.
            Coord res;
            src_vol.getRes(res[0], res[1], res[2]);
            CoordBBox bbox(Coord(0, 0, 0), res.offsetBy(-1)); // inclusive

            if (bbox.hasVolume()) {
                vdb.expandBorderFromPrimVolume(src_vol, 4);
                if (progress.wasInterrupted())
                    return;
                mask = BoolGrid::create(/*background*/false);
                mask->setTransform(vdb.getGrid().transform().copy());
                mask->fill(bbox, /*foreground*/true);
            }
        }

        tools::VolumeToMesh mesher(src_vol.getVisIso());
        mesher.setSurfaceMask(mask);
        GEOvdbProcessTypedGridScalar(vdb, mesher);
        if (progress.wasInterrupted())
            return;

        guCopyMesh(dst_geo, mesher, /*polysoup*/true, /*verbose*/false);
        if (progress.wasInterrupted())
            return;
    } catch (std::exception& /*e*/) {
        // Best-effort conversion: swallow meshing errors and emit nothing.
    }
}

namespace // anonymous
{

// SFINAE helpers: select between overload pairs depending on whether ValueT
// is a scalar (arithmetic) type or a vector type.
#define SCALAR_RET(T) \
    typename SYS_EnableIf< SYS_IsArithmetic<T>::value, T >::type

#define NON_SCALAR_RET(T) \
    typename SYS_DisableIf< SYS_IsArithmetic<T>::value, T >::type

/// Houdini Volume wrapper to abstract multiple volumes with a consistent API.
/// TUPLE_SIZE scalar Houdini volumes represent one TUPLE_SIZE-component grid.
template <int TUPLE_SIZE>
class VoxelArrayVolume
{
public:
    static const int TupleSize = TUPLE_SIZE;

    VoxelArrayVolume(GU_Detail& geo): mGeo(geo)
    {
        // One scalar Houdini volume (and write handle) per tuple component.
        for (int i = 0; i < TUPLE_SIZE; i++) {
            mVol[i] = (GU_PrimVolume *)GU_PrimVolume::build(&mGeo);
            mHandle[i] = mVol[i]->getVoxelWriteHandle();
        }
    }

    // Resize every component volume to the same dimensions.
    void setSize(const openvdb::Coord &dim)
    {
        for (int i = 0; i < TUPLE_SIZE; i++) {
            mHandle[i]->size(dim[0], dim[1], dim[2]);
        }
    }

    // Scalar overload: only the single component volume to configure.
    template <class ValueT>
    void setVolumeOptions(
            bool is_sdf, const ValueT& background,
            GEO_VolumeVis vismode, fpreal iso, fpreal density,
            SCALAR_RET(ValueT)* /*dummy*/ = 0)
    {
        if (is_sdf) {
            mVol[0]->setBorder(GEO_VOLUMEBORDER_SDF, background);
            mVol[0]->setVisualization(vismode, iso, density);
        } else {
            mVol[0]->setBorder(GEO_VOLUMEBORDER_CONSTANT, background);
            mVol[0]->setVisualization(vismode, iso, density);
        }
    }

    // Vector overload: configure each component volume with the matching
    // component of the background value.
    template <class ValueT>
    void setVolumeOptions(
            bool is_sdf, const ValueT& background,
            GEO_VolumeVis vismode, fpreal iso, fpreal density,
            NON_SCALAR_RET(ValueT)* /*dummy*/ = 0)
    {
        if (is_sdf) {
            for (int i = 0; i < TUPLE_SIZE; i++) {
                mVol[i]->setBorder(GEO_VOLUMEBORDER_SDF, background[i]);
                mVol[i]->setVisualization(vismode, iso, density);
            }
        } else {
            for (int i = 0; i < TUPLE_SIZE; i++) {
                mVol[i]->setBorder(GEO_VOLUMEBORDER_CONSTANT, background[i]);
                mVol[i]->setVisualization(vismode, iso, density);
            }
        }
    }

    void setSpaceTransform(const GEO_PrimVolumeXform& s)
    {
        for (int i = 0; i < TUPLE_SIZE; i++)
            mVol[i]->setSpaceTransform(s);
    }

    int numTiles() const
    {
        // Since we create all volumes the same size, we can simply use the
        // first one.
return mHandle[0]->numTiles();
    }

    // Copy a leaf-aligned region of src into the HDK tile at tile_index.
    template<typename ConstAccessorT>
    void copyToAlignedTile(
            int tile_index,
            ConstAccessorT& src,
            const openvdb::Coord& src_origin);

    // Copy an arbitrarily-aligned region of src into the HDK tile at tile_index.
    template<typename ConstAccessorT>
    void copyToTile(
            int tile_index,
            ConstAccessorT& src,
            const openvdb::Coord& src_origin);

private: // methods

    using VoxelTileF = UT_VoxelTile<fpreal32>;

    // Scalar overload: collapse the single component tile to a constant.
    template <class ValueT>
    static void makeConstant_(VoxelTileF* tiles[TUPLE_SIZE], const ValueT& v,
                              SCALAR_RET(ValueT)* /*dummy*/ = 0)
    {
        tiles[0]->makeConstant(fpreal32(v));
    }

    // Vector overload: collapse each component tile to its constant component.
    template <class ValueT>
    static void makeConstant_(VoxelTileF* tiles[TUPLE_SIZE], const ValueT& v,
                              NON_SCALAR_RET(ValueT)* /*dummy*/ = 0)
    {
        for (int i = 0; i < TUPLE_SIZE; i++)
            tiles[i]->makeConstant(fpreal32(v[i]));
    }

    // Convert a local tile coordinate to a linear offset. This is used instead
    // of UT_VoxelTile::operator()() since we always decompress the tile first.
    SYS_FORCE_INLINE static int
    tileCoordToOffset(const VoxelTileF* tile, const openvdb::Coord& xyz)
    {
        UT_ASSERT_P(xyz[0] >= 0 && xyz[0] < tile->xres());
        UT_ASSERT_P(xyz[1] >= 0 && xyz[1] < tile->yres());
        UT_ASSERT_P(xyz[2] >= 0 && xyz[2] < tile->zres());
        return ((xyz[2] * tile->yres()) + xyz[1]) * tile->xres() + xyz[0];
    }

    // Set the value into tile coord xyz
    template <class ValueT>
    static void setTileVoxel(
            const openvdb::Coord& xyz,
            VoxelTileF* tile,
            fpreal32* rawData,
            const ValueT& v,
            int /*i*/,
            SCALAR_RET(ValueT)* /*dummy*/ = 0)
    {
        rawData[tileCoordToOffset(tile, xyz)] = v;
    }

    // Vector overload: write component i of v into tile coord xyz.
    template <class ValueT>
    static void setTileVoxel(
            const openvdb::Coord& xyz,
            VoxelTileF* tile,
            fpreal32* rawData,
            const ValueT& v,
            int i,
            NON_SCALAR_RET(ValueT)* /*dummy*/ = 0)
    {
        rawData[tileCoordToOffset(tile, xyz)] = v[i];
    }

    // Debug helper: compare the tile voxel at xyz against v (scalar overload).
    template <class ValueT>
    static bool compareVoxel(
            const openvdb::Coord& xyz,
            VoxelTileF* tile,
            fpreal32* rawData,
            const ValueT& v,
            int i,
            SCALAR_RET(ValueT) *dummy = 0)
    {
        UT_ASSERT_P(xyz[0] >= 0 && xyz[0] < tile->xres());
        UT_ASSERT_P(xyz[1] >= 0 && xyz[1] < tile->yres());
        UT_ASSERT_P(xyz[2] >= 0 && xyz[2] < tile->zres());
        float vox = (*tile)(xyz[0], xyz[1], xyz[2]);
        return openvdb::math::isApproxEqual<float>(vox, v);
    }

    // Debug helper: compare the tile voxel at xyz against v[i] (vector overload).
    template <class ValueT>
    static bool compareVoxel(
            const openvdb::Coord& xyz,
            VoxelTileF* tile,
            fpreal32* rawData,
            const ValueT& v,
            int i,
            NON_SCALAR_RET(ValueT) *dummy = 0)
    {
        UT_ASSERT_P(xyz[0] >= 0 && xyz[0] < tile->xres());
        UT_ASSERT_P(xyz[1] >= 0 && xyz[1] < tile->yres());
        UT_ASSERT_P(xyz[2] >= 0 && xyz[2] < tile->zres());
        float vox = (*tile)(xyz[0], xyz[1], xyz[2]);
        return openvdb::math::isApproxEqual<float>(vox, v[i]);
    }

    // Check if aligned VDB bbox region is constant
    template<typename ConstAccessorT, typename ValueType>
    static bool isAlignedConstantRegion_(
            ConstAccessorT& acc,
            const openvdb::Coord& beg,
            const openvdb::Coord& end,
            const ValueType& const_value)
    {
        using openvdb::math::isApproxEqual;

        using LeafNodeType = typename ConstAccessorT::LeafNodeT;
        const openvdb::Index DIM = LeafNodeType::DIM;

        // The smallest constant tile size in vdb is DIM and the
        // vdb-leaf/hdk-tile coords are aligned.
        openvdb::Coord ijk;
        for (ijk[0] = beg[0]; ijk[0] < end[0]; ijk[0] += DIM) {
            for (ijk[1] = beg[1]; ijk[1] < end[1]; ijk[1] += DIM) {
                for (ijk[2] = beg[2]; ijk[2] < end[2]; ijk[2] += DIM) {
                    // Any leaf node means the region is (potentially) varying.
                    if (acc.probeConstLeaf(ijk) != nullptr)
                        return false;
                    ValueType sampleValue = acc.getValue(ijk);
                    if (!isApproxEqual(const_value, sampleValue))
                        return false;
                }
            }
        }
        return true;
    }

    // Copy all data of the aligned leaf node to the tile at origin
    template<typename LeafType>
    static void copyAlignedLeafNode_(
            VoxelTileF* tile,
            int tuple_i,
            const openvdb::Coord& origin,
            const LeafType& leaf)
    {
        fpreal32* data = tile->rawData();
        for (openvdb::Index i = 0; i < LeafType::NUM_VALUES; ++i) {
            openvdb::Coord xyz = origin + LeafType::offsetToLocalCoord(i);
            setTileVoxel(xyz, tile, data, leaf.getValue(i), tuple_i);
        }
    }

    // Check if unaligned VDB bbox region is constant.
    // beg_a is beg rounded down to the nearest leaf origin.
template<typename ConstAccessorT, typename ValueType>
    static bool isConstantRegion_(
            ConstAccessorT& acc,
            const openvdb::Coord& beg,
            const openvdb::Coord& end,
            const openvdb::Coord& beg_a,
            const ValueType& const_value)
    {
        using openvdb::math::isApproxEqual;

        using LeafNodeType = typename ConstAccessorT::LeafNodeT;
        const openvdb::Index DIM = LeafNodeType::DIM;
        const openvdb::Index LOG2DIM = LeafNodeType::LOG2DIM;

        UT_ASSERT(beg_a[0] % DIM == 0);
        UT_ASSERT(beg_a[1] % DIM == 0);
        UT_ASSERT(beg_a[2] % DIM == 0);

        // Walk the region in leaf-sized steps starting from the leaf-aligned
        // origin beg_a.
        openvdb::Coord ijk;
        for (ijk[0] = beg_a[0]; ijk[0] < end[0]; ijk[0] += DIM) {
            for (ijk[1] = beg_a[1]; ijk[1] < end[1]; ijk[1] += DIM) {
                for (ijk[2] = beg_a[2]; ijk[2] < end[2]; ijk[2] += DIM) {
                    const LeafNodeType* leaf = acc.probeConstLeaf(ijk);
                    if (!leaf) {
                        // Non-leaf (tile) value: a single sample suffices.
                        ValueType sampleValue = acc.getValue(ijk);
                        if (!isApproxEqual(const_value, sampleValue))
                            return false;
                        continue;
                    }

                    // Else, we're a leaf node, determine if region is constant
                    openvdb::Coord leaf_beg = ijk;
                    openvdb::Coord leaf_end = ijk + openvdb::Coord(DIM, DIM, DIM);

                    // Clamp the leaf region to the tile bbox
                    leaf_beg.maxComponent(beg);
                    leaf_end.minComponent(end);

                    // Offset into local leaf coordinates
                    leaf_beg -= leaf->origin();
                    leaf_end -= leaf->origin();

                    // Scan the clamped sub-box via raw pointer strides into
                    // the leaf buffer (offset = (x << 2*LOG2DIM) +
                    // (y << LOG2DIM) + z).
                    const ValueType* s0 = &leaf->getValue(leaf_beg[2]);
                    for (openvdb::Int32 x = leaf_beg[0]; x < leaf_end[0]; ++x) {
                        const ValueType* s1 = s0 + (x<<2*LOG2DIM);
                        for (openvdb::Int32 y = leaf_beg[1]; y < leaf_end[1]; ++y) {
                            const ValueType* s2 = s1 + (y<<LOG2DIM);
                            for (openvdb::Int32 z = leaf_beg[2]; z < leaf_end[2]; ++z) {
                                if (!isApproxEqual(const_value, *s2))
                                    return false;
                                s2++;
                            }
                        }
                    }
                }
            }
        }
        return true;
    }

    // Copy the leaf node data at the global coord leaf_origin to the local
    // tile region [beg,end)
    template<typename LeafType>
    static void copyLeafNode_(
            VoxelTileF* tile,
            int tuple_i,
            const openvdb::Coord& beg,
            const openvdb::Coord& end,
            const openvdb::Coord& leaf_origin,
            const LeafType& leaf)
    {
        using openvdb::Coord;

        fpreal32* data = tile->rawData();

        // xyz walks the local tile coords, ijk the corresponding vdb coords.
        Coord xyz;
        Coord ijk = leaf_origin;
        for (xyz[2] = beg[2]; xyz[2] < end[2]; ++xyz[2], ++ijk[2]) {
            ijk[1] = leaf_origin[1];
            for (xyz[1] = beg[1]; xyz[1] < end[1]; ++xyz[1], ++ijk[1]) {
                ijk[0] = leaf_origin[0];
                for (xyz[0] = beg[0]; xyz[0] < end[0]; ++xyz[0], ++ijk[0]) {
                    setTileVoxel(xyz, tile, data, leaf.getValue(ijk), tuple_i);
                }
            }
        }
    }

    // Set the local tile region [beg,end) to the same value.
    template <class ValueT>
    static void setConstantRegion_(
            VoxelTileF* tile,
            int tuple_i,
            const openvdb::Coord& beg,
            const openvdb::Coord& end,
            const ValueT& value)
    {
        fpreal32* data = tile->rawData();
        openvdb::Coord xyz;
        for (xyz[2] = beg[2]; xyz[2] < end[2]; ++xyz[2]) {
            for (xyz[1] = beg[1]; xyz[1] < end[1]; ++xyz[1]) {
                for (xyz[0] = beg[0]; xyz[0] < end[0]; ++xyz[0]) {
                    setTileVoxel(xyz, tile, data, value, tuple_i);
                }
            }
        }
    }

    // Gather the per-tile copy parameters: the raw tile for each component,
    // the tile resolution, and the source bbox (in vdb index space) to copy.
    void getTileCopyData_(
            int tile_index,
            const openvdb::Coord& src_origin,
            VoxelTileF* tiles[TUPLE_SIZE],
            openvdb::Coord& res,
            openvdb::Coord& src_bbox_beg,
            openvdb::Coord& src_bbox_end)
    {
        for (int i = 0; i < TUPLE_SIZE; i++) {
            tiles[i] = mHandle[i]->getLinearTile(tile_index);
        }

        // Since all tiles are the same size, just use the first one.
        res[0] = tiles[0]->xres();
        res[1] = tiles[0]->yres();
        res[2] = tiles[0]->zres();

        // Define the inclusive coordinate range, in vdb index space.
        // ie. The source bounding box that we will copy from.
        // NOTE: All tiles are the same size, so just use the first handle.
        openvdb::Coord dst;
        mHandle[0]->linearTileToXYZ(tile_index, dst.x(), dst.y(), dst.z());
        dst.x() *= TILESIZE;
        dst.y() *= TILESIZE;
        dst.z() *= TILESIZE;
        src_bbox_beg = src_origin + dst;
        src_bbox_end = src_bbox_beg + res;
    }

private: // data

    GU_Detail& mGeo;
    GU_PrimVolume *mVol[TUPLE_SIZE];
    UT_VoxelArrayWriteHandleF mHandle[TUPLE_SIZE];
};

// Copy the vdb data to the current tile. Assumes full tiles.
template<int TUPLE_SIZE>
template<typename ConstAccessorT>
inline void
VoxelArrayVolume<TUPLE_SIZE>::copyToAlignedTile(
        int tile_index,
        ConstAccessorT &acc,
        const openvdb::Coord& src_origin)
{
    using openvdb::Coord;
    using openvdb::CoordBBox;

    using ValueType = typename ConstAccessorT::ValueType;
    using LeafNodeType = typename ConstAccessorT::LeafNodeT;
    const openvdb::Index LEAF_DIM = LeafNodeType::DIM;

    VoxelTileF* tiles[TUPLE_SIZE];
    Coord tile_res;
    Coord beg;
    Coord end;

    getTileCopyData_(tile_index, src_origin, tiles, tile_res, beg, end);

    ValueType const_value = acc.getValue(beg);

    if (isAlignedConstantRegion_(acc, beg, end, const_value)) {
        // Entire tile holds a single value: store it compressed.
        makeConstant_(tiles, const_value);

    } else { // populate dense tile

        for (int tuple_i = 0; tuple_i < TUPLE_SIZE; tuple_i++) {

            VoxelTileF* tile = tiles[tuple_i];
            tile->makeRawUninitialized();

            // Walk the tile in leaf-sized chunks (aligned by precondition).
            Coord ijk;
            for (ijk[0] = beg[0]; ijk[0] < end[0]; ijk[0] += LEAF_DIM) {
                for (ijk[1] = beg[1]; ijk[1] < end[1]; ijk[1] += LEAF_DIM) {
                    for (ijk[2] = beg[2]; ijk[2] < end[2]; ijk[2] += LEAF_DIM) {

                        Coord tile_beg = ijk - beg; // local tile coord
                        Coord tile_end = tile_beg.offsetBy(LEAF_DIM);

                        const LeafNodeType* leaf = acc.probeConstLeaf(ijk);
                        if (leaf != nullptr) {
                            copyAlignedLeafNode_(
                                    tile, tuple_i, tile_beg, *leaf);
                        } else {
                            setConstantRegion_(
                                    tile, tuple_i, tile_beg, tile_end,
                                    acc.getValue(ijk));
                        }
                    }
                }
            }
        }

    } // end populate dense tile
}

// Copy the vdb data to the current tile when the source bbox is not
// leaf-aligned: leaf regions are clamped to the tile before copying.
template<int TUPLE_SIZE>
template<typename ConstAccessorT>
inline void
VoxelArrayVolume<TUPLE_SIZE>::copyToTile(
        int tile_index,
        ConstAccessorT &acc,
        const openvdb::Coord& src_origin)
{
    using openvdb::Coord;
    using openvdb::CoordBBox;

    using ValueType = typename ConstAccessorT::ValueType;
    using LeafNodeType = typename ConstAccessorT::LeafNodeT;
    const openvdb::Index DIM = LeafNodeType::DIM;

    VoxelTileF* tiles[TUPLE_SIZE];
    Coord tile_res;
    Coord beg;
    Coord end;

    getTileCopyData_(tile_index, src_origin, tiles, tile_res, beg, end);

    // a_beg is beg rounded down to the nearest leaf origin
    Coord a_beg(beg[0]&~(DIM-1), beg[1]&~(DIM-1), beg[2]&~(DIM-1));

    ValueType const_value = acc.getValue(a_beg);

    if (isConstantRegion_(acc, beg, end, a_beg, const_value)) {
        makeConstant_(tiles, const_value);
    } else {
        for (int tuple_i = 0; tuple_i < TUPLE_SIZE; tuple_i++) {
            VoxelTileF* tile = tiles[tuple_i];
            tile->makeRawUninitialized();

            Coord ijk;
            for (ijk[0] = a_beg[0]; ijk[0] < end[0]; ijk[0] += DIM) {
                for (ijk[1] = a_beg[1]; ijk[1] < end[1]; ijk[1] += DIM) {
                    for (ijk[2] = a_beg[2]; ijk[2] < end[2]; ijk[2] += DIM) {

                        // Compute clamped local tile coord bbox
                        Coord leaf_beg = ijk;
                        Coord tile_beg = ijk - beg;
                        Coord tile_end = tile_beg.offsetBy(DIM);
                        for (int axis = 0; axis < 3; ++axis) {
                            if (tile_beg[axis] < 0) {
                                tile_beg[axis] = 0;
                                leaf_beg[axis] = beg[axis];
                            }
                            if (tile_end[axis] > tile_res[axis])
                                tile_end[axis] = tile_res[axis];
                        }

                        // Copy the region
                        const LeafNodeType* leaf = acc.probeConstLeaf(leaf_beg);
                        if (leaf != nullptr) {
                            copyLeafNode_(
                                    tile, tuple_i, tile_beg, tile_end,
                                    leaf_beg, *leaf);
                        } else {
                            setConstantRegion_(
                                    tile, tuple_i, tile_beg, tile_end,
                                    acc.getValue(ijk));
                        }
                    }
                }
            }
        }
    }

// Enable this to do slow code path verification
#if 0
    for (int tuple_i = 0; tuple_i < TUPLE_SIZE; ++tuple_i) {
        VoxelTileF* tile = tiles[tuple_i];
        fpreal32* data = tile->rawData();
        Coord xyz;
        for (xyz[2] = 0; xyz[2] < tile_res[2]; ++xyz[2]) {
            for (xyz[1] = 0; xyz[1] < tile_res[1]; ++xyz[1]) {
                for (xyz[0] = 0; xyz[0] < tile_res[0]; ++xyz[0]) {
                    Coord ijk = beg + xyz;
                    if (!compareVoxel(xyz, tile, data,
                                      acc.getValue(ijk), tuple_i)) {
                        UT_ASSERT(!"Voxels are different");
                        compareVoxel(xyz, tile, data,
                                     acc.getValue(ijk), tuple_i);
                    }
                }
            }
        }
    }
#endif
}

// Copies a VDB tree into a VoxelArrayVolume, one HDK tile per TBB work item.
// The bool template parameter selects the aligned (fast) or unaligned path.
template<typename TreeType, typename VolumeT, bool aligned>
class gu_SparseTreeCopy;

// Aligned specialization: uses copyToAlignedTile().
template<typename TreeType, typename VolumeT>
class gu_SparseTreeCopy<TreeType, VolumeT, /*aligned=*/true>
{
public:
    gu_SparseTreeCopy(
            const TreeType& tree,
            VolumeT& volume,
            const openvdb::Coord& src_origin,
            UT_AutoInterrupt& progress
        )
        : mVdbAcc(tree)
        , mVolume(volume)
        ,
mSrcOrigin(src_origin)
        , mProgress(progress)
    {
    }

    // Copy every HDK tile of the destination volume, optionally in parallel.
    void run(bool threaded = true)
    {
        tbb::blocked_range<int> range(0, mVolume.numTiles());
        if (threaded)
            tbb::parallel_for(range, *this);
        else
            (*this)(range);
    }

    void operator()(const tbb::blocked_range<int>& range) const
    {
        // bcnt wraps at 256, so the interrupt is only polled every 256 tiles.
        uint8 bcnt = 0;

        for (int i = range.begin(); i != range.end(); ++i) {
            mVolume.copyToAlignedTile(i, mVdbAcc, mSrcOrigin);
            if (!bcnt++ && mProgress.wasInterrupted())
                return;
        }
    }

private:
    openvdb::tree::ValueAccessor<const TreeType> mVdbAcc;
    VolumeT& mVolume;
    const openvdb::Coord mSrcOrigin;
    UT_AutoInterrupt& mProgress;
};

// Unaligned specialization: uses the slower, clamping copyToTile() path.
template<typename TreeType, typename VolumeT>
class gu_SparseTreeCopy<TreeType, VolumeT, /*aligned=*/false>
{
public:
    gu_SparseTreeCopy(
            const TreeType& tree,
            VolumeT& volume,
            const openvdb::Coord& src_origin,
            UT_AutoInterrupt& progress
        )
        : mVdbAcc(tree)
        , mVolume(volume)
        , mSrcOrigin(src_origin)
        , mProgress(progress)
    {
    }

    void run(bool threaded = true)
    {
        tbb::blocked_range<int> range(0, mVolume.numTiles());
        if (threaded)
            tbb::parallel_for(range, *this);
        else
            (*this)(range);
    }

    void operator()(const tbb::blocked_range<int>& range) const
    {
        // bcnt wraps at 256, so the interrupt is only polled every 256 tiles.
        uint8 bcnt = 0;
        for (int i = range.begin(); i != range.end(); ++i) {
            mVolume.copyToTile(i, mVdbAcc, mSrcOrigin);
            if (!bcnt++ && mProgress.wasInterrupted())
                return;
        }
    }

private:
    openvdb::tree::ValueAccessor<const TreeType> mVdbAcc;
    VolumeT& mVolume;
    const openvdb::Coord mSrcOrigin;
    UT_AutoInterrupt& mProgress;
};

/// @brief Converts an OpenVDB grid into one/three Houdini Volume.
/// @note Vector grids are converted into three Houdini Volumes.
template <class VolumeT>
class gu_ConvertFromVDB
{
public:
    gu_ConvertFromVDB(
            GEO_Detail& dst_geo,
            const GU_PrimVDB& src_vdb,
            bool split_disjoint_volumes,
            UT_AutoInterrupt& progress)
        : mDstGeo(static_cast<GU_Detail&>(dst_geo))
        , mSrcVDB(src_vdb)
        , mSplitDisjoint(split_disjoint_volumes)
        , mProgress(progress)
    {
    }

    // Grid-type dispatch entry point (invoked by GEOvdbProcessTypedGrid*).
    template<typename GridT>
    void operator()(const GridT &grid)
    {
        if (mSplitDisjoint) {
            vdbToDisjointVolumes(grid);
        } else {
            using LeafNodeType = typename GridT::TreeType::LeafNodeType;
            const openvdb::Index LEAF_DIM = LeafNodeType::DIM;

            VolumeT volume(mDstGeo);
            openvdb::CoordBBox bbox(grid.evalActiveVoxelBoundingBox());
            // The fast aligned copy path is usable only when the active
            // bounding box begins and ends exactly on leaf-node boundaries.
            bool aligned = (   (bbox.min()[0] % LEAF_DIM) == 0
                            && (bbox.min()[1] % LEAF_DIM) == 0
                            && (bbox.min()[2] % LEAF_DIM) == 0
                            && ((bbox.max()[0]+1) % LEAF_DIM) == 0
                            && ((bbox.max()[1]+1) % LEAF_DIM) == 0
                            && ((bbox.max()[2]+1) % LEAF_DIM) == 0);
            vdbToVolume(grid, bbox, volume, aligned);
        }
    }

    // Component index (0..TupleSize-1) recorded for each volume created.
    const UT_IntArray& components() const
    {
        return mDstComponents;
    }

private:
    template<typename GridType>
    void vdbToVolume(const GridType& grid, const openvdb::CoordBBox& bbox,
                     VolumeT& volume, bool aligned);

    template<typename GridType>
    void vdbToDisjointVolumes(const GridType& grid);

private:
    GU_Detail&          mDstGeo;
    UT_IntArray         mDstComponents;
    const GU_PrimVDB&   mSrcVDB;
    bool                mSplitDisjoint;
    UT_AutoInterrupt&   mProgress;
};

// Copy the grid's bbox into volume at (0,0,0)
template<typename VolumeT>
template<typename GridType>
void
gu_ConvertFromVDB<VolumeT>::vdbToVolume(
    const GridType& grid,
    const openvdb::CoordBBox& bbox,
    VolumeT& vol,
    bool aligned)
{
    using LeafNodeType = typename GridType::TreeType::LeafNodeType;

    // Creating a Houdini volume with a zero bbox seems to break the transform.
    // (probably related to the bbox derived 'local space')
    openvdb::CoordBBox space_bbox = bbox;
    if (space_bbox.empty())
        space_bbox.resetToCube(openvdb::Coord(0, 0, 0), 1);
    vol.setSize(space_bbox.dim());

    vol.setVolumeOptions(mSrcVDB.isSDF(), grid.background(),
                         mSrcVDB.getVisualization(), mSrcVDB.getVisIso(),
                         mSrcVDB.getVisDensity());
    vol.setSpaceTransform(mSrcVDB.getSpaceTransform(UTvdbConvert(space_bbox)));

    for (int i = 0; i < VolumeT::TupleSize; i++)
        mDstComponents.append(i);

    // Copy the VDB bbox data to voxel array coord (0,0,0).
    SYS_STATIC_ASSERT(LeafNodeType::DIM <= TILESIZE);
    SYS_STATIC_ASSERT((TILESIZE % LeafNodeType::DIM) == 0);
    if (aligned) {
        gu_SparseTreeCopy<typename GridType::TreeType, VolumeT, true>
            copy(grid.tree(), vol, space_bbox.min(), mProgress);
        copy.run();
    } else {
        gu_SparseTreeCopy<typename GridType::TreeType, VolumeT, false>
            copy(grid.tree(), vol, space_bbox.min(), mProgress);
        copy.run();
    }
}

// Cluster the grid's depth-1 internal nodes into disjoint regions (nodes
// whose padded bounding boxes overlap are merged) and emit one Houdini
// volume per region.
template<typename VolumeT>
template<typename GridType>
void
gu_ConvertFromVDB<VolumeT>::vdbToDisjointVolumes(const GridType& grid)
{
    using TreeType = typename GridType::TreeType;
    using NodeType = typename TreeType::RootNodeType::ChildNodeType;

    // Collect the depth-1 internal nodes.
    std::vector<const NodeType*> nodes;
    typename TreeType::NodeCIter iter = grid.tree().cbeginNode();
    iter.setMaxDepth(1);
    iter.setMinDepth(1);
    for (; iter; ++iter) {
        const NodeType* node = nullptr;
        iter.template getNode<const NodeType>(node);
        if (node) nodes.push_back(node);
    }

    std::vector<openvdb::CoordBBox> nodeBBox(nodes.size());
    for (size_t n = 0, N = nodes.size(); n < N; ++n) {
        nodes[n]->evalActiveBoundingBox(nodeBBox[n], false);
    }

    openvdb::CoordBBox regionA, regionB;

    // Padding of two leaf widths used to decide whether regions touch.
    const int searchDist = int(GridType::TreeType::LeafNodeType::DIM) << 1;

    for (size_t n = 0, N = nodes.size(); n < N; ++n) {
        if (!nodes[n]) continue;

        openvdb::CoordBBox& bbox = nodeBBox[n];

        regionA = bbox;
        regionA.max().offset(searchDist);

        // Greedily absorb any remaining node whose padded bbox overlaps the
        // growing region; repeat until the region stops expanding.
        bool expanded = true;
        while (expanded) {
            expanded = false;
            for (size_t i = (n + 1); i < N; ++i) {
                if (!nodes[i]) continue;

                regionB = nodeBBox[i];
                regionB.max().offset(searchDist);

                if (regionA.hasOverlap(regionB)) {
                    nodes[i] = nullptr; // consumed by this region
                    expanded = true;

                    bbox.expand(nodeBBox[i]);

                    regionA = bbox;
                    regionA.max().offset(searchDist);
                }
            }
        }

        VolumeT volume(mDstGeo);
        vdbToVolume(grid, bbox, volume, /*aligned*/true);
    }
}

} // namespace anonymous

// Convert this VDB primitive to one (scalar) or three (vector component)
// Houdini volume primitives in dst_geo; returns the first created primitive
// or nullptr on failure/interruption.
GEO_Primitive *
GU_PrimVDB::convertToPrimVolume(
    GEO_Detail &dst_geo,
    GU_ConvertParms &parms,
    bool split_disjoint_volumes) const
{
    using namespace openvdb;

    UT_AutoInterrupt progress("Convert VDB to Volume");
    GA_Detail::OffsetMarker marker(dst_geo);
    UT_IntArray dst_components;

    bool processed = false;
    { // Try to convert scalar grid
        gu_ConvertFromVDB< VoxelArrayVolume<1> >
            converter(dst_geo, *this, split_disjoint_volumes, progress);
        processed = GEOvdbProcessTypedGridScalar(*this, converter);
    }
    if (!processed) { // Try to convert vector grid
        gu_ConvertFromVDB< VoxelArrayVolume<3> >
            converter(dst_geo, *this, split_disjoint_volumes, progress);
        processed = GEOvdbProcessTypedGridVec3(*this, converter);
        dst_components = converter.components();
    }

    // Copy attributes from source to dest primitives
    GA_Range pointrange(marker.pointRange());
    GA_Range primitiverange(marker.primitiveRange());
    if (!processed || primitiverange.isEmpty() || progress.wasInterrupted())
        return nullptr;
    GUconvertCopySingleVertexPrimAttribsAndGroups(
        parms, *getParent(), getMapOffset(), dst_geo,
        primitiverange, pointrange);

    // Handle the name attribute if needed: vector components get the source
    // name with a ".x"/".y"/".z" style suffix appended.
    if (dst_components.entries() > 0) {
        GA_ROHandleS src_name(getParent(), GA_ATTRIB_PRIMITIVE, "name");
        GA_RWHandleS dst_name(&dst_geo, GA_ATTRIB_PRIMITIVE, "name");

        if (src_name.isValid() && dst_name.isValid()) {
            const UT_String name(src_name.get(getMapOffset()));
            if (name.isstring()) {
                UT_String full_name(name);
                int last = name.length() + 1;
                const char component[] = { 'x', 'y', 'z', 'w' };

                GA_Size nprimitives = primitiverange.getEntries();
                UT_ASSERT(dst_components.entries() == nprimitives);
                full_name += ".x";
                for (int j = 0; j <
nprimitives; j++) { int i = dst_components(j); if (i < 4) full_name(last) = component[i]; else full_name.sprintf("%s%d", (const char *)name, i); // NOTE: This assumes that the offsets are contiguous, // which is only valid if the converter didn't // delete anything. dst_name.set(marker.primitiveBegin() + GA_Offset(i), full_name); } } } } return dst_geo.getGEOPrimitive(marker.primitiveBegin()); } GEO_Primitive * GU_PrimVDB::convert(GU_ConvertParms &parms, GA_PointGroup *usedpts) { bool success = false; GEO_Primitive * prim; prim = convertToNewPrim(*getParent(), parms, /*adaptivity*/0, /*sparse*/false, success); if (success) { if (usedpts) addPointRefToGroup(*usedpts); GA_PrimitiveGroup *group = parms.getDeletePrimitives(); if (group) group->add(this); else getParent()->deletePrimitive(*this, !usedpts); } return prim; } /*static*/ void GU_PrimVDB::convertVolumesToVDBs( GU_Detail &dst_geo, const GU_Detail &src_geo, GU_ConvertParms &parms, bool flood_sdf, bool prune, fpreal tolerance, bool keep_original, bool activate_inside_sdf) { UT_AutoInterrupt progress("Convert"); const GA_ROHandleS nameHandle(&src_geo, GA_ATTRIB_PRIMITIVE, "name"); GEO_Primitive *prim; GEO_Primitive *next; GA_FOR_SAFE_GROUP_PRIMITIVES(&src_geo, parms.primGroup, prim, next) { if (progress.wasInterrupted()) break; if (prim->getTypeId() != GEO_PRIMVOLUME) continue; GEO_PrimVolume *vol = UTverify_cast<GEO_PrimVolume*>(prim); GA_Offset voloff = vol->getMapOffset(); GA_Detail::OffsetMarker marker(dst_geo); // Get the volume's name, if it has one. char const * const volname = (nameHandle.isValid() ? 
nameHandle.get(voloff) : nullptr);

        GU_PrimVDB *new_prim;
        new_prim = GU_PrimVDB::buildFromPrimVolume(
            dst_geo, *vol, volname, flood_sdf, prune, tolerance,
            activate_inside_sdf);
        if (!new_prim || progress.wasInterrupted())
            break;

        GA_Range pointrange(marker.pointRange());
        GA_Range primitiverange(marker.primitiveRange());
        GUconvertCopySingleVertexPrimAttribsAndGroups(
            parms, src_geo, voloff, dst_geo, primitiverange, pointrange);

        // In-place conversion: drop the source volume once converted.
        if (!keep_original && (&dst_geo == &src_geo))
            dst_geo.deletePrimitive(*vol, /*and points*/true);
    }
}

// Convert every VDB primitive in src_geo (honoring parms.primGroup) into
// Houdini geometry in dst_geo, optionally splitting disjoint volumes and
// optionally deleting the source VDB when converting in place.
/*static*/ void
GU_PrimVDB::convertVDBs(
    GU_Detail &dst_geo,
    const GU_Detail &src_geo,
    GU_ConvertParms &parms,
    fpreal adaptivity,
    bool keep_original,
    bool split_disjoint_volumes)
{
    UT_AutoInterrupt progress("Convert");

    GEO_Primitive *prim;
    GEO_Primitive *next;
    GA_FOR_SAFE_GROUP_PRIMITIVES(&src_geo, parms.primGroup, prim, next) {
        if (progress.wasInterrupted())
            break;

        GU_PrimVDB *vdb = dynamic_cast<GU_PrimVDB*>(prim);
        if (vdb == nullptr)
            continue;

        bool success = false;
        (void) vdb->convertToNewPrim(dst_geo, parms, adaptivity,
                                     split_disjoint_volumes, success);
        if (success && !keep_original && (&dst_geo == &src_geo))
            dst_geo.deletePrimitive(*vdb, /*and points*/true);
    }
}

// Backward-compatible overload: never splits disjoint volumes.
/*static*/ void
GU_PrimVDB::convertVDBs(
    GU_Detail &dst_geo,
    const GU_Detail &src_geo,
    GU_ConvertParms &parms,
    fpreal adaptivity,
    bool keep_original)
{
    convertVDBs(dst_geo, src_geo, parms, adaptivity, keep_original, false);
}

void
GU_PrimVDB::normal(NormalComp& /*output*/) const
{
    // No need here.
}


////////////////////////////////////////


namespace {

// Trait: true for scalar metadata types; specialized to false below for the
// vector-valued metadata types.
template <typename T> struct IsScalarMeta
{ HBOOST_STATIC_CONSTANT(bool, value = true); };

#define DECLARE_VECTOR(METADATA_T) \
    template <> struct IsScalarMeta<METADATA_T> \
    { HBOOST_STATIC_CONSTANT(bool, value = false); }; \
    /**/
DECLARE_VECTOR(openvdb::Vec2IMetadata)
DECLARE_VECTOR(openvdb::Vec2SMetadata)
DECLARE_VECTOR(openvdb::Vec2DMetadata)
DECLARE_VECTOR(openvdb::Vec3IMetadata)
DECLARE_VECTOR(openvdb::Vec3SMetadata)
DECLARE_VECTOR(openvdb::Vec3DMetadata)
DECLARE_VECTOR(openvdb::Vec4IMetadata)
DECLARE_VECTOR(openvdb::Vec4SMetadata)
DECLARE_VECTOR(openvdb::Vec4DMetadata)
#undef DECLARE_VECTOR

// Extract component I of a metadata value (generic/vector case).
template<typename T, typename MetadataT, int I, typename ENABLE = void>
struct MetaTuple
{
    static T get(const MetadataT& meta) {
        return meta.value()[I];
    }
};

// Scalar case: only component 0 is meaningful.
template<typename T, typename MetadataT, int I>
struct MetaTuple<T, MetadataT, I,
    typename SYS_EnableIf< IsScalarMeta<MetadataT>::value >::type>
{
    static T get(const MetadataT& meta) {
        UT_ASSERT(I == 0);
        return meta.value();
    }
};

// String case: expose the value as a C string.
template<int I>
struct MetaTuple<const char*, openvdb::StringMetadata, I>
{
    static const char* get(const openvdb::StringMetadata& meta) {
        UT_ASSERT(I == 0);
        return meta.value().c_str();
    }
};

// Maps a metadata type to the GA storage, tuple type and tuple size used to
// mirror it as a Houdini attribute.
template <typename MetadataT> struct MetaAttr;

#define META_ATTR(METADATA_T, STORAGE, TUPLE_T, TUPLE_SIZE) \
    template <> \
    struct MetaAttr<METADATA_T> { \
        using TupleT = TUPLE_T; \
        using RWHandleT = GA_HandleT<TupleT>::RWType; \
        static const int theTupleSize = TUPLE_SIZE; \
        static const GA_Storage theStorage = STORAGE; \
    }; \
    /**/

META_ATTR(openvdb::BoolMetadata,   GA_STORE_INT8,   int8,     1)
META_ATTR(openvdb::FloatMetadata,  GA_STORE_REAL32, fpreal32, 1)
META_ATTR(openvdb::DoubleMetadata, GA_STORE_REAL64, fpreal64, 1)
META_ATTR(openvdb::Int32Metadata,  GA_STORE_INT32,  int32,    1)
META_ATTR(openvdb::Int64Metadata,  GA_STORE_INT64,  int64,    1)
//META_ATTR(openvdb::StringMetadata, GA_STORE_STRING, const char*, 1)
META_ATTR(openvdb::Vec2IMetadata,  GA_STORE_INT32,  int32,    2)
META_ATTR(openvdb::Vec2SMetadata,  GA_STORE_REAL32, fpreal32, 2)
META_ATTR(openvdb::Vec2DMetadata,  GA_STORE_REAL64, fpreal64, 2)
META_ATTR(openvdb::Vec3IMetadata,  GA_STORE_INT32,  int32,    3)
META_ATTR(openvdb::Vec3SMetadata,  GA_STORE_REAL32, fpreal32, 3)
META_ATTR(openvdb::Vec3DMetadata,  GA_STORE_REAL64, fpreal64, 3)
META_ATTR(openvdb::Vec4IMetadata,  GA_STORE_INT32,  int32,    4)
META_ATTR(openvdb::Vec4SMetadata,  GA_STORE_REAL32, fpreal32, 4)
META_ATTR(openvdb::Vec4DMetadata,  GA_STORE_REAL64, fpreal64, 4)
META_ATTR(openvdb::Mat4SMetadata,  GA_STORE_REAL32, fpreal32, 16)
META_ATTR(openvdb::Mat4DMetadata,  GA_STORE_REAL64, fpreal64, 16)

#undef META_ATTR

// Functor for setAttr()
typedef hboost::function<
    void (GEO_Detail&, GA_AttributeOwner, GA_Offset, const char*,
          const openvdb::Metadata&)>
    AttrSettor;

// Mirror one scalar/vector metadata value as a numeric attribute on the
// given element, creating the attribute if necessary.
template <typename MetadataT>
static void
setAttr(GEO_Detail& geo, GA_AttributeOwner owner, GA_Offset elem,
    const char* name, const openvdb::Metadata& meta_base)
{
    using MetaAttrT = MetaAttr<MetadataT>;
    using RWHandleT = typename MetaAttrT::RWHandleT;
    using TupleT = typename MetaAttrT::TupleT;

    /// @todo If there is an existing attribute with the given name but
    /// a different type, this will replace the old attribute with a new one.
    /// See GA_ReuseStrategy for alternative behaviors.
    GA_RWAttributeRef attrRef = geo.addTuple(MetaAttrT::theStorage, owner,
        name, MetaAttrT::theTupleSize);
    if (attrRef.isInvalid()) return;

    RWHandleT handle(attrRef.getAttribute());

    const MetadataT& meta = static_cast<const MetadataT&>(meta_base);
    // Deliberate fallthrough: components are set from highest down to 0.
    switch (MetaAttrT::theTupleSize) {
    case 4: handle.set(elem, 3, MetaTuple<TupleT,MetadataT,3>::get(meta));
        SYS_FALLTHROUGH;
    case 3: handle.set(elem, 2, MetaTuple<TupleT,MetadataT,2>::get(meta));
        SYS_FALLTHROUGH;
    case 2: handle.set(elem, 1, MetaTuple<TupleT,MetadataT,1>::get(meta));
        SYS_FALLTHROUGH;
    case 1: handle.set(elem, 0, MetaTuple<TupleT,MetadataT,0>::get(meta));
    }
    UT_ASSERT(MetaAttrT::theTupleSize >= 1 && MetaAttrT::theTupleSize <= 4);
}

/// for Houdini 12.1
// Mirror a string metadata value as a string attribute.
template <typename MetadataT>
static void
setStrAttr(GEO_Detail& geo, GA_AttributeOwner owner, GA_Offset elem,
    const char* name, const openvdb::Metadata& meta_base)
{
    GA_RWAttributeRef attrRef = geo.addStringTuple(owner, name, 1);
    if (attrRef.isInvalid()) return;

    GA_RWHandleS handle(attrRef.getAttribute());

    const MetadataT& meta = static_cast<const MetadataT&>(meta_base);
    handle.set(elem, 0, MetaTuple<const char*, MetadataT, 0>::get(meta));
}

// Mirror a 4x4 matrix metadata value as a 16-component numeric attribute.
template <typename MetadataT>
static void
setMatAttr(GEO_Detail& geo, GA_AttributeOwner owner, GA_Offset elem,
    const char* name, const openvdb::Metadata& meta_base)
{
    using MetaAttrT = MetaAttr<MetadataT>;
    using RWHandleT = typename MetaAttrT::RWHandleT;
    using TupleT = typename MetaAttrT::TupleT;

    GA_RWAttributeRef attrRef = geo.addTuple(MetaAttrT::theStorage, owner,
        name, MetaAttrT::theTupleSize);
    if (attrRef.isInvalid()) return;

    RWHandleT handle(attrRef.getAttribute());

    const MetadataT& meta = static_cast<const MetadataT&>(meta_base);

    auto && value = meta.value();
    for (int i = 0; i < MetaAttrT::theTupleSize; i++) {
        handle.set(elem, i, value.asPointer()[i]);
    }
}

// Registry mapping metadata type names to the attribute-settor functions
// declared above.
class MetaToAttrMap : public std::map<std::string, AttrSettor>
{
public:
    MetaToAttrMap()
    {
        using namespace openvdb;
        // Construct a mapping from OpenVDB metadata types to
        // functions that create attributes of corresponding types.
        (*this)[BoolMetadata::staticTypeName()]   = &setAttr<BoolMetadata>;
        (*this)[FloatMetadata::staticTypeName()]  = &setAttr<FloatMetadata>;
        (*this)[DoubleMetadata::staticTypeName()] = &setAttr<DoubleMetadata>;
        (*this)[Int32Metadata::staticTypeName()]  = &setAttr<Int32Metadata>;
        (*this)[Int64Metadata::staticTypeName()]  = &setAttr<Int64Metadata>;
        (*this)[StringMetadata::staticTypeName()] = &setStrAttr<StringMetadata>;
        (*this)[Vec2IMetadata::staticTypeName()]  = &setAttr<Vec2IMetadata>;
        (*this)[Vec2SMetadata::staticTypeName()]  = &setAttr<Vec2SMetadata>;
        (*this)[Vec2DMetadata::staticTypeName()]  = &setAttr<Vec2DMetadata>;
        (*this)[Vec3IMetadata::staticTypeName()]  = &setAttr<Vec3IMetadata>;
        (*this)[Vec3SMetadata::staticTypeName()]  = &setAttr<Vec3SMetadata>;
        (*this)[Vec3DMetadata::staticTypeName()]  = &setAttr<Vec3DMetadata>;
        (*this)[Vec4IMetadata::staticTypeName()]  = &setAttr<Vec4IMetadata>;
        (*this)[Vec4SMetadata::staticTypeName()]  = &setAttr<Vec4SMetadata>;
        (*this)[Vec4DMetadata::staticTypeName()]  = &setAttr<Vec4DMetadata>;
        (*this)[Mat4SMetadata::staticTypeName()]  = &setMatAttr<Mat4SMetadata>;
        (*this)[Mat4DMetadata::staticTypeName()]  = &setMatAttr<Mat4DMetadata>;
    }
};


static UT_SingletonWithLock<MetaToAttrMap> sMetaToAttrMap;

} // unnamed namespace


////////////////////////////////////////


// Push this primitive's grid metadata out to intrinsic attributes.
void
GU_PrimVDB::syncAttrsFromMetadata()
{
    if (GEO_Detail* detail = this->getParent()) {
        createGridAttrsFromMetadata(*this, this->getConstGrid(), *detail);
    }
}

// Primitive-level convenience wrapper for createAttrsFromMetadataAdapter().
void
GU_PrimVDB::createGridAttrsFromMetadataAdapter(
    const GEO_PrimVDB& prim,
    const void* gridPtr,
    GEO_Detail& aGdp)
{
    createAttrsFromMetadataAdapter(
        GA_ATTRIB_PRIMITIVE, prim.getMapOffset(), gridPtr, aGdp);
}

// For each supported metadata entry in the given MetaMap, create or update
// a matching Houdini attribute on the given element.
void
GU_PrimVDB::createAttrsFromMetadataAdapter(
    GA_AttributeOwner owner,
    GA_Offset element,
    const void* meta_map_ptr,
    GEO_Detail& geo)
{
    // meta_map_ptr is assumed to point to an openvdb::vX_Y_Z::MetaMap, for
    // some version X.Y.Z of OpenVDB that may be newer than the one with which
    // libHoudiniGEO.so was built. This is safe provided that MetaMap and
    // its member objects are ABI-compatible between the two OpenVDB versions.
    const openvdb::MetaMap& meta_map =
        *static_cast<const openvdb::MetaMap*>(meta_map_ptr);

    for (openvdb::MetaMap::ConstMetaIterator metaIt = meta_map.beginMeta(),
        metaEnd = meta_map.endMeta(); metaIt != metaEnd; ++metaIt) {

        if (openvdb::Metadata::Ptr meta = metaIt->second) {
            std::string name = metaIt->first;

            UT_String str(name);
            str.toLower();
            str.forceValidVariableName();
            UT_String prefixed(name);
            prefixed.prepend("vdb_");
            // Intrinsic metadata is already exposed via primitive intrinsics.
            if (isIntrinsicMetadata(prefixed))
                continue;

            // If this grid's name is empty and a "name" attribute
            // doesn't already exist, don't create one.
            if (str == "name"
                && meta->typeName() == openvdb::StringMetadata::staticTypeName()
                && meta->str().empty())
            {
                if (!geo.findAttribute(owner, name.c_str())) continue;
            }

            MetaToAttrMap::const_iterator creatorIt =
                sMetaToAttrMap->find(meta->typeName());
            if (creatorIt != sMetaToAttrMap->end()) {
                creatorIt->second(geo, owner, element, name.c_str(), *meta);
            } else {
                /// @todo Add warning:
                // std::string("discarded metadata \"") + name
                //    + "\" of unsupported type " + meta->typeName()
            }
        }
    }
}

// Primitive-level convenience wrapper for createMetadataFromAttrsAdapter().
void
GU_PrimVDB::createMetadataFromGridAttrsAdapter(
    void* gridPtr,
    const GEO_PrimVDB& prim,
    const GEO_Detail& aGdp)
{
    createMetadataFromAttrsAdapter(
        gridPtr, GA_ATTRIB_PRIMITIVE, prim.getMapOffset(), aGdp);
}

// For each supported public attribute on the given element, create or
// replace a matching metadata entry in the given MetaMap.
void
GU_PrimVDB::createMetadataFromAttrsAdapter(
    void* meta_map_ptr,
    GA_AttributeOwner owner,
    GA_Offset element,
    const GEO_Detail& geo)
{
    using namespace openvdb;

    // meta_map_ptr is assumed to point to an openvdb::vX_Y_Z::MetaMap, for
    // some version X.Y.Z of OpenVDB that may be newer than the one with which
    // libHoudiniGEO.so was built. This is safe provided that MetaMap and
    // its member objects are ABI-compatible between the two OpenVDB versions.
    openvdb::MetaMap& meta_map = *static_cast<openvdb::MetaMap*>(meta_map_ptr);

    const GA_AttributeSet& attrs = geo.getAttributes();
    for (GA_AttributeDict::iterator it = attrs.begin(owner, GA_SCOPE_PUBLIC);
        !it.atEnd(); ++it)
    {
        if (!it.name()) continue;

        std::string name = it.name();

        UT_String prefixed(name);
        prefixed.prepend("vdb_");
        // Intrinsic metadata is handled elsewhere; skip it here.
        if (isIntrinsicMetadata(prefixed))
            continue;

        const GA_Attribute* attrib = it.attrib();
        const GA_AIFTuple* tuple = attrib->getAIFTuple();
        const int entries = attrib->getTupleSize();

        // Dispatch on storage class, then on tuple size.
        switch (attrib->getStorageClass()) {

        case GA_STORECLASS_INT:
            if (!tuple) continue;
            switch (entries) {
            case 1:
                meta_map.removeMeta(name);
                if (name.substr(0, 3) == "is_") {
                    // Scalar integer attributes whose names begin with "is_"
                    // are mapped to boolean metadata.
                    if (tuple->getStorage(attrib) == GA_STORE_INT64) {
                        GA_ROHandleT<int64> handle(attrib);
                        meta_map.insertMeta(name, BoolMetadata(
                            handle.get(element) != 0));
                    } else {
                        GA_ROHandleT<int32> handle(attrib);
                        meta_map.insertMeta(name, BoolMetadata(
                            handle.get(element) != 0));
                    }
                } else {
                    if (tuple->getStorage(attrib) == GA_STORE_INT64) {
                        GA_ROHandleT<int64> handle(attrib);
                        meta_map.insertMeta(name, Int64Metadata(
                            handle.get(element)));
                    } else {
                        GA_ROHandleT<int32> handle(attrib);
                        meta_map.insertMeta(name, Int32Metadata(
                            handle.get(element)));
                    }
                }
                break;
            case 2:
                {
                    GA_ROHandleT<UT_Vector2i> handle(attrib);
                    meta_map.removeMeta(name);
                    meta_map.insertMeta(name, Vec2IMetadata(
                        UTvdbConvert(handle.get(element))));
                }
                break;
            case 3:
                {
                    GA_ROHandleT<UT_Vector3i> handle(attrib);
                    meta_map.removeMeta(name);
                    meta_map.insertMeta(name, Vec3IMetadata(
                        UTvdbConvert(handle.get(element))));
                }
                break;
            case 4:
                {
                    GA_ROHandleT<UT_Vector4i> handle(attrib);
                    meta_map.removeMeta(name);
                    meta_map.insertMeta(name, Vec4IMetadata(
                        UTvdbConvert(handle.get(element))));
                }
                break;
            default:
                {
                    /// @todo Add warning:
                    //std::ostringstream ostr;
                    //ostr << "Skipped int[" << entries << "] metadata attribute \""
                    //    << it.name() << "\" (int tuples of size > 3 are not supported)";
                }
                break;
            }
            break;

        case GA_STORECLASS_FLOAT:
            if (!tuple) continue;
            switch (entries) {
            case 1:
                meta_map.removeMeta(name);
                if (tuple->getStorage(attrib) == GA_STORE_REAL64) {
                    GA_ROHandleT<fpreal64> handle(attrib);
                    meta_map.insertMeta(name, DoubleMetadata(
                        handle.get(element)));
                } else {
                    GA_ROHandleT<fpreal32> handle(attrib);
                    meta_map.insertMeta(name, FloatMetadata(
                        handle.get(element)));
                }
                break;
            case 2:
                meta_map.removeMeta(name);
                if (tuple->getStorage(attrib) == GA_STORE_REAL64) {
                    GA_ROHandleT<UT_Vector2D> handle(attrib);
                    meta_map.insertMeta(name, Vec2DMetadata(
                        UTvdbConvert(handle.get(element))));
                } else {
                    GA_ROHandleT<UT_Vector2F> handle(attrib);
                    meta_map.insertMeta(name, Vec2SMetadata(
                        UTvdbConvert(handle.get(element))));
                }
                break;
            case 3:
                meta_map.removeMeta(name);
                if (tuple->getStorage(attrib) == GA_STORE_REAL64) {
                    GA_ROHandleT<UT_Vector3D> handle(attrib);
                    meta_map.insertMeta(name, Vec3DMetadata(
                        UTvdbConvert(handle.get(element))));
                } else {
                    GA_ROHandleT<UT_Vector3F> handle(attrib);
                    meta_map.insertMeta(name, Vec3SMetadata(
                        UTvdbConvert(handle.get(element))));
                }
                break;
            case 4:
                meta_map.removeMeta(name);
                if (tuple->getStorage(attrib) == GA_STORE_REAL64) {
                    GA_ROHandleT<UT_Vector4D> handle(attrib);
                    meta_map.insertMeta(name, Vec4DMetadata(
                        UTvdbConvert(handle.get(element))));
                } else {
                    GA_ROHandleT<UT_Vector4F> handle(attrib);
                    meta_map.insertMeta(name, Vec4SMetadata(
                        UTvdbConvert(handle.get(element))));
                }
                break;
            case 16:
                meta_map.removeMeta(name);
                if (tuple->getStorage(attrib) == GA_STORE_REAL64) {
                    GA_ROHandleT<UT_Matrix4D> handle(attrib);
                    meta_map.insertMeta(name, Mat4DMetadata(
                        UTvdbConvert(handle.get(element))));
                } else {
                    GA_ROHandleT<UT_Matrix4F> handle(attrib);
                    meta_map.insertMeta(name, Mat4SMetadata(
                        UTvdbConvert(handle.get(element))));
                }
                break;
            default:
                {
                    /// @todo Add warning:
                    //std::ostringstream ostr;
                    //ostr << "Skipped float[" << entries << "] metadata attribute \""
                    //    << it.name() << "\" (float tuples of size > 3 are not supported)";
                }
                break;
            }
            break;

        case GA_STORECLASS_STRING:
            {
                GA_ROHandleS handle(attrib);
                if (entries == 1 && handle.isValid()) {
                    meta_map.removeMeta(name);
                    const char* str = handle.get(element);
                    if (!str) str = "";
                    meta_map.insertMeta(name, StringMetadata(str));
                } else {
                    /// @todo Add warning:
                    //std::ostringstream ostr;
                    //ostr << "Skipped string[" << entries << "] metadata attribute \""
                    //    << it.name() << "\" (string tuples are not supported)";
                }
                break;
            }

        case GA_STORECLASS_INVALID: break;
        case GA_STORECLASS_DICT: break;
        case GA_STORECLASS_OTHER: break;
        }
    }
}


////////////////////////////////////////


// Following code is for HDK only
#ifndef SESI_OPENVDB
// This is the usual DSO hook.
extern "C" {
void
newGeometryPrim(GA_PrimitiveFactory *factory)
{
    GU_PrimVDB::registerMyself(factory);
}

} // extern "C"
#endif

#endif // SESI_OPENVDB || SESI_OPENVDB_PRIM
75,727
C++
31.910908
106
0.558691
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Segment.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file SOP_OpenVDB_Segment.cc
///
/// @author FX R&D OpenVDB team
///
/// @brief Segment VDB Grids

#include <houdini_utils/ParmFactory.h>
#include <openvdb_houdini/SOP_NodeVDB.h>
#include <openvdb_houdini/Utils.h>

#include <openvdb/tools/LevelSetUtil.h>

#include <GA/GA_AttributeRef.h>
#include <GA/GA_ElementGroup.h>
#include <GA/GA_Handle.h>
#include <GA/GA_Types.h>

#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

namespace hvdb = openvdb_houdini;
namespace hutil = houdini_utils;


////////////////////////////////////////


namespace {

// Grid-type functor: split a grid into connected components of active
// voxels and add one new VDB primitive per component.
// NOTE(review): near-duplicate of SegmentSDF below (only the segmentation
// call differs); kept separate to mirror the two openvdb::tools entry points.
struct SegmentActiveVoxels {

    SegmentActiveVoxels(GU_Detail& geo, bool visualize, bool appendNumber,
        hvdb::Interrupter&)
        : mGeoPt(&geo)
        , mVisualize(visualize)
        , mAppendNumber(appendNumber)
    {
    }

    template<typename GridType>
    void operator()(const GridType& grid)
    {
        using GridPtrType = typename GridType::Ptr;

        std::vector<GridPtrType> segments;
        openvdb::tools::segmentActiveVoxels(grid, segments);

        GA_RWHandleV3 color;
        if (mVisualize) {
            // Bind (creating if needed) the primitive diffuse-color attribute.
            GA_RWAttributeRef attrRef =
                mGeoPt->findDiffuseAttribute(GA_ATTRIB_PRIMITIVE);
            if (!attrRef.isValid())
                attrRef = mGeoPt->addDiffuseAttribute(GA_ATTRIB_PRIMITIVE);
            color.bind(attrRef.getAttribute());
        }

        float r, g, b;

        for (size_t n = 0, N = segments.size(); n < N; ++n) {

            std::string name = grid.getName();

            if (mAppendNumber) {
                std::stringstream ss;
                ss << name << "_" << n;
                name = ss.str();
            }

            GU_PrimVDB* vdb =
                hvdb::createVdbPrimitive(*mGeoPt, segments[n], name.c_str());

            if (color.isValid()) {
                // Derive a stable, unique color from the primitive offset.
                GA_Offset offset = vdb->getMapOffset();
                exint colorID = exint(offset);
                UT_Color::getUniqueColor(colorID, &r, &g, &b);
                color.set(vdb->getMapOffset(), UT_Vector3(r, g, b));
            }
        }
    }

private:
    GU_Detail         * const mGeoPt;
    bool                const mVisualize;
    bool                const mAppendNumber;
}; // struct SegmentActiveVoxels

// Grid-type functor: split an SDF grid into disjoint level-set components
// and add one new VDB primitive per component.
struct SegmentSDF {

    SegmentSDF(GU_Detail& geo, bool visualize, bool appendNumber,
        hvdb::Interrupter&)
        : mGeoPt(&geo)
        , mVisualize(visualize)
        , mAppendNumber(appendNumber)
    {
    }

    template<typename GridType>
    void operator()(const GridType& grid)
    {
        using GridPtrType = typename GridType::Ptr;

        std::vector<GridPtrType> segments;
        openvdb::tools::segmentSDF(grid, segments);

        GA_RWHandleV3 color;
        if (mVisualize) {
            GA_RWAttributeRef attrRef =
                mGeoPt->findDiffuseAttribute(GA_ATTRIB_PRIMITIVE);
            if (!attrRef.isValid())
                attrRef = mGeoPt->addDiffuseAttribute(GA_ATTRIB_PRIMITIVE);
            color.bind(attrRef.getAttribute());
        }

        float r, g, b;

        for (size_t n = 0, N = segments.size(); n < N; ++n) {

            std::string name = grid.getName();

            if (mAppendNumber) {
                std::stringstream ss;
                ss << name << "_" << n;
                name = ss.str();
            }

            GU_PrimVDB* vdb =
                hvdb::createVdbPrimitive(*mGeoPt, segments[n], name.c_str());

            if (color.isValid()) {
                GA_Offset offset = vdb->getMapOffset();
                exint colorID = exint(offset);
                UT_Color::getUniqueColor(colorID, &r, &g, &b);
                color.set(offset, UT_Vector3(r, g, b));
            }
        }
    }

private:
    GU_Detail         * const mGeoPt;
    bool                const mVisualize;
    bool                const mAppendNumber;
}; // struct SegmentSDF

} // unnamed namespace


////////////////////////////////////////


class SOP_OpenVDB_Segment: public hvdb::SOP_NodeVDB
{
public:
    SOP_OpenVDB_Segment(OP_Network*, const char* name, OP_Operator*);
    ~SOP_OpenVDB_Segment() override {}

    static OP_Node* factory(OP_Network*, const char* name, OP_Operator*);

    // Input 0 is the cooked input; any others are reference inputs.
    int isRefInput(unsigned i ) const override { return (i > 0); }

    class Cache: public SOP_VDBCacheOptions
    {
        OP_ERROR cookVDBSop(OP_Context&) override;
    };

protected:
    bool updateParmsFlags() override;
};


////////////////////////////////////////


// Register the SOP and declare its parameter interface.
void
newSopOperator(OP_OperatorTable* table)
{
    if (table == nullptr) return;

    hutil::ParmList parms;

    parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group")
        .setChoiceList(&hutil::PrimGroupMenuInput1)
        .setTooltip("Select a subset of the input OpenVDB grids to segment.")
        .setDocumentation(
            "A subset of the input VDB grids to be segmented"
            " (see [specifying volumes|/model/volumes#group])"));

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "colorsegments", "Color Segments")
        .setDefault(PRMoneDefaults)
        .setDocumentation(
            "If enabled, assign a unique, random color to each segment"
            " for ease of identification."));

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "appendnumber",
        "Append Segment Number to Grid Name")
        .setDefault(PRMoneDefaults)
        .setDocumentation(
            "If enabled, name each output VDB after the input VDB with"
            " a unique segment number appended for ease of identification."));

    hvdb::OpenVDBOpFactory("VDB Segment by Connectivity",
        SOP_OpenVDB_Segment::factory, parms, *table)
#ifndef SESI_OPENVDB
        .setInternalName("DW_OpenVDBSegment")
#endif
        .addInput("OpenVDB grids")
        .setVerb(SOP_NodeVerb::COOK_GENERATOR,
            []() { return new SOP_OpenVDB_Segment::Cache; })
        .setDocumentation("\
#icon: COMMON/openvdb\n\
#tags: vdb\n\
\n\
\"\"\"Split SDF VDB volumes into connected components.\"\"\"\n\
\n\
@overview\n\
\n\
A single SDF VDB may represent multiple disjoint objects.\n\
This node detects disjoint components and creates a new VDB for each component.\n\
\n\
@related\n\
- [Node:sop/vdbsegmentbyconnectivity]\n\
\n\
@examples\n\
\n\
See [openvdb.org|http://www.openvdb.org/download/] for source code\n\
and usage examples.\n");
}


////////////////////////////////////////


OP_Node*
SOP_OpenVDB_Segment::factory(OP_Network* net,
    const char* name, OP_Operator* op)
{
    return new SOP_OpenVDB_Segment(net, name, op);
}


SOP_OpenVDB_Segment::SOP_OpenVDB_Segment(OP_Network* net,
    const char* name, OP_Operator* op):
    hvdb::SOP_NodeVDB(net, name, op)
{
}


////////////////////////////////////////


// No parameter enable/disable dependencies for this node.
bool
SOP_OpenVDB_Segment::updateParmsFlags()
{
    bool changed = false;
    return changed;
}


////////////////////////////////////////


// Cook: segment each selected input VDB — level sets via segmentSDF(),
// everything else via segmentActiveVoxels() — appending results to gdp.
OP_ERROR
SOP_OpenVDB_Segment::Cache::cookVDBSop(OP_Context& context)
{
    try {
        const fpreal time = context.getTime();

        const GU_Detail* inputGeoPt = inputGeo(0);

        const GA_PrimitiveGroup *group = nullptr;

        hvdb::Interrupter boss("Segmenting VDBs");

        {
            UT_String str;
            evalString(str, "group", 0, time);
            group = matchGroup(*inputGeoPt, str.toStdString());
        }

        hvdb::VdbPrimCIterator vdbIt(inputGeoPt, group);

        if (!vdbIt) {
            addWarning(SOP_MESSAGE, "No VDB grids to process.");
            return error();
        }

        bool visualize = bool(evalInt("colorsegments", 0, time));
        bool appendNumber = bool(evalInt("appendnumber", 0, time));

        SegmentActiveVoxels segmentActiveVoxels(*gdp, visualize, appendNumber, boss);
        SegmentSDF segmentSDF(*gdp, visualize, appendNumber, boss);

        for (; vdbIt; ++vdbIt) {

            if (boss.wasInterrupted()) break;

            const GU_PrimVDB* vdb = vdbIt.getPrimitive();

            const openvdb::GridClass gridClass = vdb->getGrid().getGridClass();

            if (gridClass == openvdb::GRID_LEVEL_SET) {
                hvdb::GEOvdbApply<hvdb::NumericGridTypes>(*vdb, segmentSDF);
            } else {
                hvdb::GEOvdbApply<hvdb::AllGridTypes>(*vdb, segmentActiveVoxels);
            }
        }

        if (boss.wasInterrupted()) {
            addWarning(SOP_MESSAGE, "Process was interrupted");
        }

    } catch (std::exception& e) {
        addError(SOP_MESSAGE, e.what());
    }

    return error();
}
8,400
C++
26.012862
98
0.587143
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Metadata.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file SOP_OpenVDB_Metadata.cc
///
/// @author FX R&D OpenVDB team

#include <houdini_utils/ParmFactory.h>
#include <openvdb_houdini/Utils.h>
#include <openvdb_houdini/SOP_NodeVDB.h>
#include <UT/UT_Interrupt.h>
#include <stdexcept>
#include <string>
#include <vector>

namespace hvdb = openvdb_houdini;
namespace hutil = houdini_utils;


// SOP that edits the standard metadata (name, class, creator, vector type,
// half-float flag, world-space flag) of selected input VDBs.
class SOP_OpenVDB_Metadata: public hvdb::SOP_NodeVDB
{
public:
    SOP_OpenVDB_Metadata(OP_Network*, const char* name, OP_Operator*);
    ~SOP_OpenVDB_Metadata() override {}

    static OP_Node* factory(OP_Network*, const char* name, OP_Operator*);

    class Cache: public SOP_VDBCacheOptions
    {
        OP_ERROR cookVDBSop(OP_Context&) override;
    };

protected:
    bool updateParmsFlags() override;
};


// Register the SOP and declare its parameter interface. Each value parm is
// paired with a "set*" toggle that enables it.
void
newSopOperator(OP_OperatorTable* table)
{
    if (table == nullptr) return;

    hutil::ParmList parms;

    parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group")
        .setTooltip("Specify a subset of the input VDBs to be modified.")
        .setChoiceList(&hutil::PrimGroupMenuInput1)
        .setDocumentation(
            "A subset of the input VDBs to be modified"
            " (see [specifying volumes|/model/volumes#group])"));

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "setname", "")
        .setTypeExtended(PRM_TYPE_TOGGLE_JOIN));
    parms.add(hutil::ParmFactory(PRM_STRING, "name", "Name")
        .setTooltip("The name of the VDB"));

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "setclass", "")
        .setTypeExtended(PRM_TYPE_TOGGLE_JOIN));
    {
        // Build the grid-class menu from the OpenVDB enum.
        std::vector<std::string> items;
        for (int n = 0; n < openvdb::NUM_GRID_CLASSES; ++n) {
            openvdb::GridClass gridclass = static_cast<openvdb::GridClass>(n);
            items.push_back(openvdb::GridBase::gridClassToString(gridclass));
            items.push_back(openvdb::GridBase::gridClassToMenuName(gridclass));
        }

        parms.add(
            hutil::ParmFactory(PRM_STRING, "class", "Class")
            .setChoiceListItems(PRM_CHOICELIST_SINGLE, items)
            .setTooltip("Specify how voxel values should be interpreted.")
            .setDocumentation("\
How voxel values should be interpreted\n\
\n\
Fog Volume:\n\
The volume represents a density field. Values should be positive,\n\
with zero representing empty regions.\n\
Level Set:\n\
The volume is treated as a narrow-band signed distance field level set.\n\
The voxels within a certain distance&mdash;the \"narrow band width\"&mdash;of\n\
an isosurface are expected to define positive (exterior) and negative (interior)\n\
distances to the surface. Outside the narrow band, the distance value\n\
is constant and equal to the band width.\n\
Staggered Vector Field:\n\
If the volume is vector-valued, the _x_, _y_ and _z_ vector components\n\
are to be treated as lying on the respective faces of voxels,\n\
not at their centers.\n\
Other:\n\
No special meaning is assigned to the volume's data.\n"));
    }

    /// @todo Do we really need to expose this?
    parms.add(hutil::ParmFactory(PRM_TOGGLE, "setcreator", "")
        .setTypeExtended(PRM_TYPE_TOGGLE_JOIN));
    parms.add(hutil::ParmFactory(PRM_STRING, "creator", "Creator")
        .setTooltip("Who (or what node) created the VDB"));

    /// @todo Currently, no SOP pays attention to this setting.
    parms.add(hutil::ParmFactory(PRM_TOGGLE, "setworld", "")
        .setTypeExtended(PRM_TYPE_TOGGLE_JOIN));
    parms.add(hutil::ParmFactory(PRM_TOGGLE, "world", "Transform Values")
        .setDefault(PRMzeroDefaults)
        .setTooltip(
            "For vector-valued VDBs, specify whether voxel values\n"
            "are in world space and should be affected by transforms\n"
            "or in local space and should not be transformed."));

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "setvectype", "")
        .setTypeExtended(PRM_TYPE_TOGGLE_JOIN));
    {
        // Build the vector-type menu and an aggregate tooltip from the enum.
        std::string help =
            "For vector-valued VDBs, specify an interpretation of the vectors"
            " that determines how they are affected by transforms.\n";
        std::vector<std::string> items;
        for (int n = 0; n < openvdb::NUM_VEC_TYPES; ++n) {
            const auto vectype = static_cast<openvdb::VecType>(n);
            items.push_back(openvdb::GridBase::vecTypeToString(vectype));
            items.push_back(openvdb::GridBase::vecTypeExamples(vectype));
            help += "\n" + openvdb::GridBase::vecTypeExamples(vectype) + "\n "
                + openvdb::GridBase::vecTypeDescription(vectype) + ".";
        }

        parms.add(hutil::ParmFactory(PRM_STRING, "vectype", "Vector Type")
            .setChoiceListItems(PRM_CHOICELIST_SINGLE, items)
            .setTooltip(::strdup(help.c_str())));
    }

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "setfloat16", "")
        .setTypeExtended(PRM_TYPE_TOGGLE_JOIN));
    parms.add(hutil::ParmFactory(PRM_TOGGLE, "float16", "Write 16-Bit Floats")
        .setDefault(PRMzeroDefaults)
        .setTooltip(
            "When saving the VDB to a file, write floating-point\n"
            "scalar or vector voxel values as 16-bit half floats."));

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "syncattrs",
        "Transfer Metadata to Attributes")
        .setDefault(PRMoneDefaults)
        .setTooltip("Transfer all standard metadata values to intrinsic primitive attributes.")
        .setDocumentation(
            "Transfer all standard metadata values to intrinsic primitive attributes,\n"
            "whether or not any of the above values were changed."));

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "syncmetadata",
        "Transfer Attributes to Metadata")
        .setDefault(PRMzeroDefaults)
        .setTooltip("Transfer all standard intrinsic primitive attribute values to metadata.")
        .setDocumentation(
            "Transfer all standard intrinsic primitive attribute values to metadata,\n"
            "whether or not any of the above values were changed."));

    // Register this operator.
    hvdb::OpenVDBOpFactory("VDB Metadata",
        SOP_OpenVDB_Metadata::factory, parms, *table)
        .setNativeName("")
        .addInput("Input with VDBs")
        .setVerb(SOP_NodeVerb::COOK_INPLACE,
            []() { return new SOP_OpenVDB_Metadata::Cache; })
        .setDocumentation("\
#icon: COMMON/openvdb\n\
#tags: vdb\n\
\n\
\"\"\"Modify the metadata associated with a VDB volume.\"\"\"\n\
\n\
@overview\n\
\n\
This node allows one to create and edit\n\
[metadata|http://www.openvdb.org/documentation/doxygen/codeExamples.html#sHandlingMetadata]\n\
attached to a VDB volume.\n\
Some standard VDB metadata, such as the\n\
[grid class|http://www.openvdb.org/documentation/doxygen/overview.html#secGrid],\n\
is exposed via intrinsic attributes on the primitive and can be viewed\n\
and in some cases edited either from the [geometry spreadsheet|/ref/panes/geosheet]\n\
or with the [Node:sop/attribcreate] node, but changes to attribute values\n\
made through those means are typically not propagated immediately, if at all,\n\
to a VDB's metadata.\n\
This node provides more direct access to the standard VDB metadata.\n\
\n\
@related\n\
- [OpenVDB Create|Node:sop/DW_OpenVDBCreate]\n\
- [Node:sop/attribcreate]\n\
- [Node:sop/name]\n\
\n\
@examples\n\
\n\
See [openvdb.org|http://www.openvdb.org/download/] for source code\n\
and usage examples.\n");
}


// Enable each value parm only when its "set*" toggle is on.
bool
SOP_OpenVDB_Metadata::updateParmsFlags()
{
    bool changed = false;

    const fpreal time = 0; // No point using CHgetTime as that is unstable.
    changed |= enableParm("name",    bool(evalInt("setname", 0, time)));
    changed |= enableParm("class",   bool(evalInt("setclass", 0, time)));
    changed |= enableParm("creator", bool(evalInt("setcreator", 0, time)));
    changed |= enableParm("float16", bool(evalInt("setfloat16", 0, time)));
    changed |= enableParm("world",   bool(evalInt("setworld", 0, time)));
    changed |= enableParm("vectype", bool(evalInt("setvectype", 0, time)));

    return changed;
}


OP_Node*
SOP_OpenVDB_Metadata::factory(OP_Network* net, const char* name, OP_Operator* op)
{
    return new SOP_OpenVDB_Metadata(net, name, op);
}


SOP_OpenVDB_Metadata::SOP_OpenVDB_Metadata(OP_Network* net, const char* name, OP_Operator* op):
    hvdb::SOP_NodeVDB(net, name, op)
{
}


// Cook: read the toggles/values and apply the requested metadata edits to
// the selected VDB primitives. (Body continues beyond this source chunk.)
OP_ERROR
SOP_OpenVDB_Metadata::Cache::cookVDBSop(OP_Context& context)
{
    try {
        const fpreal time = context.getTime();

        // Get UI parameter values.
        const bool
            setname = evalInt("setname", 0, time),
            setclass = evalInt("setclass", 0, time),
            setcreator = evalInt("setcreator", 0, time),
            setfloat16 = evalInt("setfloat16", 0, time),
            setvectype = evalInt("setvectype", 0, time),
            setworld = evalInt("setworld", 0, time),
            syncattrs = evalInt("syncattrs", 0, time),
            syncmetadata = evalInt("syncmetadata", 0, time);

        // Nothing requested: early out.
        if (!(setname || setclass || setcreator || setfloat16 || setvectype
            || setworld || syncattrs || syncmetadata))
        {
            return error();
        }

        const bool float16 = (!setfloat16 ? false : evalInt("float16", 0, time));
        const bool world = (!setworld ? false : evalInt("world", 0, time));
        const std::string name =
            (!setname ? std::string{} : evalStdString("name", time));
        const std::string creator =
            (!setcreator ? std::string{} : evalStdString("creator", time));
        const openvdb::GridClass gridclass = (!setclass ? openvdb::GRID_UNKNOWN
            : openvdb::GridBase::stringToGridClass(evalStdString("class", time)));
        const openvdb::VecType vectype = (!setvectype ? openvdb::VEC_INVARIANT
            : openvdb::GridBase::stringToVecType(evalStdString("vectype", time)));

        // Get the group of grids to be modified.
const GA_PrimitiveGroup* group = matchGroup(*gdp, evalStdString("group", time)); UT_AutoInterrupt progress("Set VDB grid metadata"); // For each VDB primitive in the given group... for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { if (progress.wasInterrupted()) throw std::runtime_error("was interrupted"); GU_PrimVDB* vdb = *it; // No need to make the grid unique, since we're not modifying its voxel data. hvdb::Grid& grid = vdb->getGrid(); // Set various grid metadata items. if (setname) grid.setName(name); if (setcreator) grid.setCreator(creator); if (setfloat16) grid.setSaveFloatAsHalf(float16); if (setvectype) grid.setVectorType(vectype); if (setworld) grid.setIsInWorldSpace(world); if (setclass) { grid.setGridClass(gridclass); // Update viewport visualization options. switch (gridclass) { case openvdb::GRID_LEVEL_SET: case openvdb::GRID_FOG_VOLUME: { const GEO_VolumeOptions& visOps = vdb->getVisOptions(); vdb->setVisualization( ((gridclass == openvdb::GRID_LEVEL_SET) ? GEO_VOLUMEVIS_ISO : GEO_VOLUMEVIS_SMOKE), visOps.myIso, visOps.myDensity); break; } default: break; } } // Optionally transfer metadata to primitive attributes. if (syncattrs) vdb->syncAttrsFromMetadata(); // Optionally transfer primitive attributes to metadata. if (syncmetadata) GU_PrimVDB::createMetadataFromGridAttrs(grid, *vdb, *gdp); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
11,886
C++
39.294915
99
0.637473
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/ParmFactory.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file ParmFactory.cc /// @author FX R&D OpenVDB team #include "ParmFactory.h" #include <CH/CH_Manager.h> #include <CMD/CMD_Args.h> #include <CMD/CMD_Manager.h> #include <GOP/GOP_GroupParse.h> #include <GU/GU_Detail.h> #include <GU/GU_PrimPoly.h> #include <GU/GU_Selection.h> #include <GA/GA_AIFSharedStringTuple.h> #include <GA/GA_Attribute.h> #include <GA/GA_AttributeRef.h> #include <HOM/HOM_Module.h> #include <OP/OP_OperatorTable.h> #include <PRM/PRM_Parm.h> #include <PRM/PRM_SharedFunc.h> #include <PY/PY_CPythonAPI.h> #include <PY/PY_InterpreterAutoLock.h> #include <PY/PY_Python.h> #include <SOP/SOP_NodeParmsOptions.h> #include <UT/UT_IntArray.h> #include <UT/UT_WorkArgs.h> #include <algorithm> // for std::for_each(), std::max(), std::remove(), std::sort() #include <cstdint> // for std::uintptr_t() #include <cstdlib> // for std::atoi() #include <cstring> // for std::strcmp(), ::strdup() #include <limits> #include <ostream> #include <sstream> #include <stdexcept> namespace houdini_utils { namespace { // PRM_SpareData token names // SOP input index specifier /// @todo Is there an existing constant for this token? char const * const kSopInputToken = "sop_input"; // Parameter documentation wiki markup char const * const kParmDocToken = "houdini_utils::doc"; // String-encoded GA_AttributeOwner char const * const kAttrOwnerToken = "houdini_utils::attr_owner"; // Pointer to an AttrFilterFunc char const * const kAttrFilterToken = "houdini_utils::attr_filter"; // Add an integer value (encoded into a string) to a PRM_SpareData map // under the given token name. inline void setSpareInteger(PRM_SpareData* spare, const char* token, int value) { if (spare && token) { spare->addTokenValue(token, std::to_string(value).c_str()); } } // Retrieve the integer value with the given token name from a PRM_SpareData map. // If no such token exists, return the specified default integer value. 
inline int getSpareInteger(const PRM_SpareData* spare, const char* token, int deflt = 0) { if (!spare || !token) return deflt; char const * const str = spare->getValue(token); return str ? std::atoi(str) : deflt; } // Add a pointer (encoded into a string) to a PRM_SpareData map // under the given token name. inline void setSparePointer(PRM_SpareData* spare, const char* token, const void* ptr) { if (spare && token) { spare->addTokenValue(token, std::to_string(reinterpret_cast<std::uintptr_t>(ptr)).c_str()); } } // Retrieve the pointer with the given token name from a PRM_SpareData map. // If no such token exists, return the specified default pointer. inline const void* getSparePointer(const PRM_SpareData* spare, const char* token, const void* deflt = nullptr) { if (!spare || !token) return deflt; if (sizeof(std::uintptr_t) > sizeof(unsigned long long)) { throw std::range_error{"houdini_utils::ParmFactory: can't decode pointer from string"}; } if (const char* str = spare->getValue(token)) { auto intPtr = static_cast<std::uintptr_t>(std::stoull(str)); return reinterpret_cast<void*>(intPtr); } return deflt; } // Copy elements from one spare data map to another, // overwriting any existing elements with the same keys. 
inline void mergeSpareData(SpareDataMap& dst, const SpareDataMap& src) { for (const auto& it: src) { dst[it.first] = it.second; } } } // anonymous namespace ParmList& ParmList::add(const PRM_Template& p) { mParmVec.push_back(p); incFolderParmCount(); return *this; } ParmList& ParmList::add(const ParmFactory& f) { add(f.get()); return *this; } ParmList::SwitcherInfo* ParmList::getCurrentSwitcher() { SwitcherInfo* info = nullptr; if (!mSwitchers.empty()) { info = &mSwitchers.back(); } return info; } ParmList& ParmList::beginSwitcher(const std::string& token, const std::string& label) { if (nullptr != getCurrentSwitcher()) { incFolderParmCount(); } SwitcherInfo info; info.parmIdx = mParmVec.size(); info.exclusive = false; mSwitchers.push_back(info); // Add a switcher parameter with the given token and name, but no folders. mParmVec.push_back(ParmFactory(PRM_SWITCHER, token, label).get()); return *this; } ParmList& ParmList::beginExclusiveSwitcher(const std::string& token, const std::string& label) { if (nullptr != getCurrentSwitcher()) { incFolderParmCount(); } SwitcherInfo info; info.parmIdx = mParmVec.size(); info.exclusive = true; mSwitchers.push_back(info); // Add a switcher parameter with the given token and name, but no folders. mParmVec.push_back(ParmFactory(PRM_SWITCHER, token, label).get()); return *this; } ParmList& ParmList::endSwitcher() { if (SwitcherInfo* info = getCurrentSwitcher()) { if (info->folders.empty()) { throw std::runtime_error("added switcher that has no folders"); } else { // Replace the placeholder switcher parameter that was added to // mParmVec in beginSwitcher() with a new parameter that has // the correct folder count and folder info. PRM_Template& switcherParm = mParmVec[info->parmIdx]; std::string token, label; if (const char* s = switcherParm.getToken()) token = s; if (const char* s = switcherParm.getLabel()) label = s; mParmVec[info->parmIdx] = ParmFactory(info->exclusive ? 
PRM_SWITCHER_EXCLUSIVE : PRM_SWITCHER, token.c_str(), label.c_str()) .setVectorSize(int(info->folders.size())) .setDefault(info->folders) .get(); } mSwitchers.pop_back(); } else { throw std::runtime_error("endSwitcher() called with no corresponding beginSwitcher()"); } return *this; } ParmList& ParmList::addFolder(const std::string& label) { if (SwitcherInfo* info = getCurrentSwitcher()) { info->folders.push_back(PRM_Default(/*numParms=*/0, ::strdup(label.c_str()))); } else { throw std::runtime_error("added folder to nonexistent switcher"); } return *this; } void ParmList::incFolderParmCount() { if (SwitcherInfo* info = getCurrentSwitcher()) { if (info->folders.empty()) { throw std::runtime_error("added parameter to switcher that has no folders"); } else { // If a parameter is added to this ParmList while a switcher with at least // one folder is active, increment the folder's parameter count. PRM_Default& def = *(info->folders.rbegin()); def.setOrdinal(def.getOrdinal() + 1); } } } PRM_Template* ParmList::get() const { const size_t numParms = mParmVec.size(); PRM_Template* ret = new PRM_Template[numParms + 1]; for (size_t n = 0; n < numParms; ++n) { ret[n] = mParmVec[n]; } return ret; } //////////////////////////////////////// struct ParmFactory::Impl { Impl(const std::string& token, const std::string& label): callbackFunc(0), choicelist(nullptr), conditional(nullptr), defaults(PRMzeroDefaults), multiType(PRM_MULTITYPE_NONE), name(new PRM_Name(token.c_str(), label.c_str())), parmGroup(0), range(nullptr), spareData(nullptr), multiparms(nullptr), typeExtended(PRM_TYPE_NONE), vectorSize(1), invisible(false) { const_cast<PRM_Name*>(name)->harden(); } static PRM_SpareData* getSopInputSpareData(size_t inp); ///< @todo return a const pointer? 
static void getAttrChoices(void* op, PRM_Name* choices, int maxChoices, const PRM_SpareData*, const PRM_Parm*); PRM_Callback callbackFunc; const PRM_ChoiceList* choicelist; const PRM_ConditionalBase* conditional; const PRM_Default* defaults; std::string tooltip; PRM_MultiType multiType; const PRM_Name* name; int parmGroup; const PRM_Range* range; PRM_SpareData* spareData; const PRM_Template* multiparms; PRM_Type type; PRM_TypeExtended typeExtended; int vectorSize; bool invisible; static PRM_SpareData* const sSOPInputSpareData[4]; }; PRM_SpareData* const ParmFactory::Impl::sSOPInputSpareData[4] = { &SOP_Node::theFirstInput, &SOP_Node::theSecondInput, &SOP_Node::theThirdInput, &SOP_Node::theFourthInput}; // Return one of the predefined PRM_SpareData maps that specify a SOP input number, // or construct new PRM_SpareData if none exists for the given input number. PRM_SpareData* ParmFactory::Impl::getSopInputSpareData(size_t inp) { if (inp < 4) return Impl::sSOPInputSpareData[inp]; auto spare = new PRM_SpareData{SOP_Node::theFirstInput}; spare->addTokenValue(kSopInputToken, std::to_string(inp).c_str()); return spare; } // PRM_ChoiceGenFunc invoked by ParmFactory::setAttrChoiceList() void ParmFactory::Impl::getAttrChoices(void* op, PRM_Name* choices, int maxChoices, const PRM_SpareData* spare, const PRM_Parm* parm) { if (!op || !choices || !parm) return; // This function can only be used in SOPs, because it calls SOP_Node::fillAttribNameMenu(). if (static_cast<OP_Node*>(op)->getOpTypeID() != SOP_OPTYPE_ID) return; auto* sop = static_cast<SOP_Node*>(op); // Extract the SOP input number, the attribute class, and an optional // pointer to a filter functor from the spare data. 
const int inp = getSpareInteger(spare, kSopInputToken); const int attrOwner = getSpareInteger(spare, kAttrOwnerToken, GA_ATTRIB_INVALID); const auto* attrFilter = static_cast<const AttrFilterFunc*>(getSparePointer(spare, kAttrFilterToken)); // Marshal pointers to the filter functor and the parameter and SOP for which this function // is being called into blind data that can be passed to SOP_Node::fillAttribNameMenu(). struct AttrFilterData { const AttrFilterFunc* func; const PRM_Parm* parm; const SOP_Node* sop; }; AttrFilterData cbData{attrFilter, parm, sop}; // Define a filter callback function to be passed to SOP_Node::fillAttribNameMenu(). // Because the latter uses a C-style callback mechanism, this callback must be // equivalent to a static function pointer (as a non-capturing lambda is). auto cb = [](const GA_Attribute* aAttr, void* aData) -> bool { if (!aAttr) return false; // Cast the blind data pointer supplied by SOP_Node::fillAttribNameMenu(). const auto* data = static_cast<AttrFilterData*>(aData); if (!data || !data->func) return true; // no filter; accept all attributes // Invoke the filter functor and return the result. return (*(data->func))(*aAttr, *(data->parm), *(data->sop)); }; // Invoke SOP_Node::fillAttribNameMenu() for the appropriate attribute class. switch (attrOwner) { case GA_ATTRIB_VERTEX: case GA_ATTRIB_POINT: case GA_ATTRIB_PRIMITIVE: case GA_ATTRIB_DETAIL: if (cbData.func) { sop->fillAttribNameMenu(choices, maxChoices, static_cast<GA_AttributeOwner>(attrOwner), inp, cb, &cbData); } else { sop->fillAttribNameMenu(choices, maxChoices, static_cast<GA_AttributeOwner>(attrOwner), inp); } break; default: // all attributes { // To collect all classes of attributes, call SOP_Node::fillAttribNameMenu() // once for each class. Each call appends zero or more PRM_Names to the list // as well as an end-of-list terminator. 
auto* head = choices; int count = 0, maxCount = maxChoices; for (auto owner: { GA_ATTRIB_VERTEX, GA_ATTRIB_POINT, GA_ATTRIB_PRIMITIVE, GA_ATTRIB_DETAIL }) { int numAdded = (cbData.func ? sop->fillAttribNameMenu(head, maxCount, owner, inp, cb, &cbData) : sop->fillAttribNameMenu(head, maxCount, owner, inp)); if (numAdded > 0) { // SOP_Node::fillAttribNameMenu() returns the number of entries added // to the list, not including the terminator. // Advance the list head pointer so that the next entry to be added // (if any) overwrites the terminator. count += numAdded; head += numAdded; maxCount -= numAdded; } } if (count) { // Sort the list by name to reproduce the behavior of SOP_Node::allAttribMenu. std::sort(choices, choices + count, [](const PRM_Name& n1, const PRM_Name& n2) { return (0 > std::strcmp(n1.getToken(), n2.getToken())); } ); } break; } } } //////////////////////////////////////// ParmFactory::ParmFactory(PRM_Type type, const std::string& token, const std::string& label): mImpl(new Impl(token, label)) { mImpl->type = type; } ParmFactory::ParmFactory(PRM_MultiType multiType, const std::string& token, const std::string& label): mImpl(new Impl(token, label)) { mImpl->multiType = multiType; } ParmFactory& ParmFactory::setCallbackFunc(const PRM_Callback& f) { mImpl->callbackFunc = f; return *this; } ParmFactory& ParmFactory::setChoiceList(const PRM_ChoiceList* c) { mImpl->choicelist = c; if (c == &PrimGroupMenuInput1) { setSpareData(SOP_Node::getGroupSelectButton(GA_GROUP_PRIMITIVE, nullptr, 0, &SOP_Node::theFirstInput)); } else if (c == &PrimGroupMenuInput2) { setSpareData(SOP_Node::getGroupSelectButton(GA_GROUP_PRIMITIVE, nullptr, 1, &SOP_Node::theSecondInput)); } else if (c == &PrimGroupMenuInput3) { setSpareData(SOP_Node::getGroupSelectButton(GA_GROUP_PRIMITIVE, nullptr, 2, &SOP_Node::theThirdInput)); } else if (c == &PrimGroupMenuInput4) { setSpareData(SOP_Node::getGroupSelectButton(GA_GROUP_PRIMITIVE, nullptr, 3, &SOP_Node::theFourthInput)); } return 
*this; } /// @todo Merge this into setChoiceListItems() once the deprecated /// setChoiceList() overloads have been removed. ParmFactory& ParmFactory::doSetChoiceList(PRM_ChoiceListType typ, const char* const* items, bool paired) { size_t numItems = 0; for ( ; items[numItems] != nullptr; ++numItems) {} if (paired) numItems >>= 1; PRM_Name* copyOfItems = new PRM_Name[numItems + 1]; // extra item is list terminator if (paired) { for (size_t i = 0, n = 0; n < numItems; ++n, i += 2) { copyOfItems[n].setToken(items[i]); copyOfItems[n].setLabel(items[i+1]); copyOfItems[n].harden(); } } else { for (size_t n = 0; n < numItems; ++n) { UT_String idx; idx.itoa(n); copyOfItems[n].setToken(idx.buffer()); copyOfItems[n].setLabel(items[n]); copyOfItems[n].harden(); } } mImpl->choicelist = new PRM_ChoiceList(typ, copyOfItems); return *this; } /// @todo Merge this into setChoiceListItems() once the deprecated /// setChoiceList() overloads have been removed. ParmFactory& ParmFactory::doSetChoiceList(PRM_ChoiceListType typ, const std::vector<std::string>& items, bool paired) { const size_t numItems = items.size() >> (paired ? 
1 : 0); PRM_Name* copyOfItems = new PRM_Name[numItems + 1]; // extra item is list terminator if (paired) { for (size_t i = 0, n = 0; n < numItems; ++n, i += 2) { copyOfItems[n].setToken(items[i].c_str()); copyOfItems[n].setLabel(items[i+1].c_str()); copyOfItems[n].harden(); } } else { for (size_t n = 0; n < numItems; ++n) { UT_String idx; idx.itoa(n); copyOfItems[n].setToken(idx.buffer()); copyOfItems[n].setLabel(items[n].c_str()); copyOfItems[n].harden(); } } mImpl->choicelist = new PRM_ChoiceList(typ, copyOfItems); return *this; } ParmFactory& ParmFactory::setChoiceList(PRM_ChoiceListType typ, const char* const* items, bool paired) { return doSetChoiceList(typ, items, paired); } ParmFactory& ParmFactory::setChoiceList(PRM_ChoiceListType typ, const std::vector<std::string>& items, bool paired) { return doSetChoiceList(typ, items, paired); } ParmFactory& ParmFactory::setChoiceListItems(PRM_ChoiceListType typ, const char* const* items) { return doSetChoiceList(typ, items, /*paired=*/true); } ParmFactory& ParmFactory::setChoiceListItems(PRM_ChoiceListType typ, const std::vector<std::string>& items) { return doSetChoiceList(typ, items, /*paired=*/true); } ParmFactory& ParmFactory::setGroupChoiceList(size_t inputIndex, PRM_ChoiceListType typ) { mImpl->choicelist = new PRM_ChoiceList(typ, PrimGroupMenu.getChoiceGenerator()); setSpareData(SOP_Node::getGroupSelectButton(GA_GROUP_PRIMITIVE, nullptr, static_cast<int>(inputIndex), mImpl->getSopInputSpareData(inputIndex))); return *this; } ParmFactory& ParmFactory::setAttrChoiceList(size_t inputIndex, GA_AttributeOwner attrOwner, PRM_ChoiceListType typ, AttrFilterFunc attrFilter) { setChoiceList(new PRM_ChoiceList{typ, Impl::getAttrChoices}); mImpl->spareData = new PRM_SpareData; setSpareInteger(mImpl->spareData, kSopInputToken, int(inputIndex)); setSpareInteger(mImpl->spareData, kAttrOwnerToken, static_cast<int>(attrOwner)); if (attrFilter) { setSparePointer(mImpl->spareData, kAttrFilterToken, new 
AttrFilterFunc{attrFilter}); } return *this; } ParmFactory& ParmFactory::setConditional(const PRM_ConditionalBase* c) { mImpl->conditional = c; return *this; } ParmFactory& ParmFactory::setDefault(fpreal f, const char* s, CH_StringMeaning meaning) { mImpl->defaults = new PRM_Default(f, s, meaning); return *this; } ParmFactory& ParmFactory::setDefault(const std::string& s, CH_StringMeaning meaning) { mImpl->defaults = new PRM_Default(0.0, ::strdup(s.c_str()), meaning); return *this; } ParmFactory& ParmFactory::setDefault(const std::vector<fpreal>& v) { const size_t numDefaults = v.size(); PRM_Default* defaults = new PRM_Default[numDefaults + 1]; for (size_t n = 0; n < numDefaults; ++n) { defaults[n] = PRM_Default(v[n]); } mImpl->defaults = defaults; return *this; } ParmFactory& ParmFactory::setDefault(const std::vector<PRM_Default>& defaults) { const size_t numDefaults = defaults.size(); PRM_Default* copyOfDefaults = new PRM_Default[numDefaults + 1]; for (size_t n = 0; n < numDefaults; ++n) { copyOfDefaults[n] = defaults[n]; } mImpl->defaults = copyOfDefaults; return *this; } ParmFactory& ParmFactory::setDefault(const PRM_Default* d) { mImpl->defaults = d; return *this; } ParmFactory& ParmFactory::setTooltip(const char* t) { mImpl->tooltip = (t ? t : ""); return *this; } ParmFactory& ParmFactory::setHelpText(const char* t) { return setTooltip(t); } ParmFactory& ParmFactory::setDocumentation(const char* doc) { if (!mImpl->spareData) { mImpl->spareData = new PRM_SpareData; } mImpl->spareData->addTokenValue(kParmDocToken, ::strdup(doc ? 
doc : "")); return *this; } ParmFactory& ParmFactory::setParmGroup(int n) { mImpl->parmGroup = n; return *this; } ParmFactory& ParmFactory::setRange(PRM_RangeFlag minFlag, fpreal minVal, PRM_RangeFlag maxFlag, fpreal maxVal) { mImpl->range = new PRM_Range(minFlag, minVal, maxFlag, maxVal); return *this; } ParmFactory& ParmFactory::setRange(const std::vector<PRM_Range>& ranges) { const size_t numRanges = ranges.size(); PRM_Range* copyOfRanges = new PRM_Range[numRanges + 1]; for (size_t n = 0; n < numRanges; ++n) { copyOfRanges[n] = ranges[n]; } mImpl->range = copyOfRanges; return *this; } ParmFactory& ParmFactory::setRange(const PRM_Range* r) { mImpl->range = r; return *this; } ParmFactory& ParmFactory::setSpareData(const SpareDataMap& items) { if (!items.empty()) { if (!mImpl->spareData) { mImpl->spareData = new PRM_SpareData; } for (SpareDataMap::const_iterator i = items.begin(), e = items.end(); i != e; ++i) { mImpl->spareData->addTokenValue(i->first.c_str(), i->second.c_str()); } } return *this; } ParmFactory& ParmFactory::setSpareData(const PRM_SpareData* d) { if (!d) { if (mImpl->spareData) mImpl->spareData->clear(); } else { mImpl->spareData = new PRM_SpareData{*d}; } return *this; } ParmFactory& ParmFactory::setMultiparms(const ParmList& p) { mImpl->multiparms = p.get(); return *this; } ParmFactory& ParmFactory::setTypeExtended(PRM_TypeExtended t) { mImpl->typeExtended = t; return *this; } ParmFactory& ParmFactory::setVectorSize(int n) { mImpl->vectorSize = n; return *this; } ParmFactory& ParmFactory::setInvisible() { mImpl->invisible = true; return *this; } PRM_Template ParmFactory::get() const { #ifdef SESI_OPENVDB // Help is maintained separately within Houdini const char *tooltip = nullptr; #else const char *tooltip = mImpl->tooltip.c_str(); #endif PRM_Template parm; if (mImpl->multiType != PRM_MULTITYPE_NONE) { parm.initMulti( mImpl->multiType, const_cast<PRM_Template*>(mImpl->multiparms), PRM_Template::PRM_EXPORT_MIN, fpreal(mImpl->vectorSize), 
const_cast<PRM_Name*>(mImpl->name), const_cast<PRM_Default*>(mImpl->defaults), const_cast<PRM_Range*>(mImpl->range), 0, // no callback mImpl->spareData, tooltip ? ::strdup(tooltip) : nullptr, const_cast<PRM_ConditionalBase*>(mImpl->conditional)); } else { parm.initialize( mImpl->type, mImpl->typeExtended, PRM_Template::PRM_EXPORT_MIN, mImpl->vectorSize, const_cast<PRM_Name*>(mImpl->name), const_cast<PRM_Default*>(mImpl->defaults), const_cast<PRM_ChoiceList*>(mImpl->choicelist), const_cast<PRM_Range*>(mImpl->range), mImpl->callbackFunc, mImpl->spareData, mImpl->parmGroup, tooltip ? ::strdup(tooltip) : nullptr, const_cast<PRM_ConditionalBase*>(mImpl->conditional)); } if (mImpl->invisible) { parm.setInvisible(true); } return parm; } //////////////////////////////////////// namespace { /// @brief Output wiki markup documentation to the given stream for /// a (possibly nested) list of parameters. /// @return the address of the parameter list entry one past the last parameter /// that was documented inline const PRM_Template* documentParms(std::ostream& os, PRM_Template const * const parmList, int level = 0, int numParms = std::numeric_limits<int>::max()) { if (level > 10) return parmList; // probably something wrong if there are 10 levels of nesting auto indent = [&level]() { return std::string(4 * std::max(0, level), ' '); }; bool hasHeading = false; const PRM_Template* parm = parmList; for (int parmIdx = 0; parm && (parmIdx < numParms) && (parm->getType() != PRM_LIST_TERMINATOR); ++parmIdx, ++parm) { const auto parmType = parm->getType(); if (parmType == PRM_LABEL || parm->getInvisible()) continue; const auto parmLabel = [parm]() { UT_String lbl = parm->getLabel(); // Houdini's wiki markup parser aggressively expands square-bracketed text into // hyperlinks. The following is one way to suppress that behavior, given that // there doesn't appear to be any native escaping mechanism. 
Since we might want // to use brackets--but probably not hyperlinks--in parameter labels, and since // hacks like this don't render correctly in the parameter pane, we unconditionally // "escape" brackets in parameter labels, but only in the documentation markup. lbl.substitute("[", "&#91;", /*all=*/true); // 91 is the ISO-8859 code for "[" lbl.substitute("]", "&#93;", /*all=*/true); // 93 is the ISO-8859 code for "]" return lbl; }(); const bool hasLabel = parmLabel.isstring(); if ((parmType == PRM_SEPARATOR) || ((parmType == PRM_HEADING) && !hasLabel)) { // A separator or empty heading removes one level of nesting. // (There are no begin/end grouping indicators, so this is just a best guess.) level = std::max(0, level - 1); hasHeading = false; continue; } UT_String parmDoc; const PRM_SpareData* const spare = parm->getSparePtr(); if (spare && spare->getValue(kParmDocToken)) { // If the parameter was documented with setDocumentation(), use that text. // (This relies on kParmDocToken not being paired with nullptr. // ParmFactory::setDocumentation(), at least, ensures that it isn't.) parmDoc = spare->getValue(kParmDocToken); // If the text is empty, suppress this parameter. if (!parmDoc.isstring()) continue; } else { // Otherwise, if the parameter has a tool tip, use that. parmDoc = parm->getHelpText(); // If the parameter has no tool tip but has a choice list, list the choices // (except if the parameter is a toggle--toggles seem to be implemented as // on/off choice lists). if (!parmDoc.isstring() && (parmType.getOrdinalType() != PRM_Type::PRM_ORD_TOGGLE)) { if (const PRM_ChoiceList* choices = parm->getChoiceListPtr()) { for (const PRM_Name* choiceName = const_cast<PRM_ChoiceList*>(choices)->choiceNamesPtr(); choiceName && choiceName->getToken(); ++choiceName) { if (const char* n = choiceName->getLabel()) { parmDoc += (std::string{"* "} + n + "\n").c_str(); } } } } // Otherwise, show the parameter without documentation. 
/// @todo Just suppress undocumented parameters? if ((parmType != PRM_HEADING) && !parm->isMultiType() && !parmDoc.isstring()) { parmDoc = "&nbsp;"; } } const bool hasDoc = parmDoc.isstring(); if (parmType == PRM_HEADING) { // Decrement the nesting level for a heading label if there was // a previous heading. (This assumes that headings aren't nested.) if (hasHeading) --level; hasHeading = true; os << indent() << parmLabel.c_str() << ":\n"; ++level; // increment the nesting level below a heading if (hasDoc) { parmDoc.substitute("\n", ("\n" + indent()).c_str(), /*all=*/true); os << indent() << parmDoc.c_str() << "\n\n"; } } else if ((parmType == PRM_SWITCHER) || (parmType == PRM_SWITCHER_EXCLUSIVE) || (parmType == PRM_SWITCHER_REFRESH)) { // The vector size of a switcher is the number of folders. const int numFolders = parm->getVectorSize(); const PRM_Template* firstFolderParm = parm + 1; const PRM_Default* deflt = parm->getFactoryDefaults(); for (int folder = 0; deflt && (folder < numFolders); ++folder, ++deflt) { // The default values of a switcher are per-folder (member count, title) pairs. const int numMembers = deflt->getOrdinal(); char const * const title = deflt->getString(); if (title) { // If the folder has a title, show the title and increment // the nesting level for the folder's members. os << indent() << title << ":\n"; ++level; } firstFolderParm = documentParms(os, firstFolderParm, level, numMembers); if (title) { --level; } } parm = PRM_Template::getEndOfSwitcher(parm); --parm; // decrement to compensate for loop increment } else if (parm->isMultiType()) { if (hasLabel) { os << indent() << parmLabel.c_str() << ":\n"; } ++level; // increment the nesting level for the members of a multiparm if (hasDoc) { // Add the multiparm's documentation. 
parmDoc.substitute("\n", ("\n" + indent()).c_str(), /*all=*/true); os << indent() << parmDoc.c_str() << "\n\n"; } // Add documentation for the members of the multiparm // (but not for members of native types such as ramps, // since those members have only generic descriptions). if ((parm->getMultiType() != PRM_MULTITYPE_RAMP_FLT) && (parm->getMultiType() != PRM_MULTITYPE_RAMP_RGB)) { if (PRM_Template const * const subparms = parm->getMultiParmTemplate()) { documentParms(os, subparms, level); } } --level; } else if (hasLabel && hasDoc) { // Add this parameter only if it has both a label and documentation. os << indent() << parmLabel.c_str() << ":\n"; ++level; parmDoc.substitute("\n", ("\n" + indent()).c_str(), /*all=*/true); os << indent() << parmDoc.c_str() << "\n\n"; --level; } } return parm; } /// @brief Operator class that adds the help link. Used by the OpFactory. class OP_OperatorDW: public OP_Operator { public: OP_OperatorDW( OpFactory::OpFlavor flavor, const char* name, const char* english, OP_Constructor construct, PRM_Template* multiparms, const char* operatorTableName, unsigned minSources, unsigned maxSources, CH_LocalVariable* variables, unsigned flags, const char** inputlabels, const std::string& helpUrl, const std::string& doc) : OP_Operator(name, english, construct, multiparms, operatorTableName, minSources, maxSources, variables, flags, inputlabels) , mHelpUrl(helpUrl) { #ifndef SESI_OPENVDB // Generate help page markup for this operator if the help URL is empty // and the documentation string is nonempty. 
if (mHelpUrl.empty() && !doc.empty()) { UT_String flavorStr{OpFactory::flavorToString(flavor)}; flavorStr.toLower(); std::ostringstream os; os << "= " << english << " =\n\n" << "#type: node\n" << "#context: " << flavorStr << "\n" << "#internal: " << name << "\n\n" << doc << "\n\n"; { std::ostringstream osParm; documentParms(osParm, multiparms); const std::string parmDoc = osParm.str(); if (!parmDoc.empty()) { os << "@parameters\n\n" << parmDoc; } } const_cast<std::string*>(&mDoc)->assign(os.str()); } #endif } ~OP_OperatorDW() override {} bool getOpHelpURL(UT_String& url) override { url = mHelpUrl; return !mHelpUrl.empty(); } bool getHDKHelp(UT_String& txt) const override { if (!mHelpUrl.empty()) return false; // URL takes precedence over help text txt = mDoc; txt.hardenIfNeeded(); return !mDoc.empty(); } #ifndef SESI_OPENVDB bool getVersion(UT_String &version) override { auto it = spareData().find("operatorversion"); if (it != spareData().end()) { version = it->second; return true; } return OP_Operator::getVersion(version); } #endif const SpareDataMap& spareData() const { return mSpareData; } SpareDataMap& spareData() { return mSpareData; } private: const std::string mHelpUrl, mDoc; SpareDataMap mSpareData; }; class OpFactoryVerb: public SOP_NodeVerb { public: OpFactoryVerb(const std::string& name, SOP_NodeVerb::CookMode cookMode, const OpFactory::CacheAllocFunc& allocator, PRM_Template* parms) : mName{name} , mCookMode{cookMode} , mAllocator{allocator} , mParms{parms} {} SOP_NodeParms* allocParms() const override { return new SOP_NodeParmsOptions{mParms}; } SOP_NodeCache* allocCache() const override { return mAllocator(); } void setName(const std::string& name) { mName = name; } UT_StringHolder name() const override { return mName; } CookMode cookMode(const SOP_NodeParms*) const override { return mCookMode; } void cook(const CookParms& cookParms) const override { if (auto* cache = static_cast<SOP_NodeCacheOptions*>(cookParms.cache())) { cache->doCook(this, 
cookParms); } } private: std::string mName; SOP_NodeVerb::CookMode mCookMode; OpFactory::CacheAllocFunc mAllocator; PRM_Template* mParms; }; // class OpFactoryVerb } // anonymous namespace //////////////////////////////////////// struct OpFactory::Impl { Impl(const std::string& english, OP_Constructor& constructor, PRM_Template* parms, OP_OperatorTable& table, OpFactory::OpFlavor flavor): mFlavor(flavor), mEnglish(english), mConstruct(constructor), mTable(&table), mParms(parms), mObsoleteParms(nullptr), mMaxSources(0), mVariables(nullptr), mFlags(0) { } ~Impl() { std::for_each(mInputLabels.begin(), mInputLabels.end(), ::free); // Note: In get(), mOptInputLabels are appended to mInputLabels. } void init(const OpFactory& factory, OpPolicyPtr policy) { // Because mPolicy is supplied by this Impl's parent OpFactory // (which knows which OpPolicy subclass to use), initialization // of the following members must be postponed until both // the OpFactory and this Impl have been fully constructed. mPolicy = policy; mName = mPolicy->getName(factory); mLabelName = mPolicy->getLabelName(factory); mIconName = mPolicy->getIconName(factory); mHelpUrl = mPolicy->getHelpURL(factory); mFirstName = mPolicy->getFirstName(factory); mTabSubMenuPath = mPolicy->getTabSubMenuPath(factory); initScripting(); } OP_OperatorDW* get() { // Get the number of required inputs. const unsigned minSources = unsigned(mInputLabels.size()); // Append optional input labels to required input labels. mInputLabels.insert(mInputLabels.end(), mOptInputLabels.begin(), mOptInputLabels.end()); // Ensure that the maximum number of inputs is at least as large // as the number of labeled inputs. mMaxSources = std::max<unsigned>(unsigned(mInputLabels.size()), mMaxSources); mInputLabels.push_back(nullptr); OP_OperatorDW* op = new OP_OperatorDW(mFlavor, mName.c_str(), mLabelName.c_str(), mConstruct, mParms, UTisstring(mOperatorTableName.c_str()) ? 
mOperatorTableName.c_str() : 0, minSources, mMaxSources, mVariables, mFlags, const_cast<const char**>(&mInputLabels[0]), mHelpUrl, mDoc); if (!mIconName.empty()) op->setIconName(mIconName.c_str()); if (!mTabSubMenuPath.empty()) op->setOpTabSubMenuPath(mTabSubMenuPath.c_str()); if (mObsoleteParms != nullptr) op->setObsoleteTemplates(mObsoleteParms); if (mVerb) { // reset the name in case the internal name has changed mVerb->setName(mName); SOP_NodeVerb::registerVerb(mVerb); } mergeSpareData(op->spareData(), mSpareData); return op; } void initScripting() { // Install an HScript command to retrieve spare data from operators. if (auto* cmgr = CMD_Manager::getContext()) { if (!cmgr->isCommandDefined(kSpareDataCmdName)) { cmgr->installCommand(kSpareDataCmdName, "", cmdGetOperatorSpareData); } } // Install Python functions to retrieve spare data from operators. static bool sDidInstallHOMModule = false; if (!sDidInstallHOMModule) { // Install a _dwhoudiniutils module with a NodeType_spareData() function. static PY_PyMethodDef sMethods[] = { {"NodeType_spareData", homGetOperatorSpareData, PY_METH_VARARGS(), ""}, { nullptr, nullptr, 0, nullptr } }; { PY_InterpreterAutoLock interpreterLock; PY_Py_InitModule("_dwhoudiniutils", sMethods); sDidInstallHOMModule = true; } // Add methods to the hou.NodeType class. PYrunPythonStatementsAndExpectNoErrors("\ def _spareData(self, name):\n\ '''\n\ spareData(name) -> str or None\n\ \n\ Return the spare data with the given name, or None\n\ if no data with that name is defined for this node type.\n\ \n\ Currently, only node types defined with OpenVDB's OpFactory\n\ can have spare data. 
See www.openvdb.org for more information.\n\ '''\n\ import _dwhoudiniutils\n\ return _dwhoudiniutils.NodeType_spareData(self.category().name(), self.name(), name)\n\ \n\ def _spareDataDict(self):\n\ '''\n\ spareDataDict() -> dict of str to str\n\ \n\ Return a dictionary of the spare data for this node type.\n\ \n\ Currently, only node types defined with OpenVDB's OpFactory\n\ can have spare data. See www.openvdb.org for more information.\n\ '''\n\ import _dwhoudiniutils\n\ return _dwhoudiniutils.NodeType_spareData(self.category().name(), self.name())\n\ \n\ nt = __import__('hou').NodeType\n\ nt.spareData = _spareData\n\ nt.spareDataDict = _spareDataDict\n\ del nt, _spareData, _spareDataDict\n"); } } // HScript callback to retrieve spare data from an OP_OperatorDW-derived operator static void cmdGetOperatorSpareData(CMD_Args& args) { // The operator's network type ("Sop", "Dop", etc.) const char* const networkType = args[1]; // The operator's name const char* const opName = args[2]; // An optional spare data token const char* const token = args[3]; if (!networkType || !opName) { /// @todo Install this as a command.help file? args.out() << kSpareDataCmdName << "\n\ \n\ List spare data associated with an operator type.\n\ \n\ USAGE\n\ " << kSpareDataCmdName << " <networktype> <opname> [<token>]\n\ \n\ When the token is omitted, all (token, value) pairs\n\ associated with the operator type are displayed.\n\ \n\ Currently, only operator types defined with OpenVDB's OpFactory\n\ can have spare data. See www.openvdb.org for more information.\n\ \n\ EXAMPLES\n\ > " << kSpareDataCmdName << " Sop DW_OpenVDBConvert\n\ lists all spare data associated with the Convert VDB SOP\n\ > " << kSpareDataCmdName << " Sop DW_OpenVDBClip nativename\n\ displays the VDB Clip SOP's native name\n\ \n"; return; } // Retrieve the operator table for the specified network type. 
const OP_OperatorTable* table = nullptr; { OP_OperatorTableList opTables; OP_OperatorTable::getAllOperatorTables(opTables); for (const auto& t: opTables) { if (t && (t->getName() == networkType)) { table = t; break; } } } if (table) { if (const auto* op = table->getOperator(opName)) { // Retrieve the operator's spare data map. // (The map is empty for operators that don't support spare data.) const auto& spare = getOperatorSpareData(*op); if (token) { // If a token was provided and it exists in the map, // print the corresponding value. const auto it = spare.find(token); if (it != spare.end()) { args.out() << it->second << "\n"; } } else { // If no token was provided, print all of the operator's // (token, value) pairs. for (const auto& it: spare) { args.out() << it.first << " " << it.second << "\n"; } } } } } // Python callback to retrieve spare data from an OP_OperatorDW-derived operator static PY_PyObject* homGetOperatorSpareData(PY_PyObject* self, PY_PyObject* args) { // The operator's network type ("Sop", "Dop", etc.) const char* networkType = nullptr; // The operator's name const char* opName = nullptr; // An optional spare data token const char* token = nullptr; if (!PY_PyArg_ParseTuple(args, "ss|s", &networkType, &opName, &token)) { return nullptr; } if (!networkType || !opName) { return PY_Py_None(); } try { HOM_AutoLock homLock; // Retrieve the operator table for the specified network type. const OP_OperatorTable* table = nullptr; { OP_OperatorTableList opTables; OP_OperatorTable::getAllOperatorTables(opTables); for (const auto& t: opTables) { if (t && (t->getName() == networkType)) { table = t; break; } } } if (table) { if (const auto* op = table->getOperator(opName)) { // Retrieve the operator's spare data map. // (The map is empty for operators that don't support spare data.) const auto& spare = getOperatorSpareData(*op); if (token) { // If a token was provided and it exists in the map, // return the corresponding value. 
const auto it = spare.find(token); if (it != spare.end()) { return PY_Py_BuildValue("s", it->second.c_str()); } } else { // If no token was provided, return a dictionary // of all of the operator's (token, value) pairs. if (auto* dict = PY_Py_BuildValue("{}")) { for (const auto& it: spare) { PY_PyDict_SetItemString(dict, it.first.c_str(), PY_Py_BuildValue("s", it.second.c_str())); } return dict; } } } } } catch (HOM_Error&) { } return PY_Py_None(); } OpPolicyPtr mPolicy; // polymorphic, so stored by pointer OpFactory::OpFlavor mFlavor; std::string mEnglish, mName, mLabelName, mIconName, mHelpUrl, mDoc, mOperatorTableName; std::string mFirstName, mTabSubMenuPath; OP_Constructor mConstruct; OP_OperatorTable* mTable; PRM_Template *mParms, *mObsoleteParms; unsigned mMinSources; unsigned mMaxSources; CH_LocalVariable* mVariables; unsigned mFlags; std::vector<std::string> mAliases; std::vector<char*> mInputLabels, mOptInputLabels; OpFactoryVerb* mVerb = nullptr; bool mInvisible = false; SpareDataMap mSpareData; static constexpr auto* kSpareDataCmdName = "opsparedata"; }; OpFactory::OpFactory(const std::string& english, OP_Constructor ctor, ParmList& parms, OP_OperatorTable& table, OpFlavor flavor) { this->init(OpPolicyPtr(new OpPolicy), english, ctor, parms, table, flavor); } OpFactory::~OpFactory() { mImpl->mTable->addOperator(mImpl->get()); for (size_t n = 0, N = mImpl->mAliases.size(); n < N; ++n) { const std::string& alias = mImpl->mAliases[n]; if (!alias.empty()) { mImpl->mTable->setOpAlias(/*original=*/mImpl->mName.c_str(), alias.c_str()); } } // apply first name if set if (!mImpl->mFirstName.empty()) { mImpl->mTable->setOpFirstName(mImpl->mName.c_str(), mImpl->mFirstName.c_str()); } // hide node if marked as invisible if (mImpl->mInvisible) { mImpl->mTable->addOpHidden(mImpl->mName.c_str()); } } void OpFactory::init(OpPolicyPtr policy, const std::string& english, OP_Constructor ctor, ParmList& parms, OP_OperatorTable& table, OpFlavor flavor) { mImpl.reset(new 
Impl(english, ctor, parms.get(), table, flavor)); mImpl->init(*this, policy); } //static std::string OpFactory::flavorToString(OpFlavor flavor) { switch (flavor) { case SOP: return "SOP"; case POP: return "POP"; case ROP: return "ROP"; case VOP: return "VOP"; case HDA: return "HDA"; } return ""; } OpFactory::OpFlavor OpFactory::flavor() const { return mImpl->mFlavor; } std::string OpFactory::flavorString() const { return flavorToString(mImpl->mFlavor); } const std::string& OpFactory::name() const { return mImpl->mName; } const std::string& OpFactory::english() const { return mImpl->mEnglish; } const std::string& OpFactory::iconName() const { return mImpl->mIconName; } const std::string& OpFactory::helpURL() const { return mImpl->mHelpUrl; } const std::string& OpFactory::documentation() const { return mImpl->mDoc; } const OP_OperatorTable& OpFactory::table() const { return *mImpl->mTable; } OP_OperatorTable& OpFactory::table() { return *mImpl->mTable; } OpFactory& OpFactory::addAlias(const std::string& english) { if (!english.empty()) { this->addAliasVerbatim(mImpl->mPolicy->getName(*this, english)); } return *this; } OpFactory& OpFactory::addAliasVerbatim(const std::string& name) { if (!name.empty()) { mImpl->mAliases.push_back(name); } return *this; } OpFactory& OpFactory::setDocumentation(const std::string& doc) { mImpl->mDoc = doc; return *this; } OpFactory& OpFactory::addInput(const std::string& name) { mImpl->mInputLabels.push_back(::strdup(name.c_str())); return *this; } OpFactory& OpFactory::addOptionalInput(const std::string& name) { mImpl->mOptInputLabels.push_back(::strdup(name.c_str())); return *this; } OpFactory& OpFactory::setMaxInputs(unsigned n) { mImpl->mMaxSources = n; return *this; } OpFactory& OpFactory::setObsoleteParms(const ParmList& parms) { delete mImpl->mObsoleteParms; mImpl->mObsoleteParms = parms.get(); return *this; } OpFactory& OpFactory::setLocalVariables(CH_LocalVariable* v) { mImpl->mVariables = v; return *this; } OpFactory& 
OpFactory::setFlags(unsigned f) { mImpl->mFlags = f; return *this; } OpFactory& OpFactory::setInternalName(const std::string& name) { mImpl->mName = name; return *this; } OpFactory& OpFactory::setOperatorTable(const std::string& name) { mImpl->mOperatorTableName = name; return *this; } OpFactory& OpFactory::setVerb(SOP_NodeVerb::CookMode cookMode, const CacheAllocFunc& allocator) { if (flavor() != SOP) { throw std::runtime_error{"expected operator of type SOP, got " + flavorToString(flavor())}; } if (!allocator) throw std::invalid_argument{"must provide a cache allocator function"}; mImpl->mVerb = new OpFactoryVerb{name(), cookMode, allocator, mImpl->mParms}; return *this; } OpFactory& OpFactory::setInvisible() { mImpl->mInvisible = true; return *this; } OpFactory& OpFactory::addSpareData(const SpareDataMap& spare) { mergeSpareData(mImpl->mSpareData, spare); return *this; } //////////////////////////////////////// const SpareDataMap& getOperatorSpareData(const OP_Operator& op) { static const SpareDataMap sNoSpareData; if (const auto* opdw = dynamic_cast<const OP_OperatorDW*>(&op)) { return opdw->spareData(); } return sNoSpareData; } void addOperatorSpareData(OP_Operator& op, SpareDataMap& spare) { if (auto* opdw = dynamic_cast<OP_OperatorDW*>(&op)) { mergeSpareData(opdw->spareData(), spare); } else { throw std::runtime_error("spare data cannot be added to the \"" + op.getName().toStdString() + "\" operator"); } } //////////////////////////////////////// //virtual std::string OpPolicy::getName(const OpFactory&, const std::string& english) { UT_String s(english); s.forceValidVariableName(); return s.toStdString(); } //virtual std::string OpPolicy::getLabelName(const OpFactory& factory) { return factory.english(); } //////////////////////////////////////// const PRM_ChoiceList PrimGroupMenuInput1 = SOP_Node::primGroupMenu; const PRM_ChoiceList PrimGroupMenuInput2 = SOP_Node::primGroupMenu; const PRM_ChoiceList PrimGroupMenuInput3 = SOP_Node::primGroupMenu; const 
PRM_ChoiceList PrimGroupMenuInput4 = SOP_Node::primGroupMenu; const PRM_ChoiceList PrimGroupMenu = SOP_Node::primGroupMenu; } // namespace houdini_utils
50,328
C++
31.077119
99
0.602746
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Diagnostics.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Diagnostics.cc /// /// @author FX R&D OpenVDB team /// /// @brief Perform diagnostics on VDB volumes to detect potential issues. #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/math/Math.h> // Tolerance, isApproxEqual and isFinite #include <openvdb/math/Operators.h> // ISGradientNormSqrd #include <openvdb/tools/LevelSetRebuild.h> #include <openvdb/tools/LevelSetTracker.h> // LevelSetTracker::normalize #include <UT/UT_Interrupt.h> #include <UT/UT_UniquePtr.h> #include <PRM/PRM_Parm.h> #include <memory> #include <string> #include <sstream> #include <type_traits> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; //////////////////////////////////////// // Local Utility Methods namespace { //////////////////////////////////////// // Tests struct AlwaysFalse { template<typename Iterator> bool operator()(const Iterator&) const { return false; } }; struct FiniteValue { template<typename Iterator> bool operator()(const Iterator& it) const { return openvdb::math::isFinite(*it); } }; template<typename ValueType> struct ApproxEqual { ApproxEqual(const ValueType& val, const ValueType& tol = openvdb::math::Tolerance<ValueType>::value()) : mValue(val), mTol(tol) {} template<typename Iterator> bool operator()(const Iterator& it) const { return openvdb::math::isApproxEqual(mValue, *it, mTol); } const ValueType mValue, mTol; }; template<typename ValueType> struct AbsApproxEqual { AbsApproxEqual(const ValueType& val, const ValueType& tol = openvdb::math::Tolerance<ValueType>::value()) : mValue(openvdb::math::Abs(val)), mTol(tol) {} template<typename Iterator> bool operator()(const Iterator& it) const { return openvdb::math::isApproxEqual(mValue, openvdb::math::Abs(*it), mTol); } const ValueType mValue, mTol; }; template<typename ValueType> struct 
AbsLessThan { AbsLessThan(ValueType val) : mValue(openvdb::math::Abs(val)) {} template<typename Iterator> bool operator()(const Iterator& it) const { return !(ValueType(openvdb::math::Abs(*it)) < mValue); } const ValueType mValue; }; template<typename T> inline float toFloat(const T s) { return float(s); } template<typename T> inline float toFloat(const openvdb::math::Vec3<T> v) { return float(v[0]); } struct InRange { InRange(float minValue, float maxValue) : mMin(minValue), mMax(maxValue) {} template<typename Iterator> bool operator()(const Iterator& it) const { return test(*it); } template<typename T> bool test(const T& s) const { return !(s < T(mMin) || T(mMax) < s); } template<typename T> bool test(const openvdb::math::Vec3<T>& v) const { return test(v.length()); } const float mMin, mMax; }; template<typename TreeType> struct GradientNorm { using ValueType = typename TreeType::ValueType; GradientNorm(const TreeType& tree, double voxelSize, ValueType tol) : mAcc(tree), mScale(ValueType(1.0 / voxelSize)), mTol(tol) {} GradientNorm(const GradientNorm& rhs) : mAcc(rhs.mAcc.tree()), mScale(rhs.mScale), mTol(rhs.mTol) {} template<typename Iterator> bool operator()(const Iterator& it) { const openvdb::Coord ijk = it.getCoord(); // ignore voxels adjacent to the active narrow band boundary if (!mAcc.isValueOn(ijk.offsetBy(-1, 0, 0))) return true; if (!mAcc.isValueOn(ijk.offsetBy( 1, 0, 0))) return true; if (!mAcc.isValueOn(ijk.offsetBy( 0,-1, 0))) return true; if (!mAcc.isValueOn(ijk.offsetBy( 0, 1, 0))) return true; if (!mAcc.isValueOn(ijk.offsetBy( 0, 0,-1))) return true; if (!mAcc.isValueOn(ijk.offsetBy( 0, 0, 1))) return true; return openvdb::math::isApproxEqual(ValueType(1.0), gradientNorm(ijk, mScale), mTol); } template<typename T> inline T gradientNorm(const openvdb::Coord& ijk, const T scale) { return scale * T(std::sqrt(double( openvdb::math::ISGradientNormSqrd<openvdb::math::FIRST_BIAS>::result(mAcc, ijk)))); } /// @{ // The gradient magnitude test is applied 
only to scalar, floating-point grids, // but this class needs to compile for all grid types. template<typename T> inline openvdb::math::Vec3<T> gradientNorm(const openvdb::Coord&, const openvdb::math::Vec3<T>) { return openvdb::math::Vec3<T>(0); } inline bool gradientNorm(const openvdb::Coord&, bool) { return false; } /// @} private: GradientNorm& operator=(const GradientNorm&); // disable assignment openvdb::tree::ValueAccessor<const TreeType> mAcc; const ValueType mScale, mTol; }; template<typename TreeType> struct SameSign { using ValueType = typename TreeType::ValueType; SameSign(const TreeType& tree) : mAcc(tree) {} SameSign(const SameSign& rhs) : mAcc(rhs.mAcc.tree()) {} template<typename Iterator> bool operator()(const Iterator& it) { ValueType val; const bool state = mAcc.probeValue(it.getCoord(), val); return state ? true : (val < ValueType(0)) == (*it < ValueType(0)); } private: SameSign& operator=(const SameSign&); // disable assignment openvdb::tree::ValueAccessor<const TreeType> mAcc; }; //////////////////////////////////////// /// @brief Visits values and performs tests template<typename GridType> struct Visitor { enum ValueKind { TILES_AND_VOXELS, TILES, VOXELS }; enum ValueState { ALL_VALUES, ACTIVE_VALUES, INACTIVE_VALUES }; using TreeType = typename GridType::TreeType; using LeafNodeType = typename TreeType::LeafNodeType; using RootNodeType = typename TreeType::RootNodeType; using NodeChainType = typename RootNodeType::NodeChainType; using InternalNodeType = typename NodeChainType::template Get<1>; using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type; using BoolTreePtr = typename BoolTreeType::Ptr; ////////// Visitor(const TreeType& tree) : mTree(tree), mValueMask(new BoolTreeType(false)) { tree.getNodes(mLeafNodes); tree.getNodes(mInternalNodes); } BoolTreePtr& valueMask() { return mValueMask; } std::string invalidValuesInfo() const { std::stringstream info; if (!mValueMask->empty()) { info << "invalid: "; const size_t 
voxelCount = size_t(mValueMask->activeLeafVoxelCount()); if (voxelCount > 0) info << voxelCount << " voxels "; const size_t tileCount = size_t(mValueMask->activeTileCount()); if (tileCount > 0) { if (voxelCount > 0) info << "& "; info << tileCount << " tiles"; } } return info.str(); } template<typename TestType> bool run(ValueKind kind, const ValueState& state, const TestType& test) { mValueMask.reset(new BoolTreeType(false)); if (kind == TILES_AND_VOXELS || kind == VOXELS) { LeafNodeReduction<TestType> op(state, &mLeafNodes[0], test, *mValueMask); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, mLeafNodes.size()), op); } if (kind == TILES_AND_VOXELS || kind == TILES) { InternalNodeReduction<TestType> op(state, &mInternalNodes[0], test, *mValueMask); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, mInternalNodes.size()), op); TestType myTest(test); if (state == ACTIVE_VALUES) { typename TreeType::ValueOnCIter it(mTree); it.setMaxDepth(TreeType::ValueOnCIter::LEAF_DEPTH - 2); for ( ; it; ++it) { if (!myTest(it)) { mValueMask->fill(it.getBoundingBox(), true); } } } else if (state == INACTIVE_VALUES) { typename TreeType::ValueOffCIter it(mTree); it.setMaxDepth(TreeType::ValueOffCIter::LEAF_DEPTH - 2); for ( ; it; ++it) { if (!myTest(it)) { mValueMask->fill(it.getBoundingBox(), true); } } } else { typename TreeType::ValueAllCIter it(mTree); it.setMaxDepth(TreeType::ValueAllCIter::LEAF_DEPTH - 2); for ( ; it; ++it) { if (!myTest(it)) { mValueMask->fill(it.getBoundingBox(), true); } } } } return mValueMask->empty(); // passed if mask is empty } private: template<typename TestType> struct LeafNodeReduction { LeafNodeReduction(const ValueState& state, const LeafNodeType ** nodes, const TestType& test, BoolTreeType& mask) : mState(state), mNodes(nodes), mPrimMask(&mask), mTempMask(false), mMask(mPrimMask ? 
mPrimMask : &mTempMask), mTest(test) {} LeafNodeReduction(LeafNodeReduction& other, tbb::split) : mState(other.mState), mNodes(other.mNodes), mPrimMask(other.mPrimMask), mTempMask(false), mMask(&mTempMask), mTest(other.mTest) {} void join(LeafNodeReduction& other) { mMask->merge(*other.mMask); } void operator()(const tbb::blocked_range<size_t>& range) { openvdb::tree::ValueAccessor<BoolTreeType> mask(*mMask); for (size_t n = range.begin(), N = range.end(); n < N; ++n) { const LeafNodeType& node = *mNodes[n]; if (mState == ACTIVE_VALUES) { for (typename LeafNodeType::ValueOnCIter it = node.cbeginValueOn(); it; ++it) { if (!mTest(it)) { mask.setValueOn(it.getCoord()); } } } else if (mState == INACTIVE_VALUES) { for (typename LeafNodeType::ValueOffCIter it = node.cbeginValueOff(); it; ++it) { if (!mTest(it)) { mask.setValueOn(it.getCoord()); } } } else { for (typename LeafNodeType::ValueAllCIter it=node.cbeginValueAll(); it; ++it) { if (!mTest(it)) { mask.setValueOn(it.getCoord()); } } } } } private: ValueState mState; LeafNodeType const * const * const mNodes; BoolTreeType * const mPrimMask; BoolTreeType mTempMask; BoolTreeType * const mMask; TestType mTest; }; // struct LeafNodeReduction template<typename TestType> struct InternalNodeReduction { InternalNodeReduction(const ValueState& state, const InternalNodeType** nodes, const TestType& test, BoolTreeType& mask) : mState(state), mNodes(nodes), mPrimMask(&mask), mTempMask(false), mMask(mPrimMask ? 
mPrimMask : &mTempMask), mTest(test) {} InternalNodeReduction(InternalNodeReduction& other, tbb::split) : mState(other.mState), mNodes(other.mNodes), mPrimMask(other.mPrimMask), mTempMask(false), mMask(&mTempMask), mTest(other.mTest) {} void join(InternalNodeReduction& other) { mMask->merge(*other.mMask); } void operator()(const tbb::blocked_range<size_t>& range) { openvdb::Coord ijk; const int dim = int(InternalNodeType::ChildNodeType::DIM) - 1; for (size_t n = range.begin(), N = range.end(); n < N; ++n) { const InternalNodeType& node = *mNodes[n]; if (mState == ACTIVE_VALUES) { for (typename InternalNodeType::ValueOnCIter it = node.cbeginValueOn(); it; ++it) { if (!node.isChildMaskOn(it.pos()) && !mTest(it)) { ijk = it.getCoord(); mMask->fill(openvdb::CoordBBox(ijk, ijk.offsetBy(dim)), true); } } } else if (mState == INACTIVE_VALUES) { for (typename InternalNodeType::ValueOffCIter it = node.cbeginValueOff(); it; ++it) { if (!node.isChildMaskOn(it.pos()) && !mTest(it)) { ijk = it.getCoord(); mMask->fill(openvdb::CoordBBox(ijk, ijk.offsetBy(dim)), true); } } } else { for (typename InternalNodeType::ValueAllCIter it = node.cbeginValueAll(); it; ++it) { if (!node.isChildMaskOn(it.pos()) && !mTest(it)) { ijk = it.getCoord(); mMask->fill(openvdb::CoordBBox(ijk, ijk.offsetBy(dim)), true); } } } } } private: ValueState mState; InternalNodeType const * const * const mNodes; BoolTreeType * const mPrimMask; BoolTreeType mTempMask; BoolTreeType * const mMask; TestType mTest; }; // struct InternalNodeReduction const TreeType& mTree; BoolTreePtr mValueMask; std::vector<const LeafNodeType*> mLeafNodes; std::vector<const InternalNodeType*> mInternalNodes; }; // struct Visitor //////////////////////////////////////// // HDK Points With Values Create and Transfer template<typename BoolLeafNodeType> struct GetPoints { GetPoints(const BoolLeafNodeType ** maskNodes, UT_Vector3* points, const size_t* offsetTable, const openvdb::math::Transform& xform) : mMaskNodes(maskNodes) , 
mPoints(points) , mOffsetTable(offsetTable) , mXform(xform) { } void operator()(const tbb::blocked_range<size_t>& range) const { openvdb::Vec3d xyz; for (size_t n = range.begin(), N = range.end(); n < N; ++n) { const BoolLeafNodeType& maskNode = *mMaskNodes[n]; UT_Vector3* points = &mPoints[mOffsetTable[n]]; size_t idx = 0; for (typename BoolLeafNodeType::ValueOnCIter it = maskNode.cbeginValueOn(); it; ++it) { xyz = mXform.indexToWorld(it.getCoord()); UT_Vector3& pos = points[idx++]; pos[0] = UT_Vector3::value_type(xyz[0]); pos[1] = UT_Vector3::value_type(xyz[1]); pos[2] = UT_Vector3::value_type(xyz[2]); } } } BoolLeafNodeType const * const * const mMaskNodes; UT_Vector3 * const mPoints; size_t const * const mOffsetTable; openvdb::math::Transform mXform; }; // struct GetPoints template<typename BoolTreeType> inline size_t getPoints(const openvdb::math::Transform& xform, const BoolTreeType& mask, UT_UniquePtr<UT_Vector3[]>& points) { using BoolLeafNodeType = typename BoolTreeType::LeafNodeType; std::vector<const BoolLeafNodeType*> nodes; mask.getNodes(nodes); const size_t tileCount = mask.activeTileCount(); size_t voxelCount = 0, totalCount = tileCount; if (!nodes.empty()) { UT_UniquePtr<size_t[]> offsetTable(new size_t[nodes.size()]); for (size_t n = 0, N = nodes.size(); n < N; ++n) { offsetTable[n] = voxelCount; voxelCount += nodes[n]->onVoxelCount(); } totalCount += voxelCount; points.reset(new UT_Vector3[totalCount]); tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()), GetPoints<BoolLeafNodeType>(&nodes[0], points.get(), offsetTable.get(), xform)); } if (tileCount > 0) { if (!points) { points.reset(new UT_Vector3[tileCount]); } openvdb::Vec3d xyz; typename BoolTreeType::ValueOnCIter it(mask); it.setMaxDepth(BoolTreeType::ValueOnCIter::LEAF_DEPTH - 1); for (size_t idx = voxelCount; it; ++it, ++idx) { xyz = xform.indexToWorld(it.getCoord()); UT_Vector3& pos = points[idx]; pos[0] = UT_Vector3::value_type(xyz[0]); pos[1] = UT_Vector3::value_type(xyz[1]); 
// (tail of getPoints(): finish writing tile-center positions, then return the total count)
            pos[2] = UT_Vector3::value_type(xyz[2]);
        }
    }

    return totalCount;
}


/// @brief  Append @a pointCount points to @a detail at the positions in @a points.
/// @return the offset of the first appended point.
/// NOTE(review): assumes appendPointBlock() yields contiguous offsets starting at
/// getNumPointOffsets() — confirm against the GU_Detail documentation.
inline GA_Offset
transferPoints(GU_Detail& detail, const UT_UniquePtr<UT_Vector3[]>& points, size_t pointCount)
{
    const GA_Offset startOffset = detail.getNumPointOffsets();
    detail.appendPointBlock(pointCount);

    GA_Offset offset = startOffset;
    for (size_t n = 0, N = pointCount; n < N; ++n) {
        detail.setPos3(offset++, points[n]);
    }

    return startOffset;
}


/// TBB body object that copies, for each mask leaf node, the grid values at the
/// node's active coordinates into a flat output array.  @c mOffsetTable gives the
/// starting index in the output array for each mask node.
template<typename TreeType>
struct GetValues
{
    using ValueType = typename TreeType::ValueType;
    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;

    GetValues(const TreeType& tree, const BoolLeafNodeType ** maskNodes,
        ValueType* values, const size_t* offsetTable)
        : mTree(&tree)
        , mMaskNodes(maskNodes)
        , mValues(values)
        , mOffsetTable(offsetTable)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        // Per-thread accessor for cached, thread-safe reads of the source tree.
        openvdb::tree::ValueAccessor<const TreeType> acc(*mTree);

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            const BoolLeafNodeType& maskNode = *mMaskNodes[n];
            // Output slots for this node begin at its precomputed offset.
            ValueType* values = &mValues[mOffsetTable[n]];

            size_t idx = 0;
            for (typename BoolLeafNodeType::ValueOnCIter it = maskNode.cbeginValueOn();
                it; ++it)
            {
                values[idx++] = acc.getValue(it.getCoord());
            }
        }
    }

    TreeType const * const mTree;
    BoolLeafNodeType const * const * const mMaskNodes;
    ValueType * const mValues;
    size_t const * const mOffsetTable;
}; // struct GetValues


/// @brief  Extract the values of @a tree at every active coordinate of @a mask
///         into the flat array @a values (allocated here).
/// @return the number of extracted values.
/// @details Layout: values for active mask-leaf voxels come first (filled in
/// parallel via GetValues), followed by one value per active mask tile,
/// starting at index @c voxelCount.  Mirrors the layout produced by getPoints().
template<typename TreeType>
inline size_t
getValues(const TreeType& tree,
    const typename TreeType::template ValueConverter<bool>::Type& mask,
    UT_UniquePtr<typename TreeType::ValueType[]>& values)
{
    using ValueType = typename TreeType::ValueType;
    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;

    std::vector<const BoolLeafNodeType*> nodes;
    mask.getNodes(nodes);

    const size_t tileCount = mask.activeTileCount();

    size_t voxelCount = 0, totalCount = tileCount;

    if (!nodes.empty()) {

        // Prefix-sum of per-node active-voxel counts -> start index for each node.
        UT_UniquePtr<size_t[]> offsetTable(new size_t[nodes.size()]);

        for (size_t n = 0, N = nodes.size(); n < N; ++n) {
            offsetTable[n] = voxelCount;
            voxelCount += nodes[n]->onVoxelCount();
        }

        totalCount += voxelCount;
        values.reset(new ValueType[totalCount]);

        tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()),
            GetValues<TreeType>(tree, &nodes[0], values.get(), offsetTable.get()));
    }

    if (tileCount > 0) {

        if (!values) { // no leaf-node values were extracted above
            values.reset(new ValueType[tileCount]);
        }

        // Restrict iteration to tiles (exclude leaf-node values).
        typename BoolTreeType::ValueOnCIter it(mask);
        it.setMaxDepth(BoolTreeType::ValueOnCIter::LEAF_DEPTH - 1);

        openvdb::tree::ValueAccessor<const TreeType> acc(tree);

        // Tile values are appended after the voxel values.
        for (size_t idx = voxelCount; it; ++it, ++idx) {
            values[idx] = acc.getValue(it.getCoord());
        }
    }

    return totalCount;
}


/// Copy scalar @a values to a new float point attribute named "<name>_scalar",
/// starting at point @a startOffset.
template<typename ValueType>
inline void
transferValues(GU_Detail& detail, const std::string& name, GA_Offset startOffset,
    const UT_UniquePtr<ValueType[]>& values, size_t pointCount)
{
    GA_RWAttributeRef attr = detail.addFloatTuple(
        GA_ATTRIB_POINT, (name + "_scalar").c_str(), 1, GA_Defaults(0));

    GA_RWHandleF handle = attr.getAttribute();

    for (size_t n = 0, N = pointCount; n < N; ++n) {
        handle.set(startOffset++, float(values[n]));
    }
}


/// Overload for vector-valued grids: copy @a values to a new 3-float point
/// attribute named "<name>_vector", starting at point @a startOffset.
template<typename ValueType>
inline void
transferValues(GU_Detail& detail, const std::string& name, GA_Offset startOffset,
    const UT_UniquePtr<openvdb::math::Vec3<ValueType>[]>& values, size_t pointCount)
{
    GA_RWAttributeRef attr = detail.addFloatTuple(
        GA_ATTRIB_POINT, (name + "_vector").c_str(), 3, GA_Defaults(0));

    GA_RWHandleV3 handle = attr.getAttribute();

    UT_Vector3 vec(0.0f, 0.0f, 0.0f);
    using VectorType = openvdb::math::Vec3<ValueType>;

    for (size_t n = 0, N = pointCount; n < N; ++n) {
        const VectorType& val = values[n];
        vec[0] = float(val[0]);
        vec[1] = float(val[1]);
        vec[2] = float(val[2]);
        handle.set(startOffset++, vec);
    }
}


////////////////////////////////////////

// Utility Objects


/// Plain aggregate of on/off switches (and thresholds) describing which
/// diagnostic tests to run, which failures to mark, and which to fix.
struct TestData
{
    // settings
    bool useMask, usePoints, respectGridClass;
    // general
    // tests (each test has an optional "id" = mark and "fix" companion flag)
    bool testFinite, idFinite, fixFinite;
    bool testUniformBackground, idUniformBackground, fixUniformBackground;
    bool testInRange, idInRange, fixInRange;
    bool testUniformVoxelSize;
    float rangeMin, rangeMax;
    // level set tests
    bool testSymmetricNarrowBand;
    bool testMinimumBandWidth;
    bool testClosedSurface;
    bool testGradientMagnitude, idGradientMagnitude, fixGradientMagnitude;
    bool testNoActiveTiles, idNoActiveTiles, fixNoActiveTiles;
    float gradientTolerance, minBandWidth;
    // fog volume tests
    bool testBackgroundZero, idBackgroundZero, fixBackgroundZero;
    bool testActiveValuesFromZeroToOne, idActiveValuesFromZeroToOne,
        fixActiveValuesFromZeroToOne;
}; // struct TestData


/// Accumulates pass/fail/skip counts for one grid and renders them
/// as a human-readable report via str().
struct GridTestLog
{
    GridTestLog(int primitiveIndex, const std::string& gridName)
        : mGridName(), mFailedMsg(), mFailed(0), mPassed(0), mSkipped(0)
    {
        // Label every report line with the primitive index and grid name.
        std::stringstream name;
        name << " (" << primitiveIndex << ") '" << gridName << "'";
        mGridName = name.str();
    }

    size_t failedCount() const { return mFailed; }
    size_t passedCount() const { return mPassed; }
    size_t skippedCount() const { return mSkipped; }

    /// Record a failed test, with an optional detail message appended to the report.
    void appendFailed(const std::string& testName, const std::string& msg = "")
    {
        mFailed++;
        mFailedMsg += " - '" + testName + "' " + msg + "\n";
    }

    void appendPassed() { mPassed++; }
    void appendSkipped() { mSkipped++; }

    /// Render the accumulated counts and failure details as a report string.
    std::string str() const
    {
        std::stringstream log;
        log << mGridName;

        if (mPassed > 0) { log << " passed " << mPassed; }
        if (mFailed > 0) { log << " failed " << mFailed; }
        if ((mPassed + mFailed) == 0) { log << " not tested"; }
        log << "\n";

        if (mSkipped > 0) {
            log << " - skipped " << mSkipped
                << " scalar floating-point specific test" << (mSkipped > 1 ? "s.\n" : ".\n");
        }

        if (!mFailedMsg.empty()) {
            log << mFailedMsg << "\n";
        }

        return log.str();
    }

private:
    std::string mGridName, mFailedMsg;
    size_t mFailed, mPassed, mSkipped;
}; // struct GridTestLog


/// Pairs a topology mask with either a single replacement value
/// (isRange == false) or a [minValue, maxValue] clamp range (isRange == true)
/// used to repair the voxels/tiles the mask flags.
template<typename GridType>
struct MaskData
{
    using TreeType = typename GridType::TreeType;
    using ValueType = typename GridType::ValueType;
    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;

    MaskData() : mask(), minValue(ValueType(0)), maxValue(ValueType(0)), isRange(false) {}

    MaskData(typename BoolTreeType::Ptr& tree, ValueType val)
        : mask(tree), minValue(val), maxValue(val), isRange(false) {}

    MaskData(typename BoolTreeType::Ptr& tree, ValueType minval, ValueType maxval)
        : mask(tree), minValue(minval), maxValue(maxval), isRange(true) {}

    typename BoolTreeType::Ptr mask;
    ValueType minValue, maxValue;
    bool isRange;
}; // struct MaskData


////////////////////////////////////////


/// Renormalize a floating-point level set in place (three first-order
/// TVD-RK3 normalization iterations).
template<typename GridType>
inline typename std::enable_if<std::is_floating_point<typename GridType::ValueType>::value, void>::type
normalizeLevelSet(GridType& grid)
{
    openvdb::tools::LevelSetTracker<GridType> op(grid);
    op.setNormCount(3);
    op.setSpatialScheme(openvdb::math::FIRST_BIAS);
    op.setTemporalScheme(openvdb::math::TVD_RK3);
    op.normalize();
}

/// No-op overload for non-floating-point grids (normalization is undefined for them).
template<typename GridType>
inline typename std::enable_if<!std::is_floating_point<typename GridType::ValueType>::value, void>::type
normalizeLevelSet(GridType&)
{
}


/// Clamp a scalar value to [minVal, maxVal].
template<typename T>
inline T
clampValueAndVectorMagnitude(T s, const T& minVal, const T& maxVal)
{
    if (s < minVal) s = minVal;
    if (s > maxVal) s = maxVal;
    return s;
}

/// Vector overload: clamp the vector's magnitude to [minVal[0], maxVal[0]],
/// preserving its direction.
template<typename T>
inline openvdb::math::Vec3<T>
clampValueAndVectorMagnitude(openvdb::math::Vec3<T> v,
    const openvdb::math::Vec3<T>& minVal,
    const openvdb::math::Vec3<T>& maxVal)
{
    const T scale = clampValueAndVectorMagnitude(v.length(), minVal[0], maxVal[0]);
    v.normalize();
    v *= scale;
    return v;
}


/// TBB body object that repairs the voxels flagged by a set of mask leaf
/// nodes: either clamped to a range or replaced with a fixed value,
/// as described by the associated MaskData.
template<typename GridType>
struct FixVoxelValues
{
    using TreeType = typename GridType::TreeType;
// (interior of struct FixVoxelValues, continued)
    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;
    using MaskDataType = MaskData<GridType>;

    FixVoxelValues(TreeType& tree, const BoolLeafNodeType ** maskNodes,
        const MaskDataType& maskdata)
        : mTree(&tree)
        , mMaskNodes(maskNodes)
        , mMaskData(&maskdata)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        using ValueOnCIter = typename BoolLeafNodeType::ValueOnCIter;

        openvdb::tree::ValueAccessor<TreeType> acc(*mTree);
        const ValueType minVal = mMaskData->minValue;
        const ValueType maxVal = mMaskData->maxValue;
        const bool isRange = mMaskData->isRange;

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            const BoolLeafNodeType& maskNode = *mMaskNodes[n];

            // Flagged coordinates with no corresponding leaf node are tile
            // values; those are repaired separately in fixValues().
            LeafNodeType* node = acc.probeLeaf(maskNode.origin());
            if (!node) continue;

            if (isRange) { // clamp
                for (ValueOnCIter it = maskNode.cbeginValueOn(); it; ++it) {
                    // setValueOnly() changes the value without touching the active state.
                    node->setValueOnly(it.pos(),
                        clampValueAndVectorMagnitude(node->getValue(it.pos()), minVal, maxVal));
                }
            } else { // replace
                for (ValueOnCIter it = maskNode.cbeginValueOn(); it; ++it) {
                    node->setValueOnly(it.pos(), minVal);
                }
            }
        }
    }

    TreeType * const mTree;
    BoolLeafNodeType const * const * const mMaskNodes;
    MaskDataType const * const mMaskData;
}; // struct FixVoxelValues


/// @brief  Return a deep copy of @a grid in which every voxel and tile flagged
///         by one of the @a fixMasks has been clamped or replaced, as each
///         MaskData prescribes.
/// @param inactivateTiles      if true, deactivate all tiles in the result.
/// @param renormalizeLevelSet  if true, renormalize the result (level sets only).
/// @details Masks are applied in order; coordinates already repaired by an
/// earlier mask are excluded from later ones via topologyDifference().
template<typename GridType>
inline typename GridType::Ptr
fixValues(const GridType& grid, std::vector<MaskData<GridType> > fixMasks,
    bool inactivateTiles = false, bool renormalizeLevelSet = false)
{
    using TreeType = typename GridType::TreeType;
    using ValueType = typename GridType::ValueType;
    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;
    using MaskDataType = MaskData<GridType>;

    typename GridType::Ptr replacementGrid = grid.deepCopy();

    // Tracks coordinates already repaired so later masks don't re-touch them.
    BoolTreeType alreadyFixedValues(false);

    for (size_t n = 0, N = fixMasks.size(); n < N; ++n) {

        MaskDataType& fix = fixMasks[n];

        BoolTreeType mask(false);
        mask.topologyUnion(*fix.mask);
        mask.topologyDifference(alreadyFixedValues);

        // fix voxels
        {
            std::vector<const BoolLeafNodeType*> nodes;
            mask.getNodes(nodes);

            tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()),
                FixVoxelValues<GridType>(replacementGrid->tree(), &nodes[0], fix));
        }

        // fix tiles (restrict iteration to above-leaf depth)
        typename BoolTreeType::ValueOnCIter it(mask);
        it.setMaxDepth(BoolTreeType::ValueOnCIter::LEAF_DEPTH - 1);

        openvdb::tree::ValueAccessor<TreeType> acc(replacementGrid->tree());
        openvdb::Coord ijk;

        if (fix.isRange) { // clamp
            for (; it; ++it) {
                ijk = it.getCoord();
                const ValueType val = clampValueAndVectorMagnitude(
                    acc.getValue(ijk), fix.minValue, fix.maxValue);
                // Rewrite the tile value, preserving its current active state.
                acc.addTile(it.getLevel(), ijk, val, acc.isValueOn(ijk));
            }
        } else { // replace
            const ValueType val = fix.minValue;
            for (; it; ++it) {
                ijk = it.getCoord();
                acc.addTile(it.getLevel(), ijk, val, acc.isValueOn(ijk));
            }
        }

        alreadyFixedValues.topologyUnion(mask);
    }

    if (inactivateTiles) {
        typename TreeType::ValueOnIter it(replacementGrid->tree());
        it.setMaxDepth(TreeType::ValueOnIter::LEAF_DEPTH - 1);
        for (; it; ++it) {
            it.setActiveState(false);
        }
    }

    if (renormalizeLevelSet) {
        normalizeLevelSet(*replacementGrid);
    }

    return replacementGrid;
}


/// @brief  Merge the given @a masks and, as requested, add a mask VDB and/or a
///         point cloud (with "input"/"output" value attributes) to @a detail,
///         highlighting the flagged regions of @a grid.
/// @param replacementGrid  if non-null, its values at the masked coordinates
///                         are also exported as the "output" point attribute.
template<typename GridType>
inline void
outputMaskAndPoints(const GridType& grid, const std::string& gridName,
    std::vector<typename GridType::TreeType::template ValueConverter<bool>::Type::Ptr> masks,
    bool outputMask, bool outputPoints, GU_Detail& detail, hvdb::Interrupter& interupter,
    const GridType* replacementGrid = nullptr)
{
    using TreeType = typename GridType::TreeType;
    using ValueType = typename GridType::ValueType;
    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolGridType = typename openvdb::Grid<BoolTreeType>;

    if (outputMask || outputPoints) {

        const TreeType& tree = grid.tree();

        typename BoolGridType::Ptr maskGrid =
openvdb::createGrid<BoolGridType>(false); BoolTreeType& mask = maskGrid->tree(); for (size_t n = 0, N = masks.size(); n < N; ++n) { BoolTreeType* maskPt = masks[n].get(); if (maskPt && !maskPt->empty()) { mask.merge(*masks[n]); } } if (outputPoints && !mask.empty()) { if (interupter.wasInterrupted()) return; UT_UniquePtr<UT_Vector3[]> points; const size_t totalPointCount = getPoints(grid.transform(), mask, points); if (interupter.wasInterrupted()) return; if (totalPointCount > 0) { const GA_Offset startOffset = transferPoints(detail, points, totalPointCount); points.reset(); // clear UT_UniquePtr<ValueType[]> values; getValues(tree, mask, values); if (interupter.wasInterrupted()) return; transferValues(detail, "input", startOffset, values, totalPointCount); if (replacementGrid) { if (interupter.wasInterrupted()) return; getValues(replacementGrid->tree(), mask, values); if (interupter.wasInterrupted()) return; transferValues(detail, "output", startOffset, values, totalPointCount); } } } if (interupter.wasInterrupted()) return; if (outputMask && !mask.empty()) { maskGrid->setName(gridName + "_mask"); maskGrid->setTransform(grid.transform().copy()); hvdb::createVdbPrimitive(detail, maskGrid, maskGrid->getName().c_str()); } } } //////////////////////////////////////// struct TestCollection { TestCollection(const TestData& test, GU_Detail& detail, hvdb::Interrupter& interupter, UT_ErrorManager* errorManager = nullptr) : mTest(test) , mDetail(&detail) , mInterupter(&interupter) , mErrorManager(errorManager) , mMessageStr() , mPrimitiveName() , mPrimitiveIndex(0) , mGridsFailed(0) , mReplacementGrid() { } ~TestCollection() { if (mErrorManager) { if (mGridsFailed > 0) { std::stringstream msg; msg << mGridsFailed << " grid" << (mGridsFailed > 1 ? 
"s" : "") << " failed one or more tests."; mErrorManager->addWarning(SOP_OPTYPE_NAME, SOP_MESSAGE, msg.str().c_str()); } if (!mMessageStr.empty()) { std::stringstream msg; msg << "Diagnostics results\n"; msg << mMessageStr; mErrorManager->addMessage(SOP_OPTYPE_NAME, SOP_MESSAGE, msg.str().c_str()); } } } void setPrimitiveIndex(int i) { mPrimitiveIndex = i; } void setPrimitiveName(const std::string& name) { mPrimitiveName = name; } bool hasReplacementGrid() const { return mReplacementGrid != nullptr; } openvdb::GridBase::Ptr replacementGrid() { return mReplacementGrid; } template<typename GridType> void operator()(const GridType& grid) { mReplacementGrid.reset(); // clear using TreeType = typename GridType::TreeType; using ValueType = typename GridType::ValueType; using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type; using MaskDataType = MaskData<GridType>; using VisitorType = Visitor<GridType>; ////////// const double voxelSize = grid.transform().voxelSize()[0]; const std::string gridName = mPrimitiveName.empty() ? 
grid.getName() : mPrimitiveName; const TreeType& tree = grid.tree(); GridTestLog log(mPrimitiveIndex, gridName); VisitorType visitor(tree); std::vector<typename BoolTreeType::Ptr> idMasks; std::vector<MaskDataType> fixMasks; // General tests bool inactivateTiles = false, renormalizeLevelSet = false; if (mTest.testFinite) { if (!visitor.run(VisitorType::TILES_AND_VOXELS, VisitorType::ALL_VALUES, FiniteValue())) { log.appendFailed("Finite Values", visitor.invalidValuesInfo()); if (mTest.fixFinite) { fixMasks.push_back(MaskDataType(visitor.valueMask(), tree.background())); } if (mTest.idFinite) idMasks.push_back(visitor.valueMask()); } else { log.appendPassed(); } } if (mInterupter->wasInterrupted()) return; if (mTest.testUniformBackground && (!mTest.respectGridClass || grid.getGridClass() != openvdb::GRID_LEVEL_SET)) { ApproxEqual<ValueType> test(tree.background()); if (!visitor.run(VisitorType::TILES_AND_VOXELS, VisitorType::INACTIVE_VALUES, test)) { log.appendFailed("Uniform Background", visitor.invalidValuesInfo()); if (mTest.fixUniformBackground) { fixMasks.push_back(MaskDataType(visitor.valueMask(), tree.background())); } if (mTest.idUniformBackground) idMasks.push_back(visitor.valueMask()); } else { log.appendPassed(); } } if (mInterupter->wasInterrupted()) return; if (mTest.testInRange) { InRange test(mTest.rangeMin, mTest.rangeMax); if (!visitor.run(VisitorType::TILES_AND_VOXELS, VisitorType::ALL_VALUES, test)) { log.appendFailed("Values in Range", visitor.invalidValuesInfo()); if (mTest.fixInRange) { fixMasks.push_back(MaskDataType(visitor.valueMask(), ValueType(mTest.rangeMin), ValueType(mTest.rangeMax))); } if (mTest.idInRange) idMasks.push_back(visitor.valueMask()); } else { log.appendPassed(); } } if (mInterupter->wasInterrupted()) return; // Level Set tests if (!mTest.respectGridClass || grid.getGridClass() == openvdb::GRID_LEVEL_SET) { if (mTest.testUniformVoxelSize) { if (!grid.hasUniformVoxels()) log.appendFailed("'Uniform Voxel Size'"); else 
log.appendPassed(); } if (mTest.testNoActiveTiles) { if (!visitor.run(VisitorType::TILES, VisitorType::ACTIVE_VALUES, AlwaysFalse())) { log.appendFailed("Inactive Tiles", visitor.invalidValuesInfo()); if (mTest.fixNoActiveTiles) inactivateTiles = true; if (mTest.idNoActiveTiles) idMasks.push_back(visitor.valueMask()); } else { log.appendPassed(); } } if (mTest.testSymmetricNarrowBand) { if (std::is_floating_point<ValueType>::value) { const ValueType background = openvdb::math::Abs(tree.background()); AbsApproxEqual<ValueType> bgTest(background); InRange valueTest(-toFloat(background), toFloat(background)); if (!visitor.run(VisitorType::TILES_AND_VOXELS, VisitorType::INACTIVE_VALUES, bgTest) || !visitor.run(VisitorType::VOXELS, VisitorType::ACTIVE_VALUES, valueTest)) { log.appendFailed("Symmetric Narrow Band"); } else { log.appendPassed(); } } else { log.appendSkipped(); } } if (mInterupter->wasInterrupted()) return; if (mTest.testMinimumBandWidth) { if (std::is_floating_point<ValueType>::value) { const ValueType width = ValueType(mTest.minBandWidth) * ValueType(voxelSize); AbsLessThan<ValueType> test(width); if (tree.background() < width || !visitor.run( VisitorType::TILES_AND_VOXELS, VisitorType::INACTIVE_VALUES, test)) { log.appendFailed("Minimum Band Width"); } else { log.appendPassed(); } } else { log.appendSkipped(); } } if (mInterupter->wasInterrupted()) return; if (mTest.testClosedSurface) { if (std::is_floating_point<ValueType>::value) { typename GridType::Ptr levelSet = openvdb::tools::levelSetRebuild( grid, 0.0f, 2.0f, 2.0f, nullptr, mInterupter); SameSign<TreeType> test(levelSet->tree()); if (!visitor.run(VisitorType::TILES_AND_VOXELS, VisitorType::ALL_VALUES, test)) { log.appendFailed("Closed Surface"); } else { log.appendPassed(); } } else { log.appendSkipped(); } } if (mInterupter->wasInterrupted()) return; if (mTest.testGradientMagnitude) { if (std::is_floating_point<ValueType>::value) { GradientNorm<TreeType> test(tree, voxelSize, 
ValueType(mTest.gradientTolerance)); if (!visitor.run(VisitorType::VOXELS, VisitorType::ACTIVE_VALUES, test)) { log.appendFailed("Gradient Magnitude", visitor.invalidValuesInfo()); if (mTest.fixGradientMagnitude) renormalizeLevelSet = true; if (mTest.idGradientMagnitude) idMasks.push_back(visitor.valueMask()); } else { log.appendPassed(); } } else { log.appendSkipped(); } } } // end Level Set tests // Fog Volume tests if (!mTest.respectGridClass || grid.getGridClass() == openvdb::GRID_FOG_VOLUME) { if (mTest.testBackgroundZero) { ApproxEqual<ValueType> test(ValueType(0.0)); if (!visitor.run(VisitorType::TILES_AND_VOXELS, VisitorType::INACTIVE_VALUES, test)) { log.appendFailed("Background Zero", visitor.invalidValuesInfo()); if (mTest.fixBackgroundZero) { fixMasks.push_back(MaskDataType(visitor.valueMask(), ValueType(0.0))); } if (mTest.idBackgroundZero) idMasks.push_back(visitor.valueMask()); } else { log.appendPassed(); } } if (mTest.testActiveValuesFromZeroToOne) { InRange test(0.0f, 1.0f); if (!visitor.run(VisitorType::TILES_AND_VOXELS, VisitorType::ACTIVE_VALUES, test)) { log.appendFailed("Active Values in [0, 1]", visitor.invalidValuesInfo()); if (mTest.fixActiveValuesFromZeroToOne) { fixMasks.push_back( MaskDataType(visitor.valueMask(), ValueType(0.0), ValueType(1.0))); } if (mTest.idActiveValuesFromZeroToOne) idMasks.push_back(visitor.valueMask()); } else { log.appendPassed(); } } } // end Fog Volume tests typename GridType::Ptr replacement; if (!fixMasks.empty() || inactivateTiles || renormalizeLevelSet) { replacement = fixValues(grid, fixMasks, inactivateTiles, renormalizeLevelSet); mReplacementGrid = replacement; } if (mInterupter->wasInterrupted()) return; outputMaskAndPoints<GridType>(grid, gridName, idMasks, mTest.useMask, mTest.usePoints, *mDetail, *mInterupter, replacement.get()); // log diagnostics info mMessageStr += log.str(); if (log.failedCount() > 0) ++mGridsFailed; } private: TestData mTest; GU_Detail * const mDetail; hvdb::Interrupter * const 
mInterupter; UT_ErrorManager * const mErrorManager; std::string mMessageStr, mPrimitiveName; int mPrimitiveIndex, mGridsFailed; openvdb::GridBase::Ptr mReplacementGrid; }; // struct TestCollection } // unnamed namespace //////////////////////////////////////// // SOP Implementation class SOP_OpenVDB_Diagnostics: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Diagnostics(OP_Network*, const char* name, OP_Operator*); static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned i) const override { return (i == 1); } int selectOperationTests(); int validateOperationTests(); class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; TestData getTestData(const fpreal time) const; }; protected: bool updateParmsFlags() override; }; int SOP_OpenVDB_Diagnostics::selectOperationTests() { setInt("test_valrange", 0, 0, 0); setInt("test_backgroundzero", 0, 0, 0); setInt("test_fogvalues", 0, 0, 0); setInt("test_voxelsize", 0, 0, 0); setInt("test_activetiles", 0, 0, 0); setInt("test_symmetric", 0, 0, 0); setInt("test_surface", 0, 0, 0); setInt("test_bandwidth", 0, 0, 0); if (bool(evalInt("verify_fogvolume", 0, 0))) { setInt("test_finite", 0, 0, 1); setInt("test_backgroundzero", 0, 0, 1); setInt("test_fogvalues", 0, 0, 1); } if (bool(evalInt("verify_csg", 0, 0))) { setInt("test_finite", 0, 0, 1); setInt("test_voxelsize", 0, 0, 1); setInt("test_activetiles", 0, 0, 1); setInt("test_symmetric", 0, 0, 1); setInt("test_surface", 0, 0, 1); setInt("test_background", 0, 0, 0); } if (bool(evalInt("verify_filtering", 0, 0))) { setInt("test_finite", 0, 0, 1); setInt("test_voxelsize", 0, 0, 1); setInt("test_activetiles", 0, 0, 1); setInt("test_symmetric", 0, 0, 1); setInt("test_bandwidth", 0, 0, 1); setInt("bandwidth", 0, 0, 3); setInt("test_background", 0, 0, 0); } if (bool(evalInt("verify_advection", 0, 0))) { setInt("test_finite", 0, 0, 1); setInt("test_voxelsize", 0, 0, 1); setInt("test_activetiles", 0, 0, 1); 
// (tail of SOP_OpenVDB_Diagnostics::selectOperationTests — remainder of the
//  "verify_advection" preset: enable the level-set advection test set)
        setInt("test_surface", 0, 0, 1);
        setInt("test_symmetric", 0, 0, 1);
        setInt("test_bandwidth", 0, 0, 1);
        setInt("bandwidth", 0, 0, 3);
        setInt("test_background", 0, 0, 0);
    }

    return 1;
}


// Re-evaluate the individual test toggles and update the "verify_*" preset
// toggles to reflect whether each preset's required tests are all enabled.
int
SOP_OpenVDB_Diagnostics::validateOperationTests()
{
    // general tests
    const bool testFinite = bool(evalInt("test_finite", 0, 0));
    const bool testUniformBackground = bool(evalInt("test_background", 0, 0));
    const bool testInRange = bool(evalInt("test_valrange", 0, 0));
    const bool testUniformVoxelSize = bool(evalInt("test_voxelsize", 0, 0));

    // level set
    const bool testSymmetricNarrowBand = bool(evalInt("test_symmetric", 0, 0));
    // Band-width test only counts when the requested width exceeds two voxels.
    const bool minBandWidth =
        bool(evalInt("test_bandwidth", 0, 0)) && evalInt("bandwidth", 0, 0) > 2;
    const bool testClosedSurface = bool(evalInt("test_surface", 0, 0));
    const bool testNoActiveTiles = bool(evalInt("test_activetiles", 0, 0));

    // Common prerequisites for all level-set presets (note that the uniform
    // background and value-range tests must be OFF for these presets).
    const bool basicLevelSetChecks =
        testFinite && !testUniformBackground && !testInRange
        && testUniformVoxelSize && testNoActiveTiles;

    // fog volume tests
    const bool basicFogVolumeChecks =
        testFinite && !testInRange
        && bool(evalInt("test_backgroundzero", 0, 0))
        && bool(evalInt("test_fogvalues", 0, 0));

    { // Validate fog volume operations
        setInt("verify_fogvolume", 0, 0, int(basicFogVolumeChecks));
    }

    { // Validate level set CSG tests
        bool isValid = basicLevelSetChecks && testClosedSurface && testSymmetricNarrowBand;
        setInt("verify_csg", 0, 0, int(isValid));
    }

    { // Validate level set filtering tests
        bool isValid = basicLevelSetChecks && testSymmetricNarrowBand && minBandWidth;
        setInt("verify_filtering", 0, 0, int(isValid));
    }

    { // Validate level set advection tests
        bool isValid = basicLevelSetChecks && testClosedSurface
            && testSymmetricNarrowBand && minBandWidth;
        setInt("verify_advection", 0, 0, int(isValid));
    }

    return 1;
}


// Parameter-callback forward declarations.
int selectOperationTestsCB(void*, int, float, const PRM_Template*);
int validateOperationTestsCB(void*, int, float, const PRM_Template*);


// Forwards the parameter callback to SOP_OpenVDB_Diagnostics::selectOperationTests().
int
selectOperationTestsCB(void* data, int /*idx*/, float /*time*/, const
PRM_Template*) { SOP_OpenVDB_Diagnostics* sop = static_cast<SOP_OpenVDB_Diagnostics*>(data); if (sop == nullptr) return 0; return sop->selectOperationTests(); } int validateOperationTestsCB(void* data, int /*idx*/, float /*time*/, const PRM_Template*) { SOP_OpenVDB_Diagnostics* sop = static_cast<SOP_OpenVDB_Diagnostics*>(data); if (sop == nullptr) return 0; return sop->validateOperationTests(); } // Hack to work around lack of grid layout in parameter pane // (one space character is four pixels wide, but the middle column is centered) inline std::string spacing(int widthInPixels) { return std::string(widthInPixels >> 1, ' '); // 2 * width / 4 } void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenu) .setTooltip("Specify a subset of the input VDBs to examine.") .setDocumentation( "A subset of the input VDBs to be examined" " (see [specifying volumes|/model/volumes#group])")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "usemask", "Mark in Mask VDB") .setTooltip( "For tests set to Mark, output a mask VDB that highlights\n" "problematic regions in input VDBs.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "usepoints", "Mark as Points With Values") .setDefault(PRMoneDefaults) .setTooltip( "For tests set to Mark, output a point cloud that highlights\n" "problematic regions in input VDBs.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "respectclass", "Respect VDB Class") .setDefault(PRMoneDefaults) .setTooltip( "If disabled, apply fog volume and level set tests to all VDBs,\n" "not just VDBs classified as fog volumes or level sets.")); ////////// // Operation parms.add(hutil::ParmFactory(PRM_SEPARATOR,"operation", "")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "verify_fogvolume", "Validate Fog Volumes") .setCallbackFunc(&selectOperationTestsCB) .setTooltip("Verify that VDBs classified as fog volumes are valid fog volumes.")); 
parms.add(hutil::ParmFactory(PRM_TOGGLE, "verify_csg", "Validate for Level Set CSG and Fracture") .setCallbackFunc(&selectOperationTestsCB) .setTooltip( "Verify that level set VDBs meet the requirements\n" "for CSG and fracture operations.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "verify_filtering", "Validate for Level Set Filtering and Renormalization") .setCallbackFunc(&selectOperationTestsCB) .setTooltip( "Verify that level set VDBs meet the requirements\n" "for filtering and renormalization.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "verify_advection", "Validate for Level Set Advection and Morphing") .setCallbackFunc(&selectOperationTestsCB) .setTooltip( "Verify that level set VDBs meet the requirements\n" "for advection and morphing.")); ////////// // General parms.add(hutil::ParmFactory(PRM_HEADING, "general", "General Tests") .setDocumentation( "In the following, enable __Mark__ to add incorrect values" " to the output mask and/or point cloud, and enable __Fix__" " to replace incorrect values.")); // { Finite values parms.add(hutil::ParmFactory(PRM_TOGGLE, "test_finite", "Finite Values" + spacing(35) ) .setCallbackFunc(&validateOperationTestsCB) .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_MENU_JOIN) .setTooltip("Verify that all values are finite and non-NaN.") .setDocumentation( "Verify that all values are finite and non-NaN.\n\n" "If __Fix__ is enabled, replace incorrect values with the background value.")); parms.add(hutil::ParmFactory(PRM_TOGGLE | PRM_TYPE_JOIN_NEXT, "id_finite", "Mark") .setTooltip("Add incorrect values to the output mask and/or point cloud.") .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_TOGGLE, "fix_finite", "Fix") .setTooltip("Replace incorrect values with the background value.") .setDocumentation(nullptr)); // } // { Uniform background values parms.add(hutil::ParmFactory(PRM_TOGGLE, "test_background", "Uniform Background" ) .setCallbackFunc(&validateOperationTestsCB) 
.setTypeExtended(PRM_TYPE_MENU_JOIN) .setTooltip("Verify that all inactive voxels are set to the background value.") .setDocumentation( "Verify that all inactive voxels are set to the background value.\n\n" "If __Fix__ is enabled, replace incorrect values with the background value.")); parms.add(hutil::ParmFactory(PRM_TOGGLE | PRM_TYPE_JOIN_NEXT, "id_background", "Mark") .setTooltip("Add incorrect values to the output mask and/or point cloud.") .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_TOGGLE, "fix_background", "Fix") .setTooltip("Replace incorrect values with the background value.") .setDocumentation(nullptr)); // } // { Values in range parms.add(hutil::ParmFactory(PRM_TOGGLE, "test_valrange", "Values in Range" + spacing(23) ) .setCallbackFunc(&validateOperationTestsCB) .setTypeExtended(PRM_TYPE_MENU_JOIN) .setTooltip( "Verify that all scalar voxel values and vector magnitudes\n" "are in the given range.") .setDocumentation( "Verify that all scalar voxel values and vector magnitudes are in the given range.\n\n" "If __Fix__ is enabled, clamp values and vector magnitudes to the given range.")); parms.add(hutil::ParmFactory(PRM_TOGGLE | PRM_TYPE_JOIN_NEXT, "id_valrange", "Mark") .setTooltip("Add incorrect values to the output mask and/or point cloud.") .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_TOGGLE, "fix_valrange", "Fix") .setTooltip("Clamp values and vector magnitudes to the given range.") .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_LABEL | PRM_TYPE_JOIN_NEXT, "label_valrange", "")); std::vector<fpreal> defaultRange; defaultRange.push_back(fpreal(0.0)); defaultRange.push_back(fpreal(1.0)); parms.add(hutil::ParmFactory(PRM_FLT_J, "valrange", "Range") .setDefault(defaultRange) .setVectorSize(2) .setTooltip("Minimum and maximum allowed values (inclusive)")); // } ////////// // Level Set parms.add(hutil::ParmFactory(PRM_HEADING, "ls_heading", "Level Set Tests")); // { Symmetric Narrow Band 
parms.add(hutil::ParmFactory(PRM_TOGGLE, "test_symmetric", "Symmetric Narrow Band") .setCallbackFunc(&validateOperationTestsCB) .setTooltip("Verify that level set inside and outside values are of equal magnitude.")); // } // { Min Band Width parms.add(hutil::ParmFactory(PRM_TOGGLE, "test_bandwidth", "Minimum Band Width") .setCallbackFunc(&validateOperationTestsCB) .setTooltip( "Verify that interior and exterior narrow band widths" " are sufficiently large.")); parms.add(hutil::ParmFactory(PRM_LABEL | PRM_TYPE_JOIN_NEXT, "label_bandwidth", "")); parms.add(hutil::ParmFactory(PRM_INT_J, "bandwidth", "Minimum Width in Voxels") .setCallbackFunc(&validateOperationTestsCB) .setDefault(3)); // } // { Closed Surface parms.add(hutil::ParmFactory(PRM_TOGGLE, "test_surface", "Closed Surface") .setCallbackFunc(&validateOperationTestsCB) .setTooltip("Verify that level sets represent watertight surfaces.")); // } // { Gradient magnitude parms.add(hutil::ParmFactory(PRM_TOGGLE, "test_gradient", "Gradient Magnitude" + spacing(7) ) .setCallbackFunc(&validateOperationTestsCB) .setTypeExtended(PRM_TYPE_MENU_JOIN) .setTooltip( "Verify that the level set gradient has magnitude one everywhere\n" "(within a given tolerance).") .setDocumentation( "Verify that the level set gradient has magnitude one everywhere" " (within a given tolerance).\n\n" "If __Fix__ is enabled, renormalize level sets.")); parms.add(hutil::ParmFactory(PRM_TOGGLE | PRM_TYPE_JOIN_NEXT, "id_gradient", "Mark") .setTooltip("Add incorrect values to the output mask and/or point cloud.") .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_TOGGLE, "fix_gradient", "Fix") .setTooltip("Renormalize level sets.") .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_LABEL | PRM_TYPE_JOIN_NEXT, "label_gradient", "")); parms.add(hutil::ParmFactory(PRM_FLT_J, "gradienttolerance", "Tolerance") .setDefault(0.2f) .setDocumentation(nullptr)); // } // { Inactive Tiles parms.add(hutil::ParmFactory(PRM_TOGGLE, 
"test_activetiles", "Inactive Tiles" + spacing(36) ) .setCallbackFunc(&validateOperationTestsCB) .setTypeExtended(PRM_TYPE_MENU_JOIN) .setTooltip("Verify that level sets have no active tiles.") .setDocumentation( "Verify that level sets have no active tiles.\n\n" "If __Fix__ is enabled, deactivate all tiles.")); parms.add(hutil::ParmFactory(PRM_TOGGLE | PRM_TYPE_JOIN_NEXT, "id_activetiles", "Mark") .setTooltip("Add incorrect values to the output mask and/or point cloud.") .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_TOGGLE, "fix_activetiles", "Fix") .setTooltip("Deactivate all tiles.") .setDocumentation(nullptr)); // } // { Uniform Voxel Size parms.add(hutil::ParmFactory(PRM_TOGGLE, "test_voxelsize", "Uniform Voxel Size") .setTooltip("Verify that level sets have uniform voxel sizes.")); // } ////////// // Fog Volume parms.add(hutil::ParmFactory(PRM_HEADING, "fog_heading", "Fog Volume Tests") .setTooltip("Fog Volume specific tests.")); // { Background values parms.add(hutil::ParmFactory(PRM_TOGGLE, "test_backgroundzero", "Background Zero" + spacing(17) ) .setCallbackFunc(&validateOperationTestsCB) .setTypeExtended(PRM_TYPE_MENU_JOIN) .setTooltip("Verify that all inactive voxels in fog volumes have value zero.") .setDocumentation( "Verify that all inactive voxels in fog volumes have value zero.\n\n" "If __Fix__ is enabled, set inactive voxels to zero.")); parms.add(hutil::ParmFactory(PRM_TOGGLE | PRM_TYPE_JOIN_NEXT, "id_backgroundzero", "Mark") .setTooltip("Add incorrect values to the output mask and/or point cloud.") .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_TOGGLE, "fix_backgroundzero", "Fix") .setTooltip("Set inactive voxels to zero.") .setDocumentation(nullptr)); // } // { Active values parms.add(hutil::ParmFactory(PRM_TOGGLE, // Note: this label currently determines the spacing of the second column of toggles. 
"test_fogvalues", "Active Values in [0, 1]") .setCallbackFunc(&validateOperationTestsCB) .setTypeExtended(PRM_TYPE_MENU_JOIN) .setTooltip( "Verify that all active voxels in fog volumes\n" "have values in the range [0, 1].") .setDocumentation( "Verify that all active voxels in fog volumes have values in the range" " &#91;0, 1&#93;.\n\n" // "[0, 1]" "If __Fix__ is enabled, clamp active voxels to the range &#91;0, 1&#93;.")); parms.add(hutil::ParmFactory(PRM_TOGGLE | PRM_TYPE_JOIN_NEXT, "id_fogvalues", "Mark") .setTooltip("Add incorrect values to the output mask and/or point cloud.") .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_TOGGLE, "fix_fogvalues", "Fix") .setTooltip("Clamp active values to the range [0, 1].") .setDocumentation(nullptr)); // } hvdb::OpenVDBOpFactory("VDB Diagnostics", SOP_OpenVDB_Diagnostics::factory, parms, *table) .addInput("VDB Volumes") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Diagnostics::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Examine VDB volumes for bad values.\"\"\"\n\ \n\ @overview\n\ \n\ This node runs a suite of tests to validate and correct common errors in VDB volumes.\n\ It provides the option to output either a mask VDB or a point cloud that identifies\n\ the troublesome voxels, and it is optionally able to correct most types of errors.\n\ \n\ @related\n\ - [Node:sop/vdbdiagnostics]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Diagnostics::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Diagnostics(net, name, op); } SOP_OpenVDB_Diagnostics::SOP_OpenVDB_Diagnostics(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } bool SOP_OpenVDB_Diagnostics::updateParmsFlags() { bool changed = false; const bool identify = bool(evalInt("usemask", 0, 0)) || 
bool(evalInt("usepoints", 0, 0)); // general const bool testFinite = bool(evalInt("test_finite", 0, 0)); changed |= enableParm("id_finite", identify && testFinite); changed |= enableParm("fix_finite", testFinite); const bool testUniformBackground = bool(evalInt("test_background", 0, 0)); changed |= enableParm("id_background", identify && testUniformBackground); changed |= enableParm("fix_background", testUniformBackground); const bool testInRange = bool(evalInt("test_valrange", 0, 0)); changed |= enableParm("id_valrange", identify && testInRange); setVisibleState("label_valrange", testInRange); setVisibleState("valrange", testInRange); changed |= enableParm("fix_valrange", testInRange); // level set setVisibleState("label_bandwidth", evalInt("test_bandwidth", 0, 0)); setVisibleState("bandwidth", evalInt("test_bandwidth", 0, 0)); const bool testGradientMagnitude = bool(evalInt("test_gradient", 0, 0)); changed |= enableParm("id_gradient", identify && testGradientMagnitude); setVisibleState("label_gradient", testGradientMagnitude); setVisibleState("gradienttolerance", testGradientMagnitude); changed |= enableParm("fix_gradient", testGradientMagnitude); const bool testNoActiveTiles = bool(evalInt("test_activetiles", 0, 0)); changed |= enableParm("id_activetiles", identify && testNoActiveTiles); changed |= enableParm("fix_activetiles", testNoActiveTiles); // fog volume const bool testBackgroundZero = bool(evalInt("test_backgroundzero", 0, 0)); changed |= enableParm("id_backgroundzero", identify && testBackgroundZero); changed |= enableParm("fix_backgroundzero", testBackgroundZero); const bool testActiveValuesFromZeroToOne = bool(evalInt("test_fogvalues", 0, 0)); changed |= enableParm("id_fogvalues", identify && testActiveValuesFromZeroToOne); changed |= enableParm("fix_fogvalues", testActiveValuesFromZeroToOne); return changed; } TestData SOP_OpenVDB_Diagnostics::Cache::getTestData(const fpreal time) const { TestData test; test.useMask = bool(evalInt("usemask", 0, 
time)); test.usePoints = bool(evalInt("usepoints", 0, time)); test.respectGridClass = bool(evalInt("respectclass", 0, time)); const bool identify = test.useMask || test.usePoints; // general test.testFinite = bool(evalInt("test_finite", 0, time)); test.idFinite = identify && bool(evalInt("id_finite", 0, time)); test.fixFinite = bool(evalInt("fix_finite", 0, time)); test.testUniformBackground = bool(evalInt("test_background", 0, time)); test.idUniformBackground = identify && bool(evalInt("id_background", 0, time)); test.fixUniformBackground = bool(evalInt("fix_background", 0, time)); test.testInRange = bool(evalInt("test_valrange", 0, time)); test.idInRange = identify && bool(evalInt("id_valrange", 0, time)); test.fixInRange = bool(evalInt("fix_valrange", 0, time)); test.rangeMin = float(evalFloat("valrange", 0, time)); test.rangeMax = float(evalFloat("valrange", 1, time)); // level set test.testSymmetricNarrowBand = bool(evalInt("test_symmetric", 0, time)); test.testMinimumBandWidth = bool(evalInt("test_bandwidth", 0, time)); test.minBandWidth = float(evalInt("bandwidth", 0, time)); test.testClosedSurface = bool(evalInt("test_surface", 0, time)); test.testGradientMagnitude = bool(evalInt("test_gradient", 0, time)); test.idGradientMagnitude = identify && bool(evalInt("id_gradient", 0, time)); test.fixGradientMagnitude = bool(evalInt("fix_gradient", 0, time)); test.gradientTolerance = float(evalFloat("gradienttolerance", 0, time)); test.testNoActiveTiles = bool(evalInt("test_activetiles", 0, time)); test.idNoActiveTiles = identify && bool(evalInt("id_activetiles", 0, time)); test.fixNoActiveTiles = bool(evalInt("fix_activetiles", 0, time)); test.testUniformVoxelSize = bool(evalInt("test_voxelsize", 0, time)); // fog volume test.testBackgroundZero = bool(evalInt("test_backgroundzero", 0, time)); test.idBackgroundZero = identify && bool(evalInt("id_backgroundzero", 0, time)); test.fixBackgroundZero = bool(evalInt("fix_backgroundzero", 0, time)); 
test.testActiveValuesFromZeroToOne = bool(evalInt("test_fogvalues", 0, time)); test.idActiveValuesFromZeroToOne = identify && bool(evalInt("id_fogvalues", 0, time)); test.fixActiveValuesFromZeroToOne = bool(evalInt("fix_fogvalues", 0, time)); return test; } OP_ERROR SOP_OpenVDB_Diagnostics::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); hvdb::Interrupter boss("Performing diagnostics"); TestCollection tests(getTestData(time), *gdp, boss, UTgetErrorManager()); const GA_PrimitiveGroup* group = matchGroup(*gdp, evalStdString("group", time)); size_t vdbPrimCount = 0; for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { if (boss.wasInterrupted()) break; tests.setPrimitiveName(it.getPrimitiveName().toStdString()); tests.setPrimitiveIndex(int(it.getIndex())); hvdb::GEOvdbApply<hvdb::VolumeGridTypes>(**it, tests, /*makeUnique=*/false); if (tests.replacementGrid()) { hvdb::replaceVdbPrimitive(*gdp, tests.replacementGrid(), **it, true, tests.replacementGrid()->getName().c_str()); } ++vdbPrimCount; } if (vdbPrimCount == 0) { addWarning(SOP_MESSAGE, "Did not find any VDBs to diagnose."); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
67,089
C++
33.212137
100
0.598638
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_NodeVDB.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_NodeVDB.h /// @author FX R&D OpenVDB team /// @brief Base class for OpenVDB plugins #ifndef OPENVDB_HOUDINI_SOP_NODEVDB_HAS_BEEN_INCLUDED #define OPENVDB_HOUDINI_SOP_NODEVDB_HAS_BEEN_INCLUDED #include <houdini_utils/ParmFactory.h> #include <openvdb/openvdb.h> #include <openvdb/Platform.h> #include <SOP/SOP_Node.h> #ifndef SESI_OPENVDB #include <UT/UT_DSOVersion.h> #endif #include "SOP_VDBVerbUtils.h" #include <iosfwd> #include <string> class GU_Detail; namespace openvdb_houdini { /// @brief Use this class to register a new OpenVDB operator (SOP, POP, etc.) /// @details This class ensures that the operator uses the appropriate OpPolicy. /// @sa houdini_utils::OpFactory, houdini_utils::OpPolicy class OPENVDB_HOUDINI_API OpenVDBOpFactory: public houdini_utils::OpFactory { public: /// Construct an OpFactory that on destruction registers a new OpenVDB operator type. OpenVDBOpFactory(const std::string& english, OP_Constructor, houdini_utils::ParmList&, OP_OperatorTable&, houdini_utils::OpFactory::OpFlavor = SOP); /// @brief Set the name of the equivalent native operator as shipped with Houdini. /// @details This is only needed where the native name policy doesn't provide the correct name. /// Pass an empty string to indicate that there is no equivalent native operator. OpenVDBOpFactory& setNativeName(const std::string& name); private: std::string mNativeName; }; //////////////////////////////////////// /// @brief Base class from which to derive OpenVDB-related Houdini SOPs class OPENVDB_HOUDINI_API SOP_NodeVDB: public SOP_Node { public: SOP_NodeVDB(OP_Network*, const char*, OP_Operator*); ~SOP_NodeVDB() override = default; void fillInfoTreeNodeSpecific(UT_InfoTree&, const OP_NodeInfoTreeParms&) override; void getNodeSpecificInfoText(OP_Context&, OP_NodeInfoParms&) override; /// @brief Return this node's registered verb. 
const SOP_NodeVerb* cookVerb() const override; /// @brief Retrieve a group from a geometry detail by parsing a pattern /// (typically, the value of a Group parameter belonging to this node). /// @throw std::runtime_error if the pattern is nonempty but doesn't match any group. /// @todo This is a wrapper for SOP_Node::parsePrimitiveGroups(), so it needs access /// to a SOP_Node instance. But it probably doesn't need to be a SOP_NodeVDB method. /// @{ const GA_PrimitiveGroup* matchGroup(GU_Detail&, const std::string& pattern); const GA_PrimitiveGroup* matchGroup(const GU_Detail&, const std::string& pattern); /// @} /// @name Parameter evaluation /// @{ /// @brief Evaluate a vector-valued parameter. openvdb::Vec3f evalVec3f(const char* name, fpreal time) const; /// @brief Evaluate a vector-valued parameter. openvdb::Vec3R evalVec3R(const char* name, fpreal time) const; /// @brief Evaluate a vector-valued parameter. openvdb::Vec3i evalVec3i(const char* name, fpreal time) const; /// @brief Evaluate a vector-valued parameter. openvdb::Vec2R evalVec2R(const char* name, fpreal time) const; /// @brief Evaluate a vector-valued parameter. openvdb::Vec2i evalVec2i(const char* name, fpreal time) const; /// @brief Evaluate a string-valued parameter as an STL string. /// @details This method facilitates string parameter evaluation in expressions. /// For example, /// @code /// matchGroup(*gdp, evalStdString("group", time)); /// @endcode std::string evalStdString(const char* name, fpreal time, int index = 0) const; /// @} protected: /// @{ /// @brief To facilitate compilable SOPs, cookMySop() is now final. /// Instead, either override SOP_NodeVDB::cookVDBSop() (for a non-compilable SOP) /// or override SOP_VDBCacheOptions::cookVDBSop() (for a compilable SOP). 
OP_ERROR cookMySop(OP_Context&) override final; virtual OP_ERROR cookVDBSop(OP_Context&) { return UT_ERROR_NONE; } /// @} OP_ERROR cookMyGuide1(OP_Context&) override; //OP_ERROR cookMyGuide2(OP_Context&) override; /// @brief Transfer the value of an obsolete parameter that was renamed /// to the parameter with the new name. /// @details This convenience method is intended to be called from /// @c resolveObsoleteParms(), when that function is implemented. void resolveRenamedParm(PRM_ParmList& obsoleteParms, const char* oldName, const char* newName); /// @name Input stealing /// @{ /// @brief Steal the geometry on the specified input if possible, instead of copying the data. /// /// @details In certain cases where a node's input geometry isn't being shared with /// other nodes, it is safe for the node to directly modify the geometry. /// Normally, input geometry is shared with the upstream node's output cache, /// so for stealing to be possible, the "unload" flag must be set on the upstream node /// to inhibit caching. In addition, reference counting of GEO_PrimVDB shared pointers /// ensures we cannot steal data that is in use elsewhere. When stealing is not possible, /// this method falls back to copying the shared pointer, effectively performing /// a duplicateSource(). /// /// @param index the index of the input from which to perform this operation /// @param context the current SOP context is used for cook time for network traversal /// @param pgdp pointer to the SOP's gdp /// @param gdh handle to manage input locking /// @param clean (forwarded to duplicateSource()) /// /// @note Prior to Houdini 13.0, this method peforms a duplicateSource() and unlocks the /// inputs to the SOP. From Houdini 13.0 on, this method will insert the existing data /// into the detail and update the detail handle in the SOP. 
/// /// @warning No attempt to call duplicateSource() or inputGeo() should be made after /// calling this method, as there will be no data on the input stream if isSourceStealable() /// returns @c true. /// @deprecated verbification renders this redundant [[deprecated]] OP_ERROR duplicateSourceStealable(const unsigned index, OP_Context& context, GU_Detail **pgdp, GU_DetailHandle& gdh, bool clean = true); /// @brief Steal the geometry on the specified input if possible, instead of copying the data. /// /// @details In certain cases where a node's input geometry isn't being shared with /// other nodes, it is safe for the node to directly modify the geometry. /// Normally, input geometry is shared with the upstream node's output cache, /// so for stealing to be possible, the "unload" flag must be set on the upstream node /// to inhibit caching. In addition, reference counting of GEO_PrimVDB shared pointers /// ensures we cannot steal data that is in use elsewhere. When stealing is not possible, /// this method falls back to copying the shared pointer, effectively performing /// a duplicateSource(). /// /// @note Prior to Houdini 13.0, this method peforms a duplicateSource() and unlocks the /// inputs to the SOP. From Houdini 13.0 on, this method will insert the existing data /// into the detail and update the detail handle in the SOP. /// /// @param index the index of the input from which to perform this operation /// @param context the current SOP context is used for cook time for network traversal /// @deprecated verbification renders this redundant [[deprecated]] OP_ERROR duplicateSourceStealable(const unsigned index, OP_Context& context); /// @} private: /// @brief Traverse the upstream network to determine if the source input can be stolen. 
/// /// An upstream SOP cannot be stolen if it is implicitly caching the data (no "unload" flag) /// or explictly caching the data (using a Cache SOP) /// /// The traversal ignores pass through nodes such as null SOPs and bypassing. /// /// @param index the index of the input from which to perform this operation /// @param context the current SOP context is used for cook time for network traversal /// @deprecated verbification renders this redundant [[deprecated]] bool isSourceStealable(const unsigned index, OP_Context& context) const; }; // class SOP_NodeVDB //////////////////////////////////////// /// @brief Namespace to hold functionality for registering info text callbacks. Whenever /// getNodeSpecificInfoText() is called, the default info text is added to MMB output unless /// a valid callback has been registered for the grid type. /// /// @details Use node_info_text::registerGridSpecificInfoText<> to register a grid type to /// a function pointer which matches the ApplyGridSpecificInfoText signature. /// /// void floatGridText(std::ostream&, const openvdb::GridBase&); /// /// node_info_text::registerGridSpecificInfoText<openvdb::FloatGrid>(&floatGridText); /// namespace node_info_text { // The function pointer signature expected when registering an grid type text // callback. The grid is passed untyped but is guaranteed to match the registered // type. using ApplyGridSpecificInfoText = void (*)(std::ostream&, const openvdb::GridBase&); /// @brief Register an info text callback to a specific grid type. /// @note Does not add the callback if the grid type already has a registered callback. /// @param gridType the grid type as a unique string (see templated /// registerGridSpecificInfoText<>) /// @param callback a pointer to the callback function to execute void registerGridSpecificInfoText(const std::string& gridType, ApplyGridSpecificInfoText callback); /// @brief Register an info text callback to a templated grid type. 
/// @note Does not add the callback if the grid type already has a registered callback. /// @param callback a pointer to the callback function to execute template<typename GridType> inline void registerGridSpecificInfoText(ApplyGridSpecificInfoText callback) { registerGridSpecificInfoText(GridType::gridType(), callback); } } // namespace node_info_text } // namespace openvdb_houdini #endif // OPENVDB_HOUDINI_SOP_NODEVDB_HAS_BEEN_INCLUDED
10,423
C
43.931034
99
0.702293
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Create.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Create.cc /// /// @author FX R&D OpenVDB team #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/GeometryUtil.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb_houdini/UT_VDBTools.h> // for GridTransformOp, et al. #include <openvdb_houdini/Utils.h> #include <UT/UT_Interrupt.h> #include <UT/UT_WorkArgs.h> #include <hboost/algorithm/string/case_conv.hpp> #include <hboost/algorithm/string/trim.hpp> #include <OBJ/OBJ_Camera.h> #include <cmath> #include <stdexcept> #include <string> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; namespace cvdb = openvdb; //////////////////////////////////////// namespace { // Add new items to the *end* of this list, and update NUM_DATA_TYPES. enum DataType { TYPE_FLOAT = 0, TYPE_DOUBLE, TYPE_INT, TYPE_BOOL, TYPE_VEC3S, TYPE_VEC3D, TYPE_VEC3I }; enum { NUM_DATA_TYPES = TYPE_VEC3I + 1 }; std::string dataTypeToString(DataType dts) { std::string ret; switch (dts) { case TYPE_FLOAT: ret = "float"; break; case TYPE_DOUBLE: ret = "double"; break; case TYPE_INT: ret = "int"; break; case TYPE_BOOL: ret = "bool"; break; case TYPE_VEC3S: ret = "vec3s"; break; case TYPE_VEC3D: ret = "vec3d"; break; case TYPE_VEC3I: ret = "vec3i"; break; } return ret; } std::string dataTypeToMenuItems(DataType dts) { std::string ret; switch (dts) { case TYPE_FLOAT: ret = "float"; break; case TYPE_DOUBLE: ret = "double"; break; case TYPE_INT: ret = "int"; break; case TYPE_BOOL: ret = "bool"; break; case TYPE_VEC3S: ret = "vec3s (float)"; break; case TYPE_VEC3D: ret = "vec3d (double)"; break; case TYPE_VEC3I: ret = "vec3i (int)"; break; } return ret; } DataType stringToDataType(const std::string& s) { DataType ret = TYPE_FLOAT; std::string str = s; hboost::trim(str); hboost::to_lower(str); if (str == dataTypeToString(TYPE_FLOAT)) { ret = TYPE_FLOAT; } else if (str == dataTypeToString(TYPE_DOUBLE)) 
{ ret = TYPE_DOUBLE; } else if (str == dataTypeToString(TYPE_INT)) { ret = TYPE_INT; } else if (str == dataTypeToString(TYPE_BOOL)) { ret = TYPE_BOOL; } else if (str == dataTypeToString(TYPE_VEC3S)) { ret = TYPE_VEC3S; } else if (str == dataTypeToString(TYPE_VEC3D)) { ret = TYPE_VEC3D; } else if (str == dataTypeToString(TYPE_VEC3I)) { ret = TYPE_VEC3I; } return ret; } } // unnamed namespace //////////////////////////////////////// class SOP_OpenVDB_Create : public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Create(OP_Network *net, const char *name, OP_Operator *op); ~SOP_OpenVDB_Create() override {} static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned) const override { return true; } int updateNearFar(float time); int updateFarPlane(float time); int updateNearPlane(float time); protected: OP_ERROR cookVDBSop(OP_Context&) override; bool updateParmsFlags() override; void resolveObsoleteParms(PRM_ParmList*) override; private: inline cvdb::Vec3i voxelToIndex(const cvdb::Vec3R& V) const { return cvdb::Vec3i(cvdb::Int32(V[0]), cvdb::Int32(V[1]), cvdb::Int32(V[2])); } template<typename GridType> void createNewGrid( const UT_String& gridNameStr, const typename GridType::ValueType& background, const cvdb::math::Transform::Ptr&, const cvdb::MaskGrid::ConstPtr& maskGrid = nullptr, GA_PrimitiveGroup* group = nullptr, int gridClass = 0, int vecType = -1); OP_ERROR buildTransform(OP_Context&, openvdb::math::Transform::Ptr&, const GU_PrimVDB*); const GU_PrimVDB* getReferenceVdb(OP_Context &context); cvdb::MaskGrid::Ptr createMaskGrid(const GU_PrimVDB*, const openvdb::math::Transform::Ptr&); bool mNeedsResampling; }; //////////////////////////////////////// // Callback functions that update the near and far parameters int updateNearFarCallback(void*, int, float, const PRM_Template*); int updateNearPlaneCallback(void*, int, float, const PRM_Template*); int updateFarPlaneCallback(void*, int, float, const PRM_Template*); int 
updateNearFarCallback(void* data, int /*idx*/, float time, const PRM_Template*) { SOP_OpenVDB_Create* sop = static_cast<SOP_OpenVDB_Create*>(data); if (sop == nullptr) return 0; return sop->updateNearFar(time); } int SOP_OpenVDB_Create::updateNearFar(float time) { const auto cameraPath = evalStdString("camera", time); if (cameraPath.empty()) return 1; OBJ_Node *camobj = findOBJNode(cameraPath.c_str()); if (!camobj) return 1; OBJ_Camera* cam = camobj->castToOBJCamera(); if (!cam) return 1; fpreal nearPlane = cam->getNEAR(time); fpreal farPlane = cam->getFAR(time); setFloat("nearPlane", 0, time, nearPlane); setFloat("farPlane", 0, time, farPlane); return 1; } int updateNearPlaneCallback(void* data, int /*idx*/, float time, const PRM_Template*) { SOP_OpenVDB_Create* sop = static_cast<SOP_OpenVDB_Create*>(data); if (sop == nullptr) return 0; return sop->updateNearPlane(time); } int SOP_OpenVDB_Create::updateNearPlane(float time) { fpreal nearPlane = evalFloat("nearPlane", 0, time), farPlane = evalFloat("farPlane", 0, time), voxelDepthSize = evalFloat("voxelDepthSize", 0, time); if (!(voxelDepthSize > 0.0)) voxelDepthSize = 1e-6; farPlane -= voxelDepthSize; if (farPlane < nearPlane) { setFloat("nearPlane", 0, time, farPlane); } return 1; } int updateFarPlaneCallback(void* data, int /*idx*/, float time, const PRM_Template*) { SOP_OpenVDB_Create* sop = static_cast<SOP_OpenVDB_Create*>(data); if (sop == nullptr) return 0; return sop->updateFarPlane(time); } int SOP_OpenVDB_Create::updateFarPlane(float time) { fpreal nearPlane = evalFloat("nearPlane", 0, time), farPlane = evalFloat("farPlane", 0, time), voxelDepthSize = evalFloat("voxelDepthSize", 0, time); if (!(voxelDepthSize > 0.0)) voxelDepthSize = 1e-6; nearPlane += voxelDepthSize; if (farPlane < nearPlane) { setFloat("farPlane", 0, time, nearPlane); } return 1; } //////////////////////////////////////// void newSopOperator(OP_OperatorTable *table) { if (table == nullptr) return; hutil::ParmList parms; // Group name 
parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setTooltip("Specify a name for this group of VDBs.")); parms.add(hutil::ParmFactory(PRM_SEPARATOR,"sep1", "Sep")); // Transform type parms.add(hutil::ParmFactory(PRM_ORD | PRM_TYPE_JOIN_NEXT, "transform", "Transform") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "linear", "Linear", "frustum", "Frustum", "refVDB", "Reference VDB" }) .setTooltip( "The type of transform to assign to each VDB\n\n" "Linear:\n" " Rotation and scale only\n" "Frustum:\n" " Perspective projection, with focal length and near and far planes" " from a given camera\n" "Reference VDB:\n" " Match the transform of an input VDB.")); // Toggle to preview the frustum parms.add(hutil::ParmFactory(PRM_TOGGLE, "previewFrustum", "Preview") .setDefault(PRMoneDefaults) .setTooltip("Generate geometry indicating the bounds of the camera frustum.") .setDocumentation( "For a frustum transform, generate geometry indicating" " the bounds of the camera frustum.")); // Uniform voxel size parms.add(hutil::ParmFactory(PRM_FLT_J, "voxelSize", "Voxel Size") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 5) .setTooltip("The size (length of a side) of a cubic voxel in world units") .setTooltip( "For non-frustum transforms, the size (length of a side)" " of a cubic voxel in world units")); // Rotation parms.add(hutil::ParmFactory(PRM_XYZ_J, "rotation", "Rotation") .setVectorSize(3) .setDefault(PRMzeroDefaults) .setTooltip("Rotation specified in ZYX order")); // Frustum settings // { parms.add(hutil::ParmFactory(PRM_STRING, "camera", "Camera") .setTypeExtended(PRM_TYPE_DYNAMIC_PATH) .setCallbackFunc(&updateNearFarCallback) .setSpareData(&PRM_SpareData::objCameraPath) .setTooltip("The path to the reference camera object (e.g., \"/obj/cam1\")") .setDocumentation( "For a frustum transform, the path to the reference camera object" " (for example, `/obj/cam1`)")); parms.add(hutil::ParmFactory(PRM_FLT_J | PRM_TYPE_JOIN_NEXT, 
"nearPlane", "Near/Far Planes") .setDefault(PRMzeroDefaults) .setCallbackFunc(&updateNearPlaneCallback) .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 20) .setTooltip("The near and far plane distances in world units") .setDocumentation( "The near and far plane distances in world units\n\n" "The near plane distance should always be <= `farPlane` &minus; `voxelDepthSize`,\n" "and the far plane distance should always be => `nearPlane` + `voxelDepthSize`.")); parms.add(hutil::ParmFactory( PRM_FLT_J | PRM_Type(PRM_Type::PRM_INTERFACE_LABEL_NONE), "farPlane", "") .setDefault(PRMoneDefaults) .setCallbackFunc(&updateFarPlaneCallback) .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 20) .setTooltip("Far plane distance, should always be >= nearPlane + voxelDepthSize") .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_INT_J, "voxelCount", "Voxel Count") .setDefault(PRM100Defaults) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 200) .setTooltip("The desired width of the near plane in voxels")); parms.add(hutil::ParmFactory(PRM_FLT_J, "voxelDepthSize", "Voxel Depth") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 5) .setTooltip("The z dimension of a voxel in world units (all voxels have the same depth)") .setTooltip( "For a frustum transform, the z dimension of a voxel" " in world units (all voxels have the same depth)")); parms.add(hutil::ParmFactory(PRM_FLT_J, "cameraOffset", "Camera Offset") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_FREE, 20.0) .setTooltip( "Add padding to the frustum without changing the near and far plane positions.\n\n" "The camera position is offset in the direction opposite the view.")); // } // Matching settings parms.add(hutil::ParmFactory(PRM_STRING, "reference", "Reference") .setChoiceList(&hutil::PrimGroupMenuInput2) .setTooltip("The VDB to be used as a reference") .setDocumentation( "A VDB from the second input to be used as reference" " (see [specifying 
volumes|/model/volumes#group])\n\n" "If multiple VDBs are selected, only the first one will be used.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "useVoxelSize", "") .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setDefault(PRMzeroDefaults) .setTooltip( "If enabled, use the given voxel size, otherwise" " match the voxel size of the reference VDB.")); parms.add(hutil::ParmFactory(PRM_FLT_J, "voxelSizeRef", "Voxel Size") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 5) .setTooltip("The size (length of a side) of a cubic voxel in world units") .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_TOGGLE, "matchTopology", "Match Topology") .setDefault(PRMoneDefaults) .setTooltip("Match the voxel topology of the reference VDB.")); // Grids Heading parms.add(hutil::ParmFactory(PRM_HEADING, "gridsHeading", "")); // Dynamic grid menu hutil::ParmList gridParms; { { // Grid class menu std::vector<std::string> items; for (int i = 0; i < openvdb::NUM_GRID_CLASSES; ++i) { openvdb::GridClass cls = openvdb::GridClass(i); items.push_back(openvdb::GridBase::gridClassToString(cls)); // token items.push_back(openvdb::GridBase::gridClassToMenuName(cls)); // label } gridParms.add(hutil::ParmFactory(PRM_STRING, "gridClass#", "Class") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setTooltip("Specify how voxel values should be interpreted.") .setDocumentation("\ How voxel values should be interpreted\n\ \n\ Fog Volume:\n\ The volume represents a density field. Values should be positive,\n\ with zero representing empty regions.\n\ Level Set:\n\ The volume is treated as a narrow-band signed distance field level set.\n\ The voxels within a certain distance&mdash;the \"narrow band width\"&mdash;of\n\ an isosurface are expected to define positive (exterior) and negative (interior)\n\ distances to the surface. 
Outside the narrow band, the distance value\n\ is constant and equal to the band width.\n\ Staggered Vector Field:\n\ If the volume is vector-valued, the _x_, _y_ and _z_ vector components\n\ are to be treated as lying on the respective faces of voxels,\n\ not at their centers.\n\ Other:\n\ No special meaning is assigned to the volume's data.\n")); } { // Element type menu std::vector<std::string> items; for (int i = 0; i < NUM_DATA_TYPES; ++i) { items.push_back(dataTypeToString(DataType(i))); // token items.push_back(dataTypeToMenuItems(DataType(i))); // label } gridParms.add(hutil::ParmFactory(PRM_STRING, "elementType#", "Type") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setTooltip("The type of value stored at each voxel") .setDocumentation( "The type of value stored at each voxel\n\n" "VDB volumes are able to store vector values, unlike Houdini volumes,\n" "which require one scalar volume for each vector component.")); } // Optional grid name string gridParms.add(hutil::ParmFactory(PRM_STRING, "gridName#", "Name") .setTooltip("A name for this VDB") .setDocumentation("A value for the `name` attribute of this VDB primitive")); // Default background values // { const char* bgHelpStr = "The \"default\" value for any voxel not explicitly set"; gridParms.add(hutil::ParmFactory(PRM_FLT_J, "bgFloat#", "Background Value") .setTooltip(bgHelpStr) .setDocumentation(bgHelpStr)); gridParms.add(hutil::ParmFactory(PRM_INT_J, "bgInt#", "Background Value") .setDefault(PRMoneDefaults) .setTooltip(bgHelpStr) .setDocumentation(nullptr)); gridParms.add(hutil::ParmFactory(PRM_INT_J, "bgBool#", "Background Value") .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_RESTRICTED, 1) .setDefault(PRMoneDefaults) .setTooltip(bgHelpStr) .setDocumentation(nullptr)); gridParms.add(hutil::ParmFactory(PRM_FLT_J, "bgVec3f#", "Background Value") .setVectorSize(3) .setTooltip(bgHelpStr) .setDocumentation(nullptr)); gridParms.add(hutil::ParmFactory(PRM_INT_J, "bgVec3i#", "Background Value") 
.setVectorSize(3) .setTooltip(bgHelpStr) .setDocumentation(nullptr)); gridParms.add(hutil::ParmFactory(PRM_FLT_J, "width#", "Half-Band Width") .setDefault(PRMthreeDefaults) .setRange(PRM_RANGE_RESTRICTED, 1.0, PRM_RANGE_UI, 10) .setTooltip( "Half the width of the narrow band, in voxels\n\n" "(Many level set operations require this to be a minimum of three voxels.)")); // } // Vec type menu { std::string help = "For vector-valued VDBs, specify an interpretation of the vectors" " that determines how they are affected by transforms.\n"; std::vector<std::string> items; for (int i = 0; i < openvdb::NUM_VEC_TYPES ; ++i) { const auto vectype = static_cast<openvdb::VecType>(i); items.push_back(openvdb::GridBase::vecTypeToString(vectype)); items.push_back(openvdb::GridBase::vecTypeExamples(vectype)); help += "\n" + openvdb::GridBase::vecTypeExamples(vectype) + "\n " + openvdb::GridBase::vecTypeDescription(vectype) + "."; } gridParms.add(hutil::ParmFactory(PRM_ORD, "vecType#", "Vector Type") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setTooltip(help.c_str())); } } parms.add(hutil::ParmFactory(PRM_MULTITYPE_LIST, "gridList", "VDBs") .setMultiparms(gridParms) .setDefault(PRMoneDefaults)); // Obsolete parameters hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "propertiesHeading", "Shared Grid Properties")); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "frustumHeading", "Frustum Grid Settings")); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "padding", "Padding")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "matchVoxelSize", "Match Voxel Size")); // Register this operator. 
hvdb::OpenVDBOpFactory("VDB Create", SOP_OpenVDB_Create::factory, parms, *table) .setObsoleteParms(obsoleteParms) .addOptionalInput("Optional Input to Merge With") .addOptionalInput("Optional Reference VDB") .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Create one or more empty VDB volume primitives.\"\"\"\n\ \n\ @overview\n\ \n\ [Include:volume_types]\n\ \n\ @related\n\ - [OpenVDB From Particles|Node:sop/DW_OpenVDBFromParticles]\n\ - [OpenVDB From Polygons|Node:sop/DW_OpenVDBFromPolygons]\n\ - [OpenVDB Metadata|Node:sop/DW_OpenVDBMetadata]\n\ - [Node:sop/vdb]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } //////////////////////////////////////// OP_Node * SOP_OpenVDB_Create::factory(OP_Network *net, const char *name, OP_Operator *op) { return new SOP_OpenVDB_Create(net, name, op); } SOP_OpenVDB_Create::SOP_OpenVDB_Create(OP_Network *net, const char *name, OP_Operator *op) : hvdb::SOP_NodeVDB(net, name, op) , mNeedsResampling(false) { } //////////////////////////////////////// bool SOP_OpenVDB_Create::updateParmsFlags() { bool changed = false; UT_String tmpStr; const auto transformParm = evalInt("transform", 0, 0); const bool linear = (transformParm == 0); const bool frustum = (transformParm == 1); const bool matching = (transformParm == 2); for (int i = 1, N = static_cast<int>(evalInt("gridList", 0, 0)); i <= N; ++i) { evalStringInst("gridClass#", &i, tmpStr, 0, 0); openvdb::GridClass gridClass = openvdb::GridBase::stringToGridClass(tmpStr.toStdString()); evalStringInst("elementType#", &i, tmpStr, 0, 0); DataType eType = stringToDataType(tmpStr.toStdString()); bool isLevelSet = false; // Force a specific data type for some of the grid classes if (gridClass == openvdb::GRID_LEVEL_SET) { eType = TYPE_FLOAT; isLevelSet = true; } else if (gridClass == openvdb::GRID_FOG_VOLUME) { eType = TYPE_FLOAT; } else if (gridClass == openvdb::GRID_STAGGERED) { eType = TYPE_VEC3S; } 
/// Disbale unused bg value options changed |= enableParmInst("bgFloat#", &i, !isLevelSet && (eType == TYPE_FLOAT || eType == TYPE_DOUBLE)); changed |= enableParmInst("width#", &i, isLevelSet); changed |= enableParmInst("bgInt#", &i, eType == TYPE_INT || eType == TYPE_BOOL); changed |= enableParmInst("bgVec3f#", &i, eType == TYPE_VEC3S || eType == TYPE_VEC3D); changed |= enableParmInst("bgVec3i#", &i, eType == TYPE_VEC3I); changed |= enableParmInst("vecType#", &i, eType >= TYPE_VEC3S); // Hide unused bg value options. changed |= setVisibleStateInst("bgFloat#", &i, !isLevelSet && (eType == TYPE_FLOAT || eType == TYPE_DOUBLE)); changed |= setVisibleStateInst("width#", &i, isLevelSet); changed |= setVisibleStateInst("bgInt#", &i, eType == TYPE_INT); changed |= setVisibleStateInst("bgBool#", &i, eType == TYPE_BOOL); changed |= setVisibleStateInst("bgVec3f#", &i, eType == TYPE_VEC3S || eType == TYPE_VEC3D); changed |= setVisibleStateInst("bgVec3i#", &i, eType == TYPE_VEC3I); changed |= setVisibleStateInst("vecType#", &i, eType >= TYPE_VEC3S); // Enable different data types changed |= enableParmInst("elementType#", &i, gridClass == openvdb::GRID_UNKNOWN); changed |= setVisibleStateInst("elementType#", &i, gridClass == openvdb::GRID_UNKNOWN); } // linear transform and voxel size changed |= enableParm("voxelSize", linear); changed |= enableParm("rotation", linear); changed |= setVisibleState("voxelSize", linear); changed |= setVisibleState("rotation", linear); // frustum transform const auto cameraPath = evalStdString("camera", 0); const bool enableFrustumSettings = (!cameraPath.empty() && findOBJNode(cameraPath.c_str())); changed |= enableParm("camera", frustum); changed |= enableParm("voxelCount", frustum & enableFrustumSettings); changed |= enableParm("voxelDepthSize", frustum & enableFrustumSettings); changed |= enableParm("offset", frustum & enableFrustumSettings); changed |= enableParm("nearPlane", frustum & enableFrustumSettings); changed |= enableParm("farPlane", 
frustum & enableFrustumSettings); changed |= enableParm("cameraOffset", frustum & enableFrustumSettings); changed |= enableParm("previewFrustum", frustum & enableFrustumSettings); changed |= setVisibleState("camera", frustum); changed |= setVisibleState("voxelCount", frustum); changed |= setVisibleState("voxelDepthSize", frustum); changed |= setVisibleState("offset", frustum); changed |= setVisibleState("nearPlane", frustum); changed |= setVisibleState("farPlane", frustum); changed |= setVisibleState("cameraOffset", frustum); changed |= setVisibleState("previewFrustum", frustum); // matching const bool useVoxelSize = evalInt("useVoxelSize", 0, 0); changed |= enableParm("reference", matching); changed |= enableParm("useVoxelSize", matching); changed |= enableParm("voxelSizeRef", matching && useVoxelSize); changed |= enableParm("matchTopology", matching); changed |= setVisibleState("reference", matching); changed |= setVisibleState("useVoxelSize", matching); changed |= setVisibleState("voxelSizeRef", matching); changed |= setVisibleState("matchTopology", matching); changed |= setVisibleState("matchTopologyPlaceholder", false); return changed; } void SOP_OpenVDB_Create::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; PRM_Parm* parm = obsoleteParms->getParmPtr("matchVoxelSize"); if (parm && !parm->isFactoryDefault()) { const bool matchVoxelSize = obsoleteParms->evalInt("matchVoxelSize", 0, /*time=*/0.0); setInt("useVoxelSize", 0, 0.0, !matchVoxelSize); } // Delegate to the base class. 
hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } //////////////////////////////////////// template<typename GridType> void SOP_OpenVDB_Create::createNewGrid( const UT_String& gridNameStr, const typename GridType::ValueType& background, const cvdb::math::Transform::Ptr& transform, const cvdb::MaskGrid::ConstPtr& maskGrid, GA_PrimitiveGroup* group, int gridClass, int vecType) { using Tree = typename GridType::TreeType; // Create a grid of a pre-registered type and assign it a transform. hvdb::GridPtr newGrid; if (maskGrid) { newGrid = GridType::create( typename Tree::Ptr(new Tree(maskGrid->tree(), background, cvdb::TopologyCopy()))); } else { newGrid = GridType::create(background); } newGrid->setTransform(transform); newGrid->setGridClass(openvdb::GridClass(gridClass)); if (vecType != -1) newGrid->setVectorType(openvdb::VecType(vecType)); // Store the grid in a new VDB primitive and add the primitive // to the output geometry detail. GEO_PrimVDB* vdb = hvdb::createVdbPrimitive(*gdp, newGrid, gridNameStr.toStdString().c_str()); // Add the primitive to the group. if (group) group->add(vdb); } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Create::cookVDBSop(OP_Context &context) { try { hutil::ScopedInputLock lock(*this, context); gdp->clearAndDestroy(); if (getInput(0)) duplicateSource(0, context); fpreal time = context.getTime(); // Create a group for the grid primitives. const auto groupStr = evalStdString("group", time); GA_PrimitiveGroup* group = (groupStr.empty() ? nullptr : gdp->newPrimitiveGroup(groupStr.c_str())); // Get reference VDB, if exists const bool matchTransfom = (evalInt("transform", 0, time) == 2); const GU_PrimVDB* refVdb = (matchTransfom ? 
getReferenceVdb(context) : nullptr); // Create a shared transform cvdb::math::Transform::Ptr transform; if (buildTransform(context, transform, refVdb) >= UT_ERROR_ABORT) return error(); cvdb::MaskGrid::Ptr maskGrid; const bool matchTopology = evalInt("matchTopology", 0, time); if (matchTransfom && matchTopology) maskGrid = createMaskGrid(refVdb, transform); // Create the grids UT_String gridNameStr, tmpStr; for (int i = 1, N = static_cast<int>(evalInt("gridList", 0, 0)); i <= N; ++i) { evalStringInst("gridName#", &i, gridNameStr, 0, time); evalStringInst("gridClass#", &i, tmpStr, 0, time); openvdb::GridClass gridClass = openvdb::GridBase::stringToGridClass(tmpStr.toStdString()); evalStringInst("elementType#", &i, tmpStr, 0, time); DataType eType = stringToDataType(tmpStr.toStdString()); // Force a specific data type for some of the grid classes if (gridClass == openvdb::GRID_LEVEL_SET || gridClass == openvdb::GRID_FOG_VOLUME) { eType = TYPE_FLOAT; } else if (gridClass == openvdb::GRID_STAGGERED) { eType = TYPE_VEC3S; } switch(eType) { case TYPE_FLOAT: { float voxelSize = float(transform->voxelSize()[0]); float background = 0.0; if (gridClass == openvdb::GRID_LEVEL_SET) { background = float(evalFloatInst("width#", &i, 0, time) * voxelSize); } else { background = float(evalFloatInst("bgFloat#", &i, 0, time)); } createNewGrid<cvdb::FloatGrid>( gridNameStr, background, transform, maskGrid, group, gridClass); break; } case TYPE_DOUBLE: { double background = double(evalFloatInst("bgFloat#", &i, 0, time)); createNewGrid<cvdb::DoubleGrid>( gridNameStr, background, transform, maskGrid, group, gridClass); break; } case TYPE_INT: { int background = static_cast<int>(evalIntInst("bgInt#", &i, 0, time)); createNewGrid<cvdb::Int32Grid>( gridNameStr, background, transform, maskGrid, group, gridClass); break; } case TYPE_BOOL: { bool background = evalIntInst("bgBool#", &i, 0, time); createNewGrid<cvdb::BoolGrid>( gridNameStr, background, transform, maskGrid, group, gridClass); 
break; } case TYPE_VEC3S: { cvdb::Vec3f background( float(evalFloatInst("bgVec3f#", &i, 0, time)), float(evalFloatInst("bgVec3f#", &i, 1, time)), float(evalFloatInst("bgVec3f#", &i, 2, time))); int vecType = static_cast<int>(evalIntInst("vecType#", &i, 0, time)); createNewGrid<cvdb::Vec3SGrid>( gridNameStr, background, transform, maskGrid, group, gridClass, vecType); break; } case TYPE_VEC3D: { cvdb::Vec3d background( double(evalFloatInst("bgVec3f#", &i, 0, time)), double(evalFloatInst("bgVec3f#", &i, 1, time)), double(evalFloatInst("bgVec3f#", &i, 2, time))); int vecType = static_cast<int>(evalIntInst("vecType#", &i, 0, time)); createNewGrid<cvdb::Vec3DGrid>( gridNameStr, background, transform, maskGrid, group, gridClass, vecType); break; } case TYPE_VEC3I: { cvdb::Vec3i background( static_cast<cvdb::Int32>(evalIntInst("bgVec3i#", &i, 0, time)), static_cast<cvdb::Int32>(evalIntInst("bgVec3i#", &i, 1, time)), static_cast<cvdb::Int32>(evalIntInst("bgVec3i#", &i, 2, time))); int vecType = static_cast<int>(evalIntInst("vecType#", &i, 0, time)); createNewGrid<cvdb::Vec3IGrid>( gridNameStr, background, transform, maskGrid, group, gridClass, vecType); break; } } // eType switch } // grid create loop } catch ( std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Create::buildTransform(OP_Context& context, openvdb::math::Transform::Ptr& transform, const GU_PrimVDB* refVdb) { fpreal time = context.getTime(); const auto transformParm = evalInt("transform", 0, time); const bool linear = (transformParm == 0); const bool frustum = (transformParm == 1); if (frustum) { // nonlinear frustum transform const auto cameraPath = evalStdString("camera", time); if (cameraPath.empty()) { addError(SOP_MESSAGE, "No camera selected"); return error(); } OBJ_Node *camobj = findOBJNode(cameraPath.c_str()); if (!camobj) { addError(SOP_MESSAGE, "Camera not found"); return error(); } OBJ_Camera* cam = 
camobj->castToOBJCamera(); if (!cam) { addError(SOP_MESSAGE, "Camera not found"); return error(); } // Register this->addExtraInput(cam, OP_INTEREST_DATA); const float offset = static_cast<float>(evalFloat("cameraOffset", 0, time)), nearPlane = static_cast<float>(evalFloat("nearPlane", 0, time)), farPlane = static_cast<float>(evalFloat("farPlane", 0, time)), voxelDepthSize = static_cast<float>(evalFloat("voxelDepthSize", 0, time)); const int voxelCount = static_cast<int>(evalInt("voxelCount", 0, time)); transform = hvdb::frustumTransformFromCamera(*this, context, *cam, offset, nearPlane, farPlane, voxelDepthSize, voxelCount); if (bool(evalInt("previewFrustum", 0, time))) { UT_Vector3 boxColor(0.6f, 0.6f, 0.6f); UT_Vector3 tickColor(0.0f, 0.0f, 0.0f); hvdb::drawFrustum(*gdp, *transform, &boxColor, &tickColor, /*shaded*/true); } } else if (linear) { // linear affine transform const double voxelSize = double(evalFloat("voxelSize", 0, time)); openvdb::Vec3d rotation( evalFloat("rotation", 0, time), evalFloat("rotation", 1, time), evalFloat("rotation", 2, time)); if (std::abs(rotation.x()) < 0.00001 && std::abs(rotation.y()) < 0.00001 && std::abs(rotation.z()) < 0.00001) { transform = openvdb::math::Transform::createLinearTransform(voxelSize); } else { openvdb::math::Mat4d xform(openvdb::math::Mat4d::identity()); xform.preRotate(openvdb::math::X_AXIS, rotation.x()); xform.preRotate(openvdb::math::Y_AXIS, rotation.y()); xform.preRotate(openvdb::math::Z_AXIS, rotation.z()); xform.preScale(openvdb::Vec3d(voxelSize)); transform = openvdb::math::Transform::createLinearTransform(xform); } } else { // match reference if (refVdb == nullptr) { addError(SOP_MESSAGE, "Missing reference grid"); return error(); } transform = refVdb->getGrid().transform().copy(); const bool useVoxelSize = evalInt("useVoxelSize", 0, time); if (useVoxelSize) { // NOT matching the reference's voxel size if (!transform->isLinear()) { addError(SOP_MESSAGE, "Cannot change voxel size on a non-linear 
transform"); return error(); } const double voxelSize = double(evalFloat("voxelSizeRef", 0, time)); openvdb::Vec3d relativeVoxelScale = voxelSize / refVdb->getGrid().voxelSize(); // If the user is changing the voxel size to the original, // then there is no need to do anything if (!isApproxEqual(openvdb::Vec3d::ones(), relativeVoxelScale)) { mNeedsResampling = true; transform->preScale(relativeVoxelScale); } } } return error(); } //////////////////////////////////////// const GU_PrimVDB* SOP_OpenVDB_Create::getReferenceVdb(OP_Context &context) { const GU_Detail* refGdp = inputGeo(1, context); if (!refGdp) return nullptr; const GA_PrimitiveGroup* refGroup = matchGroup( *refGdp, evalStdString("reference", context.getTime())); hvdb::VdbPrimCIterator vdbIter(refGdp, refGroup); const GU_PrimVDB* refVdb = *vdbIter; if (++vdbIter) { addWarning(SOP_MESSAGE, "Multiple reference grids were found.\n" "Using the first one for reference."); } return refVdb; } //////////////////////////////////////// class GridConvertToMask { public: GridConvertToMask(cvdb::MaskGrid::Ptr& maskGrid) : outGrid(maskGrid) {} template<typename GridType> void operator()(const GridType& inGrid) { using MaskTree = cvdb::MaskGrid::TreeType; outGrid = cvdb::MaskGrid::create( MaskTree::Ptr(new MaskTree(inGrid.tree(), 0, cvdb::TopologyCopy()))); } private: cvdb::MaskGrid::Ptr& outGrid; }; cvdb::MaskGrid::Ptr SOP_OpenVDB_Create::createMaskGrid(const GU_PrimVDB* refVdb, const openvdb::math::Transform::Ptr& transform) { if (refVdb == nullptr) throw std::runtime_error("Missing reference grid"); cvdb::MaskGrid::Ptr maskGrid; GridConvertToMask op(maskGrid); if (hvdb::GEOvdbApply<hvdb::AllGridTypes>(*refVdb, op)) { maskGrid->setTransform(refVdb->getGrid().transform().copy()); } else { throw std::runtime_error("No valid reference grid found"); } if (!mNeedsResampling) return maskGrid; cvdb::MaskGrid::Ptr resampledMaskGrid = cvdb::MaskGrid::create(); resampledMaskGrid->setTransform(transform); hvdb::Interrupter 
interrupter; cvdb::tools::resampleToMatch<cvdb::tools::PointSampler>(*maskGrid, *resampledMaskGrid, interrupter); return resampledMaskGrid; }
37,024
C++
35.29902
99
0.604527
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Fracture.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Fracture.cc /// /// @author FX R&D OpenVDB team /// /// @brief Level set fracturing #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/GeometryUtil.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/tools/MeshToVolume.h> #include <openvdb/tools/LevelSetFracture.h> #include <openvdb/util/Util.h> #include <GA/GA_ElementGroupTable.h> #include <GA/GA_PageHandle.h> #include <GA/GA_PageIterator.h> #include <GA/GA_AttributeInstanceMatrix.h> #include <GEO/GEO_PrimClassifier.h> #include <GEO/GEO_PointClassifier.h> #include <GU/GU_ConvertParms.h> #include <UT/UT_Quaternion.h> #include <UT/UT_ValArray.h> #include <hboost/algorithm/string/join.hpp> #include <hboost/math/constants/constants.hpp> #include <cmath> #include <iostream> #include <limits> #include <list> #include <random> #include <sstream> #include <stdexcept> #include <string> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; //////////////////////////////////////// class SOP_OpenVDB_Fracture: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Fracture(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Fracture() override {} static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned i ) const override { return (i > 0); } class Cache: public SOP_VDBCacheOptions { protected: OP_ERROR cookVDBSop(OP_Context&) override; template<class GridType> void process( std::list<openvdb::GridBase::Ptr>& grids, const GU_Detail* cutterGeo, const GU_Detail* pointGeo, hvdb::Interrupter&, const fpreal time); }; // class Cache protected: bool updateParmsFlags() override; void resolveObsoleteParms(PRM_ParmList*) override; }; // class SOP_OpenVDB_Fracture //////////////////////////////////////// // Build UI and register this operator. 
void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; ////////// // Input options parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Select a subset of the input OpenVDB grids to fracture.") .setDocumentation( "A subset of the input VDBs to fracture" " (see [specifying volumes|/model/volumes#group])")); ////////// // Fracture options parms.add(hutil::ParmFactory(PRM_TOGGLE, "separatecutters", "Separate Cutters by Connectivity") .setTooltip( "The cutter geometry will be classified by point connectivity" " and each connected component will be cut separately.\n" "Use this if an individual piece of cutting geometry has overlapping components.\n\n" "This option is not available when cutter instance points are provided.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "cutteroverlap", "Allow Cutter Overlap") .setTooltip( "Allow consecutive cutter instances to fracture previously generated fragments.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "centercutter", "Center Cutter Geometry") #ifndef SESI_OPENVDB .setDefault(PRMoneDefaults) #else .setDefault(PRMzeroDefaults) #endif .setTooltip( "Center the cutter geometry around its point position centroid before instancing.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "randomizerotation", "Randomize Cutter Rotation") .setTooltip( "Apply a random rotation to the cutter as it is instanced onto each point.\n\n" "This option is only available when cutter instance points are provided.")); parms.add(hutil::ParmFactory(PRM_INT_J, "seed", "Random Seed") .setDefault(PRMoneDefaults) .setTooltip("The random number seed for cutter rotations")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "segmentfragments", "Split Input Fragments into Primitives") .setTooltip( "Split input VDBs with disjoint fragments into multiple primitives," " one per fragment.\nIn a chain of fracture nodes this operation" " is typically applied only to the last 
node.") .setDocumentation( "Split input VDBs with disjoint fragments into multiple primitives," " one per fragment.\n\n" "If you have a tube and cut out the middle, the two ends might end up in the\n" "same VDB. This option will detect that and split the ends into two VDBs.\n\n" "NOTE:\n" " This operation only needs to be performed if you plan on using the\n" " output fragments for collision detection. If you use multiple fracture\n" " nodes, then it is most efficient to only enable it on the very last\n" " fracture node.\n")); parms.add(hutil::ParmFactory(PRM_STRING, "fragmentgroup", "Fragment Group") .setTooltip("Specify a group name with which to associate all fragments generated " "by this fracture. The residual fragments of the input grids are excluded " "from this group.")); { char const * const visnames[] = { "none", "None", "all", "Pieces", "new", "New Fragments", nullptr }; parms.add(hutil::ParmFactory(PRM_ORD, "visualizepieces", "Visualization") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, visnames) .setTooltip("Randomize output primitive colors.") .setDocumentation( "The generated VDBs can be colored uniquely for ease of identification." 
" The New Fragments option will leave the original pieces with their" " original coloring and assign colors only to newly-created pieces.")); } ////////// hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "inputgroup", "Group")); ////////// hvdb::OpenVDBOpFactory("VDB Fracture", SOP_OpenVDB_Fracture::factory, parms, *table) .addInput("OpenVDB grids to fracture\n" "(Required to have matching transforms and narrow band widths)") .addInput("Cutter objects (geometry).") .addOptionalInput("Optional points to instance the cutter object onto\n" "(The cutter object is used in place if no points are provided.)") .setObsoleteParms(obsoleteParms) .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Fracture::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Split level set VDB volumes into pieces.\"\"\"\n\ \n\ @overview\n\ \n\ This node splits level set VDB volumes into multiple fragments.\n\ \n\ The _cutter_ geometry supplied in the second input determines\n\ where cuts are made in the source volumes.\n\ The optional third input specifies points onto which the cutter geometry\n\ will be instanced, so that even simple geometry can produce complex cuts.\n\ \n\ Typically, the input volume is the output of an [OpenVDB from\n\ Polygons|Node:sop/DW_OpenVDBFromPolygons] node.\n\ When that is the case, the fractured result can be converted back\n\ to polygons seamlessly using the\n\ [OpenVDB Convert|Node:sop/DW_OpenVDBConvert] node\n\ with the original polygons as the second input.\n\ \n\ NOTE:\n\ The cutter geometry must be a closed surface but does not need to be\n\ manifold. The cutter geometry can contain self intersections and\n\ degenerate faces. Normals on the cutter geometry are ignored.\n\ \n\ NOTE:\n\ The reference points supplied in the optional third input can have\n\ attributes that control how the cutter is transformed onto them. 
This\n\ follows the same rules that the [Node:sop/copy] node uses, except\n\ for scaling. Scaling an SDF correctly requires that the level set\n\ be rebuilt at the same time. Thus you must scale your cutter geometry\n\ appropriately first.\n\ \n\ @related\n\ - [OpenVDB Convert|Node:sop/DW_OpenVDBConvert]\n\ - [OpenVDB From Polygons|Node:sop/DW_OpenVDBFromPolygons]\n\ - [Node:sop/vdbfracture]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Fracture::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Fracture(net, name, op); } SOP_OpenVDB_Fracture::SOP_OpenVDB_Fracture(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// void SOP_OpenVDB_Fracture::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; resolveRenamedParm(*obsoleteParms, "inputgroup", "group"); // Delegate to the base class. hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } // Enable or disable parameters in the UI. 
bool SOP_OpenVDB_Fracture::updateParmsFlags() { bool changed = false; const bool instancePointsExist = (nInputs() == 3); const bool multipleCutters = bool(evalInt("separatecutters", 0, 0)); const bool randomizeRotation = bool(evalInt("randomizerotation", 0, 0)); changed |= enableParm("separatecutters", !instancePointsExist); changed |= enableParm("centercutter", instancePointsExist); changed |= enableParm("randomizerotation", instancePointsExist); changed |= enableParm("seed", randomizeRotation); changed |= enableParm("cutteroverlap", instancePointsExist || multipleCutters); return changed; } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Fracture::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); hvdb::Interrupter boss("Converting geometry to volume"); ////////// // Validate inputs const GU_Detail* cutterGeo = inputGeo(1); if (!cutterGeo || !cutterGeo->getNumPrimitives()) { // All good, nothing to worry about with no cutting objects! return error(); } std::string warningStr; auto geoPtr = hvdb::convertGeometry(*cutterGeo, warningStr, &boss); if (geoPtr) { cutterGeo = geoPtr.get(); if (!warningStr.empty()) addWarning(SOP_MESSAGE, warningStr.c_str()); } const GU_Detail* pointGeo = inputGeo(2); const GA_PrimitiveGroup* group = matchGroup(*gdp, evalStdString("group", time)); std::list<openvdb::GridBase::Ptr> grids; std::vector<GU_PrimVDB*> origvdbs; std::vector<std::string> nonLevelSetList, nonLinearList; for (hvdb::VdbPrimIterator vdbIter(gdp, group); vdbIter; ++vdbIter) { if (boss.wasInterrupted()) break; const openvdb::GridClass gridClass = vdbIter->getGrid().getGridClass(); if (gridClass != openvdb::GRID_LEVEL_SET) { nonLevelSetList.push_back(vdbIter.getPrimitiveNameOrIndex().toStdString()); continue; } if (!vdbIter->getGrid().transform().isLinear()) { nonLinearList.push_back(vdbIter.getPrimitiveNameOrIndex().toStdString()); continue; } GU_PrimVDB* vdb = vdbIter.getPrimitive(); vdb->makeGridUnique(); 
grids.push_back(vdb->getGrid().copyGrid()); grids.back()->setName(vdb->getGridName()); grids.back()->insertMeta("houdiniorigoffset", openvdb::Int64Metadata( vdb->getMapOffset() ) ); origvdbs.push_back(vdb); } if (!nonLevelSetList.empty()) { std::string s = "The following non level set grids were skipped: '" + hboost::algorithm::join(nonLevelSetList, ", ") + "'."; addWarning(SOP_MESSAGE, s.c_str()); } if (!nonLinearList.empty()) { std::string s = "The following grids were skipped: '" + hboost::algorithm::join(nonLinearList, ", ") + "' because they don't have a linear/affine transform."; addWarning(SOP_MESSAGE, s.c_str()); } if (!grids.empty() && !boss.wasInterrupted()) { if (grids.front()->isType<openvdb::FloatGrid>()) { process<openvdb::FloatGrid>(grids, cutterGeo, pointGeo, boss, time); } else if (grids.front()->isType<openvdb::DoubleGrid>()) { process<openvdb::DoubleGrid>(grids, cutterGeo, pointGeo, boss, time); } else { addError(SOP_MESSAGE, "Unsupported grid type"); } for (std::vector<GU_PrimVDB*>::iterator it = origvdbs.begin(); it != origvdbs.end(); ++it) { gdp->destroyPrimitive(**it, /*andPoints=*/true); } } else { addWarning(SOP_MESSAGE, "No VDB grids to fracture"); } if (boss.wasInterrupted()) { addWarning(SOP_MESSAGE, "Process was interrupted"); } boss.end(); } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); } //////////////////////////////////////// template<class GridType> void SOP_OpenVDB_Fracture::Cache::process( std::list<openvdb::GridBase::Ptr>& grids, const GU_Detail* cutterGeo, const GU_Detail* pointGeo, hvdb::Interrupter& boss, const fpreal time) { GA_PrimitiveGroup* group = nullptr; // Evaluate the UI parameters. 
{ UT_String newGropStr; evalString(newGropStr, "fragmentgroup", 0, time); if(newGropStr.length() > 0) { group = gdp->findPrimitiveGroup(newGropStr); if (!group) group = gdp->newPrimitiveGroup(newGropStr); } } const bool randomizeRotation = bool(evalInt("randomizerotation", 0, time)); const bool cutterOverlap = bool(evalInt("cutteroverlap", 0, time)); const exint visualization = evalInt("visualizepieces", 0, time); const bool segmentFragments = bool(evalInt("segmentfragments", 0, time)); using ValueType = typename GridType::ValueType; typename GridType::Ptr firstGrid = openvdb::gridPtrCast<GridType>(grids.front()); if (!firstGrid) { addError(SOP_MESSAGE, "Unsupported grid type."); return; } // Get the first grid's transform and background value. openvdb::math::Transform::Ptr transform = firstGrid->transformPtr(); const ValueType backgroundValue = firstGrid->background(); std::vector<openvdb::Vec3s> instancePoints; std::vector<openvdb::math::Quats> instanceRotations; if (pointGeo != nullptr) { instancePoints.resize(pointGeo->getNumPoints()); GA_Range range(pointGeo->getPointRange()); GA_AttributeInstanceMatrix instanceMatrix; instanceMatrix.initialize(pointGeo->pointAttribs(), "N", "v"); // Ignore any scaling until levelset fracture supports it. instanceMatrix.resetScales(); // If we're randomizing or there are *any* valid transformation // attributes, we need to create an instance matrix. 
if (randomizeRotation || instanceMatrix.hasAnyAttribs()) { instanceRotations.resize(instancePoints.size()); using RandGen = std::mt19937; RandGen rng(RandGen::result_type(evalInt("seed", 0, time))); std::uniform_real_distribution<float> uniform01; const float two_pi = 2.0f * hboost::math::constants::pi<float>(); UT_DMatrix4 xform; UT_Vector3 trans; UT_DMatrix3 rotmat; UT_QuaternionD quat; for (GA_Iterator it(range); !it.atEnd(); it.advance()) { UT_Vector3 pos = pointGeo->getPos3(*it); if (randomizeRotation) { // Generate uniform random rotations by picking random // points in the unit cube and forming the unit quaternion. const float u = uniform01(rng); const float c1 = std::sqrt(1-u); const float c2 = std::sqrt(u); const float s1 = two_pi * uniform01(rng); const float s2 = two_pi * uniform01(rng); UT_Quaternion orient(c1 * std::sin(s1), c1 * std::cos(s1), c2 * std::sin(s2), c2 * std::cos(s2)); instanceMatrix.getMatrix(xform, pos, orient, *it); } else { instanceMatrix.getMatrix(xform, pos, *it); } GA_Index i = pointGeo->pointIndex(*it); xform.getTranslates(trans); xform.extractRotate(rotmat); quat.updateFromRotationMatrix(rotmat); instancePoints[i] = openvdb::Vec3s(trans.x(), trans.y(), trans.z()); instanceRotations[i].init( static_cast<float>(quat.x()), static_cast<float>(quat.y()), static_cast<float>(quat.z()), static_cast<float>(quat.w())); } } else { // No randomization or valid instance attributes, just use P. 
for (GA_Iterator it(range); !it.atEnd(); it.advance()) { UT_Vector3 pos = pointGeo->getPos3(*it); instancePoints[pointGeo->pointIndex(*it)] = openvdb::Vec3s(pos.x(), pos.y(), pos.z()); } } } if (boss.wasInterrupted()) return; std::list<typename GridType::Ptr> residuals; { std::list<openvdb::GridBase::Ptr>::iterator it = grids.begin(); std::vector<std::string> badTransformList, badBackgroundList, badTypeList; for (; it != grids.end(); ++it) { typename GridType::Ptr residual = openvdb::gridPtrCast<GridType>(*it); if (residual) { if (residual->transform() != *transform) { badTransformList.push_back(residual->getName()); continue; } if (!openvdb::math::isApproxEqual(residual->background(), backgroundValue)) { badBackgroundList.push_back(residual->getName()); continue; } residuals.push_back(residual); } else { badTypeList.push_back(residual->getName()); continue; } } grids.clear(); if (!badTransformList.empty()) { std::string s = "The following grids were skipped: '" + hboost::algorithm::join(badTransformList, ", ") + "' because they don't match the transform of the first grid."; addWarning(SOP_MESSAGE, s.c_str()); } if (!badBackgroundList.empty()) { std::string s = "The following grids were skipped: '" + hboost::algorithm::join(badBackgroundList, ", ") + "' because they don't match the background value of the first grid."; addWarning(SOP_MESSAGE, s.c_str()); } if (!badTypeList.empty()) { std::string s = "The following grids were skipped: '" + hboost::algorithm::join(badTypeList, ", ") + "' because they don't have the same data type as the first grid."; addWarning(SOP_MESSAGE, s.c_str()); } } // Setup fracture tool openvdb::tools::LevelSetFracture<GridType, hvdb::Interrupter> lsFracture(&boss); const bool separatecutters = (pointGeo == nullptr) && bool(evalInt("separatecutters", 0, time)); std::vector<openvdb::Vec3s> pointList; { pointList.resize(cutterGeo->getNumPoints()); openvdb::math::Transform::Ptr xform = transform->copy(); if (!instancePoints.empty() && 
!separatecutters && bool(evalInt("centercutter", 0, time))) { UT_BoundingBox pointBBox; cutterGeo->getPointBBox(&pointBBox); UT_Vector3 center = pointBBox.center(); xform->postTranslate(openvdb::Vec3s(center.x(), center.y(), center.z())); } UTparallelFor(GA_SplittableRange(cutterGeo->getPointRange()), hvdb::TransformOp(cutterGeo, *xform, pointList)); } // Check for multiple cutter objects GEO_PrimClassifier primClassifier; if (separatecutters) { primClassifier.classifyBySharedPoints(*cutterGeo); } const int cutterObjects = separatecutters ? primClassifier.getNumClass() : 1; const float bandWidth = float(backgroundValue / transform->voxelSize()[0]); if (cutterObjects > 1) { GA_Offset start, end; GA_SplittableRange range(cutterGeo->getPrimitiveRange()); for (int classId = 0; classId < cutterObjects; ++classId) { if (boss.wasInterrupted()) break; size_t numPrims = 0; for (GA_PageIterator pageIt = range.beginPages(); !pageIt.atEnd(); ++pageIt) { for (GA_Iterator blockIt(pageIt.begin()); blockIt.blockAdvance(start, end); ) { for (GA_Offset i = start; i < end; ++i) { if (classId == primClassifier.getClass( static_cast<int>(cutterGeo->primitiveIndex(i)))) { ++numPrims; } } } } typename GridType::Ptr cutterGrid; if (numPrims == 0) continue; { std::vector<openvdb::Vec4I> primList; primList.reserve(numPrims); openvdb::Vec4I prim; using Vec4IValueType = openvdb::Vec4I::ValueType; for (GA_PageIterator pageIt = range.beginPages(); !pageIt.atEnd(); ++pageIt) { for (GA_Iterator blockIt(pageIt.begin()); blockIt.blockAdvance(start, end); ) { for (GA_Offset i = start; i < end; ++i) { if (classId == primClassifier.getClass( static_cast<int>(cutterGeo->primitiveIndex(i)))) { const GA_Primitive* primRef = cutterGeo->getPrimitiveList().get(i); const GA_Size vtxn = primRef->getVertexCount(); if ((primRef->getTypeId() == GEO_PRIMPOLY) && (3 == vtxn || 4 == vtxn)) { for (GA_Size vtx = 0; vtx < vtxn; ++vtx) { prim[int(vtx)] = static_cast<Vec4IValueType>( 
cutterGeo->pointIndex(primRef->getPointOffset(vtx))); } if (vtxn != 4) prim[3] = openvdb::util::INVALID_IDX; primList.push_back(prim); } } } } } openvdb::tools::QuadAndTriangleDataAdapter<openvdb::Vec3s, openvdb::Vec4I> mesh(pointList, primList); cutterGrid = openvdb::tools::meshToVolume<GridType>( boss, mesh, *transform, bandWidth, bandWidth); } if (!cutterGrid || cutterGrid->activeVoxelCount() == 0) continue; lsFracture.fracture(residuals, *cutterGrid, segmentFragments, nullptr, nullptr, cutterOverlap); } } else { // Convert cutter object mesh to level-set typename GridType::Ptr cutterGrid; { std::vector<openvdb::Vec4I> primList; primList.resize(cutterGeo->getNumPrimitives()); UTparallelFor(GA_SplittableRange(cutterGeo->getPrimitiveRange()), hvdb::PrimCpyOp(cutterGeo, primList)); openvdb::tools::QuadAndTriangleDataAdapter<openvdb::Vec3s, openvdb::Vec4I> mesh(pointList, primList); cutterGrid = openvdb::tools::meshToVolume<GridType>( boss, mesh, *transform, bandWidth, bandWidth); } if (!cutterGrid || cutterGrid->activeVoxelCount() == 0 || boss.wasInterrupted()) return; lsFracture.fracture(residuals, *cutterGrid, segmentFragments, &instancePoints, &instanceRotations, cutterOverlap); } if (boss.wasInterrupted()) return; typename std::list<typename GridType::Ptr>::iterator it; // Primitive Color GA_RWHandleV3 color; if (visualization) { GA_RWAttributeRef attrRef = gdp->findDiffuseAttribute(GA_ATTRIB_PRIMITIVE); if (!attrRef.isValid()) attrRef = gdp->addDiffuseAttribute(GA_ATTRIB_PRIMITIVE); color.bind(attrRef.getAttribute()); } UT_IntArray piececount; UT_IntArray totalpiececount; piececount.entries(gdp->getNumPrimitiveOffsets()); totalpiececount.entries(gdp->getNumPrimitiveOffsets()); GU_ConvertParms parms; parms.preserveGroups = true; // Export residual fragments exint coloridx = 0; GA_RWHandleS name_h(gdp, GA_ATTRIB_PRIMITIVE, "name"); // We have to do a pre-pass over all pieces to compute the total // number of pieces from each original object. 
This way // we can tell if we need to do renaming or not. for (it = residuals.begin(); it != residuals.end(); ++it) { GA_Offset origvdboff = GA_INVALID_OFFSET; typename GridType::Ptr grid = *it; openvdb::Int64Metadata::Ptr offmeta = grid->template getMetadata<openvdb::Int64Metadata>("houdiniorigoffset"); if (offmeta) { origvdboff = static_cast<GA_Offset>(offmeta->value()); } if (origvdboff != GA_INVALID_OFFSET) { totalpiececount(origvdboff)++; } } for (it = lsFracture.fragments().begin(); it != lsFracture.fragments().end(); ++it) { GA_Offset origvdboff = GA_INVALID_OFFSET; typename GridType::Ptr grid = *it; openvdb::Int64Metadata::Ptr offmeta = grid->template getMetadata<openvdb::Int64Metadata>("houdiniorigoffset"); if (offmeta) { origvdboff = static_cast<GA_Offset>(offmeta->value()); } if (origvdboff != GA_INVALID_OFFSET) { totalpiececount(origvdboff)++; } } for (it = residuals.begin(); it != residuals.end(); ++it) { if (boss.wasInterrupted()) break; typename GridType::Ptr grid = *it; GA_Offset origvdboff = GA_INVALID_OFFSET; openvdb::Int64Metadata::Ptr offmeta = grid->template getMetadata<openvdb::Int64Metadata>("houdiniorigoffset"); if (offmeta) { origvdboff = static_cast<GA_Offset>(offmeta->value()); grid->removeMeta("houdiniorigoffset"); } std::string gridname = grid->getName(); UT_String name; name.harden(gridname.c_str()); // Suffix our name. 
if (name.isstring() && origvdboff != GA_INVALID_OFFSET && totalpiececount(origvdboff) > 1) { UT_WorkBuffer buf; buf.sprintf("%s_%d", static_cast<const char*>(name), piececount(origvdboff)); piececount(origvdboff)++; name.harden(buf.buffer()); } GU_PrimVDB* vdb = hvdb::createVdbPrimitive(*gdp, grid, static_cast<const char*>(name)); if (origvdboff != GA_INVALID_OFFSET) { GU_PrimVDB* origvdb = dynamic_cast<GU_PrimVDB*>(gdp->getGEOPrimitive(origvdboff)); if (origvdb) { GA_Offset newvdbpt; newvdbpt = vdb->getPointOffset(0); GUconvertCopySingleVertexPrimAttribsAndGroups( parms, *origvdb->getParent(), origvdb->getMapOffset(), *gdp, GA_Range(gdp->getPrimitiveMap(), vdb->getMapOffset(), vdb->getMapOffset()+1), GA_Range(gdp->getPointMap(), newvdbpt, newvdbpt+1)); } } if (visualization == 1 && color.isValid()) { float r, g, b; UT_Color::getUniqueColor(coloridx, &r, &g, &b); color.set(vdb->getMapOffset(), UT_Vector3(r, g, b)); } coloridx++; if (name.isstring() && name_h.isValid()) { name_h.set(vdb->getMapOffset(), static_cast<const char*>(name)); } } if (boss.wasInterrupted()) return; // Export new fragments for (it = lsFracture.fragments().begin(); it != lsFracture.fragments().end(); ++it) { if (boss.wasInterrupted()) break; typename GridType::Ptr grid = *it; GA_Offset origvdboff = GA_INVALID_OFFSET; openvdb::Int64Metadata::Ptr offmeta = grid->template getMetadata<openvdb::Int64Metadata>("houdiniorigoffset"); if (offmeta) { origvdboff = static_cast<GA_Offset>(offmeta->value()); grid->removeMeta("houdiniorigoffset"); } std::string gridname = grid->getName(); UT_String name; name.harden(gridname.c_str()); // Suffix our name. 
if (name.isstring() && origvdboff != GA_INVALID_OFFSET) { UT_WorkBuffer buf; buf.sprintf("%s_%d", static_cast<const char*>(name), piececount(origvdboff)); piececount(origvdboff)++; name.harden(buf.buffer()); } GU_PrimVDB* vdb = hvdb::createVdbPrimitive(*gdp, grid, static_cast<const char*>(name)); if (origvdboff != GA_INVALID_OFFSET) { GU_PrimVDB* origvdb = dynamic_cast<GU_PrimVDB*>(gdp->getGEOPrimitive(origvdboff)); if (origvdb) { GA_Offset newvdbpt; newvdbpt = vdb->getPointOffset(0); GUconvertCopySingleVertexPrimAttribsAndGroups( parms, *origvdb->getParent(), origvdb->getMapOffset(), *gdp, GA_Range(gdp->getPrimitiveMap(), vdb->getMapOffset(), vdb->getMapOffset()+1), GA_Range(gdp->getPointMap(), newvdbpt, newvdbpt+1)); } } if (name.isstring() && name_h.isValid()) { name_h.set(vdb->getMapOffset(), static_cast<const char*>(name)); } if (group) group->add(vdb); if (visualization && color.isValid()) { float r, g, b; UT_Color::getUniqueColor(coloridx++, &r, &g, &b); color.set(vdb->getMapOffset(), UT_Vector3(r, g, b)); } coloridx++; } }
31,091
C++
34.251701
100
0.583481
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_To_Spheres.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file SOP_OpenVDB_To_Spheres.cc
///
/// @author FX R&D OpenVDB team
///
/// @brief Fills a volume with adaptively sized overlapping or nonoverlapping spheres.

#include <houdini_utils/ParmFactory.h>
#include <openvdb_houdini/SOP_NodeVDB.h>
#include <openvdb_houdini/GeometryUtil.h>
#include <openvdb_houdini/Utils.h>
#include <openvdb/tools/VolumeToSpheres.h>
#include <GU/GU_ConvertParms.h>
#include <GU/GU_Detail.h>
#include <GU/GU_PrimSphere.h>
#include <PRM/PRM_Parm.h>
#include <GA/GA_PageIterator.h>
#include <UT/UT_Interrupt.h>
#include <hboost/algorithm/string/join.hpp>
#include <algorithm>
#include <limits>
#include <stdexcept>
#include <string>
#include <vector>

namespace hvdb = openvdb_houdini;
namespace hutil = houdini_utils;


////////////////////////////////////////


/// @brief SOP that converts each input scalar VDB into a compound of
/// adaptively sized spheres approximating its interior.
class SOP_OpenVDB_To_Spheres: public hvdb::SOP_NodeVDB
{
public:
    SOP_OpenVDB_To_Spheres(OP_Network*, const char* name, OP_Operator*);
    ~SOP_OpenVDB_To_Spheres() override {}

    static OP_Node* factory(OP_Network*, const char* name, OP_Operator*);

    // Inputs beyond the first are reference-only (not modified).
    int isRefInput(unsigned i) const override { return (i > 0); }

    void checkActivePart(float time);

    class Cache: public SOP_VDBCacheOptions
    {
        OP_ERROR cookVDBSop(OP_Context&) override;
    };

protected:
    void resolveObsoleteParms(PRM_ParmList*) override;
    bool updateParmsFlags() override;
};


////////////////////////////////////////


// Register the node type and build its parameter interface.
void
newSopOperator(OP_OperatorTable* table)
{
    if (table == nullptr) return;

    hutil::ParmList parms;

    parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group")
        .setChoiceList(&hutil::PrimGroupMenuInput1)
        .setTooltip("A subset of the input VDBs to be processed")
        .setDocumentation(
            "A subset of the input VDB grids to be processed"
            " (see [specifying volumes|/model/volumes#group])"));

    parms.add(hutil::ParmFactory(PRM_FLT_J, "isovalue", "Isovalue")
        .setDefault(PRMzeroDefaults)
        .setRange(PRM_RANGE_UI, -1.0, PRM_RANGE_UI, 1.0)
        .setTooltip(
            "The voxel value that determines the surface of the volume\n\n"
            "Zero works for signed distance fields, while fog volumes"
            " require a small positive value (0.5 is a good initial guess)."))
;

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "worldunits", "Use World Space Units")
        .setDocumentation(
            "If enabled, specify sphere radii in world units, otherwise in voxels."));

    // Each min/max limit is a toggle (enable) joined with a value field.
    parms.add(hutil::ParmFactory(PRM_TOGGLE, "useradiusmin", "")
        .setDefault(PRMoneDefaults)
        .setTypeExtended(PRM_TYPE_TOGGLE_JOIN));

    parms.add(hutil::ParmFactory(PRM_FLT_J, "radiusmin", "Min Radius")
        .setDefault(PRMoneDefaults)
        .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 2.0)
        .setTooltip("The radius of the smallest sphere allowed")
        .setDocumentation(
            "The radius of the smallest sphere allowed\n\n"
            "If disabled, allow spheres of any radius greater than zero."));

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "useradiusmax", "")
        .setDefault(PRMzeroDefaults)
        .setTypeExtended(PRM_TYPE_TOGGLE_JOIN));

    parms.add(hutil::ParmFactory(PRM_FLT_J, "radiusmax", "Max Radius")
        .setDefault(100.0)
        .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 100.0)
        .setTooltip("The radius of the largest sphere allowed")
        .setDocumentation(
            "The radius of the largest sphere allowed\n\n"
            "If disabled, allow arbitrarily large spheres."));

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "usespheresmin", "")
        .setDefault(PRMoneDefaults)
        .setTypeExtended(PRM_TYPE_TOGGLE_JOIN));

    parms.add(hutil::ParmFactory(PRM_INT_J, "spheresmin", "Min Spheres")
        .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 100)
        .setDefault(1)
        .setTooltip("The minimum number of spheres to be generated")
        .setDocumentation(
            "The minimum number of spheres to be generated\n\n"
            "If disabled, allow very small VDBs to not generate any spheres.\n\n"
            "NOTE:\n"
            "    __Min Spheres__ takes precedence over __Min Radius__.\n"
            "    Spheres smaller than __Min Radius__ might be generated\n"
            "    in order to ensure that the minimum sphere count is satisfied."))
;

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "usespheresmax", "")
        .setDefault(PRMoneDefaults)
        .setTypeExtended(PRM_TYPE_TOGGLE_JOIN));

    parms.add(hutil::ParmFactory(PRM_INT_J, "spheresmax", "Max Spheres")
        .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 100)
        .setDefault(50)
        .setTooltip("The maximum number of spheres to be generated")
        .setDocumentation(
            "The maximum number of spheres to be generated\n\n"
            "If disabled, allow for up to __Point Count__ spheres to be generated."));

    parms.add(hutil::ParmFactory(PRM_INT_J, "scatter", "Point Count")
        .setRange(PRM_RANGE_RESTRICTED, 1000, PRM_RANGE_UI, 50000)
        .setDefault(10000)
        .setTooltip(
            "The number of candidate sphere centers to consider\n\n"
            "Increasing this count increases the chances of finding optimal sphere sizes."));

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "overlapping", "Overlapping")
#ifndef SESI_OPENVDB
        .setDefault(PRMzeroDefaults)
#else
        .setDefault(PRMoneDefaults)
#endif
        .setTooltip("If enabled, allow spheres to overlap/intersect."));

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "preserve", "Preserve Attributes and Groups")
#ifndef SESI_OPENVDB
        .setDefault(PRMzeroDefaults)
#else
        .setDefault(PRMoneDefaults)
#endif
        .setTooltip("If enabled, copy attributes and groups from the input."));

    // The "doid" parameter name comes from the standard in POPs
    parms.add(hutil::ParmFactory(PRM_TOGGLE, "doid", "Add ID Attribute")
#ifndef SESI_OPENVDB
        .setDefault(PRMoneDefaults)
#else
        .setDefault(PRMzeroDefaults)
#endif
        .setTooltip("If enabled, add an id point attribute that denotes the source VDB.")
        .setDocumentation(
            "If enabled, add an `id` point attribute that denotes the source VDB"
            " for each sphere."));

    parms.add(hutil::ParmFactory(PRM_TOGGLE, "dopscale", "Add PScale Attribute")
        .setDefault(PRMzeroDefaults)
        .setTooltip("If enabled, add a pscale point attribute to each sphere.")
        .setDocumentation(
            "If enabled, add a `pscale` point attribute that indicates"
            " the radius of each sphere."));

    //////////

    const float fmax = std::numeric_limits<float>::max();

    // Obsolete (pre-rename) parameters, migrated in resolveObsoleteParms().
    hutil::ParmList obsoleteParms;
    obsoleteParms.add(hutil::ParmFactory(PRM_INT_J, "spheres", "Max Spheres").setDefault(50));
    obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "minradius", "Min Radius").setDefault(1.0));
    obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "maxradius", "Max Radius").setDefault(fmax));
    obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "minradiusworld", "").setDefault(0.1));
    obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "maxradiusworld", "").setDefault(fmax));

    //////////

    hvdb::OpenVDBOpFactory("VDB to Spheres",
        SOP_OpenVDB_To_Spheres::factory, parms, *table)
#ifndef SESI_OPENVDB
        .setInternalName("DW_OpenVDBToSpheres")
#endif
        .addInput("VDBs to convert")
        .setObsoleteParms(obsoleteParms)
        .setVerb(SOP_NodeVerb::COOK_GENERATOR,
            []() { return new SOP_OpenVDB_To_Spheres::Cache; })
        .setDocumentation("\
#icon: COMMON/openvdb\n\
#tags: vdb\n\
\n\
\"\"\"Fill a VDB volume with adaptively-sized spheres.\"\"\"\n\
\n\
@overview\n\
\n\
This node is useful for generating proxy geometry for RBD simulations,\n\
since approximating nonconvex geometry with sphere compounds\n\
drastically improves the simulation time.\n\
This can be used, for example, on the output of an\n\
[OpenVDB Fracture node|Node:sop/DW_OpenVDBFracture].\n\
\n\
Another use is to produce the initial density volume for cloud modeling.\n\
\n\
@related\n\
- [OpenVDB Fracture|Node:sop/DW_OpenVDBFracture]\n\
- [Node:sop/cloud]\n\
- [Node:sop/vdbtospheres]\n\
\n\
@examples\n\
\n\
See [openvdb.org|http://www.openvdb.org/download/] for source code\n\
and usage examples.\n");
}


// Migrate values from renamed/removed parameters onto the current interface.
void
SOP_OpenVDB_To_Spheres::resolveObsoleteParms(PRM_ParmList* obsoleteParms)
{
    if (!obsoleteParms) return;

    const fpreal time = 0.0;

    const bool worldUnits = (0 != evalInt("worldunits", 0, time));

    resolveRenamedParm(*obsoleteParms, "spheres", "spheresmax");
    resolveRenamedParm(*obsoleteParms, "minradius", "radiusmin");

    // If world units are enabled, use the old world-space radius bounds if they exist.
    if (worldUnits && obsoleteParms->getParmPtr("minradiusworld")
        && !obsoleteParms->getParmPtr("minradiusworld")->isFactoryDefault())
    {
        setFloat("radiusmin", 0, time, obsoleteParms->evalFloat("minradiusworld", 0, time));
    }

    {
        // The old "maxradius" and "maxradiusworld" parameters had default values
        // of numeric_limits<float>::max(), indicating no upper bound.
        // That state is now represented by the "useradiusmax" toggle, which defaults to Off.
        // If "maxradius" (or "maxradiusworld" in world-space mode) had a non-default value,
        // transfer that value to "radiusmax" and toggle "useradiusmax" on.
        char const * const oldName = (worldUnits ? "maxradiusworld" : "maxradius");
        PRM_Parm* parm = obsoleteParms->getParmPtr(oldName);
        if (parm && !parm->isFactoryDefault()) {
            setFloat("radiusmax", 0, time, obsoleteParms->evalFloat(oldName, 0, time));
            setInt("useradiusmax", 0, time, true);
        }
    }

    hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms);
}


OP_Node*
SOP_OpenVDB_To_Spheres::factory(OP_Network* net, const char* name, OP_Operator* op)
{
    return new SOP_OpenVDB_To_Spheres(net, name, op);
}


SOP_OpenVDB_To_Spheres::SOP_OpenVDB_To_Spheres(OP_Network* net,
    const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op)
{
}


// Enable each limit's value field only while its toggle is on.
bool
SOP_OpenVDB_To_Spheres::updateParmsFlags()
{
    bool changed = false;
    changed |= enableParm("radiusmin", (0 != evalInt("useradiusmin", 0, 0)));
    changed |= enableParm("radiusmax", (0 != evalInt("useradiusmax", 0, 0)));
    changed |= enableParm("spheresmin", (0 != evalInt("usespheresmin", 0, 0)));
    changed |= enableParm("spheresmax", (0 != evalInt("usespheresmax", 0, 0)));
    return changed;
}


////////////////////////////////////////


// Generate sphere geometry for each selected scalar VDB.
OP_ERROR
SOP_OpenVDB_To_Spheres::Cache::cookVDBSop(OP_Context& context)
{
    try {
        const fpreal time = context.getTime();

        hvdb::Interrupter boss("Filling VDBs with spheres");

        const GU_Detail* vdbGeo = inputGeo(0);
        if (vdbGeo == nullptr) return error();

        const GA_PrimitiveGroup* group = matchGroup(*vdbGeo, evalStdString("group", time));
        hvdb::VdbPrimCIterator vdbIt(vdbGeo, group);

        if (!vdbIt) {
            addWarning(SOP_MESSAGE, "No VDBs found.");
            return error();
        }

        // Eval attributes
        const bool
            addID = (0 != evalInt("doid", 0, time)),
            addPScale = (0 != evalInt("dopscale", 0, time)),
            overlapping = (0 != evalInt("overlapping", 0, time)),
            preserve = (0 != evalInt("preserve", 0, time)),
            useMinRadius = (0 != evalInt("useradiusmin", 0, time)),
            useMaxRadius = (0 != evalInt("useradiusmax", 0, time)),
            useMinSpheres = (0 != evalInt("usespheresmin", 0, time)),
            useMaxSpheres = (0 != evalInt("usespheresmax", 0, time)),
            worldUnits = (0 != evalInt("worldunits", 0, time));

        // Disabled toggles fall back to effectively unbounded limits.
        const float
            fmin = std::numeric_limits<float>::min(),
            fmax = std::numeric_limits<float>::max(),
            isovalue = static_cast<float>(evalFloat("isovalue", 0, time)),
            minRadius = !useMinRadius ? fmin
                : static_cast<float>(evalFloat("radiusmin", 0, time)),
            maxRadius = !useMaxRadius ? fmax
                : static_cast<float>(evalFloat("radiusmax", 0, time));

        const int scatter = static_cast<int>(evalInt("scatter", 0, time));

        const openvdb::Vec2i sphereCount(
            !useMinSpheres ? 0 : static_cast<int>(evalInt("spheresmin", 0, time)),
            !useMaxSpheres ? scatter : static_cast<int>(evalInt("spheresmax", 0, time)));

        // Optional per-sphere "id" attribute (source-VDB index, 1-based).
        GA_RWHandleI idAttr;
        if (addID) {
            GA_RWAttributeRef aRef = gdp->findPointAttribute("id");
            if (!aRef.isValid()) {
                aRef = gdp->addIntTuple(GA_ATTRIB_POINT, "id", 1, GA_Defaults(0));
            }
            idAttr = aRef.getAttribute();
            if(!idAttr.isValid()) {
                addWarning(SOP_MESSAGE, "Failed to create the point ID attribute.");
                return error();
            }
        }

        // Optional per-sphere "pscale" attribute (sphere radius).
        GA_RWHandleF pscaleAttr;
        if (addPScale) {
            GA_RWAttributeRef aRef = gdp->findFloatTuple(GA_ATTRIB_POINT, GEO_STD_ATTRIB_PSCALE);
            if (!aRef.isValid()) {
                aRef = gdp->addFloatTuple(
                    GA_ATTRIB_POINT, GEO_STD_ATTRIB_PSCALE, 1, GA_Defaults(0));
            }
            pscaleAttr = aRef.getAttribute();
            if(!pscaleAttr.isValid()) {
                addWarning(SOP_MESSAGE, "Failed to create the point pscale attribute.");
                return error();
            }
        }

        int idNumber = 1;

        GU_ConvertParms parms;
        parms.setKeepGroups(true);

        std::vector<std::string> skippedGrids;

        for (; vdbIt; ++vdbIt) {
            if (boss.wasInterrupted()) break;

            openvdb::Vec2s radiusRange(minRadius, maxRadius);
            if (worldUnits) {
                // Convert world-space radii to voxel units for fillWithSpheres().
                const float voxelScale = float(1.0 / vdbIt->getGrid().voxelSize()[0]);
                radiusRange *= voxelScale;
            }

            // Guarantee a nonempty radius interval.
            radiusRange[1] = std::max(radiusRange[1], radiusRange[0] + float(1e-5));

            std::vector<openvdb::Vec4s> spheres;

            if (vdbIt->getGrid().type() == openvdb::FloatGrid::gridType()) {

                openvdb::FloatGrid::ConstPtr gridPtr =
                    openvdb::gridConstPtrCast<openvdb::FloatGrid>(vdbIt->getGridPtr());

                openvdb::tools::fillWithSpheres(*gridPtr, spheres, sphereCount,
                    overlapping, radiusRange[0], radiusRange[1], isovalue, scatter, &boss);

            } else if (vdbIt->getGrid().type() == openvdb::DoubleGrid::gridType()) {

                openvdb::DoubleGrid::ConstPtr gridPtr =
                    openvdb::gridConstPtrCast<openvdb::DoubleGrid>(vdbIt->getGridPtr());

                openvdb::tools::fillWithSpheres(*gridPtr, spheres, sphereCount,
                    overlapping, radiusRange[0], radiusRange[1], isovalue, scatter, &boss);

            } else {
                skippedGrids.push_back(vdbIt.getPrimitiveNameOrIndex().toStdString());
                continue;
            }

            // Marks the geometry added below so attributes can be copied onto it.
            GA_Detail::OffsetMarker marker(*gdp);

            // copy spheres to Houdini
            for (size_t n = 0, N = spheres.size(); n < N; ++n) {

                const openvdb::Vec4s& sphere = spheres[n];

                GA_Offset ptoff = gdp->appendPointOffset();

                gdp->setPos3(ptoff, sphere.x(), sphere.y(), sphere.z());

                if (addID) {
                    idAttr.set(ptoff, idNumber);
                }

                if (addPScale) {
                    pscaleAttr.set(ptoff, sphere[3]);
                }

                UT_Matrix4 mat = UT_Matrix4::getIdentityMatrix();
                mat.scale(sphere[3],sphere[3],sphere[3]);

                GU_PrimSphereParms sphereParms(gdp, ptoff);
                sphereParms.xform = mat;
                GU_PrimSphere::build(sphereParms);
            }

            if (preserve) {
                GUconvertCopySingleVertexPrimAttribsAndGroups(
                    parms, *vdbGeo, vdbIt.getOffset(), *gdp,
                    marker.primitiveRange(), marker.pointRange());
            }
            ++idNumber;
        }

        if (!skippedGrids.empty()) {
            std::string s = "Only scalar (float/double) grids are supported, the following "
                "were skipped: '" + hboost::algorithm::join(skippedGrids, ", ") + "'.";
            addWarning(SOP_MESSAGE, s.c_str());
        }

        if (boss.wasInterrupted()) {
            addWarning(SOP_MESSAGE, "Process was interrupted");
        }

        boss.end();

    } catch (std::exception& e) {
        addError(SOP_MESSAGE, e.what());
    }
    return error();
}
16,781
C++
35.482609
99
0.614922
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_From_Particles.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_From_Particles.cc /// /// @author FX R&D OpenVDB team /// /// @brief Converts points into signed distance / level set volumes. /// /// @note The world space narrow band half-width is encoded in the /// background value of a level set grid. #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/AttributeTransferUtil.h> #include <openvdb_houdini/GU_VDBPointTools.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb_houdini/Utils.h> #include <openvdb/Grid.h> #include <openvdb/tools/LevelSetUtil.h> #include <openvdb/tools/Morphology.h> #include <openvdb/tools/ParticlesToLevelSet.h> #include <openvdb/tools/TopologyToLevelSet.h> #include <CH/CH_Manager.h> #include <GA/GA_Types.h> // for GA_ATTRIB_POINT #include <PRM/PRM_Parm.h> #include <UT/UT_Assert.h> #include <algorithm> #include <cmath> #include <cstdlib> // for std::atoi() #include <sstream> #include <stdexcept> #include <string> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; namespace { class ParticleList; } class SOP_OpenVDB_From_Particles: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_From_Particles(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) {} static OP_Node* factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_From_Particles(net, name, op); } int isRefInput(unsigned i) const override { return (i > 0); } int convertUnits(); static int convertUnitsCB(void* data, int /*idx*/, float /*time*/, const PRM_Template*) { if (auto* sop = static_cast<SOP_OpenVDB_From_Particles*>(data)) { return sop->convertUnits(); } return 0; } static void buildAttrMenu(void*, PRM_Name*, int, const PRM_SpareData*, const PRM_Parm*); static const PRM_ChoiceList sPointAttrMenu; class Cache: public SOP_VDBCacheOptions { public: float voxelSize() const { return mVoxelSize; } protected: OP_ERROR 
cookVDBSop(OP_Context&) override; private: void convert( fpreal time, ParticleList&, openvdb::FloatGrid::Ptr, openvdb::BoolGrid::Ptr, hvdb::Interrupter&); void convertWithAttributes( fpreal time, const GU_Detail&, ParticleList&, openvdb::FloatGrid::Ptr, openvdb::BoolGrid::Ptr, hvdb::Interrupter&); int constructGenericAtttributeList( fpreal time, hvdb::AttributeDetailList&, const GU_Detail&, const openvdb::Int32Grid& closestPtIdxGrid); float mVoxelSize = 0.1f; }; // class Cache protected: void resolveObsoleteParms(PRM_ParmList*) override; bool updateParmsFlags() override; }; // class SOP_OpenVDB_From_Particles const PRM_ChoiceList SOP_OpenVDB_From_Particles::sPointAttrMenu( PRM_ChoiceListType(PRM_CHOICELIST_REPLACE), SOP_OpenVDB_From_Particles::buildAttrMenu); //////////////////////////////////////// void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_TOGGLE, "builddistance", "") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN)); parms.add(hutil::ParmFactory(PRM_STRING, "distancename", "Distance VDB") .setDefault("surface") .setTooltip("A name for the output SDF volume") .setDocumentation( "If enabled, output a narrow-band signed distance field with the given name.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "buildfog", "") .setTypeExtended(PRM_TYPE_TOGGLE_JOIN)); parms.add(hutil::ParmFactory(PRM_STRING, "fogname", "Fog VDB") .setDefault("density") .setTooltip("A name for the output fog volume") .setDocumentation( "If enabled, output a fog volume with the given name.\n\n" "Voxels inside particles will have value one, and voxels outside" " will have value zero. 
Within a narrow band centered on particle surfaces," " voxel values will vary linearly from zero to one.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "buildinteriormask", "") .setTypeExtended(PRM_TYPE_TOGGLE_JOIN)); parms.add(hutil::ParmFactory(PRM_STRING, "interiormaskname", "Interior Mask VDB") .setDefault("mask") .setTooltip("A name for the output interior mask volume") .setDocumentation( "If enabled, output an interior mask volume with the given name.\n\n" "Voxels inside particles will be active, and voxels outside will be inactive.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "buildmask", "") .setTypeExtended(PRM_TYPE_TOGGLE_JOIN)); parms.add(hutil::ParmFactory(PRM_STRING, "maskname", "Bounding Mask VDB") .setDefault("boundingvolume") .setTooltip("A name for the output bounding mask volume") .setDocumentation( "If enabled, output an alpha mask volume with the given name.\n\n" "The alpha mask is a fog volume derived from the CSG difference" " between a level set surface with a maximum radius of the particles" " and a level set surface with a minimum radius of the particles." 
" This mask can be used to constrain level set smoothing so as to" " prevent surface details from being completely smoothed away.")); parms.add(hutil::ParmFactory(PRM_FLT_J, "boundinglimit", "Bounding Limit") .setDefault(0.25) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_RESTRICTED, 1) .setTooltip( "Fraction by which to increase the maximum and decrease the minimum" " particle radii used to define the limit surfaces for the alpha mask")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "usereferencevdb", "") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN)); parms.add(hutil::ParmFactory(PRM_STRING, "referencevdb", "Reference VDB") .setChoiceList(&hutil::PrimGroupMenuInput2) .setDocumentation( "If enabled, give output volumes the same orientation and voxel size as" " the selected VDB (see [specifying volumes|/model/volumes#group])" " and match the narrow-band width if the reference VDB is a level set.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "merge", "Merge with Reference VDB") .setDocumentation( "If a reference VDB is provided, union the new particles into it.\n\n" "This allows one to use the particles to specify only the surface detail" " and use a coarse, offset volume for the main bulk.")); parms.add(hutil::ParmFactory(PRM_FLT_J, "voxelsize", "Voxel Size") .setDefault(PRMpointOneDefaults) .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 5) .setDocumentation( "The desired voxel size in world units (smaller corresponds to higher resolution)\n\n" "Particles smaller than the voxel size will not be represented in the output VDB.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "useworldspace", "Use World Space for Band") .setCallbackFunc(&SOP_OpenVDB_From_Particles::convertUnitsCB) .setTooltip( "If enabled, specify the narrow-band width in world units, otherwise in voxels.")); parms.add(hutil::ParmFactory(PRM_FLT_J, "halfbandvoxels", "Half-Band Voxels") .setDefault(PRMthreeDefaults) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 10) .setTooltip( "Half 
the width of the narrow band in voxels\n" "Many level set operations require a minimum of three voxels.") .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_FLT_J, "halfband", "Half-Band") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 10) .setDocumentation("Half the width of the narrow band in world units")); parms.beginExclusiveSwitcher("conversion", "Conversion"); parms.addFolder("Spheres"); parms.add(hutil::ParmFactory(PRM_FLT_J, "particlescale", "Particle Scale") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_UI, 2.0) .setDocumentation( "Multiplier for the `pscale` point attribute, which defines" " the world space particle radius\n\n" "If the `pscale` attribute is missing, it is assumed to have a value of one.")); parms.add(hutil::ParmFactory(PRM_FLT_J, "minradius", "Minimum Radius") .setDefault(1.5) .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_UI, 2.0) .setTooltip( "Minimum radius in voxel units after scaling\n\n" "Particles smaller than this limit are ignored.\n" "Particles with radius smaller than 1.5 voxels will likely cause" " aliasing artifacts, so this should not be set lower than 1.5.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "velocitytrails", "Velocity Trails") .setTooltip( "Generate multiple spheres for each particle, trailing off" " in the direction of the particle's velocity attribute.") .setDocumentation( "Generate multiple spheres for each particle, trailing off" " in the direction opposite the particle's velocity attribute.\n\n" "The velocity attribute must be named `v` and be of type 3flt.")); parms.add(hutil::ParmFactory(PRM_FLT_J, "velocityscale", "Velocity Scale") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_UI, 0.0, PRM_RANGE_UI, 1.0) .setTooltip( "When velocity trails are enabled, scale the lengths of the trails by this amount.")); parms.add(hutil::ParmFactory(PRM_FLT_J, "trailresolution", "Trail Resolution") .setDefault(PRMoneDefaults) 
.setRange(PRM_RANGE_RESTRICTED, 0.2, PRM_RANGE_UI, 2.0) .setTooltip( "When velocity trails are enabled, separate the component spheres" " of each trail by this distance.\n\n" "Use this parameter to control aliasing and limit the number" " of particle instances.")); hutil::ParmList transferParms; transferParms.add(hutil::ParmFactory(PRM_STRING, "attribute#", "Attribute") .setChoiceList(&SOP_OpenVDB_From_Particles::sPointAttrMenu) .setSpareData(&SOP_Node::theFirstInput) .setTooltip( "A point attribute from which to create a VDB\n\n" "Integer and floating-point attributes of arbitrary precision" " and tuple size are supported.")); transferParms.add(hutil::ParmFactory(PRM_STRING, "attributeGridName#", "VDB Name") .setTooltip("The name for this VDB (leave blank to use the attribute's name)")); { std::vector<std::string> items; for (int i = 0; i < openvdb::NUM_VEC_TYPES ; ++i) { items.push_back(openvdb::GridBase::vecTypeToString(openvdb::VecType(i))); items.push_back(openvdb::GridBase::vecTypeExamples(openvdb::VecType(i))); } transferParms.add(hutil::ParmFactory(PRM_ORD, "vecType#", "Vector Type") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setTooltip("How vector values should be interpreted")); } parms.add(hutil::ParmFactory(PRM_TOGGLE, "buildattrs", "Transfer Attributes") .setDefault(PRMoneDefaults) .setTooltip( "Generate additional VDBs that store the values of point attributes.") .setDocumentation( "Generate additional VDBs that store the values of point" " [attributes|/model/attributes].\n\n" "When __Interior Mask VDB__ output is selected, attribute values will be set" " for voxels inside particles. 
Otherwise, attribute values will be set" " only for voxels in the narrow band around particle surfaces.")); parms.add(hutil::ParmFactory(PRM_MULTITYPE_LIST, "attrList", "Attributes") .setMultiparms(transferParms) .setDefault(PRMzeroDefaults)); parms.addFolder("Points"); parms.add(hutil::ParmFactory(PRM_INT_J, "dilation", "Dilation") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 10) .setTooltip( "Number of morphological dilation iterations " "used to expand the active voxel region")); parms.add(hutil::ParmFactory(PRM_INT_J, "closing", "Closing") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 10) .setTooltip( "Number of morphological closing iterations " "used to fill gaps in the active voxel region")); parms.add(hutil::ParmFactory(PRM_INT_J, "smoothing", "Smoothing") .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 10) .setTooltip("Number of surface smoothing iterations")); parms.endSwitcher(); hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "optionsHeading", "Options")); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "gradientWidth", "Gradient width")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "customGradientWidth", "")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "ptnIndexGridName", "")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "ptnIndexGrid", "")); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "transformHeading", "Transform")); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "outputHeading", "Output grids")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "threading", "Threading")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "matchlevelset", "")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "levelSet", "").setDefault(PRMoneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "gridName", "").setDefault("surface")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "fogVolumeGridName", "") .setDefault("density")); 
obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "fogVolume", "")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "maskVolume", "")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "maskVolumeGridName", "").setDefault("mask")); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "maskWidth", "").setDefault(0.25)); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "group", "")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "writeintoref", "")); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "bandWidth", "").setDefault(PRMthreeDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "bandWidthWS", "").setDefault(PRMoneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "worldSpaceUnits", "")); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "maxWidth", "Max Half-width")); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "Rmax", "Max Radius In Voxels")); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "voxelSize", "") .setDefault(PRMpointOneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "dR", "").setDefault(PRMoneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "Rmin", "").setDefault(1.5)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "dV", "").setDefault(PRMoneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "dX", "").setDefault(PRMoneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "transferHeading", "Attribute transfer")); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "particleHeading", "Conversion settings")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "prune", "Prune Level Set")); obsoleteParms.add(hutil::ParmFactory(PRM_ORD, "footprint", "")); // temporary parameters used in the 3.2 beta obsoleteParms.add(hutil::ParmFactory(PRM_INT_J, "dilate", "").setDefault(PRMoneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_INT_J, "erode", "").setDefault(PRMoneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "distancevdb", "") .setDefault(PRMoneDefaults)); 
obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "distancevdbname", "").setDefault("surface")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "fogvdb", "")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "fogvdbname", "").setDefault("density")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "maskvdb", "")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "maskvdbname", "") .setDefault("boundingvolume")); /// @todo obsoleteAttrParms hvdb::OpenVDBOpFactory("VDB from Particles", SOP_OpenVDB_From_Particles::factory, parms, *table) #ifndef SESI_OPENVDB .setInternalName("DW_OpenVDBFromParticles") #endif .addInput("Points to convert") .addOptionalInput("Optional reference VDB") .setObsoleteParms(obsoleteParms) .setVerb(SOP_NodeVerb::COOK_GENERATOR, []() { return new SOP_OpenVDB_From_Particles::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Convert point clouds and/or point attributes into VDB volumes.\"\"\"\n\ \n\ @overview\n\ \n\ This node can create signed distance fields, density fields (\"fog volumes\"),\n\ and/or boolean mask volumes from point clouds, optionally treating each point\n\ as a sphere whose radius is given by its point scale attribute (`pscale`).\n\ \n\ Since the resulting VDB volumes store only the voxels around each point,\n\ they can have a much a higher effective resolution than a traditional\n\ Houdini volume.\n\ \n\ NOTE:\n\ The `pscale` attribute is set by the [Attribute|Node:pop/attribute] POP\n\ or the [Point|Node:sop/point] SOP.\n\ \n\ Points smaller than 1.5 voxels cannot be resolved and will not appear in output VDBs.\n\ \n\ The __Particle Scale__ parameter applies uniform scaling to all spheres.\n\ \n\ Connect a VDB to the second input to transfer that VDB's orientation and voxel size\n\ to the output VDBs (see the __Reference VDB__ parameter), and optionally to merge\n\ that VDB's contents into the output VDBs.\n\ \n\ @related\n\ - [Node:sop/scatter]\n\ - [OpenVDB 
Create|Node:sop/DW_OpenVDBCreate]\n\ - [OpenVDB From Polygons|Node:sop/DW_OpenVDBFromPolygons]\n\ - [Node:sop/isooffset]\n\ - [Node:sop/vdbfromparticles]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } //////////////////////////////////////// void SOP_OpenVDB_From_Particles::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms || obsoleteParms->allDefaults() != 0) return; PRM_Parm* parm = obsoleteParms->getParmPtr("footprint"); if (parm && !parm->isFactoryDefault()) { setInt("velocitytrails", 0, 0.0, 1); } resolveRenamedParm(*obsoleteParms, "dR", "particlescale"); resolveRenamedParm(*obsoleteParms, "Rmin", "minradius"); resolveRenamedParm(*obsoleteParms, "dV", "velocityscale"); resolveRenamedParm(*obsoleteParms, "dX", "trailresolution"); resolveRenamedParm(*obsoleteParms, "voxelSize", "voxelsize"); resolveRenamedParm(*obsoleteParms, "maskWidth", "boundinglimit"); resolveRenamedParm(*obsoleteParms, "bandWidth", "halfbandvoxels"); resolveRenamedParm(*obsoleteParms, "bandWidthWS", "halfband"); resolveRenamedParm(*obsoleteParms, "levelSet", "builddistance"); resolveRenamedParm(*obsoleteParms, "fogVolume", "buildfog"); resolveRenamedParm(*obsoleteParms, "maskVolume", "buildmask"); resolveRenamedParm(*obsoleteParms, "writeintoref", "merge"); resolveRenamedParm(*obsoleteParms, "worldSpaceUnits", "useworldspace"); resolveRenamedParm(*obsoleteParms, "gridName", "distancename"); resolveRenamedParm(*obsoleteParms, "fogVolumeGridName", "fogname"); resolveRenamedParm(*obsoleteParms, "maskVolumeGridName", "maskname"); resolveRenamedParm(*obsoleteParms, "group", "referencevdb"); resolveRenamedParm(*obsoleteParms, "distancevdb", "builddistance"); resolveRenamedParm(*obsoleteParms, "distancevdbname", "distancename"); resolveRenamedParm(*obsoleteParms, "fogvdb", "buildfog"); resolveRenamedParm(*obsoleteParms, "fogvdbname", "fogname"); resolveRenamedParm(*obsoleteParms, "maskvdb", "buildmask"); 
resolveRenamedParm(*obsoleteParms, "maskvdbname", "maskname"); // Delegate to the base class. hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } // Enable or disable parameters in the UI. bool SOP_OpenVDB_From_Particles::updateParmsFlags() { bool changed = false; changed |= enableParm("distancename", bool(evalInt("builddistance", 0, 0))); changed |= enableParm("fogname", bool(evalInt("buildfog", 0, 0))); changed |= enableParm("interiormaskname", bool(evalInt("buildinteriormask", 0, 0))); bool useMask = evalInt("buildmask", 0, 0) == 1; changed |= enableParm("boundinglimit", useMask); changed |= enableParm("maskname", useMask); bool useRef = ((this->nInputs() == 2) && (0 != evalInt("usereferencevdb", 0, 0))); changed |= enableParm("referencevdb", useRef); changed |= enableParm("merge", useRef); changed |= enableParm("voxelsize", !useRef); bool useWSUnits = bool(evalInt("useworldspace", 0, 0)); changed |= setVisibleState("halfbandvoxels", !useWSUnits); changed |= setVisibleState("halfband", useWSUnits); bool useTrails = evalInt("velocitytrails", 0, 0) == 1; changed |= enableParm("trailresolution", useTrails); changed |= enableParm("velocityscale", useTrails); changed |= enableParm("attrList", bool(evalInt("buildattrs", 0, 0))); // enable / disable vector type menu UT_String attrName; GA_ROAttributeRef attrRef; const GU_Detail* ptGeo = this->getInputLastGeo(0, CHgetEvalTime()); if (ptGeo) { for (int i = 1, N = static_cast<int>(evalInt("attrList", 0, 0)); i <= N; ++i) { evalStringInst("attribute#", &i, attrName, 0, 0); bool isVector = false; if (attrName.length() != 0) { attrRef = ptGeo->findPointAttribute(attrName); if (attrRef.isValid()) { const GA_Attribute *attr = attrRef.getAttribute(); if (attr) { const GA_TypeInfo typeInfo = attr->getTypeInfo(); isVector = (typeInfo == GA_TYPE_HPOINT || typeInfo == GA_TYPE_POINT || typeInfo == GA_TYPE_VECTOR || typeInfo == GA_TYPE_NORMAL); if (!isVector) { const GA_AIFTuple *tupleAIF = attr->getAIFTuple(); if (tupleAIF) 
isVector = tupleAIF->getTupleSize(attr) == 3; } } } } changed |= enableParmInst("vecType#", &i, isVector); changed |= setVisibleStateInst("vecType#", &i, isVector); } } return changed; } // Callback to convert from voxel to world-space units int SOP_OpenVDB_From_Particles::convertUnits() { const fpreal time = CHgetEvalTime(); float voxSize = 0.1f; // Attempt to extract the voxel size from our cache. if (const auto* cache = dynamic_cast<SOP_OpenVDB_From_Particles::Cache*>(myNodeVerbCache)) { voxSize = cache->voxelSize(); } if (evalInt("useworldspace", 0, time) != 0) { setFloat("halfband", 0, time, evalFloat("halfbandvoxels", 0, time) * voxSize); } else { setFloat("halfbandvoxels", 0, time, evalFloat("halfband", 0, time) / voxSize); } return 1; } // This implementation differs somewhat from ParmFactory::setAttrChoiceList(). void SOP_OpenVDB_From_Particles::buildAttrMenu(void* data, PRM_Name* entries, int maxEntries, const PRM_SpareData* spare, const PRM_Parm*) { if (!data || !entries || !spare) return; size_t menuIdx = 0; entries[menuIdx].setToken("v"); entries[menuIdx++].setLabel("v"); SOP_Node* sop = CAST_SOPNODE(static_cast<OP_Node*>(data)); if (sop == nullptr) { // terminate and quit entries[menuIdx].setToken(0); entries[menuIdx].setLabel(0); return; } const int inputIndex = [&]() { const char* s = spare->getValue("sop_input"); return s ? 
std::atoi(s) : 0; }(); const GU_Detail* gdp = sop->getInputLastGeo(inputIndex, CHgetEvalTime()); size_t menuEnd(maxEntries - 2); if (gdp) { // point attribute names GA_AttributeDict::iterator iter = gdp->pointAttribs().begin(GA_SCOPE_PUBLIC); if (!iter.atEnd() && menuIdx != menuEnd) { if (menuIdx > 0) { entries[menuIdx].setToken(PRM_Name::mySeparator); entries[menuIdx++].setLabel(PRM_Name::mySeparator); } for ( ; !iter.atEnd() && menuIdx != menuEnd; ++iter) { std::ostringstream token; token << (*iter)->getName(); entries[menuIdx].setToken(token.str().c_str()); entries[menuIdx++].setLabel(token.str().c_str()); } // Special case entries[menuIdx].setToken("point_list_index"); entries[menuIdx++].setLabel("point_list_index"); } } // terminator entries[menuIdx].setToken(0); entries[menuIdx].setLabel(0); } //////////////////////////////////////// namespace { // This class implements the particle access interface required by // openvdb::tools::ParticlesToLevelSet. class ParticleList { public: using Real = openvdb::Real; using PosType = openvdb::Vec3R; // required by openvdb::tools::PointPartitioner ParticleList(const GU_Detail* gdp, Real radiusMult = 1, Real velocityMult = 1) : mGdp(gdp) , mScaleHandle(gdp, GA_ATTRIB_POINT, GEO_STD_ATTRIB_PSCALE) , mVelHandle(gdp, GA_ATTRIB_POINT, GEO_STD_ATTRIB_VELOCITY) , mHasRadius(mScaleHandle.isValid()) , mHasVelocity(mVelHandle.isValid()) , mRadiusMult(radiusMult) , mVelocityMult(velocityMult) { } // Do the particles have non-constant radius bool hasRadius() const { return mHasRadius;} // Do the particles have velocity bool hasVelocity() const { return mHasVelocity;} // Multiplier for the radius Real radiusMult() const { return mRadiusMult; } void setRadiusMult(Real mult) { mRadiusMult = mult; } // The public methods below are the only ones required // by tools::ParticlesToLevelSet size_t size() const { return mGdp->getNumPoints(); } // Position of particle in world space // This is required by 
ParticlesToLevelSet::rasterizeSpheres(*this,radius) void getPos(size_t n, PosType& xyz) const { const UT_Vector3 p = mGdp->getPos3(mGdp->pointOffset(n)); xyz[0] = p[0], xyz[1] = p[1], xyz[2] = p[2]; } // Position and radius of particle in world space // This is required by ParticlesToLevelSet::rasterizeSpheres(*this) void getPosRad(size_t n, PosType& xyz, Real& rad) const { UT_ASSERT(mHasRadius); const GA_Offset m = mGdp->pointOffset(n); const UT_Vector3 p = mGdp->getPos3(m); xyz[0] = p[0], xyz[1] = p[1], xyz[2] = p[2]; rad = mRadiusMult*mScaleHandle.get(m); } // Position, radius and velocity of particle in world space // This is required by ParticlesToLevelSet::rasterizeTrails void getPosRadVel(size_t n, PosType& xyz, Real& rad, PosType& vel) const { UT_ASSERT(mHasVelocity); const GA_Offset m = mGdp->pointOffset(n); const UT_Vector3 p = mGdp->getPos3(m); xyz[0] = p[0], xyz[1] = p[1], xyz[2] = p[2]; rad = mHasRadius ? mRadiusMult*mScaleHandle.get(m) : mRadiusMult; const UT_Vector3 v = mVelHandle.get(m); vel[0] = mVelocityMult*v[0], vel[1] = mVelocityMult*v[1], vel[2] = mVelocityMult*v[2]; } // Required for attribute transfer void getAtt(size_t n, openvdb::Int32& att) const { att = openvdb::Int32(n); } protected: const GU_Detail* mGdp; GA_ROHandleF mScaleHandle; GA_ROHandleV3 mVelHandle; const bool mHasRadius, mHasVelocity; Real mRadiusMult; // multiplier for radius const Real mVelocityMult; // multiplier for velocity }; // class ParticleList //////////////////////////////////////// template<class ValueType> inline void addAttributeDetails( hvdb::AttributeDetailList& attributeList, const GA_Attribute* attribute, const GA_AIFTuple* tupleAIF, const int attrTupleSize, const openvdb::Int32Grid& closestPtIdxGrid, std::string& customName, int vecType = -1) { // Defines a new type of a tree having the same hierarchy as the incoming // Int32Grid's tree but potentially a different value type. 
using TreeType = typename openvdb::Int32Grid::TreeType::ValueConverter<ValueType>::Type; using GridType = typename openvdb::Grid<TreeType>; if (vecType != -1) { // Vector grid // Get the attribute's default value. ValueType defValue = hvdb::evalAttrDefault<ValueType>(tupleAIF->getDefaults(attribute), 0); // Construct a new tree that matches the closestPtIdxGrid's active voxel topology. typename TreeType::Ptr tree( new TreeType(closestPtIdxGrid.tree(), defValue, openvdb::TopologyCopy())); typename GridType::Ptr grid(GridType::create(tree)); grid->setVectorType(openvdb::VecType(vecType)); attributeList.push_back(hvdb::AttributeDetailBase::Ptr( new hvdb::AttributeDetail<GridType>(grid, attribute, tupleAIF, 0, true))); if (customName.size() > 0) { attributeList[attributeList.size()-1]->name() = customName; } } else { for (int c = 0; c < attrTupleSize; ++c) { // Get the attribute's default value. ValueType defValue = hvdb::evalAttrDefault<ValueType>(tupleAIF->getDefaults(attribute), c); // Construct a new tree that matches the closestPtIdxGrid's active voxel topology. typename TreeType::Ptr tree( new TreeType(closestPtIdxGrid.tree(), defValue, openvdb::TopologyCopy())); typename GridType::Ptr grid(GridType::create(tree)); attributeList.push_back(hvdb::AttributeDetailBase::Ptr( new hvdb::AttributeDetail<GridType>(grid, attribute, tupleAIF, c))); if (customName.size() > 0) { std::ostringstream name; name << customName; if(attrTupleSize != 1) name << "_" << c; attributeList[attributeList.size()-1]->name() = name.str(); } } } } inline void transferAttributes( hvdb::AttributeDetailList& pointAttributes, const openvdb::Int32Grid& closestPtIdxGrid, openvdb::math::Transform::Ptr transform, const GU_Detail& ptGeo, GU_Detail& outputGeo) { // Threaded attribute transfer. 
hvdb::PointAttrTransfer transferOp(pointAttributes, closestPtIdxGrid, ptGeo); transferOp.runParallel(); // Construct and add VDB primitives to the gdp for (size_t i = 0, N = pointAttributes.size(); i < N; ++i) { hvdb::AttributeDetailBase::Ptr& attrDetail = pointAttributes[i]; std::ostringstream gridName; gridName << attrDetail->name(); attrDetail->grid()->setTransform(transform); hvdb::createVdbPrimitive(outputGeo, attrDetail->grid(), gridName.str().c_str()); } } template<typename AttrT, typename GridT> inline openvdb::Int32Grid::Ptr convertImpl( const ParticleList& paList, GridT& outGrid, float minRadius, float maxRadius, bool velocityTrails, float trailRes, hvdb::Interrupter& boss, size_t& numTooSmall, size_t& numTooLarge) { openvdb::tools::ParticlesToLevelSet<GridT, AttrT, hvdb::Interrupter> raster(outGrid, &boss); raster.setRmin(minRadius); raster.setRmax(maxRadius); if (velocityTrails) { raster.rasterizeTrails(paList, trailRes); } else if (paList.hasRadius()) { raster.rasterizeSpheres(paList); } else { raster.rasterizeSpheres(paList, paList.radiusMult()); } // Always prune to produce a valid narrow-band level set. 
raster.finalize(/*prune=*/true); numTooSmall = raster.getMinCount(); numTooLarge = raster.getMaxCount(); return openvdb::gridPtrCast<openvdb::Int32Grid>(raster.attributeGrid()); } inline std::string getIgnoredParticleWarning(size_t numTooSmall, size_t numTooLarge) { std::string mesg; if (numTooSmall || numTooLarge) { std::ostringstream ostr; ostr << "Ignored "; if (numTooSmall) { ostr << numTooSmall << " small"; } if (numTooSmall && numTooLarge) { ostr << " and "; } if (numTooLarge) { ostr << numTooLarge << " large"; } ostr << " particles (hint: change Minimum Radius in Voxels)"; mesg = ostr.str(); } return mesg; } } // anonymous namespace void SOP_OpenVDB_From_Particles::Cache::convert( fpreal time, ParticleList& paList, openvdb::FloatGrid::Ptr sdfGrid, openvdb::BoolGrid::Ptr maskGrid, hvdb::Interrupter& boss) { using NoAttrs = void; const bool velocityTrails = paList.hasVelocity() && (0 != evalInt("velocitytrails", 0, time)); const float minRadius = float(evalFloat("minradius", 0, time)), maxRadius = 1e15f, trailRes = (!velocityTrails ? 1.f : float(evalFloat("trailresolution", 0, time))); size_t numTooSmall = 0, numTooLarge = 0; if (sdfGrid) { convertImpl<NoAttrs>(paList, *sdfGrid, minRadius, maxRadius, velocityTrails, trailRes, boss, numTooSmall, numTooLarge); } if (maskGrid) { convertImpl<NoAttrs>(paList, *maskGrid, minRadius, maxRadius, velocityTrails, trailRes, boss, numTooSmall, numTooLarge); } { const auto mesg = getIgnoredParticleWarning(numTooSmall, numTooLarge); if (!mesg.empty()) { addWarning(SOP_MESSAGE, mesg.c_str()); } } } void SOP_OpenVDB_From_Particles::Cache::convertWithAttributes( fpreal time, const GU_Detail& ptGeo, ParticleList& paList, openvdb::FloatGrid::Ptr sdfGrid, openvdb::BoolGrid::Ptr maskGrid, hvdb::Interrupter& boss) { const bool velocityTrails = paList.hasVelocity() && (0 != evalInt("velocitytrails", 0, time)); const float minRadius = float(evalFloat("minradius", 0, time)), maxRadius = 1e15f, trailRes = (!velocityTrails ? 
1.f : float(evalFloat("trailresolution", 0, time))); openvdb::Int32Grid::Ptr closestPtIdxGrid; size_t numTooSmall = 0, numTooLarge = 0; if (sdfGrid) { closestPtIdxGrid = convertImpl<openvdb::Int32>(paList, *sdfGrid, minRadius, maxRadius, velocityTrails, trailRes, boss, numTooSmall, numTooLarge); } if (maskGrid) { if (closestPtIdxGrid) { // For backward compatibility, the index grid associated with the SDF // takes precedence over one associated with the mask. using NoAttrs = void; convertImpl<NoAttrs>(paList, *maskGrid, minRadius, maxRadius, velocityTrails, trailRes, boss, numTooSmall, numTooLarge); } else { closestPtIdxGrid = convertImpl<openvdb::Int32>(paList, *maskGrid, minRadius, maxRadius, velocityTrails, trailRes, boss, numTooSmall, numTooLarge); } } { const auto mesg = getIgnoredParticleWarning(numTooSmall, numTooLarge); if (!mesg.empty()) { addWarning(SOP_MESSAGE, mesg.c_str()); } } if (!closestPtIdxGrid || boss.wasInterrupted()) return; // Transfer point attributes. if ((0 != evalInt("buildattrs", 0, time)) && (evalInt("attrList", 0, time) > 0)) { hvdb::AttributeDetailList pointAttributes; int closestPointIndexInstance = constructGenericAtttributeList(time, pointAttributes, ptGeo, *closestPtIdxGrid); auto transform = (sdfGrid ? sdfGrid->transformPtr() : maskGrid->transformPtr()); transferAttributes(pointAttributes, *closestPtIdxGrid, transform, ptGeo, *gdp); // Export the closest-point index grid. 
if (closestPointIndexInstance > -1) { UT_String gridNameStr; evalStringInst("attributeGridName#", &closestPointIndexInstance, gridNameStr, 0, time); if (gridNameStr.length() == 0) gridNameStr = "point_list_index"; hvdb::createVdbPrimitive(*gdp, closestPtIdxGrid, gridNameStr.toStdString().c_str()); } } } // Helper method for point attribute transfer int SOP_OpenVDB_From_Particles::Cache::constructGenericAtttributeList( fpreal time, hvdb::AttributeDetailList &pointAttributes, const GU_Detail& ptGeo, const openvdb::Int32Grid& closestPtIdxGrid) { UT_String attrName; GA_ROAttributeRef attrRef; GA_Range range; int closestPointIndexInstance = -1; // for each selected attribute for (int i = 1, N = static_cast<int>(evalInt("attrList", 0, time)); i <= N; ++i) { evalStringInst("attribute#", &i, attrName, 0, time); if (attrName.length() == 0) continue; if (attrName == "point_list_index") { // The closest point index grid is a special case, // the converter has already generated it for us. closestPointIndexInstance = i; continue; } attrRef = ptGeo.findPointAttribute(attrName); if (!attrRef.isValid()) { std::ostringstream ostr; ostr << "Skipped unrecognized attribute: '"<< attrName << "'"; addWarning(SOP_MESSAGE, ostr.str().c_str()); continue; } evalStringInst("attributeGridName#", &i, attrName, 0, time); std::string customName = attrName.toStdString(); int vecType = static_cast<int>(evalIntInst("vecType#", &i, 0, time)); const GA_Attribute *attr = attrRef.getAttribute(); if (!attr) { addWarning(SOP_MESSAGE, "Skipped unrecognized attribute type"); continue; } const GA_AIFTuple *tupleAIF = attr->getAIFTuple(); if (!tupleAIF) { addWarning(SOP_MESSAGE, "Skipped unrecognized attribute type"); continue; } const GA_Storage attrStorage = tupleAIF->getStorage(attr); const int attrTupleSize = tupleAIF->getTupleSize(attr); const GA_TypeInfo typeInfo = attr->getTypeInfo(); const bool interpertAsVector = (typeInfo == GA_TYPE_HPOINT || typeInfo == GA_TYPE_POINT || typeInfo == GA_TYPE_VECTOR 
|| typeInfo == GA_TYPE_NORMAL); switch (attrStorage) { case GA_STORE_INT16: case GA_STORE_INT32: if (interpertAsVector || attrTupleSize == 3) { addAttributeDetails<openvdb::Vec3i>(pointAttributes, attr, tupleAIF, attrTupleSize, closestPtIdxGrid, customName, vecType); } else { addAttributeDetails<openvdb::Int32>(pointAttributes, attr, tupleAIF, attrTupleSize, closestPtIdxGrid, customName); } break; case GA_STORE_INT64: addAttributeDetails<openvdb::Int64>(pointAttributes, attr, tupleAIF, attrTupleSize, closestPtIdxGrid, customName); break; case GA_STORE_REAL16: case GA_STORE_REAL32: if (interpertAsVector || attrTupleSize == 3) { addAttributeDetails<openvdb::Vec3s>(pointAttributes, attr, tupleAIF, attrTupleSize, closestPtIdxGrid, customName, vecType); } else { addAttributeDetails<float>(pointAttributes, attr, tupleAIF, attrTupleSize, closestPtIdxGrid, customName); } break; case GA_STORE_REAL64: if (interpertAsVector || attrTupleSize == 3) { addAttributeDetails<openvdb::Vec3d>(pointAttributes, attr, tupleAIF, attrTupleSize, closestPtIdxGrid, customName, vecType); } else { addAttributeDetails<double>(pointAttributes, attr, tupleAIF, attrTupleSize, closestPtIdxGrid, customName); } break; default: addWarning(SOP_MESSAGE, "Skipped unrecognized attribute type"); break; } } return closestPointIndexInstance; } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_From_Particles::Cache::cookVDBSop(OP_Context& context) { try { hvdb::Interrupter boss("Creating VDBs from particles"); const GU_Detail* ptGeo = inputGeo(0, context); const GU_Detail* refGeo = inputGeo(1, context); const fpreal time = context.getTime(); mVoxelSize = float(evalFloat("voxelsize", 0, time)); if (mVoxelSize < 1e-5) { std::ostringstream ostr; ostr << "The voxel size ("<< mVoxelSize << ") is too small."; addError(SOP_MESSAGE, ostr.str().c_str()); return error(); } const bool outputLevelSetGrid = (0 != evalInt("builddistance", 0, time)), outputFogVolumeGrid = (0 != evalInt("buildfog", 0, time)), 
outputInteriorMaskGrid = (0 != evalInt("buildinteriormask", 0, time)), outputBoundingMaskGrid = (0 != evalInt("buildmask", 0, time)), outputAttributeGrid = ((0 != evalInt("buildattrs", 0, time)) && (evalInt("attrList", 0, time) > 0)), needLeveLSet = (outputLevelSetGrid || outputFogVolumeGrid || outputBoundingMaskGrid || (outputAttributeGrid && !outputInteriorMaskGrid)); if (!outputFogVolumeGrid && !outputLevelSetGrid && !outputAttributeGrid && !outputInteriorMaskGrid) { addWarning(SOP_MESSAGE, "No output selected"); return error(); } ParticleList paList(ptGeo, evalFloat("particlescale", 0, time), evalFloat("velocityscale", 0, time)); float background = 0.0; if (evalInt("useworldspace", 0, time) != 0) { background = float(evalFloat("halfband", 0, time)); } else { background = mVoxelSize * float(evalFloat("halfbandvoxels", 0, time)); } auto transform = openvdb::math::Transform::createLinearTransform(mVoxelSize); openvdb::FloatGrid::Ptr sdfGrid; openvdb::BoolGrid::Ptr maskGrid; openvdb::MaskGrid::Ptr pointMaskGrid; // Optionally copy the reference grid and/or its transform. hvdb::GridCPtr refGrid; if (refGeo && (0 != evalInt("usereferencevdb", 0, time))) { const auto refName = evalStdString("referencevdb", time); hvdb::VdbPrimCIterator it(refGeo, matchGroup(*refGeo, refName)); if (const hvdb::GU_PrimVDB* refPrim = (it ? *it : nullptr)) { refGrid = refPrim->getGridPtr(); } else { addError(SOP_MESSAGE, ("No reference VDB matching \"" + refName + "\" was found.").c_str()); return error(); } } if (refGrid) { transform = refGrid->transform().copy(); mVoxelSize = static_cast<float>(transform->voxelSize()[0]); // Match the narrow band width. const bool isLevelSet = ((refGrid->getGridClass() == openvdb::GRID_LEVEL_SET) && refGrid->isType<openvdb::FloatGrid>()); if (isLevelSet) { background = openvdb::gridConstPtrCast<openvdb::FloatGrid>(refGrid)->background(); addMessage(SOP_MESSAGE, "Matching the reference level set's half-band width " " and background value. 
The Half Band setting will be ignored."); } if (evalInt("merge", 0, time) != 0) { if (needLeveLSet && isLevelSet) { sdfGrid = openvdb::gridPtrCast<openvdb::FloatGrid>(refGrid->deepCopyGrid()); } if (outputInteriorMaskGrid && refGrid->isType<openvdb::BoolGrid>()) { maskGrid = openvdb::gridPtrCast<openvdb::BoolGrid>(refGrid->deepCopyGrid()); } if (!sdfGrid && !maskGrid) { if (needLeveLSet) { addWarning(SOP_MESSAGE, "Can only merge with a level set reference VDB."); } else { addWarning(SOP_MESSAGE, "Can only merge with a boolean reference VDB."); } } } } if (boss.wasInterrupted()) { return error(); } if (needLeveLSet) { if (!sdfGrid) { sdfGrid = openvdb::FloatGrid::create(background); } sdfGrid->setGridClass(openvdb::GRID_LEVEL_SET); sdfGrid->setTransform(transform); } if (outputInteriorMaskGrid) { if (!maskGrid) { maskGrid = openvdb::BoolGrid::create(); } maskGrid->setTransform(transform); } // Perform the particle conversion. const bool doSphereConversion = evalInt("conversion", 0, time) == 0; // Point topology conversion settings int dilation = static_cast<int>(evalInt("dilation", 0, time)); int closing = static_cast<int>(evalInt("closing", 0, time)); int smoothing = static_cast<int>(evalInt("smoothing", 0, time)); int bandWidth = int(std::ceil(background / mVoxelSize)); if (doSphereConversion) { if (evalInt("velocitytrails", 0, time) != 0 && !paList.hasVelocity()) { addWarning(SOP_MESSAGE, "Velocity trails require a velocity point attribute" " named 'v' of type 3fv."); } if (outputAttributeGrid) { this->convertWithAttributes(time, *ptGeo, paList, sdfGrid, maskGrid, boss); } else { this->convert(time, paList, sdfGrid, maskGrid, boss); } } else { pointMaskGrid = GUvdbCreatePointMaskGrid(*transform, *ptGeo); if (sdfGrid) { openvdb::FloatGrid::Ptr pointSdfGrid = openvdb::tools::topologyToLevelSet( *pointMaskGrid, bandWidth, closing, dilation, smoothing, &boss); openvdb::tools::csgUnion(*sdfGrid, *pointSdfGrid); } if (maskGrid) { openvdb::BoolTree::Ptr maskTree(new 
openvdb::BoolTree(pointMaskGrid->tree(), /*off=*/false, /*on=*/true, openvdb::TopologyCopy())); if (dilation > 0) { openvdb::tools::dilateActiveValues(*maskTree, dilation); } maskGrid->setTree(maskTree); } } if (outputBoundingMaskGrid) { openvdb::Real radiusScale = paList.radiusMult(); openvdb::Real offset = openvdb::Real(evalFloat("boundinglimit", 0, time)); offset = std::min(std::max(offset, 0.0), 1.0); // clamp to zero-one range. openvdb::FloatGrid::Ptr maxGrid = openvdb::FloatGrid::create(background); maxGrid->setGridClass(openvdb::GRID_LEVEL_SET); maxGrid->setTransform(transform->copy()); openvdb::FloatGrid::Ptr minGrid = openvdb::FloatGrid::create(background); minGrid->setGridClass(openvdb::GRID_LEVEL_SET); minGrid->setTransform(transform->copy()); if (offset > 0.0f) { if (doSphereConversion) { paList.setRadiusMult(radiusScale * (1.0 + offset)); this->convert(time, paList, maxGrid, nullptr, boss); paList.setRadiusMult(radiusScale * (1.0 - offset)); this->convert(time, paList, minGrid, nullptr, boss); } else { if (!pointMaskGrid) { pointMaskGrid = GUvdbCreatePointMaskGrid(*transform, *ptGeo); } openvdb::Real dx = openvdb::Real(std::min(dilation, 1)); int increase = int(std::ceil(dx * (1.0 + offset))); int decrease = int(dx * (1.0 - offset)); maxGrid = openvdb::tools::topologyToLevelSet( *pointMaskGrid, bandWidth, closing, increase, smoothing, &boss); minGrid = openvdb::tools::topologyToLevelSet( *pointMaskGrid, bandWidth, closing, decrease, smoothing, &boss); } } openvdb::tools::csgDifference(*maxGrid, *minGrid); openvdb::tools::sdfToFogVolume(*maxGrid); maxGrid->setName(evalStdString("maskname", time)); hvdb::createVdbPrimitive(*gdp, maxGrid); } if (outputLevelSetGrid && sdfGrid) { sdfGrid->setName(evalStdString("distancename", time)); hvdb::createVdbPrimitive(*gdp, sdfGrid); } if (outputInteriorMaskGrid && maskGrid) { maskGrid->setName(evalStdString("interiormaskname", time)); hvdb::createVdbPrimitive(*gdp, maskGrid); } if (outputFogVolumeGrid && sdfGrid) { 
// Only duplicate the output grid if both distance and fog volume grids are exported. auto fogGrid = (!outputLevelSetGrid ? sdfGrid : sdfGrid->deepCopy()); openvdb::tools::sdfToFogVolume(*fogGrid); fogGrid->setName(evalStdString("fogname", time)); hvdb::createVdbPrimitive(*gdp, fogGrid); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
50,327
C++
39.006359
99
0.625887
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/ParmFactory.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file ParmFactory.h /// @author FX R&D OpenVDB team /// /// @brief A collection of factory methods and helper functions /// to simplify Houdini plugin development and maintenance. #ifndef HOUDINI_UTILS_PARM_FACTORY_HAS_BEEN_INCLUDED #define HOUDINI_UTILS_PARM_FACTORY_HAS_BEEN_INCLUDED #include <GA/GA_Attribute.h> #include <OP/OP_AutoLockInputs.h> #include <OP/OP_Operator.h> #include <PRM/PRM_Include.h> #include <PRM/PRM_SpareData.h> #include <SOP/SOP_Node.h> #include <SOP/SOP_NodeVerb.h> #if defined(PRODDEV_BUILD) || defined(DWREAL_IS_DOUBLE) // OPENVDB_HOUDINI_API, which has no meaning in a DWA build environment but // must at least exist, is normally defined by including openvdb/Platform.h. // For DWA builds (i.e., if either PRODDEV_BUILD or DWREAL_IS_DOUBLE exists), // that introduces an unwanted and unnecessary library dependency. #ifndef OPENVDB_HOUDINI_API #define OPENVDB_HOUDINI_API #endif #else #include <openvdb/version.h> #endif #include <exception> #include <functional> #include <map> #include <memory> #include <string> #include <vector> #ifdef SESI_OPENVDB #ifdef OPENVDB_HOUDINI_API #undef OPENVDB_HOUDINI_API #define OPENVDB_HOUDINI_API #endif #endif class GU_Detail; class OP_OperatorTable; class PRM_Parm; namespace houdini_utils { class ParmFactory; using SpareDataMap = std::map<std::string, std::string>; /// @brief Return the spare data associated with the given operator. /// @details Only operators created with OpFactory will have spare data. /// @sa @link addOperatorSpareData() addOperatorSpareData@endlink, /// @link OpFactory::addSpareData() OpFactory::addSpareData@endlink const SpareDataMap& getOperatorSpareData(const OP_Operator&); /// @brief Specify (@e key, @e value) pairs of spare data for the given operator. /// @details For existing keys, the new value replaces the old one. 
/// @throw std::runtime_error if the given operator does not support spare data /// (only operators created with OpFactory will have spare data) /// @sa @link getOperatorSpareData() getOperatorSpareData@endlink, /// @link OpFactory::addSpareData() OpFactory::addSpareData@endlink void addOperatorSpareData(OP_Operator&, const SpareDataMap&); /// @brief Parameter template list that is always terminated. class OPENVDB_HOUDINI_API ParmList { public: using PrmTemplateVec = std::vector<PRM_Template>; ParmList() {} /// @brief Return @c true if this list contains no parameters. bool empty() const { return mParmVec.empty(); } /// @brief Return the number of parameters in this list. /// @note Some parameter types have parameter lists of their own. /// Those nested lists are not included in this count. size_t size() const { return mParmVec.size(); } /// @brief Remove all parameters from this list. void clear() { mParmVec.clear(); mSwitchers.clear(); } /// @{ /// @brief Add a parameter to this list. ParmList& add(const PRM_Template&); ParmList& add(const ParmFactory&); /// @} /// @brief Begin a collection of tabs. /// @details Tabs may be nested. ParmList& beginSwitcher(const std::string& token, const std::string& label = ""); /// @brief Begin an exclusive collection of tabs. Only one tab is "active" at a time. /// @details Tabs may be nested. ParmList& beginExclusiveSwitcher(const std::string& token, const std::string& label = ""); /// @brief End a collection of tabs. /// @throw std::runtime_error if not inside a switcher or if no tabs /// were added to the switcher ParmList& endSwitcher(); /// @brief Add a tab with the given label to the current tab collection. /// @details Parameters subsequently added to this ParmList until the next /// addFolder() or endSwitcher() call will be displayed on the tab. /// @throw std::runtime_error if not inside a switcher ParmList& addFolder(const std::string& label); /// Return a heap-allocated copy of this list's array of parameters. 
PRM_Template* get() const; private: struct SwitcherInfo { size_t parmIdx; std::vector<PRM_Default> folders; bool exclusive; }; using SwitcherStack = std::vector<SwitcherInfo>; void incFolderParmCount(); SwitcherInfo* getCurrentSwitcher(); PrmTemplateVec mParmVec; SwitcherStack mSwitchers; }; // class ParmList //////////////////////////////////////// /// @class ParmFactory /// @brief Helper class to simplify construction of PRM_Templates and /// dynamic user interfaces. /// /// @par Example /// @code /// houdini_utils::ParmList parms; /// /// parms.add(houdini_utils::ParmFactory(PRM_STRING, "group", "Group") /// .setHelpText("Specify a subset of the input VDB grids to be processed.") /// .setChoiceList(&houdini_utils::PrimGroupMenu)); /// /// parms.add(houdini_utils::ParmFactory(PRM_FLT_J, "tolerance", "Pruning Tolerance") /// .setDefault(PRMzeroDefaults) /// .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 1)); /// @endcode class OPENVDB_HOUDINI_API ParmFactory { public: ParmFactory(PRM_Type, const std::string& token, const std::string& label); ParmFactory(PRM_MultiType, const std::string& token, const std::string& label); // Settings ParmFactory& setCallbackFunc(const PRM_Callback&); /// Specify a menu of values for this parameter. ParmFactory& setChoiceList(const PRM_ChoiceList*); /// @brief Specify a menu type and a list of token, label, token, label,... pairs /// for this parameter. /// @param typ specifies the menu behavior (toggle, replace, etc.) /// @param items a list of token, label, token, label,... string pairs ParmFactory& setChoiceListItems(PRM_ChoiceListType typ, const std::vector<std::string>& items); /// @brief Specify a menu type and a list of token, label, token, label,... pairs /// for this parameter. /// @param typ specifies the menu behavior (toggle, replace, etc.) /// @param items a list of token, label, token, label,... string pairs /// @note The @a items array must be null-terminated. 
ParmFactory& setChoiceListItems(PRM_ChoiceListType typ, const char* const* items); /// @brief Specify a menu of primitive group names for this parameter. /// /// @param inputIndex the zero-based index of the input from which to get primitive groups /// @param typ the menu behavior (toggle, replace, etc.) /// /// @details Calling this method with the default (toggle) behavior is equivalent /// to calling @c setChoiceList(&houdini_utils::PrimGroupMenuInput1), /// @c setChoiceList(&houdini_utils::PrimGroupMenuInput2), etc. /// /// @par Example /// To limit the user to choosing a single primitive group, replace /// @code /// parms.add(houdini_utils::ParmFactory(PRM_STRING, "reference", "Reference") /// .setChoiceList(&houdini_utils::PrimGroupMenuInput2); /// @endcode /// with /// @code /// parms.add(houdini_utils::ParmFactory(PRM_STRING, "reference", "Reference") /// .setGroupChoiceList(1, PRM_CHOICELIST_REPLACE); // input index is zero based /// @endcode ParmFactory& setGroupChoiceList(size_t inputIndex, PRM_ChoiceListType typ = PRM_CHOICELIST_TOGGLE); /// @brief Functor to filter a list of attributes from a SOP's input /// @details Arguments to the functor are an attribute to be filtered /// and the parameter and SOP for which the filter is being called. /// The functor should return @c true for attributes that should be added /// to the list and @c false for attributes that should be ignored. using AttrFilterFunc = std::function<bool (const GA_Attribute&, const PRM_Parm&, const SOP_Node&)>; /// @brief Specify a menu of attribute names for this parameter. 
/// /// @param inputIndex the zero-based index of the input from which to get attributes /// @param attrOwner the class of attribute with which to populate the menu: /// either per-vertex (@c GA_ATTRIB_VERTEX), per-point (@c GA_ATTRIB_POINT), /// per-primitive (@c GA_ATTRIB_PRIMITIVE), global (@c GA_ATTRIB_GLOBAL), /// or all of the above (@c GA_ATTRIB_INVALID or any other value) /// @param typ the menu behavior (toggle, replace, etc.) /// @param attrFilter an optional filter functor that returns @c true for each /// attribute that should appear in the menu; the functor will be moved, /// if possible, or else copied /// /// @note This method is supported only for SOPs. /// /// @par Example /// Create a menu that allows multiple selection from among all the string attributes /// on a SOP's first input: /// @code /// houdini_utils::ParmList parms; /// parms.add(houdini_utils::ParmFactory(PRM_STRING, "stringattr", "String Attribute") /// .setAttrChoiceList(/*input=*/0, GA_ATTRIB_INVALID, PRM_CHOICELIST_TOGGLE, /// [](const GA_Attribute& attr, const PRM_Parm&, const SOP_Node&) { /// return (attr.getStorageClass() == GA_STORECLASS_STRING); /// })); /// @endcode ParmFactory& setAttrChoiceList(size_t inputIndex, GA_AttributeOwner attrOwner, PRM_ChoiceListType typ = PRM_CHOICELIST_TOGGLE, AttrFilterFunc attrFilter = AttrFilterFunc{}); #if defined(GCC3) #define IS_DEPRECATED __attribute__ ((deprecated)) #elif defined(_MSC_VER) #define IS_DEPRECATED __declspec(deprecated) #else #define IS_DEPRECATED #endif /// @brief Specify a menu type and either a list of menu item labels or a list of /// token, label, token, label,... pairs for this parameter. /// @param typ specifies the menu behavior (toggle, replace, etc.) /// @param items a list of menu item labels or token, label, token, label,... 
pairs /// @param paired if @c false, treat all the elements of @a items as labels and assign /// them numeric tokens starting from zero; otherwise, treat the elements of @a items /// as token, label, token, label,... pairs /// @deprecated Use setChoiceListItems() instead. Using unpaired items may mean /// less typing now, but it prevents you from reordering or deleting entries later. IS_DEPRECATED ParmFactory& setChoiceList(PRM_ChoiceListType typ, const std::vector<std::string>& items, bool paired = false); /// @brief Specify a menu type and either a list of menu item labels or a list of /// token, label, token, label,... pairs for this parameter. /// @param typ specifies the menu behavior (toggle, replace, etc.) /// @param items a list of menu item labels or token, label, token, label,... pairs /// @param paired if @c false, treat all the elements of @a items as labels and assign /// them numeric tokens starting from zero; otherwise, treat the elements of @a items /// as token, label, token, label,... pairs /// @note The @a items array must be null-terminated. /// @deprecated Use setChoiceListItems() instead. Using unpaired items may mean /// less typing now, but it prevents you from reordering or deleting entries later. IS_DEPRECATED ParmFactory& setChoiceList(PRM_ChoiceListType typ, const char* const* items, bool paired = false); #undef IS_DEPRECATED ParmFactory& setConditional(const PRM_ConditionalBase*); /// @brief Specify a default value for this parameter. /// @details If the string is null, the floating-point value will be used /// (but rounded if this parameter is integer-valued). /// @note The string pointer must not point to a temporary. ParmFactory& setDefault(fpreal, const char* = nullptr, CH_StringMeaning = CH_STRING_LITERAL); /// @brief Specify a default string value for this parameter. 
ParmFactory& setDefault(const std::string&, CH_StringMeaning = CH_STRING_LITERAL); /// @brief Specify default numeric values for the vector elements of this parameter /// (assuming its vector size is > 1). /// @details Floating-point values will be rounded if this parameter is integer-valued. ParmFactory& setDefault(const std::vector<fpreal>&); /// @brief Specify default values for the vector elements of this parameter /// (assuming its vector size is > 1). ParmFactory& setDefault(const std::vector<PRM_Default>&); /// Specify a default value or values for this parameter. ParmFactory& setDefault(const PRM_Default*); /// @brief Specify a plain text tooltip for this parameter. /// @details This method is equivalent to setTooltip() ParmFactory& setHelpText(const char*); /// @brief Specify a plain text tooltip for this parameter. /// @details This method is equivalent to setHelpText() ParmFactory& setTooltip(const char*); /// @brief Add documentation for this parameter. /// @details Pass a null pointer or an empty string to inhibit /// the generation of documentation for this parameter. /// @details The text is parsed as wiki markup. /// See the Houdini <A HREF="http://www.sidefx.com/docs/houdini/help/format"> /// Wiki Markup Reference</A> for the syntax. ParmFactory& setDocumentation(const char*); ParmFactory& setParmGroup(int); /// Specify a range for this parameter's values. ParmFactory& setRange( PRM_RangeFlag minFlag, fpreal minVal, PRM_RangeFlag maxFlag, fpreal maxVal); /// @brief Specify range for the values of this parameter's vector elements /// (assuming its vector size is > 1). ParmFactory& setRange(const std::vector<PRM_Range>&); /// Specify a range or ranges for this parameter's values. ParmFactory& setRange(const PRM_Range*); /// Specify (@e key, @e value) pairs of spare data for this parameter. ParmFactory& setSpareData(const SpareDataMap&); /// Specify spare data for this parameter. 
ParmFactory& setSpareData(const PRM_SpareData*); /// @brief Specify the list of parameters for each instance of a multiparm. /// @note This setting is ignored for non-multiparm parameters. /// @note Parameter name tokens should include a '#' character. ParmFactory& setMultiparms(const ParmList&); /// Specify an extended type for this parameter. ParmFactory& setTypeExtended(PRM_TypeExtended); /// @brief Specify the number of vector elements for this parameter. /// @details (The default vector size is one element.) ParmFactory& setVectorSize(int); /// @brief Mark this parameter as hidden from the UI. /// @note Marking parameters as obsolete is preferable to making them invisible as changing /// invisible parameter values will still trigger a re-cook, however this is not possible /// when using multi-parms. ParmFactory& setInvisible(); /// Construct and return the parameter template. PRM_Template get() const; private: struct Impl; std::shared_ptr<Impl> mImpl; // For internal use only, and soon to be removed: ParmFactory& doSetChoiceList(PRM_ChoiceListType, const std::vector<std::string>&, bool); ParmFactory& doSetChoiceList(PRM_ChoiceListType, const char* const* items, bool); }; // class ParmFactory //////////////////////////////////////// class OpPolicy; using OpPolicyPtr = std::shared_ptr<OpPolicy>; /// @brief Helper class to simplify operator registration /// /// @par Example /// @code /// void /// newPopOperator(OP_OperatorTable* table) /// { /// houdini_utils::ParmList parms; /// /// parms.add(houdini_utils::ParmFactory(PRM_STRING, "group", "Group") /// .setHelpText("Specify a subset of the input VDB grids to be processed.") /// .setChoiceList(&houdini_utils::PrimGroupMenu)); /// /// parms.add(...); /// /// ... 
/// /// houdini_utils::OpFactory(MyOpPolicy(), My Node", /// POP_DW_MyNode::factory, parms, *table, houdini_utils::OpFactory::POP) /// .addInput("Input geometry") // input 0 (required) /// .addOptionalInput("Reference geometry"); // input 1 (optional) /// } /// @endcode class OPENVDB_HOUDINI_API OpFactory { public: enum OpFlavor { SOP, POP, ROP, VOP, HDA }; /// @brief Return "SOP" for the SOP flavor, "POP" for the POP flavor, etc. /// @details Useful in OpPolicy classes for constructing type and icon names. static std::string flavorToString(OpFlavor); /// @brief Construct a factory that on destruction registers a new operator type. /// @param english the operator's UI name, as it should appear in menus /// @param ctor a factory function that creates operators of this type /// @param parms the parameter template list for operators of this type /// @param table the registry to which to add this operator type /// @param flavor the operator's class (SOP, POP, etc.) /// @details @c OpPolicyType specifies the type of OpPolicy to be used to control /// the factory's behavior. The (unused) @c OpPolicyType argument is required /// to enable the compiler to infer the type of the template argument /// (there is no other way to invoke a templated constructor). template<typename OpPolicyType> OpFactory(const OpPolicyType& /*unused*/, const std::string& english, OP_Constructor ctor, ParmList& parms, OP_OperatorTable& table, OpFlavor flavor = SOP) { this->init(OpPolicyPtr(new OpPolicyType), english, ctor, parms, table, flavor); } /// @note Factories initialized with this constructor use the DWAOpPolicy. OpFactory(const std::string& english, OP_Constructor ctor, ParmList& parms, OP_OperatorTable& table, OpFlavor flavor = SOP); /// Register the operator. 
#if OPENVDB_ABI_VERSION_NUMBER >= 7 virtual ~OpFactory(); #else ~OpFactory(); #endif OpFactory(const OpFactory&) = delete; OpFactory& operator=(const OpFactory&) = delete; /// @brief Return the new operator's flavor (SOP, POP, etc.). /// @details This accessor is mainly for use by OpPolicy objects. OpFlavor flavor() const; /// @brief Return the new operator's flavor as a string ("SOP", "POP", etc.). /// @details This accessor is mainly for use by OpPolicy objects. std::string flavorString() const; /// @brief Return the new operator's type name. /// @details This accessor is mainly for use by OpPolicy objects. const std::string& name() const; /// @brief Return the new operator's UI name. /// @details This accessor is mainly for use by OpPolicy objects. const std::string& english() const; /// @brief Return the new operator's icon name. /// @details This accessor is mainly for use by OpPolicy objects. const std::string& iconName() const; /// @brief Return the new operator's help URL. /// @details This accessor is mainly for use by OpPolicy objects. /// @note A help URL takes precedence over help text. /// @sa helpText(), setHelpText() const std::string& helpURL() const; /// @brief Return the new operator's documentation. /// @note If the help URL is nonempty, the URL takes precedence over any help text. /// @sa helpURL(), setDocumentation() const std::string& documentation() const; /// @brief Return the operator table with which this factory is associated. /// @details This accessor is mainly for use by OpPolicy objects. const OP_OperatorTable& table() const; /// @brief Construct a type name for this operator from the given English name /// and add it as an alias. /// @details For backward compatibility when an operator needs to be renamed, /// add the old name as an alias. OpFactory& addAlias(const std::string& english); /// @brief Add an alias for this operator. 
/// @details For backward compatibility when an operator needs to be renamed, /// add the old name as an alias. /// @note This variant takes an operator type name rather than an English name. OpFactory& addAliasVerbatim(const std::string& name); /// @brief Add documentation for this operator. /// @details The text is parsed as wiki markup. /// @note If this factory's OpPolicy specifies a help URL, that URL /// takes precedence over documentation supplied with this method. OpFactory& setDocumentation(const std::string&); /// Add a required input with the given name. OpFactory& addInput(const std::string& name); /// Add an optional input with the given name. OpFactory& addOptionalInput(const std::string& name); /// @brief Set the maximum number of inputs allowed by this operator. /// @note It is only necessary to set this limit if there are inputs /// that have not been named with addInput() or addOptionalInput(). OpFactory& setMaxInputs(unsigned = 9999); /// Specify obsolete parameters to this operator. OpFactory& setObsoleteParms(const ParmList&); /// Add one or more local variables to this operator. OpFactory& setLocalVariables(CH_LocalVariable*); OpFactory& setFlags(unsigned); OpFactory& setInternalName(const std::string& name); OpFactory& setOperatorTable(const std::string& name); /// @brief Functor that returns newly-allocated node caches /// for instances of this operator /// @details A node cache encapsulates a SOP's cooking logic for thread safety. /// Input geometry and parameter values are baked into the cache. using CacheAllocFunc = std::function<SOP_NodeCache* (void)>; /// @brief Register this operator as a /// <A HREF="http://www.sidefx.com/docs/houdini/model/compile">compilable</A>&nbsp;SOP. /// @details "Verbifying" a SOP separates its input and parameter management /// from its cooking logic so that cooking can be safely threaded. 
/// @param cookMode how to initialize the output detail /// @param allocator a node cache allocator for instances of this operator /// @throw std::runtime_error if this operator is not a SOP /// @throw std::invalid_argument if @a allocator is empty OpFactory& setVerb(SOP_NodeVerb::CookMode cookMode, const CacheAllocFunc& allocator); /// @brief Mark this node as hidden from the UI tab menu. /// @details This is equivalent to using the hscript ophide method. OpFactory& setInvisible(); /// @brief Specify (@e key, @e value) pairs of spare data for this operator. /// @details If a key already exists, its corresponding value will be /// overwritten with the new value. /// @sa @link addOperatorSpareData() addOperatorSpareData@endlink, /// @link getOperatorSpareData() getOperatorSpareData@endlink OpFactory& addSpareData(const SpareDataMap&); protected: /// @brief Return the operator table with which this factory is associated. /// @details This accessor is mainly for use by derived OpFactory classes. OP_OperatorTable& table(); private: void init(OpPolicyPtr, const std::string& english, OP_Constructor, ParmList&, OP_OperatorTable&, OpFlavor); struct Impl; std::shared_ptr<Impl> mImpl; }; // class OpFactory //////////////////////////////////////// /// @brief An OpPolicy customizes the behavior of an OpFactory. /// This base class specifies the required interface. class OPENVDB_HOUDINI_API OpPolicy { public: OpPolicy() {} virtual ~OpPolicy() {} /// @brief Return a type name for the operator defined by the given factory. std::string getName(const OpFactory& factory) { return getName(factory, factory.english()); } /// @brief Convert an English name into a type name for the operator defined by /// the given factory, and return the result. /// @details In this base class implementation, the operator's type name is generated /// by calling @c UT_String::forceValidVariableName() on the English name. 
/// @note This function might be called (from OpFactory::addAlias(), for example) /// with an English name other than the one returned by /// factory.@link OpFactory::english() english()@endlink. virtual std::string getName(const OpFactory& factory, const std::string& english); /// @brief Return an icon name for the operator defined by the given factory. /// @details Return an empty string to use Houdini's default icon naming scheme. virtual std::string getIconName(const OpFactory&) { return ""; } /// @brief Return a help URL for the operator defined by the given factory. virtual std::string getHelpURL(const OpFactory&) { return ""; } /// @brief Return a label name for the operator defined by the given factory. /// @details In this base class implementation, this method simply returns /// factory.@link OpFactory::english() english()@endlink. virtual std::string getLabelName(const OpFactory&); /// @brief Return the inital default name of the operator. /// @note An empty first name will disable, reverting to the usual rules. virtual std::string getFirstName(const OpFactory&) { return ""; } /// @brief Return the tab sub-menu path of the op. /// @note An empty path will disable, reverting to the usual rules. virtual std::string getTabSubMenuPath(const OpFactory&) { return ""; } }; //////////////////////////////////////// /// @brief Helper class to manage input locking. 
/// @brief RAII guard that locks a SOP node's inputs for the duration of a scope.
/// @details The constructor acquires the input locks via OP_AutoLockInputs and
/// throws on failure; OP_AutoLockInputs releases any locks it still holds when
/// this object is destroyed (see the HDK OP_AutoLockInputs reference).
class OPENVDB_HOUDINI_API ScopedInputLock
{
public:
    /// @brief Lock the inputs of @a node for the given cook context.
    /// @throw std::runtime_error if the inputs cannot be locked
    ScopedInputLock(SOP_Node& node, OP_Context& context)
    {
        mLock.setNode(&node);
        // UT_ERROR_ABORT or worse means the lock was not acquired.
        if (mLock.lock(context) >= UT_ERROR_ABORT) {
            throw std::runtime_error("failed to lock inputs");
        }
    }
    // mLock's destructor releases any inputs still marked as locked.
    ~ScopedInputLock() {}

    /// @brief Forward to OP_AutoLockInputs::markInputUnlocked() to flag
    /// @a input as no longer locked (e.g., after its data has been stolen).
    void markInputUnlocked(exint input) { mLock.markInputUnlocked(input); }

private:
    OP_AutoLockInputs mLock;
};


////////////////////////////////////////


// Extended group name drop-down menus incorporating "@<attr>=<value>" syntax
OPENVDB_HOUDINI_API extern const PRM_ChoiceList PrimGroupMenuInput1;
OPENVDB_HOUDINI_API extern const PRM_ChoiceList PrimGroupMenuInput2;
OPENVDB_HOUDINI_API extern const PRM_ChoiceList PrimGroupMenuInput3;
OPENVDB_HOUDINI_API extern const PRM_ChoiceList PrimGroupMenuInput4;

/// @note Use this if you have more than 4 inputs, otherwise use
///   the input specific menus instead which automatically
///   handle the appropriate spare data settings.
OPENVDB_HOUDINI_API extern const PRM_ChoiceList PrimGroupMenu;

} // namespace houdini_utils

#endif // HOUDINI_UTILS_PARM_FACTORY_HAS_BEEN_INCLUDED
26,540
C
43.014925
99
0.68783
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/abitest/TestABI.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 const char* getABI(); const char* getNamespace(); void* createFloatGrid(); void* createPointsGrid(); void cleanupFloatGrid(void*); void cleanupPointsGrid(void*); int validateFloatGrid(void*); int validatePointsGrid(void*);
309
C
24.833331
48
0.773463
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/abitest/main.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <openvdb/openvdb.h> // include method declarations both inside and outside houdini namespace #include "TestABI.h" namespace houdini { #include "TestABI.h" } // namespace houdini int test() { { // verify the ABI matches const std::string abiTest = houdini::getABI(); const std::string abiMain = getABI(); if (abiTest != abiMain) { std::stringstream ss; ss << "Error: Mismatching ABIs for ABI Test - " << abiTest << " vs " << abiMain; throw std::runtime_error(ss.str()); } // output a warning if the namespaces match const std::string namespaceTest = houdini::getNamespace(); const std::string namespaceMain = getNamespace(); if (namespaceTest == namespaceMain) { std::cerr << "Warning: Namespace names match, " << "so this test is not expected to fail." << std::endl; } } { // check ABI from Houdini to non-Houdini void* grid = houdini::createFloatGrid(); validateFloatGrid(grid); houdini::cleanupFloatGrid(grid); grid = houdini::createPointsGrid(); validatePointsGrid(grid); houdini::cleanupPointsGrid(grid); } { // check ABI from non-Houdini to Houdini void* grid = createFloatGrid(); houdini::validateFloatGrid(grid); cleanupFloatGrid(grid); grid = createPointsGrid(); houdini::validatePointsGrid(grid); cleanupPointsGrid(grid); } return 0; } int main(int, char**) { try { test(); } catch (const std::runtime_error& e) { std::cerr << "Error: " << e.what() << std::endl; return 1; } catch (...) { std::cerr << "Unknown Error " << std::endl; return 1; } return 0; }
1,915
C++
25.611111
72
0.57859
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/abitest/TestABI.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <openvdb/openvdb.h> #include <openvdb/tools/LevelSetSphere.h> #include <openvdb/points/PointDataGrid.h> #include <openvdb/points/PointConversion.h> #include <stdexcept> #ifdef HOUDINI namespace houdini { #endif //////////////////////////////////////// // Validation Methods // throw an exception if the condition is false inline void VDB_ASSERT(const bool condition, const std::string& file, const int line) { if (!condition) { throw std::runtime_error("Assertion Fail in file " + file + " on line " + std::to_string(line)); } } #define VDB_ASSERT(condition) VDB_ASSERT(condition, __FILE__, __LINE__) //////////////////////////////////////// // Version methods const char* getABI() { return OPENVDB_PREPROC_STRINGIFY(OPENVDB_ABI_VERSION_NUMBER); } const char* getNamespace() { return OPENVDB_PREPROC_STRINGIFY(OPENVDB_VERSION_NAME); } //////////////////////////////////////// // Grid Methods void* createFloatGrid() { openvdb::initialize(); openvdb::FloatGrid::Ptr grid = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>( /*radius=*/1.0f, /*center=*/openvdb::Vec3f(0.0f), /*voxelSize=*/0.1f); return new openvdb::FloatGrid(*grid); } void* createPointsGrid() { openvdb::initialize(); const std::vector<openvdb::Vec3R> pos { openvdb::Vec3R(0,0,0), openvdb::Vec3R(10,10,10), openvdb::Vec3R(10,-10,10), openvdb::Vec3R(10,10,-10), openvdb::Vec3R(10,-10,-10), openvdb::Vec3R(-10,10,-10), openvdb::Vec3R(-10,10,10), openvdb::Vec3R(-10,-10,10), openvdb::Vec3R(-10,-10,-10) }; auto transform = openvdb::math::Transform::createLinearTransform(0.1); openvdb::points::PointDataGrid::Ptr grid = openvdb::points::createPointDataGrid<openvdb::points::NullCodec, openvdb::points::PointDataGrid, openvdb::Vec3R>(pos, *transform); return new openvdb::points::PointDataGrid(*grid); } void cleanupFloatGrid(void* gridPtr) { openvdb::uninitialize(); openvdb::FloatGrid* grid = 
static_cast<openvdb::FloatGrid*>(gridPtr); delete grid; } void cleanupPointsGrid(void* gridPtr) { openvdb::uninitialize(); openvdb::points::PointDataGrid* grid = static_cast<openvdb::points::PointDataGrid*>(gridPtr); delete grid; } int validateFloatGrid(void* gridPtr) { openvdb::FloatGrid* grid = static_cast<openvdb::FloatGrid*>(gridPtr); VDB_ASSERT(grid); VDB_ASSERT(grid->tree().activeVoxelCount() > openvdb::Index64(0)); VDB_ASSERT(grid->tree().leafCount() > openvdb::Index64(0)); std::stringstream ss; grid->tree().print(ss); VDB_ASSERT(ss.str().length() > size_t(0)); auto iter = grid->tree().cbeginLeaf(); VDB_ASSERT(iter); VDB_ASSERT(iter->memUsage() > openvdb::Index64(0)); return 0; } int validatePointsGrid(void* gridPtr) { openvdb::points::PointDataGrid* grid = static_cast<openvdb::points::PointDataGrid*>(gridPtr); VDB_ASSERT(grid); VDB_ASSERT(grid->tree().activeVoxelCount() > openvdb::Index64(0)); VDB_ASSERT(grid->tree().leafCount() > openvdb::Index64(0)); std::stringstream ss; grid->tree().print(ss); VDB_ASSERT(ss.str().length() > size_t(0)); auto iter = grid->tree().cbeginLeaf(); VDB_ASSERT(iter); VDB_ASSERT(iter->memUsage() > openvdb::Index64(0)); auto handle = openvdb::points::AttributeHandle<openvdb::Vec3f>::create(iter->constAttributeArray("P")); VDB_ASSERT(handle->get(0) == openvdb::Vec3f(0)); return 0; } #ifdef HOUDINI } // namespace houdini #endif
3,702
C++
24.190476
107
0.633982
NVIDIA-Omniverse/ext-openvdb/tsc/ccla.md
<!-- SPDX-License-Identifier: CC-BY-4.0 --> <!-- Copyright Contributors to the OpenVDB Project. --> Corporate Contributor License Agreement ("Agreement") Thank you for your interest in the OpenVDB Project a Series of LF Projects, LLC (hereinafter "Project") which has selected the Mozilla Public License Version 2.0 (hereinafter "MPL-2.0") license for its inbound contributions. The terms You, Contributor and Contribution are used here as defined in the MPL-2.0 license. The Project is required to have a Contributor License Agreement (CLA) on file that binds each Contributor. You agree that all Contributions to the Project made by You or by Your designated employees shall be submitted pursuant to the Developer Certificate of Origin Version (DCO, current version available at https://developercertificate.org) accompanying the contribution and licensed to the project under the MPL-2.0. You agree that You shall be bound by the terms of the MPL-2.0 for all contributions made by You and Your employees. Your designated employees are those listed by Your CLA Manager(s) on the system of record for the Project. You agree to identify Your initial CLA Manager below and thereafter maintain current CLA Manager records in the Project's system of record. Initial CLA Manager (Name and Email): ______________________________________ Corporate Signature: Company Name: ____________________________________________ Signature: _______________________________________________ Name: _______________________________________________ Title _______________________________________________ Date: _______________________________________________
1,651
Markdown
49.060605
362
0.656572
NVIDIA-Omniverse/ext-openvdb/tsc/icla.md
<!-- SPDX-License-Identifier: CC-BY-4.0 --> <!-- Copyright Contributors to the OpenVDB Project. --> Individual Contributor License Agreement ("Agreement") Thank you for your interest in the OpenVDB Project a Series of LF Projects, LLC (hereinafter "Project") which has selected the Mozilla Public License Version 2.0 (hereinafter "MPL-2.0") license for its inbound contributions. The terms You, Contributor and Contribution are used here as defined in the MPL-2.0 license. The Project is required to have a Contributor License Agreement (CLA) on file that binds each Contributor. You agree that all Contributions to the Project made by You shall be submitted pursuant to the Developer Certificate of Origin Version (DCO, current version available at https://developercertificate.org) accompanying the contribution and licensed to the project under the MPL-2.0, and that You agree to, and shall be bound by, the terms of the MPL-2.0. Signature: __________________________________________ Name: _______________________________________________ Date: _______________________________________________
1,106
Markdown
51.714283
352
0.681736
NVIDIA-Omniverse/ext-openvdb/tsc/process/security.md
**Security and OpenVDB** The OpenVDB Technical Steering Committee (TSC) takes security very seriously. OpenVDB was not originally built as an outward-facing library. Users should exercise caution when working with untrusted data. Code injection bugs have been found in much simpler data structures, so it would be foolish to assume OpenVDB is immune. OpenVDB is also focused on high performance. It will rely on incoming parameters being valid. For example, array bounds checking is intentionally avoided. Likewise, integer overflow concerns are intentionally not addressed. ***Reporting*** If you discover a security vulnerability that you feel cannot be disclosed publicly, please submit it to [email protected]. This will go to a private mailing list of the TSC where we will endeavour to triage and respond to your issue in a timely manner. ***Outstanding Security Issues*** None ***Addressed Security Issues*** None ***File Format Expectations*** Attempting to read a .vdb file will: * Return success and produce a valid VDB data structure in memory * Fail with an error * Execute forever * Run out of memory The last two options may be surprising. VDBs, however, are designed as open-ended containers of production data that may be terabytes in size. It is a bug if some file causes the library to crash. It is a serious security issue if some file causes arbitrary code execution. ***Runtime Library Expectations*** We consider the library to run with the same privilege as the linked code. As such, we do not guarantee any safety against malformed arguments. Provided functions are called with well-formed parameters, we expect the same set of behaviors as with file loading. It is a bug if calling a function with well-formed arguments causes the library to crash. It is a security issue if calling a function with well-formed arguments causes arbitrary code execution. 
We do not consider this as severe as file format issues because in most deployments the parameter space is not exposed to potential attackers. ***Proper Data Redaction*** A common concern when working with sensitive data is to ensure that distributed files are clean and do not possess any hidden data. There are a few surprising ways in which OpenVDB can maintain data that appears erased. The best practice for building a clean VDB is populate an empty grid voxel-by-voxel with the desired data and only copy known and trusted metadata fields. ****Inactive Voxels**** When voxels are marked inactive in the grid, they are not cleared to the background value. If you rely on the data being deleted, you should overwrite the voxel's values as well as deactivating them. In addition, calling pruneInactive will free deactivated tiles. This is particularly important when passing a VDB to another process. ****Topology**** It is important to note the general topology of the grid provides a 1-bit image of the data in question. By taking the expected bandwidth into account, a close approximation of an SDF can be recreated by just the topology data. Building a new tree with a new topology of only the desired data can avoid this. ****Metadata**** VDBs will try to preserve metadata through most operations. This can provide an unexpected sidechannel for communication. ****Steganographic**** Most image-based steganographic techniques can be applied to VDBs. Narrow band SDF have an additional concern, however. Since only the zero crossing affects the perceived geometry, there is considerable room for information hiding in the off-band voxels. Conversion to polygon and reconstruction from the polygonal mesh should eliminate those channels. Most VDB algorithms ignore inactive values. Hidden data stored in inactive voxels may thus be preserved by the VDB tools, even for non-SDF grids. 
tools::changeBackground can be run to reset all inactive voxels to the background value, and tools::pruneInactive to ensure minimal topology.
3,942
Markdown
36.198113
74
0.793506
NVIDIA-Omniverse/ext-openvdb/tsc/process/deprecation.md
**Deprecation Strategy for OpenVDB** OpenVDB is committed to supporting three years of [VFX Reference Platform](http://www.vfxplatform.com/) and all releases of Houdini and Maya based on those versions of the platform. The latest supported year is that in which the VDB version listed matches the major version. For example, version 6.1.0 of OpenVDB supports VFX Reference Platform years 2019, 2018 and 2017. This implies the following support: * OpenVDB ABI=4, ABI=5 and ABI=6 * C++11 and C++14 * Houdini 16.5, 17.0 and 17.5 When version 7.0.0 is released, OpenVDB will support VFX Reference Platform years 2020, 2019 and 2018. Support for Houdini 16.5 and C++11 will be dropped. Support for obsolete ABIs will not be dropped until the first minor release after the introduction of a new ABI. For example, the latest version to retain support for ABI=4 will be the release prior to 7.1.0.
895
Markdown
37.95652
78
0.775419
NVIDIA-Omniverse/ext-openvdb/tsc/process/release.md
**Release Process for OpenVDB** The following assumes that the current OpenVDB library version number is 6.0.0 and the new version number is 6.1.0. Adjust for the actual version numbers as appropriate. - [ ] Open a Jira "Release OpenVDB 6.1.0" ticket with "OpenVDB_6.1.0" as the Fix Version. - [ ] Update `CHANGES` and `doc/changes.txt` with release notes. Ensure doc/doxygen-config has the correct release number. [_Specifics TBD, pending a review of release note management tools._] - [ ] Open a pull request to merge the above changes into `openvdb/master`. Associate the pull request with the Jira ticket created earlier, and verify that the CI build runs successfully. - [ ] Draft a new [GitHub release](https://github.com/AcademySoftwareFoundation/openvdb/releases). Title it "OpenVDB 6.1.0" and tag it as `v6.1.0`. - [ ] Update `openvdb-website/index.html` with a news item announcing the release, and delete the oldest news item. Open that page in a browser and check that the website renders correctly and that there are no broken links. - [ ] Build the documentation (for the core library and python module) using Doxygen 1.8.11 and replace the contents of `openvdb-website/documentation/doxygen/` with the output. There are two sub folders, /search and /python. /search will be generated by the core doxygen commands. The /python folder will be built by epydoc. The python documentation requires the python module to be built and installed. Make sure you build with OPENVDB_PYTHON_WRAP_ALL_GRID_TYPES=ON, otherwise documentation will not be generated for some grid types. 
An example CMake command list (assuming within a nested build folder): ```shell # Build python module and core documentation install_location_on_path=/usr/local website_dox_location=openvdb-website/documentation/doxygen/ cmake ../ \ -DCMAKE_INSTALL_PREFIX=$install_location_on_path \ -DOPENVDB_BUILD_PYTHON_MODULE=ON \ -DOPENVDB_PYTHON_WRAP_ALL_GRID_TYPES=ON \ -DOPENVDB_BUILD_BINARIES=OFF \ -DOPENVDB_BUILD_DOCS=ON \ && make install # remove existing doxygen rm $website_dox_location/* && rm $website_dox_location/search/* # move new doxygen cp -r $install_location_on_path/share/doc/OpenVDB/html/* $website_dox_location/ # commit doxygen changes git commit -s -m "<TICKET-NUMBER> Doxygen updates" # generate python documentaion - make sure this pick up your newly installed module epydoc --html -o python pyopenvdb # remove existing python docs rm $website_dox_location/python/* # move python documentation cp -r python/* $website_dox_location/python/ # commit pydoc changes git commit -s -m "<TICKET-NUMBER> Python doc updates" ``` [_This step should be automated, and the thousands of files it generates should preferably not be committed to the repository._] - [ ] Open a pull request to merge the above changes into `openvdb-website/master`. Associate the pull request with the Jira ticket created earlier. - [ ] Publish the GitHub draft release. - [ ] Post a release announcement to the [OpenVDB forum](https://groups.google.com/forum/#!forum/openvdb-forum). - [ ] In preparation for the next release, change one or more of `OPENVDB_LIBRARY_PATCH_VERSION_NUMBER`, `OPENVDB_LIBRARY_MINOR_VERSION_NUMBER` and `OPENVDB_LIBRARY_MAJOR_VERSION_NUMBER` in `openvdb/version.h`. Unless it is known that the next release will include API- or ABI-breaking changes, increment only the patch number to begin with (in this case, from 6.1.0 to 6.1.1). 
In `doc/doxygen-config` update `PROJECT_NUMBER`, `OPENVDB_VERSION_NAME`, `OPENVDB_ABI_VERSION_NUMBER` and the `@vdbnamespace` alias to match `version.h`, and add a "Version 6.1.1 - In development" section to `CHANGES` and to `doc/changes.txt`. Open a pull request to merge these changes into `openvdb/master`. - [ ] Add an "OpenVDB_6.1.1" version to Jira. END
3,827
Markdown
74.058822
690
0.758819
NVIDIA-Omniverse/ext-openvdb/tsc/process/codereview.md
**Code Reviewing and Merging OpenVDB Pull Requests** The Technical Steering Committee have write-access to the OpenVDB repository and are responsible for triaging, reviewing and ultimately merging or rejecting GitHub pull requests. This document lists the policy and best practices to guide the TSC in this process. ***Policy*** * No direct commits to the master (or any other protected) branch, every code change should be a pull request * Any member of the community can provide feedback on any pull request (and is encouraged to do so) * A CODEOWNERS file introduced to the root of the repo to configure ownership (global, per-directory and/or per-file) - this will automatically request pull request reviews from the relevant maintainers (https://help.github.com/articles/about-codeowners/) * Minimum of one non-author TSC member approval on every pull request before merging * Non fast-forward merges must be used (ie the merge must not be rebased onto master) * Travis CI and DCO status checks must strictly pass before merging, ASWF Jenkins CI should loosely pass (https://help.github.com/articles/types-of-required-status-checks) ***Best Practices*** * Prefer all requested reviewers to approve before merging * Merging a pull request should be the responsibility of the author if they are a TSC member * Any TSC member can merge a pull request authored by a non-TSC member, but with a preferred minimum of two approvals from TSC members (including themself) * Re-writing the branch history by rebasing a pull request branch just before a merge is discouraged, unless it significantly improves the overall history (such as any broken commits on the review branch that might make reverting or bisecting more difficult) * Prefer pull requests to be open for a minimum of 72 hours before merging in order to gather any feedback * Aim for all pull requests to be responded to by one of the TSC members within a minimum of two weeks with either explanation of non-acceptance, request for changes 
or merge * TSC meetings should regularly review and discuss any outstanding pull requests * Pull requests should link to the associated Jira ticket (if applicable) in the description or title, this provides a route back to the Jira ticket through the Git history (git blame -> PR merge -> Jira ticket) * All pull request comments should aim to be answered and resolved before committing
2,391
Markdown
94.679996
262
0.799247
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2019-11-21.md
Minutes from 33rd OpenVDB TSC meeting, November 21, 2019, (GMT) Attendees: *Nick* A., *Dan* B., *Jeff* L., *Ken* M. Additional Attendees: Bruce Chernia (Intel), John Mertic (LF) Regrets: *Peter* C. Agenda: 1) Confirm quorum 2) Secretary 3) Summary of last months 4) Release plan for v7 5) Copyright statements from DWA 6) Upcoming PR’s (round robin) 7) Distributed VDB in Houdini: Gas Net VDB Slice Exchange 8) SSE4.2 in Houdini Clarification. 9) PR227 (Additional memory check fixes from Autodesk, requires second approval) 10) Next Meeting 1) Quorum was confirmed. 2) Secretary was Dan Bailey Preamble) A representative from Intel planning to come to a future TSC meeting to discuss VKL. Bruce is considering inviting someone to join from Intel who may be able to contribute to the project too. 3) Summary of last months Quick overview of discussion in the last month. Decided that leaf node memory usage can't be achieved in the ABI=7 window. Clarification of the performance improvements in using the POPCNT instruction. It was decided that a release branch will be used in the last few weeks prior to a release. PRs will continue to be merged into master as normal, however any PRs intended for the upcoming release will be merged or cherry-picked across to the release branch. This will allow normal development and merging to continue while a release is being prepared. All in favour of introducing an experimental ABI flag so that we can develop and test planned ABI changes without having to wait until the start of the next major release which is typically a very short window. 4) Many of the bigger changes will not make it into 7.0 as they're not ABI-related. This includes Ken's fast sweeping and Dan's point partitioning work. There are a couple of ABI-related tasks outstanding, in particular Nick and Dan to discuss OVDB-33. 5) Copyright statements from DWA Approval from DWA legal on the proposed license change after discussion with the LF. 
Jeff Bradley from DWA has submitted a PR which needs an additional approval to merge. Dan to reach out to Jeff and others at DWA to discuss the copyright header/footer changes, though it was agreed this shouldn't hold up the license change. 6) Upcoming PR’s (round robin) Ken has two major PRs, one more minor one that involves stencil improvements and gaussian curvature and a larger subsequent one to add the fast sweeping method. Planned for before the end of the year. Ken also to take on ownership of Peter's sharpening work which we believe to just be reviewing. Dan has a number of PRs coming involving point partitioning and VDB Points merging that will be introduced one-by-one. Nick intends to look at MeshToVolume. Initial plan is some cleanup, improvements for C++11/C++14 and documentation. Still an issue with determinism that hasn't been tracked down. Dan to look at taking over the POPCNT work to get it finished in time for 7.0. 7) Distributed VDB in Houdini: Gas Net VDB Slice Exchange Jeff wishes to introduce a Gas Net VDB Distribution Microsolver to the project based on Houdini distribution. It will be the first DOP node in the VDB project but that is not expected to be an issue. Also, VDB SOP and VDB Activate SOP still planned for contribution. 8) SSE4.2 in Houdini Clarification. Houdini does not currently ship with SSE 4.2 enabled which confirms our decision to have it be an option is the correct one. 9) PR227 (Additional memory check fixes from Autodesk, requires second approval) This PR needs an additional approval from a TSC member to be merged. External PRs require two TSC approvals to merge. 10) Next Meeting Next planned meeting is: Novemeber 28th 2019. 2pm-3pm EST (GMT-5).
3,704
Markdown
36.424242
80
0.788067
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2020-11-10.md
Minutes from 70th OpenVDB TSC meeting, Nov 10th, 2020, (EDT) Attendees: *Nick* A., *Jeff* L., *Ken* M., *Dan* B. Additional Attendees: Johannes Meng (Intel), JT Nelson (Blender), Andre Pradhana (DW) Regrets: *Peter* C. Agenda: 1) Confirm Quorum 2) Secretary 3) AX - PR818 4) Matrix instantiation - PR723 5) Trivial types - PR865 6) Split out TypeList - PR866 7) LevelSetUtil UB Fix - PR868 8) Morphology - PR754 9) PR process 10) Unit tests 11) NanoVDB 12) Next Meeting 1) Confirm Quorum Quorum is present. 2) Secretary Secretary is Dan Bailey. 3) AX - PR818 Nick working on upstreaming floored mod changes. Requested API changes are ok to go into a subsequent release. 4) Matrix instantiation - PR723 Nick has added component-wise min/max methods to matrix classes so that matrix grids can be instantiated. Matrix doesn't inherit from Tuple, would be nice to resolve this at a later date. Less-than and greater-than operators make sense in a tuple context, but not for Vec, Quat, Mat classes. Would be worth making this small breaking change to the API, perhaps adopting an Imath base class would be a good time to do this. 5) Trivial types - PR865 Removed custom copy-constructors and deconstructors to make Vec, Mat and Quat classes trivial so that the NodeUnion specialization is no longer needed for these cases. Currently, they are all set to default, but discussed making them implicit and adding a comment instead. NodeUnion specialization now only used for std::string. Even though we may want to deprecate string grids, still worth keeping it around for any other custom specializations. 6) Split out TypeList - PR866 Moves TypeList implemention into a new header, Nick using this quite a bit. 7) LevelSetUtil UB Fix - PR868 Referencing the first element in a vector is undefined behavior if the vector is empty, so switch to using data() method. Jeff mentioned that the compiler can also agressively optimize away null pointer checks if the source was deemed to be a reference. 
8) Morphology - PR754 Nick to host a group code review session next week to discuss this PR in more detail. 9) PR process PRs often end up stagnating. TSC members either don't have time to review, do a cursory review or provide some suggestions for non-critical improvements and then they never get to the point of approval. Months later we return to them and then can't remember all the context. All agreed that we should have a more formal process to merge PRs. Suggested process is to have a two-week window once a PR has first been submitted to allow the TSC time to review. Once that has passed, it can either progress to a group code review session if it's particularly large or complex or briefly presented by the author for approval one week later pending no objections. Barrier for PR acceptance should be lower. Raising concerns or suggesting improvements shouldn't necessarily block progress of a PR. It may be fine to merge code with known but acceptable issues. Ken highlights that the master branch should be treated as in-development and making changes to newly introduced code is accepted and encouraged. Often the first usage of the code is only once it is in the master branch. Having unit tests is still required for merging. 10) Unit tests All in agreement about moving away from CppUnit, it's a simple unit test framework and is no longer being actively updated. Ken has a strong preference to use GoogleTest (or GTest), which is already in use in NanoVDB. It provides useful features such as being able to run tests using filters and until a failure occurs. Committee is divided on how to migrate. Rewriting all the unit tests in GTest will take a lot of time and that time would be better spent on other priorities. Jeff favors having a dependency on both CppUnit and GTest and only adding new tests using GTest. Nick and Dan would prefer to get rid of CppUnit as a dependency. Dan would like to attempt a shim layer that may reduce the time spent migrating. 
Perhaps acceptable to have both dependencies if only one dependency per file. Migration could be done over a few months. Breaking up existing tests such as TestTools an orthogonal, but worthwhile effort. 11) NanoVDB DDA point offset issue was resolved by offsetting ray by half a voxel for point raytracing. Encapsulated in a PointRayMarch structure that hides this detail. Would be worth backporting this fix to vdb_render amongst others. Ken feels NanoVDB has stabilized now and should be ready to merge soon. 12) Next Meeting Next meeting is November 17th, 2020. 12pm-1pm EST (GMT-5). Nick to present Morphology PR. Ken to investigate enabling Zoom screen-sharing.
4,673
Markdown
37
80
0.789429
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2020-02-20.md
Minutes from 41st OpenVDB TSC meeting, February 20th, 2020, (GMT) Attendees: *Nick* A., *Dan* B., *Jeff* L., *Ken* M. Additional Attendees: Bruce Chernia (Intel), JT Nelson (Blender) Regrets: *Peter* C. Agenda: 1) Confirm quorum 2) Secretary 1) Quorum was confirmed. 2) Secretary was Jeff Lait 3) PR 455 Looks good. But Peter suggested he did not want to merge? Resolved: Add and name the C++ tool guassianSharpen, but keep the UI name Sharpen. 4) Default branch Default branch is now Develop. We must change branch when we do PRs. 5) VDB Merge The initial round of feedback on the UI has been read. Another push will be done for final verification. The confusing Activity Union toggle Will be moved into the activity collation options. 6) JT Nelson Working on Blender integration of VDB. Have a fracture modifier integrated. Working on crushing technology. Adding solid geometry support. Should there be blender path in the openvdb directory? We've not maintained Maya branch well. We should consider outreach to Autodesk to see if they can maintain it. 7) Roadmap What does priority mean? Personal affiliation vs OpenVDB priority? We decided this is some combination of the two. a) AX A new release next week. Performance improvements. End of March will be ready for feature branch. Needs documentation and testing. Language spec still incoming. Can prioritize the spec. Spec is currently in Doxygen and private, Will be ported to Google Doc for commenting. b) Fast Sweeping PR exists, not yet pushed. Very close. c) Sharpening Discussed this meeting. d) Counting Tools Agreed it can live in both places. Leave for Google Summer of Code e) Merging Tools Part of VDB Merge project, ongoing. f) Topology Tools Moving tree API to tool and accelerate. Likely want to deprecate tree methods. g) Fill Tools Straight forward method to move off tree and into tools. h) Densify Similar to Fill Tools i) BBox Similar to Fill Tools j) Extrema More complications. 
Confusion exists now, is easy to make very slow by accident. Should be separate tool. k) Mask/Bool Resample Low res -> High res has to be optimized. l) Reducing Memory Footprint No discussion noted. m) Adding more interpolation types Higher order, monotonic cubic o) Point Merging Ongoing phases. p) Surfacing VDB From particles is used a lot for surfacing. Need better surfacing. Artist prefered fast base surface and then do their own adjustments. But likely time to look at again. Add a single SOP that does the single method? Like Zhu-Bridson. But rasterization is the key. What about MPM kernels? Similar rasterizer. Solution seems to have add the rasterization infrastructure and Zhu-Bridson solution. This could then be extended to MPM kernels. q) Mesh Support Contentious discussion of if this is a good thing. Some members feel it is an overreach of what VDB is supposed to be. Others feel real world production experience shows it to be an extremely useful component. Topology information can be passed as light weight information. Separate side car of primitive data and topology information. Works well for changing topology meshes like fluid meshes. 8) Next meeting March 5th 2020. 2pm-3pm EST (GMT-5).
3,260
Markdown
22.292857
79
0.770859
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2019-11-28.md
Minutes from 34th OpenVDB TSC meeting, November 28, 2019, (GMT) Attendees: *Nick* A., *Dan* B., *Jeff* L., *Ken* M. Regrets: *Peter* C. Agenda: 1) Confirm quorum 2) Secretary 3) OVDB 117: Nondeterministic Meshing 4) PR Change for leaky mesh. 5) Strange CI Failures? 6) CMake PR from David 7) Release plan for 7.0 8) Curvature methods and gaussian methods 9) Roadmap 10) PR 573: DSO changes 11) OVDB-33: Threadsafety of copy constructor. 12) Merged in Copyright Header changes. 13) PR 566: Update project license. 14) PR 561: Add child node request. 15) PR 493: POPCNT. 16) Spark FX conference BoF? 17) Next meeting 1) Quorum was confirmed. 2) Secretary was Jeff Lait 3) OVDB 117 This is the non deterministic mesh bug. This was a result of non-reentrancy of thread-local storage used for the scratch primitive tree. Introduced by clear() becoming threaded in 3.2. If the primitive id ends up being the sentinel value it doesn't reset and instead uses the background value. Should clear remain multithreaded? Change how we assign the reset value? Or change the background value? Parallel clear does make a difference, so it is justified to have a multithreaded clear(). But the scratch pad should never have got large in any case. And it is inside a parallel for in any case. We should check for this pattern of thread local storage elsewhere. PR will change to serial clear. 4) PR Change for leaky mesh. Nick will attempt to look at the code change. 5) Strange CI Failures? "Run Failed" from github when doing a pull request? Forward these errors to Dan. 6) CMake PR from David Needed separate PR request to do the final clean up. Can we directly edit the PR? This will cause it to persist in their repository. We should be courteous about it and alert them we plan on changing their PR. But we reserve the right to modify external users if they have enabled the switch. We should add to our contribution guidelines that we may change your PR. 
7) Release plan for 7.0 Nick will do the release, with Dan providing a checklist. 8) Curvature methods and gaussian methods No changes in this PR to the SOP. Adds more curvature options. Likely will want to add more options to the analysis SOP. Regarding the free standing functions that are removed. API of tools has been allowed to change. Add to the review if we should keep them. Note the genus computation can become unstable. 9) Roadmap Should add a roadmap for the release. Start the discussion after 7.0 release. 10) PR 573: DSO changes Nick is to inspect. 11) OVDB-33. Threadsafety of copy constructor. Must be prioritized for 7.0. Nick to inspect. 12) Merged in Copyright Header changes. We should check for any existing copyrights owned by our respective companies. New files should use the copyright to contributors and SPDX 13) PR 566: Update project license. Update project license. Needs second review. 14) PR 561: Add child node request. Ken to review. 15) PR 493: POPCNT. Replaced with new PR which is incoming. Much of the original code was for the deprecated root node mask that is not worth optimizing, so the new PR is smaller. 16) Spark FX conference BoF? Artist tailored conference. Dan will follow up and see if he can attend. 17) Next planned meeting is: December 5th 2019. 2pm-3pm EST (GMT-5).
3,333
Markdown
30.45283
297
0.763276
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2019-01-17.md
Minutes from 5th OpenVDB TSC meeting, Jan. 17, 2019 Attendees: John M., *Jeff* L., *Nick* A., *Ken* M., *Peter* C., *Dan* B., Andrew P. Additional Attendees: Bruce Chernia (Intel), Daniel Heckenberg (Animal Logic) Agenda: 1) Confirmation of quorum 2) Selection of secretary 3) Update from TAC (Ken) 4) Update on Multi-Res grid from Autodesk (Ken) 5) Intel representative 6) JIT for OpenVDB (Nic) 7) Copyright notice (Dan) 8) Write permissions (Dan) 9) Release process (Dan) 10) Cleanup and improve repository (Ken) 11) Schedule next meeting 1) A quorum was confirmed. 2) Secretary - Nick Avramoussis. 3) OpenColourIO is accepted within the probationary stage. There's been interest form 3rd party companies, including Nvidia, Intel and Autodesk. Security and CI: The issue of a security expert was discussed as something which should be addressed across all ASWF projects. Jeff mentioned that the guidelines are vague for this role and that in the context of VDB, this mainly relates to bug handling/invalid file reading. Ken mentioned the concern of arbitrary metadata in regards to attacks. Andrew advised on static code analysis tools as part of a CI build (lgtm, clang). Dan mentioned using clang tidy as a CI step which would be easier to solve on the CMake side to address CII best practices. The viability of Travis support was questioned. Previous limitations have been removed (Nov 2018) which now allow for building on Ubuntu 16.04 for Houdini 17. Dan has a pull request which optimises the Travis builds to help with timeouts, though it's agreed that it is not a sustainable solution. It was agreed that the main thing holding us back from a Jenkins transition was the CMake build. CMake: A fully supported CMake build through CI is becoming more of a priority. Current TSC members do not have extensive experience with CMake. Nick mentioned his experience and usage within DNEG production and will share example modification. 
It was agreed that other expertise would be ideal for deciding on a final structure. OpenColorIO (and others) have extensive CMake systems which could perhaps be used to help establish CMake conventions within the ASWF. Ken mentioned a concern if this was actioned due to the amount of dependencies OpenVDB has. Ken mentioned that WETA have dedicated a lot of time to standardizing CMake builds. Ken will establish communication with Nick, Kimble and Bruce to start a discussion on CMake structure. 4) Autodesk have received the TSC questions on their MRes grid structure. No concerns have been raised but they have asked for more time to respond. 5) Bruce Chernia (Intel) spoke on a desire to investigate OpenVDB integration with Embree and OSPRay. These tools are exclusively focused on real time ray tracing on the CPU. Bruce mentioned that they would be happy to contribute back findings to the OpenVDB project during their investigations. 6) Nick spoke on the current state of OpenVDB AX. A stable V1.0.0 release is still planed and will involve an incorporation into OpenVDB. This release will incorporate all items visible on the OpenVDB AX roadmap, available within the AX repository on DNEGs github. Nick mentioned that a summer release is most likely, with an integration to be performed before SIGGRAPH 2019. Jeff and Nick discussed concerns in regards to Houdini integration, with a commitment being made to support various VEX syntax. SIGGRAPH: Ken brought up the OpenVDB course at SIGGRAPH 2019. Ken will send around an email thread to start a discussion on topics to be presented. EMail Lists: Jon brought up that TSC members should try where possible to use the aswf e-mail lists. Dan mentioned that the private e-mail list is currently public. Jon to check and confirm a switch to private. Contributing File: A clause in the CONTRIBUTING.md file which currently declares a committer's responsibility as '25% of one's time' is to be maintained but re-worded. 
The main desire is to ensure TSC members can be held accountable if no work is apparent. 7) The OpenVDB copyright notices need to be updated for 2019. A unanimous agreement was made to switch to a SPDX license. This can be done independently of removing the copyright dates. Jon recommended to add an additional copyright with the dates removed and not remove the current DreamWorks one. 8) Ken cannot be added as a reviewer onto current PRs and will check his write access status on the repository. Currently no TSC members have admin access to the ASWF repo, required for setting code review and pull request policy. It was agreed that Ken, Peter, Dan and Nick will be given admin rights. Ken to submit this request to the LF. Dan to send an e-mail to the ASWF helpdesk regarding JIRA watchers. 9) An official OpenVDB 6 announcement needs to be performed. Peter is the only TSC member with access to the website. Peter suggested on a vote in the next TSC meeting on whether we continue binary distributions. 10) ~Time 11) Our next TSC meeting is scheduled for Thursday Jan 24, 2019 11am-12pm PST.
5,038
Markdown
47.451923
98
0.795951
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2020-05-21.md
Minutes from 50th OpenVDB TSC meeting, May 21st, 2020, (EDT) Happy 50th meeting anniversary! Attendees: *Nick* A., *Jeff* L., *Ken* M., *Dan* B. Additional Attendees: JT Nelson (Blender), Bruce Chernia (Intel), Andre Pradhana (DWA), Peter Cheng (DWA) Regrets: *Peter* C. Agenda: 1) Confirm quorum Quorum is present 2) Secretary Jeff Lait 3) Forum Question about multires; appears to be addressed. 4) houdini_utils DWA will look into whether this can be integrated into a single directory or not. Can we separate off the houdini_utils code as a pure VDB support tool? Can DWA fork their internal houdini_utils from the VDB version so we don't have to keep targeting a separate directory? 5) Ken's workload a) Fast Sweeping PR b) Benchmark for Multi-Res Can we get the funding from ASWF to do the benchmark component? Potential targets of the funding are busy for a month or so. Can we get the ASWF dedicated resources moved to VDB? Send a letter out? DWA is planning the Gas Net Slice Exchange as current priority. Ken's first priority is Fast Sweeping. We'll re-address the benchmark requirements in another month. 6) Siggraph We discussed what things might be possible to announce for Siggraph. Fast Sweeping. Has a lot of functionality not exposed in the sop. Some should be in a new SOP, such as extrapolation. Others, such as renormalizing, should fold into existing SOPs. Best plan is to get it in the tool level before worrying too much about these decisions. AX: hopefully? Depends on amount of time for reviewing. Some PRs need reviewing now! Sharpening. VDB Activate. Is in now. VDB Merge. Dan is hopeful. 7) Website We tend to forget that PRs are there. We should add a code owner so the website triggers code reviewers for all of the TSC. None of the download links work now that we moved from nexus to artifact; and artifact is not working. Linux foundation is investigating. 8) AX Matches VEX Scalar promotion rules. Have CMake PR for finding the right modules. 
And AX is ready to integrate. 9) Grid Types Becoming more frequent of an issue. In AX we have our own types based on VDB math. This includes matrices. AX should be agnostic to registered VDB types. Registration should only matter for serialization. Any intermediate data structure should be allowed. We should be able to make a 6-float VDB without registration. Clip, prune, and other grid tools defined on the tree require certain functions to exist: less than, greater than, absolute value, etc. These don't really exist on Matrix. Would like an agnostic grid, so matrix3 <-> float9. And move away from math library. So VDB_Grid::float3 rather than VDB_Grid::Vec3. This is helped as we move more stuff from the nodes to the grids. Should we add < for matrices? Or add an openvdb::lessthan() that does explicit lexicographic comparison? The ABI means we will keep these methods for a while, so we should find a solution that doesn't require moving them out. Matrix2 is not currently supported in AX, so the float4 is still unambiguous. Nick will share the code needed, along with an email of what Jeff's suggestion is. 10) Next Meeting May 28th 2020. 2pm-3pm EDT (GMT-4).
3,220
Markdown
38.280487
766
0.764286
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2020-06-25.md
Minutes from 53rd OpenVDB TSC meeting, June 25th, 2020, (GMT) Attendees: *Nick* A., *Jeff* L., *Ken* M., *Dan* B. Additional Attendees: Brecht Van Lommel, Andre Pradhana (DW) Regrets: *Peter* C. Agenda: 1) Confirm quorum 2) Secretary 3) New Meeting Time 4) Forum Posts 5) 7.1/8 Release Schedule 6) VDB Net Slice Exchange 7) Next meeting 1) Quorum was confirmed. 2) Secretary was Dan Bailey 3) New Meeting Time Dan has requested the TSC meeting be moved to an earlier day of the week. Consensus that the new time should be on Tuesday and an hour earlier than the current meeting (5pm GMT) - 10am PDT / 1pm EDT / 6pm BST. Ken to adjust the calendar invite. 4) Forum Posts A question was raised about the sharpen feature in the Convert VDB SOP when converting polygons. This implementation (hvdb::SharpenFeaturesOp) is only provided in the Houdini toolset and not in the core library itself. Initial suggestion was for other vendors to copy the implementation in the Houdini library with an acknowledgement that this implementation should really be pushed down to the core library. However, it was noted that VDB provides rather primitive mesh data structures. The best approach would probably be to wrap the Houdini API and to adapt the implementation to work based on a templated wrapper class, but needs more investigation to understand how easy that may be to achieve in practice. At the very least a unit test based on the implementation in the Houdini plugin would be more useful to other DCCs and allow us to offer basic regression testing. A generic mesh data structure in Houdini could be useful. Ken also keen on a tetrahedral data structure, converting VDBs to tets would be useful. Jeff mentioned that dumb tet conversion is trivial, robust tet conversion that works well with finite element simulation is much harder, but would be very desirable. 
5) 7.1/8 Release Schedule Aim is to release VDB 7.1 as soon as possible so that Jeff can base the next Houdini release on this version, ideally by 1st July. Likely will be until a week or two after this date. Jeff suggests that a 7.2 release should come before 8.0 and include AX so that DCCs do not need to wait on the VFX Reference Platform bumping to the next major version. 8.0 is scheduled for Q4. * VDB Sharpen SOP Ken to look at and approve the VDB Sharpen SOP. Still needs a unit test, but Ken may approve without for sake of expediency. * Fast Sweeping / VDB Extrapolate SOP Andre to look at creating a draft PR with the DWA version of the Extrapolate SOP adapted to the Ken's implementation. Work can be done on refining the SOP UI parameters in parallel with finishing the API. * VDB Merge SOP Dan to look at finishing a first version of the VDB Merge SOP. It will be limited in functionality (add fog volumes, union SDFs) and can be extended in future releases. Question about usability if reordering of inputs changes resolution as proposed with first input being used as resolution only. Nick raised concerns about having two SOPs doing the same thing (VDB Combine SOP vs VDB Merge SOP). Adapting the Combine SOP is feasible however would not be as efficient as a multi-input SOP for stealing use cases. The multi-input is also more intuitive for many common workflows and would not require changing modes as often - the default mode in the VDB Combine SOP is Copy A which is almost never what is needed. General feeling that the VDB Combine SOP is a bit of a kitchen sink and could do with some improvements. The Combine SOP is still useful because it is intuitive in certain cases such as subtracting one VDB from another. Open question about what to do with the Combine SOP in the future, but decision is to proceed with introducing VDB Merge SOP for now and allow artists to opt-in gradually. 
* VDB Net Slice Exchange Andre has compiled and tested the VDB Net Slice Exchange that SideFX provides as part of the HDK toolkit samples, however has not been able to test it on production examples yet. Waiting on a little more production testing before including it in a VDB release. 6) Next Meeting New day, new time. Tuesday June 30th, 2020. 1pm-2pm EDT (GMT-4).
4,162
Markdown
40.63
80
0.788323
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2021-01-12.md
Minutes from 75th OpenVDB TSC meeting, Jan 12th, 2021, (EDT) Attendees: *Nick* A., *Jeff* L., *Ken* M., *Dan* B. Additional Attendees: Johannes Meng (Intel), JT Nelson (Blender), Andre Pradhana (DW), Bruce Chernaik (Intel) Regrets: *Peter* C. Agenda: 1) Confirm quorum 2) Secretary 3) GitHub Issues 4) ASWF Questionnaire 5) SIGGRAPH 6) Listing of authors 7) [[deprecated]] C++14 8) Deprecation of visitTree() 9) Adding Half 10) VDB visualize as points tool 11) Change Logs 12) Next Meeting 1) Confirm Quorum Quorum is present. 2) Secretary Secretary is Nick Avramoussis. 3) forum Briefly revisited the topic of Github issues vs the forum vs JIRA. Still no consensus, TBD. 4) ASWF Questionnaire The ASWF questionnaire has been distributed. Noted that there is a lot of questions before you get to the OpenVDB specific section and that it presents topics on every other ASWF FOSS too. 5) SIGGRAPH Course vs Open Source Days. General agreement that the OSDs feel like more BOF style presentations. Should clarify with the ASWF any expectations this year for those days. 6) Listing of authors Revisited the conversation on @author tagging in files. General consensus now is that both having the names or removing them is fine, but what is the policy. All agreed to keep the current stance, names are purely optional depending on the author and conflicts or issues on changes can be raised on the PR in question or with the TSC. 7) [[deprecated]] C++14 Questions about the removal of the OPENVDB_DEPRECATED macro in OpenVDB 8.0.0. Have a macro makes deprecations easy to disable/enable per project. Most agreed this is a useful feature. Other solutions (system includes/include guards) are not necessarily always viable. Vote on re-introducing the macro with an argument option for compiler messages: Nick, Ken, Jeff for, Dan against. Passed. 8) Deprecation of tree visit() methods Tree::visit() methods were deprecated with a message to use the DynamicNodeManager, but this is not a simple transition. 
It's not a 1:1 replacement as it changes execution order and parallelism. This needs documentation. We should only deprecate methods with similar or matching new functionality, or provide transition guides. 9) Adding Half 10) VDB visualize as points tool 11) Change Logs Time. 12) Next Meeting Next meeting is Jan 19th, 2021. 12pm-1pm EST (GMT-5).
2,373
Markdown
26.604651
79
0.774968
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2020-03-19.md
Minutes from 43rd OpenVDB TSC meeting, March 19th, 2020, (GMT) Attendees: *Nick* A., *Dan* B., *Jeff* L., *Ken* M. Additional Attendees: Bruce Chernia (Intel), Johannes Meng (Intel), JT Nelson (Blender), Robin Rowe (Cinepaint) Regrets: *Peter* C. Agenda: 1) Confirm quorum 2) Secretary 3) NanoVDB 4) AX
 5) CI and GitHub Actions migration 6) Level set tracking 7) PR review 8) Roadmap 9) Next meeting 1) Quorum was confirmed. 2) Secretary was Dan Bailey 3) NanoVDB Ken has implemented a GPU version of OpenVDB called NanoVDB. It is currently based on CUDA but could be re-implemented using OpenCL without too much work. Data structures are immutable, so primary application is rendering. Ken reported around a 30x speedup on the GPU compared with an 88-thread CPU for doing a naive raymarching using bundles of rays. CPU version being compared against is not fully accelerated (ie no vectorization). It uses a binary search for the root node which may improve performance on the CPU too. 4) AX Nick has been working on a language spec to be shared soon. As a test application, vdb_view has been modified to manipulate VDB grids on-the-fly using AX. 5) CI and GitHub Actions migration PR665 has successfully used PR re-targeting as a way of ingesting external contributions. The default branch will be reverted back to master soon as a result. This is a precursor to storing Houdini daily builds in the GitHub Actions cache to sidestep any issues with secret keys. 6) Level set tracking Level set tracking currently dilates in both directions whereas a more efficient implementation may be to look at the direction of the moving interface and to dilate in just one direction. Dan to investigate. Nick has a faster, but less accurate implementation of dilation and erosion to share for future discussion. In some cases it can be as much as 2x faster. 7) PR review Quick review of some outstanding PRs. 8) Roadmap Time 9) Next meeting April 2nd 2020. 2pm-3pm EDT (GMT-4).
1,995
Markdown
27.514285
80
0.776942
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2021-01-26.md
Minutes from 77th OpenVDB TSC meeting, Jan 26th, 2021, (EDT) Attendees: *Nick* A., *Jeff* L., *Ken* M., *Dan* B., *Andre* P. Additional: Vojta Kuzel, JT Nelson (Blender), Laura Lediaev (Sony Pictures Imageworks), Bruce Cherniak (Intel), Johannes Meng (Intel). Regrets: Peter C. 1) Forum (DAG) 2) Half 3) VDB 8 bugfix progress #936 4) Blender update (JT) 5) Min boost requirement #935 6) AX SOP progress #931 7) Comment on USD 8) Next Meeting 1) Question in the forum: why do we Octree v.s. VDB grid. Octree has a small fan-out factor (8) while the smallest fan-out factor in a VDB grid is 512. The chance of two leaf nodes having collision is small in VDB. An OpenVDB grid is also designed to change topology, whereas DAG is mainly designed to be a (preconditioning) data structure to be used for rendering. For this purpose, a NanoVDB grid is maybe more suitable for the problem. Updating a DAG dynamically will be difficult. However, Jeff Lait mentioned that NanoVDB also adds bounding box information in the Leaf Nodes, making it not suitable for a DAG-like data structure (at least in its current implementation). Ken thinks that the idea is interesting and encourages Vojta Kuzel to explore it. However, having DAG as a first class citizen in OpenVDB will be difficult because we allow the tree to dynamically change. Ken mentions that the reason people shy away from implementing it is because of skepticism over whether it will work out in practice. Dan mentions that applying this idea to a Mask Grid may be more appropriate. 2) Half Dan mentions that Carry from ILM reached out. Dan thinks that Carry would like an ASWF project to use another ASWF project so we can give feedback. The look up table in OpenEXR half has gone away in version 2.5. Dan also mentions using Imath’s vector classes. Ken says that the vector classes discussion is a bigger discussion that should be discussed at another point. 
Jeff mentions that we want to reduce OpenVDB’s dependency and that no matter what we do, we need to put the embedded half implementation in a new namespace. Ken would like to have this PR to be approved since it will be useful for NanoVDB. Ken mentions something we are forgetting: OpenVDB's dependency on half is hidden now: there is no in-memory representation that has a half. Any grid that is stored as a half on disk is converted to full 32 bit. This has come up from Autodesk (stuck in OpenVDB version 5 because they introduced a hack to load half from disk, keep it as half, and ray-trace it as a half in memory.) Ken mentions that, today, if you have a half grid, when you read it, it will be converted to full float. The HALF flag is only used in IO right now. Ken hasn't seen the changes that are implemented by Autodesk. Jeff sees this as another point to add a half type as a native type. Ken says that what we need is a way to go from 32 bit float to 16 bit float and back again. OpenVDB will work if we have that today. But Ken would like to have half as a first class citizen, so you can have a half grid in memory so that you can do ray tracing, etc. Dan believes that PointDataGrid supports half types: the truncate codec stores it as half in memory and when you read it, it is turned into a float. Jeff also agrees that we need to have half as a first class citizen (as openvdb::math::half). He proposes to have Vec3H for Vector 3 of halfs. Cary from ILM was asking if there is anything that they can do to make OpenEXR/Imath's half to be easier to use by OpenVDB. According to Nick: if they make it to be header only. Supposedly, if you have half float intrinsic provided by the compiler, then there is no need for a lookup table. At the end of this discussion, there is nothing that the OpenEXR/Imath group can do to make their implementation to be easier to be adopted by OpenVDB. Nick mentions that this PR will lock OpenVDB down to this implementation of half. 
Ken asks why we can't explore a way to have the build process pick the embedded OpenVDB's half with the other half that comes from, e.g., OpenEXR/Imath. Nick mentions several possibilities to do this: - generate the header at build time. - we can use compile defines to do this, but this is not particularly nice because downstream projects will need to use this as well. - we can do something weird to mimic the folder structure of OpenEXR inside of OpenVDB so we can change the include path. Jeff mentions that he is not sure how to inject the type-traits in half. Ken mentions that the first step is to accept this PR so that we have a default implementation that ships with OpenVDB. In the future, we can update the half implementation with a newer version of half that comes with OpenEXR/Imath or with a version from CUDA. Jeff gives an argument on why this PR can be 'harmful' because it complicates the build system even more. But he believes that it is not that harmful. Ken asks Jeff about Houdini, since Houdini has another implementation of half? Jeff says that Houdini uses fpreal16, which will be separate from the one that is name-spaced in OpenVDB. It is not a problem right now. It will start to be a problem when we have a half grid and a developer wants to use the half implementation that is not shipped with OpenVDB. Jeff mentions that this PR will get rid of OpenEXR dependency. We can talk about moving it to a third_party directory. But this does not solve the problem of being able to easily link against the OpenEXR half. Is there a way to inject OpenEXR half to the openvdb::math namespace so that you can use an #ifdef in half.h/Types.h. Should we add a compile flag? Dan thinks that it is worth a try. Ken mentions that the current PR does not block us from including the dependency on OpenEXR/Imath in the future. Nick agrees. Nick thinks that we want to make the dependency to be optional, instead of removing it completely. 
Jeff mentions the problem with backward compatibility because right now OpenVDB links against OpenEXR and Types.h includes OpenEXR/half.h. The problem with this PR is that it will break someone's code if the developer depends on OpenVDB to link against OpenEXR. However, this is bad development practice, which should be avoided by including OpenEXR/half and linking against OpenEXR directly in the first place. Now, a developer may need to change the code by replacing half with math::half. Dan mentions a similar problem that comes with gcc upgrade. Dan summarizes what we are trying to achieve: if we were to put this embedded half into a third_party directory and always build it with the OpenVDB core library and include a mechanism that optionally switches the definition that is used by the name-spaced version, whether it is the embedded half or an external half. According to Nick, the issue is in installation. Nick strongly believes that we should have that option of switching the definition of half. The build-time dependency is fine. If you want to use the embedded version of half, you need to install it, and the include path should be valid because Types.h should be able to pull in the embedded version of half. We may be able to replicate the directory structure of OpenEXR to make sure that the include path of the embedded half matches with the one that is shipped with OpenEXR, but this is not the ideal solution. Nick will continue to think about this and will come up with something that works. Ken asks if the current PR blocks the other approach? Nick says that it temporarily removes the dependency. Jeff says that it is a red-line we are crossing. Dan thinks that this is a good thing to figure out because it will help us to solve other dependency issues, such as blosc. Nick will look at this problem again and will get back. 3) VDB 8 bugfix progress #936 (Fix CSG Intersection) Dan is writing a more meticulous unit test around all the functionality. 
The existing Composite unit test does not cover everything. Merge needs to come up-stream of Composite. Dan believes that Jeff's comment on the internal node is not an issue, but he will double check this and will make sure that it is covered by the unit test. The current problem is in intersection when a child of the root node exists in one, but not the other. OpenVDB is still using linear search. Ken says that linear search with 8 entries is faster than binary search. Ken will take a look at how to do a search more efficiently. Ken will also take a look at TBB issue again. 4) Blender update (JT) JT has a 15 seconds update. His team has been working on making mesh to volume conversion to be better. Some of the conversion is half-baked. His group has been working on USD, Jupyter Notebook, and using OpenVDB and OpenVDBAX kernel. 5) Min boost requirement #935 6) AX SOP progress #931 7) Comment on USD 8) Next Meeting Next meeting is Feb 2nd, 2021. 12pm-1pm EST (GMT-5).
8,890
Markdown
92.589473
661
0.782227
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2019-03-07.md
Minutes from 10th OpenVDB TSC meeting, Mar. 7, 2019 Attendees: *Ken* M., *Nick* A., *Dan* B., *Peter* C., *Jeff* L. Additional Attendees: Bruce Chernia (Intel), Andrew Pearce (DWA), Thanh Ha (LF), John Mertic (LF), Daniel Elliot (Weta) Agenda: 1) Confirmation of quorum 2) Selection of secretary 3) OpenVDB BOF @ SIGGRAPH 4) Posting questions to Forum or mailing list or ... 5) Contributing.md 6) Houdini MMB improvements for Mako Templates 7) unifying open-source and native SOPs 8) OpenVDB 6.0.0 next steps and announcement 9) CI/CMake Update * 10) VDB tools and grid member functions * 11) Documentation * 12) Source headers * 13) JIRA Improvements * 14) Other? * 15) Schedule next meeting * This was an optimistic agenda, we only had time to discuss items 1-8 and 15. 1) A quorum was confirmed. 2) Secretary - Dan Bailey 3) OpenVDB BOF @ SIGGRAPH The TAC has proposed a Siggraph Birds of a Feather to discuss the ASWF with a focus on governance and migration of the first few projects. This will likely include some of the proposed agenda for the OpenVDB course, meaning the course content will be more focused on the project itself than the governance. 4) Posting questions to Forum or mailing list or ... There was a post to the OpenVDB Google groups forum by a member of the community that suggested that the forum was no longer monitored by the maintainers. Ken previously made a post to the forum which was slightly ambiguous and has since clarified the situation. We need to decide the process by which people should ask questions. John is helping OpenColorIO move from their own forum to groups.io which is the service used to host our three ASWF mailing lists (openvdb-dev, openvdb-user, openvdb-tsc-private). The benefits are that it would help consolidate communication and that lots of people prefer mailing lists. The service also offers a calendar and file storage per mailing list. 
China communication is also possible because it doesn't run through a Google service that is blocked by the firewall. John confirmed that it is possible to bring over the history from the forum to the new mailing list, however users would need to be migrated manually. For OpenColorIO, they are communicating that they're closing the forum and directing the subscribed members how to sign up to the new mailing list. This can serve as a useful way to cull out subscribed members that are no longer interested in the forum. John highlighted that some communities prefer differentiating developer- related discussion to user-related discussions hence the openvdb-dev and openvdb-user lists, but that may not be right for OpenVDB. He also mentioned that many projects use a rotating triage person who will be responsible for responding and forwarding emails that come into the forum or mailing list. There was support for trying this idea. There was a brief discussion about what to do with GitHub issues, an email redirection could be set up or it could be shut down entirely to avoid confusion. The project still needs clearer documentation about how to contribute and a follow-on announcement to clarify the process to the community. All TSC members to look at Dan's email regarding contribution.md as a first step to improving this. A vote will be scheduled for next time about which services to keep and which ones to discard. 5) Contributing.md After approval from the TSC in a previous meeting, Jeff has submitted a PR to remove the explicit 25% contribution time for committers. There was some confusion about the distinction between committers and TSC members. John clarified with an example that in the situation where a company might have 20 people with committer priviliges, there may be desire to have a subset of these committers represented on the TSC so as not to increase the representation of that company in TSC votes. That is not currently an issue for OpenVDB. 
Ken wanted a change to the wording to ensure that committers were active developers and specifically on the project. John thought that Ken's proposed changes were fine and that there was sufficient ability for the TSC to revoke committer privileges for an individual that has ceased to contribute development to the project. Unanimous vote in favour of Ken's proposed changes to Jeff's PR. 6) Houdini MMB improvements for Mako Templates This PR has been outstanding for some time. Dan has concerns about the code not being tested and allowed to drift so that it was no longer functional. Jeff thought this was the best place to document how to make changes that could be picked up by mako templates and would discourage rewriting of this code in the future. It was decided that Nick would make two tweaks to his PR - to match the branch names that Jeff has chosen for Houdini and to add a #define to disable the code from being compiled by default unless explicitly enabled, then we approve and merge. Dan to bump the email thread with his proposed MMB changes to the mako templates as this was not included as part of this PR. 7) Unifying open-source and native SOPs General discussion of the proposed change. Nick asked about using namespacing as an alternative and Jeff explained some limitations in the namespacing approach such as the operator type bar only being applicable for HDAs and not for compiled SOPs. Dan explained the proposed ophide policy mechanism of using a compile-time flag optionally overriden by a run-time environment variable. Nick is concerned about the phases of this proposed solution, particularly about any confusion arising from introducing the interlacing of the label names before the ophide policy has been implemented. No objections raised to tackling this proposal in a different order. Nick and Dan to comment further on the PR. 8) OpenVDB 6.0.0 next steps and announcement OpenVDB 6.0.0 has only undergone a soft release and has yet to be officially announced. 
The main blocker is updating the Doxygen documentation on the website. Discussion followed about changing the process, Thanh suggested an approach using ReadTheDocs or GitHub Pages that might be suitable. Decision made to proceed with this release using the current process and to table further changes to the documentation for a later date. 15) Next Meeting Next meeting is scheduled for March 21st, 2019, 11am-12pm PST.
6,328
Markdown
46.586466
78
0.801675
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2019-02-28.md
Minutes from 9th OpenVDB TSC meeting, Feb. 28, 2019 Attendees: *Ken* M., *Nick* A., *Dan* B., *Peter* C., *Jeff* L. Additional Attendees: Bruce Chernia (Intel), Thanh Ha Retrospective Agenda: 1) Confirmation of quorum 2) Selection of secretary 3) Update from TAC 4) Adding Libraries 4a) Adding Eigen 4b) 3rd party reporting 4c) Adding CLI library 5) Deprecation policy 6) OpenVDB and Houdini Unification 7) Next Meeting 1) A quorum was confirmed. 2) Secretary - Jeff Lait 3) Dan gave an update from the TAC meeting. They are considering Circle CI rather than Travis. Circle Upsides - No 50 min limit; instead a 5hr limit - Can produce build artifacts - 2x as fast (despite same hardware?) - Doesn't lock your step order Circle Downsides - No commit vs PR build. Might need to setup for PR builds only, which may require extra merges to force a PR build before sending to master. - Randomizes job order - 4gb limit. But this can be addressed if non-free version is used. Concerns were raised about the 4gb limit and VDBs notorious use of memory. In any case, decision will be made pending the results of the TACs investigation. 4a) Ken requires Eigen for some new tools so would like to add as dependency. It is header only, and several parties are already using it internally. But it likely can't be confined to a .cpp file. MKL was suggested as an Eigen alternative. While the license is likely now compliant, concerns were raised about the library install size. Ken moved that we adopt Eigen. Dan amended that it should be gated by a USE_EIGEN compile time flag. Passed with unanimous consent. 4b) An ongoing problem is for people to determine what dependencies are required to build VDB. Different people pick different versions of the library, either causing direct build failures, or failures when mixing together. Blosc in particular has issues if latests versions are used. 
Since the CI must already know the true versions for its install, this is a documentation problem to provide it for users. Two action items are: 1) Update the install file to reflect the current versions in the CI 2) Add 3rd party list on the website modeled after: http://www.sidefx.com/docs/houdini/licenses/index 4c) Ken suggested we include the CLI library to make the command line programs less full of boilerplate. boost::options was suggested as another alternative as boost already is included. Suggestion was tabled for later. 5) Dan moved that the deprecation policy forwarded by email should be adopted. Support will consist of current and last two years of the VFX platform. Currently this is 2019, 2018, and 2017. This will make a minimum Houdini version of 16.5. Next year will be 2020, 2019, and 2018, and will unlock C++14. Passed with unanimous consent. 6) Dan moved we adopt the proposals for renaming OpenVDB nodes' user-facing labels to match Houdini's and add a (aswf) suffix, along with built-in policies to allow auto-hiding of duplicate nodes. General agreement, but concerns were raised about the specific names to use. Many have got used to OpenVDB prefixes, so change will cause friction. Likewise, many do not know what "ASWF" means. Though, alternatively, it might be good to start the education about ASWF in this manner. Dan moved that he prepare a PR and we can continue the bike-shedding when we have something concrete. Passed with unanimous consent. 7) Next Meeting Next meeting is scheduled for March 7th, 2019, 11am-12pm PST.
3,496
Markdown
35.051546
74
0.777174
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2019-08-29.md
Minutes from 26th OpenVDB TSC meeting, August 29th, 2019, (GMT) Attendees: *Nick* A., *Dan* B., *Jeff* L., *Ken* M., *Peter* C. Additional Attendees: Sean McDuffee Agenda: 1) Quorum 2) Secretary 3) Round Robin 4) Release Schedule for 6.2.0 5) levelSetVolume 6) VDB Smooth doesn't densify 7) Bitscan/popcount 8) Deprecation 9) Combine SOP Performance 10) Tree Refactoring 11) Delayed Loading 12) Next Meeting 1) Quorum was confirmed. 2) Secretary was Jeff Lait 3) Round Robin A quick round robin to identify what is being worked on and what may make it for 6.2. Ken: Working on velocity extension. Read-Only grid may end up being internal only. Has found interesting results by making a write-only grid that allows better multithreaded scaling than usual multiple grids + merge. Nick: AX integration. Feature branch is hoped to exist in a week or so. As well, CMake as required. Should anisotropic rasterization be higher priority? In response to a later question, MPI will not be available with current priorities. Dan: Point partitioning improvements. C++11 changes. 30-40% better performance on some datasets. Merging of grids is an open question. Likewise, refactoring tree hierarchy and improving combine SOP. Jeff: Hopes to get VDB Activate moved to OpenVDB. 6.1 integration into Houdini 18.0. Peter: Principal axis extraction from volumes, moment of inertia. Sharpen filter. Discussion on sharpen filter. Does support non-level set, consensus was it should be kept regardless of differences with Ken's version. 4) Release Schedule for 6.2.0 No one had any critical items to be done for 6.2.0. 5) levelSetVolume VDB Clip in 6.X leaves different values in inactive voxels, causing volume computation to change. Consensus is that such half-open SDF don't have a proper volume so this is acceptable. Peter notes that there is an error computing volume of entirely empty leaf nodes as empty bounding boxes are not detected by the volume computation. 
6) VDB Smooth doesn't densify This is a known issue. Jeff will submit a Jira ticket on it. Committee differs on whether the right course of action is to add a densify node to Houdini, or change all algorithms to support sparse tiles. However, algorithms that don't support sparse volumes should be documented as such, and the relevant help updated. 7) Bitscan/popcount The TSC was unsure of the performance difference, so recommended asking the submitter to verify a measurable difference. POPCOUNT is the more debatable as SSE 4.2 is more recent (Only with Houdini 17.0 did that become a requirement). We noted that this method does not belong in Utils, but belongs in Tree. No objections to moving to Tree for 7. 8) Deprecation Our deprecation policy was unclear in what happens if we release 7 before the new year. The intent with 7 is to support the 3 VFX platform years 2020, 2019, and 2018, but it might be read that if it is released in 2019 it must support 2019, 2018, and 2017. No objections to allowing releases to base on future VFX platforms. Dan will submit a PR with a clarification. 9) Combine SOP Performance Combining 300 VDBs with maximum ends up entirely single threaded. While the SOP interface allows a list to merge at once, not in the underlying API. May require re-ordering if mixed grid types. 10) Tree Refactoring Clip is particularly problematic as it is in the File operations. This is useful for CUDA/GPU and MRes because it would allow subclassing the internal nodes without as much meaningless boilerplate. But a main motivation is that these are depth-first algorithms, so redoing as breadth first would allow multithreading and performance. Concerns were raised that this will break a lot of client code. Proposal is to provide free floating functions that perform the grid-dispatch internally, to make it easier to transform grid->foo() into foo(grid). Another concern is that this may open too much to the API. 
Current interface provides a nice abstraction to internal details. The counter argument is that these sort of operations should be possible by external code, so this is a good way to find where the API falls short. Is this a good chance to rethink the whole tree? There seems to be some cruft that could be removed as is for obsolete file formats. General agreement that we should always be ready to re-think our structures. 11) Delayed Loading This is still in a state of limbo between the File SOP and VDB. One sticking point has been Windows support. Another is the difficulty to pass options to the IO translator. 12) Next Meeting Next planned meeting is: September 12th 2019. 3pm-4pm EDT (GMT-4).
4,665
Markdown
34.348485
80
0.783923
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2020-11-03.md
Minutes from 69th OpenVDB TSC meeting, Nov 3rd, 2020, (EDT) Attendees: *Nick* A., *Jeff* L., *Ken* M., *Dan* B. Additional Attendees: Johannes Meng (Intel), JT Nelson (Blender), Andre Pradhana (DW), Bruce Cherniak (Intel) Regrets: *Peter* C. Agenda: 1) Confirm quorum 2) Secretary 3) Timezones 4) OpenVDB Grid Layout (Cell centered vs Node centered) 5) Next Meeting 1) Quorum was confirmed. 2) Secretary was Nick Avramoussis 3) Timezones Agreed to move with daylight savings - i.e. whatever the meeting invite says! 4) Node Value Locations (Cell centered vs Node centered) A user has run into issues where the distinction between how values may potentially be stored in OpenVDB numerical grids vs OpenVDB Points grids was not clear. The problem is essentially that methods to retrieve node bounding information from a given VDB do not necessary respect the positioning of cell values. This is especially true for CoordBBoxs whose integer coordinates can either represent the minimum to maximum-1 bounds (Node centered) OR the minimum-0.5 to maximum+0.5 bounds (Cell centered). The distinction between the two is left to the underlying algorithm. Comments were made in regards to the position information of points within a PointDataGrid. Points are stored relative to the cell center; that is their voxel offsets are between -0.5,+0.5 and not 0,1. This difference is irrelevant when considering the fast discarding of candidate nodes; instead one must account for the differences of a volume modeling a continuum and a points grid with discrete data stored potentially throughout a given cell. Jeff observed that the ray intersection code could potentially be incorrect for cell centered values. All agreed that better documentation is a must. Nick, derived or specialized implementation of bounding boxes which represent either state with clearer API methods would also help show this distinction. 
Ken to investigate further and draw up images representing the problem for further discussion and to hopefully include in the docs. 5) Next Meeting Next meeting is November 10th, 2020. 1pm-2pm EDT (GMT-4).
2,118
Markdown
40.549019
80
0.795562
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2018-12-06.md
Minutes from 3rd OpenVDB TSC meeting, Dec. 6th, 2018 Attendees: John M., *Jeff* L., *Nick* A., *Ken* M., *Peter* C., *Dan* B., Andrew P. A quorum was confirmed. Secretary - Jeff Lait 3) Guidelines on Code Reviews We will attempt the CODEOWNER files to automate assignment of initial code reviews of pull requests. Squashing is discouraged unless required for bisection; but will be reviewed in a few months. We probably want documentation explaining how third parties can submit pull requests; the intention is for this to be included in the CONTRIBUTING file whereas the code review guidelines will live in a process subdirectory along with any other TSC-related docs. 3a) Ken will follow up to find out when/how we can get commit bits set for TSC members. 4) VFX Platform The VFX platform has requested a version be ready by mid-December. The ABI6 version has been tested on Clang versions and Houdini versions. It was acknowledged that a December 6.0 would be an extremely light release - substantial upcoming features will not make it in this window. Motion: Release current ABI6 on git, but not make any noise about it. Make noise about it in the new year. Unanimous consent. Action: Peter will merge the ABI6 pull request. 5) Questions for Autodesk The current question list will be shared for another week to see if there is any more input. Dan will pose a question about the potential to use the VDB Point Grid to dangle multi-res off the node structure. Question for Autodesk: What do they think of VDB's LOD structure? Question for us: What is the minimum set of tools for us to be interested? 6) Security Expert We have specific concerns about security, likely limited to file interoperability. This could be added to the Contributing document. In particular, hardening VDB so it can be used as a service is of lower importance than performance. "We take security seriously. Our primary concern is attacks via the .vdb file format." It remains unclear what is required by this role. 
No one declared any formal training in the role, and all are uncertain about the implications of being nominated. In particular, is the security expert seen as having vetted the code? Or are they just the primary contact for security issues? 7) Website Discussed during section 3. Currently the build scripts and 2gb of data needs to be migrated to allow the website to live outside of DW. John suggested he will look into feasibility of moving to github pages. 8) Next Meeting Next meeting will be next week. Unanimous consent.
2,559
Markdown
33.133333
190
0.783509
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2020-01-16.md
Minutes from 38th OpenVDB TSC meeting, January 16th, 2020, (GMT) Attendees: *Nick* A., *Dan* B., *Jeff* L., *Ken* M. Additional Attendees: Bruce Chernia (Intel), Nathan Walster (Framestore), Manuel Gamito (Framestore), Mark Elendt (SideFX), Daniel Lee (SideFX) Regrets: *Peter* C. Agenda: 1) Confirm quorum 2) Secretary 3) Discussion on per-node-meta-data 4) Update on MRes from Bifrost 5) Google Summer Of Code 6) Faster tree-merge based on the recently added addChild methods. 7) Next meeting 1) Quorum was confirmed. 2) Secretary was Nick Avramoussis 3) Background: Various studios are using (or want to use) OpenVDB's data structure for custom rendering of volumes. From previous discussions with Framestore, the TSC understands that some of the statistical data that's needed from VDBs (min/max/average etc) can be cumbersome to compute and store. The TSC has been discussing ways in which the leaf nodes of VDBs could be used to store additional 'metadata'. The aim was to gather a better understanding of the use case and whether these paths could align. The particular rendering technique being discussed uses ray tracing and requires sampling collision distances within the volume. These samples further need to be aware of the localised minimum and maximum values. This localised region is typically the size of a VDB leaf node with a given filter parameter. This means that reductions can map well to a leaf node, but there is no way to store this value alongside the node itself (in the same volume). To solve, additional minimum and maximum volumes are stored alongside the source (surface/density) volume. These extra volumes are computed on write of the final source volume and hold tiled extrema representations of the source. That is, each leaf node of the source grid corresponds to a tile value in each minimum and maximum grid. 
There can be high computation expense necessary to compute these volumes depending on the filter parameters, which can be further influenced by velocity (for motion blur). Additionally, users are able to modify the source grids final values at render time using AX, requiring statistics to be recomputed. An ideal work-flow would involve values not being recomputed in areas where they don't need to be, with the ability to keep them in sync with the source grid using some sort of native VDB data structure. The challenge with this synchronisation is ensuring that other applications know when this data needs to be regenerated, however the implementations in most of the scenarios being discussed are completely custom. Custom tree types are also being leveraged, which would make an AX or native storage solution potentially more complex. It was noted that these custom types may not be necessary and could potentially be dropped in favour of standard vector types. Conclusion: - It would be useful to have faster and more general reduction tools in VDB to compute hierarchal values, with more general approaches allowing users to specify their own filter/reduction requirements. An initial simple example could include new header functionality which takes a VDB and returns tiled/consolidated leaf level values as a new tree. - It would be possible to extend AX to support such reductions. Controls to choose tree level iteration and coordinate lookups would be necessary. - Storing blind data at the leaf node level would provide a native solution for 'linking' custom statistics to their corresponding locations. This would avoid having sidecar files/vdbs to hold this extra data. VDB's LOD structure was also mentioned as a way of providing a more closely coupled set of grids (also see additional notes). The general consensus was that a more specific solution for customizable reduction may be easier to begin with. Jeff to look at Mantras implementation of extrema values and feedback. 
Dan to put together a Google doc with ideas. 4) Update on MRes from Bifrost Ken continues to be in contact with Autodesk who remain committed to the MRes discussion. Ken to implement a simple MRes example provided by Micheal Bang (Autodesk) in VDB's LOD grid structure and share it with Autodesk to help compare and demonstrate the pros of the MRes structure. 5) Google Summer Of Code Dan to put together a Google doc on project ideas. 6) ~Time Additional notes: General consensus that a more extensive (better than just a vector of grids) native way to 'link' or group VDBs together would be extremely useful. In addition to grouping statistical data, a common scenario that comes up in simulations is linking collision SDFs to their collision velocity fields. It becomes difficult on read of a VDB file to infer this link without metadata or relying on naming conventions. Nick to create a ticket describing the problem. 8) Next meeting January 23th 2020. 2pm-3pm EST (GMT-5).
4,881
Markdown
47.82
80
0.797173
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2019-04-18.md
Minutes from 13th OpenVDB TSC meeting, April 18, 2019 NOTE: This meeting was moved one week later from the originally scheduled April 11th and one hour later. Attendees: *Ken* M., *Nick* A., *Dan* B., *Peter* C., *Jeff* L. Additional Attendees: Bruce Chernia (Intel), John Mertic (LF) Agenda: 1) Confirmation of quorum 2) Selection of secretary 3) Response from Autodesk 4) 6.1 Release 5) Copyright/SPDX 6) DSO_TAG_INFO 7) OpenVDB Twitter 8) Next Meeting 1) A quorum was confirmed. Ken chaired. 2) Secretary - Jeff Lait 3) Response from Autodesk Existing mres system doesn't have file io or transform implemented, which means it has a lot to gain by VDB unification. The existing mres system doesn't seem to have canonical formats for template parameters. We've found with VDB that while fully templatable topology was useful for prototyping, in the long run it is infeasible to support multiple options across compilation boundaries - VDB is already notoriously slow to compile with just type instantiation. Mres would need to find ideally one, perhaps two, canonical template instantiations. Mres has multiple roots so can support all of space rather than only a single cube. While Grid/Metadata/Transform/IO may be shared, it seems the Tree is less likely to be shared. Having a different structure, however, justifies integration - if it were truly the same there would be little point to it. The TSC was divided on importance of activation. This is an essential part of VDB and is widely used outside of just narrow band computations to do masking and achieve efficiency. However, activation implies non-present voxels that complicates a lot of algorithms (like blur) possibly unnecessarily. Some were concerned imposing activation may remove some of the benefits of Mres, such as being defined throughout space and/or efficiency. We are still unsure which algorithms are being provided. A critical number likely need to be present to justify including it. 
We need a clearer understanding of how mres is superior to a stack-of-grids. Successful multi-res has been implemented in, for example, SP-grids using stack-of-grids, and VDB seems well suited to a stack-of-grids multi resolution workflow. Ideally some non-trivial algorithm (such as maybe Eikonal equation?) could be shared in the Mres implementation, and we could attempt to re-implement as a stack-of-vdb grids. This would give a good understanding of where Mres is superior and provide some bench marks of speed differences. We will start a new email thread to discuss a response and attempt a conference call when that is ready. SIGGRAPH is proposed as a potential meeting time to discuss this directly. 4) 6.1 Release There is a press release planned for SIGGRAPH for which we want a release. Nick and Dan believe we are ready for a 6.1 release of primarily CMake changes in a few weeks. Pushing 6.1 too long into the future will leave the SIGGRAPH release hollow, however. But getting CMake out of the way would help close a lot of outstanding work. Nick is hopeful AX may make the SIGGRAPH release timeframe which will provide significant updates for that release. Werror PR is mostly done, except outstanding type conversion. Since how to handle type conversions is ambiguous and would delay the PR, instead they should be silenced until a future PR. Circle CI seems strictly better than Travis and we will switch to Circle going forward. Pre-Houdini 16.0 deprecation removes 2.5k LOC so is eagerly awaited. Unanimous consent on sending an email with planned 6.1 Release Candidate PRs. TSC members should comment if they have outstanding issues. If not, will merge by middle of next week. 5) Copyright/SPDX Dan will remove the 2018 to 2019 transition. Peter will seek explicit permission to remove the year from copyright notices so we can stop the yearly bump. SPDX is to be left to later. 
6) DSO TAG INFO It is believed that the DSO_TAG_INFO is unnecessary, so can be removed from CMake builds to avoid breaking compiler caches. Jeff requested verification from production as this has been removed from the 18.0 examples. 7) OpenVDB Twitter Dan has acquired the openvdb twitter handle and will make it available to the TSC for our social media needs. 8) Next Meeting April 25th 2019. 3pm-4pm EDT (GMT-4). Note it will be at the same time as this meeting (moved one hour later to account for NZ non-DST)
4,431
Markdown
36.243697
79
0.789889
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2020-04-02.md
Minutes from 44th OpenVDB TSC meeting, April 2nd, 2020, (GMT) Attendees: *Nick* A., *Dan* B., *Jeff* L., *Ken* M. Additional Attendees: Bruce Chernia (Intel), Johannes Meng (Intel), JT Nelson (Blender), John Mertic (Linux Foundation), Jim Leuper (DW), David Tonnesen (DW), Peter Cheng (DW), Andre Pradhana (DW) Regrets: *Peter* C. Agenda: 1) Confirm quorum 2) Secretary 3) Intel VKL 4) Nano VDB 5) Hashing VDB Grids 6) CI Instability 7) Admin rights 8) Google Summer of Code 9) Houdini 18++ 10) VDB Activate 11) AX language spec out. 12) Improved Morphology, PR #675 13) ASWF Nexus 14) Fast Sweeping 15) Next meeting 1) Quorum was confirmed. 2) Secretary was Jeff Lait 3) Intel VKL Tentative plan for a presentation in 3 weeks. Will announce on the mailing list when ready. 4) Nano VDB Presented at the render council. Linearized tree, not fragmented. Uses grid as native BVH. DDA rewritten for this. Much faster than current, even on CPU. 1.2 GigaRay/second on dragon. On par with triangle meshes. In talks to release it. Min/max per node. Active bbox per node. Some of the DDA could port back to mainline VDB. 5) Hashing VDB Grids Is hashing useful? Need a fast hash function, and a cryptographic hash. Ideally hash data separate from metadata. We should provide the architecture to plug in a hash function that is multithreaded & knows about active voxels, and how to sequence the node hashes. This was already brought up on a previous TSC meeting. 6) CI Instability Reasons CI is broken: a) Out of disk space building Houdini dsos. Sometimes we get extra space so it doesn't fail, but if we get the actual official limits it fails. Need an auto-deleter as we build dsos. Maybe remove symbols? We have a PR to remove the debug build, this should be approved by someone on the TSC. b) TBB version was broken in vcpackage. Fixed temporarily until github actions fixes it. c) On Windows, cpu-timer unit test. Concern is if the timer is a steady timer. Or is thread-safe. Need to double check this. 
Unit tests with timers in general are a problem. Dan will split up the unit test change from the chrono changes and investigate if the new timer is thread safe now. 7) Admin rights Dan moves he gets admin rights to OpenVDB, contingent on him not turning off CLA checks. Ken, Nick, Jeff, and Dan vote aye, no one disagrees. This motion passes. 8) Google Summer of Code 10 proposals, 1 of which passes initial cut. Student wants to improve delayed loading on windows. Likely based in India. We have to choose two mentors. Likely need Nick for similar timezone. Talk about in next meeting. April 20th deadline to decide. 9) Houdini 18++ Next major version of Houdini will have at least VDB 7.0.0. 10) VDB Activate How much should be changed to make a PR? Brace correction? Jeff to do a PR of current version, we will then decide how much prettying needs to be done to bring it in. 11) AX language spec out. This is still being written, but the original spec for bikeshedding can be found here: https://idclip.github.io/openvdbax-doxygen/ax.html Nick will send around a set of high level questions that are known issues. 12) Improved Morphology, PR #675 Greatly improves speed of dilating more than 8 voxels. Google sheet on the PR with some timings. Mostly a change of framework. 13) ASWF Nexus Only OpenVDB is using it. Do we really need it? We have >100 MB files that we can host somewhere. If we can be given new links, we'll reset the links. ASWF will move to S3 and give us new links. All four TSC members at the meeting explicitly approve of this transition. 14) Fast Sweeping Version at DW has issues, but are improved and fixed in the new one. Ken has a PR almost ready. 15) Next Meeting: April 9th 2020. 2pm-3pm EDT (GMT-4).
3,808
Markdown
33.627272
295
0.754989
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2021-01-05.md
Minutes from 74th OpenVDB TSC meeting, Jan 5th, 2021, (EDT) Attendees: *Nick* A., *Jeff* L., *Ken* M., *Dan* B. Additional Attendees: JT Nelson (Blender), Andre Pradhana (DW), Bruce Cherniak (Intel) Regrets: *Peter* C. Agenda: 1) Confirm quorum 2) Secretary 3) GitHub Issues 4) SIGGRAPH Course 5) VDB 7.2.1 Release and VDB 8.0.0 Release 6) Extrapolate 7) ASWF Video 8) Misc ground work 9) Next Meeting 1) Quorum was confirmed. 2) Secretary was Jeff Lait 3) GitHub Issues PR#903: Wrong distance sign computation issue. Touching spheres have a vanishing gradient at the tangent. This means the Eikonal equation fails. But the smoothing may also be responsible. If you have no self intersections, openvdb is not necessarily best for converting to SDF. The mesh is only treated as a suggestion as we work around many common errors. If you look at plane of symmetry between two spheres, the gradient is less than one. And as you go to the center it goes to zero. Just because you have an SDF does not imply length of normal is always one. Maybe we need an assume manifold option that works only with correct meshes. Jeff will add a link to the meeting notes. 4) SIGGRAPH Course Do we go for short or long course? Ken, Dan, and Jeff confirmed. But AX isn't brand new? But courses are not for solely new stuff. We need a mock-up of the schedule. Nick will confirm in a week. 5) VDB 7.2.1 Release and VDB 8.0.0 Release Patch release was to fix include bug and node manager change. We should write a test to verify the copying is occurring the way we expect. The operator needs to be copied, but accidentally references were left behind. The 8.0.0 release has most stuff. No extrapolate SOP, but look forward to that in 8.1. Release announcement isn't done yet. Ken will do the announcement. The ABI changes for trivial types is guarded by ABI. One issue building with Houdini 18.0, which uses boost 1.6.1. VFX19 is now 1.6.6. 
18.0 uses the older VFX platform, so to comply with 18.0's VFX platform causes a mismatch. If we remove support for a VFX platform, we should remove corresponding versions of Houdini. We will add a note that Houdini18.0 uses boost 1.6.1 so the warning has to be disabled. Note: Post meeting I verified that Houdini 18.0 is VFX19. Houdini 18.0 uses Boost 1.6.1, but since its symbols are hidden it is irrelevant for its compliance to VFX19. (There is a note to that effect in the licenses page) 6) Extrapolate Found a crash bug where multiple fields were updated. Checking to see if it is fixed and will push soon. 7) ASWF Video Embedded version of video, should we put it on the homepage? People are in favour. Can we secure some new images? Approval can be long and slow. We can get ASWF to approve images rather than going internally. Some in DW have already started a process to get permission. Maybe a gallery if we get too many images. 8) Misc ground work a) TBB Deprecations We have hit the wall for this as the deprecations are now gone. There is a comment in LeafBuffer that suggests std::atomic is insufficient, but it appears to have std::memory_order now. It is likely std::memory_release is the correct option. Ken is to investigate this. b) Blosc What are the valid versions? c) PRs to make optional Various PRs to make dependencies optional - try to push these through. c) i) Half.h We could add #include, implement our own, or a software/hardware option. There is a component that uses EXR for output. OpenEXR should be an optional dependency. Do we do a link time, run time, or compile time error? We wanted a pure #define independent header, but that then means that you run-time discover the missing functionality. Addressing the raytracing tool is straightforward. But removing half is a problem - all the website files stop working. How do we include half quickly? What if we just include it? We'd have to namespace it. Jeff is to investigate this. 
d) Release notes clashing Every time you go to make a PR you get conflicts in the release notes as they stomp on each other. e) Automating Releases This is a huge amount of work due to rebuilding documentation. It is rebuilding the doxygen that is painful. Maybe a draft github release that triggers an action to do the doc building. The doc building needs to be done on the cloud, not locally. Should we do more frequent releases? f) Move fully to GTest Move AX over. 9) Next Meeting January 12th, 2021. 1pm-2pm EST (GMT-5)
4,508
Markdown
36.890756
314
0.761091
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2019-06-27.md
Minutes from 22nd OpenVDB TSC meeting, June 27, 2019 Attendees: *Nick* A., *Dan* B., *Jeff* L., *Ken* M. Additional Attendees: John Mertic (LF), Sean McPherson (Intel) Absent: *Peter* C. Agenda: 1) Quorum 2) Secretary 3)
 CII Best Practices / TAC CI meeting 3b) CLA Process 4) Quick PR Review 5) Lambdas and ValueAccessors Unplanned) Regression Tests 6) H18 + ABI=6 7) GCC and Dual ABI
 8) Metadata and .bgeo
 9) Next Meeting 1) Quorum was confirmed. 2) Secretary was Dan Bailey 3) CII Best Practices / TAC CI meeting Making progress towards migration to Azure CI. A CMake issue with GLFW arose from the transition from Ubuntu to CentOS 7. PR has just been merged that addresses this. Waiting on Andrew from LF to setup authentication to produce official ASWF images, then we'll add VFX 2019 CI using Azure. Next steps will be adopting a similar CI workflow for VFX 2018 and VFX 2017. Outstanding item relating to how to handle a Docker image with Houdini in it. This is a legal issue, not a technical issue as these images cannot be public, otherwise users can access the Houdini HDK without signing the EULA. One approach is to use a private repository, Dan has experimented with this and it works well. However as brought up in the CI meeting, this is sub-optimal because it means that anyone using their own fork won't have access to a Houdini build for testing. General agreement that there should be a better solution to this problem. It was noted that the ASWF is actively investigating this and that the LF has also looked into how to resolve this in the past. John to reach out to Daniel Heckenberg to see if he can help with this effort. Current thinking is to install Houdini HDK on every build so as to be able to complete the migration from Circle to Azure. CII Best Practices was discussed in the CI working group meeting yesterday. We're still missing a few areas. Dan has submitted a PR to add the security mailing alias to the website, Peter asked for a minor tweak to the wording. Ken has requested an edit to the security policy document and approved it, this has been merged now. It was noted that other ASWF projects are interested in this document. A question was raised about some of the wording in the CII Best Practices and John clarified that it is acceptable to mark Unmet or N/A to "Suggested" items. Only "Must" items have to be marked Met. 
As a result should be possible to ignore the dynamic analysis section for now as this is not mandatory. Worth adding a comment to any items being marked Unmet though. Still some issues with getting static analysis up-and-running. Dan speculated that this might be a problem to run on every commit because our unit tests take a long time to run with the code coverage flags enabled. Further discussion on this issue put off until Dan has had a chance to properly investigate. 3b) CLA Process DWA and DNeg have digitally signed CLAs. Still awaiting Weta, SideFX and ILM before switching on the automated CLA checks. John to resend the CLA instructions and clarified that the LF will be managing the CLAs once this system is in place. OpenCue are using it, OpenColorIO haven't switched it on yet. 4) Quick PR Review 477 - Dan to follow up with Ken offline, as there appears to be some older commits attached to this PR. 474 - All feedback addressed, ready to be merged once the CI passes. 459 - John's PR to add the maintainers file is failing the CI, but not clear why. Merge anyway. Other PRs require Peter's input so moved on from this agenda point. 5) Lambdas and ValueAccessors Discussion following the recent email thread about how to use ValueAccessors with lambdas. Ken highlighted that provided the ValueAccessor is being used for a reasonable number of accesses, the extra performance cost of construction vs copy construction is almost negligible. Ken pointed out that another typically negligible performance penalty in the ValueAccessor is registering them with the tree which can be optionally disabled. Ken recounted that Peter's original objection was that a user of the library would expect copy-construction to perform a copy. Changing this behavior wouldn't be ideal. In general, no major concerns with changing the copy-constructor of the ValueAccessor anyway, but some brief discussions about alternatives that might be favored instead. Ultimately, no decision made on this. 
Ken proposed we start out by doing some benchmarking. Dan asked about whether using a hash map at the root node as well as an ordered map for faster access performance would alleviate much of the need for the ValueAccessor. Ken replied that this had been attempted in an earlier version of VDB, but that std map performance is fast with a small data set. This is typically the case with the root node due to the high fan-out factor. Nonetheless, the ValueAccessor would still represent a performance increase over a hash map. All in agreement that there are a number of different patterns of how ValueAccessors are used throughout the library and no clear recommendations. It would be worth providing some guidelines and improving the consistency of how this is done, particularly around the use of lambdas. Unplanned) Regression Tests Ken would like to see regression tests included in the library. Weta and SideFX are both using regression tests that involve rendering images and using image difference algorithms. Nick raised that VDB hashing would be a useful feature to add to the library to help with regression testing. Ticket to be made to track this as this has been mentioned before. 6) H18 + ABI=6 Jeff mentioned that the H18 alpha does not yet have a version of OpenVDB with ABI=6. The major sticking point is the issue with how to handle metadata (as discussed in 8). 7) GCC and Dual ABI
 The VFX Reference Platform specifically states to use the old C++ ABI (defining the compiler flag -D_GLIBCXX_USE_CXX11_ABI=0). As a result Houdini is shipped using this ABI. An issue was reported on the OpenVDB forum where a user was using a later version of GCC (or a later OS such as Fedora 23) which uses the newer ABI by default. This was causing confusion because the OpenVDB core library was built with the new ABI but failed in attempting to link against Houdini which was built with the old ABI. We agreed that the best option would be to explicitly define the compiler flag for the old ABI when building the core library for Houdini. 8) Metadata and .bgeo
 Jeff has encountered a regression attempting to upgrade VDB for Houdini where the VDB Clip SOP has started outputting file_ primitive attributes as a result of a bug fix Peter made some time ago. The proposed solution here is to drop all file_ metadata when reading from a .bgeo file. No concerns with this solution. 9) Next Meeting July 11th 2019. 3pm-4pm EDT (GMT-4).
6,836
Markdown
44.58
80
0.794324
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2020-10-20.md
Minutes from 67th OpenVDB TSC meeting, Oct 20th, 2020, (EDT) Attendees: *Nick* A., *Jeff* L., *Ken* M., *Dan* B. Additional Attendees: Johannes Meng (Intel), JT Nelson (Blender), Andre Pradhana (DW), Bruce Cherniak (Intel) Regrets: *Peter* C. Agenda: 1) Confirm quorum 2) Secretary 3) ASWF Questions 4) Screen-sharing 5) Faster CSG Operations (PR785) 6) Review Process Retrospective 7) Next Meeting 1) Quorum was confirmed. 2) Secretary was Dan Bailey 3) ASWF Questions Ken is providing some project-specific questions to the ASWF tomorrow. Various questions posed such as locking-down the tree configuration, usage of the Maya plugin, etc. Any others people think of, please share with Ken before tomorrow. 4) Screen-sharing Screen-sharing is still disabled in our ASWF calls. Ken is joining the meeting using his NVidia Zoom account and believes the host account is the official openvdb gmail account. Ken to look into how to unlock screen-sharing and/or to discuss with John about changing which account is the host account. In this instance, Dan's ILM Zoom account was used to host a new call with screen-sharing enabled. 5) Faster CSG Operations (PR785) Dan re-presented the theory behind this functionality as presented in the Siggraph 2019 OpenVDB Course and gave an overview of the code changes in the PR, answering questions from the TSC as they came up. Changes are organized in the PR by commit. Some areas of discussion and/or investigation include: Ken highlighted that different words of a node mask could in theory be modified concurrently to unlock parallelism across a single node. Nick and Jeff wish to try and resolve the confusion of sometimes using a bool threaded parameter and sometimes using a bool serial parameter across the codebase in general. Using bool serial is probably the right decision here to maintain consistency, but would be nice to fix in an independent effort. Jeff suggested using an enum to maintain backwards compatibility. 
Ken raised that the new DynamicNodeManager class needs more documentation. Dan to address this. Nick raised that there were issues in the past with threading a core method that was previously unthreaded as a result of nested parallelism. Dan to investigate whether there are situations in which construction of the LeafManager or NodeManager happens inside a thread. Jeff and Ken raised concerns with the TreeToMerge class accepting either const or non-const trees and users inadvertently picking the wrong one. The suggestion proposed here is to add a dummy class parameter to each constructor similar to tbb::split to make construction more explicit. Potentially this dummy class could be part of the Types header. Dan to look into making this change. This feature has been deployed at ILM along with the VDB Merge SOP that is not part of this PR. Main item of feedback was that the Tree visitor pattern was previously being used with a const tree as the NodeManager required a non-const tree. Dan has extended the NodeManager to accept a const tree following the implementation of the LeafManager which also does this. This change is now included as part of this PR. Jeff proposes that we aim to approve this PR by the TSC meeting next week. 6) Review Process Retrospective Big changes such as AX, NanoVDB and the PR discussed here are hard to get into the codebase. Bikeshedding occurs and smaller, simpler changes tend to be discussed and merged as priority. Need to keep trying to address how to unblock changes that are hard to review and particularly those that are refactoring large portions of the existing codebase. Public API is most important to review as that can be time-consuming to try and change later. Bugs in the implementation details will often be discovered in due course but that should not hold up features progressing. 
Provided there is decent unit test coverage of new functionality being added and that all of the existing unit tests pass, that should help to lower the barrier to approval. Ken reiterates that we should perceive the master branch in GitHub as in development. Users deploying directly from this take on a fair amount of risk. In general, all in favor of using this live code review process again in these types of cases to help push the project forwards. 7) Next Meeting Next meeting is October 27th, 2020. 1pm-2pm EDT (GMT-4). Jeff will be dressed up for Halloween.
4,408
Markdown
41.39423
80
0.80127
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2020-05-14.md
Minutes from 49th OpenVDB TSC meeting, May 14th, 2020, (EDT) Attendees: *Nick* A., *Jeff* L., *Ken* M., *Dan* B. Additional Attendees: Johannes Meng (Intel), JT Nelson (Blender), Bruce Chernia (Intel), Robin Rowe (Cinepaint) Regrets: *Peter* C. Agenda: 1) Confirm quorum 2) Secretary 3) Fixing CI Builds 4) OpenVDB Houdini / Houdini Utils library 5) VDB Blur with Constant Tiles 6) AX Update 7) VDB SOP vs VDB Create SOP 8) VDB Grid Types 9) Next Meeting 1) Quorum was confirmed. 2) Secretary was Nick Avramoussis 3) Fixing CI Builds The VDB CI was broken due to vcpkg updates. Dan to fix with a new PR. We currently don't receive notifications if the CI fails and there are no cron jobs. Dan to investigate these. It was noted that the vcpkg and homebrew installations are volatile as they are not locked down to particular dependencies. Ideally we'd have VFX platform style images across all OS's, with "latest" CI representing the current style. There exists another PR #708 which also fixes the windows builds and looks to improve them. 4) OpenVDB Houdini / Houdini Utils library Dan, proposal to merge the directory structure of of these two "libraries". The separation was introduced by DW to help manage internal vs external source code. Note VDB View also perform a similar build step (where headers are copied into a temporary location). Discussion tabled until DW representatives are available. 5) VDB Blur with Constant Tiles Jeff, bug reports in mantra due to strange "voxel" artifacts on blurred VDBs which contained active constant tiles and therefore do not blur across node boundaries. Ideal solution would be to only voxelize tiles which are guaranteed to be affected. Ticket created to represent this work OVDB-143. Nick, explicit calls to voxelize would assist in the short term even with the memory side effects. 6) AX Update Plan to have AX exist as a feature branch week beginning 18th May. Blocked on two CMake PRs, #708 and a future CMake change to FindOpenVDB. 
7) VDB SOP vs VDB Create SOP Last outstanding SOP with discrepancies with Side FX's custom implementation. Jeff is aware and will look to upstream. 8) VDB Grid Types Nick, discussion around the VDB math library and VDB's default registered grid types. AX aims to support a subset of its implemented types on VDB types but it's not clear which of these types should be supported. For example, AX support Vec4 point attributes but only Quat attributes are registered. This unveils a further issue with grid/point types which are equivalent but cannot be represented without using the explicit math implementation. This relates to previous discussion about introducing runtime grid types and possible changes to the VDB math library. Questions around which grid types should be registered and how to better allow conversion from compatible types. To be discussed further at future meeting. 9) Next Meeting May 21st 2020. 2pm-3pm EDT (GMT-4).
2,947
Markdown
36.316455
80
0.788938
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2020-01-09.md
Minutes from 37th OpenVDB TSC meeting, January 9th, 2020, (GMT) Attendees: *Nick* A., *Dan* B., *Jeff* L. Additional Attendees: Bruce Chernia (Intel) Regrets: *Ken* M., *Peter* C. Agenda: 1) Confirm quorum 2) Secretary 3) Next Week's Rendering Meeting 4) Google Summer of Code 5) PR Reviews 6) VDB on Windows 7) CMake Configuration 8) Next meeting 1) Quorum was confirmed. 2) Secretary was Jeff Lait 3) Next Week's Rendering Meeting Ken will talk to Framestore and see if we can confirm this. 4) Google Summer of Code There is a thread on TAC mailing list. We need 4-5 summer tasks for 12 weeks with mentorship. Should expect it to take as much time to mentor as it would take to write. Some ideas: * Delayed loading on Windows * CSG operations without rebuilding * Integrate Ken's Levelset Sweeping * Erosion to work better * Blur that doesn't fail at constant tiles * Dilate active values apparently fixes dilate. 5) PR Reviews 562: Looks good, needs us to look at. Ideally needs unit test, but shouldn't be submitter's task. Performance might be a hit, doesn't seem to be, but can't tell. Jeff to second-approve. Nick to merge. 402: Add note to help cards for ASWF SOPs. Peter wished a more complete implementation, but pending that we can go with this one. 579: ABS change shouldn't be done, just the ordering issue. 591: Jeff to Approve Recent MSVC PRs: Min spec changes. We need a CMake check to verify our MSVC version. 2017 is our MSVC requirement. Dan to resubmit one with just the template change, and update the build check for MSVC version. 598: VDB Merge Jeff has not looked at it yet. Should have everything for the inplace all requirements. Draft PR for UI discussion incoming. 6) VDB on Windows VDB on Windows for Houdini: We have no CI for it now. Revisit git hub actions. Jeff to check if there is a settings to switch off github actions for copies of the repos. (Editor: There are) 7) CMake Configuration Ability for OpenVDB to output the cmake module for what it was built for. 
Could be a student task? Text file to describe the cmake for what was built. We would want Houdini to ship with this cmake file to show how it was built. 8) Next meeting This will either be on Rendering or Roadmap. January 16th 2020. 2pm-3pm EST (GMT-5).
2,299
Markdown
28.870129
230
0.755111
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2020-08-04.md
Minutes from 59th OpenVDB TSC meeting, Aug 4th, 2020, (EDT) Attendees: *Nick* A., *Jeff* L., *Ken* M., *Dan* B. Additional Attendees: Johannes Meng (Intel), JT Nelson (Blender), Bruce Chernia (Intel), Peter Cheng (DW), Richard Jones (DNeg), Andre Pradhana (DW) Regrets: *Peter* C. Agenda: 1) Confirm quorum 2) Secretary 3) Forum 4) Open Source Day 5) Fast Sweeping and 7.1 6) NanoVDB 7) Next Meeting 1) Quorum was confirmed. 2) Secretary was Nick Avramoussis 3) Forum Nothing to discuss 4) Open Source Day The ASWF is hosting an Open Source Day, with VDB having a 55 minute slot on the 20th August, 10am PDT, 6pm BST. Nick and Ken to be put down as speakers. First half to comprise of an overview and new features/AX, with the second half opening to an open forum discussion. Ken to draft an abstract and send to Nick for approval. 5) Fast sweeping and 7.1 Ken has made all necessary changes. Nick, some file writes still occurring in the unit tests. Jeff, what happens if you have a single VDB with topologically disconnected surfaces, one which can be processed and one which can't (i.e. the second has an invalid iso crossing). Ken, not sure, will investigate and attempt to implement desirable behaviour, ideally returning a topologically matching VDB (for non-expanding methods) with the first valid disjoint SDF correctly processed. Specifically with fogToSdf, the result should always match in terms of topology. 7.1 to be delayed until this is resolved. 6) NanoVDB Ken presented the latest work on NanoVDB, a project parallel to the OpenVDB project which aims to solve some specific use cases with the data structure. The presentation covered the main aims and features of NanoVDB for the TSC. A vote was held to assess whether NanoVDB should be considered for adoption to the OpenVDB project. Intent to adopt: Unanimous consent. Next steps to come after public announcement. 7) Next Meeting Aug 11th 2020. 2pm-3pm EDT (GMT-4).
1,955
Markdown
30.047619
79
0.771867
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2020-09-01.md
Minutes from 63rd OpenVDB TSC meeting, Sep 1st, 2020, (EDT) Attendees: *Nick* A., *Jeff* L., *Ken* M., *Dan* B. Additional Attendees: Johannes Meng (Intel), JT Nelson (Blender), Andre Pradhana (DW), Ahmed Mahmoud Regrets: *Peter* C. Agenda: 1) Confirm quorum 2) Secretary 3) Forum 4) 7.2 5) NanoVDB 6) GCC 9.1 (PR 769) 7) CpuTimer using C++11 chrono (PR 690) 8) Introducing a "feature/abi8" branch (related to PR 788) 9) Deprecating code (PR 806, StringGrid, Tree::prune(), LeafNode::str(), etc) 10) "Locking down the grid configuration" 11) Extrapolate SOP 12) Next Meeting 1) Quorum was confirmed. 2) Secretary was Dan Bailey 3) Forum Ken to reply to post about large VDBs/OOC. Dan to reply to delayed-loading post. There was a question about static libraries on Windows. Include paths need to be included as a system header using MSVC. Including VDB headers is generating warnings that are suppressed when building the core library. Nick has replied to this qs. 4) 7.2 Nick has merged the PRs that removed the Makefiles and restructured the codebase. Planning on introducing the first PR that brings AX into the master branch, initially the core AX library only, then the Houdini SOP subsequently. 5) NanoVDB NanoVDB probably aiming for an 8.0 integration to allow a little more time for the codebase to mature. Ken currently merging NanoVDB up to the feature branch on a weekly cadence. New work includes a tool for recomputing the grid statistics, foreach, range and invoke wrappers around TBB functionality. Current effort is in adding DirectX support, seems this will be relatively trivial. Some users are asking for documentation around the memory layout. Investigation ongoing to look at replacing Jeff's work with the C library with a more automated approach. Jeff notes that initial compile time is a bit of a problem when using JIT. Matt Pharr has incorporated NanoVDB into PBRT with pleasing results. 
6) GCC 9.1 (PR 769) Addressed concerns Jeff raised with bool specialization for higher-order interpolation schemes by tackling the problem at the source. PR is now ready to be merged. 7) CpuTimer using C++11 chrono (PR 690) Dan investigated thread-safety issues with C++11 chrono. Adopted similar solution to that favored by TBB which is to store the number of microseconds since epoch instead of the time_point struct. This allows for starting and stopping a CpuTimer across multiple threads. PR is now ready to be merged. 8) Introducing a "feature/abi8" branch (related to PR 788) Need somewhere to put ABI=8 development in the run-up to 8.0.0. Solution preferred by all is to use the master branch instead of introducing a new feature branch that needs to be kept in sync. This will be gated by a standard ABI=8 macro but with an additional EXPERIMENTAL flag to reduce chance it will be picked up by accident, similar to the DEPRECATED flag. 9) Deprecating code (PR 806, StringGrid, Tree::prune(), LeafNode::str(), etc) When should we deprecate? Is one minor version sufficient? Consensus that we should be a bit more aggressive with deprecations, users often ignore deprecation warnings anyway. Still nice to give users as much warning as possible. New major version is a good time to drop API support and force users to change their code. We should consider introducing a 7.1.1 patch version with deprecation warnings. All to review the CSG operation PR 785. Can we drop support for duplicateSourceStealable()? Nick still using it heavily. Limitations with current SOP implementation that causes issues with time dependency. Solution is to migrate to DS, but big sweep required to do so. Agreement that we should remove these methods anyway, Jeff/SideFX keen for people to not use this technique now. 10) "Locking down the grid configuration" Dan has implemented explicit template instantiation for the tree hierarchy. 
It speeds up building unit tests by around 30%, but building the core library now slower. It works by suppressing implicit template instantiation whenever tree hierarchy headers are included and explicitly instantiating them once in a source file compiled with the core library. Unfortunately had to be implemented using C preprocessor. More discussion needed, implementation shared as draft PR 813. 11) Extrapolate SOP Andre reports an issue with an assert firing in the fast sweeping code when run in debug mode, despite it working correctly in release mode. Assert in question validates that array indices are non-negative. Andre to share an example with Dan to investigate the root cause. 12) Next Meeting Skipping next week. September 15th, 2020. 1pm-2pm EDT (GMT-4).
4,643
Markdown
38.02521
79
0.789145
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2020-10-27.md
Minutes from 68th OpenVDB TSC meeting, Oct 27th, 2020, (EDT) Attendees: *Nick* A., *Jeff* L., *Ken* M., *Dan* B. Additional Attendees: Johannes Meng (Intel), JT Nelson (Blender), Andre Pradhana (DW), Bruce Cherniak (Intel) Regrets: *Peter* C. Agenda: 1) Confirm Quorum 2) Secretary 3) Paged Array 4) AX 5) Faster Merge 6) Author Field in Headers 7) Next Meeting 1) Confirm Quorum Quorum is present. 2) Secretary Secretary is Jeff Lait. 3) Paged Array Nick's concurrent unit test script manages to consistently crash the paged array test. There was a thread-unsafe method. The current fix is just decreasing the chance of race conditions. There is an atomic counter checking against capacity. But there is no barrier, so two could trigger the capacity check at once. This is only the direct method; the accessor method doesn't have the problem as it does local insertions that it passes on demand, which is mutex locked. The paged array has two APIs to add values. A direct access, and an accessor-style method. Using accessors is almost always faster than the direct method. Having a slow method leads people to think VDB is 100x slower than a direct grid, as they don't try the accessor. Why have an inferior method? The direct access for paged array is not thread safe. So why do we have it? Should we take out non-accessor methods, for this and VDB? What about a single query on a tree? So what about an accessor type that isn't cached? Maybe we should have a set of different accessor types to handle different caching policies? If we lose the direct access, it is much harder for new users to get going. But people keep going with very slow approaches. valueOn accessor is so much slower that it should just be removed. Is most of the time the root hash? Should we re-root the hash to reduce the amount of overlap? Or replace the root hash to be faster? a) Can we make the tree faster? b) Should we specialize the accessor for non-caching methods? Replacing the root hash is tricky. 
We are requiring the sorting property of the root node. We may need to change the root node behaviour depending on the number of entries - small entries could be a raw vector, for example. 4) AX Nick has been working on it. Modulo implementation. Currently does C-standard method. Will change to match on VEX/Python approach. Done and upstreamed. rand() uses boost::rand. Switched to std::. Speed is slower for 32-bit, faster for 64-bit. Currently API is rand and rand32, considering whether 32-bit generator has better distribution than 64-bit. Done and upstreamed. Integer: do we remove short type for local variables? We need to keep the syntax to access the grid types. The same applies to int / long. Not sure yet of performance cost. Literals should be 64-bit. Vec3 local variables are currently 32-bit to match grids, this could be changed to 64-bit with 32-bit as a specifier for grids. The other big question is literals: can 32-bit AX have 64-bit literals? Generally agreed having the L suffix is not a good idea, but how to compute intermediate values is unclear. Short circuit boolean operations. AX doesn't currently short circuit, so if (false && i++) would increment i. VEX doesn't short circuit. This surprises members of the committee, and has been submitted as a bug. CI is now not building LLVM from source but using the docker containers. Most runtime exceptions have been replaced by a logger. The compiler will now return nulls and generate a log if logger provided. Otherwise it will throw exceptions on errors. 5) Faster Merge The feedback has been addressed. Now requires references and the const vs non-const is now deep vs steal. 6) Author Field in Headers What do we do with @author fields? Having a prime author is useful. You don't get that from git. Historically files were committed by one username. Is this an owner or primary authors? Some files have no names as there is no one author as too many have touched the file. 
To avoid potential drama, we have drafted an ad-hoc author policy. We will not remove people from the @author list. If we make a substantial change to a file, we can add ourselves at our discretion. 7) Next Meeting Next meeting is November 3rd, 2020. 12pm-1pm EST (GMT-5).
4,297
Markdown
51.414634
515
0.767745
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2019-12-19.md
Minutes from 36th OpenVDB TSC meeting, December 19, 2019, (GMT) Attendees: *Nick* A., *Dan* B., *Jeff* L., *Ken* M. Additional Attendees: John Mertic (LF) Regrets: *Peter* C. Agenda: 1) Confirm quorum 2) Secretary 3) Road map / AX 4) OpenVDB user mailing list 5) Update regarding PointPartitioning/PointMerging 6) Proposal for alternative workflow for external contributors 7) Dev analytics 8) Timeline for deprecating Makefiles 9) Meeting with Framestore regarding I/O of custom leaf node meta data 10) Next meeting 1) Quorum was confirmed. 2) Secretary was Dan Bailey 3) Road map / AX Nick has a concrete list of things to do before making a feature branch for AX. Intention is to send a list of qs to TSC with links to the documentation. Extensibility is a key concern currently. Priority is to send a full language spec to the TSC as a google doc for discussion. Implementation completion aiming for end of Feb/March. Going through the rest of the Road Map tabled for a later meeting. Ken wishes us to make this a recurring topic. 4) OpenVDB user mailing list Discussion about migrating from Google Groups to groups.io openvdb-user mailing list, most LF projects now using groups.io and general feeling that this would be a good thing to do. Desire to maintain content history during migration, John thinks that is not a problem. Open question about whether to merge openvdb-dev and openvdb-user, but general feeling that both have their uses. Auto-migration of mailing list not generally considered a good idea and this can be used as a chance to cull mailing addresses that are no longer in use. Embedding the panel in the OpenVDB website important but not a blocking issue, John to contact groups.io people about how to achieve this. 5) Update regarding PointPartitioning/PointMerging Dan starting to submit small PRs to build towards this effort, request for more code reviews. 
6) Proposal for alternative workflow for external contributors Dan is suggesting that we set the default branch to something else (develop?) instead of master so that by default, external contributors are not making their contributions directly into the master branch of the repository. This gives us a chance to tweak contributions, add release notes, etc then submit our own PR from develop back to master which will run additional CI checks. TSC members will still have the right to make PRs directly into master. The TSC member that approves an external PR should also be the one that prepares the PR into master, intention is that the changes do not live on develop for long. Open question from Nick about permissions regarding making PRs into master. General agreement to the idea, Dan to flesh out a proposal with more details. 7) Dev analytics John shared the LF efforts with dev analytics (https://lfanalytics.io/projects/ad847f51-3046-4989-b500-f5237ecd49d0/dashboard). Some questions about what is the best metric to use, it primarily relies on Git commits at present, but lines of code also not an ideal metric. 8) Timeline for deprecating Makefiles Still a list of outstanding items for CMake, desire to formalize which are required for us to move away from supporting the old Makefile build system. Need a warning when you use the old Makefiles. Jeff / SideFX still relying on the old Makefiles for now, Ken occasionally uses them. 9) Meeting with Framestore regarding I/O of custom leaf node meta data Proposed date for 16th January to discuss storing min/max mipmap data for delta tracking for rendering. Ken to reach out to Framestore as they are believed to already have an implementation of this to confirm a meeting date. Intention is to add other interested parties to this meeting and focus on this one issue - Pixar, SideFX, etc. 10) Next meeting January 9th 2020. 2pm-3pm EST (GMT-5).
3,819
Markdown
40.521739
81
0.795758
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2020-08-18.md
Minutes from 61st OpenVDB TSC meeting, Aug 4th, 2020, (EDT) Attendees: *Nick* A., *Jeff* L., *Ken* M. Additional Attendees: Johannes Meng (Intel), JT Nelson (Blender), Bruce Chernia (Intel), Peter Cheng (DW), Andre Pradhana (DW) Regrets: *Peter* C, *Dan* B.. Agenda: 1) Confirm quorum 2) Secretary 3) Forum 4) NanoVDB 5) Repository Structure 6) Makefile 7) Next Meeting 1) Quorum was confirmed. 2) Secretary was Jeff Lait 3) Forum Questions about computing averages of leaf nodes. Unclear if the questioner wants it baked in the tree or just computed. "On load" implies it is to be computed on construction. Another post is on large VDBs and Out of Core (OOC). Wants to be able to stream it and unload it. Ken has in the past written a minimal version that does delayed loading and unloading. Loads in only the leaf data into a preset pool. A mistake we've tried to do is make VDB try to do everything. So we kept putting everything in one structure, so it is not ideal for any application. Out of core is substandard, and random access is harmed by the out of core. We're trying to simplify the tree, and reduce dependency. Ken would like to contribute his streamable read only tree at some point. Ken will reply to the thread. 4) NanoVDB Nick and Ken will talk at the OpenSource event where we can talk about it. This will be an alpha release of NanoVDB. There is still another platform that might cause tree changes. This will imply there is a long period where it isn't official. How can we push to it without a pull request? Can do development on a fork. The feature branch on the fork can be done quickly. And this is then pushed up regularly to master. How to make it easy for people to push up changes? AX for example has three repos that are all trying to stay up to date. Previously we had a private repo, that required a sync to the dneg fork. Ken can keep the current private repo private. 
He can manually send synchronization pull requests by copying over from the private repo to the public. It is expected development will occur on the private repo and push to the public for the next few months until it transfers fully. Any PRs to the public repo can be hand-merged by Ken into the private repo to apply. 5) Repository structure. Proposal to change the structure to allow for different versions to mix together for different submodules. Requires headers not be off the root directory, so you can -I include submodules from different locations. Each module can get a subdirectory of its same name to store the C and header files. This will affect existing tooling. Doing this before AX and Nano will be good. We then also end up with a clean source folder. Build system can ensure that the correct componets are built, so you never include anything from the local repo that is supposed to go from the installation on disk. This will break existing PRs so we should make sure they are relatively up to date. The actual change is easy, the implications are harder. All present vote in favour. 6) Makefile Jeff reported Houdini's transition to the CMake from the Makefile. Ideally we support multi-target builds so can build Release and Debug in one configuration. Should CMAKE_POSTFIX_DEBUG have a _d by default. Mac seems to be inconsistent with where to put the 7.1.0 decorator - it goes ahead of the library extension rather than after. We should consider now removing the Makefile. Unclear how many are using it as it currently doesn't work. We should raise it at the OpenSource day to see if there is push back. We are currently letting the Makefile rot. So we should remove it. We will put NanoVDB in raw, with both its Makefile and CMake. We will create a feature branch on Wednesday for NanoVDB. 7) Next Meeting Tuesday, August 25th 2020. 1pm-2pm EDT
3,836
Markdown
56.268656
598
0.773201
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2021-02-02.md
Minutes from 78th OpenVDB TSC meeting, Feb 2nd, 2021, (EDT) Attendees: *Nick* A., *Jeff* L., *Ken* M., *Dan* B, *Andre* P. Additional Attendees: Johannes Meng (Intel), JT Nelson (Blender), Bruce Chernaik (Intel), Laura Lediaev (ImageWorks) Regrets: none Agenda: 1) Confirm Quorum 2) Secretary 3) Blender Update 4) GitHub Issues 5) Removal of EXR from Core 6) Half Update 7) NanoVDB 8) CSG Intersection Fix 9) Update Documentation Process 10) Boost Minimum Requirements 11) AX SOP 12) Next meeting 1) Confirm Quorum Quorum is present. 2) Secretary Secretary is Jeff Lait. 3) Blender Update A brief update on the current integration status of VDB with Blender, a more full update will be done when the documentation and code are properly in sync. 4) GitHub Issues We have an issue with issues. We have too many and it is growing. Some issues are things like tbb 2021 not being supported. Others are things we have already triaged - but they stay on the list. For example, the suggestion for a proxy for meshes. Moving to Jira would clear our Issue list, but mean there are many places to look (and submit) for the current status. We find the Issues are mixture of bugs, ideas, and questions. One option is a discussion tab. But this again is another place to go, and what happens when a discussion becomes a bug? Submitters should not be expected to know where things go. There is a flow of new issues from Unprocessed to In Process to Discussed. Should we make this explicit so we can ensure all issues have been Discussed? There is a risk that if we leave discussed issues active, our project looks incompetent as there are many open issues. Bug vs Enhancement labels is something we should probably add as a result of discussion rather than from the submitter. The How To Submit Issues should talk about how we use labels and what they mean. Do we want a tag for issues we are currently working on? We decided to continute the discussion offline. 
Nick will provide an initial google doc seeded with the Jira workflow. 5) Removal of EXR from Core Problematic as people use it. However, known use cases are with the command line render tool, which will be unaffected. Could we have it still kept without support in our CMake? But this leaves people little better off, if they can alter the CMake to add the library support, they could as easily add the explicit saveEXR code from the command line utility. We are now in agreement to remove the saveEXR from the core library. 6) Half Update CMake now configures BuildConfig.h. This stores if you built with EXR half. Still needs to be validated that this works with external Half implementations. It was proposed the flag refer to IMath half rather than Exr as it is planned to move to an external library. Or maybe it should be an external half flag? 7) NanoVDB How to store half in memory? Could quantize on statistics? Ie, the leaf nodes know their min/max, so if had 8bits of precision could store values within that. If the quantization is stored per leaf node, this would require the codec switch to be done per leaf node. The suggestion is instead that the entire tree gets quantized with a fixed codec. It was noted you need to dither the quantization. Raytracing soft fog can become very sensitive to quantization jumps. It was pointed out Bayer dithering does not work for this as it is optimal for area integrals, but volumes need to be optimal for arbitrary line integrals. NanoVDB stores tile offsets rather than byte offsets, so cannot have varying codec in the leaf nodes as leave nodes must be fixed size. Points tried varying codecs, but not very useful. Maybe leave room for a codec per leaf node? Maybe a global range to avoid jumps in quality between neighbour leaves. 
Having each leaf have its own quantization can result in neighbour leaves having very different qualities - imagine a single stray 1000 value that crushes low values around it; versus a leaf with only low density that is preserved. Out of range values could then be clamped, letting an artist control quality with an a priori quality metric. A big question is do we have half as a type or a codec? Discussion is tabled until a later meeting. 8) CSG Intersection Fix Fix is complete. Resolves the root node problem. Updated the old unit tests and verified against the old composite header. One change is to make the operations commutative in an rare case, but this is likely more correct. Does this need migration documentation? The migration document is required for the leaf manager, not for this. This merely fixes what should have been a drop in replacement. 9) Update Documentation Process The 7.2.2 update is a chance to streamline this. Will try to build documentation via github actions. Goal is to get into github pages. Then website can point to the github pages. 10) Boost Minimum Requirements We need to support 1.66, but do not need to prohibit 1.61. While Houdini is technically correct to have 1.61 in 18.0 because it is hboost, not boost, in practice we pass void * across from the built OpenVDB with the native Houdini OpenVDB; so if there are any internal boost structures they need to be binary compatible. So moving forward the Houdini boost should sync with the vfx reference platform even though it is hboosted. 18.5 has already moved forward. In the short term we can change the cmake to only require 1.61 so you can use 18.0. We should consider adding a CI test for Houdini 18.0 compatibility to explicitly use 1.61 rather than 1.66, however, to verify we do not introduce a 1.61 incompatibility. 11) AX SOP Ready to go, needs approval 12) Next meeting Next meeting is Feb 9th, 2021. 12pm-1pm EST (GMT-5).
5,753
Markdown
54.864077
618
0.78081
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2018-11-30.md
Minutes from 2nd OpenVDB TSC meeting, Nov. 30, 2018 Attendees: Nick A., Andrew P., Ken M., Jeff L., John M., Dan B., Peter C., Thanh H. A quorum was confirmed. Secretary - Ken Museth. It was decided that in the future this role will be rotating among the TSC members. 3) Autodesk (specifically the Bifrost team) has reached out to Ken regarding a proposal to adopt their multi-resolution grid into OpenVDB. The committee expressed interest in investigating this further and will compose a set of questions that will help decide if this is feasible and beneficial. Ken will collect these questions and forward them to Autodesk. 4) Currently only Peter C. has permission to merge pull-requests but the committee decided that this privilege should be granted to all TSC members. Thanh H can facilitate this but will need to check with the TAC first. All TSC members will email their GitHub emails to Ken and he will forward them to Thanh H. 5) Dan B. informed the committee of some ongoing challenges related to our transition to the new Continuous Integration system (Travis to Jenkins) and build system (make to cmake). Dan suggested to downgrade the failure threshold momentarily and Thanh H will configure it. 6) Andrew brought up the need for a dedicated “Security Expert” on the project. The committee is requesting more information about what exactly this means and John M agreed to do some research, including to see if the Linux Foundation offers any course on this subject matter. We also agreed to revisit this topic again. 7) Ken wanted confirmation that we plan to offer binary distributions of OpenVDB in the future - especially for Windows. Thanh H. confirmed that this is in fact the plan. 8) Ken raised the general question of copyright notices in our source code. DreamWorks will retain this notice in existing files, and newly added files will specify the relevant copyright (either of the developer or their organization as appropriate). Jeff L. 
agreed to write up a template that we will add to the coding standards. 9) Dan B. is still waiting to hear back from the VFX Platform group about a December release of version 6. His ABI changes have been reviewed and approved by Nick A. and Jeff L. agreed to try and build them at his end. Once merge privileges have been granted to the TSC we can go ahead to merge and release v6.0 - assuming we get the OK from the VFX Platform. Else there seems little rush to make this release and we could potentially open up for more features to be included in v6.0. 10) Our next TSC meeting is scheduled for next Thursday Dec 6, 2018 2pm-3pm. John M. also offered to set up a mailing-list for the TSC.
2,667
Markdown
58.288888
99
0.790026
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2019-06-06.md
Minutes from 19th OpenVDB TSC meeting, June 6, 2019 Attendees: *Ken* M., *Nick* A., *Dan* B., *Peter* C., *Jeff* L. Additional Attendees: Bruce Chernia (Intel) Agenda: 1) Quorum Secretary 2) Secretary Selection 3) CI Update 4) Plans for 6.2 Non-Planned A) Rasterization Non-Planned B) Response to Autodesk 5) TestUtil::testPagedArray 6) OpHiding Next Steps Non-Planned C) Retrospective 7) Delayed Load Update 8) Version/Namespacing SOPs 9) Speed of hasActiveTiles/voxelizeActiveTiles 10) GCC and Dual ABI 11) Memory Allocators 12) Topology Replace 13) Schedule Next Meeting 1) A quorum was confirmed 2) Secretary - Dan Bailey 3) CI Update The ASWF is likely going to be selecting Azure Pipelines as their favored CI service so we are investigating moving to this. Azure Pipelines has been enabled as a GitHub Check and Dan has submitted a PR to switch on everything that currently builds in Circle. This works a little differently in Azure than Circle, because it uses Docker containers to store pre-installed dependencies to accelerate the startup time and avoid re-installing all our dependencies from scratch. The next step is to consolidate the Houdini installations into a single docker container and to make this docker repository private, as we cannot distribute releases of Houdini in a public repository, because it requires signing a EULA. DockerHub individuals have access to one private repository, whereas DockerHub organizations have none so without paying a regular monthly subscription, the Houdini repository will need to live under Dan's account for the time being. Longer term, the intention is to build using DockerHub images published through the ASWF, though that's not included as part of this initial migration. 4) Plans for 6.2 No precise date is present at the moment. Siggraph is still a tentative date. Integration with Houdini++ likely requires a release around that date. Peter reminds the committee that we shouldn't feel under pressure to do this release. 
We should hold off if the release isn't ready or there are not sufficient changes for it to be worthwhile. In turn each committee member listed what they would like to contribute to 6.2. Peter is looking to include a new sharpening tool and SOP. PR has been submitted, awaiting feedback from Ken. Jeff was asked about how to best display kernel coefficients in Houdini's UI and replied that SideFX hadn't found an ideal solution to this problem. Ken will release a fix to PagedArray later today. Planning to submit multi-threaded conversion to spheres and velocity extension among a few other tools. He would like to include read-only grid data structure developed at Weta, but thinks it's unlikely to be possible by 6.2. This may be of particular interest to the rendering community. Nick believes surfacing is not feasible in the likely timeframe and is primarily focused on AX. Initial aim is to include AX on a feature branch that is part of the main repository by 6.2. Nick to ask John Mertic about setting up a private repo for the committee to share features that cannot yet be made public. Dan would primarily like to include delayed-loading improvements and VDB SOP unification, both of are submitted as PRs and under active discussion. A secondary goal is to extend the move points API to add merge capability and introduce this functionality to VDB SOPs. Nick to share DNeg's production-proven implementation of merging VDB Points grids to help with this effort. Non-Planned A) Rasterization It was widely acknowledged that rasterization is a hard problem and has many differing requirements resulting in a highly divergent solution space. Jeff gave a brief overview of some of the key considerations and a little history about the various different rasterization tools that Houdini ships with. The TSC members are in agreement that we would like to better solve this problem and it was suggested that Siggraph might provide a good opportunity to discuss this area in more detail. 
Non-Planned B) Response to Autodesk Ken is working on a response to Autodesk and will share a Google Doc with the TSC members shortly. 5) TestUtil::testPagedArray Ken has completed a fix, planning to submit shortly. 6) OpHiding Next Steps An in-depth discussion about this feature was held. The TSC members are currently unable to find consensus on the right approach to take here. However, agreement was found in a number of areas. The TSC remains motivated to resolve the underlying problem where duplicate OpenVDB and VDB SOPs show up in the tab menus as this behavior has caused confusion to artists for many years. There was agreement on the need to provide a link between the open-source SOPs and the native SOPs shipped with Houdini. It was felt that this would be best done in C++ by extending the existing policies and the operator registration. There was no objection to including a startup script in some form. There was agreement that using HScript/OPcustomize was well-suited for simple startup scripts that hide nodes and Jeff confirmed that there was no plans to deprecate HScript. Where any conditional logic were to be included, it was felt that using a pythonrc script would be a better fit. There was a desire to try and provide just one solution to this problem. This was mainly motivated from not wanting to cause undue confusion to artists and developers. It was also agreed that the configurability of the new policy/flags mechanism implemented by Dan in C++ was overkill given that a startup script would be offered. Dan will remove this functionality from the PR. The TSC was largely in agreement that an environment variable was an acceptable mechanism to provide to artists to adjust the hiding policy. The main area of contention was whether to make a startup script the only way to adjust hiding of nodes or whether to also allow this to be configured from C++. 
Dan and Jeff felt that adding an OPcustomize/pythonrc file added an additional point of failure and that it required users to correctly install OpenVDB which shouldn't be taken for granted. Peter and Nick felt that using OPcustomize/pythonrc was the correct way to solve this problem and that VDB should instead follow precedence and advice offered by SideFX in using that mechanism. To make use of this feature, users would be expected to install this additional file. Jeff highlighted an additional problem in how the ASWF label suffix should be added if two different mechanisms for controlling visibility are in use. Peter suggested using an oprename in the start script to solve this there. The next steps are for Dan to re-visit the current implementation and attempt to find some common ground. Peter will look at implementing an alternative solution using pythonrc as a prototype for discussion in the meeting next week or the week after. Non-Planned C) Retrospective The TSC acknowledged that the process of resolving the OpHiding problem wasn't very effective and has resulted in some back-and-forth. Dan and Jeff collaborated on a proposal that didn't receive any feedback at the time. There was an assumption that lack of feedback meant no significant objection to the proposal. Dan put together an initial implementation which has been subsequently changed multiple times and Peter has spent quite a lot of time reviewing. One suggestion was to present this proposal to the TSC members in an earlier meeting, which may have helped elevate some of this discussion earlier. Peter cautioned that sometimes the discussion can only be had once there is an implementation to look at. Ken raised an additional concern - in the past features used to be vetted internally at Dreamworks, proven in production first and then released publicly. With a cross-company collaboration now in-place, much of that vetting is now happening before a feature has been production-proven. 
This may give the TSC less confidence in including it in OpenVDB. 8-12) Time 13) Schedule Next Meeting June 13th 2019. 3pm-4pm EDT (GMT-4).
8,028
Markdown
45.680232
80
0.808047
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2020-12-15.md
Minutes from 73rd OpenVDB TSC meeting, Dec 15th, 2020, (EDT) Attendees: *Jeff* L., *Ken* M., *Dan* B. Additional Attendees: Andre Pradhana (DW) Regrets: *Peter* C., *Nick* A. Agenda: 1) Confirm quorum 2) Secretary 3) TSC Membership 4) Latest Dependencies 5) NodeUnion issues 6) Templated volumeToMesh() 7) Obsolete parms 8) 8.0.0 Release 9) Next Meeting 1) Quorum was confirmed. 2) Secretary was Dan Bailey 3) TSC Membership Ken would like to invite Andre Pradhana to become an official TSC member. Simplest route to achieving this is for Peter Cucka to officially nominate Andre as his DWA replacement. Ken to reach out to Peter. 4) Latest Dependencies Need a CI solution for building the latest dependencies, particularly the GCC compiler and Boost. Current master breaks using GCC 10 due to removing headers related to size_t declaration. Dan has introduced an apt-get solution that builds against GCC 10 and resolved the size_t issue. Would be better to find a way to build against a bleeding edge set of all dependencies. 5) NodeUnion issues There was a breaking ABI change in the NodeUnion changes that Nick has reverted for 8.0 and re-applied with ABI guards. Nick has also looked at expanding the ABI checks to try and better catch this in future. Ken proposed checking data member offsets which would help catch these ABI errors but any modifications to the vtable would still go unnoticed. 6) Templated volumeToMesh() There has been a question about using a templated API to volumeToMesh similar to MeshDataAdaper. A good idea, but not a priority for the TSC and hard to make it work efficiently using the proposal put forward as it will involve frequent resizing. Jeff to reply. 7) Obsolete parms Jeff found an issue with an obsolete parameter in VDB to Spheres SOP that caused reverting of a parameter to the default value and has fixed. We should check if there are any other cases where this comes up. 
8) 8.0.0 Release PRs to merge include PR898 (deprecating old code) once Dan has addressed feedback from Ken and PR839 (find active values improvements) once Ken has addressed feedback from Dan. 8.0.0 release on track to be released before Christmas. 9) Next Meeting Next meeting is January 5th, 2021. 1pm-2pm EST (GMT-5).
2,261
Markdown
30.416666
80
0.783282
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2020-07-28.md
Minutes from 58th OpenVDB TSC meeting, July 28st, 2020, (GMT) Attendees: *Nick* A., *Jeff* L., *Ken* M., *Dan* B. Additional Attendees: Johannes Meng (Intel), Bruce Chernia (Intel), JT Nelson (Blender), Peter Cheng (DW), Andre Pradhana (DW). Regrets: *Peter* C. Agenda: 1) Confirm quorum 2) Secretary 3) Forum Issues 4) Fast Sweeping 5) GCC 9, PRs for 7.1 6) 7.1 Release, who does it? 7) VDB Point Move 8) Leaf manager/Node manager. 9) Next Meeting 1) Quorum was confirmed. 2) Secretary was Jeff Lait 3) Forum Issues None to discuss 4) Fast Sweeping Working on Dan's changes. While benchmarking, surprise arose as the performance improvement does not primarily come from the new sorting, but rather from faster subsequent access to the coordinates in the fast-sweeping kernel. Less memory traffic, and more coherent, makes the raw stencil 2x faster. Should be possible to extend any attribute if you support lerp. So we should support vectors in a single sweep. Andre found convergence better than expected. Extending the Flip! SDF from Houdini. L1 error 7x10-4 for single sweep vs 30 sweeps. A purely convex model could do a single pass which does all 8 sweeps in parallel rather than sequentially. Topologically disjoing domains could be processed entirely independently. Currently if you have a very small sphere with not many points in the cross planes more divisions would be beneficial. This might not work on the GPU where you need to be more aggressive in finding more threads. The extension attribute will be templated, allowing for support of velocity fields. Not looked at introducing a tolerance yet. How to expose a functor into Houdini? Suggestion is to provide a VDB for that purpose. So should be two optional inputs, for a mask and for the functor. Instead, perhaps main input can have both the surface an extrap source fields. Output would be the source field re-created in the space of the surface field. The mask will be used for the region to extrapolate. 
The SDF will be used only for computing isosurfaces within this mask. So mask should be second input. We aren't sure of the tolerance to use. If we have an explicit tolerance that is lower than what the algorithm can achieve, it will never converge. Tolerance support is not a blocker. 5) GCC 9, PRs for 7.1 Ken related his ABI=0 and ABI=1 build issues from CentOS 8. Nick has improved the error messages so it is easier to debug at least. We should make sure gcc 9 works before 7.1 release. We realized we weren't testing gcc in our VFX2021 test, but clang instead. So needed to move the CI to gcc. Some errors are simple, but at least one is tricky. Implicit conversion of bool to int. LOD fails, prolong operators use + rather than |, for example. Ken should try to build LOD tet. Unit tests don't build bool and mask grids. Nick has encountered LOD not working. Can't find the fix. He does not think it should be rushed for 7.1. We should remove the multires grid of boolean from LOD in 7.1. We can fix the tranform grid as it is simpler. But some are concerned this might cause problems. Maybe a way to disable the warning, but the warning doesn't exist in gcc 6. Should we get the Blosc change in? Agreement to send it in. Ken will commit it. 6) 7.1 Release, who does it? Ken will attempt the release following the instructions. 7) VDB Point Move On pause waiting for the VDB Merge. 8) Leaf manager/Node manager. Moved to a 7.2 change. Possibly pushed into a branch? Will be removing the ability to create a node manager from a leaf manager as that got complicated. 9) Next meeting August 4th, 2020. 1pm-2pm EDT (GMT-4).
3,671
Markdown
46.688311
415
0.765459
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2020-03-05.md
Minutes from 42nd OpenVDB TSC meeting, March 5th, 2020, (GMT) Attendees: *Nick* A., *Dan* B., *Jeff* L., *Ken* M. Additional Attendees: Bruce Chernia (Intel), Johannes Meng (Intel), JT Nelson (Blender) Regrets: *Peter* C. Agenda: 1) Confirm quorum 2) Secretary 3) Release notes build section 4) Update on Windows changes 5) Houdini -> VDB voxel size conversion 6) CI and Github Actions 7) LevelSet Tracking 8) Active value deep copy 9) Volume Advection grain size and topology options 10) Next meeting 1) Quorum was confirmed. 2) Secretary was Nick Avramoussis 3) Release notes build section Proposal to separate out the build related release notes from the other release note sub-headings. It was noted that these notes have changed already in the past to adapt to new formatting being needed. Unanimous consensus, with the addition to additionally include any large changes in the highlights section. Nick to reformat the current release notes. 4) Update on Windows changes Nick has been looking through the Windows build and attempting to resolve various issues that have been raised. The following issues in particular are currently being addressed: #627, #624, #620, #611, #603 Main issues with the windows build continue to be the ability to support both the Release and Debug builds in a single configuration as well as both static and shared builds. vdb_view continues to be unsupported, but the work required to fix it is minimal. Ken to connect Nick to the community who have reached out expressing desire to help improve the windows builds. 5) Houdini -> VDB voxel size conversion Nick has reported an issue to SideFX regarding conversion of Houdini DOP volumes to SOPs and subsequently to VDB volumes. The issue that manifests is that a volume which is intrinsically uniform in terms of its voxel size components can end up being represented as a non uniform scale map when converted to VDB. 
This can stop various optimizations from being performed further down the line and can additionally cause unnecessary re-sampling operations to occur. Jeff explained that Houdini volumes to not have an explicit representation of their voxel size. Instead, The voxel size of a Houdini volume is inferred from its transform projected onto a canonical unit cube. This can cause floating point differences to manifest when this value is calculated per component and presented to the user, as well during conversion to VDB. Nick mentioned that an ideal solution would be to be able to query some intrinsic details from the Houdini Volume's configuration and use this information to determine whether checks should be made on the final voxel size computations to ensure they are all equal. Jeff mentioned that Houdini Volumes currently contain no such information and instead proposed that a clamp can instead be used to ensure components match to a given tolerance. Note that whilst this problem originally focused on improving the conversion from Houdini->VDB, this issue could also be addressed by improving the comparisons between various VDB Map types. Nick to verify that the example provided to SideFX exhibits the above behaviour. 6) CI and GitHub Actions Dan has put a request in to the LF to disable appveyor and instead switch to github actions for our CI. Additional discussion around the new develop branch system. Jeff and Dan have both been caught out by having to switch the target branch to Master for TSC PRs. Nick suggested that we all should instead be merging into develop. Dan proposed an ad hoc target branch to instead be created for PRs which require additional changes from the TSC (coined "develop on demand"). Jeff, Ken and Dan voted in favour of this system vs maintaining a develop and master branch, with the condition a test is first demonstrated on an existing PR. 
7) LevelSet Tracking 8) Active value deep copy 9) Volume Advection grain size and topology options Time 10) Additional Discussion Proposal to make IlmBase/OpenEXR optional currently in review but needs more changes. Questions were raised on why this would be useful, however various users of VDB seem to not require the writing and reading of half grids. Ultimately this further progresses the ability to build VDB with less enforced dependencies and makes it easier to build for users who do not need Half support. 11) Next meeting March 12th 2020. 2pm-3pm EDT (GMT-4).
4,396
Markdown
40.87619
87
0.80323
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2019-05-23.md
Minutes from 17th OpenVDB TSC meeting, May 23, 2019 Attendees: *Nick* A., *Dan* B., *Peter* C., *Jeff* L. Additional Attendees: Bruce Chernia (Intel), John Mertic (LF) Regrets: *Ken* M. Agenda: 1) Quorum Confirmation 2) Secretary Selection 3) CI Update (Dan) 4) CLA Update (John) 5) Mainline Remote Branches (Nick) 6) Switch to Relative Header Paths (Peter) 7) OpHiding Next Steps (Dan) 8) GitHub Issues Cleanup / Process for Submitting Bug Reports (Nick/Jeff) 9) VDB Delayed Loading (Dan) 10) Maya Plugin (Nick) 11) Level Target Pruning/Voxelization (Nick) 12) Speed of hasActiveTiles/voxelizeActiveTiles (Nick) 13) Schedule Next Meeting 1) A quorum was confirmed 2) Secretary - Nick Avramoussis 3) CI Update The ASWF is moving towards Azure Pipelines. We will again have to migrate over our CircleCI implementation. Tests using docker have already been put together and show promising results. We'll most likely support both Circle and Azure during Azure adoption, then deprecate and remove Circle. 4) CLA Update Up until now there has been no automatic process whilst we've been waiting for the LF automated CLA system. This new system can be triggered through first commits or pull requests to the repository. The DCO will still be required - there are a bunch of command line signoff tools to help with this, John has compiled a list here: https://github.com/jmertic/dco-org-check#useful-tools-to-make-doing-dco-signoffs-easier TSC members should look to sign the digital CLA as a CLA manager and get signoff from a CLA Signatory from within their organization. Consensus from the TSC was to wait for all 5 TSC members to have gone through the new digital CLA process before enabling it on the OpenVDB repository. 5) Mainline Remote Branches A brief comment about keeping the repository clear of remote branches. Nick to remove the last remote created by Ken and to close off that work. Informal agreement to keep the main repository clear of remote branches bar release branches. 
6) Switch to Relative Header Paths OpenVDB currently uses absolute header paths to pull in other OpenVDB headers. This may be an issue if you already have an installed version of OpenVDB on your system, as those headers could be pulled in during compilation. This was true with the old Make system but may not be an issue anymore with how the CMake is set-up. There are three avenues to explore here - changing the Core library to use relative headers to itself, the plugins to use relative headers to themselves and finally the plugins to use relative header paths to the core library. The latter will impact Jeff's/SESI's Houdini integration. Peter to do the bare minimum update. Nick to investigate if this is still an issue with CMake. 7) OpHiding Next Steps An idea to expand the Houdini menu system via xml/python to provide options to disable/enable OpenVDB Nodes could potentially help users switch between SESI/ASWF nodes interactively, though it is orthogonal to the issue of site set-up. The SESI nodes have different opnames to the ASWF ones which complicates generating an opcustomize file - though the current way around this is hard coding them in the ASWF SOPs so this could potentially be duplicated. The SOPs themselves could also be used to generate the opcustomize script. Dan to investigate command line tooling during or post compilation to achieve this. 8) GitHub Issues Cleanup / Process for Submitting Bug Reports Forums vs GitHub issues. At first glance the forum seems to be mainly for discussion whereas GitHub issues are being used far more for bug reporting. GitHub issues can only be open or closed - there is no intermediate state such as 'Stalled' or 'Awaiting Response' which makes them harder to use for communicating with users and tracking status. We have set no expectations for users who are reporting bugs/issues/questions. 
It was generally agreed to continue monitoring the forums and GitHub issues, creating tickets in Jira from verified posts and closing out GitHub issues which have received no update in over a week. Nick to write up a proposal for this which, once agreed, can also be posted/pinned to the forum. 9) VDB Delayed Loading Currently uses OpenVDB's metadata system to track the enabling/disabling of the improved delayed loading implementation. A more ideal solution would be to expose a hook in the file I/O code, but this will take time to filter down due to ABI/file format compatibility. We don't really want two APIs achieving the same result; it would be good if an attempt at implementing a hook in the file I/O could be proposed to achieve later forward compatibility. It was decided that the metadata should at least be cleaned up on read so it is in no way exposed to the user. We should also investigate warning/skipping on internally known types and ensure the metadata name is prefixed with "__". 10) Maya Plugin There are issues with the Maya plugin, specifically the OpenVDB Visualize node, in Maya's Viewport 2.0. We should create tickets to get Maya into CI and at a minimum produce documentation explaining how to enable the OpenVDB Visualize node in Maya 2018/2019. 11) Level Target Pruning/Voxelization (Nick) Using VDB topology for load balancing operations which are sensitive to the node size used, i.e. voxel, tile, internal node. Nick to send around a proposal for what this implementation could look like and the desired functionality. 12) Speed of hasActiveTiles/voxelizeActiveTiles (Nick) Time. 13) Schedule Next Meeting May 30th 2019. 3pm-4pm EDT (GMT-4).
5,550
Markdown
42.708661
87
0.797117
NVIDIA-Omniverse/ext-openvdb/tsc/meetings/2019-09-26.md
Minutes from 28th OpenVDB TSC meeting, September 26th, 2019, (GMT) Attendees: *Nick* A., *Dan* B., *Jeff* L., *Ken* M., *Peter* C. Agenda: 1) Confirm quorum 2) Select TSC member to take minutes 3) OpenVDB 6.2 postmortem 4) Proposal for new improved interpolates 5) (OVDB-117) MeshToVolume non-deterministic bug 6) Windows CI 7) Update on copyright notices 8) User feedback (usd/python bindings/Maya support)
 9) Plans for ABI=7 1) Quorum was confirmed. 2) Secretary was Nick Avramoussis 3) OpenVDB 6.2 postmortem An issue was reported with the OpenVDB 6.2.0 release which broke ABI compatibility with Houdini. This was reproducible with a vanilla install of the toolkit (on Linux) and manifested during serialization of OpenVDB Point Data Grids into Houdini file formats (.bgeo and subsequent derivations) as a segmentation fault. Generally, the definition of ABI compatibility that we apply to the software is isolated to the Grid and Transform objects. Ken mentioned that the major ABI definition should guarantee a reinterpret cast of the Grid. However the io::StreamMetadata and internally held io::StreamMetadata::Impl objects can also be passed between libraries using OpenVDB. These classes are used for file IO and hold information about the current state of the IO stream. In the above case, the StreamMetadata object was being constructed by Houdini and passed across the library boundary to the writeBuffers() methods. This function is unique for OpenVDB Point serialization in that it accesses auxiliary stream metadata. Changes to the memory layout of the io::StreamMetadata::Impl class made in PR #436 (OVDB-91) meant that the relative position of the auxiliary stream metadata was no longer correct when passed across the library boundary. Similar changes have been made to this class before, but have been fortunate enough to be 1) small enough in bytes that the padding and alignment resulted in the memory layout remaining the same, 2) members after the inserted members were not accessed in the serialization routine or 3) they were only appended to the end of the class. The proposed and agreed upon fix is to move the new members added in #436 to the end of the class and to consider the StreamMetadata object as part of the major ABI. Note that this is not an issue for deserialization. 
The problem arises when creating a VDB using a native SOP and serializing into a .vdb file, or creating the VDB using an ASWF SOP and serializing into a .bgeo file. Despite being cast across the library boundary, the virtual functions still point at the library definition from which the grid was authored. There was discussion in regard to creating tests for problems specifically related to Houdini ABI. Dan suggested that whilst a full Houdini integration test would be useful, it was probably not necessary for testing this particular bug - instead, a binary which links to both the Houdini deployed version of OpenVDB and the CI built OpenVDB could be used to test compatibility. 4) Proposal for new improved interpolates A proposal from Ken to introduce new interpolators. Currently the existing methods in Interpolation.h support zero, first and second order interpolation with collocated and staggered grids. They're implemented as static classes with no members which, whilst providing a very simple and easy to use interface, requires them to fetch all stencil points on every sample call. There are additional issues with some of the staggered implementations which perform unnecessary operations on unused extents. Ken proposed a re-work of the current methods to solve these issues and potentially introduce new third order interpolators. Discussion on how this would differ from the methods available in math/Stencils.h which currently provide various interpolation methods from cached buffers of grid points. It could be possible to use the stencil framework for efficient caching and fetching of values with a separate framework for the interpolation algorithms. Discussion on how introducing new non-static classes would impact the API of methods which are templated on the current static interpolators, as these methods would need to construct these objects. Suggestion would be to update all uses of the interpolation with any new ones and mark the old ones as deprecated. 
Note that these interpolators are currently not being unit tested. Any new interpolators should attempt to match the output of the existing methods exactly. Ken to create a Jira ticket with a description of the proposal and notify the mailing list. 5) (OVDB-117) MeshToVolume non-deterministic bug Nick reported a non-deterministic bug with the MeshToVolume algorithm which is producing different results with the same input. This originally came from a regression test testing a P2LS operation followed by a level set rebuild. OVDB-117 contains a main which has isolated the issue down to the tools::meshToVolume call and, more specifically, to the first threaded operation mesh_to_volume_internal::VoxelizePolygons. Attempts have been made to isolate this down even further, however the suspicion is that it is related to threading and the TLS primitive ID tree which is being used to track triangle visits to individual voxels. Has not been reproduced unthreaded and requires multiple instances of the binary to catch quickly. More testing to be performed. 6) Windows CI A number of GitHub issues regarding the Windows CMake build have been raised. Nick has been attempting to field these; however, it has been challenging without a Windows CI matrix. Dan mentioned that GitHub Actions has recently been made available (open beta). Dan to share an example implementation of GitHub Actions which can be run from a forked repository and potentially share a version for Windows which could be extended. 7) Update on copyright notices DreamWorks are currently in discussions with the Linux Foundation on the best way to proceed with any potential copyright notice changes. Changes to copyright notices stalled on this decision. 8) User feedback (usd/python bindings/Maya support)
 9) Plans for ABI=7 Time. 10) Next Meeting Next planned meeting is: October 10th 2019. 3pm-4pm EDT (GMT-4).
6,206
Markdown
50.29752
80
0.811473