file_path
stringlengths 21
202
| content
stringlengths 19
1.02M
| size
int64 19
1.02M
| lang
stringclasses 8
values | avg_line_length
float64 5.88
100
| max_line_length
int64 12
993
| alphanum_fraction
float64 0.27
0.93
|
---|---|---|---|---|---|---|
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/Platform.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
// For Windows, we need these includes to ensure all OPENVDB_API
// functions/classes are compiled into the shared library.
#include "openvdb.h"
#include "Exceptions.h"
| 255 | C++ | 30.999996 | 64 | 0.768627 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/openvdb.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_OPENVDB_HAS_BEEN_INCLUDED
#define OPENVDB_OPENVDB_HAS_BEEN_INCLUDED
#include "Platform.h"
#include "Types.h"
#include "Metadata.h"
#include "math/Maps.h"
#include "math/Transform.h"
#include "Grid.h"
#include "tree/Tree.h"
#include "io/File.h"
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {

/// Common tree types
// The 5, 4, 3 arguments are the per-level branching parameters of the
// standard four-level Tree4 configuration — presumably log2 dimensions of
// the internal and leaf nodes (see tree::Tree4; TODO confirm exact meaning).
using BoolTree = tree::Tree4<bool, 5, 4, 3>::Type;
using DoubleTree = tree::Tree4<double, 5, 4, 3>::Type;
using FloatTree = tree::Tree4<float, 5, 4, 3>::Type;
using Int32Tree = tree::Tree4<int32_t, 5, 4, 3>::Type;
using Int64Tree = tree::Tree4<int64_t, 5, 4, 3>::Type;
using MaskTree = tree::Tree4<ValueMask, 5, 4, 3>::Type;
using StringTree = tree::Tree4<std::string, 5, 4, 3>::Type;
using UInt32Tree = tree::Tree4<uint32_t, 5, 4, 3>::Type;
using Vec2DTree = tree::Tree4<Vec2d, 5, 4, 3>::Type;
using Vec2ITree = tree::Tree4<Vec2i, 5, 4, 3>::Type;
using Vec2STree = tree::Tree4<Vec2s, 5, 4, 3>::Type;
using Vec3DTree = tree::Tree4<Vec3d, 5, 4, 3>::Type;
using Vec3ITree = tree::Tree4<Vec3i, 5, 4, 3>::Type;
// Note: Vec3STree is defined in terms of Vec3f (single-precision), matching
// the "S" = single-precision naming used by the other aliases here.
using Vec3STree = tree::Tree4<Vec3f, 5, 4, 3>::Type;
// Convenience synonyms for the most commonly used configurations.
using ScalarTree = FloatTree;
using TopologyTree = MaskTree;
using Vec3dTree = Vec3DTree;
using Vec3fTree = Vec3STree;
using VectorTree = Vec3fTree;

/// Common grid types (one Grid instantiation per tree type above)
using BoolGrid = Grid<BoolTree>;
using DoubleGrid = Grid<DoubleTree>;
using FloatGrid = Grid<FloatTree>;
using Int32Grid = Grid<Int32Tree>;
using Int64Grid = Grid<Int64Tree>;
using MaskGrid = Grid<MaskTree>;
using StringGrid = Grid<StringTree>;
using Vec3DGrid = Grid<Vec3DTree>;
using Vec3IGrid = Grid<Vec3ITree>;
using Vec3SGrid = Grid<Vec3STree>;
// Convenience synonyms, mirroring the tree synonyms above.
using ScalarGrid = FloatGrid;
using TopologyGrid = MaskGrid;
using Vec3dGrid = Vec3DGrid;
using Vec3fGrid = Vec3SGrid;
using VectorGrid = Vec3fGrid;

/// Global registration of basic types
OPENVDB_API void initialize();
/// Global deregistration of basic types
OPENVDB_API void uninitialize();

} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_OPENVDB_HAS_BEEN_INCLUDED
| 2,295 | C | 32.275362 | 61 | 0.679303 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/Grid.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#include "Grid.h"
#include <openvdb/Metadata.h>
#include <boost/algorithm/string/case_conv.hpp>
#include <boost/algorithm/string/trim.hpp>
#include <tbb/mutex.h>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
/// @note For Houdini compatibility, boolean-valued metadata names
/// should begin with "is_".
// Metadata keys recognized by GridBase. Boolean-valued entries carry the
// "is_" prefix for Houdini compatibility (see the note above). The META_FILE_*
// entries describe on-disk statistics (see addStatsMetadata/getStatsMetadata).
const char
    * const GridBase::META_GRID_CLASS = "class",
    * const GridBase::META_GRID_CREATOR = "creator",
    * const GridBase::META_GRID_NAME = "name",
    * const GridBase::META_SAVE_HALF_FLOAT = "is_saved_as_half_float",
    * const GridBase::META_IS_LOCAL_SPACE = "is_local_space",
    * const GridBase::META_VECTOR_TYPE = "vector_type",
    * const GridBase::META_FILE_BBOX_MIN = "file_bbox_min",
    * const GridBase::META_FILE_BBOX_MAX = "file_bbox_max",
    * const GridBase::META_FILE_COMPRESSION = "file_compression",
    * const GridBase::META_FILE_MEM_BYTES = "file_mem_bytes",
    * const GridBase::META_FILE_VOXEL_COUNT = "file_voxel_count",
    * const GridBase::META_FILE_DELAYED_LOAD = "file_delayed_load";
////////////////////////////////////////
namespace {

using GridFactoryMap = std::map<Name, GridBase::GridFactory>;
using GridFactoryMapCIter = GridFactoryMap::const_iterator;
using Mutex = tbb::mutex;
using Lock = Mutex::scoped_lock;

/// Grid factory registry plus the mutex that guards it; all accessors in
/// GridBase take the lock before touching the map.
struct LockedGridRegistry {
    LockedGridRegistry() {}
    Mutex mMutex;
    GridFactoryMap mMap;
};

/// Return a pointer to the global registry, constructed on first use
/// (function-local static, so initialization is thread-safe in C++11).
LockedGridRegistry*
getGridRegistry()
{
    static LockedGridRegistry registry;
    // Fixed: this line was corrupted to "return ®istry;" — an HTML-entity
    // mangling ("&reg;" -> '®') of the intended address-of expression.
    return &registry;
}

} // unnamed namespace
/// Return true if a factory has been registered for grids of the given type name.
bool
GridBase::isRegistered(const Name& name)
{
    LockedGridRegistry& registry = *getGridRegistry();
    Lock lock(registry.mMutex);
    return registry.mMap.count(name) > 0;
}
/// Register a factory for grids of the given type name.
/// @throw KeyError if a factory for that name already exists.
void
GridBase::registerGrid(const Name& name, GridFactory factory)
{
    LockedGridRegistry& registry = *getGridRegistry();
    Lock lock(registry.mMutex);
    // emplace() inserts only if the key is absent and reports which case occurred.
    const bool inserted = registry.mMap.emplace(name, factory).second;
    if (!inserted) {
        OPENVDB_THROW(KeyError, "Grid type " << name << " is already registered");
    }
}
/// Remove the factory for the given grid type name, if one was registered.
void
GridBase::unregisterGrid(const Name& name)
{
    LockedGridRegistry& registry = *getGridRegistry();
    Lock lock(registry.mMutex);
    registry.mMap.erase(name);
}
/// Construct a new grid of the given registered type by invoking its factory.
/// @throw LookupError if no factory has been registered for @a name.
GridBase::Ptr
GridBase::createGrid(const Name& name)
{
    LockedGridRegistry& registry = *getGridRegistry();
    Lock lock(registry.mMutex);
    auto found = registry.mMap.find(name);
    if (found == registry.mMap.end()) {
        OPENVDB_THROW(LookupError, "Cannot create grid of unregistered type " << name);
    }
    return found->second();
}
/// Remove every registered grid factory.
void
GridBase::clearRegistry()
{
    LockedGridRegistry& registry = *getGridRegistry();
    Lock lock(registry.mMutex);
    registry.mMap.clear();
}
////////////////////////////////////////
/// Parse a grid-class name (case- and whitespace-insensitive) back into a
/// GridClass enum; unrecognized strings map to GRID_UNKNOWN.
GridClass
GridBase::stringToGridClass(const std::string& s)
{
    // Normalize before comparing against the canonical names.
    std::string str = s;
    boost::trim(str);
    boost::to_lower(str);
    for (GridClass cls : { GRID_LEVEL_SET, GRID_FOG_VOLUME, GRID_STAGGERED }) {
        if (str == gridClassToString(cls)) return cls;
    }
    return GRID_UNKNOWN;
}
/// Return the canonical (lowercase) name for the given grid class.
std::string
GridBase::gridClassToString(GridClass cls)
{
    switch (cls) {
        case GRID_UNKNOWN:    return "unknown";
        case GRID_LEVEL_SET:  return "level set";
        case GRID_FOG_VOLUME: return "fog volume";
        case GRID_STAGGERED:  return "staggered";
    }
    // Out-of-range enum values yield an empty string, as before.
    return std::string();
}
/// Return a human-readable (title-case) menu label for the given grid class.
std::string
GridBase::gridClassToMenuName(GridClass cls)
{
    switch (cls) {
        case GRID_UNKNOWN:    return "Other";
        case GRID_LEVEL_SET:  return "Level Set";
        case GRID_FOG_VOLUME: return "Fog Volume";
        case GRID_STAGGERED:  return "Staggered Vector Field";
    }
    // Out-of-range enum values yield an empty string, as before.
    return std::string();
}
/// Return this grid's class as recorded in the META_GRID_CLASS string
/// metadata, or GRID_UNKNOWN if that field is absent or unparseable.
GridClass
GridBase::getGridClass() const
{
    GridClass cls = GRID_UNKNOWN;
    if (StringMetadata::ConstPtr s = this->getMetadata<StringMetadata>(META_GRID_CLASS)) {
        cls = stringToGridClass(s->value());
    }
    return cls;
}

/// Record the given class in the META_GRID_CLASS metadata field
/// (insertMeta overwrites any existing string value).
void
GridBase::setGridClass(GridClass cls)
{
    this->insertMeta(META_GRID_CLASS, StringMetadata(gridClassToString(cls)));
}

/// Remove the META_GRID_CLASS metadata field, reverting to GRID_UNKNOWN.
void
GridBase::clearGridClass()
{
    this->removeMeta(META_GRID_CLASS);
}
////////////////////////////////////////
/// Parse a vector-type name (case- and whitespace-insensitive) back into a
/// VecType enum; unrecognized strings map to VEC_INVARIANT.
VecType
GridBase::stringToVecType(const std::string& s)
{
    // Normalize before comparing against the canonical names.
    std::string str = s;
    boost::trim(str);
    boost::to_lower(str);
    for (VecType typ : { VEC_COVARIANT, VEC_COVARIANT_NORMALIZE,
        VEC_CONTRAVARIANT_RELATIVE, VEC_CONTRAVARIANT_ABSOLUTE })
    {
        if (str == vecTypeToString(typ)) return typ;
    }
    return VEC_INVARIANT;
}
/// Return the canonical (lowercase) name for the given vector type.
std::string
GridBase::vecTypeToString(VecType typ)
{
    switch (typ) {
        case VEC_INVARIANT:              return "invariant";
        case VEC_COVARIANT:              return "covariant";
        case VEC_COVARIANT_NORMALIZE:    return "covariant normalize";
        case VEC_CONTRAVARIANT_RELATIVE: return "contravariant relative";
        case VEC_CONTRAVARIANT_ABSOLUTE: return "contravariant absolute";
    }
    // Out-of-range enum values yield an empty string, as before.
    return std::string();
}
/// Return example quantity names for the given vector type (for UI display).
std::string
GridBase::vecTypeExamples(VecType typ)
{
    switch (typ) {
        case VEC_INVARIANT:              return "Tuple/Color/UVW";
        case VEC_COVARIANT:              return "Gradient/Normal";
        case VEC_COVARIANT_NORMALIZE:    return "Unit Normal";
        case VEC_CONTRAVARIANT_RELATIVE: return "Displacement/Velocity/Acceleration";
        case VEC_CONTRAVARIANT_ABSOLUTE: return "Position";
    }
    // Out-of-range enum values yield an empty string, as before.
    return std::string();
}
/// Return a sentence describing how vectors of the given type transform.
std::string
GridBase::vecTypeDescription(VecType typ)
{
    switch (typ) {
        case VEC_INVARIANT:
            return "Does not transform";
        case VEC_COVARIANT:
            return "Apply the inverse-transpose transform matrix but ignore translation";
        case VEC_COVARIANT_NORMALIZE:
            return "Apply the inverse-transpose transform matrix but ignore translation"
                " and renormalize vectors";
        case VEC_CONTRAVARIANT_RELATIVE:
            return "Apply the forward transform matrix but ignore translation";
        case VEC_CONTRAVARIANT_ABSOLUTE:
            return "Apply the forward transform matrix, including translation";
    }
    // Out-of-range enum values yield an empty string, as before.
    return std::string();
}
/// Return this grid's vector type as recorded in the META_VECTOR_TYPE string
/// metadata, or VEC_INVARIANT if that field is absent or unparseable.
VecType
GridBase::getVectorType() const
{
    VecType typ = VEC_INVARIANT;
    if (StringMetadata::ConstPtr s = this->getMetadata<StringMetadata>(META_VECTOR_TYPE)) {
        typ = stringToVecType(s->value());
    }
    return typ;
}

/// Record the given vector type in the META_VECTOR_TYPE metadata field
/// (insertMeta overwrites any existing string value).
void
GridBase::setVectorType(VecType typ)
{
    this->insertMeta(META_VECTOR_TYPE, StringMetadata(vecTypeToString(typ)));
}

/// Remove the META_VECTOR_TYPE metadata field, reverting to VEC_INVARIANT.
void
GridBase::clearVectorType()
{
    this->removeMeta(META_VECTOR_TYPE);
}
////////////////////////////////////////
/// Return this grid's name from the META_GRID_NAME metadata field,
/// or the empty string if the field is absent.
std::string
GridBase::getName() const
{
    if (Metadata::ConstPtr meta = (*this)[META_GRID_NAME]) return meta->str();
    return "";
}

/// Store @a name in the META_GRID_NAME metadata field.
/// The field is removed first so that insertMeta cannot throw TypeError
/// if an existing entry had a non-string type.
void
GridBase::setName(const std::string& name)
{
    this->removeMeta(META_GRID_NAME);
    this->insertMeta(META_GRID_NAME, StringMetadata(name));
}

////////////////////////////////////////

/// Return this grid's creator string from the META_GRID_CREATOR metadata
/// field, or the empty string if the field is absent.
std::string
GridBase::getCreator() const
{
    if (Metadata::ConstPtr meta = (*this)[META_GRID_CREATOR]) return meta->str();
    return "";
}

/// Store @a creator in the META_GRID_CREATOR metadata field
/// (remove-then-insert, for the same TypeError-avoidance reason as setName).
void
GridBase::setCreator(const std::string& creator)
{
    this->removeMeta(META_GRID_CREATOR);
    this->insertMeta(META_GRID_CREATOR, StringMetadata(creator));
}
////////////////////////////////////////
/// Return true if this grid is flagged (via META_SAVE_HALF_FLOAT) to have
/// its floating-point voxel values written to file as 16-bit halves.
/// Defaults to false when the field is absent.
bool
GridBase::saveFloatAsHalf() const
{
    if (Metadata::ConstPtr meta = (*this)[META_SAVE_HALF_FLOAT]) {
        return meta->asBool();
    }
    return false;
}

/// Set the META_SAVE_HALF_FLOAT flag (remove-then-insert so insertMeta
/// cannot throw TypeError if an existing entry had a non-bool type).
void
GridBase::setSaveFloatAsHalf(bool saveAsHalf)
{
    this->removeMeta(META_SAVE_HALF_FLOAT);
    this->insertMeta(META_SAVE_HALF_FLOAT, BoolMetadata(saveAsHalf));
}

////////////////////////////////////////

/// Return true unless the META_IS_LOCAL_SPACE flag is set; i.e. the
/// metadata stores the *local*-space flag, and world space is the default.
bool
GridBase::isInWorldSpace() const
{
    bool local = false;
    if (Metadata::ConstPtr meta = (*this)[META_IS_LOCAL_SPACE]) {
        local = meta->asBool();
    }
    return !local;
}

/// Store the inverted flag (local space) in META_IS_LOCAL_SPACE.
void
GridBase::setIsInWorldSpace(bool world)
{
    this->removeMeta(META_IS_LOCAL_SPACE);
    this->insertMeta(META_IS_LOCAL_SPACE, BoolMetadata(!world));
}
////////////////////////////////////////
/// Refresh the file-statistics metadata fields (active-voxel bounding box,
/// in-memory size, active voxel count) from this grid's current state.
/// Existing entries are removed first so insertMeta cannot throw TypeError
/// on a stale entry of a different type.
void
GridBase::addStatsMetadata()
{
    const CoordBBox bbox = this->evalActiveVoxelBoundingBox();
    this->removeMeta(META_FILE_BBOX_MIN);
    this->removeMeta(META_FILE_BBOX_MAX);
    this->removeMeta(META_FILE_MEM_BYTES);
    this->removeMeta(META_FILE_VOXEL_COUNT);
    this->insertMeta(META_FILE_BBOX_MIN, Vec3IMetadata(bbox.min().asVec3i()));
    this->insertMeta(META_FILE_BBOX_MAX, Vec3IMetadata(bbox.max().asVec3i()));
    this->insertMeta(META_FILE_MEM_BYTES, Int64Metadata(this->memUsage()));
    this->insertMeta(META_FILE_VOXEL_COUNT, Int64Metadata(this->activeVoxelCount()));
}
/// Return a new MetaMap containing copies of whichever of the
/// file-statistics fields (bbox min/max, memory bytes, voxel count)
/// are present on this grid; absent fields are simply skipped.
/// @todo Check that the fields are of the correct type?
MetaMap::Ptr
GridBase::getStatsMetadata() const
{
    MetaMap::Ptr ret(new MetaMap);
    for (const char* field : { META_FILE_BBOX_MIN, META_FILE_BBOX_MAX,
        META_FILE_MEM_BYTES, META_FILE_VOXEL_COUNT })
    {
        if (Metadata::ConstPtr m = (*this)[field]) {
            ret->insertMeta(field, *m);
        }
    }
    return ret;
}
////////////////////////////////////////
/// Clip this grid against the given world-space bounding box: the box is
/// converted to index space (node-centered) via the grid's transform, then
/// the virtual clip() does the actual tree pruning.
void
GridBase::clipGrid(const BBoxd& worldBBox)
{
    const CoordBBox indexBBox =
        this->constTransform().worldToIndexNodeCentered(worldBBox);
    this->clip(indexBBox);
}
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
| 10,282 | C++ | 22.693548 | 91 | 0.636452 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/MetaMap.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_METADATA_METAMAP_HAS_BEEN_INCLUDED
#define OPENVDB_METADATA_METAMAP_HAS_BEEN_INCLUDED
#include "Metadata.h"
#include "Types.h"
#include "Exceptions.h"
#include <iosfwd>
#include <map>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
/// Container that maps names (strings) to values of arbitrary types
class OPENVDB_API MetaMap
{
public:
    using Ptr = SharedPtr<MetaMap>;
    using ConstPtr = SharedPtr<const MetaMap>;

    // Underlying storage: ordered map from field name to metadata pointer.
    using MetadataMap = std::map<Name, Metadata::Ptr>;
    using MetaIterator = MetadataMap::iterator;
    using ConstMetaIterator = MetadataMap::const_iterator;
        ///< @todo this should really iterate over a map of Metadata::ConstPtrs

    MetaMap() {}
    MetaMap(const MetaMap& other);
    virtual ~MetaMap() {}

    /// Return a copy of this map whose fields are shared with this map.
    MetaMap::Ptr copyMeta() const;
    /// Return a deep copy of this map that shares no data with this map.
    MetaMap::Ptr deepCopyMeta() const;

    /// Assign a deep copy of another map to this map.
    MetaMap& operator=(const MetaMap&);

    /// Unserialize metadata from the given stream.
    void readMeta(std::istream&);
    /// Serialize metadata to the given stream.
    void writeMeta(std::ostream&) const;

    /// @brief Insert a new metadata field or overwrite the value of an existing field.
    /// @details If a field with the given name doesn't already exist, add a new field.
    /// Otherwise, if the new value's type is the same as the existing field's value type,
    /// overwrite the existing value with new value.
    /// @throw TypeError if a field with the given name already exists, but its value type
    /// is not the same as the new value's
    /// @throw ValueError if the given field name is empty.
    void insertMeta(const Name&, const Metadata& value);
    /// @brief Deep copy all of the metadata fields from the given map into this map.
    /// @throw TypeError if any field in the given map has the same name as
    /// but a different value type than one of this map's fields.
    void insertMeta(const MetaMap&);

    /// Remove the given metadata field if it exists.
    void removeMeta(const Name&);

    //@{
    /// @brief Return a pointer to the metadata with the given name.
    /// If no such field exists, return a null pointer.
    Metadata::Ptr operator[](const Name&);
    Metadata::ConstPtr operator[](const Name&) const;
    //@}

    //@{
    /// @brief Return a pointer to a TypedMetadata object of type @c T and with the given name.
    /// If no such field exists or if there is a type mismatch, return a null pointer.
    template<typename T> typename T::Ptr getMetadata(const Name&);
    template<typename T> typename T::ConstPtr getMetadata(const Name&) const;
    //@}

    /// @brief Return a reference to the value of type @c T stored in the given metadata field.
    /// @throw LookupError if no field with the given name exists.
    /// @throw TypeError if the given field is not of type @c T.
    template<typename T> T& metaValue(const Name&);
    template<typename T> const T& metaValue(const Name&) const;

    // Functions for iterating over the metadata
    MetaIterator beginMeta() { return mMeta.begin(); }
    MetaIterator endMeta() { return mMeta.end(); }
    ConstMetaIterator beginMeta() const { return mMeta.begin(); }
    ConstMetaIterator endMeta() const { return mMeta.end(); }

    void clearMetadata() { mMeta.clear(); }

    size_t metaCount() const { return mMeta.size(); }

    /// Return a string describing this metadata map. Prefix each line with @a indent.
    std::string str(const std::string& indent = "") const;

    /// Return @c true if the given map is equivalent to this map.
    bool operator==(const MetaMap& other) const;
    /// Return @c true if the given map is different from this map.
    bool operator!=(const MetaMap& other) const { return !(*this == other); }

private:
    /// @brief Return a pointer to TypedMetadata with the given template parameter.
    /// @throw LookupError if no field with the given name is found.
    /// @throw TypeError if the given field is not of type T.
    template<typename T>
    typename TypedMetadata<T>::Ptr getValidTypedMetadata(const Name&) const;

    // NOTE(review): no internal locking is visible here — concurrent access
    // presumably requires external synchronization by the caller.
    MetadataMap mMeta;
};
/// Write a MetaMap to an output stream
std::ostream& operator<<(std::ostream&, const MetaMap&);
////////////////////////////////////////
/// Look up a metadata field by name; a missing field yields a null pointer.
inline Metadata::Ptr
MetaMap::operator[](const Name& name)
{
    auto found = mMeta.find(name);
    if (found == mMeta.end()) return Metadata::Ptr();
    return found->second;
}

/// Const overload of the lookup above; a missing field yields a null pointer.
inline Metadata::ConstPtr
MetaMap::operator[](const Name &name) const
{
    auto found = mMeta.find(name);
    if (found == mMeta.end()) return Metadata::ConstPtr();
    return found->second;
}
////////////////////////////////////////
/// Return the field named @a name downcast to metadata type @c T,
/// or a null pointer if the field is absent or of a different type.
template<typename T>
inline typename T::Ptr
MetaMap::getMetadata(const Name &name)
{
    ConstMetaIterator iter = mMeta.find(name);
    if (iter == mMeta.end()) return typename T::Ptr{};
    // To ensure that we get valid conversion if the metadata pointers cross dso
    // boundaries, we have to check the qualified typename and then do a static
    // cast. This is slower than doing a dynamic_pointer_cast, but is safer when
    // pointers cross dso boundaries.
    if (iter->second->typeName() == T::staticTypeName()) {
        return StaticPtrCast<T, Metadata>(iter->second);
    } // else
    return typename T::Ptr{};
}

/// Const overload: same name-based type check and static cast as above.
template<typename T>
inline typename T::ConstPtr
MetaMap::getMetadata(const Name &name) const
{
    ConstMetaIterator iter = mMeta.find(name);
    if (iter == mMeta.end()) return typename T::ConstPtr{};
    // To ensure that we get valid conversion if the metadata pointers cross dso
    // boundaries, we have to check the qualified typename and then do a static
    // cast. This is slower than doing a dynamic_pointer_cast, but is safer when
    // pointers cross dso boundaries.
    if (iter->second->typeName() == T::staticTypeName()) {
        return StaticPtrCast<const T, const Metadata>(iter->second);
    } // else
    return typename T::ConstPtr{};
}
////////////////////////////////////////
/// Like getMetadata(), but throwing instead of returning null:
/// @throw LookupError if the field is absent;
/// @throw TypeError if it is present but not a TypedMetadata<T>.
template<typename T>
inline typename TypedMetadata<T>::Ptr
MetaMap::getValidTypedMetadata(const Name &name) const
{
    ConstMetaIterator iter = mMeta.find(name);
    if (iter == mMeta.end()) OPENVDB_THROW(LookupError, "Cannot find metadata " << name);
    // To ensure that we get valid conversion if the metadata pointers cross dso
    // boundaries, we have to check the qualified typename and then do a static
    // cast. This is slower than doing a dynamic_pointer_cast, but is safer when
    // pointers cross dso boundaries.
    typename TypedMetadata<T>::Ptr m;
    if (iter->second->typeName() == TypedMetadata<T>::staticTypeName()) {
        m = StaticPtrCast<TypedMetadata<T>, Metadata>(iter->second);
    }
    if (!m) OPENVDB_THROW(TypeError, "Invalid type for metadata " << name);
    return m;
}
////////////////////////////////////////
template<typename T>
inline T&
MetaMap::metaValue(const Name &name)
{
typename TypedMetadata<T>::Ptr m = getValidTypedMetadata<T>(name);
return m->value();
}
template<typename T>
inline const T&
MetaMap::metaValue(const Name &name) const
{
typename TypedMetadata<T>::Ptr m = getValidTypedMetadata<T>(name);
return m->value();
}
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_METADATA_METAMAP_HAS_BEEN_INCLUDED
| 7,591 | C | 33.825688 | 95 | 0.676722 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/PlatformConfig.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
///
/// @file PlatformConfig.h
#ifndef OPENVDB_PLATFORMCONFIG_HAS_BEEN_INCLUDED
#define OPENVDB_PLATFORMCONFIG_HAS_BEEN_INCLUDED
// Windows specific configuration
// Windows specific configuration: infer the DLL-vs-static build macros
// from the CRT linkage when the build system does not set them explicitly.
#ifdef _WIN32

    // By default, assume we're building OpenVDB as a DLL if we're dynamically
    // linking in the CRT, unless OPENVDB_STATICLIB is defined.
    // (_DLL is predefined by MSVC when compiling with /MD or /MDd.)
    #if defined(_DLL) && !defined(OPENVDB_STATICLIB) && !defined(OPENVDB_DLL)
        #define OPENVDB_DLL
    #endif

    // By default, assume that we're dynamically linking OpenEXR, unless
    // OPENVDB_OPENEXR_STATICLIB is defined.
    #if !defined(OPENVDB_OPENEXR_STATICLIB) && !defined(OPENEXR_DLL)
        #define OPENEXR_DLL
    #endif

#endif // _WIN32
#endif // OPENVDB_PLATFORMCONFIG_HAS_BEEN_INCLUDED
| 822 | C | 29.48148 | 78 | 0.717762 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/Exceptions.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_EXCEPTIONS_HAS_BEEN_INCLUDED
#define OPENVDB_EXCEPTIONS_HAS_BEEN_INCLUDED
#include <openvdb/version.h>
#include <exception>
#include <sstream>
#include <string>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
/// Base class for all OpenVDB exceptions; stores a preformatted message
/// of the form "<Type>: <details>".
class OPENVDB_API Exception: public std::exception
{
public:
    Exception(const Exception&) = default;
    Exception(Exception&&) = default;
    Exception& operator=(const Exception&) = default;
    Exception& operator=(Exception&&) = default;
    ~Exception() override = default;

    /// Return the stored message; never throws (a failing c_str() is
    /// swallowed and nullptr returned instead).
    const char* what() const noexcept override
    {
        try { return mMessage.c_str(); } catch (...) {}
        return nullptr;
    }

protected:
    Exception() noexcept {}
    /// Build the message as "<eType>: <msg>"; any allocation failure while
    /// formatting is swallowed so construction itself never throws.
    explicit Exception(const char* eType, const std::string* const msg = nullptr) noexcept
    {
        try {
            if (eType) mMessage = eType;
            if (msg) mMessage += ": " + (*msg);
        } catch (...) {}
    }

private:
    std::string mMessage;
};
// Helper macro that stamps out a concrete exception class whose what()
// message is prefixed with the class name (via the # stringizing operator).
// It is #undef'd below once all concrete types have been declared.
#define OPENVDB_EXCEPTION(_classname) \
class OPENVDB_API _classname: public Exception \
{ \
public: \
    _classname() noexcept: Exception( #_classname ) {} \
    explicit _classname(const std::string& msg) noexcept: Exception( #_classname , &msg) {} \
}

// The concrete exception types thrown throughout the library.
OPENVDB_EXCEPTION(ArithmeticError);
OPENVDB_EXCEPTION(IndexError);
OPENVDB_EXCEPTION(IoError);
OPENVDB_EXCEPTION(KeyError);
OPENVDB_EXCEPTION(LookupError);
OPENVDB_EXCEPTION(NotImplementedError);
OPENVDB_EXCEPTION(ReferenceError);
OPENVDB_EXCEPTION(RuntimeError);
OPENVDB_EXCEPTION(TypeError);
OPENVDB_EXCEPTION(ValueError);

#undef OPENVDB_EXCEPTION
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
// Throw the given exception type with a streamed message, e.g.
//     OPENVDB_THROW(KeyError, "duplicate key " << name);
// The message is formatted into a string first; if formatting itself fails
// (e.g. bad_alloc), the failure is swallowed and the exception is still
// thrown, just with an empty message.
#define OPENVDB_THROW(exception, message) \
{ \
    std::string _openvdb_throw_msg; \
    try { \
        std::ostringstream _openvdb_throw_os; \
        _openvdb_throw_os << message; \
        _openvdb_throw_msg = _openvdb_throw_os.str(); \
    } catch (...) {} \
    throw exception(_openvdb_throw_msg); \
} // OPENVDB_THROW
#endif // OPENVDB_EXCEPTIONS_HAS_BEEN_INCLUDED
| 2,131 | C | 23.790697 | 93 | 0.679962 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/Metadata.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_METADATA_HAS_BEEN_INCLUDED
#define OPENVDB_METADATA_HAS_BEEN_INCLUDED
#include "version.h"
#include "Exceptions.h"
#include "Types.h"
#include "math/Math.h" // for math::isZero()
#include "util/Name.h"
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
/// @brief Base class for storing metadata information in a grid.
/// @brief Base class for storing metadata information in a grid.
/// Concrete subclasses are created through a name-keyed factory registry
/// (see createMetadata/registerType below).
class OPENVDB_API Metadata
{
public:
    using Ptr = SharedPtr<Metadata>;
    using ConstPtr = SharedPtr<const Metadata>;

    Metadata() {}
    virtual ~Metadata() {}

    // Disallow copying of instances of this class.
    Metadata(const Metadata&) = delete;
    Metadata& operator=(const Metadata&) = delete;

    /// Return the type name of the metadata.
    virtual Name typeName() const = 0;

    /// Return a copy of the metadata.
    virtual Metadata::Ptr copy() const = 0;

    /// Copy the given metadata into this metadata.
    virtual void copy(const Metadata& other) = 0;

    /// Return a textual representation of this metadata.
    virtual std::string str() const = 0;

    /// Return the boolean representation of this metadata (empty strings
    /// and zeroVals evaluate to false; most other values evaluate to true).
    virtual bool asBool() const = 0;

    /// Return @c true if the given metadata is equivalent to this metadata.
    bool operator==(const Metadata& other) const;
    /// Return @c true if the given metadata is different from this metadata.
    bool operator!=(const Metadata& other) const { return !(*this == other); }

    /// Return the size of this metadata in bytes.
    virtual Index32 size() const = 0;

    /// Unserialize this metadata from a stream.
    void read(std::istream&);
    /// Serialize this metadata to a stream.
    void write(std::ostream&) const;

    /// Create new metadata of the given type.
    static Metadata::Ptr createMetadata(const Name& typeName);
    /// Return @c true if the given type is known by the metadata type registry.
    static bool isRegisteredType(const Name& typeName);
    /// Clear out the metadata registry.
    static void clearRegistry();
    /// Register the given metadata type along with a factory function.
    static void registerType(const Name& typeName, Metadata::Ptr (*createMetadata)());
    static void unregisterType(const Name& typeName);

protected:
    /// Read the size of the metadata from a stream.
    static Index32 readSize(std::istream&);
    /// Write the size of the metadata to a stream.
    void writeSize(std::ostream&) const;

    /// Read the metadata from a stream.
    virtual void readValue(std::istream&, Index32 numBytes) = 0;
    /// Write the metadata to a stream.
    virtual void writeValue(std::ostream&) const = 0;
};
/// @brief Subclass to hold raw data of an unregistered type
/// @brief Subclass to hold raw data of an unregistered type,
/// preserving the original type name and bytes so the field can be
/// carried through read/write round trips without being understood.
class OPENVDB_API UnknownMetadata: public Metadata
{
public:
    using ByteVec = std::vector<uint8_t>;

    explicit UnknownMetadata(const Name& typ = "<unknown>"): mTypeName(typ) {}

    Name typeName() const override { return mTypeName; }
    Metadata::Ptr copy() const override;
    void copy(const Metadata&) override;
    // No textual form for opaque bytes; report a placeholder when nonempty.
    std::string str() const override { return (mBytes.empty() ? "" : "<binary data>"); }
    bool asBool() const override { return !mBytes.empty(); }
    Index32 size() const override { return static_cast<Index32>(mBytes.size()); }

    void setValue(const ByteVec& bytes) { mBytes = bytes; }
    const ByteVec& value() const { return mBytes; }

protected:
    void readValue(std::istream&, Index32 numBytes) override;
    void writeValue(std::ostream&) const override;

private:
    Name mTypeName;
    ByteVec mBytes;
};
/// @brief Templated metadata class to hold specific types.
/// @brief Templated metadata class to hold specific types.
template<typename T>
class TypedMetadata: public Metadata
{
public:
    using Ptr = SharedPtr<TypedMetadata<T>>;
    using ConstPtr = SharedPtr<const TypedMetadata<T>>;

    TypedMetadata();
    TypedMetadata(const T& value);
    TypedMetadata(const TypedMetadata<T>& other);
    ~TypedMetadata() override;

    Name typeName() const override;
    Metadata::Ptr copy() const override;
    void copy(const Metadata& other) override;
    std::string str() const override;
    bool asBool() const override;
    // Default byte size is sizeof(T); variable-size types (e.g. std::string)
    // specialize this (see the StringMetadata specializations below).
    Index32 size() const override { return static_cast<Index32>(sizeof(T)); }

    /// Set this metadata's value.
    void setValue(const T&);
    /// Return this metadata's value.
    T& value();
    const T& value() const;

    // Static specialized function for the type name. This function must be
    // template specialized for each type T.
    static Name staticTypeName() { return typeNameAsString<T>(); }

    /// Create new metadata of this type.
    static Metadata::Ptr createMetadata();

    static void registerType();
    static void unregisterType();
    static bool isRegisteredType();

protected:
    void readValue(std::istream&, Index32 numBytes) override;
    void writeValue(std::ostream&) const override;

private:
    T mValue;
};
/// Write a Metadata to an output stream
std::ostream& operator<<(std::ostream& ostr, const Metadata& metadata);
////////////////////////////////////////
/// Write this metadata's byte size as a raw Index32 prefix.
/// NOTE(review): raw native-endian bytes — cross-platform byte-order
/// handling, if any, is presumably done elsewhere in the I/O layer.
inline void
Metadata::writeSize(std::ostream& os) const
{
    const Index32 n = this->size();
    os.write(reinterpret_cast<const char*>(&n), sizeof(Index32));
}

/// Read a raw Index32 size prefix (companion to writeSize above).
inline Index32
Metadata::readSize(std::istream& is)
{
    Index32 n = 0;
    is.read(reinterpret_cast<char*>(&n), sizeof(Index32));
    return n;
}

/// Unserialize: read the size prefix, then let the subclass read that
/// many bytes of payload.
inline void
Metadata::read(std::istream& is)
{
    const Index32 numBytes = this->readSize(is);
    this->readValue(is, numBytes);
}

/// Serialize: write the size prefix, then the subclass payload.
inline void
Metadata::write(std::ostream& os) const
{
    this->writeSize(os);
    this->writeValue(os);
}
////////////////////////////////////////
/// Default-construct with a value-initialized T.
template <typename T>
inline
TypedMetadata<T>::TypedMetadata() : mValue(T())
{
}

/// Construct holding a copy of @a value.
template <typename T>
inline
TypedMetadata<T>::TypedMetadata(const T &value) : mValue(value)
{
}

/// Copy-construct; the Metadata base is default-constructed because
/// Metadata itself is non-copyable.
template <typename T>
inline
TypedMetadata<T>::TypedMetadata(const TypedMetadata<T> &other) :
    Metadata(),
    mValue(other.mValue)
{
}

template <typename T>
inline
TypedMetadata<T>::~TypedMetadata()
{
}

/// Return the registry type name for T (delegates to staticTypeName()).
template <typename T>
inline Name
TypedMetadata<T>::typeName() const
{
    return TypedMetadata<T>::staticTypeName();
}

/// Replace the stored value.
template <typename T>
inline void
TypedMetadata<T>::setValue(const T& val)
{
    mValue = val;
}

/// Return a mutable reference to the stored value.
template <typename T>
inline T&
TypedMetadata<T>::value()
{
    return mValue;
}

/// Return a const reference to the stored value.
template <typename T>
inline const T&
TypedMetadata<T>::value() const
{
    return mValue;
}
/// Return a new TypedMetadata<T> holding a copy of this one's value.
template <typename T>
inline Metadata::Ptr
TypedMetadata<T>::copy() const
{
    Metadata::Ptr metadata(new TypedMetadata<T>());
    metadata->copy(*this);
    return metadata;
}

/// Copy the value out of @a other.
/// @throw TypeError if @a other is not a TypedMetadata<T>.
template <typename T>
inline void
TypedMetadata<T>::copy(const Metadata &other)
{
    const TypedMetadata<T>* t = dynamic_cast<const TypedMetadata<T>*>(&other);
    if (t == nullptr) OPENVDB_THROW(TypeError, "Incompatible type during copy");
    mValue = t->mValue;
}

/// Read the value as raw bytes (this->size() of them, ignoring numBytes).
/// NOTE(review): byte-copy deserialization is only meaningful for
/// trivially copyable T; other types rely on specializations such as the
/// StringMetadata::readValue below.
template<typename T>
inline void
TypedMetadata<T>::readValue(std::istream& is, Index32 /*numBytes*/)
{
    //assert(this->size() == numBytes);
    is.read(reinterpret_cast<char*>(&mValue), this->size());
}

/// Write the value as raw bytes (companion to readValue above).
template<typename T>
inline void
TypedMetadata<T>::writeValue(std::ostream& os) const
{
    os.write(reinterpret_cast<const char*>(&mValue), this->size());
}

/// Format the value via operator<< into a string.
template <typename T>
inline std::string
TypedMetadata<T>::str() const
{
    std::ostringstream ostr;
    ostr << mValue;
    return ostr.str();
}

/// A value is "true" unless it compares equal to zero (math::isZero).
template<typename T>
inline bool
TypedMetadata<T>::asBool() const
{
    return !math::isZero(mValue);
}
/// Factory function handed to the metadata registry; returns a
/// default-constructed TypedMetadata<T>.
template <typename T>
inline Metadata::Ptr
TypedMetadata<T>::createMetadata()
{
    Metadata::Ptr ret(new TypedMetadata<T>());
    return ret;
}

/// Register this type's factory under its staticTypeName().
template <typename T>
inline void
TypedMetadata<T>::registerType()
{
    Metadata::registerType(TypedMetadata<T>::staticTypeName(),
        TypedMetadata<T>::createMetadata);
}

/// Remove this type's factory from the registry.
template <typename T>
inline void
TypedMetadata<T>::unregisterType()
{
    Metadata::unregisterType(TypedMetadata<T>::staticTypeName());
}

/// Return true if this type's factory is currently registered.
template <typename T>
inline bool
TypedMetadata<T>::isRegisteredType()
{
    return Metadata::isRegisteredType(TypedMetadata<T>::staticTypeName());
}

/// Booleans print as "true"/"false" rather than via operator<< (1/0).
template<>
inline std::string
TypedMetadata<bool>::str() const
{
    return (mValue ? "true" : "false");
}

/// Stream a metadata's textual form (delegates to Metadata::str()).
inline std::ostream&
operator<<(std::ostream& ostr, const Metadata& metadata)
{
    ostr << metadata.str();
    return ostr;
}
// Convenience aliases for the TypedMetadata instantiations used by the
// library ("S" = single precision, "D" = double precision, "I" = int32).
using BoolMetadata = TypedMetadata<bool>;
using DoubleMetadata = TypedMetadata<double>;
using FloatMetadata = TypedMetadata<float>;
using Int32Metadata = TypedMetadata<int32_t>;
using Int64Metadata = TypedMetadata<int64_t>;
using StringMetadata = TypedMetadata<std::string>;
using Vec2DMetadata = TypedMetadata<Vec2d>;
using Vec2IMetadata = TypedMetadata<Vec2i>;
using Vec2SMetadata = TypedMetadata<Vec2s>;
using Vec3DMetadata = TypedMetadata<Vec3d>;
using Vec3IMetadata = TypedMetadata<Vec3i>;
using Vec3SMetadata = TypedMetadata<Vec3s>;
using Vec4DMetadata = TypedMetadata<Vec4d>;
using Vec4IMetadata = TypedMetadata<Vec4i>;
using Vec4SMetadata = TypedMetadata<Vec4s>;
using Mat4SMetadata = TypedMetadata<Mat4s>;
using Mat4DMetadata = TypedMetadata<Mat4d>;
////////////////////////////////////////
// StringMetadata specializations: strings are variable-size, so the
// generic sizeof(T)/raw-byte implementations do not apply.

/// Byte size is the string's length (no terminator is stored).
template<>
inline Index32
StringMetadata::size() const
{
    return static_cast<Index32>(mValue.size());
}

/// A string's textual form is the string itself (no operator<< round trip).
template<>
inline std::string
StringMetadata::str() const
{
    return mValue;
}

/// Read exactly @a size bytes into the string's storage.
template<>
inline void
StringMetadata::readValue(std::istream& is, Index32 size)
{
    mValue.resize(size, '\0');
    is.read(&mValue[0], size);
}

/// Write the string's bytes without a terminator (size() bytes; for an
/// empty string this writes zero bytes, so the &mValue[0] access is safe).
template<>
inline void
StringMetadata::writeValue(std::ostream& os) const
{
    os.write(reinterpret_cast<const char*>(&mValue[0]), this->size());
}
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_METADATA_HAS_BEEN_INCLUDED
| 10,017 | C | 23.139759 | 88 | 0.690826 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/Grid.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_GRID_HAS_BEEN_INCLUDED
#define OPENVDB_GRID_HAS_BEEN_INCLUDED
#include "Exceptions.h"
#include "MetaMap.h"
#include "Types.h"
#include "io/io.h"
#include "math/Transform.h"
#include "tree/Tree.h"
#include "util/logging.h"
#include "util/Name.h"
#include <cassert>
#include <iostream>
#include <set>
#include <type_traits>
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
using TreeBase = tree::TreeBase;
template<typename> class Grid; // forward declaration
/// @brief Create a new grid of type @c GridType with a given background value.
///
/// @note Calling createGrid<GridType>(background) is equivalent to calling
/// GridType::create(background).
template<typename GridType>
inline typename GridType::Ptr createGrid(const typename GridType::ValueType& background);
/// @brief Create a new grid of type @c GridType with background value zero.
///
/// @note Calling createGrid<GridType>() is equivalent to calling GridType::create().
template<typename GridType>
inline typename GridType::Ptr createGrid();
/// @brief Create a new grid of the appropriate type that wraps the given tree.
///
/// @note This function can be called without specifying the template argument,
/// i.e., as createGrid(tree).
template<typename TreePtrType>
inline typename Grid<typename TreePtrType::element_type>::Ptr createGrid(TreePtrType);
/// @brief Create a new grid of type @c GridType classified as a "Level Set",
/// i.e., a narrow-band level set.
///
/// @note @c GridType::ValueType must be a floating-point scalar.
///
/// @param voxelSize the size of a voxel in world units
/// @param halfWidth the half width of the narrow band in voxel units
///
/// @details The voxel size and the narrow band half width define the grid's
/// background value as halfWidth*voxelWidth. The transform is linear
/// with a uniform scaling only corresponding to the specified voxel size.
///
/// @note It is generally advisable to specify a half-width of the narrow band
/// that is larger than one voxel unit, otherwise zero crossings are not guaranteed.
template<typename GridType>
typename GridType::Ptr createLevelSet(
Real voxelSize = 1.0, Real halfWidth = LEVEL_SET_HALF_WIDTH);
////////////////////////////////////////
/// @brief Abstract base class for typed grids
class OPENVDB_API GridBase: public MetaMap
{
public:
    using Ptr = SharedPtr<GridBase>;
    using ConstPtr = SharedPtr<const GridBase>;

    /// Type of the factory functions the grid registry stores: each one
    /// returns a new, empty grid of its registered type.
    using GridFactory = Ptr (*)();

    ~GridBase() override {}

    /// @name Copying
    /// @{

    /// @brief Return a new grid of the same type as this grid whose metadata is a
    /// deep copy of this grid's and whose tree and transform are shared with this grid.
    virtual GridBase::Ptr copyGrid() = 0;
    /// @brief Return a new grid of the same type as this grid whose metadata is a
    /// deep copy of this grid's and whose tree and transform are shared with this grid.
    virtual GridBase::ConstPtr copyGrid() const = 0;
    /// @brief Return a new grid of the same type as this grid whose metadata and
    /// transform are deep copies of this grid's and whose tree is default-constructed.
    virtual GridBase::Ptr copyGridWithNewTree() const = 0;
#if OPENVDB_ABI_VERSION_NUMBER >= 7
    /// @brief Return a new grid of the same type as this grid whose tree and transform
    /// is shared with this grid and whose metadata is provided as an argument.
    virtual GridBase::ConstPtr copyGridReplacingMetadata(const MetaMap& meta) const = 0;
    /// @brief Return a new grid of the same type as this grid whose tree is shared with
    /// this grid, whose metadata is a deep copy of this grid's and whose transform is
    /// provided as an argument.
    /// @throw ValueError if the transform pointer is null
    virtual GridBase::ConstPtr copyGridReplacingTransform(math::Transform::Ptr xform) const = 0;
    /// @brief Return a new grid of the same type as this grid whose tree is shared with
    /// this grid and whose transform and metadata are provided as arguments.
    /// @throw ValueError if the transform pointer is null
    virtual GridBase::ConstPtr copyGridReplacingMetadataAndTransform(const MetaMap& meta,
        math::Transform::Ptr xform) const = 0;
#endif
    /// Return a new grid whose metadata, transform and tree are deep copies of this grid's.
    virtual GridBase::Ptr deepCopyGrid() const = 0;

    /// @}

    /// @name Registry
    /// @{

    /// Create a new grid of the given (registered) type.
    static Ptr createGrid(const Name& type);
    /// Return @c true if the given grid type name is registered.
    static bool isRegistered(const Name &type);
    /// Clear the grid type registry.
    static void clearRegistry();

    /// @}

    /// @name Type access
    /// @{

    /// Return the name of this grid's type.
    virtual Name type() const = 0;
    /// Return the name of the type of a voxel's value (e.g., "float" or "vec3d").
    virtual Name valueType() const = 0;

    /// Return @c true if this grid is of the same type as the template parameter.
    template<typename GridType>
    bool isType() const { return (this->type() == GridType::gridType()); }

    /// @}

    //@{
    /// @brief Return the result of downcasting a GridBase pointer to a Grid pointer
    /// of the specified type, or return a null pointer if the types are incompatible.
    template<typename GridType>
    static typename GridType::Ptr grid(const GridBase::Ptr&);
    template<typename GridType>
    static typename GridType::ConstPtr grid(const GridBase::ConstPtr&);
    template<typename GridType>
    static typename GridType::ConstPtr constGrid(const GridBase::Ptr&);
    template<typename GridType>
    static typename GridType::ConstPtr constGrid(const GridBase::ConstPtr&);
    //@}

    /// @name Tree
    /// @{

    /// @brief Return a pointer to this grid's tree, which might be
    /// shared with other grids.  The pointer is guaranteed to be non-null.
    TreeBase::Ptr baseTreePtr();
    /// @brief Return a pointer to this grid's tree, which might be
    /// shared with other grids.  The pointer is guaranteed to be non-null.
    TreeBase::ConstPtr baseTreePtr() const { return this->constBaseTreePtr(); }
    /// @brief Return a pointer to this grid's tree, which might be
    /// shared with other grids.  The pointer is guaranteed to be non-null.
    virtual TreeBase::ConstPtr constBaseTreePtr() const = 0;
#if OPENVDB_ABI_VERSION_NUMBER >= 8
    /// @brief Return true if tree is not shared with another grid.
    virtual bool isTreeUnique() const = 0;
#endif
    /// @brief Return a reference to this grid's tree, which might be
    /// shared with other grids.
    /// @note Calling @vdblink::GridBase::setTree() setTree@endlink
    /// on this grid invalidates all references previously returned by this method.
    TreeBase& baseTree() { return const_cast<TreeBase&>(this->constBaseTree()); }
    /// @brief Return a reference to this grid's tree, which might be
    /// shared with other grids.
    /// @note Calling @vdblink::GridBase::setTree() setTree@endlink
    /// on this grid invalidates all references previously returned by this method.
    const TreeBase& baseTree() const { return this->constBaseTree(); }
    /// @brief Return a reference to this grid's tree, which might be
    /// shared with other grids.
    /// @note Calling @vdblink::GridBase::setTree() setTree@endlink
    /// on this grid invalidates all references previously returned by this method.
    const TreeBase& constBaseTree() const { return *(this->constBaseTreePtr()); }

    /// @brief Associate the given tree with this grid, in place of its existing tree.
    /// @throw ValueError if the tree pointer is null
    /// @throw TypeError if the tree is not of the appropriate type
    /// @note Invalidates all references previously returned by
    /// @vdblink::GridBase::baseTree() baseTree@endlink
    /// or @vdblink::GridBase::constBaseTree() constBaseTree@endlink.
    virtual void setTree(TreeBase::Ptr) = 0;
    /// Set a new tree with the same background value as the previous tree.
    virtual void newTree() = 0;

    /// @}

    /// Return @c true if this grid contains only background voxels.
    virtual bool empty() const = 0;
    /// Empty this grid, setting all voxels to the background.
    virtual void clear() = 0;

    /// @name Tools
    /// @{

    /// @brief Reduce the memory footprint of this grid by increasing its sparseness
    /// either losslessly (@a tolerance = 0) or lossily (@a tolerance > 0).
    /// @details With @a tolerance > 0, sparsify regions where voxels have the same
    /// active state and have values that differ by no more than the tolerance
    /// (converted to this grid's value type).
    virtual void pruneGrid(float tolerance = 0.0) = 0;
    /// @brief Clip this grid to the given world-space bounding box.
    /// @details Voxels that lie outside the bounding box are set to the background.
    /// @warning Clipping a level set will likely produce a grid that is
    /// no longer a valid level set.
    void clipGrid(const BBoxd&);
    /// @brief Clip this grid to the given index-space bounding box.
    /// @details Voxels that lie outside the bounding box are set to the background.
    /// @warning Clipping a level set will likely produce a grid that is
    /// no longer a valid level set.
    virtual void clip(const CoordBBox&) = 0;

    /// @}

    /// @{
    /// @brief If this grid resolves to one of the listed grid types,
    /// invoke the given functor on the resolved grid.
    /// @return @c false if this grid's type is not one of the listed types
    ///
    /// @par Example:
    /// @code
    /// using AllowedGridTypes = openvdb::TypeList<
    ///     openvdb::Int32Grid, openvdb::Int64Grid,
    ///     openvdb::FloatGrid, openvdb::DoubleGrid>;
    ///
    /// const openvdb::CoordBBox bbox{
    ///     openvdb::Coord{0,0,0}, openvdb::Coord{10,10,10}};
    ///
    /// // Fill the grid if it is one of the allowed types.
    /// myGridBasePtr->apply<AllowedGridTypes>(
    ///     [&bbox](auto& grid) { // C++14
    ///         using GridType = typename std::decay<decltype(grid)>::type;
    ///         grid.fill(bbox, typename GridType::ValueType(1));
    ///     }
    /// );
    /// @endcode
    ///
    /// @see @vdblink::TypeList TypeList@endlink
    template<typename GridTypeListT, typename OpT> inline bool apply(OpT&) const;
    template<typename GridTypeListT, typename OpT> inline bool apply(OpT&);
    template<typename GridTypeListT, typename OpT> inline bool apply(const OpT&) const;
    template<typename GridTypeListT, typename OpT> inline bool apply(const OpT&);
    /// @}

    /// @name Metadata
    /// @{

    /// Return this grid's user-specified name.
    std::string getName() const;
    /// Specify a name for this grid.
    void setName(const std::string&);
    /// Return the user-specified description of this grid's creator.
    std::string getCreator() const;
    /// Provide a description of this grid's creator.
    void setCreator(const std::string&);

    /// @brief Return @c true if this grid should be written out with floating-point
    /// voxel values (including components of vectors) quantized to 16 bits.
    bool saveFloatAsHalf() const;
    void setSaveFloatAsHalf(bool);

    /// @brief Return the class of volumetric data (level set, fog volume, etc.)
    /// that is stored in this grid.
    /// @sa gridClassToString, gridClassToMenuName, stringToGridClass
    GridClass getGridClass() const;
    /// @brief Specify the class of volumetric data (level set, fog volume, etc.)
    /// that is stored in this grid.
    /// @sa gridClassToString, gridClassToMenuName, stringToGridClass
    void setGridClass(GridClass);
    /// Remove the setting specifying the class of this grid's volumetric data.
    void clearGridClass();

    /// @}

    /// Return the metadata string value for the given class of volumetric data.
    static std::string gridClassToString(GridClass);
    /// Return a formatted string version of the grid class.
    static std::string gridClassToMenuName(GridClass);
    /// @brief Return the class of volumetric data specified by the given string.
    /// @details If the string is not one of the ones returned by
    /// @vdblink::GridBase::gridClassToString() gridClassToString@endlink,
    /// return @c GRID_UNKNOWN.
    static GridClass stringToGridClass(const std::string&);

    /// @name Metadata
    /// @{

    /// @brief Return the type of vector data (invariant, covariant, etc.) stored
    /// in this grid, assuming that this grid contains a vector-valued tree.
    /// @sa vecTypeToString, vecTypeExamples, vecTypeDescription, stringToVecType
    VecType getVectorType() const;
    /// @brief Specify the type of vector data (invariant, covariant, etc.) stored
    /// in this grid, assuming that this grid contains a vector-valued tree.
    /// @sa vecTypeToString, vecTypeExamples, vecTypeDescription, stringToVecType
    void setVectorType(VecType);
    /// Remove the setting specifying the type of vector data stored in this grid.
    void clearVectorType();

    /// @}

    /// Return the metadata string value for the given type of vector data.
    static std::string vecTypeToString(VecType);
    /// Return a string listing examples of the given type of vector data
    /// (e.g., "Gradient/Normal", given VEC_COVARIANT).
    static std::string vecTypeExamples(VecType);
    /// @brief Return a string describing how the given type of vector data is affected
    /// by transformations (e.g., "Does not transform", given VEC_INVARIANT).
    static std::string vecTypeDescription(VecType);
    /// Return the type of vector data specified by the given string.
    static VecType stringToVecType(const std::string&);

    /// @name Metadata
    /// @{

    /// Return @c true if this grid's voxel values are in world space and should be
    /// affected by transformations, @c false if they are in local space and should
    /// not be affected by transformations.
    bool isInWorldSpace() const;
    /// Specify whether this grid's voxel values are in world space or in local space.
    void setIsInWorldSpace(bool);

    /// @}

    // Standard metadata field names
    // (These fields should normally not be accessed directly, but rather
    // via the accessor methods above, when available.)
    // Note: Visual C++ requires these declarations to be separate statements.
    static const char* const META_GRID_CLASS;
    static const char* const META_GRID_CREATOR;
    static const char* const META_GRID_NAME;
    static const char* const META_SAVE_HALF_FLOAT;
    static const char* const META_IS_LOCAL_SPACE;
    static const char* const META_VECTOR_TYPE;
    static const char* const META_FILE_BBOX_MIN;
    static const char* const META_FILE_BBOX_MAX;
    static const char* const META_FILE_COMPRESSION;
    static const char* const META_FILE_MEM_BYTES;
    static const char* const META_FILE_VOXEL_COUNT;
    static const char* const META_FILE_DELAYED_LOAD;

    /// @name Statistics
    /// @{

    /// Return the number of active voxels.
    virtual Index64 activeVoxelCount() const = 0;
    /// Return the axis-aligned bounding box of all active voxels. If
    /// the grid is empty a default bbox is returned.
    virtual CoordBBox evalActiveVoxelBoundingBox() const = 0;
    /// Return the dimensions of the axis-aligned bounding box of all active voxels.
    virtual Coord evalActiveVoxelDim() const = 0;
    /// Return the number of bytes of memory used by this grid.
    virtual Index64 memUsage() const = 0;

    /// @brief Add metadata to this grid comprising the current values
    /// of statistics like the active voxel count and bounding box.
    /// @note This metadata is not automatically kept up-to-date with
    /// changes to this grid.
    void addStatsMetadata();
    /// @brief Return a new MetaMap containing just the metadata that
    /// was added to this grid with @vdblink::GridBase::addStatsMetadata()
    /// addStatsMetadata@endlink.
    /// @details If @vdblink::GridBase::addStatsMetadata() addStatsMetadata@endlink
    /// was never called on this grid, return an empty MetaMap.
    MetaMap::Ptr getStatsMetadata() const;

    /// @}

    /// @name Transform
    /// @{

    //@{
    /// @brief Return a pointer to this grid's transform, which might be
    /// shared with other grids.
    math::Transform::Ptr transformPtr() { return mTransform; }
    math::Transform::ConstPtr transformPtr() const { return mTransform; }
    math::Transform::ConstPtr constTransformPtr() const { return mTransform; }
    //@}
    //@{
    /// @brief Return a reference to this grid's transform, which might be
    /// shared with other grids.
    /// @note Calling @vdblink::GridBase::setTransform() setTransform@endlink
    /// on this grid invalidates all references previously returned by this method.
    math::Transform& transform() { return *mTransform; }
    const math::Transform& transform() const { return *mTransform; }
    const math::Transform& constTransform() const { return *mTransform; }
    //@}

    /// @}

    /// @name Transform
    /// @{

    /// @brief Associate the given transform with this grid, in place of
    /// its existing transform.
    /// @throw ValueError if the transform pointer is null
    /// @note Invalidates all references previously returned by
    /// @vdblink::GridBase::transform() transform@endlink
    /// or @vdblink::GridBase::constTransform() constTransform@endlink.
    void setTransform(math::Transform::Ptr);

    /// Return the size of this grid's voxels.
    Vec3d voxelSize() const { return transform().voxelSize(); }
    /// @brief Return the size of this grid's voxel at position (x, y, z).
    /// @note Frustum and perspective transforms have position-dependent voxel size.
    Vec3d voxelSize(const Vec3d& xyz) const { return transform().voxelSize(xyz); }
    /// Return true if the voxels in world space are uniformly sized cubes
    bool hasUniformVoxels() const { return mTransform->hasUniformScale(); }
    /// Apply this grid's transform to the given coordinates.
    Vec3d indexToWorld(const Vec3d& xyz) const { return transform().indexToWorld(xyz); }
    /// Apply this grid's transform to the given coordinates.
    Vec3d indexToWorld(const Coord& ijk) const { return transform().indexToWorld(ijk); }
    /// Apply the inverse of this grid's transform to the given coordinates.
    Vec3d worldToIndex(const Vec3d& xyz) const { return transform().worldToIndex(xyz); }

    /// @}

    /// @name I/O
    /// @{

    /// @brief Read the grid topology from a stream.
    /// This will read only the grid structure, not the actual data buffers.
    virtual void readTopology(std::istream&) = 0;
    /// @brief Write the grid topology to a stream.
    /// This will write only the grid structure, not the actual data buffers.
    virtual void writeTopology(std::ostream&) const = 0;

    /// Read all data buffers for this grid.
    virtual void readBuffers(std::istream&) = 0;
    /// Read all of this grid's data buffers that intersect the given index-space bounding box.
    virtual void readBuffers(std::istream&, const CoordBBox&) = 0;
    /// @brief Read all of this grid's data buffers that are not yet resident in memory
    /// (because delayed loading is in effect).
    /// @details If this grid was read from a memory-mapped file, this operation
    /// disconnects the grid from the file.
    /// @sa io::File::open, io::MappedFile
    virtual void readNonresidentBuffers() const = 0;
    /// Write out all data buffers for this grid.
    virtual void writeBuffers(std::ostream&) const = 0;

    /// Read in the transform for this grid.
    void readTransform(std::istream& is) { transform().read(is); }
    /// Write out the transform for this grid.
    void writeTransform(std::ostream& os) const { transform().write(os); }

    /// Output a human-readable description of this grid.
    virtual void print(std::ostream& = std::cout, int verboseLevel = 1) const = 0;

    /// @}

protected:
    /// @brief Initialize with an identity linear transform.
    GridBase(): mTransform(math::Transform::createLinearTransform()) {}

#if OPENVDB_ABI_VERSION_NUMBER >= 7
    /// @brief Initialize with metadata and a transform.
    /// @throw ValueError if the transform pointer is null
    GridBase(const MetaMap& meta, math::Transform::Ptr xform);
#endif

    /// @brief Deep copy another grid's metadata and transform.
    GridBase(const GridBase& other): MetaMap(other), mTransform(other.mTransform->copy()) {}

    /// @brief Copy another grid's metadata but share its transform.
    GridBase(GridBase& other, ShallowCopy): MetaMap(other), mTransform(other.mTransform) {}

    /// Register a grid type along with a factory function.
    static void registerGrid(const Name& type, GridFactory);
    /// Remove a grid type from the registry.
    static void unregisterGrid(const Name& type);

private:
    math::Transform::Ptr mTransform; // world/index transform; never null (ctors create one, setTransform() rejects null)
}; // class GridBase
////////////////////////////////////////
// Convenience aliases for the standard containers of grid pointers and
// their iterator types.

// Vectors of pointers to mutable grids
using GridPtrVec = std::vector<GridBase::Ptr>;
using GridPtrVecIter = GridPtrVec::iterator;
using GridPtrVecCIter = GridPtrVec::const_iterator;
using GridPtrVecPtr = SharedPtr<GridPtrVec>;
// Vectors of pointers to const grids
using GridCPtrVec = std::vector<GridBase::ConstPtr>;
using GridCPtrVecIter = GridCPtrVec::iterator;
using GridCPtrVecCIter = GridCPtrVec::const_iterator;
using GridCPtrVecPtr = SharedPtr<GridCPtrVec>;
// Sets of pointers to mutable grids
using GridPtrSet = std::set<GridBase::Ptr>;
using GridPtrSetIter = GridPtrSet::iterator;
using GridPtrSetCIter = GridPtrSet::const_iterator;
using GridPtrSetPtr = SharedPtr<GridPtrSet>;
// Sets of pointers to const grids
using GridCPtrSet = std::set<GridBase::ConstPtr>;
using GridCPtrSetIter = GridCPtrSet::iterator;
using GridCPtrSetCIter = GridCPtrSet::const_iterator;
using GridCPtrSetPtr = SharedPtr<GridCPtrSet>;
/// @brief Unary predicate that matches grids whose name equals a given string.
struct OPENVDB_API GridNamePred
{
    GridNamePred(const Name& gridName): name(gridName) {}
    /// Return @c true only if @a g is non-null and has the stored name.
    bool operator()(const GridBase::ConstPtr& g) const
    {
        if (!g) return false;
        return g->getName() == name;
    }
    Name name; ///< the grid name to match
};
/// Return the first grid in the given container whose name is @a name,
/// or an empty pointer if no such grid exists.
template<typename GridPtrContainerT>
inline typename GridPtrContainerT::value_type
findGridByName(const GridPtrContainerT& container, const Name& name)
{
    using GridPtrT = typename GridPtrContainerT::value_type;
    // Linear scan in container order; the match criterion is the same
    // one GridNamePred implements (non-null pointer with matching name).
    for (const GridPtrT& gridPtr : container) {
        if (gridPtr && gridPtr->getName() == name) return gridPtr;
    }
    return GridPtrT();
}
/// Return the first grid in the given map whose name is @a name,
/// or an empty pointer if no such grid exists.
template<typename KeyT, typename GridPtrT>
inline GridPtrT
findGridByName(const std::map<KeyT, GridPtrT>& container, const Name& name)
{
    // Walk the map in key order and return the first mapped pointer that
    // is non-null and refers to a grid with the requested name.
    for (const auto& entry : container) {
        const GridPtrT& gridPtr = entry.second;
        if (gridPtr && gridPtr->getName() == name) return gridPtr;
    }
    return GridPtrT();
}
//@}
////////////////////////////////////////
/// @brief Container class that associates a tree with a transform and metadata
template<typename _TreeType>
class Grid: public GridBase
{
public:
using Ptr = SharedPtr<Grid>;
using ConstPtr = SharedPtr<const Grid>;
using TreeType = _TreeType;
using TreePtrType = typename _TreeType::Ptr;
using ConstTreePtrType = typename _TreeType::ConstPtr;
using ValueType = typename _TreeType::ValueType;
using BuildType = typename _TreeType::BuildType;
using ValueOnIter = typename _TreeType::ValueOnIter;
using ValueOnCIter = typename _TreeType::ValueOnCIter;
using ValueOffIter = typename _TreeType::ValueOffIter;
using ValueOffCIter = typename _TreeType::ValueOffCIter;
using ValueAllIter = typename _TreeType::ValueAllIter;
using ValueAllCIter = typename _TreeType::ValueAllCIter;
using Accessor = typename tree::ValueAccessor<_TreeType, true>;
using ConstAccessor = typename tree::ValueAccessor<const _TreeType, true>;
using UnsafeAccessor = typename tree::ValueAccessor<_TreeType, false>;
using ConstUnsafeAccessor = typename tree::ValueAccessor<const _TreeType, false>;
/// @brief ValueConverter<T>::Type is the type of a grid having the same
/// hierarchy as this grid but a different value type, T.
///
/// For example, FloatGrid::ValueConverter<double>::Type is equivalent to DoubleGrid.
/// @note If the source grid type is a template argument, it might be necessary
/// to write "typename SourceGrid::template ValueConverter<T>::Type".
template<typename OtherValueType>
struct ValueConverter {
using Type = Grid<typename TreeType::template ValueConverter<OtherValueType>::Type>;
};
/// Return a new grid with the given background value.
static Ptr create(const ValueType& background);
/// Return a new grid with background value zero.
static Ptr create();
/// @brief Return a new grid that contains the given tree.
/// @throw ValueError if the tree pointer is null
static Ptr create(TreePtrType);
/// @brief Return a new, empty grid with the same transform and metadata as the
/// given grid and with background value zero.
static Ptr create(const GridBase& other);
/// Construct a new grid with background value zero.
Grid();
/// Construct a new grid with the given background value.
explicit Grid(const ValueType& background);
/// @brief Construct a new grid that shares the given tree and associates with it
/// an identity linear transform.
/// @throw ValueError if the tree pointer is null
explicit Grid(TreePtrType);
/// Deep copy another grid's metadata, transform and tree.
Grid(const Grid&);
/// @brief Deep copy the metadata, transform and tree of another grid whose tree
/// configuration is the same as this grid's but whose value type is different.
/// Cast the other grid's values to this grid's value type.
/// @throw TypeError if the other grid's tree configuration doesn't match this grid's
/// or if this grid's ValueType is not constructible from the other grid's ValueType.
template<typename OtherTreeType>
explicit Grid(const Grid<OtherTreeType>&);
/// Deep copy another grid's metadata and transform, but share its tree.
Grid(Grid&, ShallowCopy);
/// @brief Deep copy another grid's metadata and transform, but construct a new tree
/// with background value zero.
explicit Grid(const GridBase&);
~Grid() override {}
/// Disallow assignment, since it wouldn't be obvious whether the copy is deep or shallow.
Grid& operator=(const Grid&) = delete;
/// @name Copying
/// @{
/// @brief Return a new grid of the same type as this grid whose metadata and
/// transform are deep copies of this grid's and whose tree is shared with this grid.
Ptr copy();
/// @brief Return a new grid of the same type as this grid whose metadata and
/// transform are deep copies of this grid's and whose tree is shared with this grid.
ConstPtr copy() const;
/// @brief Return a new grid of the same type as this grid whose metadata and
/// transform are deep copies of this grid's and whose tree is default-constructed.
Ptr copyWithNewTree() const;
/// @brief Return a new grid of the same type as this grid whose metadata is a
/// deep copy of this grid's and whose tree and transform are shared with this grid.
GridBase::Ptr copyGrid() override;
/// @brief Return a new grid of the same type as this grid whose metadata is a
/// deep copy of this grid's and whose tree and transform are shared with this grid.
GridBase::ConstPtr copyGrid() const override;
/// @brief Return a new grid of the same type as this grid whose metadata and
/// transform are deep copies of this grid's and whose tree is default-constructed.
GridBase::Ptr copyGridWithNewTree() const override;
//@}
/// @name Copying
/// @{
#if OPENVDB_ABI_VERSION_NUMBER >= 7
/// @brief Return a new grid of the same type as this grid whose tree and transform
/// is shared with this grid and whose metadata is provided as an argument.
ConstPtr copyReplacingMetadata(const MetaMap& meta) const;
/// @brief Return a new grid of the same type as this grid whose tree is shared with
/// this grid, whose metadata is a deep copy of this grid's and whose transform is
/// provided as an argument.
/// @throw ValueError if the transform pointer is null
ConstPtr copyReplacingTransform(math::Transform::Ptr xform) const;
/// @brief Return a new grid of the same type as this grid whose tree is shared with
/// this grid and whose transform and metadata are provided as arguments.
/// @throw ValueError if the transform pointer is null
ConstPtr copyReplacingMetadataAndTransform(const MetaMap& meta,
math::Transform::Ptr xform) const;
/// @brief Return a new grid of the same type as this grid whose tree and transform
/// is shared with this grid and whose metadata is provided as an argument.
GridBase::ConstPtr copyGridReplacingMetadata(const MetaMap& meta) const override;
/// @brief Return a new grid of the same type as this grid whose tree is shared with
/// this grid, whose metadata is a deep copy of this grid's and whose transform is
/// provided as an argument.
/// @throw ValueError if the transform pointer is null
GridBase::ConstPtr copyGridReplacingTransform(math::Transform::Ptr xform) const override;
/// @brief Return a new grid of the same type as this grid whose tree is shared with
/// this grid and whose transform and metadata are provided as arguments.
/// @throw ValueError if the transform pointer is null
GridBase::ConstPtr copyGridReplacingMetadataAndTransform(const MetaMap& meta,
math::Transform::Ptr xform) const override;
#endif
/// @brief Return a new grid whose metadata, transform and tree are deep copies of this grid's.
Ptr deepCopy() const { return Ptr(new Grid(*this)); }
/// @brief Return a new grid whose metadata, transform and tree are deep copies of this grid's.
GridBase::Ptr deepCopyGrid() const override { return this->deepCopy(); }
//@}
/// Return the name of this grid's type.
Name type() const override { return this->gridType(); }
/// Return the name of this type of grid.
static Name gridType() { return TreeType::treeType(); }
/// Return the name of the type of a voxel's value (e.g., "float" or "vec3d").
Name valueType() const override { return tree().valueType(); }
/// @name Voxel access
/// @{
/// @brief Return this grid's background value.
/// @note Use tools::changeBackground to efficiently modify the background value.
const ValueType& background() const { return mTree->background(); }
/// Return @c true if this grid contains only inactive background voxels.
bool empty() const override { return tree().empty(); }
/// Empty this grid, so that all voxels become inactive background voxels.
void clear() override { tree().clear(); }
/// @brief Return an accessor that provides random read and write access
/// to this grid's voxels.
/// @details The accessor is safe in the sense that it is registered with this grid's tree.
Accessor getAccessor() { return Accessor(tree()); }
/// @brief Return an unsafe accessor that provides random read and write access
/// to this grid's voxels.
/// @details The accessor is unsafe in the sense that it is not registered
/// with this grid's tree. In some rare cases this can give a performance advantage
/// over a registered accessor, but it is unsafe if the tree topology is modified.
/// @warning Only use this method if you're an expert and know the
/// risks of using an unregistered accessor (see tree/ValueAccessor.h)
UnsafeAccessor getUnsafeAccessor() { return UnsafeAccessor(tree()); }
/// Return an accessor that provides random read-only access to this grid's voxels.
ConstAccessor getAccessor() const { return ConstAccessor(tree()); }
/// Return an accessor that provides random read-only access to this grid's voxels.
ConstAccessor getConstAccessor() const { return ConstAccessor(tree()); }
/// @brief Return an unsafe accessor that provides random read-only access
/// to this grid's voxels.
/// @details The accessor is unsafe in the sense that it is not registered
/// with this grid's tree. In some rare cases this can give a performance advantage
/// over a registered accessor, but it is unsafe if the tree topology is modified.
/// @warning Only use this method if you're an expert and know the
/// risks of using an unregistered accessor (see tree/ValueAccessor.h)
ConstUnsafeAccessor getConstUnsafeAccessor() const { return ConstUnsafeAccessor(tree()); }
/// Return an iterator over all of this grid's active values (tile and voxel).
ValueOnIter beginValueOn() { return tree().beginValueOn(); }
/// Return an iterator over all of this grid's active values (tile and voxel).
ValueOnCIter beginValueOn() const { return tree().cbeginValueOn(); }
/// Return an iterator over all of this grid's active values (tile and voxel).
ValueOnCIter cbeginValueOn() const { return tree().cbeginValueOn(); }
/// Return an iterator over all of this grid's inactive values (tile and voxel).
ValueOffIter beginValueOff() { return tree().beginValueOff(); }
/// Return an iterator over all of this grid's inactive values (tile and voxel).
ValueOffCIter beginValueOff() const { return tree().cbeginValueOff(); }
/// Return an iterator over all of this grid's inactive values (tile and voxel).
ValueOffCIter cbeginValueOff() const { return tree().cbeginValueOff(); }
/// Return an iterator over all of this grid's values (tile and voxel).
ValueAllIter beginValueAll() { return tree().beginValueAll(); }
/// Return an iterator over all of this grid's values (tile and voxel).
ValueAllCIter beginValueAll() const { return tree().cbeginValueAll(); }
/// Return an iterator over all of this grid's values (tile and voxel).
ValueAllCIter cbeginValueAll() const { return tree().cbeginValueAll(); }
/// @}
/// @name Tools
/// @{
/// @brief Set all voxels within a given axis-aligned box to a constant value.
/// @param bbox inclusive coordinates of opposite corners of an axis-aligned box
/// @param value the value to which to set voxels within the box
/// @param active if true, mark voxels within the box as active,
/// otherwise mark them as inactive
/// @note This operation generates a sparse, but not always optimally sparse,
/// representation of the filled box. Follow fill operations with a prune()
/// operation for optimal sparseness.
void sparseFill(const CoordBBox& bbox, const ValueType& value, bool active = true);
/// @brief Set all voxels within a given axis-aligned box to a constant value.
/// @param bbox inclusive coordinates of opposite corners of an axis-aligned box
/// @param value the value to which to set voxels within the box
/// @param active if true, mark voxels within the box as active,
/// otherwise mark them as inactive
/// @note This operation generates a sparse, but not always optimally sparse,
/// representation of the filled box. Follow fill operations with a prune()
/// operation for optimal sparseness.
void fill(const CoordBBox& bbox, const ValueType& value, bool active = true);
/// @brief Set all voxels within a given axis-aligned box to a constant value
/// and ensure that those voxels are all represented at the leaf level.
/// @param bbox inclusive coordinates of opposite corners of an axis-aligned box.
/// @param value the value to which to set voxels within the box.
/// @param active if true, mark voxels within the box as active,
/// otherwise mark them as inactive.
void denseFill(const CoordBBox& bbox, const ValueType& value, bool active = true);
/// Reduce the memory footprint of this grid by increasing its sparseness.
void pruneGrid(float tolerance = 0.0) override;
/// @brief Clip this grid to the given index-space bounding box.
/// @details Voxels that lie outside the bounding box are set to the background.
/// @warning Clipping a level set will likely produce a grid that is
/// no longer a valid level set.
void clip(const CoordBBox&) override;
/// @brief Efficiently merge another grid into this grid using one of several schemes.
/// @details This operation is primarily intended to combine grids that are mostly
/// non-overlapping (for example, intermediate grids from computations that are
/// parallelized across disjoint regions of space).
/// @warning This operation always empties the other grid.
void merge(Grid& other, MergePolicy policy = MERGE_ACTIVE_STATES);
/// @brief Union this grid's set of active values with the active values
/// of the other grid, whose value type may be different.
/// @details The resulting state of a value is active if the corresponding value
/// was already active OR if it is active in the other grid. Also, a resulting
/// value maps to a voxel if the corresponding value already mapped to a voxel
/// OR if it is a voxel in the other grid. Thus, a resulting value can only
/// map to a tile if the corresponding value already mapped to a tile
/// AND if it is a tile value in the other grid.
///
/// @note This operation modifies only active states, not values.
/// Specifically, active tiles and voxels in this grid are not changed, and
/// tiles or voxels that were inactive in this grid but active in the other grid
/// are marked as active in this grid but left with their original values.
template<typename OtherTreeType>
void topologyUnion(const Grid<OtherTreeType>& other);
/// @brief Intersect this grid's set of active values with the active values
/// of the other grid, whose value type may be different.
/// @details The resulting state of a value is active only if the corresponding
/// value was already active AND if it is active in the other tree. Also, a
/// resulting value maps to a voxel if the corresponding value
/// already mapped to an active voxel in either of the two grids
/// and it maps to an active tile or voxel in the other grid.
///
/// @note This operation can delete branches of this grid that overlap with
/// inactive tiles in the other grid. Also, because it can deactivate voxels,
/// it can create leaf nodes with no active values. Thus, it is recommended
/// to prune this grid after calling this method.
template<typename OtherTreeType>
void topologyIntersection(const Grid<OtherTreeType>& other);
/// @brief Difference this grid's set of active values with the active values
/// of the other grid, whose value type may be different.
/// @details After this method is called, voxels in this grid will be active
/// only if they were active to begin with and if the corresponding voxels
/// in the other grid were inactive.
///
/// @note This operation can delete branches of this grid that overlap with
/// active tiles in the other grid. Also, because it can deactivate voxels,
/// it can create leaf nodes with no active values. Thus, it is recommended
/// to prune this grid after calling this method.
template<typename OtherTreeType>
void topologyDifference(const Grid<OtherTreeType>& other);
/// @}
/// @name Statistics
/// @{
/// Return the number of active voxels.
Index64 activeVoxelCount() const override { return tree().activeVoxelCount(); }
/// Return the axis-aligned bounding box of all active voxels.
CoordBBox evalActiveVoxelBoundingBox() const override;
/// Return the dimensions of the axis-aligned bounding box of all active voxels.
Coord evalActiveVoxelDim() const override;
/// Return the minimum and maximum active values in this grid.
void evalMinMax(ValueType& minVal, ValueType& maxVal) const;
/// Return the number of bytes of memory used by this grid.
/// @todo Add transform().memUsage()
Index64 memUsage() const override { return tree().memUsage(); }
/// @}
/// @name Tree
/// @{
//@{
/// @brief Return a pointer to this grid's tree, which might be
/// shared with other grids. The pointer is guaranteed to be non-null.
TreePtrType treePtr() { return mTree; }
ConstTreePtrType treePtr() const { return mTree; }
ConstTreePtrType constTreePtr() const { return mTree; }
TreeBase::ConstPtr constBaseTreePtr() const override { return mTree; }
//@}
/// @brief Return true if tree is not shared with another grid.
/// @note This is a virtual function with ABI=8
#if OPENVDB_ABI_VERSION_NUMBER >= 8
bool isTreeUnique() const final;
#else
bool isTreeUnique() const;
#endif
//@{
/// @brief Return a reference to this grid's tree, which might be
/// shared with other grids.
/// @note Calling setTree() on this grid invalidates all references
/// previously returned by this method.
TreeType& tree() { return *mTree; }
const TreeType& tree() const { return *mTree; }
const TreeType& constTree() const { return *mTree; }
//@}
/// @}
/// @name Tree
/// @{
/// @brief Associate the given tree with this grid, in place of its existing tree.
/// @throw ValueError if the tree pointer is null
/// @throw TypeError if the tree is not of type TreeType
/// @note Invalidates all references previously returned by baseTree(),
/// constBaseTree(), tree() or constTree().
void setTree(TreeBase::Ptr) override;
/// @brief Associate a new, empty tree with this grid, in place of its existing tree.
/// @note The new tree has the same background value as the existing tree.
void newTree() override;
/// @}
/// @name I/O
/// @{
/// @brief Read the grid topology from a stream.
/// This will read only the grid structure, not the actual data buffers.
void readTopology(std::istream&) override;
/// @brief Write the grid topology to a stream.
/// This will write only the grid structure, not the actual data buffers.
void writeTopology(std::ostream&) const override;
/// Read all data buffers for this grid.
void readBuffers(std::istream&) override;
/// Read all of this grid's data buffers that intersect the given index-space bounding box.
void readBuffers(std::istream&, const CoordBBox&) override;
/// @brief Read all of this grid's data buffers that are not yet resident in memory
/// (because delayed loading is in effect).
/// @details If this grid was read from a memory-mapped file, this operation
/// disconnects the grid from the file.
/// @sa io::File::open, io::MappedFile
void readNonresidentBuffers() const override;
/// Write out all data buffers for this grid.
void writeBuffers(std::ostream&) const override;
/// Output a human-readable description of this grid.
void print(std::ostream& = std::cout, int verboseLevel = 1) const override;
/// @}
/// @brief Return @c true if grids of this type require multiple I/O passes
/// to read and write data buffers.
/// @sa HasMultiPassIO
static inline bool hasMultiPassIO();
/// @name Registry
/// @{
/// Return @c true if this grid type is registered.
static bool isRegistered() { return GridBase::isRegistered(Grid::gridType()); }
/// Register this grid type along with a factory function.
static void registerGrid()
{
GridBase::registerGrid(Grid::gridType(), Grid::factory);
// Delayed loading swaps leaf buffers in the background; warn when the
// leaf buffer flags for this value type are not atomic on this platform,
// since that makes the swap potentially unsafe under concurrency.
if (!tree::internal::LeafBufferFlags<ValueType>::IsAtomic) {
OPENVDB_LOG_WARN("delayed loading of grids of type " << Grid::gridType()
<< " might not be threadsafe on this platform");
}
}
/// Remove this grid type from the registry.
static void unregisterGrid() { GridBase::unregisterGrid(Grid::gridType()); }
/// @}
private:
#if OPENVDB_ABI_VERSION_NUMBER >= 7
/// Deep copy metadata, but share tree and transform.
Grid(TreePtrType tree, const MetaMap& meta, math::Transform::Ptr xform);
#endif
/// Helper function for use with registerGrid()
static GridBase::Ptr factory() { return Grid::create(); }
TreePtrType mTree;
}; // class Grid
////////////////////////////////////////
/// @brief Cast a generic grid pointer to a pointer to a grid of a concrete class.
///
/// Return a null pointer if the input pointer is null or if it
/// points to a grid that is not of type @c GridType.
///
/// @note Calling gridPtrCast<GridType>(grid) is equivalent to calling
/// GridBase::grid<GridType>(grid).
template<typename GridType>
inline typename GridType::Ptr
gridPtrCast(const GridBase::Ptr& grid)
{
    // Delegate to GridBase::grid(), which checks the registered type name
    // before downcasting and yields a null pointer on mismatch.
    typename GridType::Ptr typedPtr = GridBase::grid<GridType>(grid);
    return typedPtr;
}
/// @brief Cast a generic const grid pointer to a const pointer to a grid
/// of a concrete class.
///
/// Return a null pointer if the input pointer is null or if it
/// points to a grid that is not of type @c GridType.
///
/// @note Calling gridConstPtrCast<GridType>(grid) is equivalent to calling
/// GridBase::constGrid<GridType>(grid).
template<typename GridType>
inline typename GridType::ConstPtr
gridConstPtrCast(const GridBase::ConstPtr& grid)
{
    // Const counterpart of gridPtrCast(): type-checked downcast, null on mismatch.
    typename GridType::ConstPtr typedPtr = GridBase::constGrid<GridType>(grid);
    return typedPtr;
}
////////////////////////////////////////
/// @{
/// @brief Return a pointer to a deep copy of the given grid, provided that
/// the grid's concrete type is @c GridType.
///
/// Return a null pointer if the input pointer is null or if it
/// points to a grid that is not of type @c GridType.
template<typename GridType>
inline typename GridType::Ptr
deepCopyTypedGrid(const GridBase::ConstPtr& grid)
{
    // Deep-copy only when the pointer is non-null and the dynamic type matches;
    // otherwise report failure with a null pointer.
    if (grid && grid->isType<GridType>()) {
        return gridPtrCast<GridType>(grid->deepCopyGrid());
    }
    return typename GridType::Ptr();
}
template<typename GridType>
inline typename GridType::Ptr
deepCopyTypedGrid(const GridBase& grid)
{
    // Reference overload: a null result signals a type mismatch, never a throw.
    if (grid.isType<GridType>()) {
        return gridPtrCast<GridType>(grid.deepCopyGrid());
    }
    return typename GridType::Ptr();
}
/// @}
////////////////////////////////////////
//@{
/// @brief This adapter allows code that is templated on a Tree type to
/// accept either a Tree type or a Grid type.
template<typename _TreeType>
struct TreeAdapter
{
// Aliases for the tree, grid, and accessor types associated with the
// adapted tree type, in both const and non-const flavors.
using TreeType = _TreeType;
using NonConstTreeType = typename std::remove_const<TreeType>::type;
using TreePtrType = typename TreeType::Ptr;
using ConstTreePtrType = typename TreeType::ConstPtr;
using NonConstTreePtrType = typename NonConstTreeType::Ptr;
using GridType = Grid<TreeType>;
using NonConstGridType = Grid<NonConstTreeType>;
using GridPtrType = typename GridType::Ptr;
using NonConstGridPtrType = typename NonConstGridType::Ptr;
using ConstGridPtrType = typename GridType::ConstPtr;
using ValueType = typename TreeType::ValueType;
using AccessorType = typename tree::ValueAccessor<TreeType>;
using ConstAccessorType = typename tree::ValueAccessor<const TreeType>;
using NonConstAccessorType = typename tree::ValueAccessor<NonConstTreeType>;
// Uniform access to the underlying tree, whether given a tree or a grid.
static TreeType& tree(TreeType& t) { return t; }
static TreeType& tree(GridType& g) { return g.tree(); }
static const TreeType& tree(const TreeType& t) { return t; }
static const TreeType& tree(const GridType& g) { return g.tree(); }
// constTree() always yields a const reference, even from a mutable argument.
static const TreeType& constTree(TreeType& t) { return t; }
static const TreeType& constTree(GridType& g) { return g.constTree(); }
static const TreeType& constTree(const TreeType& t) { return t; }
static const TreeType& constTree(const GridType& g) { return g.constTree(); }
};
/// Partial specialization for Grid types
template<typename _TreeType>
struct TreeAdapter<Grid<_TreeType> >
{
// Same alias set as the primary template, but deduced from a Grid type
// so that code templated on trees also accepts grids directly.
using TreeType = _TreeType;
using NonConstTreeType = typename std::remove_const<TreeType>::type;
using TreePtrType = typename TreeType::Ptr;
using ConstTreePtrType = typename TreeType::ConstPtr;
using NonConstTreePtrType = typename NonConstTreeType::Ptr;
using GridType = Grid<TreeType>;
using NonConstGridType = Grid<NonConstTreeType>;
using GridPtrType = typename GridType::Ptr;
using NonConstGridPtrType = typename NonConstGridType::Ptr;
using ConstGridPtrType = typename GridType::ConstPtr;
using ValueType = typename TreeType::ValueType;
using AccessorType = typename tree::ValueAccessor<TreeType>;
using ConstAccessorType = typename tree::ValueAccessor<const TreeType>;
using NonConstAccessorType = typename tree::ValueAccessor<NonConstTreeType>;
// Uniform access to the underlying tree, whether given a tree or a grid.
static TreeType& tree(TreeType& t) { return t; }
static TreeType& tree(GridType& g) { return g.tree(); }
static const TreeType& tree(const TreeType& t) { return t; }
static const TreeType& tree(const GridType& g) { return g.tree(); }
// constTree() always yields a const reference, even from a mutable argument.
static const TreeType& constTree(TreeType& t) { return t; }
static const TreeType& constTree(GridType& g) { return g.constTree(); }
static const TreeType& constTree(const TreeType& t) { return t; }
static const TreeType& constTree(const GridType& g) { return g.constTree(); }
};
/// Partial specialization for ValueAccessor types
template<typename _TreeType>
struct TreeAdapter<tree::ValueAccessor<_TreeType> >
{
// Specialization that additionally unwraps a ValueAccessor back to its tree.
using TreeType = _TreeType;
using NonConstTreeType = typename std::remove_const<TreeType>::type;
using TreePtrType = typename TreeType::Ptr;
using ConstTreePtrType = typename TreeType::ConstPtr;
using NonConstTreePtrType = typename NonConstTreeType::Ptr;
using GridType = Grid<TreeType>;
using NonConstGridType = Grid<NonConstTreeType>;
using GridPtrType = typename GridType::Ptr;
using NonConstGridPtrType = typename NonConstGridType::Ptr;
using ConstGridPtrType = typename GridType::ConstPtr;
using ValueType = typename TreeType::ValueType;
using AccessorType = typename tree::ValueAccessor<TreeType>;
using ConstAccessorType = typename tree::ValueAccessor<const TreeType>;
using NonConstAccessorType = typename tree::ValueAccessor<NonConstTreeType>;
// Uniform access to the underlying tree, given a tree, grid, or accessor.
static TreeType& tree(TreeType& t) { return t; }
static TreeType& tree(GridType& g) { return g.tree(); }
static TreeType& tree(AccessorType& a) { return a.tree(); }
static const TreeType& tree(const TreeType& t) { return t; }
static const TreeType& tree(const GridType& g) { return g.tree(); }
static const TreeType& tree(const AccessorType& a) { return a.tree(); }
// constTree() always yields a const reference, even from a mutable argument.
static const TreeType& constTree(TreeType& t) { return t; }
static const TreeType& constTree(GridType& g) { return g.constTree(); }
static const TreeType& constTree(const TreeType& t) { return t; }
static const TreeType& constTree(const GridType& g) { return g.constTree(); }
};
//@}
////////////////////////////////////////
/// @brief Metafunction that specifies whether a given leaf node, tree, or grid type
/// requires multiple passes to read and write voxel data
/// @details Multi-pass I/O allows one to optimize the data layout of leaf nodes
/// for certain access patterns during delayed loading.
/// @sa io::MultiPass
template<typename LeafNodeType>
struct HasMultiPassIO {
// A leaf node opts into multi-pass I/O by deriving from io::MultiPass.
static const bool value = std::is_base_of<io::MultiPass, LeafNodeType>::value;
};
// Partial specialization for Tree types
template<typename RootNodeType>
struct HasMultiPassIO<tree::Tree<RootNodeType>> {
// A tree is multi-pass if its (root node's) leaf node type is multi-pass.
static const bool value = HasMultiPassIO<typename RootNodeType::LeafNodeType>::value;
};
// Partial specialization for Grid types
template<typename TreeType>
struct HasMultiPassIO<Grid<TreeType>> {
// A grid is multi-pass if its tree's leaf node type is multi-pass.
static const bool value = HasMultiPassIO<typename TreeType::LeafNodeType>::value;
};
////////////////////////////////////////
#if OPENVDB_ABI_VERSION_NUMBER >= 7
inline GridBase::GridBase(const MetaMap& meta, math::Transform::Ptr xform)
    : MetaMap(meta)
    , mTransform(xform)
{
    // A grid must always carry a valid transform; reject null up front.
    if (!mTransform) OPENVDB_THROW(ValueError, "Transform pointer is null");
}
#endif
template<typename GridType>
inline typename GridType::Ptr
GridBase::grid(const GridBase::Ptr& grid)
{
    // Compare registered type names rather than using a dynamic cast: slower,
    // but safe when pointers cross DSO boundaries, as in many Houdini nodes.
    if (!grid || grid->type() != GridType::gridType()) {
        return typename GridType::Ptr();
    }
    return StaticPtrCast<GridType>(grid);
}
template<typename GridType>
inline typename GridType::ConstPtr
GridBase::grid(const GridBase::ConstPtr& grid)
{
    // Strip constness, reuse the non-const overload, then restore constness.
    GridBase::Ptr mutableGrid = ConstPtrCast<GridBase>(grid);
    return ConstPtrCast<const GridType>(GridBase::grid<GridType>(mutableGrid));
}
template<typename GridType>
inline typename GridType::ConstPtr
GridBase::constGrid(const GridBase::Ptr& grid)
{
    // Downcast first, then add constness to the result.
    typename GridType::Ptr typedGrid = GridBase::grid<GridType>(grid);
    return ConstPtrCast<const GridType>(typedGrid);
}
template<typename GridType>
inline typename GridType::ConstPtr
GridBase::constGrid(const GridBase::ConstPtr& grid)
{
    // Route through the non-const overload, restoring constness afterwards.
    GridBase::Ptr mutableGrid = ConstPtrCast<GridBase>(grid);
    return ConstPtrCast<const GridType>(GridBase::grid<GridType>(mutableGrid));
}
inline TreeBase::Ptr
GridBase::baseTreePtr()
{
    // The const overload does the work; cast constness away for the caller.
    TreeBase::ConstPtr constTree = this->constBaseTreePtr();
    return ConstPtrCast<TreeBase>(constTree);
}
inline void
GridBase::setTransform(math::Transform::Ptr xform)
{
    // A grid never holds a null transform.
    if (xform) {
        mTransform = xform;
    } else {
        OPENVDB_THROW(ValueError, "Transform pointer is null");
    }
}
////////////////////////////////////////
// Default constructor: allocates an empty tree with a default background.
template<typename TreeT>
inline Grid<TreeT>::Grid(): mTree(new TreeType)
{
}
// Allocates an empty tree with the given background value.
template<typename TreeT>
inline Grid<TreeT>::Grid(const ValueType &background): mTree(new TreeType(background))
{
}
// Adopts (shares) an existing tree; the tree pointer must be non-null.
template<typename TreeT>
inline Grid<TreeT>::Grid(TreePtrType tree): mTree(tree)
{
if (!tree) OPENVDB_THROW(ValueError, "Tree pointer is null");
}
#if OPENVDB_ABI_VERSION_NUMBER >= 7
// Shares the tree and transform while deep-copying the metadata.
template<typename TreeT>
inline Grid<TreeT>::Grid(TreePtrType tree, const MetaMap& meta, math::Transform::Ptr xform):
GridBase(meta, xform),
mTree(tree)
{
if (!tree) OPENVDB_THROW(ValueError, "Tree pointer is null");
}
#endif
// Copy constructor: deep-copies the other grid's tree via TreeBase::copy().
template<typename TreeT>
inline Grid<TreeT>::Grid(const Grid& other):
GridBase(other),
mTree(StaticPtrCast<TreeType>(other.mTree->copy()))
{
}
// Topology-copy constructor from a grid with a different value type.
template<typename TreeT>
template<typename OtherTreeType>
inline Grid<TreeT>::Grid(const Grid<OtherTreeType>& other):
GridBase(other),
mTree(new TreeType(other.constTree()))
{
}
// Shallow copy: metadata is copied, but the tree is shared with @a other.
template<typename TreeT>
inline Grid<TreeT>::Grid(Grid& other, ShallowCopy):
GridBase(other),
mTree(other.mTree)
{
}
// Copies only the GridBase state (metadata, transform); the tree is new and empty.
template<typename TreeT>
inline Grid<TreeT>::Grid(const GridBase& other):
GridBase(other),
mTree(new TreeType)
{
}
//static
// Factory: empty grid whose background is the value type's zero.
template<typename TreeT>
inline typename Grid<TreeT>::Ptr
Grid<TreeT>::create()
{
return Grid::create(zeroVal<ValueType>());
}
//static
// Factory: empty grid with the given background value.
template<typename TreeT>
inline typename Grid<TreeT>::Ptr
Grid<TreeT>::create(const ValueType& background)
{
return Ptr(new Grid(background));
}
//static
// Factory: grid that shares the given (non-null) tree.
template<typename TreeT>
inline typename Grid<TreeT>::Ptr
Grid<TreeT>::create(TreePtrType tree)
{
return Ptr(new Grid(tree));
}
//static
// Factory: grid that copies another grid's metadata and transform
// but owns a new, empty tree.
template<typename TreeT>
inline typename Grid<TreeT>::Ptr
Grid<TreeT>::create(const GridBase& other)
{
return Ptr(new Grid(other));
}
////////////////////////////////////////
template<typename TreeT>
inline typename Grid<TreeT>::ConstPtr
Grid<TreeT>::copy() const
{
// The const_cast is needed because the ShallowCopy constructor shares the
// tree through a non-const reference; it is safe here since the result is
// handed back as a pointer-to-const grid.
return ConstPtr{new Grid{*const_cast<Grid*>(this), ShallowCopy{}}};
}
#if OPENVDB_ABI_VERSION_NUMBER >= 7
template<typename TreeT>
inline typename Grid<TreeT>::ConstPtr
Grid<TreeT>::copyReplacingMetadata(const MetaMap& meta) const
{
    // Share this grid's transform and tree; only the metadata is replaced.
    auto sharedXform = ConstPtrCast<math::Transform>(this->constTransformPtr());
    auto sharedTree = ConstPtrCast<TreeT>(this->constTreePtr());
    return ConstPtr{new Grid<TreeT>{sharedTree, meta, sharedXform}};
}
template<typename TreeT>
inline typename Grid<TreeT>::ConstPtr
Grid<TreeT>::copyReplacingTransform(math::Transform::Ptr xform) const
{
// *this binds to the MetaMap parameter through its GridBase/MetaMap base,
// so the current metadata is retained while the transform is replaced.
return this->copyReplacingMetadataAndTransform(*this, xform);
}
template<typename TreeT>
inline typename Grid<TreeT>::ConstPtr
Grid<TreeT>::copyReplacingMetadataAndTransform(const MetaMap& meta,
    math::Transform::Ptr xform) const
{
    // The tree is shared, not copied; metadata and transform are replaced.
    auto sharedTree = ConstPtrCast<TreeT>(this->constTreePtr());
    return ConstPtr{new Grid<TreeT>{sharedTree, meta, xform}};
}
#endif
template<typename TreeT>
inline typename Grid<TreeT>::Ptr
Grid<TreeT>::copy()
{
    // Shallow copy: metadata is duplicated while the tree is shared.
    return Ptr(new Grid(*this, ShallowCopy()));
}
template<typename TreeT>
inline typename Grid<TreeT>::Ptr
Grid<TreeT>::copyWithNewTree() const
{
// Shallow-copy first (const_cast is safe: ShallowCopy only shares the tree),
// then swap in a fresh empty tree so the result shares nothing mutable.
Ptr result{new Grid{*const_cast<Grid*>(this), ShallowCopy{}}};
result->newTree();
return result;
}
// Virtual-dispatch wrappers: expose the shallow copy() through GridBase pointers.
template<typename TreeT>
inline GridBase::Ptr
Grid<TreeT>::copyGrid()
{
return this->copy();
}
template<typename TreeT>
inline GridBase::ConstPtr
Grid<TreeT>::copyGrid() const
{
return this->copy();
}
#if OPENVDB_ABI_VERSION_NUMBER >= 7
// The copyGridReplacing*() overrides are the GridBase-facing counterparts of
// the corresponding typed copyReplacing*() member functions above.
template<typename TreeT>
inline GridBase::ConstPtr
Grid<TreeT>::copyGridReplacingMetadata(const MetaMap& meta) const
{
return this->copyReplacingMetadata(meta);
}
template<typename TreeT>
inline GridBase::ConstPtr
Grid<TreeT>::copyGridReplacingTransform(math::Transform::Ptr xform) const
{
return this->copyReplacingTransform(xform);
}
template<typename TreeT>
inline GridBase::ConstPtr
Grid<TreeT>::copyGridReplacingMetadataAndTransform(const MetaMap& meta,
math::Transform::Ptr xform) const
{
return this->copyReplacingMetadataAndTransform(meta, xform);
}
#endif
template<typename TreeT>
inline GridBase::Ptr
Grid<TreeT>::copyGridWithNewTree() const
{
// Virtual wrapper for copyWithNewTree().
return this->copyWithNewTree();
}
////////////////////////////////////////
template<typename TreeT>
inline bool
Grid<TreeT>::isTreeUnique() const
{
    // A use count of one means no other grid shares this tree.
    const auto owners = mTree.use_count();
    return owners == 1;
}
template<typename TreeT>
inline void
Grid<TreeT>::setTree(TreeBase::Ptr tree)
{
    // Validate before replacing: the pointer must be non-null and the tree
    // must be exactly this grid's tree type.
    if (!tree) OPENVDB_THROW(ValueError, "Tree pointer is null");
    if (tree->type() == TreeType::treeType()) {
        mTree = StaticPtrCast<TreeType>(tree);
        return;
    }
    OPENVDB_THROW(TypeError, "Cannot assign a tree of type "
        + tree->type() + " to a grid of type " + this->type());
}
template<typename TreeT>
inline void
Grid<TreeT>::newTree()
{
    // Build the replacement first so the current background value can be read,
    // then swap it in; the old tree is released here.
    TreePtrType emptyTree(new TreeType(this->background()));
    mTree = emptyTree;
}
////////////////////////////////////////
template<typename TreeT>
inline void
Grid<TreeT>::sparseFill(const CoordBBox& bbox, const ValueType& value, bool active)
{
tree().sparseFill(bbox, value, active);
}
template<typename TreeT>
inline void
Grid<TreeT>::fill(const CoordBBox& bbox, const ValueType& value, bool active)
{
// fill() is a synonym for sparseFill().
this->sparseFill(bbox, value, active);
}
template<typename TreeT>
inline void
Grid<TreeT>::denseFill(const CoordBBox& bbox, const ValueType& value, bool active)
{
// Unlike sparseFill(), all voxels in the box end up represented at leaf level.
tree().denseFill(bbox, value, active);
}
template<typename TreeT>
inline void
Grid<TreeT>::pruneGrid(float tolerance)
{
// cwiseAdd() promotes the scalar tolerance to this grid's value type
// (applying it component-wise for non-scalar value types) before pruning.
const auto value = math::cwiseAdd(zeroVal<ValueType>(), tolerance);
this->tree().prune(static_cast<ValueType>(value));
}
template<typename TreeT>
inline void
Grid<TreeT>::clip(const CoordBBox& bbox)
{
tree().clip(bbox);
}
template<typename TreeT>
inline void
Grid<TreeT>::merge(Grid& other, MergePolicy policy)
{
// Per the class documentation, merging always empties the other grid's tree.
tree().merge(other.tree(), policy);
}
// The topology operations combine only active states, not values; the other
// grid's value type may differ from this grid's.
template<typename TreeT>
template<typename OtherTreeType>
inline void
Grid<TreeT>::topologyUnion(const Grid<OtherTreeType>& other)
{
tree().topologyUnion(other.tree());
}
template<typename TreeT>
template<typename OtherTreeType>
inline void
Grid<TreeT>::topologyIntersection(const Grid<OtherTreeType>& other)
{
tree().topologyIntersection(other.tree())
////////////////////////////////////////
template<typename TreeT>
inline void
Grid<TreeT>::evalMinMax(ValueType& minVal, ValueType& maxVal) const
{
    // Delegate to the tree, which scans the active values.
    this->tree().evalMinMax(minVal, maxVal);
}
template<typename TreeT>
inline CoordBBox
Grid<TreeT>::evalActiveVoxelBoundingBox() const
{
    // The tree fills in the (index-space) bounds of all active voxels.
    CoordBBox activeBBox;
    this->tree().evalActiveVoxelBoundingBox(activeBBox);
    return activeBBox;
}
template<typename TreeT>
inline Coord
Grid<TreeT>::evalActiveVoxelDim() const
{
    // A grid with no active voxels reports zero dimensions.
    Coord dim;
    if (tree().evalActiveVoxelDim(dim)) {
        return dim;
    }
    return Coord();
}
////////////////////////////////////////
/// @internal Consider using the stream tagging mechanism (see io::Archive)
/// to specify the float precision, but note that the setting is per-grid.
template<typename TreeT>
inline void
Grid<TreeT>::readTopology(std::istream& is)
{
// saveFloatAsHalf() selects half-float conversion of value buffers.
tree().readTopology(is, saveFloatAsHalf());
}
template<typename TreeT>
inline void
Grid<TreeT>::writeTopology(std::ostream& os) const
{
tree().writeTopology(os, saveFloatAsHalf());
}
template<typename TreeT>
inline void
Grid<TreeT>::readBuffers(std::istream& is)
{
// Single-pass path: grids that don't use multi-pass I/O, or files written
// before the multi-pass format was introduced.
if (!hasMultiPassIO() || (io::getFormatVersion(is) < OPENVDB_FILE_VERSION_MULTIPASS_IO)) {
tree().readBuffers(is, saveFloatAsHalf());
} else {
// Multi-pass path: the pass count precedes the buffers in the stream.
uint16_t numPasses = 1;
is.read(reinterpret_cast<char*>(&numPasses), sizeof(uint16_t));
const io::StreamMetadata::Ptr meta = io::getStreamMetadataPtr(is);
assert(bool(meta));
for (uint16_t passIndex = 0; passIndex < numPasses; ++passIndex) {
// Pack the total pass count into the high 16 bits and the current
// pass index into the low 16 bits of the stream metadata's pass value.
uint32_t pass = (uint32_t(numPasses) << 16) | uint32_t(passIndex);
meta->setPass(pass);
tree().readBuffers(is, saveFloatAsHalf());
}
}
}
/// @todo Refactor this and the readBuffers() above
/// once support for ABI 2 compatibility is dropped.
template<typename TreeT>
inline void
Grid<TreeT>::readBuffers(std::istream& is, const CoordBBox& bbox)
{
// Single-pass path: the tree can clip to the bounding box while reading.
if (!hasMultiPassIO() || (io::getFormatVersion(is) < OPENVDB_FILE_VERSION_MULTIPASS_IO)) {
tree().readBuffers(is, bbox, saveFloatAsHalf());
} else {
uint16_t numPasses = 1;
is.read(reinterpret_cast<char*>(&numPasses), sizeof(uint16_t));
const io::StreamMetadata::Ptr meta = io::getStreamMetadataPtr(is);
assert(bool(meta));
for (uint16_t passIndex = 0; passIndex < numPasses; ++passIndex) {
// Pass count in the high 16 bits, pass index in the low 16 bits.
uint32_t pass = (uint32_t(numPasses) << 16) | uint32_t(passIndex);
meta->setPass(pass);
tree().readBuffers(is, saveFloatAsHalf());
}
// Cannot clip inside readBuffers() when using multiple passes,
// so instead clip afterwards.
tree().clip(bbox);
}
}
template<typename TreeT>
inline void
Grid<TreeT>::readNonresidentBuffers() const
{
    // Force any delay-loaded leaf buffers to be read into memory.
    this->tree().readNonresidentBuffers();
}
template<typename TreeT>
inline void
Grid<TreeT>::writeBuffers(std::ostream& os) const
{
if (!hasMultiPassIO()) {
tree().writeBuffers(os, saveFloatAsHalf());
} else {
// Determine how many leaf buffer passes are required for this grid
const io::StreamMetadata::Ptr meta = io::getStreamMetadataPtr(os);
assert(bool(meta));
uint16_t numPasses = 1;
// First, a counting pass: the tree reports (via the stream metadata)
// how many passes it needs without actually writing buffer data.
meta->setCountingPasses(true);
meta->setPass(0);
tree().writeBuffers(os, saveFloatAsHalf());
numPasses = static_cast<uint16_t>(meta->pass());
// The pass count is written ahead of the buffers so readBuffers()
// knows how many passes to perform.
os.write(reinterpret_cast<const char*>(&numPasses), sizeof(uint16_t));
meta->setCountingPasses(false);
// Save out the data blocks of the grid.
for (uint16_t passIndex = 0; passIndex < numPasses; ++passIndex) {
// Pass count in the high 16 bits, pass index in the low 16 bits.
uint32_t pass = (uint32_t(numPasses) << 16) | uint32_t(passIndex);
meta->setPass(pass);
tree().writeBuffers(os, saveFloatAsHalf());
}
}
}
//static
template<typename TreeT>
inline bool
Grid<TreeT>::hasMultiPassIO()
{
    // Resolved at compile time from the grid's leaf node type.
    return HasMultiPassIO<Grid>::value;
}
template<typename TreeT>
inline void
Grid<TreeT>::print(std::ostream& os, int verboseLevel) const
{
// Tree statistics first, then metadata, then the transform.
tree().print(os, verboseLevel);
if (metaCount() > 0) {
os << "Additional metadata:" << std::endl;
for (ConstMetaIterator it = beginMeta(), end = endMeta(); it != end; ++it) {
os << "  " << it->first;
if (it->second) {
const std::string value = it->second->str();
// Omit the colon for metadata whose string form is empty.
if (!value.empty()) os << ": " << value;
}
os << "\n";
}
}
os << "Transform:" << std::endl;
transform().print(os, /*indent=*/"  ");
os << std::endl;
}
////////////////////////////////////////
// Convenience wrapper around GridType::create(background).
template<typename GridType>
inline typename GridType::Ptr
createGrid(const typename GridType::ValueType& background)
{
return GridType::create(background);
}
// Convenience wrapper; the background defaults to the value type's zero.
template<typename GridType>
inline typename GridType::Ptr
createGrid()
{
return GridType::create();
}
// Deduces the grid type from the tree pointer's element type and
// creates a grid that shares the given tree.
template<typename TreePtrType>
inline typename Grid<typename TreePtrType::element_type>::Ptr
createGrid(TreePtrType tree)
{
using TreeType = typename TreePtrType::element_type;
return Grid<TreeType>::create(tree);
}
template<typename GridType>
typename GridType::Ptr
createLevelSet(Real voxelSize, Real halfWidth)
{
using ValueType = typename GridType::ValueType;
// GridType::ValueType is required to be a floating-point scalar.
static_assert(std::is_floating_point<ValueType>::value,
"level-set grids must be floating-point-valued");
// The background value is the world-space width of half the narrow band.
typename GridType::Ptr grid = GridType::create(
/*background=*/static_cast<ValueType>(voxelSize * halfWidth));
grid->setTransform(math::Transform::createLinearTransform(voxelSize));
grid->setGridClass(GRID_LEVEL_SET);
return grid;
}
////////////////////////////////////////
namespace internal {
/// @private
// Base case: the TypeList has been exhausted without a match, so report failure.
template<typename OpT, typename GridBaseT, typename T, typename ...Ts>
struct GridApplyImpl { static bool apply(GridBaseT&, OpT&) { return false; } };
// Partial specialization for (nonempty) TypeLists
/// @private
template<typename OpT, typename GridBaseT, typename GridT, typename ...GridTs>
struct GridApplyImpl<OpT, GridBaseT, TypeList<GridT, GridTs...>>
{
static bool apply(GridBaseT& grid, OpT& op)
{
// Invoke the op on the first matching grid type; CopyConstness keeps the
// cast const-correct with respect to GridBaseT.
if (grid.template isType<GridT>()) {
op(static_cast<typename CopyConstness<GridBaseT, GridT>::Type&>(grid));
return true;
}
// Otherwise recurse into the remainder of the TypeList.
return GridApplyImpl<OpT, GridBaseT, TypeList<GridTs...>>::apply(grid, op);
}
};
} // namespace internal
// The four apply() overloads differ only in the constness of the grid and of
// the op; all forward to internal::GridApplyImpl, which walks the TypeList.
template<typename GridTypeListT, typename OpT>
inline bool
GridBase::apply(OpT& op) const
{
return internal::GridApplyImpl<OpT, const GridBase, GridTypeListT>::apply(*this, op);
}
template<typename GridTypeListT, typename OpT>
inline bool
GridBase::apply(OpT& op)
{
return internal::GridApplyImpl<OpT, GridBase, GridTypeListT>::apply(*this, op);
}
template<typename GridTypeListT, typename OpT>
inline bool
GridBase::apply(const OpT& op) const
{
return internal::GridApplyImpl<const OpT, const GridBase, GridTypeListT>::apply(*this, op);
}
template<typename GridTypeListT, typename OpT>
inline bool
GridBase::apply(const OpT& op)
{
return internal::GridApplyImpl<const OpT, GridBase, GridTypeListT>::apply(*this, op);
}
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_GRID_HAS_BEEN_INCLUDED
| 68,407 | C | 36.057421 | 99 | 0.687283 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/openvdb.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#include "openvdb.h"
#include "io/DelayedLoadMetadata.h"
//#ifdef OPENVDB_ENABLE_POINTS
#include "points/PointDataGrid.h"
//#endif
#include "tools/PointIndexGrid.h"
#include "util/logging.h"
#include <tbb/mutex.h>
#ifdef OPENVDB_USE_BLOSC
#include <blosc.h>
#endif
#if OPENVDB_ABI_VERSION_NUMBER < 5
#error ABI <= 4 is no longer supported
#endif
// If using an OPENVDB_ABI_VERSION_NUMBER that has been deprecated, issue an error
// directive. This can be optionally suppressed by setting the CMake option
// OPENVDB_USE_DEPRECATED_ABI_<VERSION>=ON.
#ifndef OPENVDB_USE_DEPRECATED_ABI_5
#if OPENVDB_ABI_VERSION_NUMBER == 5
#error ABI = 5 is deprecated, CMake option OPENVDB_USE_DEPRECATED_ABI_5 suppresses this error
#endif
#endif
#ifndef OPENVDB_USE_DEPRECATED_ABI_6
#if OPENVDB_ABI_VERSION_NUMBER == 6
#error ABI = 6 is deprecated, CMake option OPENVDB_USE_DEPRECATED_ABI_6 suppresses this error
#endif
#endif
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
// Mutex/lock aliases that guard the library-wide (un)initialization state.
// Written as C++11 alias declarations for consistency with the rest of the
// library (e.g. Metadata.cc), replacing the older typedef spelling.
using Mutex = tbb::mutex;
using Lock = Mutex::scoped_lock;
namespace {
// Declare this at file scope to ensure thread-safe initialization.
Mutex sInitMutex;            // serializes initialize()/uninitialize()
bool sIsInitialized = false; // set once initialize() has completed
}
/// @brief Global initialization of the OpenVDB library: registers all built-in
/// metadata, transform-map, and grid types, the point-index/point-data types,
/// and (when built with Blosc) initializes the Blosc compressor.
/// @note Thread-safe and idempotent: guarded by sInitMutex and an
/// "already initialized" flag, so repeated calls return immediately.
void
initialize()
{
    Lock lock(sInitMutex);
    if (sIsInitialized) return;
    logging::initialize();
    // Register metadata.
    // Note: clearRegistry() first, so re-initialization after uninitialize()
    // starts from a clean registry.
    Metadata::clearRegistry();
    BoolMetadata::registerType();
    DoubleMetadata::registerType();
    FloatMetadata::registerType();
    Int32Metadata::registerType();
    Int64Metadata::registerType();
    StringMetadata::registerType();
    Vec2IMetadata::registerType();
    Vec2SMetadata::registerType();
    Vec2DMetadata::registerType();
    Vec3IMetadata::registerType();
    Vec3SMetadata::registerType();
    Vec3DMetadata::registerType();
    Vec4IMetadata::registerType();
    Vec4SMetadata::registerType();
    Vec4DMetadata::registerType();
    Mat4SMetadata::registerType();
    Mat4DMetadata::registerType();
    // Register maps
    math::MapRegistry::clear();
    math::AffineMap::registerMap();
    math::UnitaryMap::registerMap();
    math::ScaleMap::registerMap();
    math::UniformScaleMap::registerMap();
    math::TranslationMap::registerMap();
    math::ScaleTranslateMap::registerMap();
    math::UniformScaleTranslateMap::registerMap();
    math::NonlinearFrustumMap::registerMap();
    // Register common grid types.
    GridBase::clearRegistry();
    BoolGrid::registerGrid();
    MaskGrid::registerGrid();
    FloatGrid::registerGrid();
    DoubleGrid::registerGrid();
    Int32Grid::registerGrid();
    Int64Grid::registerGrid();
    StringGrid::registerGrid();
    Vec3IGrid::registerGrid();
    Vec3SGrid::registerGrid();
    Vec3DGrid::registerGrid();
    // Register types associated with point index grids.
    Metadata::registerType(typeNameAsString<PointIndex32>(), Int32Metadata::createMetadata);
    Metadata::registerType(typeNameAsString<PointIndex64>(), Int64Metadata::createMetadata);
    tools::PointIndexGrid::registerGrid();
    // Register types associated with point data grids.
    //#ifdef OPENVDB_ENABLE_POINTS
    points::internal::initialize();
    //#endif
    // Register delay load metadata
    io::DelayedLoadMetadata::registerType();
#ifdef OPENVDB_USE_BLOSC
    blosc_init();
    // Fall back silently to Blosc's default compressor if LZ4 is missing.
    if (blosc_set_compressor("lz4") < 0) {
        OPENVDB_LOG_WARN("Blosc LZ4 compressor is unavailable");
    }
    /// @todo blosc_set_nthreads(int nthreads);
#endif
#ifdef __ICC
// Disable ICC "assignment to statically allocated variable" warning.
// This assignment is mutex-protected and therefore thread-safe.
__pragma(warning(disable:1711))
#endif
    sIsInitialized = true;
#ifdef __ICC
__pragma(warning(default:1711))
#endif
}
/// @brief Global teardown of the OpenVDB library: clears the metadata, map and
/// grid registries and the point-data registrations.
/// @note Thread-safe (guarded by sInitMutex).  Resets the initialization flag,
/// so a subsequent initialize() call re-registers everything.
void
uninitialize()
{
    Lock lock(sInitMutex);
#ifdef __ICC
// Disable ICC "assignment to statically allocated variable" warning.
// This assignment is mutex-protected and therefore thread-safe.
__pragma(warning(disable:1711))
#endif
    sIsInitialized = false;
#ifdef __ICC
__pragma(warning(default:1711))
#endif
    Metadata::clearRegistry();
    GridBase::clearRegistry();
    math::MapRegistry::clear();
    //#ifdef OPENVDB_ENABLE_POINTS
    points::internal::uninitialize();
    //#endif
#ifdef OPENVDB_USE_BLOSC
    // We don't want to destroy Blosc, because it might have been
    // initialized by some other library.
    //blosc_destroy();
#endif
}
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
| 4,541 | C++ | 26.035714 | 101 | 0.716362 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/Platform.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
///
/// @file Platform.h
#ifndef OPENVDB_PLATFORM_HAS_BEEN_INCLUDED
#define OPENVDB_PLATFORM_HAS_BEEN_INCLUDED
#include "PlatformConfig.h"
/// Wrap a pragma directive so it can be emitted from within a macro expansion.
#define PRAGMA(x) _Pragma(#x)
/// @name Utilities
/// @{
/// @cond OPENVDB_VERSION_INTERNAL
#define OPENVDB_PREPROC_STRINGIFY_(x) #x
/// @endcond
/// @brief Return @a x as a string literal. If @a x is a macro,
/// return its value as a string literal.
/// @hideinitializer
#define OPENVDB_PREPROC_STRINGIFY(x) OPENVDB_PREPROC_STRINGIFY_(x)
/// @cond OPENVDB_VERSION_INTERNAL
#define OPENVDB_PREPROC_CONCAT_(x, y) x ## y
/// @endcond
/// @brief Form a new token by concatenating two existing tokens.
/// If either token is a macro, concatenate its value.
/// @hideinitializer
#define OPENVDB_PREPROC_CONCAT(x, y) OPENVDB_PREPROC_CONCAT_(x, y)
/// @}
/// Macro for determining if GCC version is >= than X.Y
#if defined(__GNUC__)
    #define OPENVDB_CHECK_GCC(MAJOR, MINOR) \
        (__GNUC__ > MAJOR || (__GNUC__ == MAJOR && __GNUC_MINOR__ >= MINOR))
#else
    // Non-GCC compilers: the version check always fails.
    #define OPENVDB_CHECK_GCC(MAJOR, MINOR) 0
#endif
/// OpenVDB now requires C++11
#define OPENVDB_HAS_CXX11 1
/// SIMD Intrinsic Headers
#if defined(OPENVDB_USE_SSE42) || defined(OPENVDB_USE_AVX)
    #if defined(_WIN32)
        #include <intrin.h>
    #elif defined(__GNUC__)
        #if defined(__x86_64__) || defined(__i386__)
            #include <x86intrin.h>
        #elif defined(__ARM_NEON__)
            #include <arm_neon.h>
        #endif
    #endif
#endif
/// Bracket code with OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN/_END,
/// as in the following example, to inhibit ICC remarks about unreachable code:
/// @code
/// template<typename NodeType>
/// void processNode(NodeType& node)
/// {
///     OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
///     if (NodeType::LEVEL == 0) return; // ignore leaf nodes
///     int i = 0;
///     ...
///     OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
/// }
/// @endcode
/// In the above, <tt>NodeType::LEVEL == 0</tt> is a compile-time constant expression,
/// so for some template instantiations, the line below it is unreachable.
#if defined(__INTEL_COMPILER)
    // Disable ICC remarks 111 ("statement is unreachable"), 128 ("loop is not reachable"),
    // 185 ("dynamic initialization in unreachable code"), and 280 ("selector expression
    // is constant").
    #define OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN \
        _Pragma("warning (push)") \
        _Pragma("warning (disable:111)") \
        _Pragma("warning (disable:128)") \
        _Pragma("warning (disable:185)") \
        _Pragma("warning (disable:280)")
    #define OPENVDB_NO_UNREACHABLE_CODE_WARNING_END \
        _Pragma("warning (pop)")
#elif defined(__clang__)
    #define OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN \
        PRAGMA(clang diagnostic push) \
        PRAGMA(clang diagnostic ignored "-Wunreachable-code")
    #define OPENVDB_NO_UNREACHABLE_CODE_WARNING_END \
        PRAGMA(clang diagnostic pop)
#else
    // Other compilers: the brackets expand to nothing.
    #define OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    #define OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
#endif
/// @brief Bracket code with OPENVDB_NO_DEPRECATION_WARNING_BEGIN/_END,
/// to inhibit warnings about deprecated code.
/// @note Use this sparingly. Remove references to deprecated code if at all possible.
/// @details Example:
/// @code
/// [[deprecated]] void myDeprecatedFunction() {}
///
/// {
///     OPENVDB_NO_DEPRECATION_WARNING_BEGIN
///     myDeprecatedFunction();
///     OPENVDB_NO_DEPRECATION_WARNING_END
/// }
/// @endcode
#if defined __INTEL_COMPILER
    #define OPENVDB_NO_DEPRECATION_WARNING_BEGIN \
        _Pragma("warning (push)") \
        _Pragma("warning (disable:1478)") \
        PRAGMA(message("NOTE: ignoring deprecation warning at " __FILE__ \
            ":" OPENVDB_PREPROC_STRINGIFY(__LINE__)))
    #define OPENVDB_NO_DEPRECATION_WARNING_END \
        _Pragma("warning (pop)")
#elif defined __clang__
    #define OPENVDB_NO_DEPRECATION_WARNING_BEGIN \
        _Pragma("clang diagnostic push") \
        _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"")
    // note: no #pragma message, since Clang treats them as warnings
    #define OPENVDB_NO_DEPRECATION_WARNING_END \
        _Pragma("clang diagnostic pop")
#elif defined __GNUC__
    #define OPENVDB_NO_DEPRECATION_WARNING_BEGIN \
        _Pragma("GCC diagnostic push") \
        _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \
        _Pragma("message(\"NOTE: ignoring deprecation warning\")")
    #define OPENVDB_NO_DEPRECATION_WARNING_END \
        _Pragma("GCC diagnostic pop")
#elif defined _MSC_VER
    #define OPENVDB_NO_DEPRECATION_WARNING_BEGIN \
        __pragma(warning(push)) \
        __pragma(warning(disable : 4996)) \
        __pragma(message("NOTE: ignoring deprecation warning at " __FILE__ \
            ":" OPENVDB_PREPROC_STRINGIFY(__LINE__)))
    #define OPENVDB_NO_DEPRECATION_WARNING_END \
        __pragma(warning(pop))
#else
    // Unknown compiler: the brackets expand to nothing.
    #define OPENVDB_NO_DEPRECATION_WARNING_BEGIN
    #define OPENVDB_NO_DEPRECATION_WARNING_END
#endif
/// @brief Bracket code with OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN/_END,
/// to inhibit warnings about type conversion.
/// @note Use this sparingly. Use static casts and explicit type conversion if at all possible.
/// @details Example:
/// @code
/// float value = 0.1f;
/// OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
/// int valueAsInt = value;
/// OPENVDB_NO_TYPE_CONVERSION_WARNING_END
/// @endcode
#if defined __INTEL_COMPILER
    #define OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
    #define OPENVDB_NO_TYPE_CONVERSION_WARNING_END
#elif defined __GNUC__
    // Note: Clang also defines __GNUC__, so it takes this branch too.
    // -Wfloat-conversion was only introduced in GCC 4.9
    #if OPENVDB_CHECK_GCC(4, 9)
        #define OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN \
            _Pragma("GCC diagnostic push") \
            _Pragma("GCC diagnostic ignored \"-Wconversion\"") \
            _Pragma("GCC diagnostic ignored \"-Wfloat-conversion\"")
    #else
        #define OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN \
            _Pragma("GCC diagnostic push") \
            _Pragma("GCC diagnostic ignored \"-Wconversion\"")
    #endif
    #define OPENVDB_NO_TYPE_CONVERSION_WARNING_END \
        _Pragma("GCC diagnostic pop")
#else
    #define OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
    #define OPENVDB_NO_TYPE_CONVERSION_WARNING_END
#endif
/// Helper macros for defining library symbol visibility
#ifdef OPENVDB_EXPORT
#undef OPENVDB_EXPORT
#endif
#ifdef OPENVDB_IMPORT
#undef OPENVDB_IMPORT
#endif
#ifdef __GNUC__
    #define OPENVDB_EXPORT __attribute__((visibility("default")))
    #define OPENVDB_IMPORT __attribute__((visibility("default")))
#endif
#ifdef _WIN32
    // On Windows the __declspec forms (or empty definitions for non-DLL
    // builds) take precedence.  Undefine the GCC visibility definitions
    // first, so that MinGW-style toolchains (which define both __GNUC__
    // and _WIN32) do not emit macro-redefinition warnings.
    #ifdef OPENVDB_DLL
        #undef OPENVDB_EXPORT
        #undef OPENVDB_IMPORT
        #define OPENVDB_EXPORT __declspec(dllexport)
        #define OPENVDB_IMPORT __declspec(dllimport)
    #else
        #undef OPENVDB_EXPORT
        #undef OPENVDB_IMPORT
        #define OPENVDB_EXPORT
        #define OPENVDB_IMPORT
    #endif
#endif
/// All classes and public free standing functions must be explicitly marked
/// as \<lib\>_API to be exported. The \<lib\>_PRIVATE macros are defined when
/// building that particular library.
#ifdef OPENVDB_API
#undef OPENVDB_API
#endif
// OPENVDB_PRIVATE is defined while compiling the core library itself, so its
// symbols are exported; consumers of the headers see the import definition.
#ifdef OPENVDB_PRIVATE
    #define OPENVDB_API OPENVDB_EXPORT
#else
    #define OPENVDB_API OPENVDB_IMPORT
#endif
#ifdef OPENVDB_HOUDINI_API
#undef OPENVDB_HOUDINI_API
#endif
#ifdef OPENVDB_HOUDINI_PRIVATE
    #define OPENVDB_HOUDINI_API OPENVDB_EXPORT
#else
    #define OPENVDB_HOUDINI_API OPENVDB_IMPORT
#endif
// ICC diagnostics 1710/1711/1712 flag reads of, writes to, and address-taking
// of statically allocated variables; these brackets suppress or re-enable them.
#if defined(__ICC)
// Use these defines to bracket a region of code that has safe static accesses.
// Keep the region as small as possible.
#define OPENVDB_START_THREADSAFE_STATIC_REFERENCE   __pragma(warning(disable:1710))
#define OPENVDB_FINISH_THREADSAFE_STATIC_REFERENCE  __pragma(warning(default:1710))
#define OPENVDB_START_THREADSAFE_STATIC_WRITE       __pragma(warning(disable:1711))
#define OPENVDB_FINISH_THREADSAFE_STATIC_WRITE      __pragma(warning(default:1711))
#define OPENVDB_START_THREADSAFE_STATIC_ADDRESS     __pragma(warning(disable:1712))
#define OPENVDB_FINISH_THREADSAFE_STATIC_ADDRESS    __pragma(warning(default:1712))
// Use these defines to bracket a region of code that has unsafe static accesses.
// Keep the region as small as possible.
#define OPENVDB_START_NON_THREADSAFE_STATIC_REFERENCE   __pragma(warning(disable:1710))
#define OPENVDB_FINISH_NON_THREADSAFE_STATIC_REFERENCE  __pragma(warning(default:1710))
#define OPENVDB_START_NON_THREADSAFE_STATIC_WRITE       __pragma(warning(disable:1711))
#define OPENVDB_FINISH_NON_THREADSAFE_STATIC_WRITE      __pragma(warning(default:1711))
#define OPENVDB_START_NON_THREADSAFE_STATIC_ADDRESS     __pragma(warning(disable:1712))
#define OPENVDB_FINISH_NON_THREADSAFE_STATIC_ADDRESS    __pragma(warning(default:1712))
// Simpler version for one-line cases
#define OPENVDB_THREADSAFE_STATIC_REFERENCE(CODE) \
    __pragma(warning(disable:1710)); CODE; __pragma(warning(default:1710))
#define OPENVDB_THREADSAFE_STATIC_WRITE(CODE) \
    __pragma(warning(disable:1711)); CODE; __pragma(warning(default:1711))
#define OPENVDB_THREADSAFE_STATIC_ADDRESS(CODE) \
    __pragma(warning(disable:1712)); CODE; __pragma(warning(default:1712))
#else // GCC does not support these compiler warnings
#define OPENVDB_START_THREADSAFE_STATIC_REFERENCE
#define OPENVDB_FINISH_THREADSAFE_STATIC_REFERENCE
#define OPENVDB_START_THREADSAFE_STATIC_WRITE
#define OPENVDB_FINISH_THREADSAFE_STATIC_WRITE
#define OPENVDB_START_THREADSAFE_STATIC_ADDRESS
#define OPENVDB_FINISH_THREADSAFE_STATIC_ADDRESS
#define OPENVDB_START_NON_THREADSAFE_STATIC_REFERENCE
#define OPENVDB_FINISH_NON_THREADSAFE_STATIC_REFERENCE
#define OPENVDB_START_NON_THREADSAFE_STATIC_WRITE
#define OPENVDB_FINISH_NON_THREADSAFE_STATIC_WRITE
#define OPENVDB_START_NON_THREADSAFE_STATIC_ADDRESS
#define OPENVDB_FINISH_NON_THREADSAFE_STATIC_ADDRESS
#define OPENVDB_THREADSAFE_STATIC_REFERENCE(CODE) CODE
#define OPENVDB_THREADSAFE_STATIC_WRITE(CODE) CODE
#define OPENVDB_THREADSAFE_STATIC_ADDRESS(CODE) CODE
#endif // defined(__ICC)
#endif // OPENVDB_PLATFORM_HAS_BEEN_INCLUDED
| 10,119 | C | 36.481481 | 96 | 0.701749 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/TypeList.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file TypeList.h
///
/// @brief A TypeList provides a compile time sequence of heterogeneous types
/// which can be accessed, transformed and executed over in various ways.
/// It incorporates a subset of functionality similar to boost::mpl::vector
/// however provides most of its content through using declarations rather
/// than additional typed classes.
#ifndef OPENVDB_TYPELIST_HAS_BEEN_INCLUDED
#define OPENVDB_TYPELIST_HAS_BEEN_INCLUDED
#include "version.h"
#include <tuple>
#include <type_traits>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
/// @cond OPENVDB_TYPES_INTERNAL
template<typename... Ts> struct TypeList; // forward declaration
namespace typelist_internal {
// Implementation details of @c TypeList
/// @brief Dummy struct, used as the return type from invalid or out-of-range
///        @c TypeList queries.
struct NullType {};
/// @brief Type resolver for index queries
/// @details Defines a type at a given location within a @c TypeList or the
///          @c NullType if the index is out-of-range. The last template
///          parameter is used to determine if the index is in range.
/// @tparam ListT The @c TypeList
/// @tparam Idx The index of the type to get
template<typename ListT, size_t Idx, typename = void> struct TSGetElementImpl;
/// @brief Partial specialization for valid (in range) index queries.
/// @tparam Ts Unpacked types from a @c TypeList
/// @tparam Idx The index of the type to get
template<typename... Ts, size_t Idx>
struct TSGetElementImpl<TypeList<Ts...>, Idx,
    typename std::enable_if<(Idx < sizeof...(Ts) && sizeof...(Ts))>::type> {
    using type = typename std::tuple_element<Idx, std::tuple<Ts...>>::type;
};
/// @brief Partial specialization for invalid index queries (i.e. out-of-range
///        indices such as @c TypeList<Int32>::Get<1>). Defines the NullType.
/// @tparam Ts Unpacked types from a @c TypeList
/// @tparam Idx The index of the type to get
template<typename... Ts, size_t Idx>
struct TSGetElementImpl<TypeList<Ts...>, Idx,
    typename std::enable_if<!(Idx < sizeof...(Ts) && sizeof...(Ts))>::type> {
    using type = NullType;
};
/// @brief Search for a given type within a @c TypeList.
/// @details If the type is found, a @c bool constant @c Value is set to true
///          and an @c int64_t @c Index points to the location of the type. If
///          multiple versions of the types exist, the value of @c Index is
///          always the location of the first matching type. If the type is not
///          found, @c Value is set to false and @c Index is set to -1.
/// @note This implementation is recursively defined until the type is found
///       or until the end of the list is reached. The last template argument
///       is used as an internal counter to track the current index being
///       evaluated.
/// @tparam ListT The @c TypeList
/// @tparam T The type to find
template <typename ListT, typename T, size_t=0>
struct TSHasTypeImpl;
/// @brief Partial specialization on an empty @c TypeList, instantiated when
///        @c TSHasTypeImpl has been invoked with an empty @c TypeList or when
///        a recursive search reaches the end of a @c TypeList.
/// @tparam T The type to find
/// @tparam Idx Current index
template <typename T, size_t Idx>
struct TSHasTypeImpl<TypeList<>, T, Idx> {
    static constexpr bool Value = false;
    static constexpr int64_t Index = -1;
};
/// @brief Partial specialization on a @c TypeList which still contains types,
///        but the current type being evaluated @c U does not match the given
///        type @c T.
/// @tparam U The current type being evaluated within the @c TypeList
/// @tparam T The type to find
/// @tparam Ts Remaining types
/// @tparam Idx Current index
template <typename U, typename T, typename... Ts, size_t Idx>
struct TSHasTypeImpl<TypeList<U, Ts...>, T, Idx> :
    TSHasTypeImpl<TypeList<Ts...>, T, Idx+1> {};
/// @brief Partial specialization on a @c TypeList where @c T matches the
///        current type (i.e. the type has been found).
/// @tparam T The type to find
/// @tparam Ts Remaining types
/// @tparam Idx Current index
template <typename T, typename... Ts, size_t Idx>
struct TSHasTypeImpl<TypeList<T, Ts...>, T, Idx>
{
    static constexpr bool Value = true;
    static constexpr int64_t Index = static_cast<int64_t>(Idx);
};
/// @brief Remove any duplicate types from a @c TypeList.
/// @details This implementation effectively rebuilds a @c TypeList by starting
///          with an empty @c TypeList and recursively defining an expanded
///          @c TypeList for every type (first to last), only if the type does
///          not already exist in the new @c TypeList. This has the effect of
///          dropping all but the first of duplicate types.
/// @note Each type must define a new instantiation of this object.
/// @tparam ListT The starting @c TypeList, usually (but not limited to) an
///         empty @c TypeList
/// @tparam Ts The list of types to make unique
template <typename ListT, typename... Ts>
struct TSMakeUniqueImpl {
    using type = ListT;
};
/// @brief Partial specialization for type packs, where by the next type @c U
///        is checked in the existing type set @c Ts for duplication. If the
///        type does not exist, it is added to the new @c TypeList definition,
///        otherwise it is dropped. In either case, this class is recursively
///        defined with the remaining types @c Us.
/// @tparam Ts Current types in the @c TypeList
/// @tparam U Type to check for duplication in @c Ts
/// @tparam Us Remaining types
template <typename... Ts, typename U, typename... Us>
struct TSMakeUniqueImpl<TypeList<Ts...>, U, Us...>
{
    using type = typename std::conditional<
        TSHasTypeImpl<TypeList<Ts...>, U>::Value,
        typename TSMakeUniqueImpl<TypeList<Ts...>, Us...>::type,
        typename TSMakeUniqueImpl<TypeList<Ts..., U>, Us...>::type >::type;
};
/// @brief Append any number of types to a @c TypeList
/// @details Defines a new @c TypeList with the provided types appended
/// @tparam ListT The @c TypeList to append to
/// @tparam Ts Types to append
template<typename ListT, typename... Ts> struct TSAppendImpl;
/// @brief Partial specialization for a @c TypeList with a list of zero or more
///        types to append
/// @tparam Ts Current types within the @c TypeList
/// @tparam OtherTs Other types to append
template<typename... Ts, typename... OtherTs>
struct TSAppendImpl<TypeList<Ts...>, OtherTs...> {
    using type = TypeList<Ts..., OtherTs...>;
};
/// @brief Partial specialization for a @c TypeList with another @c TypeList.
///        Appends the other TypeList's members.
/// @tparam Ts Types within the first @c TypeList
/// @tparam OtherTs Types within the second @c TypeList
template<typename... Ts, typename... OtherTs>
struct TSAppendImpl<TypeList<Ts...>, TypeList<OtherTs...>> {
    using type = TypeList<Ts..., OtherTs...>;
};
/// @brief Remove all occurrences of type T from a @c TypeList
/// @details Defines a new @c TypeList with the provided types removed
/// @tparam ListT The @c TypeList
/// @tparam T Type to remove
template<typename ListT, typename T> struct TSEraseImpl;
/// @brief Partial specialization for an empty @c TypeList
/// @tparam T Type to remove, has no effect
template<typename T>
struct TSEraseImpl<TypeList<>, T> { using type = TypeList<>; };
/// @brief Partial specialization where the currently evaluating type in a
///        @c TypeList matches the type to remove. Recursively defines this
///        implementation with the remaining types.
/// @tparam Ts Unpacked types within the @c TypeList
/// @tparam T Type to remove
template<typename... Ts, typename T>
struct TSEraseImpl<TypeList<T, Ts...>, T> {
    using type = typename TSEraseImpl<TypeList<Ts...>, T>::type;
};
/// @brief Partial specialization where the currently evaluating type @c T2 in
///        a @c TypeList does not match the type to remove @c T. Recursively
///        defines this implementation with the remaining types.
/// @tparam T2 Current type within the @c TypeList, which does not match @c T
/// @tparam Ts Other types within the @c TypeList
/// @tparam T Type to remove
template<typename T2, typename... Ts, typename T>
struct TSEraseImpl<TypeList<T2, Ts...>, T> {
    using type = typename TSAppendImpl<TypeList<T2>,
        typename TSEraseImpl<TypeList<Ts...>, T>::type>::type;
};
/// @brief Front end implementation to call TSEraseImpl which removes all
///        occurrences of a type from a @c TypeList. This struct handles the
///        case where the type to remove is another @c TypeList, in which case
///        all types in the second @c TypeList are removed from the first.
/// @tparam ListT The @c TypeList
/// @tparam Ts Types in the @c TypeList
template<typename ListT, typename... Ts> struct TSRemoveImpl;
/// @brief Partial specialization when there are no types in the @c TypeList.
/// @tparam ListT The @c TypeList
template<typename ListT>
struct TSRemoveImpl<ListT> { using type = ListT; };
/// @brief Partial specialization when the type to remove @c T is not another
///        @c TypeList. @c T is removed from the @c TypeList.
/// @tparam ListT The @c TypeList
/// @tparam T Type to remove
/// @tparam Ts Types in the @c TypeList
template<typename ListT, typename T, typename... Ts>
struct TSRemoveImpl<ListT, T, Ts...> {
    using type = typename TSRemoveImpl<typename TSEraseImpl<ListT, T>::type, Ts...>::type;
};
/// @brief Partial specialization when the type to remove is another
///        @c TypeList. All types within the other type list are removed from
///        the first list.
/// @tparam ListT The @c TypeList
/// @tparam Ts Types from the second @c TypeList to remove from the first
template<typename ListT, typename... Ts>
struct TSRemoveImpl<ListT, TypeList<Ts...>> {
    using type = typename TSRemoveImpl<ListT, Ts...>::type;
};
/// @brief Remove the first element of a type list. If the list is empty,
///        nothing is done. This base configuration handles the empty list.
/// @note Much cheaper to instantiate than TSRemoveIndicesImpl
/// @tparam T The @c TypeList
template<typename T>
struct TSRemoveFirstImpl {
    using type = TypeList<>;
};
/// @brief Partial specialization for removing the first type of a @c TypeList
///        when the list is not empty i.e. does that actual work.
/// @tparam T The first type in the @c TypeList.
/// @tparam Ts Remaining types in the @c TypeList
template<typename T, typename... Ts>
struct TSRemoveFirstImpl<TypeList<T, Ts...>> {
    using type = TypeList<Ts...>;
};
/// @brief Remove the last element of a type list. If the list is empty,
///        nothing is done. This base configuration handles the empty list.
/// @note Cheaper to instantiate than TSRemoveIndicesImpl
/// @tparam T The @c TypeList
template<typename T>
struct TSRemoveLastImpl { using type = TypeList<>; };
/// @brief Partial specialization for removing the last type of a @c TypeList.
///        This instance is instantiated when the @c TypeList contains a
///        single type, or the primary struct which recursively removes types
///        (see below) hits the last type. Evaluates the last type to the empty
///        list (see above).
/// @tparam T The last type in the @c TypeList
template<typename T>
struct TSRemoveLastImpl<TypeList<T>> : TSRemoveLastImpl<T> {};
/// @brief Partial specialization for removing the last type of a @c TypeList
///        with a type list size of two or more. Recursively defines this
///        implementation with the remaining types, effectively rebuilding the
///        @c TypeList until the last type is hit, which is dropped.
/// @tparam T The current type in the @c TypeList
/// @tparam Ts Remaining types in the @c TypeList
template<typename T, typename... Ts>
struct TSRemoveLastImpl<TypeList<T, Ts...>>
{
    using type =
        typename TypeList<T>::template
            Append<typename TSRemoveLastImpl<TypeList<Ts...>>::type>;
};
/// @brief Remove a number of types from a @c TypeList based on a @c First and
///        @c Last index.
/// @details Both indices are inclusive, such that when <tt>First == Last</tt>
///          a single type is removed (assuming the index exists). If
///          <tt>Last < First</tt>, nothing is done. Any indices which do not
///          exist are ignored. If @c Last is greater than the number of types
///          in the @c TypeList, all types from @c First to the end of the list
///          are dropped.
/// @tparam ListT The @c TypeList
/// @tparam First The first index
/// @tparam Last The last index
/// @tparam Idx Internal counter for the current index
template<typename ListT, size_t First, size_t Last, size_t Idx=0>
struct TSRemoveIndicesImpl;
/// @brief Partial specialization for an empty @c TypeList
/// @tparam First The first index
/// @tparam Last The last index
/// @tparam Idx Internal counter for the current index
template<size_t First, size_t Last, size_t Idx>
struct TSRemoveIndicesImpl<TypeList<>, First, Last, Idx> {
    using type = TypeList<>;
};
/// @brief Partial specialization for a @c TypeList containing a single element.
/// @tparam T The last or only type in a @c TypeList
/// @tparam First The first index
/// @tparam Last The last index
/// @tparam Idx Internal counter for the current index
template<typename T, size_t First, size_t Last, size_t Idx>
struct TSRemoveIndicesImpl<TypeList<T>, First, Last, Idx>
{
private:
    static constexpr bool Remove = Idx >= First && Idx <= Last;
public:
    using type = typename std::conditional<Remove, TypeList<>, TypeList<T>>::type;
};
/// @brief Partial specialization for a @c TypeList containing two or more types.
/// @details This implementation effectively rebuilds a @c TypeList by starting
///          with an empty @c TypeList and recursively defining an expanded
///          @c TypeList for every type (first to last), only if the type's
///          index does not fall within the range of indices defined by
///          @c First and @c Last. Recursively defines this implementation with
///          all but the last type.
/// @tparam T The currently evaluating type within a @c TypeList
/// @tparam Ts Remaining types in the @c TypeList
/// @tparam First The first index
/// @tparam Last The last index
/// @tparam Idx Internal counter for the current index
template<typename T, typename... Ts, size_t First, size_t Last, size_t Idx>
struct TSRemoveIndicesImpl<TypeList<T, Ts...>, First, Last, Idx>
{
private:
    using ThisList = typename TSRemoveIndicesImpl<TypeList<T>, First, Last, Idx>::type;
    using NextList = typename TSRemoveIndicesImpl<TypeList<Ts...>, First, Last, Idx+1>::type;
public:
    using type = typename ThisList::template Append<NextList>;
};
/// @brief Recursion terminator for TSForEachImpl: an empty pack does nothing.
template<typename OpT> inline void TSForEachImpl(OpT) {}
/// @brief Invoke @a op on a default-constructed value of each type in the
///        pack, in order (so every type must be default-constructible).
template<typename OpT, typename T, typename... Ts>
inline void TSForEachImpl(OpT op) { op(T()); TSForEachImpl<OpT, Ts...>(op); }
} // namespace typelist_internal
/// @endcond
/// @brief A list of types (not necessarily unique)
/// @details Example:
/// @code
/// using MyTypes = openvdb::TypeList<int, float, int, double, float>;
/// @endcode
template<typename... Ts>
struct TypeList
{
/// The type of this list
using Self = TypeList;
/// @brief The number of types in the type list
static constexpr size_t Size = sizeof...(Ts);
/// @brief Access a particular element of this type list. If the index
/// is out of range, typelist_internal::NullType is returned.
template<size_t N>
using Get = typename typelist_internal::TSGetElementImpl<Self, N>::type;
using Front = Get<0>;
using Back = Get<Size-1>;
/// @brief True if this list contains the given type, false otherwise
/// @details Example:
/// @code
/// {
/// using IntTypes = openvdb::TypeList<Int16, Int32, Int64>;
/// using RealTypes = openvdb::TypeList<float, double>;
/// }
/// {
/// openvdb::TypeList<IntTypes>::Contains<Int32>; // true
/// openvdb::TypeList<RealTypes>::Contains<Int32>; // false
/// }
/// @endcode
template<typename T>
static constexpr bool Contains = typelist_internal::TSHasTypeImpl<Self, T>::Value;
/// @brief Returns the index of the first found element of the given type, -1 if
/// no matching element exists.
/// @details Example:
/// @code
/// {
/// using IntTypes = openvdb::TypeList<Int16, Int32, Int64>;
/// using RealTypes = openvdb::TypeList<float, double>;
/// }
/// {
/// const int64_t L1 = openvdb::TypeList<IntTypes>::Index<Int32>; // 1
/// const int64_t L2 = openvdb::TypeList<RealTypes>::Index<Int32>; // -1
/// }
/// @endcode
template<typename T>
static constexpr int64_t Index = typelist_internal::TSHasTypeImpl<Self, T>::Index;
/// @brief Remove any duplicate types from this TypeList by rotating the
/// next valid type left (maintains the order of other types). Optionally
/// combine the result with another TypeList.
/// @details Example:
/// @code
/// {
/// using Types = openvdb::TypeList<Int16, Int32, Int16, float, float, Int64>;
/// }
/// {
/// using UniqueTypes = Types::Unique<>; // <Int16, Int32, float, Int64>
/// }
/// @endcode
template<typename ListT = TypeList<>>
using Unique = typename typelist_internal::TSMakeUniqueImpl<ListT, Ts...>::type;
/// @brief Append types, or the members of another TypeList, to this list.
/// @details Example:
/// @code
/// {
/// using IntTypes = openvdb::TypeList<Int16, Int32, Int64>;
/// using RealTypes = openvdb::TypeList<float, double>;
/// using NumericTypes = IntTypes::Append<RealTypes>;
/// }
/// {
/// using IntTypes = openvdb::TypeList<Int16>::Append<Int32, Int64>;
/// using NumericTypes = IntTypes::Append<float>::Append<double>;
/// }
/// @endcode
template<typename... TypesToAppend>
using Append = typename typelist_internal::TSAppendImpl<Self, TypesToAppend...>::type;
/// @brief Remove all occurrences of one or more types, or the members of
/// another TypeList, from this list.
/// @details Example:
/// @code
/// {
/// using NumericTypes = openvdb::TypeList<float, double, Int16, Int32, Int64>;
/// using LongTypes = openvdb::TypeList<Int64, double>;
/// using ShortTypes = NumericTypes::Remove<LongTypes>; // float, Int16, Int32
/// }
/// @endcode
template<typename... TypesToRemove>
using Remove = typename typelist_internal::TSRemoveImpl<Self, TypesToRemove...>::type;
/// @brief Remove the first element of this type list. Has no effect if the
/// type list is already empty.
/// @details Example:
/// @code
/// {
/// using IntTypes = openvdb::TypeList<Int16, Int32, Int64>;
/// using EmptyTypes = openvdb::TypeList<>;
/// }
/// {
/// IntTypes::PopFront; // openvdb::TypeList<Int32, Int64>;
/// EmptyTypes::PopFront; // openvdb::TypeList<>;
/// }
/// @endcode
using PopFront = typename typelist_internal::TSRemoveFirstImpl<Self>::type;
/// @brief Remove the last element of this type list. Has no effect if the
/// type list is already empty.
/// @details Example:
/// @code
/// {
/// using IntTypes = openvdb::TypeList<Int16, Int32, Int64>;
/// using EmptyTypes = openvdb::TypeList<>;
/// }
/// {
/// IntTypes::PopBack; // openvdb::TypeList<Int16, Int32>;
/// EmptyTypes::PopBack; // openvdb::TypeList<>;
/// }
/// @endcode
using PopBack = typename typelist_internal::TSRemoveLastImpl<Self>::type;
/// @brief Return a new list with types removed by their location within the list.
/// If First is equal to Last, a single element is removed (if it exists).
/// If First is greater than Last, the list remains unmodified.
/// @details Example:
/// @code
/// {
/// using NumericTypes = openvdb::TypeList<float, double, Int16, Int32, Int64>;
/// }
/// {
/// using IntTypes = NumericTypes::RemoveByIndex<0,1>; // openvdb::TypeList<Int16, Int32, Int64>;
/// using RealTypes = NumericTypes::RemoveByIndex<2,4>; // openvdb::TypeList<float, double>;
/// using RemoveFloat = NumericTypes::RemoveByIndex<0,0>; // openvdb::TypeList<double, Int16, Int32, Int64>;
/// }
/// @endcode
template <size_t First, size_t Last>
using RemoveByIndex = typename typelist_internal::TSRemoveIndicesImpl<Self, First, Last>::type;
/// @brief Invoke a templated, unary functor on a value of each type in this list.
/// @details Example:
/// @code
/// #include <typeinfo>
///
/// template<typename ListT>
/// void printTypeList()
/// {
/// std::string sep;
/// auto op = [&](auto x) { // C++14
/// std::cout << sep << typeid(decltype(x)).name(); sep = ", "; };
/// ListT::foreach(op);
/// }
///
/// using MyTypes = openvdb::TypeList<int, float, double>;
/// printTypeList<MyTypes>(); // "i, f, d" (exact output is compiler-dependent)
/// @endcode
///
/// @note The functor object is passed by value. Wrap it with @c std::ref
/// to use the same object for each type.
    /// @note Implemented via typelist_internal::TSForEachImpl, which invokes
    /// @a op once per member type of this list; the functor is taken by value.
    template<typename OpT>
    static void foreach(OpT op) { typelist_internal::TSForEachImpl<OpT, Ts...>(op); }
};
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TYPELIST_HAS_BEEN_INCLUDED
| 21,968 | C | 40.295113 | 116 | 0.660825 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/Metadata.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#include "Metadata.h"
#include <tbb/mutex.h>
#include <algorithm> // for std::min()
#include <map>
#include <sstream>
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
using Mutex = tbb::mutex;
using Lock = Mutex::scoped_lock;
using createMetadata = Metadata::Ptr (*)();
using MetadataFactoryMap = std::map<Name, createMetadata>;
using MetadataFactoryMapCIter = MetadataFactoryMap::const_iterator;
// Registry mapping metadata type names to factory functions,
// with a mutex guarding concurrent registration and lookup.
struct LockedMetadataTypeRegistry {
    // Rule of Zero: the compiler-generated special members are correct,
    // so default them instead of providing empty user definitions.
    LockedMetadataTypeRegistry() = default;
    ~LockedMetadataTypeRegistry() = default;
    Mutex mMutex;            // guards all access to mMap
    MetadataFactoryMap mMap; // metadata type name -> factory function
};
// Global function for accessing the registry.
// The function-local static gives thread-safe, on-demand construction
// of the singleton (C++11 "magic static").
static LockedMetadataTypeRegistry*
getMetadataTypeRegistry()
{
    static LockedMetadataTypeRegistry registry;
    return &registry;
}
bool
Metadata::isRegisteredType(const Name &typeName)
{
LockedMetadataTypeRegistry *registry = getMetadataTypeRegistry();
Lock lock(registry->mMutex);
return (registry->mMap.find(typeName) != registry->mMap.end());
}
void
Metadata::registerType(const Name &typeName, Metadata::Ptr (*createMetadata)())
{
LockedMetadataTypeRegistry *registry = getMetadataTypeRegistry();
Lock lock(registry->mMutex);
if (registry->mMap.find(typeName) != registry->mMap.end()) {
OPENVDB_THROW(KeyError,
"Cannot register " << typeName << ". Type is already registered");
}
registry->mMap[typeName] = createMetadata;
}
void
Metadata::unregisterType(const Name &typeName)
{
LockedMetadataTypeRegistry *registry = getMetadataTypeRegistry();
Lock lock(registry->mMutex);
registry->mMap.erase(typeName);
}
Metadata::Ptr
Metadata::createMetadata(const Name &typeName)
{
    // Look up the factory registered under typeName and invoke it.
    LockedMetadataTypeRegistry* const reg = getMetadataTypeRegistry();
    Lock lock(reg->mMutex);
    const MetadataFactoryMapCIter factoryIt = reg->mMap.find(typeName);
    if (factoryIt == reg->mMap.end()) {
        OPENVDB_THROW(LookupError,
            "Cannot create metadata for unregistered type " << typeName);
    }
    return (factoryIt->second)();
}
void
Metadata::clearRegistry()
{
LockedMetadataTypeRegistry *registry = getMetadataTypeRegistry();
Lock lock(registry->mMutex);
registry->mMap.clear();
}
////////////////////////////////////////
bool
Metadata::operator==(const Metadata& other) const
{
    // Cheap rejections first: differing byte counts or type names
    // can never compare equal.
    if (this->size() != other.size()) return false;
    if (this->typeName() != other.typeName()) return false;
    // Otherwise serialize both values and compare the raw bytes.
    std::ostringstream selfBytes(std::ios_base::binary);
    std::ostringstream otherBytes(std::ios_base::binary);
    try {
        this->writeValue(selfBytes);
        other.writeValue(otherBytes);
        return selfBytes.str() == otherBytes.str();
    } catch (Exception&) {}
    // Serialization failed, so treat the values as unequal.
    return false;
}
////////////////////////////////////////
Metadata::Ptr
UnknownMetadata::copy() const
{
    // Clone this metadata, preserving both the original type name and
    // the opaque byte payload.
    Metadata::Ptr clone{new UnknownMetadata{mTypeName}};
    static_cast<UnknownMetadata*>(clone.get())->setValue(mBytes);
    return clone;
}
void
UnknownMetadata::copy(const Metadata& other)
{
    // Round-trip the other metadata through its serialized form:
    // write it out, then re-read its payload here as raw bytes.
    std::ostringstream buffer(std::ios_base::binary);
    other.write(buffer);
    std::istringstream reader(buffer.str(), std::ios_base::binary);
    const auto byteCount = readSize(reader);
    readValue(reader, byteCount);
}
void
UnknownMetadata::readValue(std::istream& is, Index32 numBytes)
{
mBytes.clear();
if (numBytes > 0) {
ByteVec buffer(numBytes);
is.read(reinterpret_cast<char*>(&buffer[0]), numBytes);
mBytes.swap(buffer);
}
}
void
UnknownMetadata::writeValue(std::ostream& os) const
{
    // Emit the opaque payload verbatim; nothing to do when it is empty.
    if (mBytes.empty()) return;
    os.write(reinterpret_cast<const char*>(mBytes.data()), mBytes.size());
}
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
| 3,816 | C++ | 22.417178 | 79 | 0.677411 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/MetaMap.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#include "MetaMap.h"
#include "util/logging.h"
#include <sstream>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
// Deep-copy construction: clone every (name, metadata) entry of @a other
// into this map (null metadata pointers are skipped by insertMeta()).
MetaMap::MetaMap(const MetaMap& other)
{
    this->insertMeta(other);
}
// Return a shallow copy of this map: the internal map itself is copied,
// so the new map's entries hold the same Metadata::Ptr values as this one
// (contrast with deepCopyMeta(), which clones each metadata value).
MetaMap::Ptr
MetaMap::copyMeta() const
{
    MetaMap::Ptr ret(new MetaMap);
    ret->mMeta = this->mMeta;
    return ret;
}
// Return a deep copy of this map: delegates to the copy constructor,
// which clones every metadata value entry by entry.
MetaMap::Ptr
MetaMap::deepCopyMeta() const
{
    return MetaMap::Ptr(new MetaMap(*this));
}
// Assignment: replace this map's contents with clones of other's entries.
MetaMap&
MetaMap::operator=(const MetaMap& other)
{
    if (&other != this) {
        this->clearMetadata();
        // Delegate to insertMeta(const MetaMap&).  This keeps the two copy
        // paths (copy construction and assignment) consistent and, unlike
        // the previous hand-rolled loop, skips null Metadata pointers
        // instead of dereferencing them.
        this->insertMeta(other);
    }
    return *this;
}
// Deserialize this map's contents from @a is, replacing any existing
// entries.  Stream layout (matching writeMeta()): an Index32 entry count,
// then for each entry its name string, its metadata type name string, and
// the serialized value.
void
MetaMap::readMeta(std::istream &is)
{
    // Clear out the current metamap if need be.
    this->clearMetadata();
    // Read in the number of metadata items.
    Index32 count = 0;
    is.read(reinterpret_cast<char*>(&count), sizeof(Index32));
    // Read in each metadata.
    for (Index32 i = 0; i < count; ++i) {
        // Read in the name.
        Name name = readString(is);
        // Read in the metadata typename.
        Name typeName = readString(is);
        // Read in the metadata value and add it to the map.
        if (Metadata::isRegisteredType(typeName)) {
            Metadata::Ptr metadata = Metadata::createMetadata(typeName);
            metadata->read(is);
            insertMeta(name, *metadata);
        } else {
            // Unregistered type: preserve the value as opaque raw bytes so
            // that the stream stays correctly positioned for later entries.
            UnknownMetadata metadata(typeName);
            metadata.read(is); // read raw bytes into an array
            // only add unknown metadata to the grid if not temporary,
            // denoted by a double underscore prefix (such as __metadata)
            bool temporary = typeName.compare(0, 2, "__") == 0;
            if (!temporary) {
                insertMeta(name, metadata);
            }
        }
    }
}
void
MetaMap::writeMeta(std::ostream &os) const
{
// Write out the number of metadata items we have in the map. Note that we
// save as Index32 to save a 32-bit number. Using size_t would be platform
// dependent.
Index32 count = static_cast<Index32>(metaCount());
os.write(reinterpret_cast<char*>(&count), sizeof(Index32));
// Iterate through each metadata and write it out.
for (ConstMetaIterator iter = beginMeta(); iter != endMeta(); ++iter) {
// Write the name of the metadata.
writeString(os, iter->first);
// Write the type name of the metadata.
writeString(os, iter->second->typeName());
// Write out the metadata value.
iter->second->write(os);
}
}
void
MetaMap::insertMeta(const Name &name, const Metadata &m)
{
if (name.size() == 0)
OPENVDB_THROW(ValueError, "Metadata name cannot be an empty string");
// See if the value already exists, if so then replace the existing one.
MetaIterator iter = mMeta.find(name);
if (iter == mMeta.end()) {
// Create a copy of the metadata and store it in the map
Metadata::Ptr tmp = m.copy();
mMeta[name] = tmp;
} else {
if (iter->second->typeName() != m.typeName()) {
std::ostringstream ostr;
ostr << "Cannot assign value of type "
<< m.typeName() << " to metadata attribute " << name
<< " of " << "type " << iter->second->typeName();
OPENVDB_THROW(TypeError, ostr.str());
}
// else
Metadata::Ptr tmp = m.copy();
iter->second = tmp;
}
}
void
MetaMap::insertMeta(const MetaMap& other)
{
for (ConstMetaIterator it = other.beginMeta(), end = other.endMeta(); it != end; ++it) {
if (it->second) this->insertMeta(it->first, *it->second);
}
}
// Erase the entry named @a name, if present; unknown names are ignored.
void
MetaMap::removeMeta(const Name &name)
{
    // map::erase(key) is already a no-op when the key is absent,
    // so no preliminary find() is required.
    mMeta.erase(name);
}
// Two MetaMaps compare equal when they hold the same names bound to equal
// metadata values (or matching null entries).  The lockstep walk below
// relies on both maps iterating their keys in the same sorted order.
bool
MetaMap::operator==(const MetaMap& other) const
{
    // Check if the two maps have the same number of elements.
    if (this->mMeta.size() != other.mMeta.size()) return false;
    // Iterate over the two maps in sorted order.
    for (ConstMetaIterator it = beginMeta(), otherIt = other.beginMeta(), end = endMeta();
        it != end; ++it, ++otherIt)
    {
        // Check if the two keys match.
        if (it->first != otherIt->first) return false;
        // Check if the two values are either both null or both non-null pointers.
        if (bool(it->second) != bool(otherIt->second)) return false;
        // If the two values are both non-null, compare their contents.
        if (it->second && otherIt->second && *it->second != *otherIt->second) return false;
    }
    return true;
}
std::string
MetaMap::str(const std::string& indent) const
{
std::ostringstream ostr;
char sep[2] = { 0, 0 };
for (ConstMetaIterator iter = beginMeta(); iter != endMeta(); ++iter) {
ostr << sep << indent << iter->first;
if (iter->second) {
const std::string value = iter->second->str();
if (!value.empty()) ostr << ": " << value;
}
sep[0] = '\n';
}
return ostr.str();
}
// Stream insertion: writes the map's default (unindented) string form.
std::ostream&
operator<<(std::ostream& ostr, const MetaMap& metamap)
{
    return ostr << metamap.str();
}
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
| 5,504 | C++ | 26.118226 | 92 | 0.59484 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/version.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file openvdb/version.h
/// @brief Library and file format version numbers
///
/// @details
/// When the library is built with the latest ABI, its namespace has the form
/// <B>openvdb::vX_Y</B>, where @e X and @e Y are the major and minor version numbers.
///
/// The library can be built using an older ABI by changing the value of the
/// @b OPENVDB_ABI_VERSION_NUMBER macro (e.g., via <TT>-DOPENVDB_ABI_VERSION_NUMBER=<I>N</I></TT>).
/// In that case, the namespace has the form <B>openvdb::vX_YabiN</B>,
/// where N is the ABI version number.
/// The ABI version must be set consistently when building code that depends on OpenVDB.
///
/// The ABI version number defaults to the library major version number,
/// which gets incremented whenever changes are made to the ABI of the
/// Grid class or related classes (Tree, Transform, Metadata, etc.).
/// Setting the ABI version number to an earlier library version number
/// disables grid ABI changes made since that library version.
/// The OpenVDB 1.x ABI is no longer supported, and support for other old ABIs
/// might also eventually be dropped.
///
/// The library minor version number gets incremented whenever a change is made
/// to any aspect of the public API (not just the grid API) that necessitates
/// changes to client code. Changes to APIs in private or internal namespaces
/// do not trigger a minor version number increment; such APIs should not be used
/// in client code.
///
/// A patch version number increment indicates a change—usually a new feature
/// or a bug fix—that does not necessitate changes to client code but rather
/// only recompilation of that code (because the library namespace incorporates
/// the version number).
///
/// The file format version number gets incremented when it becomes possible
/// to write files that cannot safely be read with older versions of the library.
/// Not all files written in a newer format are incompatible with older libraries, however.
/// And in general, files containing grids of unknown type can be read safely,
/// although the unknown grids will not be accessible.
#ifndef OPENVDB_VERSION_HAS_BEEN_INCLUDED
#define OPENVDB_VERSION_HAS_BEEN_INCLUDED
#include "Platform.h"
#include <cstddef> // size_t
#include <cstdint> // uint32_t
// Library major, minor and patch version numbers
#define OPENVDB_LIBRARY_MAJOR_VERSION_NUMBER 8
#define OPENVDB_LIBRARY_MINOR_VERSION_NUMBER 0
#define OPENVDB_LIBRARY_PATCH_VERSION_NUMBER 1
// If OPENVDB_ABI_VERSION_NUMBER is already defined (e.g., via -DOPENVDB_ABI_VERSION_NUMBER=N)
// use that ABI version. Otherwise, use this library version's default ABI.
#ifdef OPENVDB_ABI_VERSION_NUMBER
#if OPENVDB_ABI_VERSION_NUMBER > OPENVDB_LIBRARY_MAJOR_VERSION_NUMBER
    #error expected OPENVDB_ABI_VERSION_NUMBER <= OPENVDB_LIBRARY_MAJOR_VERSION_NUMBER
#endif
#else
#define OPENVDB_ABI_VERSION_NUMBER OPENVDB_LIBRARY_MAJOR_VERSION_NUMBER
#endif
// If using an OPENVDB_ABI_VERSION_NUMBER that has been deprecated, issue a message
// directive. Note that an error is also set in openvdb.cc which enforces stricter
// behavior during compilation of the library. Both can be optionally suppressed
// by defining OPENVDB_USE_DEPRECATED_ABI_<VERSION>.
#ifndef OPENVDB_USE_DEPRECATED_ABI_5
#if OPENVDB_ABI_VERSION_NUMBER == 5
PRAGMA(message("NOTE: ABI = 5 is deprecated, CMake option OPENVDB_USE_DEPRECATED_ABI_5 "
"suppresses this message"))
#endif
#endif
#ifndef OPENVDB_USE_DEPRECATED_ABI_6
#if OPENVDB_ABI_VERSION_NUMBER == 6
PRAGMA(message("NOTE: ABI = 6 is deprecated, CMake option OPENVDB_USE_DEPRECATED_ABI_6 "
"suppresses this message"))
#endif
#endif
#if OPENVDB_ABI_VERSION_NUMBER == OPENVDB_LIBRARY_MAJOR_VERSION_NUMBER
/// @brief The version namespace name for this library version
/// @hideinitializer
///
/// When the ABI version number matches the library major version number,
/// symbols are named as in the following examples:
/// - @b openvdb::vX_Y::Vec3i
/// - @b openvdb::vX_Y::io::File
/// - @b openvdb::vX_Y::tree::Tree
///
/// where X and Y are the major and minor version numbers.
///
/// When the ABI version number does not match the library major version number,
/// symbol names include the ABI version:
/// - @b openvdb::vX_YabiN::Vec3i
/// - @b openvdb::vX_YabiN::io::File
/// - @b openvdb::vX_YabiN::tree::Tree
///
/// where X, Y and N are the major, minor and ABI version numbers, respectively.
#define OPENVDB_VERSION_NAME \
OPENVDB_PREPROC_CONCAT(v, \
OPENVDB_PREPROC_CONCAT(OPENVDB_LIBRARY_MAJOR_VERSION_NUMBER, \
OPENVDB_PREPROC_CONCAT(_, OPENVDB_LIBRARY_MINOR_VERSION_NUMBER)))
#else
// This duplication of code is necessary to avoid issues with recursive macro expansion.
#define OPENVDB_VERSION_NAME \
OPENVDB_PREPROC_CONCAT(v, \
OPENVDB_PREPROC_CONCAT(OPENVDB_LIBRARY_MAJOR_VERSION_NUMBER, \
OPENVDB_PREPROC_CONCAT(_, \
OPENVDB_PREPROC_CONCAT(OPENVDB_LIBRARY_MINOR_VERSION_NUMBER, \
OPENVDB_PREPROC_CONCAT(abi, OPENVDB_ABI_VERSION_NUMBER)))))
#endif
/// @brief Library version number string of the form "<major>.<minor>.<patch>"
/// @details This is a macro rather than a static constant because we typically
/// want the compile-time version number, not the runtime version number
/// (although the two are usually the same).
/// @hideinitializer
#define OPENVDB_LIBRARY_VERSION_STRING \
OPENVDB_PREPROC_STRINGIFY(OPENVDB_LIBRARY_MAJOR_VERSION_NUMBER) "." \
OPENVDB_PREPROC_STRINGIFY(OPENVDB_LIBRARY_MINOR_VERSION_NUMBER) "." \
OPENVDB_PREPROC_STRINGIFY(OPENVDB_LIBRARY_PATCH_VERSION_NUMBER)
/// @brief Library version number string of the form "<major>.<minor>.<patch>abi<abi>"
/// @details This is a macro rather than a static constant because we typically
/// want the compile-time version number, not the runtime version number
/// (although the two are usually the same).
/// @hideinitializer
#define OPENVDB_LIBRARY_ABI_VERSION_STRING \
OPENVDB_LIBRARY_VERSION_STRING "abi" OPENVDB_PREPROC_STRINGIFY(OPENVDB_ABI_VERSION_NUMBER)
/// Library version number as a packed integer ("%02x%02x%04x", major, minor, patch)
#define OPENVDB_LIBRARY_VERSION_NUMBER \
((OPENVDB_LIBRARY_MAJOR_VERSION_NUMBER << 24) | \
((OPENVDB_LIBRARY_MINOR_VERSION_NUMBER & 0xFF) << 16) | \
(OPENVDB_LIBRARY_PATCH_VERSION_NUMBER & 0xFFFF))
/// By default, the @b OPENVDB_REQUIRE_VERSION_NAME macro is undefined, and
/// symbols from the version namespace are promoted to the top-level namespace
/// so that, for example, @b openvdb::v5_0::io::File can be referred to
/// simply as @b openvdb::io::File.
///
/// When @b OPENVDB_REQUIRE_VERSION_NAME is defined, symbols must be
/// fully namespace-qualified.
/// @hideinitializer
#ifdef OPENVDB_REQUIRE_VERSION_NAME
#define OPENVDB_USE_VERSION_NAMESPACE
#else
// The empty namespace clause below ensures that OPENVDB_VERSION_NAME
// is recognized as a namespace name.
#define OPENVDB_USE_VERSION_NAMESPACE \
namespace OPENVDB_VERSION_NAME {} \
using namespace OPENVDB_VERSION_NAME;
#endif
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
/// @brief The magic number is stored in the first four bytes of every VDB file.
/// @details This can be used to quickly test whether we have a valid file or not.
const int32_t OPENVDB_MAGIC = 0x56444220;
// Library major, minor and patch version numbers
const uint32_t
OPENVDB_LIBRARY_MAJOR_VERSION = OPENVDB_LIBRARY_MAJOR_VERSION_NUMBER,
OPENVDB_LIBRARY_MINOR_VERSION = OPENVDB_LIBRARY_MINOR_VERSION_NUMBER,
OPENVDB_LIBRARY_PATCH_VERSION = OPENVDB_LIBRARY_PATCH_VERSION_NUMBER;
/// Library version number as a packed integer ("%02x%02x%04x", major, minor, patch)
const uint32_t OPENVDB_LIBRARY_VERSION = OPENVDB_LIBRARY_VERSION_NUMBER;
// ABI version number
const uint32_t OPENVDB_ABI_VERSION = OPENVDB_ABI_VERSION_NUMBER;
/// @brief The current version number of the VDB file format
/// @details This can be used to enable various backwards compatibility switches
/// or to reject files that cannot be read.
const uint32_t OPENVDB_FILE_VERSION = 224;
/// Notable file format version numbers
enum {
OPENVDB_FILE_VERSION_ROOTNODE_MAP = 213,
OPENVDB_FILE_VERSION_INTERNALNODE_COMPRESSION = 214,
OPENVDB_FILE_VERSION_SIMPLIFIED_GRID_TYPENAME = 215,
OPENVDB_FILE_VERSION_GRID_INSTANCING = 216,
OPENVDB_FILE_VERSION_BOOL_LEAF_OPTIMIZATION = 217,
OPENVDB_FILE_VERSION_BOOST_UUID = 218,
OPENVDB_FILE_VERSION_NO_GRIDMAP = 219,
OPENVDB_FILE_VERSION_NEW_TRANSFORM = 219,
OPENVDB_FILE_VERSION_SELECTIVE_COMPRESSION = 220,
OPENVDB_FILE_VERSION_FLOAT_FRUSTUM_BBOX = 221,
OPENVDB_FILE_VERSION_NODE_MASK_COMPRESSION = 222,
OPENVDB_FILE_VERSION_BLOSC_COMPRESSION = 223,
OPENVDB_FILE_VERSION_POINT_INDEX_GRID = 223,
OPENVDB_FILE_VERSION_MULTIPASS_IO = 224
};
/// Return a library version number string of the form "<major>.<minor>.<patch>".
/// @note constexpr: the value is baked in at compile time from
/// OPENVDB_LIBRARY_VERSION_STRING, i.e. it reflects the headers the caller
/// was compiled against.
inline constexpr const char* getLibraryVersionString() { return OPENVDB_LIBRARY_VERSION_STRING; }
/// Return a library version number string of the form "<major>.<minor>.<patch>abi<abi>".
/// @note constexpr: the value is baked in at compile time from
/// OPENVDB_LIBRARY_ABI_VERSION_STRING.
inline constexpr const char* getLibraryAbiVersionString() {
    return OPENVDB_LIBRARY_ABI_VERSION_STRING;
}
/// @brief A simple (major, minor) version number pair.
struct VersionId {
    uint32_t first, second; // major and minor version numbers, respectively
    /// Default-construct as version 0.0.
    VersionId(): first(0), second(0) {}
    /// Construct from explicit major and minor version numbers.
    VersionId(uint32_t major, uint32_t minor): first(major), second(minor) {}
};
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_VERSION_HAS_BEEN_INCLUDED
| 9,981 | C | 44.579908 | 99 | 0.712654 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/Vec3.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_MATH_VEC3_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_VEC3_HAS_BEEN_INCLUDED
#include <openvdb/Exceptions.h>
#include "Math.h"
#include "Tuple.h"
#include <algorithm>
#include <cmath>
#include <type_traits>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
template<typename T> class Mat3;
template<typename T>
class Vec3: public Tuple<3, T>
{
public:
    using value_type = T;
    using ValueType = T;
    /// Trivial constructor, the vector is NOT initialized
#if OPENVDB_ABI_VERSION_NUMBER >= 8
    /// @note destructor, copy constructor, assignment operator and
    /// move constructor are left to be defined by the compiler (default)
    Vec3() = default;
#else
    Vec3() {}
#endif
    /// @brief Construct a vector all of whose components have the given value.
    explicit Vec3(T val) { this->mm[0] = this->mm[1] = this->mm[2] = val; }
    /// Constructor with three arguments, e.g. Vec3d v(1,2,3);
    Vec3(T x, T y, T z)
    {
        this->mm[0] = x;
        this->mm[1] = y;
        this->mm[2] = z;
    }
    /// Constructor with array argument, e.g. double a[3]; Vec3d v(a);
    /// @note Reads elements 0, 1 and 2 from @a a, so the pointer must
    /// address at least three values.
    template <typename Source>
    Vec3(Source *a)
    {
        this->mm[0] = static_cast<T>(a[0]);
        this->mm[1] = static_cast<T>(a[1]);
        this->mm[2] = static_cast<T>(a[2]);
    }
    /// @brief Construct a Vec3 from a 3-Tuple with a possibly different value type.
    /// @details Type conversion warnings are suppressed.
    template<typename Source>
    explicit Vec3(const Tuple<3, Source> &v)
    {
        this->mm[0] = static_cast<T>(v[0]);
        this->mm[1] = static_cast<T>(v[1]);
        this->mm[2] = static_cast<T>(v[2]);
    }
    /// @brief Construct a vector all of whose components have the given value,
    /// which may be of an arithmetic type different from this vector's value type.
    /// @details Type conversion warnings are suppressed.
    template<typename Other>
    explicit Vec3(Other val,
        typename std::enable_if<std::is_arithmetic<Other>::value, Conversion>::type = Conversion{})
    {
        this->mm[0] = this->mm[1] = this->mm[2] = static_cast<T>(val);
    }
    /// @brief Construct a Vec3 from another Vec3 with a possibly different value type.
    /// @details Type conversion warnings are suppressed.
    template<typename Other>
    Vec3(const Vec3<Other>& v)
    {
        this->mm[0] = static_cast<T>(v[0]);
        this->mm[1] = static_cast<T>(v[1]);
        this->mm[2] = static_cast<T>(v[2]);
    }
    /// Reference to the component, e.g. v.x() = 4.5f;
    T& x() { return this->mm[0]; }
    T& y() { return this->mm[1]; }
    T& z() { return this->mm[2]; }
    /// Get the component, e.g. float f = v.y();
    T x() const { return this->mm[0]; }
    T y() const { return this->mm[1]; }
    T z() const { return this->mm[2]; }
    T* asPointer() { return this->mm; }
    const T* asPointer() const { return this->mm; }
    /// Alternative indexed reference to the elements
    T& operator()(int i) { return this->mm[i]; }
    /// Alternative indexed constant reference to the elements,
    T operator()(int i) const { return this->mm[i]; }
    /// "this" vector gets initialized to [x, y, z],
    /// calling v.init(); has same effect as calling v = Vec3::zero();
    const Vec3<T>& init(T x=0, T y=0, T z=0)
    {
        this->mm[0] = x; this->mm[1] = y; this->mm[2] = z;
        return *this;
    }
    /// Set "this" vector to zero
    const Vec3<T>& setZero()
    {
        this->mm[0] = 0; this->mm[1] = 0; this->mm[2] = 0;
        return *this;
    }
    /// @brief Assignment operator
    /// @details Type conversion warnings are not suppressed.
    template<typename Source>
    const Vec3<T>& operator=(const Vec3<Source> &v)
    {
        // note: don't static_cast because that suppresses warnings
        this->mm[0] = v[0];
        this->mm[1] = v[1];
        this->mm[2] = v[2];
        return *this;
    }
    /// Test if "this" vector is equivalent to vector v with tolerance of eps
    bool eq(const Vec3<T> &v, T eps = static_cast<T>(1.0e-7)) const
    {
        return isRelOrApproxEqual(this->mm[0], v.mm[0], eps, eps) &&
            isRelOrApproxEqual(this->mm[1], v.mm[1], eps, eps) &&
            isRelOrApproxEqual(this->mm[2], v.mm[2], eps, eps);
    }
    /// Negation operator, for e.g. v1 = -v2;
    Vec3<T> operator-() const { return Vec3<T>(-this->mm[0], -this->mm[1], -this->mm[2]); }
    /// this = v1 + v2
    /// "this", v1 and v2 need not be distinct objects, e.g. v.add(v1,v);
    template <typename T0, typename T1>
    const Vec3<T>& add(const Vec3<T0> &v1, const Vec3<T1> &v2)
    {
        this->mm[0] = v1[0] + v2[0];
        this->mm[1] = v1[1] + v2[1];
        this->mm[2] = v1[2] + v2[2];
        return *this;
    }
    /// this = v1 - v2
    /// "this", v1 and v2 need not be distinct objects, e.g. v.sub(v1,v);
    template <typename T0, typename T1>
    const Vec3<T>& sub(const Vec3<T0> &v1, const Vec3<T1> &v2)
    {
        this->mm[0] = v1[0] - v2[0];
        this->mm[1] = v1[1] - v2[1];
        this->mm[2] = v1[2] - v2[2];
        return *this;
    }
    /// this = scalar*v, v need not be a distinct object from "this",
    /// e.g. v.scale(1.5,v1);
    template <typename T0, typename T1>
    const Vec3<T>& scale(T0 scale, const Vec3<T1> &v)
    {
        this->mm[0] = scale * v[0];
        this->mm[1] = scale * v[1];
        this->mm[2] = scale * v[2];
        return *this;
    }
    /// this = v/scale, v need not be a distinct object from "this",
    /// e.g. v.div(2.0, v1);
    template <typename T0, typename T1>
    const Vec3<T> &div(T0 scale, const Vec3<T1> &v)
    {
        this->mm[0] = v[0] / scale;
        this->mm[1] = v[1] / scale;
        this->mm[2] = v[2] / scale;
        return *this;
    }
    /// Dot product
    T dot(const Vec3<T> &v) const
    {
        return
            this->mm[0]*v.mm[0] +
            this->mm[1]*v.mm[1] +
            this->mm[2]*v.mm[2];
    }
    /// Length of the vector
    T length() const
    {
        return static_cast<T>(sqrt(double(
            this->mm[0]*this->mm[0] +
            this->mm[1]*this->mm[1] +
            this->mm[2]*this->mm[2])));
    }
    /// Squared length of the vector, much faster than length() as it
    /// does not involve square root
    T lengthSqr() const
    {
        return
            this->mm[0]*this->mm[0] +
            this->mm[1]*this->mm[1] +
            this->mm[2]*this->mm[2];
    }
    /// Return the cross product of "this" vector and v;
    Vec3<T> cross(const Vec3<T> &v) const
    {
        return Vec3<T>(this->mm[1]*v.mm[2] - this->mm[2]*v.mm[1],
                       this->mm[2]*v.mm[0] - this->mm[0]*v.mm[2],
                       this->mm[0]*v.mm[1] - this->mm[1]*v.mm[0]);
    }
    /// this = v1 cross v2, v1 and v2 must be distinct objects than "this"
    const Vec3<T>& cross(const Vec3<T> &v1, const Vec3<T> &v2)
    {
        // assert(this!=&v1);
        // assert(this!=&v2);
        this->mm[0] = v1.mm[1]*v2.mm[2] - v1.mm[2]*v2.mm[1];
        this->mm[1] = v1.mm[2]*v2.mm[0] - v1.mm[0]*v2.mm[2];
        this->mm[2] = v1.mm[0]*v2.mm[1] - v1.mm[1]*v2.mm[0];
        return *this;
    }
    /// Multiply each element of this vector by @a scalar.
    template <typename S>
    const Vec3<T> &operator*=(S scalar)
    {
        // Compute in the promoted type, then narrow back to T with the
        // conversion warning explicitly suppressed.
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        const auto value0 = this->mm[0] * scalar;
        const auto value1 = this->mm[1] * scalar;
        const auto value2 = this->mm[2] * scalar;
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END
        this->mm[0] = static_cast<T>(value0);
        this->mm[1] = static_cast<T>(value1);
        this->mm[2] = static_cast<T>(value2);
        return *this;
    }
    /// Multiply each element of this vector by the corresponding element of the given vector.
    template <typename S>
    const Vec3<T> &operator*=(const Vec3<S> &v1)
    {
        this->mm[0] *= v1[0];
        this->mm[1] *= v1[1];
        this->mm[2] *= v1[2];
        return *this;
    }
    /// Divide each element of this vector by @a scalar.
    template <typename S>
    const Vec3<T> &operator/=(S scalar)
    {
        this->mm[0] /= scalar;
        this->mm[1] /= scalar;
        this->mm[2] /= scalar;
        return *this;
    }
    /// Divide each element of this vector by the corresponding element of the given vector.
    template <typename S>
    const Vec3<T> &operator/=(const Vec3<S> &v1)
    {
        this->mm[0] /= v1[0];
        this->mm[1] /= v1[1];
        this->mm[2] /= v1[2];
        return *this;
    }
    /// Add @a scalar to each element of this vector.
    template <typename S>
    const Vec3<T> &operator+=(S scalar)
    {
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        const auto value0 = this->mm[0] + scalar;
        const auto value1 = this->mm[1] + scalar;
        const auto value2 = this->mm[2] + scalar;
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END
        this->mm[0] = static_cast<T>(value0);
        this->mm[1] = static_cast<T>(value1);
        this->mm[2] = static_cast<T>(value2);
        return *this;
    }
    /// Add each element of the given vector to the corresponding element of this vector.
    template <typename S>
    const Vec3<T> &operator+=(const Vec3<S> &v1)
    {
        this->mm[0] += v1[0];
        this->mm[1] += v1[1];
        this->mm[2] += v1[2];
        return *this;
    }
    /// Subtract @a scalar from each element of this vector.
    template <typename S>
    const Vec3<T> &operator-=(S scalar)
    {
        this->mm[0] -= scalar;
        this->mm[1] -= scalar;
        this->mm[2] -= scalar;
        return *this;
    }
    /// Subtract each element of the given vector from the corresponding element of this vector.
    template <typename S>
    const Vec3<T> &operator-=(const Vec3<S> &v1)
    {
        this->mm[0] -= v1[0];
        this->mm[1] -= v1[1];
        this->mm[2] -= v1[2];
        return *this;
    }
    /// Return a reference to itself after the exponent has been
    /// applied to all the vector components.
    inline const Vec3<T>& exp()
    {
        this->mm[0] = std::exp(this->mm[0]);
        this->mm[1] = std::exp(this->mm[1]);
        this->mm[2] = std::exp(this->mm[2]);
        return *this;
    }
    /// Return a reference to itself after log has been
    /// applied to all the vector components.
    inline const Vec3<T>& log()
    {
        this->mm[0] = std::log(this->mm[0]);
        this->mm[1] = std::log(this->mm[1]);
        this->mm[2] = std::log(this->mm[2]);
        return *this;
    }
    /// Return the sum of all the vector components.
    inline T sum() const
    {
        return this->mm[0] + this->mm[1] + this->mm[2];
    }
    /// Return the product of all the vector components.
    inline T product() const
    {
        return this->mm[0] * this->mm[1] * this->mm[2];
    }
    /// this = normalized this
    /// @return false (leaving *this unchanged) if the length is within
    /// @a eps of zero.
    bool normalize(T eps = T(1.0e-7))
    {
        T d = length();
        if (isApproxEqual(d, T(0), eps)) {
            return false;
        }
        *this *= (T(1) / d);
        return true;
    }
    /// return normalized this, throws if null vector
    Vec3<T> unit(T eps=0) const
    {
        T d;
        return unit(eps, d);
    }
    /// return normalized this and length, throws if null vector
    Vec3<T> unit(T eps, T& len) const
    {
        len = length();
        if (isApproxEqual(len, T(0), eps)) {
            OPENVDB_THROW(ArithmeticError, "Normalizing null 3-vector");
        }
        return *this / len;
    }
    /// return normalized this, or (1, 0, 0) if this is null vector
    Vec3<T> unitSafe() const
    {
        T l2 = lengthSqr();
        return l2 ? *this / static_cast<T>(sqrt(l2)) : Vec3<T>(1, 0 ,0);
    }
    // Number of cols, rows, elements
    static unsigned numRows() { return 1; }
    static unsigned numColumns() { return 3; }
    static unsigned numElements() { return 3; }
    /// Returns the scalar component of v in the direction of onto, onto need
    /// not be unit. e.g double c = Vec3d::component(v1,v2);
    T component(const Vec3<T> &onto, T eps = static_cast<T>(1.0e-7)) const
    {
        T l = onto.length();
        if (isApproxEqual(l, T(0), eps)) return 0;
        return dot(onto)*(T(1)/l);
    }
    /// Return the projection of v onto the vector, onto need not be unit
    /// e.g. Vec3d a = vprojection(n);
    Vec3<T> projection(const Vec3<T> &onto, T eps = static_cast<T>(1.0e-7)) const
    {
        T l = onto.lengthSqr();
        if (isApproxEqual(l, T(0), eps)) return Vec3::zero();
        return onto*(dot(onto)*(T(1)/l));
    }
    /// Return an arbitrary unit vector perpendicular to v
    /// Vector this must be a unit vector
    /// e.g. v = v.normalize(); Vec3d n = v.getArbPerpendicular();
    Vec3<T> getArbPerpendicular() const
    {
        Vec3<T> u;
        T l;
        if ( fabs(this->mm[0]) >= fabs(this->mm[1]) ) {
            // |x| >= |y|: build a unit vector (-z, 0, x)/sqrt(x^2+z^2)
            // in the xz-plane, which is perpendicular to *this.
            l = this->mm[0]*this->mm[0] + this->mm[2]*this->mm[2];
            l = static_cast<T>(T(1)/sqrt(double(l)));
            u.mm[0] = -this->mm[2]*l;
            u.mm[1] = T(0);
            u.mm[2] = +this->mm[0]*l;
        } else {
            // |y| > |x|: build a unit vector (0, z, -y)/sqrt(y^2+z^2)
            // in the yz-plane, which is perpendicular to *this.
            l = this->mm[1]*this->mm[1] + this->mm[2]*this->mm[2];
            l = static_cast<T>(T(1)/sqrt(double(l)));
            u.mm[0] = T(0);
            u.mm[1] = +this->mm[2]*l;
            u.mm[2] = -this->mm[1]*l;
        }
        return u;
    }
    /// Return a vector with the components of this in ascending order
    Vec3<T> sorted() const
    {
        // Three compare-and-swaps form a sorting network for three values.
        Vec3<T> r(*this);
        if( r.mm[0] > r.mm[1] ) std::swap(r.mm[0], r.mm[1]);
        if( r.mm[1] > r.mm[2] ) std::swap(r.mm[1], r.mm[2]);
        if( r.mm[0] > r.mm[1] ) std::swap(r.mm[0], r.mm[1]);
        return r;
    }
    /// Return the vector (z, y, x)
    Vec3<T> reversed() const
    {
        return Vec3<T>(this->mm[2], this->mm[1], this->mm[0]);
    }
    /// Predefined constants, e.g. Vec3d v = Vec3d::xNegAxis();
    static Vec3<T> zero() { return Vec3<T>(0, 0, 0); }
    static Vec3<T> ones() { return Vec3<T>(1, 1, 1); }
};
/// Equality operator, does exact floating point comparisons
template <typename T0, typename T1>
inline bool operator==(const Vec3<T0> &v0, const Vec3<T1> &v1)
{
    // All three components must match exactly (no tolerance).
    return isExactlyEqual(v0[0], v1[0])
        && isExactlyEqual(v0[1], v1[1])
        && isExactlyEqual(v0[2], v1[2]);
}
/// Inequality operator, does exact floating point comparisons
template <typename T0, typename T1>
inline bool operator!=(const Vec3<T0> &v0, const Vec3<T1> &v1)
{
    // Defined as the logical negation of operator==.
    return !(v0 == v1);
}
/// Multiply each element of the given vector by @a scalar and return the result.
template <typename S, typename T>
inline Vec3<typename promote<S, T>::type> operator*(S scalar, const Vec3<T> &v)
{
    // Scalar multiplication commutes; forward to the vector-scalar overload.
    return v * scalar;
}
/// Multiply each element of the given vector by @a scalar and return the result.
template <typename S, typename T>
inline Vec3<typename promote<S, T>::type> operator*(const Vec3<T> &v, S scalar)
{
    // Promote to the common value type, then scale in place.
    Vec3<typename promote<S, T>::type> out(v);
    out *= scalar;
    return out;
}
/// Multiply corresponding elements of @a v0 and @a v1 and return the result.
template <typename T0, typename T1>
inline Vec3<typename promote<T0, T1>::type> operator*(const Vec3<T0> &v0, const Vec3<T1> &v1)
{
    // Component-wise (Hadamard) product.
    return Vec3<typename promote<T0, T1>::type>(
        v0[0] * v1[0], v0[1] * v1[1], v0[2] * v1[2]);
}
/// Divide @a scalar by each element of the given vector and return the result.
template <typename S, typename T>
inline Vec3<typename promote<S, T>::type> operator/(S scalar, const Vec3<T> &v)
{
    using ResultT = Vec3<typename promote<S, T>::type>;
    return ResultT(scalar/v[0], scalar/v[1], scalar/v[2]);
}

/// Divide each element of the given vector by @a scalar and return the result.
template <typename S, typename T>
inline Vec3<typename promote<S, T>::type> operator/(const Vec3<T> &v, S scalar)
{
    Vec3<typename promote<S, T>::type> out(v);
    out /= scalar;
    return out;
}

/// Divide corresponding elements of @a v0 and @a v1 and return the result.
template <typename T0, typename T1>
inline Vec3<typename promote<T0, T1>::type> operator/(const Vec3<T0> &v0, const Vec3<T1> &v1)
{
    using ResultT = Vec3<typename promote<T0, T1>::type>;
    return ResultT(v0[0] / v1[0], v0[1] / v1[1], v0[2] / v1[2]);
}
/// Add corresponding elements of @a v0 and @a v1 and return the result.
template <typename T0, typename T1>
inline Vec3<typename promote<T0, T1>::type> operator+(const Vec3<T0> &v0, const Vec3<T1> &v1)
{
    Vec3<typename promote<T0, T1>::type> sum(v0);
    sum += v1;
    return sum;
}

/// Add @a scalar to each element of the given vector and return the result.
template <typename S, typename T>
inline Vec3<typename promote<S, T>::type> operator+(const Vec3<T> &v, S scalar)
{
    Vec3<typename promote<S, T>::type> sum(v);
    sum += scalar;
    return sum;
}
/// Subtract corresponding elements of @a v0 and @a v1 and return the result.
template <typename T0, typename T1>
inline Vec3<typename promote<T0, T1>::type> operator-(const Vec3<T0> &v0, const Vec3<T1> &v1)
{
    Vec3<typename promote<T0, T1>::type> diff(v0);
    diff -= v1;
    return diff;
}

/// Subtract @a scalar from each element of the given vector and return the result.
template <typename S, typename T>
inline Vec3<typename promote<S, T>::type> operator-(const Vec3<T> &v, S scalar)
{
    Vec3<typename promote<S, T>::type> diff(v);
    diff -= scalar;
    return diff;
}
/// Angle between two vectors, the result is between [0, pi],
/// e.g. double a = Vec3d::angle(v1,v2);
template <typename T>
inline T angle(const Vec3<T> &v1, const Vec3<T> &v2)
{
Vec3<T> c = v1.cross(v2);
return static_cast<T>(atan2(c.length(), v1.dot(v2)));
}
/// Return @c true if @a a and @a b are approximately equal
/// (per the member comparison's default tolerance).
template <typename T>
inline bool
isApproxEqual(const Vec3<T>& a, const Vec3<T>& b)
{
    return a.eq(b);
}

/// Return @c true if @a a and @a b are approximately equal,
/// comparing each axis with its own tolerance from @a eps.
template <typename T>
inline bool
isApproxEqual(const Vec3<T>& a, const Vec3<T>& b, const Vec3<T>& eps)
{
    if (!isApproxEqual(a.x(), b.x(), eps.x())) return false;
    if (!isApproxEqual(a.y(), b.y(), eps.y())) return false;
    return isApproxEqual(a.z(), b.z(), eps.z());
}
/// Return the component-wise absolute value of the given vector.
template<typename T>
inline Vec3<T>
Abs(const Vec3<T>& vec)
{
    return Vec3<T>(Abs(vec[0]), Abs(vec[1]), Abs(vec[2]));
}
/// Orthonormalize vectors v1, v2 and v3 and store back the resulting
/// basis e.g. Vec3d::orthonormalize(v1,v2,v3);
template <typename T>
inline void orthonormalize(Vec3<T> &v1, Vec3<T> &v2, Vec3<T> &v3)
{
    // Classic Gram-Schmidt. Given inputs v0, v1, v2, the orthonormal
    // basis u0, u1, u2 is
    //
    //   u0 = v0/|v0|
    //   u1 = (v1-(u0*v1)u0)/|v1-(u0*v1)u0|
    //   u2 = (v2-(u0*v2)u0-(u1*v2)u1)/|v2-(u0*v2)u0-(u1*v2)u1|
    //
    // where |A| is the length of A and A*B is the dot product of A and B.

    // u0: normalize the first vector in place.
    v1.normalize();

    // u1: remove the v1 component from v2, then normalize.
    const T proj12 = v1.dot(v2);
    v2 -= v1 * proj12;
    v2.normalize();

    // u2: remove both the v1 and the v2 components from v3, then normalize.
    const T proj23 = v2.dot(v3);
    const T proj13 = v1.dot(v3);
    v3 -= v1 * proj13 + v2 * proj23;
    v3.normalize();
}
/// @remark We are switching to a more explicit name because the semantics
/// are different from std::min/max. In that case, the function returns a
/// reference to one of the objects based on a comparator. Here, we must
/// fabricate a new object which might not match either of the inputs.

/// Return the component-wise minimum of the two vectors.
template <typename T>
inline Vec3<T> minComponent(const Vec3<T> &v1, const Vec3<T> &v2)
{
    return Vec3<T>(std::min(v1.x(), v2.x()),
                   std::min(v1.y(), v2.y()),
                   std::min(v1.z(), v2.z()));
}
/// Return the component-wise maximum of the two vectors.
template <typename T>
inline Vec3<T> maxComponent(const Vec3<T> &v1, const Vec3<T> &v2)
{
    return Vec3<T>(std::max(v1.x(), v2.x()),
                   std::max(v1.y(), v2.y()),
                   std::max(v1.z(), v2.z()));
}
/// @brief Return a vector with the exponent applied to each of
/// the components of the input vector.
/// @note The argument is taken by value, so the caller's vector is never modified.
template <typename T>
inline Vec3<T> Exp(Vec3<T> v) { return v.exp(); }
/// @brief Return a vector with log applied to each of
/// the components of the input vector.
/// @note The argument is taken by value, so the caller's vector is never modified.
template <typename T>
inline Vec3<T> Log(Vec3<T> v) { return v.log(); }
// Convenience aliases for the commonly used Vec3 instantiations
using Vec3i = Vec3<int32_t>;
using Vec3ui = Vec3<uint32_t>;
using Vec3s = Vec3<float>;
using Vec3d = Vec3<double>;
#if OPENVDB_ABI_VERSION_NUMBER >= 8
// NOTE(review): OPENVDB_IS_POD presumably marks these instantiations as
// trivially copyable for serialization purposes — see its definition for details.
OPENVDB_IS_POD(Vec3i)
OPENVDB_IS_POD(Vec3ui)
OPENVDB_IS_POD(Vec3s)
OPENVDB_IS_POD(Vec3d)
#endif
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_MATH_VEC3_HAS_BEEN_INCLUDED
| 20,831 | C | 29.545455 | 100 | 0.57424 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/Tuple.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file Tuple.h
/// @author Ben Kwa
#ifndef OPENVDB_MATH_TUPLE_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_TUPLE_HAS_BEEN_INCLUDED
#include "Math.h"
#include <cmath>
#include <sstream>
#include <string>
#include <type_traits>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
/// @brief Dummy class for tag dispatch of conversion constructors
/// (an extra parameter of this type disambiguates conversion overloads)
struct Conversion {};
/// @class Tuple "Tuple.h"
/// A base class for homogenous tuple types
template<int SIZE, typename T>
class Tuple
{
public:
    using value_type = T;
    using ValueType = T;
    static const int size = SIZE;
#if OPENVDB_ABI_VERSION_NUMBER >= 8
    /// Trivial constructor, the Tuple is NOT initialized
    /// @note destructor, copy constructor, assignment operator and
    /// move constructor are left to be defined by the compiler (default)
    Tuple() = default;
#else
    /// @brief Default ctor. Does nothing.
    /// @details This is required because declaring a copy (or other) constructor
    /// prevents the compiler from synthesizing a default constructor.
    Tuple() {}
    /// Copy constructor. Used when the class signature matches exactly.
    Tuple(Tuple const& src) {
        for (int i = 0; i < SIZE; ++i) {
            mm[i] = src.mm[i];
        }
    }
    /// @brief Assignment operator
    /// @details This is required because declaring a copy (or other) constructor
    /// prevents the compiler from synthesizing a default assignment operator.
    Tuple& operator=(Tuple const& src) {
        if (&src != this) {
            for (int i = 0; i < SIZE; ++i) {
                mm[i] = src.mm[i];
            }
        }
        return *this;
    }
#endif
    /// @brief Conversion constructor.
    /// @details Tuples with different value types and different sizes can be
    /// interconverted using this member. Converting from a larger tuple
    /// results in truncation; converting from a smaller tuple results in
    /// the extra data members being zeroed out. This function assumes that
    /// the integer 0 is convertible to the tuple's value type.
    template <int src_size, typename src_valtype>
    explicit Tuple(Tuple<src_size, src_valtype> const &src) {
        enum { COPY_END = (SIZE < src_size ? SIZE : src_size) };
        for (int i = 0; i < COPY_END; ++i) {
            mm[i] = src[i];
        }
        for (int i = COPY_END; i < SIZE; ++i) {
            mm[i] = 0;
        }
    }
    T operator[](int i) const {
        // we'd prefer to use size_t, but can't because gcc3.2 doesn't like
        // it - it conflicts with child class conversion operators to
        // pointer types.
        // assert(i >= 0 && i < SIZE);
        return mm[i];
    }
    T& operator[](int i) {
        // see above for size_t vs int
        // assert(i >= 0 && i < SIZE);
        return mm[i];
    }
    /// @name Compatibility
    /// These are mostly for backwards compatibility with functions that take
    /// old-style Vs (which are just arrays).
    //@{
    /// Copies this tuple into an array of a compatible type
    template <typename S>
    void toV(S *v) const {
        for (int i = 0; i < SIZE; ++i) {
            v[i] = mm[i];
        }
    }
    /// Exposes the internal array. Be careful when using this function.
    value_type *asV() {
        return mm;
    }
    /// Exposes the internal array. Be careful when using this function.
    value_type const *asV() const {
        return mm;
    }
    //@} Compatibility
    /// @return string representation of Classname
    std::string str() const {
        std::ostringstream buffer;
        buffer << "[";
        // For each column
        // (int, not unsigned, to match the signed SIZE and avoid -Wsign-compare)
        for (int j = 0; j < SIZE; j++) {
            if (j) buffer << ", ";
            buffer << PrintCast(mm[j]);
        }
        buffer << "]";
        return buffer.str();
    }
    /// Serialize the raw element array to the given stream.
    void write(std::ostream& os) const {
        os.write(reinterpret_cast<const char*>(&mm), sizeof(T)*SIZE);
    }
    /// Unserialize the raw element array from the given stream.
    void read(std::istream& is) {
        is.read(reinterpret_cast<char*>(&mm), sizeof(T)*SIZE);
    }
    /// True if a Nan is present in this tuple
    bool isNan() const {
        for (int i = 0; i < SIZE; ++i) {
            if (math::isNan(mm[i])) return true;
        }
        return false;
    }
    /// True if an Inf is present in this tuple
    bool isInfinite() const {
        for (int i = 0; i < SIZE; ++i) {
            if (math::isInfinite(mm[i])) return true;
        }
        return false;
    }
    /// True if no Nan or Inf values are present
    bool isFinite() const {
        for (int i = 0; i < SIZE; ++i) {
            if (!math::isFinite(mm[i])) return false;
        }
        return true;
    }
    /// True if all elements are exactly zero
    bool isZero() const {
        for (int i = 0; i < SIZE; ++i) {
            if (!math::isZero(mm[i])) return false;
        }
        return true;
    }
protected:
    T mm[SIZE]; // the element storage, exposed to derived vector/point types
};
////////////////////////////////////////
/// @return true if t0 < t1, comparing components in order of significance.
template<int SIZE, typename T0, typename T1>
bool
operator<(const Tuple<SIZE, T0>& t0, const Tuple<SIZE, T1>& t1)
{
    // Lexicographic: the first differing component decides the ordering.
    for (int i = 0; i < SIZE - 1; ++i) {
        if (!isExactlyEqual(t0[i], t1[i])) {
            return t0[i] < t1[i];
        }
    }
    return t0[SIZE - 1] < t1[SIZE - 1];
}
/// @return true if t0 > t1, comparing components in order of significance.
template<int SIZE, typename T0, typename T1>
bool
operator>(const Tuple<SIZE, T0>& t0, const Tuple<SIZE, T1>& t1)
{
    // Lexicographic: the first differing component decides the ordering.
    for (int i = 0; i < SIZE - 1; ++i) {
        if (!isExactlyEqual(t0[i], t1[i])) {
            return t0[i] > t1[i];
        }
    }
    return t0[SIZE - 1] > t1[SIZE - 1];
}
////////////////////////////////////////
/// @return the component-wise absolute value of the given Tuple.
template<int SIZE, typename T>
Tuple<SIZE, T>
Abs(const Tuple<SIZE, T>& t)
{
    Tuple<SIZE, T> out;
    for (int i = 0; i < SIZE; ++i) {
        out[i] = math::Abs(t[i]);
    }
    return out;
}
/// Return @c true if a Nan is present in the tuple.
/// (Delegates to the member function of the same name.)
template<int SIZE, typename T>
inline bool isNan(const Tuple<SIZE, T>& t) { return t.isNan(); }
/// Return @c true if an Inf is present in the tuple.
/// (Delegates to the member function of the same name.)
template<int SIZE, typename T>
inline bool isInfinite(const Tuple<SIZE, T>& t) { return t.isInfinite(); }
/// Return @c true if no Nan or Inf values are present.
/// (Delegates to the member function of the same name.)
template<int SIZE, typename T>
inline bool isFinite(const Tuple<SIZE, T>& t) { return t.isFinite(); }
/// Return @c true if all elements are exactly equal to zero.
/// (Delegates to the member function of the same name.)
template<int SIZE, typename T>
inline bool isZero(const Tuple<SIZE, T>& t) { return t.isZero(); }
////////////////////////////////////////
/// Write a Tuple to an output stream
template <int SIZE, typename T>
std::ostream& operator<<(std::ostream& ostr, const Tuple<SIZE, T>& classname)
{
ostr << classname.str();
return ostr;
}
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_MATH_TUPLE_HAS_BEEN_INCLUDED
| 7,054 | C | 26.666667 | 81 | 0.584349 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/BBox.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_MATH_BBOX_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_BBOX_HAS_BEEN_INCLUDED
#include "Math.h" // for math::isApproxEqual() and math::Tolerance()
#include "Vec3.h"
#include <algorithm> // for std::min(), std::max()
#include <cmath> // for std::abs()
#include <iostream>
#include <limits>
#include <type_traits>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
/// @brief Axis-aligned bounding box
template<typename Vec3T>
class BBox
{
public:
    using Vec3Type = Vec3T;
    using ValueType = Vec3T;
    using VectorType = Vec3T;
    using ElementType = typename Vec3Type::ValueType;
    /// @brief The default constructor creates an invalid bounding box.
    /// @details "Invalid" means min > max, so the first expand() call
    /// snaps the bounds directly to its argument.
    BBox();
    /// @brief Construct a bounding box that exactly encloses the given
    /// minimum and maximum points.
    BBox(const Vec3T& xyzMin, const Vec3T& xyzMax);
    /// @brief Construct a bounding box that exactly encloses the given
    /// minimum and maximum points.
    /// @details If @a sorted is false, sort the points by their
    /// @e x, @e y and @e z components.
    BBox(const Vec3T& xyzMin, const Vec3T& xyzMax, bool sorted);
    /// @brief Contruct a cubical bounding box from a minimum coordinate
    /// and an edge length.
    /// @note Inclusive for integral <b>ElementType</b>s
    BBox(const Vec3T& xyzMin, const ElementType& length);
    /// @brief Construct a bounding box that exactly encloses two points,
    /// whose coordinates are given by an array of six values,
    /// <i>x<sub>1</sub></i>, <i>y<sub>1</sub></i>, <i>z<sub>1</sub></i>,
    /// <i>x<sub>2</sub></i>, <i>y<sub>2</sub></i> and <i>z<sub>2</sub></i>.
    /// @details If @a sorted is false, sort the points by their
    /// @e x, @e y and @e z components.
    explicit BBox(const ElementType* xyz, bool sorted = true);
    BBox(const BBox&) = default;
    BBox& operator=(const BBox&) = default;
    /// @brief Sort the mininum and maximum points of this bounding box
    /// by their @e x, @e y and @e z components.
    void sort();
    /// @brief Return a const reference to the minimum point of this bounding box.
    const Vec3T& min() const { return mMin; }
    /// @brief Return a const reference to the maximum point of this bounding box.
    const Vec3T& max() const { return mMax; }
    /// @brief Return a non-const reference to the minimum point of this bounding box.
    Vec3T& min() { return mMin; }
    /// @brief Return a non-const reference to the maximum point of this bounding box.
    Vec3T& max() { return mMax; }
    /// @brief Return @c true if this bounding box is identical to the given bounding box.
    bool operator==(const BBox& rhs) const;
    /// @brief Return @c true if this bounding box differs from the given bounding box.
    bool operator!=(const BBox& rhs) const { return !(*this == rhs); }
    /// @brief Return @c true if this bounding box is empty, i.e., it has no (positive) volume.
    bool empty() const;
    /// @brief Return @c true if this bounding box has (positive) volume.
    bool hasVolume() const { return !this->empty(); }
    /// @brief Return @c true if this bounding box has (positive) volume.
    operator bool() const { return !this->empty(); }
    /// @brief Return @c true if all components of the minimum point are less than
    /// or equal to the corresponding components of the maximum point.
    /// @details This is equivalent to testing whether this bounding box has nonnegative volume.
    /// @note For floating-point <b>ElementType</b>s a tolerance is used for this test.
    bool isSorted() const;
    /// @brief Return the center point of this bounding box.
    Vec3d getCenter() const;
    /// @brief Return the extents of this bounding box, i.e., the length along each axis.
    /// @note Inclusive for integral <b>ElementType</b>s
    Vec3T extents() const;
    /// @brief Return the index (0, 1 or 2) of the longest axis.
    size_t maxExtent() const { return MaxIndex(mMax - mMin); }
    /// @brief Return the index (0, 1 or 2) of the shortest axis.
    size_t minExtent() const { return MinIndex(mMax - mMin); }
    /// @brief Return the volume enclosed by this bounding box.
    ElementType volume() const { Vec3T e = this->extents(); return e[0] * e[1] * e[2]; }
    /// @brief Return @c true if the given point is inside this bounding box.
    bool isInside(const Vec3T& xyz) const;
    /// @brief Return @c true if the given bounding box is inside this bounding box.
    bool isInside(const BBox&) const;
    /// @brief Return @c true if the given bounding box overlaps with this bounding box.
    bool hasOverlap(const BBox&) const;
    /// @brief Return @c true if the given bounding box overlaps with this bounding box.
    bool intersects(const BBox& other) const { return hasOverlap(other); }
    /// @brief Pad this bounding box.
    void expand(ElementType padding);
    /// @brief Expand this bounding box to enclose the given point.
    void expand(const Vec3T& xyz);
    /// @brief Union this bounding box with the given bounding box.
    void expand(const BBox&);
    /// @brief Union this bounding box with the cubical bounding box with
    /// minimum point @a xyzMin and the given edge length.
    /// @note Inclusive for integral <b>ElementType</b>s
    void expand(const Vec3T& xyzMin, const ElementType& length);
    /// @brief Translate this bounding box by
    /// (<i>t<sub>x</sub></i>, <i>t<sub>y</sub></i>, <i>t<sub>z</sub></i>).
    void translate(const Vec3T& t);
    /// @brief Apply a map to this bounding box.
    template<typename MapType>
    BBox applyMap(const MapType& map) const;
    /// @brief Apply the inverse of a map to this bounding box
    template<typename MapType>
    BBox applyInverseMap(const MapType& map) const;
    /// @brief Unserialize this bounding box from the given stream.
    void read(std::istream& is) { mMin.read(is); mMax.read(is); }
    /// @brief Serialize this bounding box to the given stream.
    void write(std::ostream& os) const { mMin.write(os); mMax.write(os); }
private:
    Vec3T mMin, mMax; // box corners; the "empty" state is encoded as mMin > mMax
}; // class BBox
////////////////////////////////////////
/// Default ctor: an "invalid" box with min > max, so that the first
/// expand() call snaps the bounds directly to its argument.
template<typename Vec3T>
inline
BBox<Vec3T>::BBox()
    : mMin( std::numeric_limits<ElementType>::max())
    , mMax(-std::numeric_limits<ElementType>::max())
{
}

/// Construct from explicit (already sorted) corner points.
template<typename Vec3T>
inline
BBox<Vec3T>::BBox(const Vec3T& xyzMin, const Vec3T& xyzMax)
    : mMin(xyzMin)
    , mMax(xyzMax)
{
}

/// Construct from corner points, optionally sorting them per component.
template<typename Vec3T>
inline
BBox<Vec3T>::BBox(const Vec3T& xyzMin, const Vec3T& xyzMax, bool sorted)
    : mMin(xyzMin)
    , mMax(xyzMax)
{
    if (!sorted) this->sort();
}

/// Construct a cube from a minimum corner and an edge length.
template<typename Vec3T>
inline
BBox<Vec3T>::BBox(const Vec3T& xyzMin, const ElementType& length)
    : mMin(xyzMin)
    , mMax(xyzMin)
{
    // min and max are inclusive for integral ElementType, so an edge of
    // `length` cells spans [min, min + length - 1].
    const ElementType size = std::is_integral<ElementType>::value ? length-1 : length;
    mMax[0] += size;
    mMax[1] += size;
    mMax[2] += size;
}

/// Construct from an array of six values laid out as [x1, y1, z1, x2, y2, z2].
template<typename Vec3T>
inline
BBox<Vec3T>::BBox(const ElementType* xyz, bool sorted)
    : mMin(xyz[0], xyz[1], xyz[2])
    , mMax(xyz[3], xyz[4], xyz[5])
{
    if (!sorted) this->sort();
}
////////////////////////////////////////
template<typename Vec3T>
inline bool
BBox<Vec3T>::empty() const
{
    if (std::is_integral<ElementType>::value) {
        // min and max are inclusive for integral ElementType, so
        // min == max still contains a single cell.
        for (int i = 0; i < 3; ++i) {
            if (mMin[i] > mMax[i]) return true;
        }
        return false;
    }
    // Floating-point boxes are empty once any extent collapses to zero.
    for (int i = 0; i < 3; ++i) {
        if (mMin[i] >= mMax[i]) return true;
    }
    return false;
}
template<typename Vec3T>
inline bool
BBox<Vec3T>::operator==(const BBox& rhs) const
{
    if (std::is_integral<ElementType>::value) {
        // Exact comparison for integral coordinates
        return mMin == rhs.min() && mMax == rhs.max();
    }
    // Tolerance-based comparison for floating-point coordinates
    return math::isApproxEqual(mMin, rhs.min()) && math::isApproxEqual(mMax, rhs.max());
}
template<typename Vec3T>
inline void
BBox<Vec3T>::sort()
{
    // Per axis, ensure min <= max by swapping out-of-order components.
    for (int i = 0; i < 3; ++i) {
        if (mMin[i] > mMax[i]) std::swap(mMin[i], mMax[i]);
    }
}
template<typename Vec3T>
inline bool
BBox<Vec3T>::isSorted() const
{
    if (std::is_integral<ElementType>::value) {
        for (int i = 0; i < 3; ++i) {
            if (mMin[i] > mMax[i]) return false;
        }
        return true;
    }
    // Allow a small tolerance for floating-point coordinates.
    const ElementType tol = math::Tolerance<ElementType>::value();
    for (int i = 0; i < 3; ++i) {
        if (!(mMin[i] < mMax[i] + tol)) return false;
    }
    return true;
}
template<typename Vec3T>
inline Vec3d
BBox<Vec3T>::getCenter() const
{
    // Midpoint of the two corners, computed in double precision.
    return 0.5 * (Vec3d(mMin.asPointer()) + Vec3d(mMax.asPointer()));
}
template<typename Vec3T>
inline Vec3T
BBox<Vec3T>::extents() const
{
    Vec3T e = mMax - mMin;
    if (std::is_integral<ElementType>::value) {
        // Inclusive bounds: a degenerate integral box still spans one cell.
        e += Vec3T(1, 1, 1);
    }
    return e;
}
////////////////////////////////////////
/// Point containment test; bounds are inclusive (with tolerance for
/// floating-point element types).
template<typename Vec3T>
inline bool
BBox<Vec3T>::isInside(const Vec3T& xyz) const
{
    if (std::is_integral<ElementType>::value) {
        for (int i = 0; i < 3; ++i) {
            const bool inAxis = xyz[i] >= mMin[i] && xyz[i] <= mMax[i];
            if (!inAxis) return false;
        }
        return true;
    }
    const ElementType tol = math::Tolerance<ElementType>::value();
    for (int i = 0; i < 3; ++i) {
        const bool inAxis = xyz[i] > (mMin[i]-tol) && xyz[i] < (mMax[i]+tol);
        if (!inAxis) return false;
    }
    return true;
}

/// Box containment test: true if @a b lies entirely inside this box.
template<typename Vec3T>
inline bool
BBox<Vec3T>::isInside(const BBox& b) const
{
    if (std::is_integral<ElementType>::value) {
        for (int i = 0; i < 3; ++i) {
            const bool inAxis = b.min()[i] >= mMin[i] && b.max()[i] <= mMax[i];
            if (!inAxis) return false;
        }
        return true;
    }
    const ElementType tol = math::Tolerance<ElementType>::value();
    for (int i = 0; i < 3; ++i) {
        const bool inAxis = (b.min()[i]-tol) > mMin[i] && (b.max()[i]+tol) < mMax[i];
        if (!inAxis) return false;
    }
    return true;
}

/// Intersection test: true if @a b and this box share any region.
template<typename Vec3T>
inline bool
BBox<Vec3T>::hasOverlap(const BBox& b) const
{
    if (std::is_integral<ElementType>::value) {
        for (int i = 0; i < 3; ++i) {
            const bool overlapAxis = mMax[i] >= b.min()[i] && mMin[i] <= b.max()[i];
            if (!overlapAxis) return false;
        }
        return true;
    }
    const ElementType tol = math::Tolerance<ElementType>::value();
    for (int i = 0; i < 3; ++i) {
        const bool overlapAxis = mMax[i] > (b.min()[i]-tol) && mMin[i] < (b.max()[i]+tol);
        if (!overlapAxis) return false;
    }
    return true;
}
////////////////////////////////////////
/// Pad the box symmetrically; a negative padding pads by its magnitude.
template<typename Vec3T>
inline void
BBox<Vec3T>::expand(ElementType dx)
{
    dx = std::abs(dx);
    for (int i = 0; i < 3; ++i) {
        mMin[i] -= dx;
        mMax[i] += dx;
    }
}

/// Grow the box just enough to contain the given point.
template<typename Vec3T>
inline void
BBox<Vec3T>::expand(const Vec3T& xyz)
{
    for (int i = 0; i < 3; ++i) {
        mMin[i] = std::min(mMin[i], xyz[i]);
        mMax[i] = std::max(mMax[i], xyz[i]);
    }
}

/// Grow the box to the union of itself and the given box.
template<typename Vec3T>
inline void
BBox<Vec3T>::expand(const BBox& b)
{
    for (int i = 0; i < 3; ++i) {
        mMin[i] = std::min(mMin[i], b.min()[i]);
        mMax[i] = std::max(mMax[i], b.max()[i]);
    }
}

/// Grow the box to the union of itself and a cube given by a minimum
/// corner and an edge length (inclusive for integral element types).
template<typename Vec3T>
inline void
BBox<Vec3T>::expand(const Vec3T& xyzMin, const ElementType& length)
{
    const ElementType size = std::is_integral<ElementType>::value ? length-1 : length;
    for (int i = 0; i < 3; ++i) {
        mMin[i] = std::min(mMin[i], xyzMin[i]);
        mMax[i] = std::max(mMax[i], xyzMin[i] + size);
    }
}
/// Shift both corners by the same offset; the extents are unchanged.
template<typename Vec3T>
inline void
BBox<Vec3T>::translate(const Vec3T& t)
{
    mMin += t;
    mMax += t;
}
template<typename Vec3T>
template<typename MapType>
inline BBox<Vec3T>
BBox<Vec3T>::applyMap(const MapType& map) const
{
    using Vec3R = Vec3<double>;
    BBox<Vec3T> bbox;
    // Map all eight corners and take their axis-aligned hull; the order of
    // expand() calls does not affect the result.
    for (int i = 0; i < 8; ++i) {
        const Vec3R corner(
            (i & 1) ? mMax[0] : mMin[0],
            (i & 2) ? mMax[1] : mMin[1],
            (i & 4) ? mMax[2] : mMin[2]);
        bbox.expand(map.applyMap(corner));
    }
    return bbox;
}
template<typename Vec3T>
template<typename MapType>
inline BBox<Vec3T>
BBox<Vec3T>::applyInverseMap(const MapType& map) const
{
    using Vec3R = Vec3<double>;
    BBox<Vec3T> bbox;
    // Inverse-map all eight corners and take their axis-aligned hull; the
    // order of expand() calls does not affect the result.
    for (int i = 0; i < 8; ++i) {
        const Vec3R corner(
            (i & 1) ? mMax[0] : mMin[0],
            (i & 2) ? mMax[1] : mMin[1],
            (i & 4) ? mMax[2] : mMin[2]);
        bbox.expand(map.applyInverseMap(corner));
    }
    return bbox;
}
////////////////////////////////////////
/// Write the box to a stream as "min -> max".
template<typename Vec3T>
inline std::ostream&
operator<<(std::ostream& os, const BBox<Vec3T>& b)
{
    return os << b.min() << " -> " << b.max();
}
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_MATH_BBOX_HAS_BEEN_INCLUDED
| 13,688 | C | 31.285377 | 96 | 0.609585 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/Quat.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_MATH_QUAT_H_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_QUAT_H_HAS_BEEN_INCLUDED
#include "Mat.h"
#include "Mat3.h"
#include "Math.h"
#include "Vec3.h"
#include <openvdb/Exceptions.h>
#include <cmath>
#include <iostream>
#include <sstream>
#include <string>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
template<typename T> class Quat;
/// Spherical-linear interpolation between the two quaternions, falling
/// back to normalized linear interpolation when they are nearly parallel
/// (or nearly antipodal).
/// @param q1,q2      the quaternions to blend
/// @param t          blend parameter (t = 0 yields q1, t = 1 yields q2)
/// @param tolerance  threshold on sin(angle) below which the linear
///                   fallback is used
template <typename T>
Quat<T> slerp(const Quat<T> &q1, const Quat<T> &q2, T t, T tolerance=0.00001)
{
    T qdot, angle, sineAngle;
    // Cosine of the angle between the two unit quaternions.
    qdot = q1.dot(q2);
    if (fabs(qdot) >= 1.0) {
        angle = 0; // not necessary but suppresses compiler warning
        sineAngle = 0;
    } else {
        angle = acos(qdot);
        sineAngle = sin(angle);
    }
    //
    // Denominator close to 0 corresponds to the case where the
    // two quaternions are close to the same rotation. In this
    // case linear interpolation is used but we normalize to
    // guarantee unit length
    //
    if (sineAngle <= tolerance) {
        T s = 1.0 - t;
        Quat<T> qtemp(s * q1[0] + t * q2[0], s * q1[1] + t * q2[1],
                    s * q1[2] + t * q2[2], s * q1[3] + t * q2[3]);
        //
        // Check the case where two close to antipodal quaternions were
        // blended resulting in a nearly zero result which can happen,
        // for example, if t is close to 0.5. In this case it is not safe
        // to project back onto the sphere.
        //
        double lengthSquared = qtemp.dot(qtemp);
        if (lengthSquared <= tolerance * tolerance) {
            qtemp = (t < 0.5) ? q1 : q2;
        } else {
            // Project the blended result back onto the unit sphere.
            qtemp *= 1.0 / sqrt(lengthSquared);
        }
        return qtemp;
    } else {
        // Standard slerp: weight each endpoint by sin of its share of the
        // arc, divided by sin of the full arc.
        T sine = 1.0 / sineAngle;
        T a = sin((1.0 - t) * angle) * sine;
        T b = sin(t * angle) * sine;
        return Quat<T>(a * q1[0] + b * q2[0], a * q1[1] + b * q2[1],
                a * q1[2] + b * q2[2], a * q1[3] + b * q2[3]);
    }
}
template<typename T>
class Quat
{
public:
    using value_type = T;
    using ValueType = T;
    static const int size = 4; // number of components (x, y, z, w)
    /// Trivial constructor, the quaternion is NOT initialized
#if OPENVDB_ABI_VERSION_NUMBER >= 8
    /// @note destructor, copy constructor, assignment operator and
    /// move constructor are left to be defined by the compiler (default)
    Quat() = default;
#else
    Quat() {}
    /// Copy constructor
    /// @note hand-written (rather than defaulted) only in the pre-8 ABI
    Quat(const Quat &q)
    {
        mm[0] = q.mm[0];
        mm[1] = q.mm[1];
        mm[2] = q.mm[2];
        mm[3] = q.mm[3];
    }
    /// Assignment operator
    /// @note hand-written (rather than defaulted) only in the pre-8 ABI
    Quat& operator=(const Quat &q)
    {
        mm[0] = q.mm[0];
        mm[1] = q.mm[1];
        mm[2] = q.mm[2];
        mm[3] = q.mm[3];
        return *this;
    }
#endif
/// Constructor with four arguments, e.g. Quatf q(1,2,3,4);
Quat(T x, T y, T z, T w)
{
mm[0] = x;
mm[1] = y;
mm[2] = z;
mm[3] = w;
}
/// Constructor with array argument, e.g. float a[4]; Quatf q(a);
Quat(T *a)
{
mm[0] = a[0];
mm[1] = a[1];
mm[2] = a[2];
mm[3] = a[3];
}
/// Constructor given rotation as axis and angle, the axis must be
/// unit vector
Quat(const Vec3<T> &axis, T angle)
{
// assert( REL_EQ(axis.length(), 1.) );
T s = T(sin(angle*T(0.5)));
mm[0] = axis.x() * s;
mm[1] = axis.y() * s;
mm[2] = axis.z() * s;
mm[3] = T(cos(angle*T(0.5)));
}
/// Constructor given rotation as axis and angle
Quat(math::Axis axis, T angle)
{
T s = T(sin(angle*T(0.5)));
mm[0] = (axis==math::X_AXIS) * s;
mm[1] = (axis==math::Y_AXIS) * s;
mm[2] = (axis==math::Z_AXIS) * s;
mm[3] = T(cos(angle*T(0.5)));
}
    /// Constructor given a rotation matrix
    /// @throws ArithmeticError if @a rot is not unitary or is a reflection
    template<typename T1>
    Quat(const Mat3<T1> &rot) {
        // verify that the matrix is really a rotation
        if(!isUnitary(rot)) {  // unitary is reflection or rotation
            OPENVDB_THROW(ArithmeticError,
                "A non-rotation matrix can not be used to construct a quaternion");
        }
        if (!isApproxEqual(rot.det(), T1(1))) { // rule out reflection
            OPENVDB_THROW(ArithmeticError,
                "A reflection matrix can not be used to construct a quaternion");
        }
        // Branch on the largest of the trace and the diagonal entries so the
        // divisor (2*largest component) stays well away from zero.
        // NOTE(review): the off-diagonal index order (e.g. rot(1,2) - rot(2,1))
        // implies a particular matrix convention — presumably row-vector
        // (vM) style; confirm against Mat3's documentation.
        T trace(rot.trace());
        if (trace > 0) {
            T q_w = 0.5 * std::sqrt(trace+1);
            T factor = 0.25 / q_w;
            mm[0] = factor * (rot(1,2) - rot(2,1));
            mm[1] = factor * (rot(2,0) - rot(0,2));
            mm[2] = factor * (rot(0,1) - rot(1,0));
            mm[3] = q_w;
        } else if (rot(0,0) > rot(1,1) && rot(0,0) > rot(2,2)) {
            // NOTE(review): unqualified sqrt here (cf. std::sqrt above)
            // resolves differently; consider std::sqrt for consistency.
            T q_x = 0.5 * sqrt(rot(0,0)- rot(1,1)-rot(2,2)+1);
            T factor = 0.25 / q_x;
            mm[0] = q_x;
            mm[1] = factor * (rot(0,1) + rot(1,0));
            mm[2] = factor * (rot(2,0) + rot(0,2));
            mm[3] = factor * (rot(1,2) - rot(2,1));
        } else if (rot(1,1) > rot(2,2)) {
            T q_y = 0.5 * sqrt(rot(1,1)-rot(0,0)-rot(2,2)+1);
            T factor = 0.25 / q_y;
            mm[0] = factor * (rot(0,1) + rot(1,0));
            mm[1] = q_y;
            mm[2] = factor * (rot(1,2) + rot(2,1));
            mm[3] = factor * (rot(2,0) - rot(0,2));
        } else {
            T q_z = 0.5 * sqrt(rot(2,2)-rot(0,0)-rot(1,1)+1);
            T factor = 0.25 / q_z;
            mm[0] = factor * (rot(2,0) + rot(0,2));
            mm[1] = factor * (rot(1,2) + rot(2,1));
            mm[2] = q_z;
            mm[3] = factor * (rot(0,1) - rot(1,0));
        }
    }
    /// Reference to the component, e.g. q.x() = 4.5f;
    T& x() { return mm[0]; }
    T& y() { return mm[1]; }
    T& z() { return mm[2]; }
    T& w() { return mm[3]; }
    /// Get the component, e.g. float f = q.w();
    T x() const { return mm[0]; }
    T y() const { return mm[1]; }
    T z() const { return mm[2]; }
    T w() const { return mm[3]; }
    // Number of elements
    static unsigned numElements() { return 4; }
    /// Array style reference to the components, e.g. q[3] = 1.34f;
    T& operator[](int i) { return mm[i]; }
    /// Array style constant reference to the components, e.g. float f = q[1];
    T operator[](int i) const { return mm[i]; }
    /// Cast to T*
    /// @note implicit conversion to a raw pointer — use with care
    operator T*() { return mm; }
    operator const T*() const { return mm; }
    /// Alternative indexed reference to the elements
    T& operator()(int i) { return mm[i]; }
    /// Alternative indexed constant reference to the elements,
    T operator()(int i) const { return mm[i]; }
/// Return angle of rotation
T angle() const
{
T sqrLength = mm[0]*mm[0] + mm[1]*mm[1] + mm[2]*mm[2];
if ( sqrLength > 1.0e-8 ) {
return T(T(2.0) * acos(mm[3]));
} else {
return T(0.0);
}
}
/// Return axis of rotation
Vec3<T> axis() const
{
T sqrLength = mm[0]*mm[0] + mm[1]*mm[1] + mm[2]*mm[2];
if ( sqrLength > 1.0e-8 ) {
T invLength = T(T(1)/sqrt(sqrLength));
return Vec3<T>( mm[0]*invLength, mm[1]*invLength, mm[2]*invLength );
} else {
return Vec3<T>(1,0,0);
}
}
/// "this" quaternion gets initialized to [x, y, z, w]
Quat& init(T x, T y, T z, T w)
{
mm[0] = x; mm[1] = y; mm[2] = z; mm[3] = w;
return *this;
}
/// "this" quaternion gets initialized to identity, same as setIdentity()
Quat& init() { return setIdentity(); }
/// Set "this" quaternion to rotation specified by axis and angle,
/// the axis must be unit vector
Quat& setAxisAngle(const Vec3<T>& axis, T angle)
{
T s = T(sin(angle*T(0.5)));
mm[0] = axis.x() * s;
mm[1] = axis.y() * s;
mm[2] = axis.z() * s;
mm[3] = T(cos(angle*T(0.5)));
return *this;
} // axisAngleTest
/// Set "this" vector to zero
Quat& setZero()
{
mm[0] = mm[1] = mm[2] = mm[3] = 0;
return *this;
}
/// Set "this" vector to identity
Quat& setIdentity()
{
mm[0] = mm[1] = mm[2] = 0;
mm[3] = 1;
return *this;
}
/// Returns vector of x,y,z rotational components
Vec3<T> eulerAngles(RotationOrder rotationOrder) const
{ return math::eulerAngles(Mat3<T>(*this), rotationOrder); }
/// Equality operator, does exact floating point comparisons
bool operator==(const Quat &q) const
{
return (isExactlyEqual(mm[0],q.mm[0]) &&
isExactlyEqual(mm[1],q.mm[1]) &&
isExactlyEqual(mm[2],q.mm[2]) &&
isExactlyEqual(mm[3],q.mm[3]) );
}
/// Test if "this" is equivalent to q with tolerance of eps value
bool eq(const Quat &q, T eps=1.0e-7) const
{
return isApproxEqual(mm[0],q.mm[0],eps) && isApproxEqual(mm[1],q.mm[1],eps) &&
isApproxEqual(mm[2],q.mm[2],eps) && isApproxEqual(mm[3],q.mm[3],eps) ;
} // trivial
/// Add quaternion q to "this" quaternion, e.g. q += q1;
Quat& operator+=(const Quat &q)
{
mm[0] += q.mm[0];
mm[1] += q.mm[1];
mm[2] += q.mm[2];
mm[3] += q.mm[3];
return *this;
}
/// Subtract quaternion q from "this" quaternion, e.g. q -= q1;
Quat& operator-=(const Quat &q)
{
mm[0] -= q.mm[0];
mm[1] -= q.mm[1];
mm[2] -= q.mm[2];
mm[3] -= q.mm[3];
return *this;
}
/// In-place uniform scaling of all four components, e.g. q *= scalar;
Quat& operator*=(T scalar)
{
    for (int n = 0; n < 4; ++n) mm[n] *= scalar;
    return *this;
}
/// Return the component-wise sum (this + q), e.g. q = q1 + q2;
Quat operator+(const Quat &q) const
{
    Quat<T> result(*this);
    result += q;
    return result;
}
/// Return the component-wise difference (this - q), e.g. q = q1 - q2;
Quat operator-(const Quat &q) const
{
    Quat<T> result(*this);
    result -= q;
    return result;
}
/// @brief Return the Hamilton product (this * q), e.g. q = q1 * q2;
/// @note Quaternion multiplication is not commutative.
Quat operator*(const Quat &q) const
{
    Quat<T> prod;
    prod.mult(*this, q); // prod is distinct from both operands, as mult() requires
    return prod;
}
/// In-place Hamilton product: assigns (this * q) to "this", e.g. q *= q1;
Quat operator*=(const Quat &q)
{
    return *this = *this * q;
}
/// Return a copy of "this" with every component multiplied by @a scalar, e.g. q = q1 * scalar;
Quat operator*(T scalar) const
{
    Quat<T> result(*this);
    result *= scalar;
    return result;
}
/// Return a copy of "this" with every component divided by @a scalar, e.g. q = q1 / scalar;
Quat operator/(T scalar) const
{
    Quat<T> result(*this);
    for (int n = 0; n < 4; ++n) result.mm[n] /= scalar;
    return result;
}
/// Negation: return a quaternion with every component negated, e.g. q = -q;
Quat operator-() const
{
    return Quat<T>(-mm[0], -mm[1], -mm[2], -mm[3]);
}
/// @brief Component-wise sum: this = q1 + q2.
/// "this", @a q1 and @a q2 need not be distinct objects, e.g. q.add(q1,q);
Quat& add(const Quat &q1, const Quat &q2)
{
    for (int n = 0; n < 4; ++n) mm[n] = q1.mm[n] + q2.mm[n];
    return *this;
}
/// @brief Component-wise difference: this = q1 - q2.
/// "this", @a q1 and @a q2 need not be distinct objects, e.g. q.sub(q1,q);
Quat& sub(const Quat &q1, const Quat &q2)
{
    for (int n = 0; n < 4; ++n) mm[n] = q1.mm[n] - q2.mm[n];
    return *this;
}
/// @brief Hamilton product: this = q1 * q2.
/// @warning q1 and q2 must both be distinct objects from "this":
/// every output component reads all four components of both inputs,
/// so writing into an aliased operand would corrupt the result.
/// E.g. q.mult(q1,q2);
Quat& mult(const Quat &q1, const Quat &q2)
{
    mm[0] = q1.mm[3]*q2.mm[0] + q1.mm[0]*q2.mm[3] +
            q1.mm[1]*q2.mm[2] - q1.mm[2]*q2.mm[1];

    mm[1] = q1.mm[3]*q2.mm[1] + q1.mm[1]*q2.mm[3] +
            q1.mm[2]*q2.mm[0] - q1.mm[0]*q2.mm[2];

    mm[2] = q1.mm[3]*q2.mm[2] + q1.mm[2]*q2.mm[3] +
            q1.mm[0]*q2.mm[1] - q1.mm[1]*q2.mm[0];

    mm[3] = q1.mm[3]*q2.mm[3] - q1.mm[0]*q2.mm[0] -
            q1.mm[1]*q2.mm[1] - q1.mm[2]*q2.mm[2];

    return *this;
}
/// @brief Uniform scale: this = scale * q.
/// @a q need not be a distinct object from "this", e.g. q.scale(1.5,q1);
Quat& scale(T scale, const Quat &q)
{
    for (int n = 0; n < 4; ++n) mm[n] = scale * q.mm[n];
    return *this;
}
/// Return the 4-D dot product of "this" and @a q.
T dot(const Quat &q) const
{
    // Accumulate left-to-right, matching the original evaluation order.
    T sum = mm[0]*q.mm[0];
    sum += mm[1]*q.mm[1];
    sum += mm[2]*q.mm[2];
    sum += mm[3]*q.mm[3];
    return sum;
}
/// @brief Return the quaternion rate corresponding to the angular velocity
/// @a omega and "this" current rotation.
/// @note The sign pattern below is deliberate and order-sensitive; each
/// output component combines omega with a fixed permutation of (x,y,z,w).
Quat derivative(const Vec3<T>& omega) const
{
    return Quat<T>( +w()*omega.x() -z()*omega.y() +y()*omega.z() ,
                    +z()*omega.x() +w()*omega.y() -x()*omega.z() ,
                    -y()*omega.x() +x()*omega.y() +w()*omega.z() ,
                    -x()*omega.x() -y()*omega.y() -z()*omega.z() );
}
/// @brief Normalize "this" quaternion in place.
/// @return false (leaving "this" unchanged) if the length is within
/// @a eps of zero, true otherwise.
bool normalize(T eps = T(1.0e-8))
{
    const T len = T(sqrt(mm[0]*mm[0] + mm[1]*mm[1] + mm[2]*mm[2] + mm[3]*mm[3]));
    if (isApproxEqual(len, T(0.0), eps)) return false;
    *this *= (T(1) / len);
    return true;
}
/// @brief Return a normalized copy of "this" quaternion.
/// @throw ArithmeticError if the quaternion has exactly zero length.
Quat unit() const
{
    const T len = sqrt(mm[0]*mm[0] + mm[1]*mm[1] + mm[2]*mm[2] + mm[3]*mm[3]);
    if (isExactlyEqual(len, T(0.0))) {
        OPENVDB_THROW(ArithmeticError,
            "Normalizing degenerate quaternion");
    }
    return *this / len;
}
/// @brief Return the multiplicative inverse of "this" quaternion.
/// @throw ArithmeticError if the squared length is within @a tolerance of zero.
Quat inverse(T tolerance = T(0)) const
{
    // Squared length of the quaternion.
    T d = mm[0]*mm[0] + mm[1]*mm[1] + mm[2]*mm[2] + mm[3]*mm[3];
    if( isApproxEqual(d, T(0.0), tolerance) )
        OPENVDB_THROW(ArithmeticError,
            "Cannot invert degenerate quaternion");
    // Dividing by -d negates all four components at once; flipping mm[3]
    // back afterwards yields the conjugate divided by d: (-x,-y,-z, w)/d.
    Quat result = *this/-d;
    result.mm[3] = -result.mm[3];
    return result;
}
/// @brief Return the conjugate (-x, -y, -z, w) of "this";
/// same as inverse() but without the degenerate-quaternion test.
Quat conjugate() const
{
    Quat<T> result(*this);
    result.mm[0] = -result.mm[0];
    result.mm[1] = -result.mm[1];
    result.mm[2] = -result.mm[2];
    return result;
}
/// Return vector @a v rotated by "this" quaternion.
Vec3<T> rotateVector(const Vec3<T> &v) const
{
    const Mat3<T> rot(*this); // convert to a rotation matrix, then apply it
    return rot.transform(v);
}
/// Predefined constant: the all-zero (degenerate) quaternion.
static Quat zero() { return Quat<T>(0,0,0,0); }
/// Predefined constant: the identity rotation, e.g. Quat q = Quat::identity();
static Quat identity() { return Quat<T>(0,0,0,1); }
/// @return a string representation of this quaternion, e.g. "[0, 0, 0, 1]"
std::string str() const
{
    std::ostringstream buffer;
    const char* separator = "[";
    for (unsigned n = 0; n < 4; ++n) {
        buffer << separator << mm[n];
        separator = ", "; // only the first element is preceded by "["
    }
    buffer << "]";
    return buffer.str();
}
/// Output to the stream, e.g. std::cout << q << std::endl;
friend std::ostream& operator<<(std::ostream &stream, const Quat &q)
{
    return stream << q.str();
}
friend Quat slerp<>(const Quat &q1, const Quat &q2, T t, T tolerance);
/// @brief Serialize the four components to @a os as raw binary data.
/// @note reinterpret_cast<const char*> is required here: a static_cast
/// from T(*)[4] to char* is ill-formed, and in this const member the
/// address of mm is pointer-to-const, so the target must be const char*.
void write(std::ostream& os) const { os.write(reinterpret_cast<const char*>(&mm), sizeof(T) * 4); }
/// @brief Deserialize the four components from @a is as raw binary data.
/// @note reinterpret_cast is required: static_cast cannot convert
/// T(*)[4] (the type of &mm) to char*.
void read(std::istream& is) { is.read(reinterpret_cast<char*>(&mm), sizeof(T) * 4); }
protected:
T mm[4];
};
/// @brief Multiply each element of the given quaternion by @a scalar and return the result.
/// @note Free-function overload providing scalar-on-the-left multiplication;
/// delegates to the member operator*(T).
template <typename S, typename T>
Quat<T> operator*(S scalar, const Quat<T> &q) { return q*scalar; }
/// @brief Interpolate between @a m1 and @a m2 at parameter @a t.
/// Converts both matrices to quaternion form and uses slerp.
/// @warning m1 and m2 must be rotation matrices!
template <typename T, typename T0>
Mat3<T> slerp(const Mat3<T0> &m1, const Mat3<T0> &m2, T t)
{
    using MatType = Mat3<T>;

    Quat<T> q1(m1), q2(m2);

    // Flip one quaternion when the pair straddles hemispheres so the
    // interpolation takes the shorter arc.
    if (q1.dot(q2) < 0) q2 *= -1;

    Quat<T> qslerp = slerp<T>(q1, q2, static_cast<T>(t));
    return rotation<MatType>(qslerp);
}
/// @brief Interpolate between @a m1 and @a m4 by converting m1 ... m4 into
/// quaternions and treating them as control points of a Bezier curve,
/// using slerp in place of lerp in the De Casteljau evaluation algorithm.
/// @details Just like a cubic Bezier curve, this interpolates m1 at t = 0
/// and m4 at t = 1 but in general will not pass through m2 and m3. Unlike
/// a standard Bezier curve this curve will not have the convex hull property.
/// @warning m1 ... m4 must be rotation matrices!
template <typename T, typename T0>
Mat3<T> bezLerp(const Mat3<T0> &m1, const Mat3<T0> &m2,
                const Mat3<T0> &m3, const Mat3<T0> &m4,
                T t)
{
    // First De Casteljau level: interpolate adjacent control rotations.
    const Mat3<T> a0 = slerp(m1, m2, t);
    const Mat3<T> a1 = slerp(m2, m3, t);
    const Mat3<T> a2 = slerp(m3, m4, t);
    // Second level.
    const Mat3<T> b0 = slerp(a0, a1, t);
    const Mat3<T> b1 = slerp(a1, a2, t);
    // Final level yields the interpolated rotation.
    return slerp(b0, b1, t);
}
using Quats = Quat<float>;
using Quatd = Quat<double>;
#if OPENVDB_ABI_VERSION_NUMBER >= 8
OPENVDB_IS_POD(Quats)
OPENVDB_IS_POD(Quatd)
#endif
} // namespace math
/// Return the "zero" value (the all-zero quaternion) for single-precision quaternions.
template<> inline math::Quats zeroVal<math::Quats >() { return math::Quats::zero(); }
/// Return the "zero" value (the all-zero quaternion) for double-precision quaternions.
template<> inline math::Quatd zeroVal<math::Quatd >() { return math::Quatd::zero(); }
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif //OPENVDB_MATH_QUAT_H_HAS_BEEN_INCLUDED
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @author Ken Museth
///
/// @file Stencils.h
///
/// @brief Defines various finite difference stencils by means of the
/// "curiously recurring template pattern" on a BaseStencil
/// that caches stencil values and stores a ValueAccessor for
/// fast lookup.
#ifndef OPENVDB_MATH_STENCILS_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_STENCILS_HAS_BEEN_INCLUDED
#include <algorithm>
#include <vector> // for std::vector
#include <bitset> // for std::bitset
#include <openvdb/math/Math.h> // for Pow2, needed by WENO and Godunov
#include <openvdb/Types.h> // for Real
#include <openvdb/math/Coord.h> // for Coord
#include <openvdb/math/FiniteDifference.h> // for WENO5 and GodunovsNormSqrd
#include <openvdb/tree/ValueAccessor.h>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
////////////////////////////////////////
/// @brief CRTP base class that caches stencil values in a buffer and holds a
/// ValueAccessor for fast grid lookup. Derived classes supply init() (to fill
/// the buffer around a center voxel) and pos<i,j,k>() (the buffer layout).
template<typename DerivedType, typename GridT, bool IsSafe>
class BaseStencil
{
public:
    typedef GridT                                       GridType;
    typedef typename GridT::TreeType                    TreeType;
    typedef typename GridT::ValueType                   ValueType;
    typedef tree::ValueAccessor<const TreeType, IsSafe> AccessorType;
    typedef std::vector<ValueType>                      BufferType;
    typedef typename BufferType::iterator               IterType;

    /// @brief Initialize the stencil buffer with the values of voxel (i, j, k)
    /// and its neighbors.
    /// @param ijk Index coordinates of stencil center
    inline void moveTo(const Coord& ijk)
    {
        mCenter = ijk;
        mValues[0] = mAcc.getValue(ijk);
        static_cast<DerivedType&>(*this).init(mCenter);
    }

    /// @brief Initialize the stencil buffer with the values of voxel (i, j, k)
    /// and its neighbors. The method also takes a value of the center
    /// element of the stencil, assuming it is already known.
    /// @param ijk Index coordinates of stencil center
    /// @param centerValue Value of the center element of the stencil
    inline void moveTo(const Coord& ijk, const ValueType& centerValue)
    {
        mCenter = ijk;
        mValues[0] = centerValue;
        static_cast<DerivedType&>(*this).init(mCenter);
    }

    /// @brief Initialize the stencil buffer with the values of voxel
    /// (x, y, z) and its neighbors.
    ///
    /// @note This version is slightly faster than the one above, since
    /// the center voxel's value is read directly from the iterator.
    /// (Template parameter renamed from IterType to IterT so it no longer
    /// shadows the class-level IterType typedef.)
    template<typename IterT>
    inline void moveTo(const IterT& iter)
    {
        mCenter = iter.getCoord();
        mValues[0] = *iter;
        static_cast<DerivedType&>(*this).init(mCenter);
    }

    /// @brief Initialize the stencil buffer with the values of voxel (x, y, z)
    /// and its neighbors.
    /// @param xyz Floating point voxel coordinates of stencil center
    /// @details This method will check to see if it is necessary to
    /// update the stencil based on the cached index coordinates of
    /// the center point.
    template<typename RealType>
    inline void moveTo(const Vec3<RealType>& xyz)
    {
        Coord ijk = Coord::floor(xyz);
        if (ijk != mCenter) this->moveTo(ijk);
    }

    /// @brief Return the value from the stencil buffer with linear
    /// offset pos.
    ///
    /// @note The default (@a pos = 0) corresponds to the first element
    /// which is typically the center point of the stencil.
    inline const ValueType& getValue(unsigned int pos = 0) const
    {
        assert(pos < mValues.size());
        return mValues[pos];
    }

    /// @brief Return the value at the specified location relative to the center of the stencil
    template<int i, int j, int k>
    inline const ValueType& getValue() const
    {
        return mValues[static_cast<const DerivedType&>(*this).template pos<i,j,k>()];
    }

    /// @brief Set the value at the specified location relative to the center of the stencil
    template<int i, int j, int k>
    inline void setValue(const ValueType& value)
    {
        mValues[static_cast<const DerivedType&>(*this).template pos<i,j,k>()] = value;
    }

    /// @brief Return the size of the stencil buffer.
    /// @note Now const (it does not modify the stencil) and with an explicit
    /// cast to avoid an implicit size_t -> int narrowing conversion.
    inline int size() const { return static_cast<int>(mValues.size()); }

    /// @brief Return the median value of the current stencil.
    inline ValueType median() const
    {
        BufferType tmp(mValues);//local copy
        assert(!tmp.empty());
        size_t midpoint = (tmp.size() - 1) >> 1;
        // Partially sort the vector until the median value is at the midpoint.
#if !defined(_MSC_VER) || _MSC_VER < 1924
        std::nth_element(tmp.begin(), tmp.begin() + midpoint, tmp.end());
#else
        // Workaround MSVC bool warning C4804 unsafe use of type 'bool'
        std::nth_element(tmp.begin(), tmp.begin() + midpoint, tmp.end(),
            std::less<ValueType>());
#endif
        return tmp[midpoint];
    }

    /// @brief Return the mean value of the current stencil.
    inline ValueType mean() const
    {
        ValueType sum = 0.0;
        for (int n = 0, s = int(mValues.size()); n < s; ++n) sum += mValues[n];
        return sum / ValueType(mValues.size());
    }

    /// @brief Return the smallest value in the stencil buffer.
    inline ValueType min() const
    {
        IterType iter = std::min_element(mValues.begin(), mValues.end());
        return *iter;
    }

    /// @brief Return the largest value in the stencil buffer.
    inline ValueType max() const
    {
        IterType iter = std::max_element(mValues.begin(), mValues.end());
        return *iter;
    }

    /// @brief Return the coordinates of the center point of the stencil.
    inline const Coord& getCenterCoord() const { return mCenter; }

    /// @brief Return the value at the center of the stencil
    inline const ValueType& getCenterValue() const { return mValues[0]; }

    /// @brief Return true if the center of the stencil intersects the
    /// iso-contour specified by the isoValue
    inline bool intersects(const ValueType &isoValue = zeroVal<ValueType>()) const
    {
        const bool less = this->getValue< 0, 0, 0>() < isoValue;
        return (less  ^  (this->getValue<-1, 0, 0>() < isoValue)) ||
               (less  ^  (this->getValue< 1, 0, 0>() < isoValue)) ||
               (less  ^  (this->getValue< 0,-1, 0>() < isoValue)) ||
               (less  ^  (this->getValue< 0, 1, 0>() < isoValue)) ||
               (less  ^  (this->getValue< 0, 0,-1>() < isoValue)) ||
               (less  ^  (this->getValue< 0, 0, 1>() < isoValue))  ;
    }

    /// @brief Return a bit-mask whose 6 bits indicate whether the
    /// center of the stencil intersects the iso-contour specified by the isoValue.
    ///
    /// @note There are 2^6 = 64 different possible cases, including no intersections!
    ///
    /// @details The ordering of the bit mask is ( -x, +x, -y, +y, -z, +z ), so to
    /// check if there is an intersection in -y use mask.test(2) where mask is
    /// the return value from this function. To check if there are any
    /// intersections use mask.any(), and for no intersections use mask.none().
    /// To count the number of intersections use mask.count().
    inline std::bitset<6> intersectionMask(const ValueType &isoValue = zeroVal<ValueType>()) const
    {
        std::bitset<6> mask;
        const bool less =   this->getValue< 0, 0, 0>() < isoValue;
        mask[0] = less ^ (this->getValue<-1, 0, 0>() < isoValue);
        mask[1] = less ^ (this->getValue< 1, 0, 0>() < isoValue);
        mask[2] = less ^ (this->getValue< 0,-1, 0>() < isoValue);
        mask[3] = less ^ (this->getValue< 0, 1, 0>() < isoValue);
        mask[4] = less ^ (this->getValue< 0, 0,-1>() < isoValue);
        mask[5] = less ^ (this->getValue< 0, 0, 1>() < isoValue);
        return mask;
    }

    /// @brief Return a const reference to the grid from which this
    /// stencil was constructed.
    inline const GridType& grid() const { return *mGrid; }

    /// @brief Return a const reference to the ValueAccessor
    /// associated with this Stencil.
    inline const AccessorType& accessor() const { return mAcc; }

protected:
    // Constructor is protected to prevent direct instantiation.
    BaseStencil(const GridType& grid, int size)
        : mGrid(&grid)
        , mAcc(grid.tree())
        , mValues(size)
        , mCenter(Coord::max())
    {
    }

    const GridType* mGrid;
    AccessorType    mAcc;
    BufferType      mValues;
    Coord           mCenter;

}; // BaseStencil class
////////////////////////////////////////
namespace { // anonymous namespace for stencil-layout map

// Maps each (i,j,k) offset of the seven-point stencil to its linear index in
// the stencil's value buffer: center first, then +x, +y, +z, then -x, -y, -z.
template<int i, int j, int k> struct SevenPt {};
template<> struct SevenPt< 0, 0, 0> { enum { idx = 0 }; };
template<> struct SevenPt< 1, 0, 0> { enum { idx = 1 }; };
template<> struct SevenPt< 0, 1, 0> { enum { idx = 2 }; };
template<> struct SevenPt< 0, 0, 1> { enum { idx = 3 }; };
template<> struct SevenPt<-1, 0, 0> { enum { idx = 4 }; };
template<> struct SevenPt< 0,-1, 0> { enum { idx = 5 }; };
template<> struct SevenPt< 0, 0,-1> { enum { idx = 6 }; };

}
/// @brief Seven-point stencil: the center voxel plus its six face neighbors.
template<typename GridT, bool IsSafe = true>
class SevenPointStencil: public BaseStencil<SevenPointStencil<GridT, IsSafe>, GridT, IsSafe>
{
    typedef SevenPointStencil<GridT, IsSafe> SelfT;
    typedef BaseStencil<SelfT, GridT, IsSafe> BaseType;
public:
    typedef GridT                     GridType;
    typedef typename GridT::TreeType  TreeType;
    typedef typename GridT::ValueType ValueType;

    static const int SIZE = 7;

    SevenPointStencil(const GridT& grid): BaseType(grid, SIZE) {}

    /// Return linear offset for the specified stencil point relative to its center
    template<int i, int j, int k>
    unsigned int pos() const { return SevenPt<i,j,k>::idx; }

private:
    /// Cache the six face-neighbor values around @a ijk; the center value
    /// itself has already been stored by BaseStencil::moveTo().
    inline void init(const Coord& ijk)
    {
        mValues[SevenPt<-1, 0, 0>::idx] = mAcc.getValue(ijk.offsetBy(-1, 0, 0));
        mValues[SevenPt< 1, 0, 0>::idx] = mAcc.getValue(ijk.offsetBy( 1, 0, 0));
        mValues[SevenPt< 0,-1, 0>::idx] = mAcc.getValue(ijk.offsetBy( 0,-1, 0));
        mValues[SevenPt< 0, 1, 0>::idx] = mAcc.getValue(ijk.offsetBy( 0, 1, 0));
        mValues[SevenPt< 0, 0,-1>::idx] = mAcc.getValue(ijk.offsetBy( 0, 0,-1));
        mValues[SevenPt< 0, 0, 1>::idx] = mAcc.getValue(ijk.offsetBy( 0, 0, 1));
    }
    template<typename, typename, bool> friend class BaseStencil; // allow base class to call init()
    using BaseType::mAcc;
    using BaseType::mValues;
};// SevenPointStencil class
////////////////////////////////////////
namespace { // anonymous namespace for stencil-layout map

// Maps each (i,j,k) offset of the eight-point box stencil to its linear index
// in the value buffer. The eight points are the corners of the unit cell whose
// lowest corner (0,0,0) is the stencil center.
template<int i, int j, int k> struct BoxPt {};
template<> struct BoxPt< 0, 0, 0> { enum { idx = 0 }; };
template<> struct BoxPt< 0, 0, 1> { enum { idx = 1 }; };
template<> struct BoxPt< 0, 1, 1> { enum { idx = 2 }; };
template<> struct BoxPt< 0, 1, 0> { enum { idx = 3 }; };
template<> struct BoxPt< 1, 0, 0> { enum { idx = 4 }; };
template<> struct BoxPt< 1, 0, 1> { enum { idx = 5 }; };
template<> struct BoxPt< 1, 1, 1> { enum { idx = 6 }; };
template<> struct BoxPt< 1, 1, 0> { enum { idx = 7 }; };

}
/// @brief Eight-point box stencil sampling the corners of the unit cell whose
/// lowest corner is the center voxel; supports trilinear interpolation and
/// its analytic gradient.
template<typename GridT, bool IsSafe = true>
class BoxStencil: public BaseStencil<BoxStencil<GridT, IsSafe>, GridT, IsSafe>
{
    typedef BoxStencil<GridT, IsSafe> SelfT;
    typedef BaseStencil<SelfT, GridT, IsSafe> BaseType;
public:
    typedef GridT                     GridType;
    typedef typename GridT::TreeType  TreeType;
    typedef typename GridT::ValueType ValueType;

    static const int SIZE = 8;

    BoxStencil(const GridType& grid): BaseType(grid, SIZE) {}

    /// Return linear offset for the specified stencil point relative to its center
    template<int i, int j, int k>
    unsigned int pos() const { return BoxPt<i,j,k>::idx; }

    /// @brief Return true if the center of the stencil intersects the
    /// iso-contour specified by the isoValue
    inline bool intersects(const ValueType &isoValue = zeroVal<ValueType>()) const
    {
        // Any corner on the opposite side of the iso-value from the center
        // implies an intersection within the cell.
        const bool less = mValues[0] < isoValue;
        return (less  ^  (mValues[1] < isoValue)) ||
               (less  ^  (mValues[2] < isoValue)) ||
               (less  ^  (mValues[3] < isoValue)) ||
               (less  ^  (mValues[4] < isoValue)) ||
               (less  ^  (mValues[5] < isoValue)) ||
               (less  ^  (mValues[6] < isoValue)) ||
               (less  ^  (mValues[7] < isoValue))  ;
    }

    /// @brief Return the trilinear interpolation at the normalized position.
    /// @param xyz Floating point coordinate position.
    /// @warning It is assumed that the stencil has already been moved
    /// to the relevant voxel position, e.g. using moveTo(xyz).
    /// @note Trilinear interpolation kernel reads as:
    ///       v000 (1-u)(1-v)(1-w) + v001 (1-u)(1-v)w + v010 (1-u)v(1-w) + v011 (1-u)vw
    ///     + v100 u(1-v)(1-w) + v101 u(1-v)w + v110 uv(1-w) + v111 uvw
    inline ValueType interpolation(const math::Vec3<ValueType>& xyz) const
    {
        // Fractional offsets of xyz within the cell; must lie in [0,1].
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        const ValueType u = xyz[0] - BaseType::mCenter[0];
        const ValueType v = xyz[1] - BaseType::mCenter[1];
        const ValueType w = xyz[2] - BaseType::mCenter[2];
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END

        assert(u>=0 && u<=1);
        assert(v>=0 && v<=1);
        assert(w>=0 && w<=1);

        // Lerp along z on the four cell edges, then along y, then along x.
        ValueType V = BaseType::template getValue<0,0,0>();
        ValueType A = static_cast<ValueType>(V + (BaseType::template getValue<0,0,1>() - V) * w);
        V = BaseType::template getValue< 0, 1, 0>();
        ValueType B = static_cast<ValueType>(V + (BaseType::template getValue<0,1,1>() - V) * w);
        ValueType C = static_cast<ValueType>(A + (B - A) * v);

        V = BaseType::template getValue<1,0,0>();
        A = static_cast<ValueType>(V + (BaseType::template getValue<1,0,1>() - V) * w);
        V = BaseType::template getValue<1,1,0>();
        B = static_cast<ValueType>(V + (BaseType::template getValue<1,1,1>() - V) * w);
        ValueType D = static_cast<ValueType>(A + (B - A) * v);

        return static_cast<ValueType>(C + (D - C) * u);
    }

    /// @brief Return the gradient in world space of the trilinear interpolation kernel.
    /// @param xyz Floating point coordinate position.
    /// @warning It is assumed that the stencil has already been moved
    /// to the relevant voxel position, e.g. using moveTo(xyz).
    /// @note Computed as partial derivatives of the trilinear interpolation kernel:
    ///       v000 (1-u)(1-v)(1-w) + v001 (1-u)(1-v)w + v010 (1-u)v(1-w) + v011 (1-u)vw
    ///     + v100 u(1-v)(1-w) + v101 u(1-v)w + v110 uv(1-w) + v111 uvw
    inline math::Vec3<ValueType> gradient(const math::Vec3<ValueType>& xyz) const
    {
        // Fractional offsets of xyz within the cell; must lie in [0,1].
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        const ValueType u = xyz[0] - BaseType::mCenter[0];
        const ValueType v = xyz[1] - BaseType::mCenter[1];
        const ValueType w = xyz[2] - BaseType::mCenter[2];
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END

        assert(u>=0 && u<=1);
        assert(v>=0 && v<=1);
        assert(w>=0 && w<=1);

        // z-differences along the four cell edges parallel to the z axis.
        ValueType D[4]={BaseType::template getValue<0,0,1>()-BaseType::template getValue<0,0,0>(),
                        BaseType::template getValue<0,1,1>()-BaseType::template getValue<0,1,0>(),
                        BaseType::template getValue<1,0,1>()-BaseType::template getValue<1,0,0>(),
                        BaseType::template getValue<1,1,1>()-BaseType::template getValue<1,1,0>()};

        // Z component
        ValueType A = static_cast<ValueType>(D[0] + (D[1]- D[0]) * v);
        ValueType B = static_cast<ValueType>(D[2] + (D[3]- D[2]) * v);
        math::Vec3<ValueType> grad(zeroVal<ValueType>(),
                                   zeroVal<ValueType>(),
                                   static_cast<ValueType>(A + (B - A) * u));

        // Reuse D for the values lerped along z at each of the four edges.
        D[0] = static_cast<ValueType>(BaseType::template getValue<0,0,0>() + D[0] * w);
        D[1] = static_cast<ValueType>(BaseType::template getValue<0,1,0>() + D[1] * w);
        D[2] = static_cast<ValueType>(BaseType::template getValue<1,0,0>() + D[2] * w);
        D[3] = static_cast<ValueType>(BaseType::template getValue<1,1,0>() + D[3] * w);

        // X component
        A = static_cast<ValueType>(D[0] + (D[1] - D[0]) * v);
        B = static_cast<ValueType>(D[2] + (D[3] - D[2]) * v);

        grad[0] = B - A;

        // Y component
        A = D[1] - D[0];
        B = D[3] - D[2];

        grad[1] = static_cast<ValueType>(A + (B - A) * u);

        // Transform the index-space gradient into world space.
        return BaseType::mGrid->transform().baseMap()->applyIJT(grad, xyz);
    }

private:
    /// Cache the seven remaining corner values of the cell around @a ijk;
    /// the (0,0,0) corner has already been stored by BaseStencil::moveTo().
    inline void init(const Coord& ijk)
    {
        BaseType::template setValue< 0, 0, 1>(mAcc.getValue(ijk.offsetBy( 0, 0, 1)));
        BaseType::template setValue< 0, 1, 1>(mAcc.getValue(ijk.offsetBy( 0, 1, 1)));
        BaseType::template setValue< 0, 1, 0>(mAcc.getValue(ijk.offsetBy( 0, 1, 0)));
        BaseType::template setValue< 1, 0, 0>(mAcc.getValue(ijk.offsetBy( 1, 0, 0)));
        BaseType::template setValue< 1, 0, 1>(mAcc.getValue(ijk.offsetBy( 1, 0, 1)));
        BaseType::template setValue< 1, 1, 1>(mAcc.getValue(ijk.offsetBy( 1, 1, 1)));
        BaseType::template setValue< 1, 1, 0>(mAcc.getValue(ijk.offsetBy( 1, 1, 0)));
    }

    template<typename, typename, bool> friend class BaseStencil; // allow base class to call init()
    using BaseType::mAcc;
    using BaseType::mValues;
};// BoxStencil class
////////////////////////////////////////
namespace { // anonymous namespace for stencil-layout map

// Maps each (i,j,k) offset of the 19-point second-order dense stencil to its
// linear index in the value buffer: the center (0), the six face neighbors
// (1-6), then the twelve edge neighbors (7-18).
template<int i, int j, int k> struct DensePt {};
template<> struct DensePt< 0, 0, 0> { enum { idx = 0 }; };

template<> struct DensePt< 1, 0, 0> { enum { idx = 1 }; };
template<> struct DensePt< 0, 1, 0> { enum { idx = 2 }; };
template<> struct DensePt< 0, 0, 1> { enum { idx = 3 }; };

template<> struct DensePt<-1, 0, 0> { enum { idx = 4 }; };
template<> struct DensePt< 0,-1, 0> { enum { idx = 5 }; };
template<> struct DensePt< 0, 0,-1> { enum { idx = 6 }; };

template<> struct DensePt<-1,-1, 0> { enum { idx = 7 }; };
template<> struct DensePt< 0,-1,-1> { enum { idx = 8 }; };
template<> struct DensePt<-1, 0,-1> { enum { idx = 9 }; };

template<> struct DensePt< 1,-1, 0> { enum { idx = 10 }; };
template<> struct DensePt< 0, 1,-1> { enum { idx = 11 }; };
template<> struct DensePt<-1, 0, 1> { enum { idx = 12 }; };

template<> struct DensePt<-1, 1, 0> { enum { idx = 13 }; };
template<> struct DensePt< 0,-1, 1> { enum { idx = 14 }; };
template<> struct DensePt< 1, 0,-1> { enum { idx = 15 }; };

template<> struct DensePt< 1, 1, 0> { enum { idx = 16 }; };
template<> struct DensePt< 0, 1, 1> { enum { idx = 17 }; };
template<> struct DensePt< 1, 0, 1> { enum { idx = 18 }; };

}
/// @brief 19-point dense stencil: the center voxel, its six face neighbors,
/// and its twelve edge neighbors.
template<typename GridT, bool IsSafe = true>
class SecondOrderDenseStencil
    : public BaseStencil<SecondOrderDenseStencil<GridT, IsSafe>, GridT, IsSafe >
{
    typedef SecondOrderDenseStencil<GridT, IsSafe> SelfT;
    typedef BaseStencil<SelfT, GridT, IsSafe >     BaseType;
public:
    typedef GridT                         GridType;
    typedef typename GridT::TreeType      TreeType;
    typedef typename GridType::ValueType  ValueType;

    static const int SIZE = 19;

    SecondOrderDenseStencil(const GridType& grid): BaseType(grid, SIZE) {}

    /// Return linear offset for the specified stencil point relative to its center
    template<int i, int j, int k>
    unsigned int pos() const { return DensePt<i,j,k>::idx; }

private:
    /// Cache the 18 neighbor values around @a ijk; the center value itself
    /// has already been stored by BaseStencil::moveTo().
    inline void init(const Coord& ijk)
    {
        // The six face neighbors.
        BaseType::template setValue< 1, 0, 0>(mAcc.getValue(ijk.offsetBy( 1,  0,  0)));
        BaseType::template setValue< 0, 1, 0>(mAcc.getValue(ijk.offsetBy( 0,  1,  0)));
        BaseType::template setValue< 0, 0, 1>(mAcc.getValue(ijk.offsetBy( 0,  0,  1)));
        BaseType::template setValue<-1, 0, 0>(mAcc.getValue(ijk.offsetBy(-1,  0,  0)));
        BaseType::template setValue< 0,-1, 0>(mAcc.getValue(ijk.offsetBy( 0, -1,  0)));
        BaseType::template setValue< 0, 0,-1>(mAcc.getValue(ijk.offsetBy( 0,  0, -1)));

        // The four edge neighbors in the xy-plane.
        BaseType::template setValue<-1,-1, 0>(mAcc.getValue(ijk.offsetBy(-1, -1,  0)));
        BaseType::template setValue< 1,-1, 0>(mAcc.getValue(ijk.offsetBy( 1, -1,  0)));
        BaseType::template setValue<-1, 1, 0>(mAcc.getValue(ijk.offsetBy(-1,  1,  0)));
        BaseType::template setValue< 1, 1, 0>(mAcc.getValue(ijk.offsetBy( 1,  1,  0)));

        // The four edge neighbors in the xz-plane.
        BaseType::template setValue<-1, 0,-1>(mAcc.getValue(ijk.offsetBy(-1,  0, -1)));
        BaseType::template setValue< 1, 0,-1>(mAcc.getValue(ijk.offsetBy( 1,  0, -1)));
        BaseType::template setValue<-1, 0, 1>(mAcc.getValue(ijk.offsetBy(-1,  0,  1)));
        BaseType::template setValue< 1, 0, 1>(mAcc.getValue(ijk.offsetBy( 1,  0,  1)));

        // The four edge neighbors in the yz-plane.
        BaseType::template setValue< 0,-1,-1>(mAcc.getValue(ijk.offsetBy( 0, -1, -1)));
        BaseType::template setValue< 0, 1,-1>(mAcc.getValue(ijk.offsetBy( 0,  1, -1)));
        BaseType::template setValue< 0,-1, 1>(mAcc.getValue(ijk.offsetBy( 0, -1,  1)));
        BaseType::template setValue< 0, 1, 1>(mAcc.getValue(ijk.offsetBy( 0,  1,  1)));
    }

    template<typename, typename, bool> friend class BaseStencil; // allow base class to call init()
    using BaseType::mAcc;
    using BaseType::mValues;
};// SecondOrderDenseStencil class
////////////////////////////////////////
namespace { // anonymous namespace for stencil-layout map

// Maps each (i,j,k) offset of the 13-point stencil to its linear index in the
// value buffer: the center (0), the six +/-1 face neighbors (1-6), and the
// six +/-2 neighbors along the axes (7-12).
template<int i, int j, int k> struct ThirteenPt {};
template<> struct ThirteenPt< 0, 0, 0> { enum { idx = 0 }; };

template<> struct ThirteenPt< 1, 0, 0> { enum { idx = 1 }; };
template<> struct ThirteenPt< 0, 1, 0> { enum { idx = 2 }; };
template<> struct ThirteenPt< 0, 0, 1> { enum { idx = 3 }; };

template<> struct ThirteenPt<-1, 0, 0> { enum { idx = 4 }; };
template<> struct ThirteenPt< 0,-1, 0> { enum { idx = 5 }; };
template<> struct ThirteenPt< 0, 0,-1> { enum { idx = 6 }; };

template<> struct ThirteenPt< 2, 0, 0> { enum { idx = 7 }; };
template<> struct ThirteenPt< 0, 2, 0> { enum { idx = 8 }; };
template<> struct ThirteenPt< 0, 0, 2> { enum { idx = 9 }; };

template<> struct ThirteenPt<-2, 0, 0> { enum { idx = 10 }; };
template<> struct ThirteenPt< 0,-2, 0> { enum { idx = 11 }; };
template<> struct ThirteenPt< 0, 0,-2> { enum { idx = 12 }; };

}
/// @brief 13-point stencil: the center voxel plus offsets of +/-1 and +/-2
/// along each coordinate axis.
template<typename GridT, bool IsSafe = true>
class ThirteenPointStencil
    : public BaseStencil<ThirteenPointStencil<GridT, IsSafe>, GridT, IsSafe>
{
    typedef ThirteenPointStencil<GridT, IsSafe> SelfT;
    typedef BaseStencil<SelfT, GridT, IsSafe >  BaseType;
public:
    typedef GridT                         GridType;
    typedef typename GridT::TreeType      TreeType;
    typedef typename GridType::ValueType  ValueType;

    static const int SIZE = 13;

    ThirteenPointStencil(const GridType& grid): BaseType(grid, SIZE) {}

    /// Return linear offset for the specified stencil point relative to its center
    template<int i, int j, int k>
    unsigned int pos() const { return ThirteenPt<i,j,k>::idx; }

private:
    /// Cache the 12 axis-neighbor values around @a ijk; the center value
    /// itself has already been stored by BaseStencil::moveTo().
    inline void init(const Coord& ijk)
    {
        // x axis
        BaseType::template setValue< 2, 0, 0>(mAcc.getValue(ijk.offsetBy( 2,  0,  0)));
        BaseType::template setValue< 1, 0, 0>(mAcc.getValue(ijk.offsetBy( 1,  0,  0)));
        BaseType::template setValue<-1, 0, 0>(mAcc.getValue(ijk.offsetBy(-1,  0,  0)));
        BaseType::template setValue<-2, 0, 0>(mAcc.getValue(ijk.offsetBy(-2,  0,  0)));

        // y axis
        BaseType::template setValue< 0, 2, 0>(mAcc.getValue(ijk.offsetBy( 0,  2,  0)));
        BaseType::template setValue< 0, 1, 0>(mAcc.getValue(ijk.offsetBy( 0,  1,  0)));
        BaseType::template setValue< 0,-1, 0>(mAcc.getValue(ijk.offsetBy( 0, -1,  0)));
        BaseType::template setValue< 0,-2, 0>(mAcc.getValue(ijk.offsetBy( 0, -2,  0)));

        // z axis
        BaseType::template setValue< 0, 0, 2>(mAcc.getValue(ijk.offsetBy( 0,  0,  2)));
        BaseType::template setValue< 0, 0, 1>(mAcc.getValue(ijk.offsetBy( 0,  0,  1)));
        BaseType::template setValue< 0, 0,-1>(mAcc.getValue(ijk.offsetBy( 0,  0, -1)));
        BaseType::template setValue< 0, 0,-2>(mAcc.getValue(ijk.offsetBy( 0,  0, -2)));
    }

    template<typename, typename, bool> friend class BaseStencil; // allow base class to call init()
    using BaseType::mAcc;
    using BaseType::mValues;
};// ThirteenPointStencil class
////////////////////////////////////////
namespace { // anonymous namespace for stencil-layout map

// Maps each (i,j,k) offset of the 4th-order dense stencil to its linear index
// in the value buffer: the center (0), then a 5x5 cross-section in each of
// the xy-plane (1-24), the xz-plane (25-44), and the yz-plane (45-60).
template<int i, int j, int k> struct FourthDensePt {};
template<> struct FourthDensePt< 0, 0, 0> { enum { idx = 0 }; };

// xy-plane (k == 0), rows from j == 2 down to j == -2
template<> struct FourthDensePt<-2, 2, 0> { enum { idx = 1 }; };
template<> struct FourthDensePt<-1, 2, 0> { enum { idx = 2 }; };
template<> struct FourthDensePt< 0, 2, 0> { enum { idx = 3 }; };
template<> struct FourthDensePt< 1, 2, 0> { enum { idx = 4 }; };
template<> struct FourthDensePt< 2, 2, 0> { enum { idx = 5 }; };

template<> struct FourthDensePt<-2, 1, 0> { enum { idx = 6 }; };
template<> struct FourthDensePt<-1, 1, 0> { enum { idx = 7 }; };
template<> struct FourthDensePt< 0, 1, 0> { enum { idx = 8 }; };
template<> struct FourthDensePt< 1, 1, 0> { enum { idx = 9 }; };
template<> struct FourthDensePt< 2, 1, 0> { enum { idx = 10 }; };

template<> struct FourthDensePt<-2, 0, 0> { enum { idx = 11 }; };
template<> struct FourthDensePt<-1, 0, 0> { enum { idx = 12 }; };
template<> struct FourthDensePt< 1, 0, 0> { enum { idx = 13 }; };
template<> struct FourthDensePt< 2, 0, 0> { enum { idx = 14 }; };

template<> struct FourthDensePt<-2,-1, 0> { enum { idx = 15 }; };
template<> struct FourthDensePt<-1,-1, 0> { enum { idx = 16 }; };
template<> struct FourthDensePt< 0,-1, 0> { enum { idx = 17 }; };
template<> struct FourthDensePt< 1,-1, 0> { enum { idx = 18 }; };
template<> struct FourthDensePt< 2,-1, 0> { enum { idx = 19 }; };

template<> struct FourthDensePt<-2,-2, 0> { enum { idx = 20 }; };
template<> struct FourthDensePt<-1,-2, 0> { enum { idx = 21 }; };
template<> struct FourthDensePt< 0,-2, 0> { enum { idx = 22 }; };
template<> struct FourthDensePt< 1,-2, 0> { enum { idx = 23 }; };
template<> struct FourthDensePt< 2,-2, 0> { enum { idx = 24 }; };

// xz-plane (j == 0), rows from k == 2 down to k == -2
template<> struct FourthDensePt<-2, 0, 2> { enum { idx = 25 }; };
template<> struct FourthDensePt<-1, 0, 2> { enum { idx = 26 }; };
template<> struct FourthDensePt< 0, 0, 2> { enum { idx = 27 }; };
template<> struct FourthDensePt< 1, 0, 2> { enum { idx = 28 }; };
template<> struct FourthDensePt< 2, 0, 2> { enum { idx = 29 }; };

template<> struct FourthDensePt<-2, 0, 1> { enum { idx = 30 }; };
template<> struct FourthDensePt<-1, 0, 1> { enum { idx = 31 }; };
template<> struct FourthDensePt< 0, 0, 1> { enum { idx = 32 }; };
template<> struct FourthDensePt< 1, 0, 1> { enum { idx = 33 }; };
template<> struct FourthDensePt< 2, 0, 1> { enum { idx = 34 }; };

template<> struct FourthDensePt<-2, 0,-1> { enum { idx = 35 }; };
template<> struct FourthDensePt<-1, 0,-1> { enum { idx = 36 }; };
template<> struct FourthDensePt< 0, 0,-1> { enum { idx = 37 }; };
template<> struct FourthDensePt< 1, 0,-1> { enum { idx = 38 }; };
template<> struct FourthDensePt< 2, 0,-1> { enum { idx = 39 }; };

template<> struct FourthDensePt<-2, 0,-2> { enum { idx = 40 }; };
template<> struct FourthDensePt<-1, 0,-2> { enum { idx = 41 }; };
template<> struct FourthDensePt< 0, 0,-2> { enum { idx = 42 }; };
template<> struct FourthDensePt< 1, 0,-2> { enum { idx = 43 }; };
template<> struct FourthDensePt< 2, 0,-2> { enum { idx = 44 }; };

// yz-plane (i == 0), columns from k == 2 down to k == -2
// (axis points already covered above are omitted)
template<> struct FourthDensePt< 0,-2, 2> { enum { idx = 45 }; };
template<> struct FourthDensePt< 0,-1, 2> { enum { idx = 46 }; };
template<> struct FourthDensePt< 0, 1, 2> { enum { idx = 47 }; };
template<> struct FourthDensePt< 0, 2, 2> { enum { idx = 48 }; };

template<> struct FourthDensePt< 0,-2, 1> { enum { idx = 49 }; };
template<> struct FourthDensePt< 0,-1, 1> { enum { idx = 50 }; };
template<> struct FourthDensePt< 0, 1, 1> { enum { idx = 51 }; };
template<> struct FourthDensePt< 0, 2, 1> { enum { idx = 52 }; };

template<> struct FourthDensePt< 0,-2,-1> { enum { idx = 53 }; };
template<> struct FourthDensePt< 0,-1,-1> { enum { idx = 54 }; };
template<> struct FourthDensePt< 0, 1,-1> { enum { idx = 55 }; };
template<> struct FourthDensePt< 0, 2,-1> { enum { idx = 56 }; };

template<> struct FourthDensePt< 0,-2,-2> { enum { idx = 57 }; };
template<> struct FourthDensePt< 0,-1,-2> { enum { idx = 58 }; };
template<> struct FourthDensePt< 0, 1,-2> { enum { idx = 59 }; };
template<> struct FourthDensePt< 0, 2,-2> { enum { idx = 60 }; };

}
/// @brief Dense stencil of 61 points: the center plus every point within a
/// two-voxel radius in each of the three axis-aligned planes (XY, XZ and YZ)
/// through the center.  The buffer layout is given by the FourthDensePt map
/// defined in the anonymous namespace above.
template<typename GridT, bool IsSafe = true>
class FourthOrderDenseStencil
    : public BaseStencil<FourthOrderDenseStencil<GridT, IsSafe>, GridT, IsSafe>
{
    typedef FourthOrderDenseStencil<GridT, IsSafe> SelfT;
    typedef BaseStencil<SelfT, GridT, IsSafe >     BaseType;
public:
    typedef GridT                                  GridType;
    typedef typename GridT::TreeType               TreeType;
    typedef typename GridType::ValueType           ValueType;
    static const int SIZE = 61;
    FourthOrderDenseStencil(const GridType& grid): BaseType(grid, SIZE) {}
    /// Return linear offset for the specified stencil point relative to its center
    template<int i, int j, int k>
    unsigned int pos() const { return FourthDensePt<i,j,k>::idx; }
private:
    /// @brief Cache the 60 off-center neighbor values around @a ijk.
    /// @note The center value (buffer index 0) is not written here; it is
    /// presumably cached by the BaseStencil machinery that invokes init() —
    /// verify against the base class.
    inline void init(const Coord& ijk)
    {
        // XY-plane neighbors (k = 0)
        mValues[FourthDensePt<-2, 2, 0>::idx] = mAcc.getValue(ijk.offsetBy(-2, 2, 0));
        mValues[FourthDensePt<-1, 2, 0>::idx] = mAcc.getValue(ijk.offsetBy(-1, 2, 0));
        mValues[FourthDensePt< 0, 2, 0>::idx] = mAcc.getValue(ijk.offsetBy( 0, 2, 0));
        mValues[FourthDensePt< 1, 2, 0>::idx] = mAcc.getValue(ijk.offsetBy( 1, 2, 0));
        mValues[FourthDensePt< 2, 2, 0>::idx] = mAcc.getValue(ijk.offsetBy( 2, 2, 0));
        mValues[FourthDensePt<-2, 1, 0>::idx] = mAcc.getValue(ijk.offsetBy(-2, 1, 0));
        mValues[FourthDensePt<-1, 1, 0>::idx] = mAcc.getValue(ijk.offsetBy(-1, 1, 0));
        mValues[FourthDensePt< 0, 1, 0>::idx] = mAcc.getValue(ijk.offsetBy( 0, 1, 0));
        mValues[FourthDensePt< 1, 1, 0>::idx] = mAcc.getValue(ijk.offsetBy( 1, 1, 0));
        mValues[FourthDensePt< 2, 1, 0>::idx] = mAcc.getValue(ijk.offsetBy( 2, 1, 0));
        mValues[FourthDensePt<-2, 0, 0>::idx] = mAcc.getValue(ijk.offsetBy(-2, 0, 0));
        mValues[FourthDensePt<-1, 0, 0>::idx] = mAcc.getValue(ijk.offsetBy(-1, 0, 0));
        mValues[FourthDensePt< 1, 0, 0>::idx] = mAcc.getValue(ijk.offsetBy( 1, 0, 0));
        mValues[FourthDensePt< 2, 0, 0>::idx] = mAcc.getValue(ijk.offsetBy( 2, 0, 0));
        mValues[FourthDensePt<-2,-1, 0>::idx] = mAcc.getValue(ijk.offsetBy(-2,-1, 0));
        mValues[FourthDensePt<-1,-1, 0>::idx] = mAcc.getValue(ijk.offsetBy(-1,-1, 0));
        mValues[FourthDensePt< 0,-1, 0>::idx] = mAcc.getValue(ijk.offsetBy( 0,-1, 0));
        mValues[FourthDensePt< 1,-1, 0>::idx] = mAcc.getValue(ijk.offsetBy( 1,-1, 0));
        mValues[FourthDensePt< 2,-1, 0>::idx] = mAcc.getValue(ijk.offsetBy( 2,-1, 0));
        mValues[FourthDensePt<-2,-2, 0>::idx] = mAcc.getValue(ijk.offsetBy(-2,-2, 0));
        mValues[FourthDensePt<-1,-2, 0>::idx] = mAcc.getValue(ijk.offsetBy(-1,-2, 0));
        mValues[FourthDensePt< 0,-2, 0>::idx] = mAcc.getValue(ijk.offsetBy( 0,-2, 0));
        mValues[FourthDensePt< 1,-2, 0>::idx] = mAcc.getValue(ijk.offsetBy( 1,-2, 0));
        mValues[FourthDensePt< 2,-2, 0>::idx] = mAcc.getValue(ijk.offsetBy( 2,-2, 0));
        // XZ-plane neighbors (j = 0)
        mValues[FourthDensePt<-2, 0, 2>::idx] = mAcc.getValue(ijk.offsetBy(-2, 0, 2));
        mValues[FourthDensePt<-1, 0, 2>::idx] = mAcc.getValue(ijk.offsetBy(-1, 0, 2));
        mValues[FourthDensePt< 0, 0, 2>::idx] = mAcc.getValue(ijk.offsetBy( 0, 0, 2));
        mValues[FourthDensePt< 1, 0, 2>::idx] = mAcc.getValue(ijk.offsetBy( 1, 0, 2));
        mValues[FourthDensePt< 2, 0, 2>::idx] = mAcc.getValue(ijk.offsetBy( 2, 0, 2));
        mValues[FourthDensePt<-2, 0, 1>::idx] = mAcc.getValue(ijk.offsetBy(-2, 0, 1));
        mValues[FourthDensePt<-1, 0, 1>::idx] = mAcc.getValue(ijk.offsetBy(-1, 0, 1));
        mValues[FourthDensePt< 0, 0, 1>::idx] = mAcc.getValue(ijk.offsetBy( 0, 0, 1));
        mValues[FourthDensePt< 1, 0, 1>::idx] = mAcc.getValue(ijk.offsetBy( 1, 0, 1));
        mValues[FourthDensePt< 2, 0, 1>::idx] = mAcc.getValue(ijk.offsetBy( 2, 0, 1));
        mValues[FourthDensePt<-2, 0,-1>::idx] = mAcc.getValue(ijk.offsetBy(-2, 0,-1));
        mValues[FourthDensePt<-1, 0,-1>::idx] = mAcc.getValue(ijk.offsetBy(-1, 0,-1));
        mValues[FourthDensePt< 0, 0,-1>::idx] = mAcc.getValue(ijk.offsetBy( 0, 0,-1));
        mValues[FourthDensePt< 1, 0,-1>::idx] = mAcc.getValue(ijk.offsetBy( 1, 0,-1));
        mValues[FourthDensePt< 2, 0,-1>::idx] = mAcc.getValue(ijk.offsetBy( 2, 0,-1));
        mValues[FourthDensePt<-2, 0,-2>::idx] = mAcc.getValue(ijk.offsetBy(-2, 0,-2));
        mValues[FourthDensePt<-1, 0,-2>::idx] = mAcc.getValue(ijk.offsetBy(-1, 0,-2));
        mValues[FourthDensePt< 0, 0,-2>::idx] = mAcc.getValue(ijk.offsetBy( 0, 0,-2));
        mValues[FourthDensePt< 1, 0,-2>::idx] = mAcc.getValue(ijk.offsetBy( 1, 0,-2));
        mValues[FourthDensePt< 2, 0,-2>::idx] = mAcc.getValue(ijk.offsetBy( 2, 0,-2));
        // YZ-plane neighbors (i = 0)
        mValues[FourthDensePt< 0,-2, 2>::idx] = mAcc.getValue(ijk.offsetBy( 0,-2, 2));
        mValues[FourthDensePt< 0,-1, 2>::idx] = mAcc.getValue(ijk.offsetBy( 0,-1, 2));
        mValues[FourthDensePt< 0, 1, 2>::idx] = mAcc.getValue(ijk.offsetBy( 0, 1, 2));
        mValues[FourthDensePt< 0, 2, 2>::idx] = mAcc.getValue(ijk.offsetBy( 0, 2, 2));
        mValues[FourthDensePt< 0,-2, 1>::idx] = mAcc.getValue(ijk.offsetBy( 0,-2, 1));
        mValues[FourthDensePt< 0,-1, 1>::idx] = mAcc.getValue(ijk.offsetBy( 0,-1, 1));
        mValues[FourthDensePt< 0, 1, 1>::idx] = mAcc.getValue(ijk.offsetBy( 0, 1, 1));
        mValues[FourthDensePt< 0, 2, 1>::idx] = mAcc.getValue(ijk.offsetBy( 0, 2, 1));
        mValues[FourthDensePt< 0,-2,-1>::idx] = mAcc.getValue(ijk.offsetBy( 0,-2,-1));
        mValues[FourthDensePt< 0,-1,-1>::idx] = mAcc.getValue(ijk.offsetBy( 0,-1,-1));
        mValues[FourthDensePt< 0, 1,-1>::idx] = mAcc.getValue(ijk.offsetBy( 0, 1,-1));
        mValues[FourthDensePt< 0, 2,-1>::idx] = mAcc.getValue(ijk.offsetBy( 0, 2,-1));
        mValues[FourthDensePt< 0,-2,-2>::idx] = mAcc.getValue(ijk.offsetBy( 0,-2,-2));
        mValues[FourthDensePt< 0,-1,-2>::idx] = mAcc.getValue(ijk.offsetBy( 0,-1,-2));
        mValues[FourthDensePt< 0, 1,-2>::idx] = mAcc.getValue(ijk.offsetBy( 0, 1,-2));
        mValues[FourthDensePt< 0, 2,-2>::idx] = mAcc.getValue(ijk.offsetBy( 0, 2,-2));
    }
    template<typename, typename, bool> friend class BaseStencil; // allow base class to call init()
    using BaseType::mAcc;
    using BaseType::mValues;
};// FourthOrderDenseStencil class
////////////////////////////////////////
namespace { // anonymous namespace for stencil-layout map
    // the dense point stencil
    // Layout: center first, then the +1/-1, +2/-2 and +3/-3 shells along
    // each of the three coordinate axes.
    template<int i, int j, int k> struct NineteenPt {};
    template<> struct NineteenPt< 0, 0, 0> { enum { idx = 0 }; };
    template<> struct NineteenPt< 1, 0, 0> { enum { idx = 1 }; };
    template<> struct NineteenPt< 0, 1, 0> { enum { idx = 2 }; };
    template<> struct NineteenPt< 0, 0, 1> { enum { idx = 3 }; };
    template<> struct NineteenPt<-1, 0, 0> { enum { idx = 4 }; };
    template<> struct NineteenPt< 0,-1, 0> { enum { idx = 5 }; };
    template<> struct NineteenPt< 0, 0,-1> { enum { idx = 6 }; };
    template<> struct NineteenPt< 2, 0, 0> { enum { idx = 7 }; };
    template<> struct NineteenPt< 0, 2, 0> { enum { idx = 8 }; };
    template<> struct NineteenPt< 0, 0, 2> { enum { idx = 9 }; };
    template<> struct NineteenPt<-2, 0, 0> { enum { idx = 10 }; };
    template<> struct NineteenPt< 0,-2, 0> { enum { idx = 11 }; };
    template<> struct NineteenPt< 0, 0,-2> { enum { idx = 12 }; };
    template<> struct NineteenPt< 3, 0, 0> { enum { idx = 13 }; };
    template<> struct NineteenPt< 0, 3, 0> { enum { idx = 14 }; };
    template<> struct NineteenPt< 0, 0, 3> { enum { idx = 15 }; };
    template<> struct NineteenPt<-3, 0, 0> { enum { idx = 16 }; };
    template<> struct NineteenPt< 0,-3, 0> { enum { idx = 17 }; };
    template<> struct NineteenPt< 0, 0,-3> { enum { idx = 18 }; };
} // anonymous namespace
/// @brief Axis-aligned stencil of 19 points: the center plus the six
/// neighbors at distances 1, 2 and 3 along each coordinate axis.
/// The buffer layout is given by the NineteenPt map defined above.
template<typename GridT, bool IsSafe = true>
class NineteenPointStencil
    : public BaseStencil<NineteenPointStencil<GridT, IsSafe>, GridT, IsSafe>
{
    typedef NineteenPointStencil<GridT, IsSafe> SelfT;
    typedef BaseStencil<SelfT, GridT, IsSafe >  BaseType;
public:
    typedef GridT                                  GridType;
    typedef typename GridT::TreeType               TreeType;
    typedef typename GridType::ValueType           ValueType;
    static const int SIZE = 19;
    NineteenPointStencil(const GridType& grid): BaseType(grid, SIZE) {}
    /// Return linear offset for the specified stencil point relative to its center
    template<int i, int j, int k>
    unsigned int pos() const { return NineteenPt<i,j,k>::idx; }
private:
    /// @brief Cache the 18 off-center neighbor values along the three axes.
    /// @note The center value (buffer index 0) is not written here; it is
    /// presumably cached by the BaseStencil machinery that invokes init() —
    /// verify against the base class.
    inline void init(const Coord& ijk)
    {
        // x-axis neighbors
        mValues[NineteenPt< 3, 0, 0>::idx] = mAcc.getValue(ijk.offsetBy( 3, 0, 0));
        mValues[NineteenPt< 2, 0, 0>::idx] = mAcc.getValue(ijk.offsetBy( 2, 0, 0));
        mValues[NineteenPt< 1, 0, 0>::idx] = mAcc.getValue(ijk.offsetBy( 1, 0, 0));
        mValues[NineteenPt<-1, 0, 0>::idx] = mAcc.getValue(ijk.offsetBy(-1, 0, 0));
        mValues[NineteenPt<-2, 0, 0>::idx] = mAcc.getValue(ijk.offsetBy(-2, 0, 0));
        mValues[NineteenPt<-3, 0, 0>::idx] = mAcc.getValue(ijk.offsetBy(-3, 0, 0));
        // y-axis neighbors
        mValues[NineteenPt< 0, 3, 0>::idx] = mAcc.getValue(ijk.offsetBy( 0, 3, 0));
        mValues[NineteenPt< 0, 2, 0>::idx] = mAcc.getValue(ijk.offsetBy( 0, 2, 0));
        mValues[NineteenPt< 0, 1, 0>::idx] = mAcc.getValue(ijk.offsetBy( 0, 1, 0));
        mValues[NineteenPt< 0,-1, 0>::idx] = mAcc.getValue(ijk.offsetBy( 0, -1, 0));
        mValues[NineteenPt< 0,-2, 0>::idx] = mAcc.getValue(ijk.offsetBy( 0, -2, 0));
        mValues[NineteenPt< 0,-3, 0>::idx] = mAcc.getValue(ijk.offsetBy( 0, -3, 0));
        // z-axis neighbors
        mValues[NineteenPt< 0, 0, 3>::idx] = mAcc.getValue(ijk.offsetBy( 0, 0, 3));
        mValues[NineteenPt< 0, 0, 2>::idx] = mAcc.getValue(ijk.offsetBy( 0, 0, 2));
        mValues[NineteenPt< 0, 0, 1>::idx] = mAcc.getValue(ijk.offsetBy( 0, 0, 1));
        mValues[NineteenPt< 0, 0,-1>::idx] = mAcc.getValue(ijk.offsetBy( 0, 0, -1));
        mValues[NineteenPt< 0, 0,-2>::idx] = mAcc.getValue(ijk.offsetBy( 0, 0, -2));
        mValues[NineteenPt< 0, 0,-3>::idx] = mAcc.getValue(ijk.offsetBy( 0, 0, -3));
    }
    template<typename, typename, bool> friend class BaseStencil; // allow base class to call init()
    using BaseType::mAcc;
    using BaseType::mValues;
};// NineteenPointStencil class
////////////////////////////////////////
namespace { // anonymous namespace for stencil-layout map
    // the 6th-order dense point stencil (center plus all points within a
    // three-voxel radius in the XY, XZ and YZ planes through the center)
    template<int i, int j, int k> struct SixthDensePt { };
    template<> struct SixthDensePt< 0, 0, 0> { enum { idx = 0 }; };
    // XY-plane points (k = 0), indices 1-48
    template<> struct SixthDensePt<-3, 3, 0> { enum { idx = 1 }; };
    template<> struct SixthDensePt<-2, 3, 0> { enum { idx = 2 }; };
    template<> struct SixthDensePt<-1, 3, 0> { enum { idx = 3 }; };
    template<> struct SixthDensePt< 0, 3, 0> { enum { idx = 4 }; };
    template<> struct SixthDensePt< 1, 3, 0> { enum { idx = 5 }; };
    template<> struct SixthDensePt< 2, 3, 0> { enum { idx = 6 }; };
    template<> struct SixthDensePt< 3, 3, 0> { enum { idx = 7 }; };
    template<> struct SixthDensePt<-3, 2, 0> { enum { idx = 8 }; };
    template<> struct SixthDensePt<-2, 2, 0> { enum { idx = 9 }; };
    template<> struct SixthDensePt<-1, 2, 0> { enum { idx = 10 }; };
    template<> struct SixthDensePt< 0, 2, 0> { enum { idx = 11 }; };
    template<> struct SixthDensePt< 1, 2, 0> { enum { idx = 12 }; };
    template<> struct SixthDensePt< 2, 2, 0> { enum { idx = 13 }; };
    template<> struct SixthDensePt< 3, 2, 0> { enum { idx = 14 }; };
    template<> struct SixthDensePt<-3, 1, 0> { enum { idx = 15 }; };
    template<> struct SixthDensePt<-2, 1, 0> { enum { idx = 16 }; };
    template<> struct SixthDensePt<-1, 1, 0> { enum { idx = 17 }; };
    template<> struct SixthDensePt< 0, 1, 0> { enum { idx = 18 }; };
    template<> struct SixthDensePt< 1, 1, 0> { enum { idx = 19 }; };
    template<> struct SixthDensePt< 2, 1, 0> { enum { idx = 20 }; };
    template<> struct SixthDensePt< 3, 1, 0> { enum { idx = 21 }; };
    template<> struct SixthDensePt<-3, 0, 0> { enum { idx = 22 }; };
    template<> struct SixthDensePt<-2, 0, 0> { enum { idx = 23 }; };
    template<> struct SixthDensePt<-1, 0, 0> { enum { idx = 24 }; };
    template<> struct SixthDensePt< 1, 0, 0> { enum { idx = 25 }; };
    template<> struct SixthDensePt< 2, 0, 0> { enum { idx = 26 }; };
    template<> struct SixthDensePt< 3, 0, 0> { enum { idx = 27 }; };
    template<> struct SixthDensePt<-3,-1, 0> { enum { idx = 28 }; };
    template<> struct SixthDensePt<-2,-1, 0> { enum { idx = 29 }; };
    template<> struct SixthDensePt<-1,-1, 0> { enum { idx = 30 }; };
    template<> struct SixthDensePt< 0,-1, 0> { enum { idx = 31 }; };
    template<> struct SixthDensePt< 1,-1, 0> { enum { idx = 32 }; };
    template<> struct SixthDensePt< 2,-1, 0> { enum { idx = 33 }; };
    template<> struct SixthDensePt< 3,-1, 0> { enum { idx = 34 }; };
    template<> struct SixthDensePt<-3,-2, 0> { enum { idx = 35 }; };
    template<> struct SixthDensePt<-2,-2, 0> { enum { idx = 36 }; };
    template<> struct SixthDensePt<-1,-2, 0> { enum { idx = 37 }; };
    template<> struct SixthDensePt< 0,-2, 0> { enum { idx = 38 }; };
    template<> struct SixthDensePt< 1,-2, 0> { enum { idx = 39 }; };
    template<> struct SixthDensePt< 2,-2, 0> { enum { idx = 40 }; };
    template<> struct SixthDensePt< 3,-2, 0> { enum { idx = 41 }; };
    template<> struct SixthDensePt<-3,-3, 0> { enum { idx = 42 }; };
    template<> struct SixthDensePt<-2,-3, 0> { enum { idx = 43 }; };
    template<> struct SixthDensePt<-1,-3, 0> { enum { idx = 44 }; };
    template<> struct SixthDensePt< 0,-3, 0> { enum { idx = 45 }; };
    template<> struct SixthDensePt< 1,-3, 0> { enum { idx = 46 }; };
    template<> struct SixthDensePt< 2,-3, 0> { enum { idx = 47 }; };
    template<> struct SixthDensePt< 3,-3, 0> { enum { idx = 48 }; };
    // XZ-plane points (j = 0), indices 49-90
    template<> struct SixthDensePt<-3, 0, 3> { enum { idx = 49 }; };
    template<> struct SixthDensePt<-2, 0, 3> { enum { idx = 50 }; };
    template<> struct SixthDensePt<-1, 0, 3> { enum { idx = 51 }; };
    template<> struct SixthDensePt< 0, 0, 3> { enum { idx = 52 }; };
    template<> struct SixthDensePt< 1, 0, 3> { enum { idx = 53 }; };
    template<> struct SixthDensePt< 2, 0, 3> { enum { idx = 54 }; };
    template<> struct SixthDensePt< 3, 0, 3> { enum { idx = 55 }; };
    template<> struct SixthDensePt<-3, 0, 2> { enum { idx = 56 }; };
    template<> struct SixthDensePt<-2, 0, 2> { enum { idx = 57 }; };
    template<> struct SixthDensePt<-1, 0, 2> { enum { idx = 58 }; };
    template<> struct SixthDensePt< 0, 0, 2> { enum { idx = 59 }; };
    template<> struct SixthDensePt< 1, 0, 2> { enum { idx = 60 }; };
    template<> struct SixthDensePt< 2, 0, 2> { enum { idx = 61 }; };
    template<> struct SixthDensePt< 3, 0, 2> { enum { idx = 62 }; };
    template<> struct SixthDensePt<-3, 0, 1> { enum { idx = 63 }; };
    template<> struct SixthDensePt<-2, 0, 1> { enum { idx = 64 }; };
    template<> struct SixthDensePt<-1, 0, 1> { enum { idx = 65 }; };
    template<> struct SixthDensePt< 0, 0, 1> { enum { idx = 66 }; };
    template<> struct SixthDensePt< 1, 0, 1> { enum { idx = 67 }; };
    template<> struct SixthDensePt< 2, 0, 1> { enum { idx = 68 }; };
    template<> struct SixthDensePt< 3, 0, 1> { enum { idx = 69 }; };
    template<> struct SixthDensePt<-3, 0,-1> { enum { idx = 70 }; };
    template<> struct SixthDensePt<-2, 0,-1> { enum { idx = 71 }; };
    template<> struct SixthDensePt<-1, 0,-1> { enum { idx = 72 }; };
    template<> struct SixthDensePt< 0, 0,-1> { enum { idx = 73 }; };
    template<> struct SixthDensePt< 1, 0,-1> { enum { idx = 74 }; };
    template<> struct SixthDensePt< 2, 0,-1> { enum { idx = 75 }; };
    template<> struct SixthDensePt< 3, 0,-1> { enum { idx = 76 }; };
    template<> struct SixthDensePt<-3, 0,-2> { enum { idx = 77 }; };
    template<> struct SixthDensePt<-2, 0,-2> { enum { idx = 78 }; };
    template<> struct SixthDensePt<-1, 0,-2> { enum { idx = 79 }; };
    template<> struct SixthDensePt< 0, 0,-2> { enum { idx = 80 }; };
    template<> struct SixthDensePt< 1, 0,-2> { enum { idx = 81 }; };
    template<> struct SixthDensePt< 2, 0,-2> { enum { idx = 82 }; };
    template<> struct SixthDensePt< 3, 0,-2> { enum { idx = 83 }; };
    template<> struct SixthDensePt<-3, 0,-3> { enum { idx = 84 }; };
    template<> struct SixthDensePt<-2, 0,-3> { enum { idx = 85 }; };
    template<> struct SixthDensePt<-1, 0,-3> { enum { idx = 86 }; };
    template<> struct SixthDensePt< 0, 0,-3> { enum { idx = 87 }; };
    template<> struct SixthDensePt< 1, 0,-3> { enum { idx = 88 }; };
    template<> struct SixthDensePt< 2, 0,-3> { enum { idx = 89 }; };
    template<> struct SixthDensePt< 3, 0,-3> { enum { idx = 90 }; };
    // YZ-plane points (i = 0), indices 91-126
    template<> struct SixthDensePt< 0,-3, 3> { enum { idx = 91 }; };
    template<> struct SixthDensePt< 0,-2, 3> { enum { idx = 92 }; };
    template<> struct SixthDensePt< 0,-1, 3> { enum { idx = 93 }; };
    template<> struct SixthDensePt< 0, 1, 3> { enum { idx = 94 }; };
    template<> struct SixthDensePt< 0, 2, 3> { enum { idx = 95 }; };
    template<> struct SixthDensePt< 0, 3, 3> { enum { idx = 96 }; };
    template<> struct SixthDensePt< 0,-3, 2> { enum { idx = 97 }; };
    template<> struct SixthDensePt< 0,-2, 2> { enum { idx = 98 }; };
    template<> struct SixthDensePt< 0,-1, 2> { enum { idx = 99 }; };
    template<> struct SixthDensePt< 0, 1, 2> { enum { idx = 100 }; };
    template<> struct SixthDensePt< 0, 2, 2> { enum { idx = 101 }; };
    template<> struct SixthDensePt< 0, 3, 2> { enum { idx = 102 }; };
    template<> struct SixthDensePt< 0,-3, 1> { enum { idx = 103 }; };
    template<> struct SixthDensePt< 0,-2, 1> { enum { idx = 104 }; };
    template<> struct SixthDensePt< 0,-1, 1> { enum { idx = 105 }; };
    template<> struct SixthDensePt< 0, 1, 1> { enum { idx = 106 }; };
    template<> struct SixthDensePt< 0, 2, 1> { enum { idx = 107 }; };
    template<> struct SixthDensePt< 0, 3, 1> { enum { idx = 108 }; };
    template<> struct SixthDensePt< 0,-3,-1> { enum { idx = 109 }; };
    template<> struct SixthDensePt< 0,-2,-1> { enum { idx = 110 }; };
    template<> struct SixthDensePt< 0,-1,-1> { enum { idx = 111 }; };
    template<> struct SixthDensePt< 0, 1,-1> { enum { idx = 112 }; };
    template<> struct SixthDensePt< 0, 2,-1> { enum { idx = 113 }; };
    template<> struct SixthDensePt< 0, 3,-1> { enum { idx = 114 }; };
    template<> struct SixthDensePt< 0,-3,-2> { enum { idx = 115 }; };
    template<> struct SixthDensePt< 0,-2,-2> { enum { idx = 116 }; };
    template<> struct SixthDensePt< 0,-1,-2> { enum { idx = 117 }; };
    template<> struct SixthDensePt< 0, 1,-2> { enum { idx = 118 }; };
    template<> struct SixthDensePt< 0, 2,-2> { enum { idx = 119 }; };
    template<> struct SixthDensePt< 0, 3,-2> { enum { idx = 120 }; };
    template<> struct SixthDensePt< 0,-3,-3> { enum { idx = 121 }; };
    template<> struct SixthDensePt< 0,-2,-3> { enum { idx = 122 }; };
    template<> struct SixthDensePt< 0,-1,-3> { enum { idx = 123 }; };
    template<> struct SixthDensePt< 0, 1,-3> { enum { idx = 124 }; };
    template<> struct SixthDensePt< 0, 2,-3> { enum { idx = 125 }; };
    template<> struct SixthDensePt< 0, 3,-3> { enum { idx = 126 }; };
} // anonymous namespace
/// @brief Dense stencil of 127 points: the center plus every point within a
/// three-voxel radius in each of the three axis-aligned planes (XY, XZ and
/// YZ) through the center.  The buffer layout is given by the SixthDensePt
/// map defined in the anonymous namespace above.
template<typename GridT, bool IsSafe = true>
class SixthOrderDenseStencil
    : public BaseStencil<SixthOrderDenseStencil<GridT, IsSafe>, GridT, IsSafe>
{
    typedef SixthOrderDenseStencil<GridT, IsSafe> SelfT;
    typedef BaseStencil<SelfT, GridT, IsSafe >    BaseType;
public:
    typedef GridT                                  GridType;
    typedef typename GridT::TreeType               TreeType;
    typedef typename GridType::ValueType           ValueType;
    static const int SIZE = 127;
    SixthOrderDenseStencil(const GridType& grid): BaseType(grid, SIZE) {}
    /// Return linear offset for the specified stencil point relative to its center
    template<int i, int j, int k>
    unsigned int pos() const { return SixthDensePt<i,j,k>::idx; }
private:
    /// @brief Cache the 126 off-center neighbor values around @a ijk.
    /// @note The center value (buffer index 0) is not written here; it is
    /// presumably cached by the BaseStencil machinery that invokes init() —
    /// verify against the base class.
    inline void init(const Coord& ijk)
    {
        // XY-plane neighbors (k = 0)
        mValues[SixthDensePt<-3, 3, 0>::idx] = mAcc.getValue(ijk.offsetBy(-3, 3, 0));
        mValues[SixthDensePt<-2, 3, 0>::idx] = mAcc.getValue(ijk.offsetBy(-2, 3, 0));
        mValues[SixthDensePt<-1, 3, 0>::idx] = mAcc.getValue(ijk.offsetBy(-1, 3, 0));
        mValues[SixthDensePt< 0, 3, 0>::idx] = mAcc.getValue(ijk.offsetBy( 0, 3, 0));
        mValues[SixthDensePt< 1, 3, 0>::idx] = mAcc.getValue(ijk.offsetBy( 1, 3, 0));
        mValues[SixthDensePt< 2, 3, 0>::idx] = mAcc.getValue(ijk.offsetBy( 2, 3, 0));
        mValues[SixthDensePt< 3, 3, 0>::idx] = mAcc.getValue(ijk.offsetBy( 3, 3, 0));
        mValues[SixthDensePt<-3, 2, 0>::idx] = mAcc.getValue(ijk.offsetBy(-3, 2, 0));
        mValues[SixthDensePt<-2, 2, 0>::idx] = mAcc.getValue(ijk.offsetBy(-2, 2, 0));
        mValues[SixthDensePt<-1, 2, 0>::idx] = mAcc.getValue(ijk.offsetBy(-1, 2, 0));
        mValues[SixthDensePt< 0, 2, 0>::idx] = mAcc.getValue(ijk.offsetBy( 0, 2, 0));
        mValues[SixthDensePt< 1, 2, 0>::idx] = mAcc.getValue(ijk.offsetBy( 1, 2, 0));
        mValues[SixthDensePt< 2, 2, 0>::idx] = mAcc.getValue(ijk.offsetBy( 2, 2, 0));
        mValues[SixthDensePt< 3, 2, 0>::idx] = mAcc.getValue(ijk.offsetBy( 3, 2, 0));
        mValues[SixthDensePt<-3, 1, 0>::idx] = mAcc.getValue(ijk.offsetBy(-3, 1, 0));
        mValues[SixthDensePt<-2, 1, 0>::idx] = mAcc.getValue(ijk.offsetBy(-2, 1, 0));
        mValues[SixthDensePt<-1, 1, 0>::idx] = mAcc.getValue(ijk.offsetBy(-1, 1, 0));
        mValues[SixthDensePt< 0, 1, 0>::idx] = mAcc.getValue(ijk.offsetBy( 0, 1, 0));
        mValues[SixthDensePt< 1, 1, 0>::idx] = mAcc.getValue(ijk.offsetBy( 1, 1, 0));
        mValues[SixthDensePt< 2, 1, 0>::idx] = mAcc.getValue(ijk.offsetBy( 2, 1, 0));
        mValues[SixthDensePt< 3, 1, 0>::idx] = mAcc.getValue(ijk.offsetBy( 3, 1, 0));
        mValues[SixthDensePt<-3, 0, 0>::idx] = mAcc.getValue(ijk.offsetBy(-3, 0, 0));
        mValues[SixthDensePt<-2, 0, 0>::idx] = mAcc.getValue(ijk.offsetBy(-2, 0, 0));
        mValues[SixthDensePt<-1, 0, 0>::idx] = mAcc.getValue(ijk.offsetBy(-1, 0, 0));
        mValues[SixthDensePt< 1, 0, 0>::idx] = mAcc.getValue(ijk.offsetBy( 1, 0, 0));
        mValues[SixthDensePt< 2, 0, 0>::idx] = mAcc.getValue(ijk.offsetBy( 2, 0, 0));
        mValues[SixthDensePt< 3, 0, 0>::idx] = mAcc.getValue(ijk.offsetBy( 3, 0, 0));
        mValues[SixthDensePt<-3,-1, 0>::idx] = mAcc.getValue(ijk.offsetBy(-3,-1, 0));
        mValues[SixthDensePt<-2,-1, 0>::idx] = mAcc.getValue(ijk.offsetBy(-2,-1, 0));
        mValues[SixthDensePt<-1,-1, 0>::idx] = mAcc.getValue(ijk.offsetBy(-1,-1, 0));
        mValues[SixthDensePt< 0,-1, 0>::idx] = mAcc.getValue(ijk.offsetBy( 0,-1, 0));
        mValues[SixthDensePt< 1,-1, 0>::idx] = mAcc.getValue(ijk.offsetBy( 1,-1, 0));
        mValues[SixthDensePt< 2,-1, 0>::idx] = mAcc.getValue(ijk.offsetBy( 2,-1, 0));
        mValues[SixthDensePt< 3,-1, 0>::idx] = mAcc.getValue(ijk.offsetBy( 3,-1, 0));
        mValues[SixthDensePt<-3,-2, 0>::idx] = mAcc.getValue(ijk.offsetBy(-3,-2, 0));
        mValues[SixthDensePt<-2,-2, 0>::idx] = mAcc.getValue(ijk.offsetBy(-2,-2, 0));
        mValues[SixthDensePt<-1,-2, 0>::idx] = mAcc.getValue(ijk.offsetBy(-1,-2, 0));
        mValues[SixthDensePt< 0,-2, 0>::idx] = mAcc.getValue(ijk.offsetBy( 0,-2, 0));
        mValues[SixthDensePt< 1,-2, 0>::idx] = mAcc.getValue(ijk.offsetBy( 1,-2, 0));
        mValues[SixthDensePt< 2,-2, 0>::idx] = mAcc.getValue(ijk.offsetBy( 2,-2, 0));
        mValues[SixthDensePt< 3,-2, 0>::idx] = mAcc.getValue(ijk.offsetBy( 3,-2, 0));
        mValues[SixthDensePt<-3,-3, 0>::idx] = mAcc.getValue(ijk.offsetBy(-3,-3, 0));
        mValues[SixthDensePt<-2,-3, 0>::idx] = mAcc.getValue(ijk.offsetBy(-2,-3, 0));
        mValues[SixthDensePt<-1,-3, 0>::idx] = mAcc.getValue(ijk.offsetBy(-1,-3, 0));
        mValues[SixthDensePt< 0,-3, 0>::idx] = mAcc.getValue(ijk.offsetBy( 0,-3, 0));
        mValues[SixthDensePt< 1,-3, 0>::idx] = mAcc.getValue(ijk.offsetBy( 1,-3, 0));
        mValues[SixthDensePt< 2,-3, 0>::idx] = mAcc.getValue(ijk.offsetBy( 2,-3, 0));
        mValues[SixthDensePt< 3,-3, 0>::idx] = mAcc.getValue(ijk.offsetBy( 3,-3, 0));
        // XZ-plane neighbors (j = 0)
        mValues[SixthDensePt<-3, 0, 3>::idx] = mAcc.getValue(ijk.offsetBy(-3, 0, 3));
        mValues[SixthDensePt<-2, 0, 3>::idx] = mAcc.getValue(ijk.offsetBy(-2, 0, 3));
        mValues[SixthDensePt<-1, 0, 3>::idx] = mAcc.getValue(ijk.offsetBy(-1, 0, 3));
        mValues[SixthDensePt< 0, 0, 3>::idx] = mAcc.getValue(ijk.offsetBy( 0, 0, 3));
        mValues[SixthDensePt< 1, 0, 3>::idx] = mAcc.getValue(ijk.offsetBy( 1, 0, 3));
        mValues[SixthDensePt< 2, 0, 3>::idx] = mAcc.getValue(ijk.offsetBy( 2, 0, 3));
        mValues[SixthDensePt< 3, 0, 3>::idx] = mAcc.getValue(ijk.offsetBy( 3, 0, 3));
        mValues[SixthDensePt<-3, 0, 2>::idx] = mAcc.getValue(ijk.offsetBy(-3, 0, 2));
        mValues[SixthDensePt<-2, 0, 2>::idx] = mAcc.getValue(ijk.offsetBy(-2, 0, 2));
        mValues[SixthDensePt<-1, 0, 2>::idx] = mAcc.getValue(ijk.offsetBy(-1, 0, 2));
        mValues[SixthDensePt< 0, 0, 2>::idx] = mAcc.getValue(ijk.offsetBy( 0, 0, 2));
        mValues[SixthDensePt< 1, 0, 2>::idx] = mAcc.getValue(ijk.offsetBy( 1, 0, 2));
        mValues[SixthDensePt< 2, 0, 2>::idx] = mAcc.getValue(ijk.offsetBy( 2, 0, 2));
        mValues[SixthDensePt< 3, 0, 2>::idx] = mAcc.getValue(ijk.offsetBy( 3, 0, 2));
        mValues[SixthDensePt<-3, 0, 1>::idx] = mAcc.getValue(ijk.offsetBy(-3, 0, 1));
        mValues[SixthDensePt<-2, 0, 1>::idx] = mAcc.getValue(ijk.offsetBy(-2, 0, 1));
        mValues[SixthDensePt<-1, 0, 1>::idx] = mAcc.getValue(ijk.offsetBy(-1, 0, 1));
        mValues[SixthDensePt< 0, 0, 1>::idx] = mAcc.getValue(ijk.offsetBy( 0, 0, 1));
        mValues[SixthDensePt< 1, 0, 1>::idx] = mAcc.getValue(ijk.offsetBy( 1, 0, 1));
        mValues[SixthDensePt< 2, 0, 1>::idx] = mAcc.getValue(ijk.offsetBy( 2, 0, 1));
        mValues[SixthDensePt< 3, 0, 1>::idx] = mAcc.getValue(ijk.offsetBy( 3, 0, 1));
        mValues[SixthDensePt<-3, 0,-1>::idx] = mAcc.getValue(ijk.offsetBy(-3, 0,-1));
        mValues[SixthDensePt<-2, 0,-1>::idx] = mAcc.getValue(ijk.offsetBy(-2, 0,-1));
        mValues[SixthDensePt<-1, 0,-1>::idx] = mAcc.getValue(ijk.offsetBy(-1, 0,-1));
        mValues[SixthDensePt< 0, 0,-1>::idx] = mAcc.getValue(ijk.offsetBy( 0, 0,-1));
        mValues[SixthDensePt< 1, 0,-1>::idx] = mAcc.getValue(ijk.offsetBy( 1, 0,-1));
        mValues[SixthDensePt< 2, 0,-1>::idx] = mAcc.getValue(ijk.offsetBy( 2, 0,-1));
        mValues[SixthDensePt< 3, 0,-1>::idx] = mAcc.getValue(ijk.offsetBy( 3, 0,-1));
        mValues[SixthDensePt<-3, 0,-2>::idx] = mAcc.getValue(ijk.offsetBy(-3, 0,-2));
        mValues[SixthDensePt<-2, 0,-2>::idx] = mAcc.getValue(ijk.offsetBy(-2, 0,-2));
        mValues[SixthDensePt<-1, 0,-2>::idx] = mAcc.getValue(ijk.offsetBy(-1, 0,-2));
        mValues[SixthDensePt< 0, 0,-2>::idx] = mAcc.getValue(ijk.offsetBy( 0, 0,-2));
        mValues[SixthDensePt< 1, 0,-2>::idx] = mAcc.getValue(ijk.offsetBy( 1, 0,-2));
        mValues[SixthDensePt< 2, 0,-2>::idx] = mAcc.getValue(ijk.offsetBy( 2, 0,-2));
        mValues[SixthDensePt< 3, 0,-2>::idx] = mAcc.getValue(ijk.offsetBy( 3, 0,-2));
        mValues[SixthDensePt<-3, 0,-3>::idx] = mAcc.getValue(ijk.offsetBy(-3, 0,-3));
        mValues[SixthDensePt<-2, 0,-3>::idx] = mAcc.getValue(ijk.offsetBy(-2, 0,-3));
        mValues[SixthDensePt<-1, 0,-3>::idx] = mAcc.getValue(ijk.offsetBy(-1, 0,-3));
        mValues[SixthDensePt< 0, 0,-3>::idx] = mAcc.getValue(ijk.offsetBy( 0, 0,-3));
        mValues[SixthDensePt< 1, 0,-3>::idx] = mAcc.getValue(ijk.offsetBy( 1, 0,-3));
        mValues[SixthDensePt< 2, 0,-3>::idx] = mAcc.getValue(ijk.offsetBy( 2, 0,-3));
        mValues[SixthDensePt< 3, 0,-3>::idx] = mAcc.getValue(ijk.offsetBy( 3, 0,-3));
        // YZ-plane neighbors (i = 0)
        mValues[SixthDensePt< 0,-3, 3>::idx] = mAcc.getValue(ijk.offsetBy( 0,-3, 3));
        mValues[SixthDensePt< 0,-2, 3>::idx] = mAcc.getValue(ijk.offsetBy( 0,-2, 3));
        mValues[SixthDensePt< 0,-1, 3>::idx] = mAcc.getValue(ijk.offsetBy( 0,-1, 3));
        mValues[SixthDensePt< 0, 1, 3>::idx] = mAcc.getValue(ijk.offsetBy( 0, 1, 3));
        mValues[SixthDensePt< 0, 2, 3>::idx] = mAcc.getValue(ijk.offsetBy( 0, 2, 3));
        mValues[SixthDensePt< 0, 3, 3>::idx] = mAcc.getValue(ijk.offsetBy( 0, 3, 3));
        mValues[SixthDensePt< 0,-3, 2>::idx] = mAcc.getValue(ijk.offsetBy( 0,-3, 2));
        mValues[SixthDensePt< 0,-2, 2>::idx] = mAcc.getValue(ijk.offsetBy( 0,-2, 2));
        mValues[SixthDensePt< 0,-1, 2>::idx] = mAcc.getValue(ijk.offsetBy( 0,-1, 2));
        mValues[SixthDensePt< 0, 1, 2>::idx] = mAcc.getValue(ijk.offsetBy( 0, 1, 2));
        mValues[SixthDensePt< 0, 2, 2>::idx] = mAcc.getValue(ijk.offsetBy( 0, 2, 2));
        mValues[SixthDensePt< 0, 3, 2>::idx] = mAcc.getValue(ijk.offsetBy( 0, 3, 2));
        mValues[SixthDensePt< 0,-3, 1>::idx] = mAcc.getValue(ijk.offsetBy( 0,-3, 1));
        mValues[SixthDensePt< 0,-2, 1>::idx] = mAcc.getValue(ijk.offsetBy( 0,-2, 1));
        mValues[SixthDensePt< 0,-1, 1>::idx] = mAcc.getValue(ijk.offsetBy( 0,-1, 1));
        mValues[SixthDensePt< 0, 1, 1>::idx] = mAcc.getValue(ijk.offsetBy( 0, 1, 1));
        mValues[SixthDensePt< 0, 2, 1>::idx] = mAcc.getValue(ijk.offsetBy( 0, 2, 1));
        mValues[SixthDensePt< 0, 3, 1>::idx] = mAcc.getValue(ijk.offsetBy( 0, 3, 1));
        mValues[SixthDensePt< 0,-3,-1>::idx] = mAcc.getValue(ijk.offsetBy( 0,-3,-1));
        mValues[SixthDensePt< 0,-2,-1>::idx] = mAcc.getValue(ijk.offsetBy( 0,-2,-1));
        mValues[SixthDensePt< 0,-1,-1>::idx] = mAcc.getValue(ijk.offsetBy( 0,-1,-1));
        mValues[SixthDensePt< 0, 1,-1>::idx] = mAcc.getValue(ijk.offsetBy( 0, 1,-1));
        mValues[SixthDensePt< 0, 2,-1>::idx] = mAcc.getValue(ijk.offsetBy( 0, 2,-1));
        mValues[SixthDensePt< 0, 3,-1>::idx] = mAcc.getValue(ijk.offsetBy( 0, 3,-1));
        mValues[SixthDensePt< 0,-3,-2>::idx] = mAcc.getValue(ijk.offsetBy( 0,-3,-2));
        mValues[SixthDensePt< 0,-2,-2>::idx] = mAcc.getValue(ijk.offsetBy( 0,-2,-2));
        mValues[SixthDensePt< 0,-1,-2>::idx] = mAcc.getValue(ijk.offsetBy( 0,-1,-2));
        mValues[SixthDensePt< 0, 1,-2>::idx] = mAcc.getValue(ijk.offsetBy( 0, 1,-2));
        mValues[SixthDensePt< 0, 2,-2>::idx] = mAcc.getValue(ijk.offsetBy( 0, 2,-2));
        mValues[SixthDensePt< 0, 3,-2>::idx] = mAcc.getValue(ijk.offsetBy( 0, 3,-2));
        mValues[SixthDensePt< 0,-3,-3>::idx] = mAcc.getValue(ijk.offsetBy( 0,-3,-3));
        mValues[SixthDensePt< 0,-2,-3>::idx] = mAcc.getValue(ijk.offsetBy( 0,-2,-3));
        mValues[SixthDensePt< 0,-1,-3>::idx] = mAcc.getValue(ijk.offsetBy( 0,-1,-3));
        mValues[SixthDensePt< 0, 1,-3>::idx] = mAcc.getValue(ijk.offsetBy( 0, 1,-3));
        mValues[SixthDensePt< 0, 2,-3>::idx] = mAcc.getValue(ijk.offsetBy( 0, 2,-3));
        mValues[SixthDensePt< 0, 3,-3>::idx] = mAcc.getValue(ijk.offsetBy( 0, 3,-3));
    }
    template<typename, typename, bool> friend class BaseStencil; // allow base class to call init()
    using BaseType::mAcc;
    using BaseType::mValues;
};// SixthOrderDenseStencil class
//////////////////////////////////////////////////////////////////////
namespace { // anonymous namespace for stencil-layout map
    // the seven point stencil with a different layout from SevenPt
    // Layout: center at index 0; for each axis the negative neighbor sits at
    // the odd index and the positive neighbor at the following even index
    // (x: 1/2, y: 3/4, z: 5/6), so per-axis differences are adjacent pairs.
    template<int i, int j, int k> struct GradPt {};
    template<> struct GradPt< 0, 0, 0> { enum { idx = 0 }; };
    template<> struct GradPt< 1, 0, 0> { enum { idx = 2 }; };
    template<> struct GradPt< 0, 1, 0> { enum { idx = 4 }; };
    template<> struct GradPt< 0, 0, 1> { enum { idx = 6 }; };
    template<> struct GradPt<-1, 0, 0> { enum { idx = 1 }; };
    template<> struct GradPt< 0,-1, 0> { enum { idx = 3 }; };
    template<> struct GradPt< 0, 0,-1> { enum { idx = 5 }; };
} // anonymous namespace
/// This is a simple 7-point nearest neighbor stencil that supports
/// gradient by second-order central differencing, first-order upwinding,
/// Laplacian, closest-point transform and zero-crossing test.
///
/// @note For optimal random access performance this class
/// includes its own grid accessor.
template<typename GridT, bool IsSafe = true>
class GradStencil : public BaseStencil<GradStencil<GridT, IsSafe>, GridT, IsSafe>
{
    typedef GradStencil<GridT, IsSafe> SelfT;
    typedef BaseStencil<SelfT, GridT, IsSafe > BaseType;
public:
    typedef GridT GridType;
    typedef typename GridT::TreeType TreeType;
    typedef typename GridType::ValueType ValueType;
    // Seven values: the center voxel and its six face neighbors.
    // Buffer layout (established by init() below, indices via the GradPt
    // map defined elsewhere in this file):
    //   [0] center, [1]/[2] = -x/+x, [3]/[4] = -y/+y, [5]/[6] = -z/+z
    static const int SIZE = 7;
    /// @brief Construct from a grid; the voxel size is taken from the grid's
    /// transform (assumes uniform scaling — only voxelSize()[0] is used).
    GradStencil(const GridType& grid)
        : BaseType(grid, SIZE)
        , mInv2Dx(ValueType(0.5 / grid.voxelSize()[0])) // 1/(2dx)
        , mInvDx2(ValueType(4.0 * mInv2Dx * mInv2Dx))   // 1/(dx^2)
    {
    }
    /// @brief Construct from a grid with an explicitly specified voxel size @a dx.
    GradStencil(const GridType& grid, Real dx)
        : BaseType(grid, SIZE)
        , mInv2Dx(ValueType(0.5 / dx))                  // 1/(2dx)
        , mInvDx2(ValueType(4.0 * mInv2Dx * mInv2Dx))   // 1/(dx^2)
    {
    }
    /// @brief Return the norm square of the single-sided upwind gradient
    /// (computed via Godunov's scheme) at the previously buffered location.
    ///
    /// @note This method should not be called until the stencil
    /// buffer has been populated via a call to moveTo(ijk).
    inline ValueType normSqGrad() const
    {
        // The one-sided differences below are undivided (no 1/dx factor);
        // the resulting dx^2 scale is removed by the leading mInvDx2.
        return mInvDx2 * math::GodunovsNormSqrd(mValues[0] > zeroVal<ValueType>(),
                                                mValues[0] - mValues[1],
                                                mValues[2] - mValues[0],
                                                mValues[0] - mValues[3],
                                                mValues[4] - mValues[0],
                                                mValues[0] - mValues[5],
                                                mValues[6] - mValues[0]);
    }
    /// @brief Return the gradient computed at the previously buffered
    /// location by second order central differencing.
    ///
    /// @note This method should not be called until the stencil
    /// buffer has been populated via a call to moveTo(ijk).
    inline math::Vec3<ValueType> gradient() const
    {
        // Central difference per axis, scaled by 1/(2dx).
        return math::Vec3<ValueType>(mValues[2] - mValues[1],
                                     mValues[4] - mValues[3],
                                     mValues[6] - mValues[5])*mInv2Dx;
    }
    /// @brief Return the first-order upwind gradient corresponding to the direction V.
    ///
    /// @note This method should not be called until the stencil
    /// buffer has been populated via a call to moveTo(ijk).
    inline math::Vec3<ValueType> gradient(const math::Vec3<ValueType>& V) const
    {
        // Per axis, pick the backward difference when V points in the
        // positive direction, the forward difference otherwise.
        // 2*mInv2Dx == 1/dx, the scale for a one-sided difference.
        return math::Vec3<ValueType>(
            V[0]>0 ? mValues[0] - mValues[1] : mValues[2] - mValues[0],
            V[1]>0 ? mValues[0] - mValues[3] : mValues[4] - mValues[0],
            V[2]>0 ? mValues[0] - mValues[5] : mValues[6] - mValues[0])*2*mInv2Dx;
    }
    /// Return the Laplacian computed at the previously buffered
    /// location by second-order central differencing.
    inline ValueType laplacian() const
    {
        // Standard 7-point Laplacian, scaled by 1/dx^2.
        return mInvDx2 * (mValues[1] + mValues[2] +
                          mValues[3] + mValues[4] +
                          mValues[5] + mValues[6] - 6*mValues[0]);
    }
    /// Return @c true if the sign of the value at the center point of the stencil
    /// is different from the signs of any of its six nearest neighbors.
    inline bool zeroCrossing() const
    {
        const typename BaseType::BufferType& v = mValues;
        return (v[0]>0 ? (v[1]<0 || v[2]<0 || v[3]<0 || v[4]<0 || v[5]<0 || v[6]<0)
                       : (v[1]>0 || v[2]>0 || v[3]>0 || v[4]>0 || v[5]>0 || v[6]>0));
    }
    /// @brief Compute the closest-point transform to a level set.
    /// @return the closest point in index space to the surface
    /// from which the level set was derived.
    ///
    /// @note This method assumes that the grid represents a level set
    /// with distances in world units and a simple affine transform
    /// with uniform scaling.
    inline math::Vec3<ValueType> cpt()
    {
        const Coord& ijk = BaseType::getCenterCoord();
        const ValueType d = ValueType(mValues[0] * 0.5 * mInvDx2); // distance in voxels / (2dx^2)
        // Step from ijk against the (central-difference) gradient by the
        // signed distance: ijk - phi * grad(phi) (with dx factors folded into d).
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        const auto value = math::Vec3<ValueType>( ijk[0] - d*(mValues[2] - mValues[1]),
                                                  ijk[1] - d*(mValues[4] - mValues[3]),
                                                  ijk[2] - d*(mValues[6] - mValues[5]));
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END
        return value;
    }
    /// Return linear offset for the specified stencil point relative to its center
    template<int i, int j, int k>
    unsigned int pos() const { return GradPt<i,j,k>::idx; }
private:
    /// Populate the six face-neighbor values around @a ijk.
    /// (The center value mValues[0] is not set here — presumably cached by
    /// the BaseStencil when moveTo() is called; confirm in the base class.)
    inline void init(const Coord& ijk)
    {
        BaseType::template setValue<-1, 0, 0>(mAcc.getValue(ijk.offsetBy(-1, 0, 0)));
        BaseType::template setValue< 1, 0, 0>(mAcc.getValue(ijk.offsetBy( 1, 0, 0)));
        BaseType::template setValue< 0,-1, 0>(mAcc.getValue(ijk.offsetBy( 0,-1, 0)));
        BaseType::template setValue< 0, 1, 0>(mAcc.getValue(ijk.offsetBy( 0, 1, 0)));
        BaseType::template setValue< 0, 0,-1>(mAcc.getValue(ijk.offsetBy( 0, 0,-1)));
        BaseType::template setValue< 0, 0, 1>(mAcc.getValue(ijk.offsetBy( 0, 0, 1)));
    }
    template<typename, typename, bool> friend class BaseStencil; // allow base class to call init()
    using BaseType::mAcc;
    using BaseType::mValues;
    const ValueType mInv2Dx, mInvDx2; // 1/(2dx) and 1/(dx^2), precomputed
}; // GradStencil class
////////////////////////////////////////
/// @brief This is a special 19-point stencil that supports optimal fifth-order WENO
/// upwinding, second-order central differencing, Laplacian, and zero-crossing test.
///
/// @note For optimal random access performance this class
/// includes its own grid accessor.
template<typename GridT, bool IsSafe = true>
class WenoStencil: public BaseStencil<WenoStencil<GridT, IsSafe>, GridT, IsSafe>
{
    typedef WenoStencil<GridT, IsSafe> SelfT;
    typedef BaseStencil<SelfT, GridT, IsSafe > BaseType;
public:
    typedef GridT GridType;
    typedef typename GridT::TreeType TreeType;
    typedef typename GridType::ValueType ValueType;
    // 19 values: the center voxel and three neighbors on each side
    // along each axis. Buffer layout (established by init() below):
    //   [0]      center
    //   [1..6]   x offsets -3,-2,-1,+1,+2,+3
    //   [7..12]  y offsets -3,-2,-1,+1,+2,+3
    //   [13..18] z offsets -3,-2,-1,+1,+2,+3
    static const int SIZE = 19;
    /// @brief Construct from a grid; the voxel size is taken from the grid's
    /// transform (assumes uniform scaling — only voxelSize()[0] is used).
    WenoStencil(const GridType& grid)
        : BaseType(grid, SIZE)
        , mDx2(ValueType(math::Pow2(grid.voxelSize()[0]))) // dx^2
        , mInv2Dx(ValueType(0.5 / grid.voxelSize()[0]))    // 1/(2dx)
        , mInvDx2(ValueType(1.0 / mDx2))                   // 1/(dx^2)
    {
    }
    /// @brief Construct from a grid with an explicitly specified voxel size @a dx.
    WenoStencil(const GridType& grid, Real dx)
        : BaseType(grid, SIZE)
        , mDx2(ValueType(dx * dx))
        , mInv2Dx(ValueType(0.5 / dx))
        , mInvDx2(ValueType(1.0 / mDx2))
    {
    }
    /// @brief Return the norm-square of the WENO upwind gradient (computed via
    /// WENO upwinding and Godunov's scheme) at the previously buffered location.
    ///
    /// @note This method should not be called until the stencil
    /// buffer has been populated via a call to moveTo(ijk).
    inline ValueType normSqGrad(const ValueType &isoValue = zeroVal<ValueType>()) const
    {
        const typename BaseType::BufferType& v = mValues;
#ifdef DWA_OPENVDB
        // SSE optimized: pack the x/y/z first differences into Float4 lanes
        // (fourth lane is padding) and evaluate all three axes at once.
        const simd::Float4
            v1(v[2]-v[1], v[ 8]-v[ 7], v[14]-v[13], 0),
            v2(v[3]-v[2], v[ 9]-v[ 8], v[15]-v[14], 0),
            v3(v[0]-v[3], v[ 0]-v[ 9], v[ 0]-v[15], 0),
            v4(v[4]-v[0], v[10]-v[ 0], v[16]-v[ 0], 0),
            v5(v[5]-v[4], v[11]-v[10], v[17]-v[16], 0),
            v6(v[6]-v[5], v[12]-v[11], v[18]-v[17], 0),
            dP_m = math::WENO5(v1, v2, v3, v4, v5, mDx2),
            dP_p = math::WENO5(v6, v5, v4, v3, v2, mDx2);
        return mInvDx2 * math::GodunovsNormSqrd(mValues[0] > isoValue, dP_m, dP_p);
#else
        // Scalar path: one WENO5 reconstruction per axis and direction,
        // built from undivided first differences (dx factors cancel via mInvDx2).
        const Real
            dP_xm = math::WENO5(v[ 2]-v[ 1],v[ 3]-v[ 2],v[ 0]-v[ 3],v[ 4]-v[ 0],v[ 5]-v[ 4],mDx2),
            dP_xp = math::WENO5(v[ 6]-v[ 5],v[ 5]-v[ 4],v[ 4]-v[ 0],v[ 0]-v[ 3],v[ 3]-v[ 2],mDx2),
            dP_ym = math::WENO5(v[ 8]-v[ 7],v[ 9]-v[ 8],v[ 0]-v[ 9],v[10]-v[ 0],v[11]-v[10],mDx2),
            dP_yp = math::WENO5(v[12]-v[11],v[11]-v[10],v[10]-v[ 0],v[ 0]-v[ 9],v[ 9]-v[ 8],mDx2),
            dP_zm = math::WENO5(v[14]-v[13],v[15]-v[14],v[ 0]-v[15],v[16]-v[ 0],v[17]-v[16],mDx2),
            dP_zp = math::WENO5(v[18]-v[17],v[17]-v[16],v[16]-v[ 0],v[ 0]-v[15],v[15]-v[14],mDx2);
        return static_cast<ValueType>(
            mInvDx2*math::GodunovsNormSqrd(v[0]>isoValue, dP_xm, dP_xp, dP_ym, dP_yp, dP_zm, dP_zp));
#endif
    }
    /// Return the optimal fifth-order upwind gradient corresponding to the
    /// direction V.
    ///
    /// @note This method should not be called until the stencil
    /// buffer has been populated via a call to moveTo(ijk).
    inline math::Vec3<ValueType> gradient(const math::Vec3<ValueType>& V) const
    {
        const typename BaseType::BufferType& v = mValues;
        // Per axis, pick the minus- or plus-side WENO5 reconstruction based
        // on the sign of V; 2*mInv2Dx == 1/dx scales the undivided differences.
        return 2*mInv2Dx * math::Vec3<ValueType>(
            V[0]>0 ? math::WENO5(v[ 2]-v[ 1],v[ 3]-v[ 2],v[ 0]-v[ 3], v[ 4]-v[ 0],v[ 5]-v[ 4],mDx2)
                : math::WENO5(v[ 6]-v[ 5],v[ 5]-v[ 4],v[ 4]-v[ 0], v[ 0]-v[ 3],v[ 3]-v[ 2],mDx2),
            V[1]>0 ? math::WENO5(v[ 8]-v[ 7],v[ 9]-v[ 8],v[ 0]-v[ 9], v[10]-v[ 0],v[11]-v[10],mDx2)
                : math::WENO5(v[12]-v[11],v[11]-v[10],v[10]-v[ 0], v[ 0]-v[ 9],v[ 9]-v[ 8],mDx2),
            V[2]>0 ? math::WENO5(v[14]-v[13],v[15]-v[14],v[ 0]-v[15], v[16]-v[ 0],v[17]-v[16],mDx2)
                : math::WENO5(v[18]-v[17],v[17]-v[16],v[16]-v[ 0], v[ 0]-v[15],v[15]-v[14],mDx2));
    }
    /// Return the gradient computed at the previously buffered
    /// location by second-order central differencing.
    ///
    /// @note This method should not be called until the stencil
    /// buffer has been populated via a call to moveTo(ijk).
    inline math::Vec3<ValueType> gradient() const
    {
        // Only the +/-1 offsets ([3],[4],[9],[10],[15],[16]) are used here.
        return mInv2Dx * math::Vec3<ValueType>(mValues[ 4] - mValues[ 3],
                                               mValues[10] - mValues[ 9],
                                               mValues[16] - mValues[15]);
    }
    /// Return the Laplacian computed at the previously buffered
    /// location by second-order central differencing.
    ///
    /// @note This method should not be called until the stencil
    /// buffer has been populated via a call to moveTo(ijk).
    inline ValueType laplacian() const
    {
        return mInvDx2 * (
            mValues[ 3] + mValues[ 4] +
            mValues[ 9] + mValues[10] +
            mValues[15] + mValues[16] - 6*mValues[0]);
    }
    /// Return @c true if the sign of the value at the center point of the stencil
    /// differs from the sign of any of its six nearest neighbors
    inline bool zeroCrossing() const
    {
        const typename BaseType::BufferType& v = mValues;
        return (v[ 0]>0 ? (v[ 3]<0 || v[ 4]<0 || v[ 9]<0 || v[10]<0 || v[15]<0 || v[16]<0)
                        : (v[ 3]>0 || v[ 4]>0 || v[ 9]>0 || v[10]>0 || v[15]>0 || v[16]>0));
    }
private:
    /// Populate the 18 neighbor values around @a ijk (see the layout
    /// comment above SIZE). The center value is not set here.
    inline void init(const Coord& ijk)
    {
        mValues[ 1] = mAcc.getValue(ijk.offsetBy(-3,  0,  0));
        mValues[ 2] = mAcc.getValue(ijk.offsetBy(-2,  0,  0));
        mValues[ 3] = mAcc.getValue(ijk.offsetBy(-1,  0,  0));
        mValues[ 4] = mAcc.getValue(ijk.offsetBy( 1,  0,  0));
        mValues[ 5] = mAcc.getValue(ijk.offsetBy( 2,  0,  0));
        mValues[ 6] = mAcc.getValue(ijk.offsetBy( 3,  0,  0));
        mValues[ 7] = mAcc.getValue(ijk.offsetBy( 0, -3,  0));
        mValues[ 8] = mAcc.getValue(ijk.offsetBy( 0, -2,  0));
        mValues[ 9] = mAcc.getValue(ijk.offsetBy( 0, -1,  0));
        mValues[10] = mAcc.getValue(ijk.offsetBy( 0,  1,  0));
        mValues[11] = mAcc.getValue(ijk.offsetBy( 0,  2,  0));
        mValues[12] = mAcc.getValue(ijk.offsetBy( 0,  3,  0));
        mValues[13] = mAcc.getValue(ijk.offsetBy( 0,  0, -3));
        mValues[14] = mAcc.getValue(ijk.offsetBy( 0,  0, -2));
        mValues[15] = mAcc.getValue(ijk.offsetBy( 0,  0, -1));
        mValues[16] = mAcc.getValue(ijk.offsetBy( 0,  0,  1));
        mValues[17] = mAcc.getValue(ijk.offsetBy( 0,  0,  2));
        mValues[18] = mAcc.getValue(ijk.offsetBy( 0,  0,  3));
    }
    template<typename, typename, bool> friend class BaseStencil; // allow base class to call init()
    using BaseType::mAcc;
    using BaseType::mValues;
    const ValueType mDx2, mInv2Dx, mInvDx2; // dx^2, 1/(2dx), 1/(dx^2)
}; // WenoStencil class
//////////////////////////////////////////////////////////////////////
/// @brief 19-point stencil supporting mean, Gaussian, and principal curvature,
/// plus second-order gradient and Laplacian, at the buffered location.
template<typename GridT, bool IsSafe = true>
class CurvatureStencil: public BaseStencil<CurvatureStencil<GridT, IsSafe>, GridT, IsSafe>
{
    typedef CurvatureStencil<GridT, IsSafe> SelfT;
    typedef BaseStencil<SelfT, GridT, IsSafe> BaseType;
public:
    typedef GridT GridType;
    typedef typename GridT::TreeType TreeType;
    typedef typename GridT::ValueType ValueType;
    // 19 values: center, six face neighbors, and twelve edge (diagonal)
    // neighbors. Buffer layout (established by init() below):
    //   [0] center; [1]/[2] -x/+x; [3]/[4] -y/+y; [5]/[6] -z/+z;
    //   [7..10]  xy-plane diagonals (for Dxy);
    //   [11..14] xz-plane diagonals (for Dxz);
    //   [15..18] yz-plane diagonals (for Dyz).
    static const int SIZE = 19;
    /// @brief Construct from a grid; the voxel size is taken from the grid's
    /// transform (assumes uniform scaling — only voxelSize()[0] is used).
    CurvatureStencil(const GridType& grid)
        : BaseType(grid, SIZE)
        , mInv2Dx(ValueType(0.5 / grid.voxelSize()[0])) // 1/(2dx)
        , mInvDx2(ValueType(4.0 * mInv2Dx * mInv2Dx))   // 1/(dx^2)
    {
    }
    /// @brief Construct from a grid with an explicitly specified voxel size @a dx.
    CurvatureStencil(const GridType& grid, Real dx)
        : BaseType(grid, SIZE)
        , mInv2Dx(ValueType(0.5 / dx))
        , mInvDx2(ValueType(4.0 * mInv2Dx * mInv2Dx))
    {
    }
    /// @brief Return the mean curvature at the previously buffered location.
    ///
    /// @note This method should not be called until the stencil
    /// buffer has been populated via a call to moveTo(ijk).
    inline ValueType meanCurvature() const
    {
        Real alpha, normGrad;
        // Returns 0 when the gradient is (near) zero and curvature is undefined.
        return this->meanCurvature(alpha, normGrad) ?
               ValueType(alpha*mInv2Dx/math::Pow3(normGrad)) : 0;
    }
    /// @brief Return the Gaussian curvature at the previously buffered location.
    ///
    /// @note This method should not be called until the stencil
    /// buffer has been populated via a call to moveTo(ijk).
    inline ValueType gaussianCurvature() const
    {
        Real alpha, normGrad;
        return this->gaussianCurvature(alpha, normGrad) ?
               ValueType(alpha*mInvDx2/math::Pow4(normGrad)) : 0;
    }
    /// @brief Return both the mean and the Gaussian curvature at the
    /// previously buffered location.
    ///
    /// @note This method should not be called until the stencil
    /// buffer has been populated via a call to moveTo(ijk).
    inline void curvatures(ValueType &mean, ValueType& gauss) const
    {
        Real alphaM, alphaG, normGrad;
        if (this->curvatures(alphaM, alphaG, normGrad)) {
          mean  = ValueType(alphaM*mInv2Dx/math::Pow3(normGrad));
          gauss = ValueType(alphaG*mInvDx2/math::Pow4(normGrad));
        } else {
          mean = gauss = 0;
        }
    }
    /// Return the mean curvature multiplied by the norm of the
    /// central-difference gradient. This method is very useful for
    /// mean-curvature flow of level sets!
    ///
    /// @note This method should not be called until the stencil
    /// buffer has been populated via a call to moveTo(ijk).
    inline ValueType meanCurvatureNormGrad() const
    {
        Real alpha, normGrad;
        return this->meanCurvature(alpha, normGrad) ?
               ValueType(alpha*mInvDx2/(2*math::Pow2(normGrad))) : 0;
    }
    /// Return the Gaussian curvature multiplied by the norm of the
    /// central-difference gradient.
    ///
    /// @note This method should not be called until the stencil
    /// buffer has been populated via a call to moveTo(ijk).
    inline ValueType gaussianCurvatureNormGrad() const
    {
        Real alpha, normGrad;
        return this->gaussianCurvature(alpha, normGrad) ?
               ValueType(2*alpha*mInv2Dx*mInvDx2/math::Pow3(normGrad)) : 0;
    }
    /// @brief Return both the mean and the Gaussian curvature, each multiplied
    /// by the gradient norm, at the previously buffered location.
    ///
    /// @note This method should not be called until the stencil
    /// buffer has been populated via a call to moveTo(ijk).
    inline void curvaturesNormGrad(ValueType &mean, ValueType& gauss) const
    {
        Real alphaM, alphaG, normGrad;
        if (this->curvatures(alphaM, alphaG, normGrad)) {
          mean  = ValueType(alphaM*mInvDx2/(2*math::Pow2(normGrad)));
          gauss = ValueType(2*alphaG*mInv2Dx*mInvDx2/math::Pow3(normGrad));
        } else {
          mean = gauss = 0;
        }
    }
    /// @brief Return the pair (minimum, maximum) principal curvature at the
    /// previously buffered location.
    ///
    /// @note This method should not be called until the stencil
    /// buffer has been populated via a call to moveTo(ijk).
    inline std::pair<ValueType, ValueType> principalCurvatures() const
    {
        std::pair<ValueType, ValueType> pair(0, 0);// min, max
        Real alphaM, alphaG, normGrad;
        if (this->curvatures(alphaM, alphaG, normGrad)) {
          // Principal curvatures k1,k2 solve k^2 - 2*mean*k + gauss = 0.
          const Real mean = alphaM*mInv2Dx/math::Pow3(normGrad);
          const Real tmp = std::sqrt(mean*mean - alphaG*mInvDx2/math::Pow4(normGrad));
          pair.first  = ValueType(mean - tmp);
          pair.second = ValueType(mean + tmp);
        }
        return pair;// min, max
    }
    /// Return the Laplacian computed at the previously buffered
    /// location by second-order central differencing.
    ///
    /// @note This method should not be called until the stencil
    /// buffer has been populated via a call to moveTo(ijk).
    inline ValueType laplacian() const
    {
        return mInvDx2 * (
            mValues[1] + mValues[2] +
            mValues[3] + mValues[4] +
            mValues[5] + mValues[6] - 6*mValues[0]);
    }
    /// Return the gradient computed at the previously buffered
    /// location by second-order central differencing.
    ///
    /// @note This method should not be called until the stencil
    /// buffer has been populated via a call to moveTo(ijk).
    inline math::Vec3<ValueType> gradient() const
    {
        return math::Vec3<ValueType>(
            mValues[2] - mValues[1],
            mValues[4] - mValues[3],
            mValues[6] - mValues[5])*mInv2Dx;
    }
private:
    /// Populate the 18 neighbor values around @a ijk (see the layout
    /// comment above SIZE). The center value is not set here.
    inline void init(const Coord &ijk)
    {
        mValues[ 1] = mAcc.getValue(ijk.offsetBy(-1,  0,  0));
        mValues[ 2] = mAcc.getValue(ijk.offsetBy( 1,  0,  0));
        mValues[ 3] = mAcc.getValue(ijk.offsetBy( 0, -1,  0));
        mValues[ 4] = mAcc.getValue(ijk.offsetBy( 0,  1,  0));
        mValues[ 5] = mAcc.getValue(ijk.offsetBy( 0,  0, -1));
        mValues[ 6] = mAcc.getValue(ijk.offsetBy( 0,  0,  1));
        mValues[ 7] = mAcc.getValue(ijk.offsetBy(-1, -1,  0));
        mValues[ 8] = mAcc.getValue(ijk.offsetBy( 1, -1,  0));
        mValues[ 9] = mAcc.getValue(ijk.offsetBy(-1,  1,  0));
        mValues[10] = mAcc.getValue(ijk.offsetBy( 1,  1,  0));
        mValues[11] = mAcc.getValue(ijk.offsetBy(-1,  0, -1));
        mValues[12] = mAcc.getValue(ijk.offsetBy( 1,  0, -1));
        mValues[13] = mAcc.getValue(ijk.offsetBy(-1,  0,  1));
        mValues[14] = mAcc.getValue(ijk.offsetBy( 1,  0,  1));
        mValues[15] = mAcc.getValue(ijk.offsetBy( 0, -1, -1));
        mValues[16] = mAcc.getValue(ijk.offsetBy( 0,  1, -1));
        mValues[17] = mAcc.getValue(ijk.offsetBy( 0, -1,  1));
        mValues[18] = mAcc.getValue(ijk.offsetBy( 0,  1,  1));
    }
    // Undivided central first derivatives (multiply by 1/dx to scale):
    inline Real Dx()  const { return 0.5*(mValues[2] - mValues[1]); }// * 1/dx
    inline Real Dy()  const { return 0.5*(mValues[4] - mValues[3]); }// * 1/dx
    inline Real Dz()  const { return 0.5*(mValues[6] - mValues[5]); }// * 1/dx
    // Undivided second derivatives (multiply by 1/dx^2 to scale):
    inline Real Dxx() const { return mValues[2] - 2 * mValues[0] + mValues[1]; }// * 1/dx2
    inline Real Dyy() const { return mValues[4] - 2 * mValues[0] + mValues[3]; }// * 1/dx2
    inline Real Dzz() const { return mValues[6] - 2 * mValues[0] + mValues[5]; }// * 1/dx2
    // Undivided mixed derivatives from the plane diagonals:
    inline Real Dxy() const { return 0.25 * (mValues[10] - mValues[ 8] + mValues[ 7] - mValues[ 9]); }// * 1/dx2
    inline Real Dxz() const { return 0.25 * (mValues[14] - mValues[12] + mValues[11] - mValues[13]); }// * 1/dx2
    inline Real Dyz() const { return 0.25 * (mValues[18] - mValues[16] + mValues[15] - mValues[17]); }// * 1/dx2
    /// Compute the unnormalized mean-curvature numerator @a alpha and the
    /// undivided gradient norm @a normGrad.
    /// @return false (with outputs zeroed) if the gradient is near zero.
    inline bool meanCurvature(Real& alpha, Real& normGrad) const
    {
        // For performance all finite differences are unscaled wrt dx
        const Real Dx = this->Dx(), Dy = this->Dy(), Dz = this->Dz(),
                   Dx2 = Dx*Dx, Dy2 = Dy*Dy, Dz2 = Dz*Dz, normGrad2 = Dx2 + Dy2 + Dz2;
        if (normGrad2 <= math::Tolerance<Real>::value()) {
             alpha = normGrad = 0;
             return false;
        }
        const Real Dxx = this->Dxx(), Dyy = this->Dyy(), Dzz = this->Dzz();
        alpha = Dx2*(Dyy + Dzz) + Dy2*(Dxx + Dzz) + Dz2*(Dxx + Dyy) -
                2*(Dx*(Dy*this->Dxy() + Dz*this->Dxz()) + Dy*Dz*this->Dyz());// * 1/dx^4
        normGrad = std::sqrt(normGrad2); // * 1/dx
        return true;
    }
    /// Compute the unnormalized Gaussian-curvature numerator @a alpha and the
    /// undivided gradient norm @a normGrad.
    /// @return false (with outputs zeroed) if the gradient is near zero.
    inline bool gaussianCurvature(Real& alpha, Real& normGrad) const
    {
        // For performance all finite differences are unscaled wrt dx
        const Real Dx = this->Dx(), Dy = this->Dy(), Dz = this->Dz(),
                   Dx2 = Dx*Dx, Dy2 = Dy*Dy, Dz2 = Dz*Dz, normGrad2 = Dx2 + Dy2 + Dz2;
        if (normGrad2 <= math::Tolerance<Real>::value()) {
             alpha = normGrad = 0;
             return false;
        }
        const Real Dxx = this->Dxx(), Dyy = this->Dyy(), Dzz = this->Dzz(),
                   Dxy = this->Dxy(), Dxz = this->Dxz(), Dyz = this->Dyz();
        alpha = Dx2*(Dyy*Dzz - Dyz*Dyz) + Dy2*(Dxx*Dzz - Dxz*Dxz) + Dz2*(Dxx*Dyy - Dxy*Dxy) +
                2*( Dy*Dz*(Dxy*Dxz - Dyz*Dxx) + Dx*Dz*(Dxy*Dyz - Dxz*Dyy) + Dx*Dy*(Dxz*Dyz - Dxy*Dzz) );// * 1/dx^6
        normGrad = std::sqrt(normGrad2); // * 1/dx
        return true;
    }
    /// Compute both curvature numerators at once (shares the derivative
    /// evaluations); see the two helpers above for semantics.
    inline bool curvatures(Real& alphaM, Real& alphaG, Real& normGrad) const
    {
        // For performance all finite differences are unscaled wrt dx
        const Real Dx = this->Dx(), Dy = this->Dy(), Dz = this->Dz(),
                   Dx2 = Dx*Dx, Dy2 = Dy*Dy, Dz2 = Dz*Dz, normGrad2 = Dx2 + Dy2 + Dz2;
        if (normGrad2 <= math::Tolerance<Real>::value()) {
             alphaM = alphaG =normGrad = 0;
             return false;
        }
        const Real Dxx = this->Dxx(), Dyy = this->Dyy(), Dzz = this->Dzz(),
                   Dxy = this->Dxy(), Dxz = this->Dxz(), Dyz = this->Dyz();
        alphaM = Dx2*(Dyy + Dzz) + Dy2*(Dxx + Dzz) + Dz2*(Dxx + Dyy) -
                 2*(Dx*(Dy*Dxy + Dz*Dxz) + Dy*Dz*Dyz);// *1/dx^4
        alphaG = Dx2*(Dyy*Dzz - Dyz*Dyz) + Dy2*(Dxx*Dzz - Dxz*Dxz) + Dz2*(Dxx*Dyy - Dxy*Dxy) +
                 2*( Dy*Dz*(Dxy*Dxz - Dyz*Dxx) + Dx*Dz*(Dxy*Dyz - Dxz*Dyy) + Dx*Dy*(Dxz*Dyz - Dxy*Dzz) );// *1/dx^6
        normGrad = std::sqrt(normGrad2); // * 1/dx
        return true;
    }
    template<typename, typename, bool> friend class BaseStencil; // allow base class to call init()
    using BaseType::mAcc;
    using BaseType::mValues;
    const ValueType mInv2Dx, mInvDx2; // 1/(2dx) and 1/(dx^2), precomputed
}; // CurvatureStencil class
//////////////////////////////////////////////////////////////////////
/// @brief Dense stencil of a given width
template<typename GridT, bool IsSafe = true>
class DenseStencil: public BaseStencil<DenseStencil<GridT, IsSafe>, GridT, IsSafe>
{
    typedef DenseStencil<GridT, IsSafe> SelfT;
    typedef BaseStencil<SelfT, GridT, IsSafe> BaseType;
public:
    typedef GridT GridType;
    typedef typename GridT::TreeType TreeType;
    typedef typename GridType::ValueType ValueType;
    /// @brief Construct a cubic stencil of edge length 2*halfWidth + 1 voxels.
    /// @param grid      grid to sample values from
    /// @param halfWidth half the stencil width in voxels (must be positive)
    DenseStencil(const GridType& grid, int halfWidth)
        : BaseType(grid, /*size=*/math::Pow3(2 * halfWidth + 1))
        , mHalfWidth(halfWidth)
    {
        assert(halfWidth>0);
    }
    /// @brief Return the value at the center of the stencil.
    /// @warning Unlike the other stencils, the center is stored in the middle
    /// of the (odd-sized) buffer, NOT at index 0.
    inline const ValueType& getCenterValue() const { return mValues[(mValues.size()-1)>>1]; }
    /// @brief Initialize the stencil buffer with the values of voxel (x, y, z)
    /// and its neighbors.
    inline void moveTo(const Coord& ijk)
    {
        BaseType::mCenter = ijk;
        this->init(ijk);
    }
    /// @brief Initialize the stencil buffer from the voxel an iterator
    /// currently points to, together with its neighbors.
    template<typename IterType>
    inline void moveTo(const IterType& iter)
    {
        BaseType::mCenter = iter.getCoord();
        this->init(BaseType::mCenter);
    }
private:
    /// Fill the buffer with every voxel in the cube [ijk-h, ijk+h],
    /// visiting x slowest and z fastest (lexicographic order).
    /// @warning The center point is NOT at mValues[0] for this DenseStencil!
    inline void init(const Coord& ijk)
    {
        int count = 0;
        for (int x = ijk[0] - mHalfWidth; x <= ijk[0] + mHalfWidth; ++x) {
            for (int y = ijk[1] - mHalfWidth; y <= ijk[1] + mHalfWidth; ++y) {
                for (int z = ijk[2] - mHalfWidth; z <= ijk[2] + mHalfWidth; ++z) {
                    mValues[count++] = mAcc.getValue(Coord(x, y, z));
                }
            }
        }
    }
    template<typename, typename, bool> friend class BaseStencil; // allow base class to call init()
    using BaseType::mAcc;
    using BaseType::mValues;
    const int mHalfWidth;
};// DenseStencil class
} // end math namespace
} // namespace OPENVDB_VERSION_NAME
} // end openvdb namespace
#endif // OPENVDB_MATH_STENCILS_HAS_BEEN_INCLUDED
// File: openvdb/openvdb/math/FiniteDifference.h
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file math/FiniteDifference.h
#ifndef OPENVDB_MATH_FINITEDIFFERENCE_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_FINITEDIFFERENCE_HAS_BEEN_INCLUDED
#include <openvdb/Types.h>
#include "Math.h"
#include "Coord.h"
#include "Vec3.h"
#include <string>
#include <boost/algorithm/string/case_conv.hpp>
#include <boost/algorithm/string/trim.hpp>
#ifdef DWA_OPENVDB
#include <simd/Simd.h>
#endif
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
////////////////////////////////////////
/// @brief Different discrete schemes used in the first derivatives.
// Add new items to the *end* of this list, and update NUM_DS_SCHEMES.
// Note: UNKNOWN_DS is deliberately -1 so that the valid schemes form the
// contiguous range [0, NUM_DS_SCHEMES).
enum DScheme {
    UNKNOWN_DS = -1,
    CD_2NDT =  0, // center difference, 2nd order, but the result must be divided by 2
    CD_2ND,       // center difference, 2nd order
    CD_4TH,       // center difference, 4th order
    CD_6TH,       // center difference, 6th order
    FD_1ST,       // forward difference, 1st order
    FD_2ND,       // forward difference, 2nd order
    FD_3RD,       // forward difference, 3rd order
    BD_1ST,       // backward difference, 1st order
    BD_2ND,       // backward difference, 2nd order
    BD_3RD,       // backward difference, 3rd order
    FD_WENO5,     // forward difference, weno5
    BD_WENO5,     // backward difference, weno5
    FD_HJWENO5,   // forward difference, HJ-weno5
    BD_HJWENO5    // backward difference, HJ-weno5
};

// Count of valid (non-UNKNOWN) schemes; keep in sync when appending entries.
enum { NUM_DS_SCHEMES = BD_HJWENO5 + 1 };
/// @brief Return the lower-case token for a first-derivative scheme
/// (e.g. "cd_2nd"); an out-of-range value yields the empty string.
inline std::string
dsSchemeToString(DScheme dss)
{
    switch (dss) {
        case UNKNOWN_DS:  return "unknown_ds";
        case CD_2NDT:     return "cd_2ndt";
        case CD_2ND:      return "cd_2nd";
        case CD_4TH:      return "cd_4th";
        case CD_6TH:      return "cd_6th";
        case FD_1ST:      return "fd_1st";
        case FD_2ND:      return "fd_2nd";
        case FD_3RD:      return "fd_3rd";
        case BD_1ST:      return "bd_1st";
        case BD_2ND:      return "bd_2nd";
        case BD_3RD:      return "bd_3rd";
        case FD_WENO5:    return "fd_weno5";
        case BD_WENO5:    return "bd_weno5";
        case FD_HJWENO5:  return "fd_hjweno5";
        case BD_HJWENO5:  return "bd_hjweno5";
    }
    return std::string();
}
/// @brief Parse a scheme token (case-insensitive, surrounding whitespace
/// ignored) back into a DScheme; returns UNKNOWN_DS if nothing matches.
inline DScheme
stringToDScheme(const std::string& s)
{
    // Normalize the input token first.
    std::string token = s;
    boost::trim(token);
    boost::to_lower(token);
    // Scan the valid schemes [0, NUM_DS_SCHEMES) for a matching token.
    for (int n = 0; n < NUM_DS_SCHEMES; ++n) {
        const DScheme scheme = static_cast<DScheme>(n);
        if (token == dsSchemeToString(scheme)) return scheme;
    }
    return UNKNOWN_DS;
}
/// @brief Return a human-readable menu label for a first-derivative scheme;
/// an out-of-range value yields the empty string.
inline std::string
dsSchemeToMenuName(DScheme dss)
{
    switch (dss) {
        case UNKNOWN_DS:  return "Unknown DS scheme";
        case CD_2NDT:     return "Twice 2nd-order center difference";
        case CD_2ND:      return "2nd-order center difference";
        case CD_4TH:      return "4th-order center difference";
        case CD_6TH:      return "6th-order center difference";
        case FD_1ST:      return "1st-order forward difference";
        case FD_2ND:      return "2nd-order forward difference";
        case FD_3RD:      return "3rd-order forward difference";
        case BD_1ST:      return "1st-order backward difference";
        case BD_2ND:      return "2nd-order backward difference";
        case BD_3RD:      return "3rd-order backward difference";
        case FD_WENO5:    return "5th-order WENO forward difference";
        case BD_WENO5:    return "5th-order WENO backward difference";
        case FD_HJWENO5:  return "5th-order HJ-WENO forward difference";
        case BD_HJWENO5:  return "5th-order HJ-WENO backward difference";
    }
    return std::string();
}
////////////////////////////////////////
/// @brief Different discrete schemes used in the second derivatives.
// Add new items to the *end* of this list, and update NUM_DD_SCHEMES.
// Note: UNKNOWN_DD is -1 so the valid schemes form the range [0, NUM_DD_SCHEMES).
enum DDScheme {
    UNKNOWN_DD = -1,
    CD_SECOND =  0, // center difference, 2nd order
    CD_FOURTH,      // center difference, 4th order
    CD_SIXTH        // center difference, 6th order
};

// Count of valid (non-UNKNOWN) schemes; keep in sync when appending entries.
enum { NUM_DD_SCHEMES = CD_SIXTH + 1 };
////////////////////////////////////////
/// @brief Biased Gradients are limited to non-centered differences
// Add new items to the *end* of this list, and update NUM_BIAS_SCHEMES.
// Note: UNKNOWN_BIAS is -1 so the valid schemes form the range [0, NUM_BIAS_SCHEMES).
enum BiasedGradientScheme {
    UNKNOWN_BIAS = -1,
    FIRST_BIAS   =  0, // uses FD_1ST & BD_1ST
    SECOND_BIAS,       // uses FD_2ND & BD_2ND
    THIRD_BIAS,        // uses FD_3RD & BD_3RD
    WENO5_BIAS,        // uses WENO5
    HJWENO5_BIAS       // uses HJWENO5
};

// Count of valid (non-UNKNOWN) schemes; keep in sync when appending entries.
enum { NUM_BIAS_SCHEMES = HJWENO5_BIAS + 1 };
/// @brief Return the lower-case token for a biased-gradient scheme;
/// an out-of-range value yields the empty string.
inline std::string
biasedGradientSchemeToString(BiasedGradientScheme bgs)
{
    switch (bgs) {
        case UNKNOWN_BIAS:  return "unknown_bias";
        case FIRST_BIAS:    return "first_bias";
        case SECOND_BIAS:   return "second_bias";
        case THIRD_BIAS:    return "third_bias";
        case WENO5_BIAS:    return "weno5_bias";
        case HJWENO5_BIAS:  return "hjweno5_bias";
    }
    return std::string();
}
/// @brief Parse a biased-gradient token (case-insensitive, surrounding
/// whitespace ignored); returns UNKNOWN_BIAS if nothing matches.
inline BiasedGradientScheme
stringToBiasedGradientScheme(const std::string& s)
{
    // Normalize the input token first.
    std::string token = s;
    boost::trim(token);
    boost::to_lower(token);
    // Scan the valid schemes [0, NUM_BIAS_SCHEMES) for a matching token.
    for (int n = 0; n < NUM_BIAS_SCHEMES; ++n) {
        const BiasedGradientScheme scheme = static_cast<BiasedGradientScheme>(n);
        if (token == biasedGradientSchemeToString(scheme)) return scheme;
    }
    return UNKNOWN_BIAS;
}
/// @brief Return a human-readable menu label for a biased-gradient scheme;
/// an out-of-range value yields the empty string.
inline std::string
biasedGradientSchemeToMenuName(BiasedGradientScheme bgs)
{
    switch (bgs) {
        case UNKNOWN_BIAS:  return "Unknown biased gradient";
        case FIRST_BIAS:    return "1st-order biased gradient";
        case SECOND_BIAS:   return "2nd-order biased gradient";
        case THIRD_BIAS:    return "3rd-order biased gradient";
        case WENO5_BIAS:    return "5th-order WENO biased gradient";
        case HJWENO5_BIAS:  return "5th-order HJ-WENO biased gradient";
    }
    return std::string();
}
////////////////////////////////////////
/// @brief Temporal integration schemes
// Add new items to the *end* of this list, and update NUM_TEMPORAL_SCHEMES.
// Note: UNKNOWN_TIS is -1, so TVD_RK1 is 0 and the valid schemes form the
// range [0, NUM_TEMPORAL_SCHEMES).
enum TemporalIntegrationScheme {
    UNKNOWN_TIS = -1,
    TVD_RK1,//same as explicit Euler integration
    TVD_RK2,
    TVD_RK3
};

// Count of valid (non-UNKNOWN) schemes; keep in sync when appending entries.
enum { NUM_TEMPORAL_SCHEMES = TVD_RK3 + 1 };
/// @brief Return the lower-case token for a temporal integration scheme;
/// an out-of-range value yields the empty string.
inline std::string
temporalIntegrationSchemeToString(TemporalIntegrationScheme tis)
{
    switch (tis) {
        case UNKNOWN_TIS:  return "unknown_tis";
        case TVD_RK1:      return "tvd_rk1";
        case TVD_RK2:      return "tvd_rk2";
        case TVD_RK3:      return "tvd_rk3";
    }
    return std::string();
}
/// @brief Parse a temporal-integration token (case-insensitive, surrounding
/// whitespace ignored); returns UNKNOWN_TIS if nothing matches.
inline TemporalIntegrationScheme
stringToTemporalIntegrationScheme(const std::string& s)
{
    // Normalize the input token first.
    std::string token = s;
    boost::trim(token);
    boost::to_lower(token);
    // Scan the valid schemes [0, NUM_TEMPORAL_SCHEMES) for a matching token.
    for (int n = 0; n < NUM_TEMPORAL_SCHEMES; ++n) {
        const TemporalIntegrationScheme scheme = static_cast<TemporalIntegrationScheme>(n);
        if (token == temporalIntegrationSchemeToString(scheme)) return scheme;
    }
    return UNKNOWN_TIS;
}
/// @brief Return a human-readable menu label for a temporal integration
/// scheme; an out-of-range value yields the empty string.
inline std::string
temporalIntegrationSchemeToMenuName(TemporalIntegrationScheme tis)
{
    switch (tis) {
        case UNKNOWN_TIS:  return "Unknown temporal integration";
        case TVD_RK1:      return "Forward Euler";
        case TVD_RK2:      return "2nd-order Runge-Kutta";
        case TVD_RK3:      return "3rd-order Runge-Kutta";
    }
    return std::string();
}
//@}
/// @brief Implementation of nominally fifth-order finite-difference WENO
/// @details This function returns the numerical flux. See "High Order Finite Difference and
/// Finite Volume WENO Schemes and Discontinuous Galerkin Methods for CFD" - Chi-Wang Shu
/// ICASE Report No 2001-11 (page 6). Also see ICASE No 97-65 for a more complete reference
/// (Shu, 1997).
/// Given v1 = f(x-2dx), v2 = f(x-dx), v3 = f(x), v4 = f(x+dx) and v5 = f(x+2dx),
/// return an interpolated value f(x+dx/2) with the special property that
/// ( f(x+dx/2) - f(x-dx/2) ) / dx = df/dx (x) + error,
/// where the error is fifth-order in smooth regions: O(dx) <= error <=O(dx^5)
template<typename ValueType>
inline ValueType
WENO5(const ValueType& v1, const ValueType& v2, const ValueType& v3,
      const ValueType& v4, const ValueType& v5, float scale2 = 0.01f)
{
    const double C = 13.0 / 12.0;
    // WENO is formulated for non-dimensional equations, here the optional scale2
    // is a reference value (squared) for the function being interpolated. For
    // example if 'v' is of order 1000, then scale2 = 10^6 is ok. But in practice
    // leave scale2 = 1.
    const double eps = 1.0e-6 * static_cast<double>(scale2);
    // {\tilde \omega_k} = \gamma_k / ( \beta_k + \epsilon)^2 in Shu's ICASE report)
    // A1..A3 are the unnormalized nonlinear weights of the three candidate
    // substencils; each denominator is a smoothness indicator, so smooth
    // substencils get large weights and oscillatory ones are suppressed.
    const double A1=0.1/math::Pow2(C*math::Pow2(v1-2*v2+v3)+0.25*math::Pow2(v1-4*v2+3.0*v3)+eps),
                 A2=0.6/math::Pow2(C*math::Pow2(v2-2*v3+v4)+0.25*math::Pow2(v2-v4)+eps),
                 A3=0.3/math::Pow2(C*math::Pow2(v3-2*v4+v5)+0.25*math::Pow2(3.0*v3-4*v4+v5)+eps);
    // Weighted average of the three 3rd-order substencil interpolants,
    // normalized by (A1+A2+A3).
    return static_cast<ValueType>(static_cast<ValueType>(
        A1*(2.0*v1 - 7.0*v2 + 11.0*v3) +
        A2*(5.0*v3 - v2 + 2.0*v4) +
        A3*(2.0*v3 + 5.0*v4 - v5))/(6.0*(A1+A2+A3)));
}
/// @brief Godunov's upwinding scheme: combine per-axis one-sided (undivided)
/// differences into the squared gradient norm |grad(phi)|^2, selecting the
/// difference consistent with the propagation direction of the level set.
/// @param isOutside true if the center value lies on the positive side
/// @param dP_xm,dP_xp backward/forward differences along x (likewise y, z)
template <typename Real>
inline Real GodunovsNormSqrd(bool isOutside,
                             Real dP_xm, Real dP_xp,
                             Real dP_ym, Real dP_yp,
                             Real dP_zm, Real dP_zp)
{
    using math::Max;
    using math::Min;
    using math::Pow2;

    const Real zero(0);
    // Outside: the backward difference is clamped from below and the forward
    // difference from above; inside, the roles are reversed.
    const Real dPdx2 = isOutside
        ? Max(Pow2(Max(dP_xm, zero)), Pow2(Min(dP_xp, zero)))
        : Max(Pow2(Min(dP_xm, zero)), Pow2(Max(dP_xp, zero)));
    const Real dPdy2 = isOutside
        ? Max(Pow2(Max(dP_ym, zero)), Pow2(Min(dP_yp, zero)))
        : Max(Pow2(Min(dP_ym, zero)), Pow2(Max(dP_yp, zero)));
    const Real dPdz2 = isOutside
        ? Max(Pow2(Max(dP_zm, zero)), Pow2(Min(dP_zp, zero)))
        : Max(Pow2(Min(dP_zm, zero)), Pow2(Max(dP_zp, zero)));
    return dPdx2 + dPdy2 + dPdz2; // |\nabla\phi|^2
}
template<typename Real>
inline Real
GodunovsNormSqrd(bool isOutside, const Vec3<Real>& gradient_m, const Vec3<Real>& gradient_p)
{
return GodunovsNormSqrd<Real>(isOutside,
gradient_m[0], gradient_p[0],
gradient_m[1], gradient_p[1],
gradient_m[2], gradient_p[2]);
}
#ifdef DWA_OPENVDB

// SSE helpers: componentwise min/max of two 4-float vectors.
inline simd::Float4 simdMin(const simd::Float4& a, const simd::Float4& b) {
    return simd::Float4(_mm_min_ps(a.base(), b.base()));
}
inline simd::Float4 simdMax(const simd::Float4& a, const simd::Float4& b) {
    return simd::Float4(_mm_max_ps(a.base(), b.base()));
}

// Horizontal sum of all four lanes (defined below).
inline float simdSum(const simd::Float4& v);

// Componentwise square.
inline simd::Float4 Pow2(const simd::Float4& v) { return v * v; }

/// SIMD specialization of WENO5: evaluates the same formula as the scalar
/// version, with the three axes packed into the first three lanes
/// (the fourth lane is padding).
template<>
inline simd::Float4
WENO5<simd::Float4>(const simd::Float4& v1, const simd::Float4& v2, const simd::Float4& v3,
                    const simd::Float4& v4, const simd::Float4& v5, float scale2)
{
    using math::Pow2;
    using F4 = simd::Float4;
    const F4
        C(13.f / 12.f),
        eps(1.0e-6f * scale2),
        two(2.0), three(3.0), four(4.0), five(5.0), fourth(0.25),
        A1 = F4(0.1f) / Pow2(C*Pow2(v1-two*v2+v3) + fourth*Pow2(v1-four*v2+three*v3) + eps),
        A2 = F4(0.6f) / Pow2(C*Pow2(v2-two*v3+v4) + fourth*Pow2(v2-v4) + eps),
        A3 = F4(0.3f) / Pow2(C*Pow2(v3-two*v4+v5) + fourth*Pow2(three*v3-four*v4+v5) + eps);

    return (A1 * (two * v1 - F4(7.0) * v2 + F4(11.0) * v3) +
            A2 * (five * v3 - v2 + two * v4) +
            A3 * (two * v3 + five * v4 - v5)) / (F4(6.0) * (A1 + A2 + A3));
}

inline float
simdSum(const simd::Float4& v)
{
    // temp = { v3+v3, v2+v2, v1+v3, v0+v2 }
    __m128 temp = _mm_add_ps(v.base(), _mm_movehl_ps(v.base(), v.base()));
    // temp = { v3+v3, v2+v2, v1+v3, (v0+v2)+(v1+v3) }
    temp = _mm_add_ss(temp, _mm_shuffle_ps(temp, temp, 1));
    return _mm_cvtss_f32(temp);
}

/// SIMD Godunov scheme over the three axis lanes of dP_m/dP_p.
inline float
GodunovsNormSqrd(bool isOutside, const simd::Float4& dP_m, const simd::Float4& dP_p)
{
    const simd::Float4 zero(0.0);
    simd::Float4 v = isOutside
        ? simdMax(math::Pow2(simdMax(dP_m, zero)), math::Pow2(simdMin(dP_p, zero)))
        : simdMax(math::Pow2(simdMin(dP_m, zero)), math::Pow2(simdMax(dP_p, zero)));
    return simdSum(v);//should be v[0]+v[1]+v[2]
}
#endif
/// @brief Compile-time dispatch facade for first derivatives: each DScheme
/// gets its own specialization of this struct. The unspecialized template
/// only declares the interface (random-access and stencil-based variants).
template<DScheme DiffScheme>
struct D1
{
    // random access version
    /// Return the first derivative along x at @a ijk (per the scheme).
    template<typename Accessor>
    static typename Accessor::ValueType inX(const Accessor& grid, const Coord& ijk);

    /// Return the first derivative along y at @a ijk.
    template<typename Accessor>
    static typename Accessor::ValueType inY(const Accessor& grid, const Coord& ijk);

    /// Return the first derivative along z at @a ijk.
    template<typename Accessor>
    static typename Accessor::ValueType inZ(const Accessor& grid, const Coord& ijk);

    // stencil access version
    /// Same derivatives, evaluated from a pre-populated stencil buffer.
    template<typename Stencil>
    static typename Stencil::ValueType inX(const Stencil& S);
    template<typename Stencil>
    static typename Stencil::ValueType inY(const Stencil& S);
    template<typename Stencil>
    static typename Stencil::ValueType inZ(const Stencil& S);
};
template<>
struct D1<CD_2NDT>
{
    /// Second-order central difference without the 1/2 scaling:
    /// simply x(+1) - x(-1).
    template<typename ValueType>
    static ValueType difference(const ValueType& plus1, const ValueType& minus1)
    {
        return plus1 - minus1;
    }

    // --- random access versions ---
    template<typename AccessorT>
    static typename AccessorT::ValueType inX(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy( 1, 0, 0)),
                          grid.getValue(ijk.offsetBy(-1, 0, 0)));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inY(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0,  1, 0)),
                          grid.getValue(ijk.offsetBy(0, -1, 0)));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inZ(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, 0,  1)),
                          grid.getValue(ijk.offsetBy(0, 0, -1)));
    }

    // --- stencil access versions ---
    template<typename StencilT>
    static typename StencilT::ValueType inX(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 1, 0, 0>(),
                          stencil.template getValue<-1, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inY(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 1, 0>(),
                          stencil.template getValue< 0,-1, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inZ(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 0, 1>(),
                          stencil.template getValue< 0, 0,-1>());
    }
};
template<>
struct D1<CD_2ND>
{
    /// Second-order central difference: (x(+1) - x(-1)) / 2.
    template<typename ValueType>
    static ValueType difference(const ValueType& plus1, const ValueType& minus1)
    {
        return (plus1 - minus1)*ValueType(0.5);
    }
    /// Booleans cannot be halved; the +1 neighbor is returned unchanged.
    static bool difference(const bool& plus1, const bool& /*minus1*/)
    {
        return plus1;
    }

    // --- random access versions ---
    template<typename AccessorT>
    static typename AccessorT::ValueType inX(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy( 1, 0, 0)),
                          grid.getValue(ijk.offsetBy(-1, 0, 0)));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inY(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0,  1, 0)),
                          grid.getValue(ijk.offsetBy(0, -1, 0)));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inZ(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, 0,  1)),
                          grid.getValue(ijk.offsetBy(0, 0, -1)));
    }

    // --- stencil access versions ---
    template<typename StencilT>
    static typename StencilT::ValueType inX(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 1, 0, 0>(),
                          stencil.template getValue<-1, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inY(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 1, 0>(),
                          stencil.template getValue< 0,-1, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inZ(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 0, 1>(),
                          stencil.template getValue< 0, 0,-1>());
    }
};
template<>
struct D1<CD_4TH>
{
    /// Fourth-order central difference:
    /// 2/3*(x(+1) - x(-1)) + 1/12*(x(-2) - x(+2)).
    template<typename ValueType>
    static ValueType difference(const ValueType& plus2, const ValueType& plus1,
                                const ValueType& minus1, const ValueType& minus2)
    {
        return ValueType(2./3.)*(plus1 - minus1) + ValueType(1./12.)*(minus2 - plus2) ;
    }

    // --- random access versions ---
    template<typename AccessorT>
    static typename AccessorT::ValueType inX(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy( 2, 0, 0)),
                          grid.getValue(ijk.offsetBy( 1, 0, 0)),
                          grid.getValue(ijk.offsetBy(-1, 0, 0)),
                          grid.getValue(ijk.offsetBy(-2, 0, 0)));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inY(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0,  2, 0)),
                          grid.getValue(ijk.offsetBy(0,  1, 0)),
                          grid.getValue(ijk.offsetBy(0, -1, 0)),
                          grid.getValue(ijk.offsetBy(0, -2, 0)));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inZ(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, 0,  2)),
                          grid.getValue(ijk.offsetBy(0, 0,  1)),
                          grid.getValue(ijk.offsetBy(0, 0, -1)),
                          grid.getValue(ijk.offsetBy(0, 0, -2)));
    }

    // --- stencil access versions ---
    template<typename StencilT>
    static typename StencilT::ValueType inX(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 2, 0, 0>(),
                          stencil.template getValue< 1, 0, 0>(),
                          stencil.template getValue<-1, 0, 0>(),
                          stencil.template getValue<-2, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inY(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 2, 0>(),
                          stencil.template getValue< 0, 1, 0>(),
                          stencil.template getValue< 0,-1, 0>(),
                          stencil.template getValue< 0,-2, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inZ(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 0, 2>(),
                          stencil.template getValue< 0, 0, 1>(),
                          stencil.template getValue< 0, 0,-1>(),
                          stencil.template getValue< 0, 0,-2>());
    }
};
template<>
struct D1<CD_6TH>
{
    /// Sixth-order central difference:
    /// 3/4*(x(+1) - x(-1)) - 3/20*(x(+2) - x(-2)) + 1/60*(x(+3) - x(-3)).
    template<typename ValueType>
    static ValueType difference(const ValueType& plus3, const ValueType& plus2,
                                const ValueType& plus1, const ValueType& minus1,
                                const ValueType& minus2, const ValueType& minus3)
    {
        return ValueType(3./4.)*(plus1 - minus1) - ValueType(0.15)*(plus2 - minus2)
            + ValueType(1./60.)*(plus3-minus3);
    }

    // --- random access versions ---
    template<typename AccessorT>
    static typename AccessorT::ValueType inX(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy( 3, 0, 0)),
                          grid.getValue(ijk.offsetBy( 2, 0, 0)),
                          grid.getValue(ijk.offsetBy( 1, 0, 0)),
                          grid.getValue(ijk.offsetBy(-1, 0, 0)),
                          grid.getValue(ijk.offsetBy(-2, 0, 0)),
                          grid.getValue(ijk.offsetBy(-3, 0, 0)));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inY(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0,  3, 0)),
                          grid.getValue(ijk.offsetBy(0,  2, 0)),
                          grid.getValue(ijk.offsetBy(0,  1, 0)),
                          grid.getValue(ijk.offsetBy(0, -1, 0)),
                          grid.getValue(ijk.offsetBy(0, -2, 0)),
                          grid.getValue(ijk.offsetBy(0, -3, 0)));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inZ(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, 0,  3)),
                          grid.getValue(ijk.offsetBy(0, 0,  2)),
                          grid.getValue(ijk.offsetBy(0, 0,  1)),
                          grid.getValue(ijk.offsetBy(0, 0, -1)),
                          grid.getValue(ijk.offsetBy(0, 0, -2)),
                          grid.getValue(ijk.offsetBy(0, 0, -3)));
    }

    // --- stencil access versions ---
    template<typename StencilT>
    static typename StencilT::ValueType inX(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 3, 0, 0>(),
                          stencil.template getValue< 2, 0, 0>(),
                          stencil.template getValue< 1, 0, 0>(),
                          stencil.template getValue<-1, 0, 0>(),
                          stencil.template getValue<-2, 0, 0>(),
                          stencil.template getValue<-3, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inY(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 3, 0>(),
                          stencil.template getValue< 0, 2, 0>(),
                          stencil.template getValue< 0, 1, 0>(),
                          stencil.template getValue< 0,-1, 0>(),
                          stencil.template getValue< 0,-2, 0>(),
                          stencil.template getValue< 0,-3, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inZ(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 0, 3>(),
                          stencil.template getValue< 0, 0, 2>(),
                          stencil.template getValue< 0, 0, 1>(),
                          stencil.template getValue< 0, 0,-1>(),
                          stencil.template getValue< 0, 0,-2>(),
                          stencil.template getValue< 0, 0,-3>());
    }
};
template<>
struct D1<FD_1ST>
{
    /// First-order forward (one-sided) difference: x(+1) - x(0).
    template<typename ValueType>
    static ValueType difference(const ValueType& plus1, const ValueType& center)
    {
        return plus1 - center;
    }

    // --- random access versions ---
    template<typename AccessorT>
    static typename AccessorT::ValueType inX(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(1, 0, 0)), grid.getValue(ijk));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inY(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, 1, 0)), grid.getValue(ijk));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inZ(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, 0, 1)), grid.getValue(ijk));
    }

    // --- stencil access versions ---
    template<typename StencilT>
    static typename StencilT::ValueType inX(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 1, 0, 0>(),
                          stencil.template getValue< 0, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inY(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 1, 0>(),
                          stencil.template getValue< 0, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inZ(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 0, 1>(),
                          stencil.template getValue< 0, 0, 0>());
    }
};
template<>
struct D1<FD_2ND>
{
    /// Second-order forward (one-sided) difference:
    /// 2*x(+1) - (x(+2)/2 + 3/2*x(0)).
    template<typename ValueType>
    static ValueType difference(const ValueType& plus2, const ValueType& plus1,
                                const ValueType& center)
    {
        return ValueType(2)*plus1 -(ValueType(0.5)*plus2 + ValueType(3./2.)*center);
    }

    // --- random access versions ---
    template<typename AccessorT>
    static typename AccessorT::ValueType inX(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(2, 0, 0)),
                          grid.getValue(ijk.offsetBy(1, 0, 0)),
                          grid.getValue(ijk));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inY(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, 2, 0)),
                          grid.getValue(ijk.offsetBy(0, 1, 0)),
                          grid.getValue(ijk));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inZ(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, 0, 2)),
                          grid.getValue(ijk.offsetBy(0, 0, 1)),
                          grid.getValue(ijk));
    }

    // --- stencil access versions ---
    template<typename StencilT>
    static typename StencilT::ValueType inX(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 2, 0, 0>(),
                          stencil.template getValue< 1, 0, 0>(),
                          stencil.template getValue< 0, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inY(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 2, 0>(),
                          stencil.template getValue< 0, 1, 0>(),
                          stencil.template getValue< 0, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inZ(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 0, 2>(),
                          stencil.template getValue< 0, 0, 1>(),
                          stencil.template getValue< 0, 0, 0>());
    }
};
template<>
struct D1<FD_3RD>
{
    /// Third-order forward (one-sided) difference:
    /// x(+3)/3 - 3/2*x(+2) + 3*x(+1) - 11/6*x(0).
    template<typename ValueType>
    static ValueType difference(const ValueType& plus3, const ValueType& plus2,
                                const ValueType& plus1, const ValueType& center)
    {
        return static_cast<ValueType>(plus3/3.0 - 1.5*plus2 + 3.0*plus1 - 11.0*center/6.0);
    }

    // --- random access versions ---
    template<typename AccessorT>
    static typename AccessorT::ValueType inX(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(3, 0, 0)),
                          grid.getValue(ijk.offsetBy(2, 0, 0)),
                          grid.getValue(ijk.offsetBy(1, 0, 0)),
                          grid.getValue(ijk));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inY(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, 3, 0)),
                          grid.getValue(ijk.offsetBy(0, 2, 0)),
                          grid.getValue(ijk.offsetBy(0, 1, 0)),
                          grid.getValue(ijk));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inZ(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, 0, 3)),
                          grid.getValue(ijk.offsetBy(0, 0, 2)),
                          grid.getValue(ijk.offsetBy(0, 0, 1)),
                          grid.getValue(ijk));
    }

    // --- stencil access versions ---
    template<typename StencilT>
    static typename StencilT::ValueType inX(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 3, 0, 0>(),
                          stencil.template getValue< 2, 0, 0>(),
                          stencil.template getValue< 1, 0, 0>(),
                          stencil.template getValue< 0, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inY(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 3, 0>(),
                          stencil.template getValue< 0, 2, 0>(),
                          stencil.template getValue< 0, 1, 0>(),
                          stencil.template getValue< 0, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inZ(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 0, 3>(),
                          stencil.template getValue< 0, 0, 2>(),
                          stencil.template getValue< 0, 0, 1>(),
                          stencil.template getValue< 0, 0, 0>());
    }
};
template<>
struct D1<BD_1ST>
{
    /// First-order backward (one-sided) difference: the negated forward
    /// difference applied to the minus-side samples.
    template<typename ValueType>
    static ValueType difference(const ValueType& minus1, const ValueType& center)
    {
        return -D1<FD_1ST>::difference(minus1, center);
    }

    // --- random access versions ---
    template<typename AccessorT>
    static typename AccessorT::ValueType inX(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(-1, 0, 0)), grid.getValue(ijk));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inY(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, -1, 0)), grid.getValue(ijk));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inZ(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, 0, -1)), grid.getValue(ijk));
    }

    // --- stencil access versions ---
    template<typename StencilT>
    static typename StencilT::ValueType inX(const StencilT& stencil)
    {
        return difference(stencil.template getValue<-1, 0, 0>(),
                          stencil.template getValue< 0, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inY(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0,-1, 0>(),
                          stencil.template getValue< 0, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inZ(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 0,-1>(),
                          stencil.template getValue< 0, 0, 0>());
    }
};
template<>
struct D1<BD_2ND>
{
    /// Second-order backward (one-sided) difference: the negated
    /// second-order forward difference applied to the minus-side samples.
    template<typename ValueType>
    static ValueType difference(const ValueType& minus2, const ValueType& minus1,
                                const ValueType& center)
    {
        return -D1<FD_2ND>::difference(minus2, minus1, center);
    }

    // --- random access versions ---
    template<typename AccessorT>
    static typename AccessorT::ValueType inX(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(-2, 0, 0)),
                          grid.getValue(ijk.offsetBy(-1, 0, 0)),
                          grid.getValue(ijk));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inY(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, -2, 0)),
                          grid.getValue(ijk.offsetBy(0, -1, 0)),
                          grid.getValue(ijk));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inZ(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, 0, -2)),
                          grid.getValue(ijk.offsetBy(0, 0, -1)),
                          grid.getValue(ijk));
    }

    // --- stencil access versions ---
    template<typename StencilT>
    static typename StencilT::ValueType inX(const StencilT& stencil)
    {
        return difference(stencil.template getValue<-2, 0, 0>(),
                          stencil.template getValue<-1, 0, 0>(),
                          stencil.template getValue< 0, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inY(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0,-2, 0>(),
                          stencil.template getValue< 0,-1, 0>(),
                          stencil.template getValue< 0, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inZ(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 0,-2>(),
                          stencil.template getValue< 0, 0,-1>(),
                          stencil.template getValue< 0, 0, 0>());
    }
};
template<>
struct D1<BD_3RD>
{
    /// Third-order backward (one-sided) difference: the negated
    /// third-order forward difference applied to the minus-side samples.
    template<typename ValueType>
    static ValueType difference(const ValueType& minus3, const ValueType& minus2,
                                const ValueType& minus1, const ValueType& center)
    {
        return -D1<FD_3RD>::difference(minus3, minus2, minus1, center);
    }

    // --- random access versions ---
    template<typename AccessorT>
    static typename AccessorT::ValueType inX(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(-3, 0, 0)),
                          grid.getValue(ijk.offsetBy(-2, 0, 0)),
                          grid.getValue(ijk.offsetBy(-1, 0, 0)),
                          grid.getValue(ijk));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inY(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, -3, 0)),
                          grid.getValue(ijk.offsetBy(0, -2, 0)),
                          grid.getValue(ijk.offsetBy(0, -1, 0)),
                          grid.getValue(ijk));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inZ(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, 0, -3)),
                          grid.getValue(ijk.offsetBy(0, 0, -2)),
                          grid.getValue(ijk.offsetBy(0, 0, -1)),
                          grid.getValue(ijk));
    }

    // --- stencil access versions ---
    template<typename StencilT>
    static typename StencilT::ValueType inX(const StencilT& stencil)
    {
        return difference(stencil.template getValue<-3, 0, 0>(),
                          stencil.template getValue<-2, 0, 0>(),
                          stencil.template getValue<-1, 0, 0>(),
                          stencil.template getValue< 0, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inY(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0,-3, 0>(),
                          stencil.template getValue< 0,-2, 0>(),
                          stencil.template getValue< 0,-1, 0>(),
                          stencil.template getValue< 0, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inZ(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 0,-3>(),
                          stencil.template getValue< 0, 0,-2>(),
                          stencil.template getValue< 0, 0,-1>(),
                          stencil.template getValue< 0, 0, 0>());
    }
};
template<>
struct D1<FD_WENO5>
{
    /// Fifth-order WENO difference over the +3..-2 neighborhood, formed as
    /// the difference of two shifted WENO5 reconstructions.
    template<typename ValueType>
    static ValueType difference(const ValueType& xp3, const ValueType& xp2,
                                const ValueType& xp1, const ValueType& xp0,
                                const ValueType& xm1, const ValueType& xm2)
    {
        return WENO5<ValueType>(xp3, xp2, xp1, xp0, xm1)
             - WENO5<ValueType>(xp2, xp1, xp0, xm1, xm2);
    }

    // --- random access versions ---
    template<typename AccessorT>
    static typename AccessorT::ValueType inX(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy( 3, 0, 0)),
                          grid.getValue(ijk.offsetBy( 2, 0, 0)),
                          grid.getValue(ijk.offsetBy( 1, 0, 0)),
                          grid.getValue(ijk),
                          grid.getValue(ijk.offsetBy(-1, 0, 0)),
                          grid.getValue(ijk.offsetBy(-2, 0, 0)));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inY(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0,  3, 0)),
                          grid.getValue(ijk.offsetBy(0,  2, 0)),
                          grid.getValue(ijk.offsetBy(0,  1, 0)),
                          grid.getValue(ijk),
                          grid.getValue(ijk.offsetBy(0, -1, 0)),
                          grid.getValue(ijk.offsetBy(0, -2, 0)));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inZ(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, 0,  3)),
                          grid.getValue(ijk.offsetBy(0, 0,  2)),
                          grid.getValue(ijk.offsetBy(0, 0,  1)),
                          grid.getValue(ijk),
                          grid.getValue(ijk.offsetBy(0, 0, -1)),
                          grid.getValue(ijk.offsetBy(0, 0, -2)));
    }

    // --- stencil access versions ---
    template<typename StencilT>
    static typename StencilT::ValueType inX(const StencilT& stencil)
    {
        return static_cast<typename StencilT::ValueType>(difference(
            stencil.template getValue< 3, 0, 0>(),
            stencil.template getValue< 2, 0, 0>(),
            stencil.template getValue< 1, 0, 0>(),
            stencil.template getValue< 0, 0, 0>(),
            stencil.template getValue<-1, 0, 0>(),
            stencil.template getValue<-2, 0, 0>()));
    }
    template<typename StencilT>
    static typename StencilT::ValueType inY(const StencilT& stencil)
    {
        return static_cast<typename StencilT::ValueType>(difference(
            stencil.template getValue< 0, 3, 0>(),
            stencil.template getValue< 0, 2, 0>(),
            stencil.template getValue< 0, 1, 0>(),
            stencil.template getValue< 0, 0, 0>(),
            stencil.template getValue< 0,-1, 0>(),
            stencil.template getValue< 0,-2, 0>()));
    }
    template<typename StencilT>
    static typename StencilT::ValueType inZ(const StencilT& stencil)
    {
        return static_cast<typename StencilT::ValueType>(difference(
            stencil.template getValue< 0, 0, 3>(),
            stencil.template getValue< 0, 0, 2>(),
            stencil.template getValue< 0, 0, 1>(),
            stencil.template getValue< 0, 0, 0>(),
            stencil.template getValue< 0, 0,-1>(),
            stencil.template getValue< 0, 0,-2>()));
    }
};
template<>
struct D1<FD_HJWENO5>
{
    /// Fifth-order Hamilton-Jacobi WENO difference: a single WENO5
    /// reconstruction applied to the five consecutive first differences.
    template<typename ValueType>
    static ValueType difference(const ValueType& xp3, const ValueType& xp2,
                                const ValueType& xp1, const ValueType& xp0,
                                const ValueType& xm1, const ValueType& xm2)
    {
        return WENO5<ValueType>(xp3 - xp2, xp2 - xp1, xp1 - xp0, xp0-xm1, xm1-xm2);
    }

    // --- random access versions ---
    template<typename AccessorT>
    static typename AccessorT::ValueType inX(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy( 3, 0, 0)),
                          grid.getValue(ijk.offsetBy( 2, 0, 0)),
                          grid.getValue(ijk.offsetBy( 1, 0, 0)),
                          grid.getValue(ijk),
                          grid.getValue(ijk.offsetBy(-1, 0, 0)),
                          grid.getValue(ijk.offsetBy(-2, 0, 0)));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inY(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0,  3, 0)),
                          grid.getValue(ijk.offsetBy(0,  2, 0)),
                          grid.getValue(ijk.offsetBy(0,  1, 0)),
                          grid.getValue(ijk),
                          grid.getValue(ijk.offsetBy(0, -1, 0)),
                          grid.getValue(ijk.offsetBy(0, -2, 0)));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inZ(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, 0,  3)),
                          grid.getValue(ijk.offsetBy(0, 0,  2)),
                          grid.getValue(ijk.offsetBy(0, 0,  1)),
                          grid.getValue(ijk),
                          grid.getValue(ijk.offsetBy(0, 0, -1)),
                          grid.getValue(ijk.offsetBy(0, 0, -2)));
    }

    // --- stencil access versions ---
    template<typename StencilT>
    static typename StencilT::ValueType inX(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 3, 0, 0>(),
                          stencil.template getValue< 2, 0, 0>(),
                          stencil.template getValue< 1, 0, 0>(),
                          stencil.template getValue< 0, 0, 0>(),
                          stencil.template getValue<-1, 0, 0>(),
                          stencil.template getValue<-2, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inY(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 3, 0>(),
                          stencil.template getValue< 0, 2, 0>(),
                          stencil.template getValue< 0, 1, 0>(),
                          stencil.template getValue< 0, 0, 0>(),
                          stencil.template getValue< 0,-1, 0>(),
                          stencil.template getValue< 0,-2, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inZ(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 0, 3>(),
                          stencil.template getValue< 0, 0, 2>(),
                          stencil.template getValue< 0, 0, 1>(),
                          stencil.template getValue< 0, 0, 0>(),
                          stencil.template getValue< 0, 0,-1>(),
                          stencil.template getValue< 0, 0,-2>());
    }
};
template<>
struct D1<BD_WENO5>
{
    /// Backward-biased fifth-order WENO difference: the negated forward
    /// WENO5 difference applied to the mirrored (-3..+2) neighborhood.
    template<typename ValueType>
    static ValueType difference(const ValueType& xm3, const ValueType& xm2, const ValueType& xm1,
                                const ValueType& xm0, const ValueType& xp1, const ValueType& xp2)
    {
        return -D1<FD_WENO5>::difference(xm3, xm2, xm1, xm0, xp1, xp2);
    }

    // --- random access versions ---
    template<typename AccessorT>
    static typename AccessorT::ValueType inX(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(-3, 0, 0)),
                          grid.getValue(ijk.offsetBy(-2, 0, 0)),
                          grid.getValue(ijk.offsetBy(-1, 0, 0)),
                          grid.getValue(ijk),
                          grid.getValue(ijk.offsetBy( 1, 0, 0)),
                          grid.getValue(ijk.offsetBy( 2, 0, 0)));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inY(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, -3, 0)),
                          grid.getValue(ijk.offsetBy(0, -2, 0)),
                          grid.getValue(ijk.offsetBy(0, -1, 0)),
                          grid.getValue(ijk),
                          grid.getValue(ijk.offsetBy(0,  1, 0)),
                          grid.getValue(ijk.offsetBy(0,  2, 0)));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inZ(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, 0, -3)),
                          grid.getValue(ijk.offsetBy(0, 0, -2)),
                          grid.getValue(ijk.offsetBy(0, 0, -1)),
                          grid.getValue(ijk),
                          grid.getValue(ijk.offsetBy(0, 0,  1)),
                          grid.getValue(ijk.offsetBy(0, 0,  2)));
    }

    // --- stencil access versions ---
    template<typename StencilT>
    static typename StencilT::ValueType inX(const StencilT& stencil)
    {
        return difference(stencil.template getValue<-3, 0, 0>(),
                          stencil.template getValue<-2, 0, 0>(),
                          stencil.template getValue<-1, 0, 0>(),
                          stencil.template getValue< 0, 0, 0>(),
                          stencil.template getValue< 1, 0, 0>(),
                          stencil.template getValue< 2, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inY(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0,-3, 0>(),
                          stencil.template getValue< 0,-2, 0>(),
                          stencil.template getValue< 0,-1, 0>(),
                          stencil.template getValue< 0, 0, 0>(),
                          stencil.template getValue< 0, 1, 0>(),
                          stencil.template getValue< 0, 2, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inZ(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 0,-3>(),
                          stencil.template getValue< 0, 0,-2>(),
                          stencil.template getValue< 0, 0,-1>(),
                          stencil.template getValue< 0, 0, 0>(),
                          stencil.template getValue< 0, 0, 1>(),
                          stencil.template getValue< 0, 0, 2>());
    }
};
template<>
struct D1<BD_HJWENO5>
{
    /// Backward-biased fifth-order HJ WENO difference: the negated forward
    /// HJ WENO5 difference applied to the mirrored (-3..+2) neighborhood.
    template<typename ValueType>
    static ValueType difference(const ValueType& xm3, const ValueType& xm2, const ValueType& xm1,
                                const ValueType& xm0, const ValueType& xp1, const ValueType& xp2)
    {
        return -D1<FD_HJWENO5>::difference(xm3, xm2, xm1, xm0, xp1, xp2);
    }

    // --- random access versions ---
    template<typename AccessorT>
    static typename AccessorT::ValueType inX(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(-3, 0, 0)),
                          grid.getValue(ijk.offsetBy(-2, 0, 0)),
                          grid.getValue(ijk.offsetBy(-1, 0, 0)),
                          grid.getValue(ijk),
                          grid.getValue(ijk.offsetBy( 1, 0, 0)),
                          grid.getValue(ijk.offsetBy( 2, 0, 0)));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inY(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, -3, 0)),
                          grid.getValue(ijk.offsetBy(0, -2, 0)),
                          grid.getValue(ijk.offsetBy(0, -1, 0)),
                          grid.getValue(ijk),
                          grid.getValue(ijk.offsetBy(0,  1, 0)),
                          grid.getValue(ijk.offsetBy(0,  2, 0)));
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType inZ(const AccessorT& grid, const Coord& ijk)
    {
        return difference(grid.getValue(ijk.offsetBy(0, 0, -3)),
                          grid.getValue(ijk.offsetBy(0, 0, -2)),
                          grid.getValue(ijk.offsetBy(0, 0, -1)),
                          grid.getValue(ijk),
                          grid.getValue(ijk.offsetBy(0, 0,  1)),
                          grid.getValue(ijk.offsetBy(0, 0,  2)));
    }

    // --- stencil access versions ---
    template<typename StencilT>
    static typename StencilT::ValueType inX(const StencilT& stencil)
    {
        return difference(stencil.template getValue<-3, 0, 0>(),
                          stencil.template getValue<-2, 0, 0>(),
                          stencil.template getValue<-1, 0, 0>(),
                          stencil.template getValue< 0, 0, 0>(),
                          stencil.template getValue< 1, 0, 0>(),
                          stencil.template getValue< 2, 0, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inY(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0,-3, 0>(),
                          stencil.template getValue< 0,-2, 0>(),
                          stencil.template getValue< 0,-1, 0>(),
                          stencil.template getValue< 0, 0, 0>(),
                          stencil.template getValue< 0, 1, 0>(),
                          stencil.template getValue< 0, 2, 0>());
    }
    template<typename StencilT>
    static typename StencilT::ValueType inZ(const StencilT& stencil)
    {
        return difference(stencil.template getValue< 0, 0,-3>(),
                          stencil.template getValue< 0, 0,-2>(),
                          stencil.template getValue< 0, 0,-1>(),
                          stencil.template getValue< 0, 0, 0>(),
                          stencil.template getValue< 0, 0, 1>(),
                          stencil.template getValue< 0, 0, 2>());
    }
};
/// @brief Primary template for first differencing of one component of a
/// vector-valued grid: delegates to D1 and extracts component @a n.
template<DScheme DiffScheme>
struct D1Vec
{
    // --- random access versions ---
    template<typename AccessorT>
    static typename AccessorT::ValueType::value_type
    inX(const AccessorT& grid, const Coord& ijk, int n) { return D1<DiffScheme>::inX(grid, ijk)[n]; }

    template<typename AccessorT>
    static typename AccessorT::ValueType::value_type
    inY(const AccessorT& grid, const Coord& ijk, int n) { return D1<DiffScheme>::inY(grid, ijk)[n]; }

    template<typename AccessorT>
    static typename AccessorT::ValueType::value_type
    inZ(const AccessorT& grid, const Coord& ijk, int n) { return D1<DiffScheme>::inZ(grid, ijk)[n]; }

    // --- stencil access versions ---
    template<typename StencilT>
    static typename StencilT::ValueType::value_type
    inX(const StencilT& stencil, int n) { return D1<DiffScheme>::inX(stencil)[n]; }

    template<typename StencilT>
    static typename StencilT::ValueType::value_type
    inY(const StencilT& stencil, int n) { return D1<DiffScheme>::inY(stencil)[n]; }

    template<typename StencilT>
    static typename StencilT::ValueType::value_type
    inZ(const StencilT& stencil, int n) { return D1<DiffScheme>::inZ(stencil)[n]; }
};
template<>
struct D1Vec<CD_2NDT>
{
    /// Specialization that extracts component @a n from each neighbor first,
    /// so only scalar differences are formed.

    // --- random access versions ---
    template<typename AccessorT>
    static typename AccessorT::ValueType::value_type
    inX(const AccessorT& grid, const Coord& ijk, int n)
    {
        return D1<CD_2NDT>::difference(grid.getValue(ijk.offsetBy( 1, 0, 0))[n],
                                       grid.getValue(ijk.offsetBy(-1, 0, 0))[n]);
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType::value_type
    inY(const AccessorT& grid, const Coord& ijk, int n)
    {
        return D1<CD_2NDT>::difference(grid.getValue(ijk.offsetBy(0,  1, 0))[n],
                                       grid.getValue(ijk.offsetBy(0, -1, 0))[n]);
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType::value_type
    inZ(const AccessorT& grid, const Coord& ijk, int n)
    {
        return D1<CD_2NDT>::difference(grid.getValue(ijk.offsetBy(0, 0,  1))[n],
                                       grid.getValue(ijk.offsetBy(0, 0, -1))[n]);
    }

    // --- stencil access versions ---
    template<typename StencilT>
    static typename StencilT::ValueType::value_type inX(const StencilT& stencil, int n)
    {
        return D1<CD_2NDT>::difference(stencil.template getValue< 1, 0, 0>()[n],
                                       stencil.template getValue<-1, 0, 0>()[n]);
    }
    template<typename StencilT>
    static typename StencilT::ValueType::value_type inY(const StencilT& stencil, int n)
    {
        return D1<CD_2NDT>::difference(stencil.template getValue< 0, 1, 0>()[n],
                                       stencil.template getValue< 0,-1, 0>()[n]);
    }
    template<typename StencilT>
    static typename StencilT::ValueType::value_type inZ(const StencilT& stencil, int n)
    {
        return D1<CD_2NDT>::difference(stencil.template getValue< 0, 0, 1>()[n],
                                       stencil.template getValue< 0, 0,-1>()[n]);
    }
};
template<>
struct D1Vec<CD_2ND>
{
    /// Specialization that extracts component @a n from each neighbor first,
    /// so only scalar differences are formed.

    // --- random access versions ---
    template<typename AccessorT>
    static typename AccessorT::ValueType::value_type
    inX(const AccessorT& grid, const Coord& ijk, int n)
    {
        return D1<CD_2ND>::difference(grid.getValue(ijk.offsetBy( 1, 0, 0))[n],
                                      grid.getValue(ijk.offsetBy(-1, 0, 0))[n]);
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType::value_type
    inY(const AccessorT& grid, const Coord& ijk, int n)
    {
        return D1<CD_2ND>::difference(grid.getValue(ijk.offsetBy(0,  1, 0))[n],
                                      grid.getValue(ijk.offsetBy(0, -1, 0))[n]);
    }
    template<typename AccessorT>
    static typename AccessorT::ValueType::value_type
    inZ(const AccessorT& grid, const Coord& ijk, int n)
    {
        return D1<CD_2ND>::difference(grid.getValue(ijk.offsetBy(0, 0,  1))[n],
                                      grid.getValue(ijk.offsetBy(0, 0, -1))[n]);
    }

    // --- stencil access versions ---
    template<typename StencilT>
    static typename StencilT::ValueType::value_type inX(const StencilT& stencil, int n)
    {
        return D1<CD_2ND>::difference(stencil.template getValue< 1, 0, 0>()[n],
                                      stencil.template getValue<-1, 0, 0>()[n]);
    }
    template<typename StencilT>
    static typename StencilT::ValueType::value_type inY(const StencilT& stencil, int n)
    {
        return D1<CD_2ND>::difference(stencil.template getValue< 0, 1, 0>()[n],
                                      stencil.template getValue< 0,-1, 0>()[n]);
    }
    template<typename StencilT>
    static typename StencilT::ValueType::value_type inZ(const StencilT& stencil, int n)
    {
        return D1<CD_2ND>::difference(stencil.template getValue< 0, 0, 1>()[n],
                                      stencil.template getValue< 0, 0,-1>()[n]);
    }
};
template<>
struct D1Vec<CD_4TH> {
    /// @brief Fourth-order central first derivative of component @a n of a
    /// vector-valued grid, delegating to D1<CD_4TH>::difference with the
    /// samples at offsets +2, +1, -1, -2 along the requested axis.
    // random access version
    template<typename Accessor>
    static typename Accessor::ValueType::value_type
    inX(const Accessor& grid, const Coord& ijk, int n)
    {
        return D1<CD_4TH>::difference(
            grid.getValue(ijk.offsetBy(2, 0, 0))[n], grid.getValue(ijk.offsetBy( 1, 0, 0))[n],
            grid.getValue(ijk.offsetBy(-1,0, 0))[n], grid.getValue(ijk.offsetBy(-2, 0, 0))[n]);
    }
    template<typename Accessor>
    static typename Accessor::ValueType::value_type
    inY(const Accessor& grid, const Coord& ijk, int n)
    {
        return D1<CD_4TH>::difference(
            grid.getValue(ijk.offsetBy( 0, 2, 0))[n], grid.getValue(ijk.offsetBy( 0, 1, 0))[n],
            grid.getValue(ijk.offsetBy( 0,-1, 0))[n], grid.getValue(ijk.offsetBy( 0,-2, 0))[n]);
    }
    template<typename Accessor>
    static typename Accessor::ValueType::value_type
    inZ(const Accessor& grid, const Coord& ijk, int n)
    {
        return D1<CD_4TH>::difference(
            grid.getValue(ijk.offsetBy(0,0, 2))[n], grid.getValue(ijk.offsetBy( 0, 0, 1))[n],
            grid.getValue(ijk.offsetBy(0,0,-1))[n], grid.getValue(ijk.offsetBy( 0, 0,-2))[n]);
    }
    // stencil access version
    template<typename Stencil>
    static typename Stencil::ValueType::value_type inX(const Stencil& S, int n)
    {
        return D1<CD_4TH>::difference(
            S.template getValue< 2, 0, 0>()[n], S.template getValue< 1, 0, 0>()[n],
            S.template getValue<-1, 0, 0>()[n], S.template getValue<-2, 0, 0>()[n] );
    }
    template<typename Stencil>
    static typename Stencil::ValueType::value_type inY(const Stencil& S, int n)
    {
        return D1<CD_4TH>::difference(
            S.template getValue< 0, 2, 0>()[n], S.template getValue< 0, 1, 0>()[n],
            S.template getValue< 0,-1, 0>()[n], S.template getValue< 0,-2, 0>()[n]);
    }
    template<typename Stencil>
    static typename Stencil::ValueType::value_type inZ(const Stencil& S, int n)
    {
        return D1<CD_4TH>::difference(
            S.template getValue< 0, 0, 2>()[n], S.template getValue< 0, 0, 1>()[n],
            S.template getValue< 0, 0,-1>()[n], S.template getValue< 0, 0,-2>()[n]);
    }
};
template<>
struct D1Vec<CD_6TH>
{
    /// @brief Sixth-order central first derivative of component @a n of a
    /// vector-valued grid, delegating to D1<CD_6TH>::difference with the
    /// samples at offsets +3..+1 and -1..-3 along the requested axis.
    // random access version
    template<typename Accessor>
    static typename Accessor::ValueType::value_type
    inX(const Accessor& grid, const Coord& ijk, int n)
    {
        return D1<CD_6TH>::difference(
            grid.getValue(ijk.offsetBy( 3, 0, 0))[n], grid.getValue(ijk.offsetBy( 2, 0, 0))[n],
            grid.getValue(ijk.offsetBy( 1, 0, 0))[n], grid.getValue(ijk.offsetBy(-1, 0, 0))[n],
            grid.getValue(ijk.offsetBy(-2, 0, 0))[n], grid.getValue(ijk.offsetBy(-3, 0, 0))[n] );
    }
    template<typename Accessor>
    static typename Accessor::ValueType::value_type
    inY(const Accessor& grid, const Coord& ijk, int n)
    {
        return D1<CD_6TH>::difference(
            grid.getValue(ijk.offsetBy( 0, 3, 0))[n], grid.getValue(ijk.offsetBy( 0, 2, 0))[n],
            grid.getValue(ijk.offsetBy( 0, 1, 0))[n], grid.getValue(ijk.offsetBy( 0,-1, 0))[n],
            grid.getValue(ijk.offsetBy( 0,-2, 0))[n], grid.getValue(ijk.offsetBy( 0,-3, 0))[n] );
    }
    template<typename Accessor>
    static typename Accessor::ValueType::value_type
    inZ(const Accessor& grid, const Coord& ijk, int n)
    {
        return D1<CD_6TH>::difference(
            grid.getValue(ijk.offsetBy( 0, 0, 3))[n], grid.getValue(ijk.offsetBy( 0, 0, 2))[n],
            grid.getValue(ijk.offsetBy( 0, 0, 1))[n], grid.getValue(ijk.offsetBy( 0, 0,-1))[n],
            grid.getValue(ijk.offsetBy( 0, 0,-2))[n], grid.getValue(ijk.offsetBy( 0, 0,-3))[n] );
    }
    // stencil access version
    template<typename Stencil>
    static typename Stencil::ValueType::value_type inX(const Stencil& S, int n)
    {
        return D1<CD_6TH>::difference(
            S.template getValue< 3, 0, 0>()[n], S.template getValue< 2, 0, 0>()[n],
            S.template getValue< 1, 0, 0>()[n], S.template getValue<-1, 0, 0>()[n],
            S.template getValue<-2, 0, 0>()[n], S.template getValue<-3, 0, 0>()[n] );
    }
    template<typename Stencil>
    static typename Stencil::ValueType::value_type inY(const Stencil& S, int n)
    {
        return D1<CD_6TH>::difference(
            S.template getValue< 0, 3, 0>()[n], S.template getValue< 0, 2, 0>()[n],
            S.template getValue< 0, 1, 0>()[n], S.template getValue< 0,-1, 0>()[n],
            S.template getValue< 0,-2, 0>()[n], S.template getValue< 0,-3, 0>()[n] );
    }
    template<typename Stencil>
    static typename Stencil::ValueType::value_type inZ(const Stencil& S, int n)
    {
        return D1<CD_6TH>::difference(
            S.template getValue< 0, 0, 3>()[n], S.template getValue< 0, 0, 2>()[n],
            S.template getValue< 0, 0, 1>()[n], S.template getValue< 0, 0,-1>()[n],
            S.template getValue< 0, 0,-2>()[n], S.template getValue< 0, 0,-3>()[n] );
    }
};
/// @brief Second-derivative operator, parameterized on the difference
/// scheme.  The primary template only declares the interface; the
/// implementations are provided by the specializations for CD_SECOND,
/// CD_FOURTH and CD_SIXTH below.
template<DDScheme DiffScheme>
struct D2
{
    // pure second derivatives, random access versions
    template<typename Accessor>
    static typename Accessor::ValueType inX(const Accessor& grid, const Coord& ijk);
    template<typename Accessor>
    static typename Accessor::ValueType inY(const Accessor& grid, const Coord& ijk);
    template<typename Accessor>
    static typename Accessor::ValueType inZ(const Accessor& grid, const Coord& ijk);
    // cross derivatives
    template<typename Accessor>
    static typename Accessor::ValueType inXandY(const Accessor& grid, const Coord& ijk);
    template<typename Accessor>
    static typename Accessor::ValueType inXandZ(const Accessor& grid, const Coord& ijk);
    template<typename Accessor>
    static typename Accessor::ValueType inYandZ(const Accessor& grid, const Coord& ijk);
    // stencil access version
    template<typename Stencil>
    static typename Stencil::ValueType inX(const Stencil& S);
    template<typename Stencil>
    static typename Stencil::ValueType inY(const Stencil& S);
    template<typename Stencil>
    static typename Stencil::ValueType inZ(const Stencil& S);
    // cross derivatives
    template<typename Stencil>
    static typename Stencil::ValueType inXandY(const Stencil& S);
    template<typename Stencil>
    static typename Stencil::ValueType inXandZ(const Stencil& S);
    template<typename Stencil>
    static typename Stencil::ValueType inYandZ(const Stencil& S);
};
/// @brief Second-order central second-derivative operator.
template<>
struct D2<CD_SECOND>
{
    // the difference operator: f'' ~ f(+1) + f(-1) - 2 f(0)
    template <typename ValueType>
    static ValueType difference(const ValueType& xp1, const ValueType& xp0, const ValueType& xm1)
    {
        return xp1 + xm1 - ValueType(2)*xp0;
    }
    // mixed derivative from the four diagonal neighbors
    template <typename ValueType>
    static ValueType crossdifference(const ValueType& xpyp, const ValueType& xpym,
                                     const ValueType& xmyp, const ValueType& xmym)
    {
        return ValueType(0.25)*(xpyp + xmym - xpym - xmyp);
    }
    // random access version
    template<typename Accessor>
    static typename Accessor::ValueType inX(const Accessor& grid, const Coord& ijk)
    {
        return difference( grid.getValue(ijk.offsetBy( 1,0,0)), grid.getValue(ijk),
                           grid.getValue(ijk.offsetBy(-1,0,0)) );
    }
    template<typename Accessor>
    static typename Accessor::ValueType inY(const Accessor& grid, const Coord& ijk)
    {
        return difference( grid.getValue(ijk.offsetBy(0, 1,0)), grid.getValue(ijk),
                           grid.getValue(ijk.offsetBy(0,-1,0)) );
    }
    template<typename Accessor>
    static typename Accessor::ValueType inZ(const Accessor& grid, const Coord& ijk)
    {
        return difference( grid.getValue(ijk.offsetBy( 0,0, 1)), grid.getValue(ijk),
                           grid.getValue(ijk.offsetBy( 0,0,-1)) );
    }
    // cross derivatives
    template<typename Accessor>
    static typename Accessor::ValueType inXandY(const Accessor& grid, const Coord& ijk)
    {
        return crossdifference(
            grid.getValue(ijk.offsetBy(1, 1,0)), grid.getValue(ijk.offsetBy( 1,-1,0)),
            grid.getValue(ijk.offsetBy(-1,1,0)), grid.getValue(ijk.offsetBy(-1,-1,0)));
    }
    template<typename Accessor>
    static typename Accessor::ValueType inXandZ(const Accessor& grid, const Coord& ijk)
    {
        return crossdifference(
            grid.getValue(ijk.offsetBy(1,0, 1)), grid.getValue(ijk.offsetBy(1, 0,-1)),
            grid.getValue(ijk.offsetBy(-1,0,1)), grid.getValue(ijk.offsetBy(-1,0,-1)) );
    }
    template<typename Accessor>
    static typename Accessor::ValueType inYandZ(const Accessor& grid, const Coord& ijk)
    {
        return crossdifference(
            grid.getValue(ijk.offsetBy(0, 1,1)), grid.getValue(ijk.offsetBy(0, 1,-1)),
            grid.getValue(ijk.offsetBy(0,-1,1)), grid.getValue(ijk.offsetBy(0,-1,-1)) );
    }
    // stencil access version
    template<typename Stencil>
    static typename Stencil::ValueType inX(const Stencil& S)
    {
        return difference( S.template getValue< 1, 0, 0>(), S.template getValue< 0, 0, 0>(),
                           S.template getValue<-1, 0, 0>() );
    }
    template<typename Stencil>
    static typename Stencil::ValueType inY(const Stencil& S)
    {
        return difference( S.template getValue< 0, 1, 0>(), S.template getValue< 0, 0, 0>(),
                           S.template getValue< 0,-1, 0>() );
    }
    template<typename Stencil>
    static typename Stencil::ValueType inZ(const Stencil& S)
    {
        return difference( S.template getValue< 0, 0, 1>(), S.template getValue< 0, 0, 0>(),
                           S.template getValue< 0, 0,-1>() );
    }
    // cross derivatives
    template<typename Stencil>
    static typename Stencil::ValueType inXandY(const Stencil& S)
    {
        return crossdifference(S.template getValue< 1, 1, 0>(), S.template getValue< 1,-1, 0>(),
                               S.template getValue<-1, 1, 0>(), S.template getValue<-1,-1, 0>() );
    }
    template<typename Stencil>
    static typename Stencil::ValueType inXandZ(const Stencil& S)
    {
        return crossdifference(S.template getValue< 1, 0, 1>(), S.template getValue< 1, 0,-1>(),
                               S.template getValue<-1, 0, 1>(), S.template getValue<-1, 0,-1>() );
    }
    template<typename Stencil>
    static typename Stencil::ValueType inYandZ(const Stencil& S)
    {
        return crossdifference(S.template getValue< 0, 1, 1>(), S.template getValue< 0, 1,-1>(),
                               S.template getValue< 0,-1, 1>(), S.template getValue< 0,-1,-1>() );
    }
};
/// @brief Fourth-order central second-derivative operator.
template<>
struct D2<CD_FOURTH>
{
    // the difference operator
    template <typename ValueType>
    static ValueType difference(const ValueType& xp2, const ValueType& xp1, const ValueType& xp0,
                                const ValueType& xm1, const ValueType& xm2) {
        return ValueType(-1./12.)*(xp2 + xm2) + ValueType(4./3.)*(xp1 + xm1) -ValueType(2.5)*xp0;
    }
    // mixed derivative built from two nested fourth-order first differences
    template <typename ValueType>
    static ValueType crossdifference(const ValueType& xp2yp2, const ValueType& xp2yp1,
                                     const ValueType& xp2ym1, const ValueType& xp2ym2,
                                     const ValueType& xp1yp2, const ValueType& xp1yp1,
                                     const ValueType& xp1ym1, const ValueType& xp1ym2,
                                     const ValueType& xm2yp2, const ValueType& xm2yp1,
                                     const ValueType& xm2ym1, const ValueType& xm2ym2,
                                     const ValueType& xm1yp2, const ValueType& xm1yp1,
                                     const ValueType& xm1ym1, const ValueType& xm1ym2 ) {
        ValueType tmp1 =
            ValueType(2./3.0)*(xp1yp1 - xm1yp1 - xp1ym1 + xm1ym1)-
            ValueType(1./12.)*(xp2yp1 - xm2yp1 - xp2ym1 + xm2ym1);
        ValueType tmp2 =
            ValueType(2./3.0)*(xp1yp2 - xm1yp2 - xp1ym2 + xm1ym2)-
            ValueType(1./12.)*(xp2yp2 - xm2yp2 - xp2ym2 + xm2ym2);
        return ValueType(2./3.)*tmp1 - ValueType(1./12.)*tmp2;
    }
    // random access version
    template<typename Accessor>
    static typename Accessor::ValueType inX(const Accessor& grid, const Coord& ijk)
    {
        return difference(
            grid.getValue(ijk.offsetBy(2,0,0)), grid.getValue(ijk.offsetBy( 1,0,0)),
            grid.getValue(ijk),
            grid.getValue(ijk.offsetBy(-1,0,0)), grid.getValue(ijk.offsetBy(-2, 0, 0)));
    }
    template<typename Accessor>
    static typename Accessor::ValueType inY(const Accessor& grid, const Coord& ijk)
    {
        return difference(
            grid.getValue(ijk.offsetBy(0, 2,0)), grid.getValue(ijk.offsetBy(0, 1,0)),
            grid.getValue(ijk),
            grid.getValue(ijk.offsetBy(0,-1,0)), grid.getValue(ijk.offsetBy(0,-2, 0)));
    }
    template<typename Accessor>
    static typename Accessor::ValueType inZ(const Accessor& grid, const Coord& ijk)
    {
        return difference(
            grid.getValue(ijk.offsetBy(0,0, 2)), grid.getValue(ijk.offsetBy(0, 0,1)),
            grid.getValue(ijk),
            grid.getValue(ijk.offsetBy(0,0,-1)), grid.getValue(ijk.offsetBy(0,0,-2)));
    }
    // cross derivatives: fourth-order difference (in the second axis) of
    // the D1<CD_4TH> first derivative taken along the first axis
    template<typename Accessor>
    static typename Accessor::ValueType inXandY(const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        typename Accessor::ValueType tmp1 =
            D1<CD_4TH>::inX(grid, ijk.offsetBy(0, 1, 0)) -
            D1<CD_4TH>::inX(grid, ijk.offsetBy(0,-1, 0));
        typename Accessor::ValueType tmp2 =
            D1<CD_4TH>::inX(grid, ijk.offsetBy(0, 2, 0)) -
            D1<CD_4TH>::inX(grid, ijk.offsetBy(0,-2, 0));
        return ValueType(2./3.)*tmp1 - ValueType(1./12.)*tmp2;
    }
    template<typename Accessor>
    static typename Accessor::ValueType inXandZ(const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        typename Accessor::ValueType tmp1 =
            D1<CD_4TH>::inX(grid, ijk.offsetBy(0, 0, 1)) -
            D1<CD_4TH>::inX(grid, ijk.offsetBy(0, 0,-1));
        typename Accessor::ValueType tmp2 =
            D1<CD_4TH>::inX(grid, ijk.offsetBy(0, 0, 2)) -
            D1<CD_4TH>::inX(grid, ijk.offsetBy(0, 0,-2));
        return ValueType(2./3.)*tmp1 - ValueType(1./12.)*tmp2;
    }
    template<typename Accessor>
    static typename Accessor::ValueType inYandZ(const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        typename Accessor::ValueType tmp1 =
            D1<CD_4TH>::inY(grid, ijk.offsetBy(0, 0, 1)) -
            D1<CD_4TH>::inY(grid, ijk.offsetBy(0, 0,-1));
        typename Accessor::ValueType tmp2 =
            D1<CD_4TH>::inY(grid, ijk.offsetBy(0, 0, 2)) -
            D1<CD_4TH>::inY(grid, ijk.offsetBy(0, 0,-2));
        return ValueType(2./3.)*tmp1 - ValueType(1./12.)*tmp2;
    }
    // stencil access version
    template<typename Stencil>
    static typename Stencil::ValueType inX(const Stencil& S)
    {
        return difference(S.template getValue< 2, 0, 0>(), S.template getValue< 1, 0, 0>(),
                          S.template getValue< 0, 0, 0>(),
                          S.template getValue<-1, 0, 0>(), S.template getValue<-2, 0, 0>() );
    }
    template<typename Stencil>
    static typename Stencil::ValueType inY(const Stencil& S)
    {
        return difference(S.template getValue< 0, 2, 0>(), S.template getValue< 0, 1, 0>(),
                          S.template getValue< 0, 0, 0>(),
                          S.template getValue< 0,-1, 0>(), S.template getValue< 0,-2, 0>() );
    }
    template<typename Stencil>
    static typename Stencil::ValueType inZ(const Stencil& S)
    {
        return difference(S.template getValue< 0, 0, 2>(), S.template getValue< 0, 0, 1>(),
                          S.template getValue< 0, 0, 0>(),
                          S.template getValue< 0, 0,-1>(), S.template getValue< 0, 0,-2>() );
    }
    // cross derivatives
    template<typename Stencil>
    static typename Stencil::ValueType inXandY(const Stencil& S)
    {
        return crossdifference(
            S.template getValue< 2, 2, 0>(), S.template getValue< 2, 1, 0>(),
            S.template getValue< 2,-1, 0>(), S.template getValue< 2,-2, 0>(),
            S.template getValue< 1, 2, 0>(), S.template getValue< 1, 1, 0>(),
            S.template getValue< 1,-1, 0>(), S.template getValue< 1,-2, 0>(),
            S.template getValue<-2, 2, 0>(), S.template getValue<-2, 1, 0>(),
            S.template getValue<-2,-1, 0>(), S.template getValue<-2,-2, 0>(),
            S.template getValue<-1, 2, 0>(), S.template getValue<-1, 1, 0>(),
            S.template getValue<-1,-1, 0>(), S.template getValue<-1,-2, 0>() );
    }
    template<typename Stencil>
    static typename Stencil::ValueType inXandZ(const Stencil& S)
    {
        return crossdifference(
            S.template getValue< 2, 0, 2>(), S.template getValue< 2, 0, 1>(),
            S.template getValue< 2, 0,-1>(), S.template getValue< 2, 0,-2>(),
            S.template getValue< 1, 0, 2>(), S.template getValue< 1, 0, 1>(),
            S.template getValue< 1, 0,-1>(), S.template getValue< 1, 0,-2>(),
            S.template getValue<-2, 0, 2>(), S.template getValue<-2, 0, 1>(),
            S.template getValue<-2, 0,-1>(), S.template getValue<-2, 0,-2>(),
            S.template getValue<-1, 0, 2>(), S.template getValue<-1, 0, 1>(),
            S.template getValue<-1, 0,-1>(), S.template getValue<-1, 0,-2>() );
    }
    template<typename Stencil>
    static typename Stencil::ValueType inYandZ(const Stencil& S)
    {
        return crossdifference(
            S.template getValue< 0, 2, 2>(), S.template getValue< 0, 2, 1>(),
            S.template getValue< 0, 2,-1>(), S.template getValue< 0, 2,-2>(),
            S.template getValue< 0, 1, 2>(), S.template getValue< 0, 1, 1>(),
            S.template getValue< 0, 1,-1>(), S.template getValue< 0, 1,-2>(),
            S.template getValue< 0,-2, 2>(), S.template getValue< 0,-2, 1>(),
            S.template getValue< 0,-2,-1>(), S.template getValue< 0,-2,-2>(),
            S.template getValue< 0,-1, 2>(), S.template getValue< 0,-1, 1>(),
            S.template getValue< 0,-1,-1>(), S.template getValue< 0,-1,-2>() );
    }
};
/// @brief Sixth-order central second-derivative operator.
template<>
struct D2<CD_SIXTH>
{
    // the difference operator
    template <typename ValueType>
    static ValueType difference(const ValueType& xp3, const ValueType& xp2, const ValueType& xp1,
                                const ValueType& xp0,
                                const ValueType& xm1, const ValueType& xm2, const ValueType& xm3)
    {
        return ValueType(1./90.)*(xp3 + xm3) - ValueType(3./20.)*(xp2 + xm2)
            + ValueType(1.5)*(xp1 + xm1) - ValueType(49./18.)*xp0;
    }
    // mixed derivative built from three nested sixth-order first differences;
    // argument names encode the (x, y) sample offsets, e.g. xp2ym1 = (+2, -1)
    template <typename ValueType>
    static ValueType crossdifference( const ValueType& xp1yp1,const ValueType& xm1yp1,
                                      const ValueType& xp1ym1,const ValueType& xm1ym1,
                                      const ValueType& xp2yp1,const ValueType& xm2yp1,
                                      const ValueType& xp2ym1,const ValueType& xm2ym1,
                                      const ValueType& xp3yp1,const ValueType& xm3yp1,
                                      const ValueType& xp3ym1,const ValueType& xm3ym1,
                                      const ValueType& xp1yp2,const ValueType& xm1yp2,
                                      const ValueType& xp1ym2,const ValueType& xm1ym2,
                                      const ValueType& xp2yp2,const ValueType& xm2yp2,
                                      const ValueType& xp2ym2,const ValueType& xm2ym2,
                                      const ValueType& xp3yp2,const ValueType& xm3yp2,
                                      const ValueType& xp3ym2,const ValueType& xm3ym2,
                                      const ValueType& xp1yp3,const ValueType& xm1yp3,
                                      const ValueType& xp1ym3,const ValueType& xm1ym3,
                                      const ValueType& xp2yp3,const ValueType& xm2yp3,
                                      const ValueType& xp2ym3,const ValueType& xm2ym3,
                                      const ValueType& xp3yp3,const ValueType& xm3yp3,
                                      const ValueType& xp3ym3,const ValueType& xm3ym3 )
    {
        ValueType tmp1 =
            ValueType(0.7500)*(xp1yp1 - xm1yp1 - xp1ym1 + xm1ym1) -
            ValueType(0.1500)*(xp2yp1 - xm2yp1 - xp2ym1 + xm2ym1) +
            ValueType(1./60.)*(xp3yp1 - xm3yp1 - xp3ym1 + xm3ym1);
        ValueType tmp2 =
            ValueType(0.7500)*(xp1yp2 - xm1yp2 - xp1ym2 + xm1ym2) -
            ValueType(0.1500)*(xp2yp2 - xm2yp2 - xp2ym2 + xm2ym2) +
            ValueType(1./60.)*(xp3yp2 - xm3yp2 - xp3ym2 + xm3ym2);
        ValueType tmp3 =
            ValueType(0.7500)*(xp1yp3 - xm1yp3 - xp1ym3 + xm1ym3) -
            ValueType(0.1500)*(xp2yp3 - xm2yp3 - xp2ym3 + xm2ym3) +
            ValueType(1./60.)*(xp3yp3 - xm3yp3 - xp3ym3 + xm3ym3);
        return ValueType(0.75)*tmp1 - ValueType(0.15)*tmp2 + ValueType(1./60)*tmp3;
    }
    // random access version
    template<typename Accessor>
    static typename Accessor::ValueType inX(const Accessor& grid, const Coord& ijk)
    {
        return difference(
            grid.getValue(ijk.offsetBy( 3, 0, 0)), grid.getValue(ijk.offsetBy( 2, 0, 0)),
            grid.getValue(ijk.offsetBy( 1, 0, 0)), grid.getValue(ijk),
            grid.getValue(ijk.offsetBy(-1, 0, 0)), grid.getValue(ijk.offsetBy(-2, 0, 0)),
            grid.getValue(ijk.offsetBy(-3, 0, 0)) );
    }
    template<typename Accessor>
    static typename Accessor::ValueType inY(const Accessor& grid, const Coord& ijk)
    {
        return difference(
            grid.getValue(ijk.offsetBy( 0, 3, 0)), grid.getValue(ijk.offsetBy( 0, 2, 0)),
            grid.getValue(ijk.offsetBy( 0, 1, 0)), grid.getValue(ijk),
            grid.getValue(ijk.offsetBy( 0,-1, 0)), grid.getValue(ijk.offsetBy( 0,-2, 0)),
            grid.getValue(ijk.offsetBy( 0,-3, 0)) );
    }
    template<typename Accessor>
    static typename Accessor::ValueType inZ(const Accessor& grid, const Coord& ijk)
    {
        return difference(
            grid.getValue(ijk.offsetBy( 0, 0, 3)), grid.getValue(ijk.offsetBy( 0, 0, 2)),
            grid.getValue(ijk.offsetBy( 0, 0, 1)), grid.getValue(ijk),
            grid.getValue(ijk.offsetBy( 0, 0,-1)), grid.getValue(ijk.offsetBy( 0, 0,-2)),
            grid.getValue(ijk.offsetBy( 0, 0,-3)) );
    }
    // cross derivatives: sixth-order difference (in the second axis) of
    // the D1<CD_6TH> first derivative taken along the first axis
    template<typename Accessor>
    static typename Accessor::ValueType inXandY(const Accessor& grid, const Coord& ijk)
    {
        using ValueT = typename Accessor::ValueType;
        ValueT tmp1 =
            D1<CD_6TH>::inX(grid, ijk.offsetBy(0, 1, 0)) -
            D1<CD_6TH>::inX(grid, ijk.offsetBy(0,-1, 0));
        ValueT tmp2 =
            D1<CD_6TH>::inX(grid, ijk.offsetBy(0, 2, 0)) -
            D1<CD_6TH>::inX(grid, ijk.offsetBy(0,-2, 0));
        ValueT tmp3 =
            D1<CD_6TH>::inX(grid, ijk.offsetBy(0, 3, 0)) -
            D1<CD_6TH>::inX(grid, ijk.offsetBy(0,-3, 0));
        return ValueT(0.75*tmp1 - 0.15*tmp2 + 1./60*tmp3);
    }
    template<typename Accessor>
    static typename Accessor::ValueType inXandZ(const Accessor& grid, const Coord& ijk)
    {
        using ValueT = typename Accessor::ValueType;
        ValueT tmp1 =
            D1<CD_6TH>::inX(grid, ijk.offsetBy(0, 0, 1)) -
            D1<CD_6TH>::inX(grid, ijk.offsetBy(0, 0,-1));
        ValueT tmp2 =
            D1<CD_6TH>::inX(grid, ijk.offsetBy(0, 0, 2)) -
            D1<CD_6TH>::inX(grid, ijk.offsetBy(0, 0,-2));
        ValueT tmp3 =
            D1<CD_6TH>::inX(grid, ijk.offsetBy(0, 0, 3)) -
            D1<CD_6TH>::inX(grid, ijk.offsetBy(0, 0,-3));
        return ValueT(0.75*tmp1 - 0.15*tmp2 + 1./60*tmp3);
    }
    template<typename Accessor>
    static typename Accessor::ValueType inYandZ(const Accessor& grid, const Coord& ijk)
    {
        using ValueT = typename Accessor::ValueType;
        ValueT tmp1 =
            D1<CD_6TH>::inY(grid, ijk.offsetBy(0, 0, 1)) -
            D1<CD_6TH>::inY(grid, ijk.offsetBy(0, 0,-1));
        ValueT tmp2 =
            D1<CD_6TH>::inY(grid, ijk.offsetBy(0, 0, 2)) -
            D1<CD_6TH>::inY(grid, ijk.offsetBy(0, 0,-2));
        ValueT tmp3 =
            D1<CD_6TH>::inY(grid, ijk.offsetBy(0, 0, 3)) -
            D1<CD_6TH>::inY(grid, ijk.offsetBy(0, 0,-3));
        return ValueT(0.75*tmp1 - 0.15*tmp2 + 1./60*tmp3);
    }
    // stencil access version
    template<typename Stencil>
    static typename Stencil::ValueType inX(const Stencil& S)
    {
        return difference( S.template getValue< 3, 0, 0>(), S.template getValue< 2, 0, 0>(),
                           S.template getValue< 1, 0, 0>(), S.template getValue< 0, 0, 0>(),
                           S.template getValue<-1, 0, 0>(), S.template getValue<-2, 0, 0>(),
                           S.template getValue<-3, 0, 0>() );
    }
    template<typename Stencil>
    static typename Stencil::ValueType inY(const Stencil& S)
    {
        return difference( S.template getValue< 0, 3, 0>(), S.template getValue< 0, 2, 0>(),
                           S.template getValue< 0, 1, 0>(), S.template getValue< 0, 0, 0>(),
                           S.template getValue< 0,-1, 0>(), S.template getValue< 0,-2, 0>(),
                           S.template getValue< 0,-3, 0>() );
    }
    template<typename Stencil>
    static typename Stencil::ValueType inZ(const Stencil& S)
    {
        return difference( S.template getValue< 0, 0, 3>(), S.template getValue< 0, 0, 2>(),
                           S.template getValue< 0, 0, 1>(), S.template getValue< 0, 0, 0>(),
                           S.template getValue< 0, 0,-1>(), S.template getValue< 0, 0,-2>(),
                           S.template getValue< 0, 0,-3>() );
    }
    template<typename Stencil>
    static typename Stencil::ValueType inXandY(const Stencil& S)
    {
        return crossdifference( S.template getValue< 1, 1, 0>(), S.template getValue<-1, 1, 0>(),
                                S.template getValue< 1,-1, 0>(), S.template getValue<-1,-1, 0>(),
                                S.template getValue< 2, 1, 0>(), S.template getValue<-2, 1, 0>(),
                                S.template getValue< 2,-1, 0>(), S.template getValue<-2,-1, 0>(),
                                S.template getValue< 3, 1, 0>(), S.template getValue<-3, 1, 0>(),
                                S.template getValue< 3,-1, 0>(), S.template getValue<-3,-1, 0>(),
                                S.template getValue< 1, 2, 0>(), S.template getValue<-1, 2, 0>(),
                                S.template getValue< 1,-2, 0>(), S.template getValue<-1,-2, 0>(),
                                S.template getValue< 2, 2, 0>(), S.template getValue<-2, 2, 0>(),
                                S.template getValue< 2,-2, 0>(), S.template getValue<-2,-2, 0>(),
                                S.template getValue< 3, 2, 0>(), S.template getValue<-3, 2, 0>(),
                                S.template getValue< 3,-2, 0>(), S.template getValue<-3,-2, 0>(),
                                S.template getValue< 1, 3, 0>(), S.template getValue<-1, 3, 0>(),
                                S.template getValue< 1,-3, 0>(), S.template getValue<-1,-3, 0>(),
                                S.template getValue< 2, 3, 0>(), S.template getValue<-2, 3, 0>(),
                                S.template getValue< 2,-3, 0>(), S.template getValue<-2,-3, 0>(),
                                S.template getValue< 3, 3, 0>(), S.template getValue<-3, 3, 0>(),
                                S.template getValue< 3,-3, 0>(), S.template getValue<-3,-3, 0>() );
    }
    template<typename Stencil>
    static typename Stencil::ValueType inXandZ(const Stencil& S)
    {
        return crossdifference( S.template getValue< 1, 0, 1>(), S.template getValue<-1, 0, 1>(),
                                S.template getValue< 1, 0,-1>(), S.template getValue<-1, 0,-1>(),
                                S.template getValue< 2, 0, 1>(), S.template getValue<-2, 0, 1>(),
                                S.template getValue< 2, 0,-1>(), S.template getValue<-2, 0,-1>(),
                                S.template getValue< 3, 0, 1>(), S.template getValue<-3, 0, 1>(),
                                S.template getValue< 3, 0,-1>(), S.template getValue<-3, 0,-1>(),
                                S.template getValue< 1, 0, 2>(), S.template getValue<-1, 0, 2>(),
                                S.template getValue< 1, 0,-2>(), S.template getValue<-1, 0,-2>(),
                                S.template getValue< 2, 0, 2>(), S.template getValue<-2, 0, 2>(),
                                S.template getValue< 2, 0,-2>(), S.template getValue<-2, 0,-2>(),
                                S.template getValue< 3, 0, 2>(), S.template getValue<-3, 0, 2>(),
                                S.template getValue< 3, 0,-2>(), S.template getValue<-3, 0,-2>(),
                                S.template getValue< 1, 0, 3>(), S.template getValue<-1, 0, 3>(),
                                S.template getValue< 1, 0,-3>(), S.template getValue<-1, 0,-3>(),
                                S.template getValue< 2, 0, 3>(), S.template getValue<-2, 0, 3>(),
                                S.template getValue< 2, 0,-3>(), S.template getValue<-2, 0,-3>(),
                                S.template getValue< 3, 0, 3>(), S.template getValue<-3, 0, 3>(),
                                S.template getValue< 3, 0,-3>(), S.template getValue<-3, 0,-3>() );
    }
    template<typename Stencil>
    static typename Stencil::ValueType inYandZ(const Stencil& S)
    {
        return crossdifference( S.template getValue< 0, 1, 1>(), S.template getValue< 0,-1, 1>(),
                                S.template getValue< 0, 1,-1>(), S.template getValue< 0,-1,-1>(),
                                S.template getValue< 0, 2, 1>(), S.template getValue< 0,-2, 1>(),
                                S.template getValue< 0, 2,-1>(), S.template getValue< 0,-2,-1>(),
                                S.template getValue< 0, 3, 1>(), S.template getValue< 0,-3, 1>(),
                                S.template getValue< 0, 3,-1>(), S.template getValue< 0,-3,-1>(),
                                S.template getValue< 0, 1, 2>(), S.template getValue< 0,-1, 2>(),
                                S.template getValue< 0, 1,-2>(), S.template getValue< 0,-1,-2>(),
                                S.template getValue< 0, 2, 2>(), S.template getValue< 0,-2, 2>(),
                                S.template getValue< 0, 2,-2>(), S.template getValue< 0,-2,-2>(),
                                S.template getValue< 0, 3, 2>(), S.template getValue< 0,-3, 2>(),
                                S.template getValue< 0, 3,-2>(), S.template getValue< 0,-3,-2>(),
                                S.template getValue< 0, 1, 3>(), S.template getValue< 0,-1, 3>(),
                                S.template getValue< 0, 1,-3>(), S.template getValue< 0,-1,-3>(),
                                S.template getValue< 0, 2, 3>(), S.template getValue< 0,-2, 3>(),
                                S.template getValue< 0, 2,-3>(), S.template getValue< 0,-2,-3>(),
                                S.template getValue< 0, 3, 3>(), S.template getValue< 0,-3, 3>(),
                                S.template getValue< 0, 3,-3>(), S.template getValue< 0,-3,-3>() );
    }
};
} // end math namespace
} // namespace OPENVDB_VERSION_NAME
} // end openvdb namespace
#endif // OPENVDB_MATH_FINITEDIFFERENCE_HAS_BEEN_INCLUDED
| 88,278 | C | 36.920533 | 99 | 0.562281 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/Vec4.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_MATH_VEC4_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_VEC4_HAS_BEEN_INCLUDED
#include <openvdb/Exceptions.h>
#include "Math.h"
#include "Tuple.h"
#include "Vec3.h"
#include <algorithm>
#include <cmath>
#include <type_traits>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
template<typename T> class Mat3;
template<typename T>
class Vec4: public Tuple<4, T>
{
public:
using value_type = T;
using ValueType = T;
/// Trivial constructor, the vector is NOT initialized
#if OPENVDB_ABI_VERSION_NUMBER >= 8
/// @note destructor, copy constructor, assignment operator and
/// move constructor are left to be defined by the compiler (default)
Vec4() = default;
#else
Vec4() {}
#endif
/// @brief Construct a vector all of whose components have the given value.
explicit Vec4(T val) { this->mm[0] = this->mm[1] = this->mm[2] = this->mm[3] = val; }
/// Constructor with four arguments, e.g. Vec4f v(1,2,3,4);
Vec4(T x, T y, T z, T w)
{
this->mm[0] = x;
this->mm[1] = y;
this->mm[2] = z;
this->mm[3] = w;
}
/// Constructor with array argument, e.g. float a[4]; Vec4f v(a);
template <typename Source>
Vec4(Source *a)
{
this->mm[0] = static_cast<T>(a[0]);
this->mm[1] = static_cast<T>(a[1]);
this->mm[2] = static_cast<T>(a[2]);
this->mm[3] = static_cast<T>(a[3]);
}
/// Conversion constructor
template<typename Source>
explicit Vec4(const Tuple<4, Source> &v)
{
this->mm[0] = static_cast<T>(v[0]);
this->mm[1] = static_cast<T>(v[1]);
this->mm[2] = static_cast<T>(v[2]);
this->mm[3] = static_cast<T>(v[3]);
}
/// @brief Construct a vector all of whose components have the given value,
/// which may be of an arithmetic type different from this vector's value type.
/// @details Type conversion warnings are suppressed.
template<typename Other>
explicit Vec4(Other val,
typename std::enable_if<std::is_arithmetic<Other>::value, Conversion>::type = Conversion{})
{
this->mm[0] = this->mm[1] = this->mm[2] = this->mm[3] = static_cast<T>(val);
}
/// Reference to the component, e.g. v.x() = 4.5f;
T& x() { return this->mm[0]; }
T& y() { return this->mm[1]; }
T& z() { return this->mm[2]; }
T& w() { return this->mm[3]; }
/// Get the component, e.g. float f = v.y();
T x() const { return this->mm[0]; }
T y() const { return this->mm[1]; }
T z() const { return this->mm[2]; }
T w() const { return this->mm[3]; }
T* asPointer() { return this->mm; }
const T* asPointer() const { return this->mm; }
/// Alternative indexed reference to the elements
T& operator()(int i) { return this->mm[i]; }
/// Alternative indexed constant reference to the elements,
T operator()(int i) const { return this->mm[i]; }
/// Returns a Vec3 with the first three elements of the Vec4.
Vec3<T> getVec3() const { return Vec3<T>(this->mm[0], this->mm[1], this->mm[2]); }
/// "this" vector gets initialized to [x, y, z, w],
/// calling v.init(); has same effect as calling v = Vec4::zero();
const Vec4<T>& init(T x=0, T y=0, T z=0, T w=0)
{
this->mm[0] = x; this->mm[1] = y; this->mm[2] = z; this->mm[3] = w;
return *this;
}
/// Set "this" vector to zero
const Vec4<T>& setZero()
{
this->mm[0] = 0; this->mm[1] = 0; this->mm[2] = 0; this->mm[3] = 0;
return *this;
}
/// Assignment operator
template<typename Source>
const Vec4<T>& operator=(const Vec4<Source> &v)
{
// note: don't static_cast because that suppresses warnings
this->mm[0] = v[0];
this->mm[1] = v[1];
this->mm[2] = v[2];
this->mm[3] = v[3];
return *this;
}
/// Test if "this" vector is equivalent to vector v with tolerance
/// of eps
bool eq(const Vec4<T> &v, T eps = static_cast<T>(1.0e-8)) const
{
return isApproxEqual(this->mm[0], v.mm[0], eps) &&
isApproxEqual(this->mm[1], v.mm[1], eps) &&
isApproxEqual(this->mm[2], v.mm[2], eps) &&
isApproxEqual(this->mm[3], v.mm[3], eps);
}
/// Negation operator, for e.g. v1 = -v2;
Vec4<T> operator-() const
{
return Vec4<T>(
-this->mm[0],
-this->mm[1],
-this->mm[2],
-this->mm[3]);
}
    /// this = v1 + v2
    /// "this", v1 and v2 need not be distinct objects, e.g. v.add(v1,v);
    template <typename T0, typename T1>
    const Vec4<T>& add(const Vec4<T0> &v1, const Vec4<T1> &v2)
    {
        this->mm[0] = v1[0] + v2[0];
        this->mm[1] = v1[1] + v2[1];
        this->mm[2] = v1[2] + v2[2];
        this->mm[3] = v1[3] + v2[3];

        return *this;
    }

    /// this = v1 - v2
    /// "this", v1 and v2 need not be distinct objects, e.g. v.sub(v1,v);
    template <typename T0, typename T1>
    const Vec4<T>& sub(const Vec4<T0> &v1, const Vec4<T1> &v2)
    {
        this->mm[0] = v1[0] - v2[0];
        this->mm[1] = v1[1] - v2[1];
        this->mm[2] = v1[2] - v2[2];
        this->mm[3] = v1[3] - v2[3];

        return *this;
    }

    /// this = scalar*v, v need not be a distinct object from "this",
    /// e.g. v.scale(1.5,v1);
    template <typename T0, typename T1>
    const Vec4<T>& scale(T0 scale, const Vec4<T1> &v)
    {
        this->mm[0] = scale * v[0];
        this->mm[1] = scale * v[1];
        this->mm[2] = scale * v[2];
        this->mm[3] = scale * v[3];

        return *this;
    }

    /// this = v / scalar (componentwise division of v by the scalar,
    /// not scalar by v).  No check is made for a zero divisor.
    template <typename T0, typename T1>
    const Vec4<T> &div(T0 scalar, const Vec4<T1> &v)
    {
        this->mm[0] = v[0] / scalar;
        this->mm[1] = v[1] / scalar;
        this->mm[2] = v[2] / scalar;
        this->mm[3] = v[3] / scalar;

        return *this;
    }

    /// Dot product
    T dot(const Vec4<T> &v) const
    {
        return (this->mm[0]*v.mm[0] + this->mm[1]*v.mm[1]
            + this->mm[2]*v.mm[2] + this->mm[3]*v.mm[3]);
    }

    /// Length of the vector
    T length() const
    {
        return std::sqrt(
            this->mm[0]*this->mm[0] +
            this->mm[1]*this->mm[1] +
            this->mm[2]*this->mm[2] +
            this->mm[3]*this->mm[3]);
    }

    /// Squared length of the vector, much faster than length() as it
    /// does not involve square root
    T lengthSqr() const
    {
        return (this->mm[0]*this->mm[0] + this->mm[1]*this->mm[1]
            + this->mm[2]*this->mm[2] + this->mm[3]*this->mm[3]);
    }

    /// Return a reference to itself after the exponent has been
    /// applied to all the vector components.
    inline const Vec4<T>& exp()
    {
        this->mm[0] = std::exp(this->mm[0]);
        this->mm[1] = std::exp(this->mm[1]);
        this->mm[2] = std::exp(this->mm[2]);
        this->mm[3] = std::exp(this->mm[3]);
        return *this;
    }

    /// Return a reference to itself after log has been
    /// applied to all the vector components.
    inline const Vec4<T>& log()
    {
        this->mm[0] = std::log(this->mm[0]);
        this->mm[1] = std::log(this->mm[1]);
        this->mm[2] = std::log(this->mm[2]);
        this->mm[3] = std::log(this->mm[3]);
        return *this;
    }

    /// Return the sum of all the vector components.
    inline T sum() const
    {
        return this->mm[0] + this->mm[1] + this->mm[2] + this->mm[3];
    }

    /// Return the product of all the vector components.
    inline T product() const
    {
        return this->mm[0] * this->mm[1] * this->mm[2] * this->mm[3];
    }

    /// this = normalized this
    /// @return false (leaving *this unchanged) if the length is within
    /// @a eps of zero; true otherwise.
    bool normalize(T eps = static_cast<T>(1.0e-8))
    {
        T d = length();
        if (isApproxEqual(d, T(0), eps)) {
            return false;
        }
        *this *= (T(1) / d);
        return true;
    }

    /// return normalized this, throws if null vector
    Vec4<T> unit(T eps=0) const
    {
        T d;
        return unit(eps, d);
    }

    /// return normalized this and length, throws if null vector
    Vec4<T> unit(T eps, T& len) const
    {
        len = length();
        if (isApproxEqual(len, T(0), eps)) {
            throw ArithmeticError("Normalizing null 4-vector");
        }
        return *this / len;
    }
/// return normalized this, or (1, 0, 0, 0) if this is null vector
Vec4<T> unitSafe() const
{
T l2 = lengthSqr();
return l2 ? *this / static_cast<T>(sqrt(l2)) : Vec4<T>(1, 0, 0, 0);
}
    /// Multiply each element of this vector by @a scalar.
    template <typename S>
    const Vec4<T> &operator*=(S scalar)
    {
        this->mm[0] *= scalar;
        this->mm[1] *= scalar;
        this->mm[2] *= scalar;
        this->mm[3] *= scalar;
        return *this;
    }

    /// Multiply each element of this vector by the corresponding element of the given vector.
    template <typename S>
    const Vec4<T> &operator*=(const Vec4<S> &v1)
    {
        this->mm[0] *= v1[0];
        this->mm[1] *= v1[1];
        this->mm[2] *= v1[2];
        this->mm[3] *= v1[3];
        return *this;
    }

    /// Divide each element of this vector by @a scalar.
    /// No check is made for a zero divisor.
    template <typename S>
    const Vec4<T> &operator/=(S scalar)
    {
        this->mm[0] /= scalar;
        this->mm[1] /= scalar;
        this->mm[2] /= scalar;
        this->mm[3] /= scalar;
        return *this;
    }

    /// Divide each element of this vector by the corresponding element of the given vector.
    template <typename S>
    const Vec4<T> &operator/=(const Vec4<S> &v1)
    {
        this->mm[0] /= v1[0];
        this->mm[1] /= v1[1];
        this->mm[2] /= v1[2];
        this->mm[3] /= v1[3];
        return *this;
    }

    /// Add @a scalar to each element of this vector.
    template <typename S>
    const Vec4<T> &operator+=(S scalar)
    {
        this->mm[0] += scalar;
        this->mm[1] += scalar;
        this->mm[2] += scalar;
        this->mm[3] += scalar;
        return *this;
    }

    /// Add each element of the given vector to the corresponding element of this vector.
    template <typename S>
    const Vec4<T> &operator+=(const Vec4<S> &v1)
    {
        this->mm[0] += v1[0];
        this->mm[1] += v1[1];
        this->mm[2] += v1[2];
        this->mm[3] += v1[3];
        return *this;
    }

    /// Subtract @a scalar from each element of this vector.
    template <typename S>
    const Vec4<T> &operator-=(S scalar)
    {
        this->mm[0] -= scalar;
        this->mm[1] -= scalar;
        this->mm[2] -= scalar;
        this->mm[3] -= scalar;
        return *this;
    }

    /// Subtract each element of the given vector from the corresponding element of this vector.
    template <typename S>
    const Vec4<T> &operator-=(const Vec4<S> &v1)
    {
        this->mm[0] -= v1[0];
        this->mm[1] -= v1[1];
        this->mm[2] -= v1[2];
        this->mm[3] -= v1[3];
        return *this;
    }
    // Number of cols, rows, elements (a Vec4 is treated as a 1x4 row vector)
    static unsigned numRows() { return 1; }
    static unsigned numColumns() { return 4; }
    static unsigned numElements() { return 4; }

    /// Predefined constants, e.g. Vec4f v = Vec4f::xNegAxis();
    static Vec4<T> zero() { return Vec4<T>(0, 0, 0, 0); }
    static Vec4<T> origin() { return Vec4<T>(0, 0, 0, 1); }  // homogeneous origin (w = 1)
    static Vec4<T> ones() { return Vec4<T>(1, 1, 1, 1); }
}; // class Vec4
/// Equality operator, does exact floating point comparisons
template <typename T0, typename T1>
inline bool operator==(const Vec4<T0> &v0, const Vec4<T1> &v1)
{
    return
        isExactlyEqual(v0[0], v1[0]) &&
        isExactlyEqual(v0[1], v1[1]) &&
        isExactlyEqual(v0[2], v1[2]) &&
        isExactlyEqual(v0[3], v1[3]);
}

/// Inequality operator, does exact floating point comparisons
template <typename T0, typename T1>
inline bool operator!=(const Vec4<T0> &v0, const Vec4<T1> &v1) { return !(v0==v1); }

/// Multiply each element of the given vector by @a scalar and return the result.
template <typename S, typename T>
inline Vec4<typename promote<S, T>::type> operator*(S scalar, const Vec4<T> &v)
{ return v*scalar; }

/// Multiply each element of the given vector by @a scalar and return the result.
template <typename S, typename T>
inline Vec4<typename promote<S, T>::type> operator*(const Vec4<T> &v, S scalar)
{
    Vec4<typename promote<S, T>::type> result(v);
    result *= scalar;
    return result;
}

/// Multiply corresponding elements of @a v0 and @a v1 and return the result.
template <typename T0, typename T1>
inline Vec4<typename promote<T0, T1>::type> operator*(const Vec4<T0> &v0, const Vec4<T1> &v1)
{
    Vec4<typename promote<T0, T1>::type> result(v0[0]*v1[0],
                                                v0[1]*v1[1],
                                                v0[2]*v1[2],
                                                v0[3]*v1[3]);
    return result;
}

/// Divide @a scalar by each element of the given vector and return the result.
template <typename S, typename T>
inline Vec4<typename promote<S, T>::type> operator/(S scalar, const Vec4<T> &v)
{
    return Vec4<typename promote<S, T>::type>(scalar/v[0],
                                              scalar/v[1],
                                              scalar/v[2],
                                              scalar/v[3]);
}

/// Divide each element of the given vector by @a scalar and return the result.
template <typename S, typename T>
inline Vec4<typename promote<S, T>::type> operator/(const Vec4<T> &v, S scalar)
{
    Vec4<typename promote<S, T>::type> result(v);
    result /= scalar;
    return result;
}

/// Divide corresponding elements of @a v0 and @a v1 and return the result.
template <typename T0, typename T1>
inline Vec4<typename promote<T0, T1>::type> operator/(const Vec4<T0> &v0, const Vec4<T1> &v1)
{
    Vec4<typename promote<T0, T1>::type>
        result(v0[0]/v1[0], v0[1]/v1[1], v0[2]/v1[2], v0[3]/v1[3]);
    return result;
}

/// Add corresponding elements of @a v0 and @a v1 and return the result.
template <typename T0, typename T1>
inline Vec4<typename promote<T0, T1>::type> operator+(const Vec4<T0> &v0, const Vec4<T1> &v1)
{
    Vec4<typename promote<T0, T1>::type> result(v0);
    result += v1;
    return result;
}

/// Add @a scalar to each element of the given vector and return the result.
template <typename S, typename T>
inline Vec4<typename promote<S, T>::type> operator+(const Vec4<T> &v, S scalar)
{
    Vec4<typename promote<S, T>::type> result(v);
    result += scalar;
    return result;
}

/// Subtract corresponding elements of @a v0 and @a v1 and return the result.
template <typename T0, typename T1>
inline Vec4<typename promote<T0, T1>::type> operator-(const Vec4<T0> &v0, const Vec4<T1> &v1)
{
    Vec4<typename promote<T0, T1>::type> result(v0);
    result -= v1;
    return result;
}

/// Subtract @a scalar from each element of the given vector and return the result.
template <typename S, typename T>
inline Vec4<typename promote<S, T>::type> operator-(const Vec4<T> &v, S scalar)
{
    Vec4<typename promote<S, T>::type> result(v);
    result -= scalar;
    return result;
}

/// Componentwise approximate equality using the default tolerance of Vec4::eq().
template <typename T>
inline bool
isApproxEqual(const Vec4<T>& a, const Vec4<T>& b)
{
    return a.eq(b);
}

/// Componentwise approximate equality with a per-component tolerance vector.
template <typename T>
inline bool
isApproxEqual(const Vec4<T>& a, const Vec4<T>& b, const Vec4<T>& eps)
{
    return isApproxEqual(a[0], b[0], eps[0]) &&
           isApproxEqual(a[1], b[1], eps[1]) &&
           isApproxEqual(a[2], b[2], eps[2]) &&
           isApproxEqual(a[3], b[3], eps[3]);
}

/// Return the componentwise absolute value of @a v.
template<typename T>
inline Vec4<T>
Abs(const Vec4<T>& v)
{
    return Vec4<T>(Abs(v[0]), Abs(v[1]), Abs(v[2]), Abs(v[3]));
}

/// @remark We are switching to a more explicit name because the semantics
/// are different from std::min/max. In that case, the function returns a
/// reference to one of the objects based on a comparator. Here, we must
/// fabricate a new object which might not match either of the inputs.

/// Return component-wise minimum of the two vectors.
template <typename T>
inline Vec4<T> minComponent(const Vec4<T> &v1, const Vec4<T> &v2)
{
    return Vec4<T>(
        std::min(v1.x(), v2.x()),
        std::min(v1.y(), v2.y()),
        std::min(v1.z(), v2.z()),
        std::min(v1.w(), v2.w()));
}

/// Return component-wise maximum of the two vectors.
template <typename T>
inline Vec4<T> maxComponent(const Vec4<T> &v1, const Vec4<T> &v2)
{
    return Vec4<T>(
        std::max(v1.x(), v2.x()),
        std::max(v1.y(), v2.y()),
        std::max(v1.z(), v2.z()),
        std::max(v1.w(), v2.w()));
}

/// @brief Return a vector with the exponent applied to each of
/// the components of the input vector.
template <typename T>
inline Vec4<T> Exp(Vec4<T> v) { return v.exp(); }

/// @brief Return a vector with log applied to each of
/// the components of the input vector.
template <typename T>
inline Vec4<T> Log(Vec4<T> v) { return v.log(); }

// Convenience aliases for the common element types.
using Vec4i = Vec4<int32_t>;
using Vec4ui = Vec4<uint32_t>;
using Vec4s = Vec4<float>;
using Vec4d = Vec4<double>;

#if OPENVDB_ABI_VERSION_NUMBER >= 8
// These aliases must remain PODs (standard-layout and trivial) so that
// they can be memcpy'd and stored in trees without construction.
OPENVDB_IS_POD(Vec4i)
OPENVDB_IS_POD(Vec4ui)
OPENVDB_IS_POD(Vec4s)
OPENVDB_IS_POD(Vec4d)
#endif
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_MATH_VEC4_HAS_BEEN_INCLUDED
| 17,455 | C | 29.096552 | 99 | 0.562246 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/QuantizedUnitVec.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_MATH_QUANTIZED_UNIT_VEC_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_QUANTIZED_UNIT_VEC_HAS_BEEN_INCLUDED
#include <openvdb/Platform.h>
#include <openvdb/version.h>
#include "Vec3.h"
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
/// @brief Unit vector occupying only 16 bits
/// @details Stores two quantized components. Based on the
/// "Higher Accuracy Quantized Normals" article from GameDev.Net LLC, 2000
class OPENVDB_API QuantizedUnitVec
{
public:
    /// Encode a (non-null) direction vector into a 16-bit quantized form.
    template<typename T> static uint16_t pack(const Vec3<T>& vec);
    /// Decode a 16-bit quantized value back into an approximate unit vector.
    static Vec3s unpack(const uint16_t data);

    /// Flip the three stored sign bits in place (negates the decoded vector).
    static void flipSignBits(uint16_t&);

private:
    QuantizedUnitVec() {}  // static-only utility class; not constructible

    // bit masks
    static const uint16_t MASK_SLOTS = 0x1FFF; // 0001111111111111
    static const uint16_t MASK_XSLOT = 0x1F80; // 0001111110000000
    static const uint16_t MASK_YSLOT = 0x007F; // 0000000001111111
    static const uint16_t MASK_XSIGN = 0x8000; // 1000000000000000
    static const uint16_t MASK_YSIGN = 0x4000; // 0100000000000000
    static const uint16_t MASK_ZSIGN = 0x2000; // 0010000000000000

    // normalization weights, 32 kilobytes.
    static float sNormalizationWeights[MASK_SLOTS + 1];
}; // class QuantizedUnitVec
////////////////////////////////////////
template<typename T>
inline uint16_t
QuantizedUnitVec::pack(const Vec3<T>& vec)
{
    // The null vector cannot be quantized; map it to the zero bit pattern.
    if (math::isZero(vec)) return 0;

    uint16_t data = 0;
    T x(vec[0]), y(vec[1]), z(vec[2]);

    // The sign of the three components are first stored using
    // 3-bits and can then safely be discarded.
    if (x < T(0.0)) { data |= MASK_XSIGN; x = -x; }
    if (y < T(0.0)) { data |= MASK_YSIGN; y = -y; }
    if (z < T(0.0)) { data |= MASK_ZSIGN; z = -z; }

    // The z component is discarded and x & y are quantized in
    // the 0 to 126 range.
    T w = T(126.0) / (x + y + z);
    uint16_t xbits = static_cast<uint16_t>((x * w));
    uint16_t ybits = static_cast<uint16_t>((y * w));

    // The remaining 13 bits in our 16 bit word are divided into a
    // 6-bit x-slot and a 7-bit y-slot. Both the xbits and the ybits
    // can still be represented using (2^7 - 1) quantization levels.
    // If the xbits require more than 6-bits, store the complement.
    // (xbits + ybits < 127, thus if xbits > 63 => ybits <= 63)
    if (xbits > 63) {
        xbits = static_cast<uint16_t>(127 - xbits);
        ybits = static_cast<uint16_t>(127 - ybits);
    }

    // Pack components into their respective slots.
    data = static_cast<uint16_t>(data | (xbits << 7));
    data = static_cast<uint16_t>(data | ybits);
    return data;
}
inline Vec3s
QuantizedUnitVec::unpack(const uint16_t data)
{
    // Precomputed per-slot weight that renormalizes the decoded vector.
    const float w = sNormalizationWeights[data & MASK_SLOTS];
    uint16_t xbits = static_cast<uint16_t>((data & MASK_XSLOT) >> 7);
    uint16_t ybits = static_cast<uint16_t>(data & MASK_YSLOT);

    // Check if the complement components were stored and revert.
    if ((xbits + ybits) > 126) {
        xbits = static_cast<uint16_t>(127 - xbits);
        ybits = static_cast<uint16_t>(127 - ybits);
    }

    // z is reconstructed from the invariant xbits + ybits + zbits == 126.
    Vec3s vec(float(xbits) * w, float(ybits) * w, float(126 - xbits - ybits) * w);

    // Restore the three sign bits.
    if (data & MASK_XSIGN) vec[0] = -vec[0];
    if (data & MASK_YSIGN) vec[1] = -vec[1];
    if (data & MASK_ZSIGN) vec[2] = -vec[2];
    return vec;
}
////////////////////////////////////////
inline void
QuantizedUnitVec::flipSignBits(uint16_t& v)
{
    // Toggle the three sign bits (those outside MASK_SLOTS) while leaving
    // the 13 quantization-slot bits untouched.  Equivalent to the masked
    // merge (v & MASK_SLOTS) | (~v & ~MASK_SLOTS).
    v = static_cast<uint16_t>(v ^ static_cast<uint16_t>(~MASK_SLOTS));
}
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_MATH_QUANTIZED_UNIT_VEC_HAS_BEEN_INCLUDED
| 3,745 | C | 29.704918 | 82 | 0.641389 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/Math.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file Math.h
/// @brief General-purpose arithmetic and comparison routines, most of which
/// accept arbitrary value types (or at least arbitrary numeric value types)
#ifndef OPENVDB_MATH_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_HAS_BEEN_INCLUDED
#include <openvdb/Platform.h>
#include <openvdb/version.h>
#include <boost/numeric/conversion/conversion_traits.hpp>
#include <algorithm> // for std::max()
#include <cassert>
#include <cmath> // for std::ceil(), std::fabs(), std::pow(), std::sqrt(), etc.
#include <cstdlib> // for abs(int)
#include <cstring> // for std::memcpy()
#include <random>
#include <string>
#include <type_traits> // for std::is_arithmetic
// Compile pragmas
// Intel(r) compiler fires remark #1572: floating-point equality and inequality
// comparisons are unrealiable when == or != is used with floating point operands.
#if defined(__INTEL_COMPILER)
#define OPENVDB_NO_FP_EQUALITY_WARNING_BEGIN \
_Pragma("warning (push)") \
_Pragma("warning (disable:1572)")
#define OPENVDB_NO_FP_EQUALITY_WARNING_END \
_Pragma("warning (pop)")
#elif defined(__clang__)
#define OPENVDB_NO_FP_EQUALITY_WARNING_BEGIN \
PRAGMA(clang diagnostic push) \
PRAGMA(clang diagnostic ignored "-Wfloat-equal")
#define OPENVDB_NO_FP_EQUALITY_WARNING_END \
PRAGMA(clang diagnostic pop)
#else
// For GCC, #pragma GCC diagnostic ignored "-Wfloat-equal"
// isn't working until gcc 4.2+,
// Trying
// #pragma GCC system_header
// creates other problems, most notably "warning: will never be executed"
// in from templates, unsure of how to work around.
// If necessary, could use integer based comparisons for equality
#define OPENVDB_NO_FP_EQUALITY_WARNING_BEGIN
#define OPENVDB_NO_FP_EQUALITY_WARNING_END
#endif
#ifdef OPENVDB_IS_POD
#undef OPENVDB_IS_POD
#endif
#define OPENVDB_IS_POD(Type) \
static_assert(std::is_standard_layout<Type>::value, \
#Type" must be a POD type (satisfy StandardLayoutType.)"); \
static_assert(std::is_trivial<Type>::value, \
#Type" must be a POD type (satisfy TrivialType.)");
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {

/// @brief Return the value of type T that corresponds to zero.
/// @note A zeroVal<T>() specialization must be defined for each @c ValueType T
/// that cannot be constructed using the form @c T(0). For example, @c std::string(0)
/// treats 0 as @c nullptr and throws a @c std::logic_error.
template<typename T> inline T zeroVal() { return T(0); }
/// Return the @c std::string value that corresponds to zero.
template<> inline std::string zeroVal<std::string>() { return ""; }
/// Return the @c bool value that corresponds to zero.
template<> inline bool zeroVal<bool>() { return false; }

namespace math {

/// @todo These won't be needed if we eliminate StringGrids.
//@{
/// @brief Needed to support the <tt>(zeroVal<ValueType>() + val)</tt> idiom
/// when @c ValueType is @c std::string
inline std::string operator+(const std::string& s, bool) { return s; }
inline std::string operator+(const std::string& s, int) { return s; }
inline std::string operator+(const std::string& s, float) { return s; }
inline std::string operator+(const std::string& s, double) { return s; }
//@}

/// @brief Componentwise adder for POD types.
template<typename Type1, typename Type2>
inline auto cwiseAdd(const Type1& v, const Type2 s)
{
    OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
    return v + s;
    OPENVDB_NO_TYPE_CONVERSION_WARNING_END
}

/// @brief Componentwise less than for POD types.
template<typename Type1, typename Type2>
inline bool cwiseLessThan(const Type1& a, const Type2& b)
{
    OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
    return a < b;
    OPENVDB_NO_TYPE_CONVERSION_WARNING_END
}

/// @brief Componentwise greater than for POD types.
template<typename Type1, typename Type2>
inline bool cwiseGreaterThan(const Type1& a, const Type2& b)
{
    OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
    return a > b;
    OPENVDB_NO_TYPE_CONVERSION_WARNING_END
}

/// @brief Pi constant taken from Boost to match old behaviour
/// @note Available in C++20
template <typename T> inline constexpr T pi() { return 3.141592653589793238462643383279502884e+00; }
template <> inline constexpr float pi() { return 3.141592653589793238462643383279502884e+00F; }
template <> inline constexpr double pi() { return 3.141592653589793238462643383279502884e+00; }
template <> inline constexpr long double pi() { return 3.141592653589793238462643383279502884e+00L; }

/// @brief Return the unary negation of the given value.
/// @note A negative<T>() specialization must be defined for each ValueType T
/// for which unary negation is not defined.
template<typename T> inline T negative(const T& val)
{
    // disable unary minus on unsigned warning
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable:4146)
#endif
    return T(-val);
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
}
/// Return the negation of the given boolean.
template<> inline bool negative(const bool& val) { return !val; }
/// Return the "negation" of the given string.
template<> inline std::string negative(const std::string& val) { return val; }


//@{
/// Tolerance for floating-point comparison
template<typename T> struct Tolerance { static T value() { return zeroVal<T>(); } }; // exact compare for non-FP types
template<> struct Tolerance<float> { static float value() { return 1e-8f; } };
template<> struct Tolerance<double> { static double value() { return 1e-15; } };
//@}

//@{
/// Delta for small floating-point offsets
template<typename T> struct Delta { static T value() { return zeroVal<T>(); } };
template<> struct Delta<float> { static float value() { return 1e-5f; } };
template<> struct Delta<double> { static double value() { return 1e-9; } };
//@}
// ==========> Random Values <==================

/// @brief Simple generator of random numbers over the range [0, 1)
/// @details Thread-safe as long as each thread has its own Rand01 instance
template<typename FloatType = double, typename EngineType = std::mt19937>
class Rand01
{
private:
    EngineType mEngine;                                 // underlying RNG engine
    std::uniform_real_distribution<FloatType> mRand;    // maps engine output to [0, 1)

public:
    using ValueType = FloatType;

    /// @brief Initialize the generator.
    /// @param engine random number generator
    Rand01(const EngineType& engine): mEngine(engine) {}

    /// @brief Initialize the generator.
    /// @param seed seed value for the random number generator
    Rand01(unsigned int seed): mEngine(static_cast<typename EngineType::result_type>(seed)) {}

    /// Set the seed value for the random number generator
    void setSeed(unsigned int seed)
    {
        mEngine.seed(static_cast<typename EngineType::result_type>(seed));
    }

    /// Return a const reference to the random number generator.
    const EngineType& engine() const { return mEngine; }

    /// Return a uniformly distributed random number in the range [0, 1).
    FloatType operator()() { return mRand(mEngine); }
};

using Random01 = Rand01<double, std::mt19937>;


/// @brief Simple random integer generator
/// @details Thread-safe as long as each thread has its own RandInt instance
template<typename IntType = int, typename EngineType = std::mt19937>
class RandInt
{
private:
    using Distr = std::uniform_int_distribution<IntType>;
    EngineType mEngine;
    Distr mRand;

public:
    /// @brief Initialize the generator.
    /// @param engine random number generator
    /// @param imin,imax generate integers that are uniformly distributed over [imin, imax]
    RandInt(const EngineType& engine, IntType imin, IntType imax):
        mEngine(engine),
        mRand(std::min(imin, imax), std::max(imin, imax))  // tolerate swapped bounds
    {}

    /// @brief Initialize the generator.
    /// @param seed seed value for the random number generator
    /// @param imin,imax generate integers that are uniformly distributed over [imin, imax]
    RandInt(unsigned int seed, IntType imin, IntType imax):
        mEngine(static_cast<typename EngineType::result_type>(seed)),
        mRand(std::min(imin, imax), std::max(imin, imax))
    {}

    /// Change the range over which integers are distributed to [imin, imax].
    void setRange(IntType imin, IntType imax)
    {
        mRand = Distr(std::min(imin, imax), std::max(imin, imax));
    }

    /// Set the seed value for the random number generator
    void setSeed(unsigned int seed)
    {
        mEngine.seed(static_cast<typename EngineType::result_type>(seed));
    }

    /// Return a const reference to the random number generator.
    const EngineType& engine() const { return mEngine; }

    /// Return a randomly-generated integer in the current range.
    IntType operator()() { return mRand(mEngine); }

    /// @brief Return a randomly-generated integer in the new range [imin, imax],
    /// without changing the current range.
    IntType operator()(IntType imin, IntType imax)
    {
        const IntType lo = std::min(imin, imax), hi = std::max(imin, imax);
        return mRand(mEngine, typename Distr::param_type(lo, hi));
    }
};

using RandomInt = RandInt<int, std::mt19937>;
// ==========> Clamp <==================
/// Return @a x clamped to the closed interval [@a min, @a max].
template<typename Type>
inline Type
Clamp(Type x, Type min, Type max)
{
    assert(!(min > max));  // the interval must be well formed
    if (x > min) {
        return (x < max) ? x : max;
    }
    return min;
}
/// Return @a x clamped to the unit interval [0, 1].
template<typename Type>
inline Type
Clamp01(Type x)
{
    if (!(x > Type(0))) return Type(0);
    return (x < Type(1)) ? x : Type(1);
}

/// @brief Clamp @a x in place to [0, 1].
/// @return @c true if @a x was outside the interval and had to be clamped.
template<typename Type>
inline bool
ClampTest01(Type &x)
{
    const bool inside = (x >= Type(0) && x <= Type(1));
    if (inside) return false;
    x = (x < Type(0)) ? Type(0) : Type(1);
    return true;
}
/// @brief Return 0 if @a x < 0, 1 if @a x > 1, and otherwise the cubic
/// Hermite smoothstep (3 − 2 @a x) @a x².
template<typename Type>
inline Type
SmoothUnitStep(Type x)
{
    if (!(x > 0)) return Type(0);
    if (!(x < 1)) return Type(1);
    return (3 - 2*x) * x * x;
}

/// @brief Remap @a x from [@a min, @a max] onto [0, 1] and apply the cubic
/// smoothstep above; requires @a min < @a max.
template<typename Type>
inline Type
SmoothUnitStep(Type x, Type min, Type max)
{
    assert(min < max);
    const Type t = (x - min) / (max - min);
    return SmoothUnitStep(t);
}
// ==========> Absolute Value <==================


//@{
/// Return the absolute value of the given quantity.
inline int32_t Abs(int32_t i) { return abs(i); }
inline int64_t Abs(int64_t i)
{
#ifdef _MSC_VER
    // On MSVC (LLP64) long is only 32 bits wide, so labs() cannot be used.
    return (i < int64_t(0) ? -i : i);
#else
    // NOTE(review): labs() takes long, which is 64 bits on LP64 platforms;
    // a 32-bit (ILP32) build would truncate here -- confirm target ABIs.
    return labs(i);
#endif
}
inline float Abs(float x) { return std::fabs(x); }
inline double Abs(double x) { return std::fabs(x); }
inline long double Abs(long double x) { return std::fabs(x); }
inline uint32_t Abs(uint32_t i) { return i; }
inline uint64_t Abs(uint64_t i) { return i; }
inline bool Abs(bool b) { return b; }
// On OSX size_t and uint64_t are different types
#if defined(__APPLE__) || defined(MACOSX)
inline size_t Abs(size_t i) { return i; }
#endif
//@}


////////////////////////////////////////


// ==========> Value Comparison <==================


/// Return @c true if @a x is exactly equal to zero.
template<typename Type>
inline bool
isZero(const Type& x)
{
    OPENVDB_NO_FP_EQUALITY_WARNING_BEGIN
    return x == zeroVal<Type>();
    OPENVDB_NO_FP_EQUALITY_WARNING_END
}


/// @brief Return @c true if @a x is equal to zero to within
/// the default floating-point comparison tolerance.
template<typename Type>
inline bool
isApproxZero(const Type& x)
{
    const Type tolerance = Type(zeroVal<Type>() + Tolerance<Type>::value());
    return !(x > tolerance) && !(x < -tolerance);
}

/// Return @c true if @a x is equal to zero to within the given tolerance.
template<typename Type>
inline bool
isApproxZero(const Type& x, const Type& tolerance)
{
    return !(x > tolerance) && !(x < -tolerance);
}


/// Return @c true if @a x is less than zero.
template<typename Type>
inline bool
isNegative(const Type& x) { return x < zeroVal<Type>(); }

// Return false, since bool values are never less than zero.
template<> inline bool isNegative<bool>(const bool&) { return false; }


/// Return @c true if @a x is finite.
inline bool
isFinite(const float x) { return std::isfinite(x); }

/// Return @c true if @a x is finite.
template<typename Type, typename std::enable_if<std::is_arithmetic<Type>::value, int>::type = 0>
inline bool
isFinite(const Type& x) { return std::isfinite(static_cast<double>(x)); }


/// Return @c true if @a x is an infinity value (either positive infinity or negative infinity).
inline bool
isInfinite(const float x) { return std::isinf(x); }

/// Return @c true if @a x is an infinity value (either positive infinity or negative infinity).
template<typename Type, typename std::enable_if<std::is_arithmetic<Type>::value, int>::type = 0>
inline bool
isInfinite(const Type& x) { return std::isinf(static_cast<double>(x)); }


/// Return @c true if @a x is a NaN (Not-A-Number) value.
inline bool
isNan(const float x) { return std::isnan(x); }

/// Return @c true if @a x is a NaN (Not-A-Number) value.
template<typename Type, typename std::enable_if<std::is_arithmetic<Type>::value, int>::type = 0>
inline bool
isNan(const Type& x) { return std::isnan(static_cast<double>(x)); }


/// Return @c true if @a a is equal to @a b to within the given tolerance.
template<typename Type>
inline bool
isApproxEqual(const Type& a, const Type& b, const Type& tolerance)
{
    return !cwiseGreaterThan(Abs(a - b), tolerance);
}

/// @brief Return @c true if @a a is equal to @a b to within
/// the default floating-point comparison tolerance.
template<typename Type>
inline bool
isApproxEqual(const Type& a, const Type& b)
{
    const Type tolerance = Type(zeroVal<Type>() + Tolerance<Type>::value());
    return isApproxEqual(a, b, tolerance);
}

// Specializes isApproxEqual to an exact comparison for types with no
// meaningful tolerance (bool, std::string).
#define OPENVDB_EXACT_IS_APPROX_EQUAL(T) \
    template<> inline bool isApproxEqual<T>(const T& a, const T& b) { return a == b; } \
    template<> inline bool isApproxEqual<T>(const T& a, const T& b, const T&) { return a == b; } \
    /**/

OPENVDB_EXACT_IS_APPROX_EQUAL(bool)
OPENVDB_EXACT_IS_APPROX_EQUAL(std::string)
/// @brief Return @c true if @a a is larger than @a b to within
/// the given tolerance, i.e., if @a b - @a a < @a tolerance.
/// @note Values that are equal (gap of zero) also report @c true
/// for any positive tolerance.
template<typename Type>
inline bool
isApproxLarger(const Type& a, const Type& b, const Type& tolerance)
{
    const Type gap = b - a;
    return gap < tolerance;
}
/// @brief Return @c true if @a a is exactly equal to @a b.
template<typename T0, typename T1>
inline bool
isExactlyEqual(const T0& a, const T1& b)
{
    OPENVDB_NO_FP_EQUALITY_WARNING_BEGIN
    return a == b;
    OPENVDB_NO_FP_EQUALITY_WARNING_END
}


/// @brief Return @c true if @a a and @a b agree to within the absolute
/// tolerance @a absTol, or failing that, to within the relative tolerance
/// @a relTol (relative to the larger-magnitude operand).
template<typename Type>
inline bool
isRelOrApproxEqual(const Type& a, const Type& b, const Type& absTol, const Type& relTol)
{
    // First check to see if we are inside the absolute tolerance
    // Necessary for numbers close to 0
    if (!(Abs(a - b) > absTol)) return true;

    // Next check to see if we are inside the relative tolerance
    // to handle large numbers that aren't within the abs tolerance
    // but could be the closest floating point representation
    double relError;
    if (Abs(b) > Abs(a)) {
        relError = Abs((a - b) / b);
    } else {
        relError = Abs((a - b) / a);
    }
    return (relError <= relTol);
}

// Booleans have no notion of tolerance; compare exactly.
template<>
inline bool
isRelOrApproxEqual(const bool& a, const bool& b, const bool&, const bool&)
{
    return (a == b);
}
// Avoid strict aliasing issues by using type punning
// http://cellperformance.beyond3d.com/articles/2006/06/understanding-strict-aliasing.html
// Using "casting through a union(2)"
inline int32_t
floatToInt32(const float aFloatValue)
{
union FloatOrInt32 { float floatValue; int32_t int32Value; };
const FloatOrInt32* foi = reinterpret_cast<const FloatOrInt32*>(&aFloatValue);
return foi->int32Value;
}
inline int64_t
doubleToInt64(const double aDoubleValue)
{
union DoubleOrInt64 { double doubleValue; int64_t int64Value; };
const DoubleOrInt64* dol = reinterpret_cast<const DoubleOrInt64*>(&aDoubleValue);
return dol->int64Value;
}
// aUnitsInLastPlace is the allowed difference between the least significant digits
// of the numbers' floating point representation
// Please read the reference paper before trying to use isUlpsEqual
// http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
inline bool
isUlpsEqual(const double aLeft, const double aRight, const int64_t aUnitsInLastPlace)
{
int64_t longLeft = doubleToInt64(aLeft);
// Because of 2's complement, must restore lexicographical order
if (longLeft < 0) {
longLeft = INT64_C(0x8000000000000000) - longLeft;
}
int64_t longRight = doubleToInt64(aRight);
// Because of 2's complement, must restore lexicographical order
if (longRight < 0) {
longRight = INT64_C(0x8000000000000000) - longRight;
}
int64_t difference = labs(longLeft - longRight);
return (difference <= aUnitsInLastPlace);
}
inline bool
isUlpsEqual(const float aLeft, const float aRight, const int32_t aUnitsInLastPlace)
{
int32_t intLeft = floatToInt32(aLeft);
// Because of 2's complement, must restore lexicographical order
if (intLeft < 0) {
intLeft = 0x80000000 - intLeft;
}
int32_t intRight = floatToInt32(aRight);
// Because of 2's complement, must restore lexicographical order
if (intRight < 0) {
intRight = 0x80000000 - intRight;
}
int32_t difference = abs(intLeft - intRight);
return (difference <= aUnitsInLastPlace);
}
////////////////////////////////////////
// ==========> Pow <==================

/// Return @a x<sup>2</sup>.
template<typename Type>
inline Type Pow2(Type x) { return x * x; }

/// Return @a x<sup>3</sup>.
template<typename Type>
inline Type Pow3(Type x) { return x * Pow2(x); }

/// Return @a x<sup>4</sup>.
template<typename Type>
inline Type Pow4(Type x) { return Pow2(Pow2(x)); }
/// @brief Return @a x<sup>@a n</sup> for an integer exponent @a n
/// (a negative exponent inverts the base first).
/// @note Multiplies sequentially, so floating-point rounding matches the
/// naive repeated-product definition.
template<typename Type>
Type
Pow(Type x, int n)
{
    Type ans = 1;
    // Negate via unsigned arithmetic: the previous "n = -n" was undefined
    // behavior (signed overflow) when n == INT_MIN.
    unsigned int m;
    if (n < 0) {
        m = 0u - static_cast<unsigned int>(n);
        x = Type(1)/x;
    } else {
        m = static_cast<unsigned int>(n);
    }
    while (m--) ans *= x;
    return ans;
}
//@{
/// Return @a b<sup>@a e</sup>.
inline float
Pow(float b, float e)
{
assert( b >= 0.0f && "Pow(float,float): base is negative" );
return powf(b,e);
}
inline double
Pow(double b, double e)
{
assert( b >= 0.0 && "Pow(double,double): base is negative" );
return std::pow(b,e);
}
//@}
// ==========> Max <==================

/// Return the maximum of two values
/// @note Like std::max, the first argument is returned when both compare equal.
template<typename Type>
inline const Type&
Max(const Type& a, const Type& b)
{
    return (a < b) ? b : a;
}

/// Return the maximum of three values
template<typename Type>
inline const Type&
Max(const Type& a, const Type& b, const Type& c)
{
    return Max(Max(a, b), c);
}

/// Return the maximum of four values
template<typename Type>
inline const Type&
Max(const Type& a, const Type& b, const Type& c, const Type& d)
{
    return Max(Max(a, b), Max(c, d));
}

/// Return the maximum of five values
template<typename Type>
inline const Type&
Max(const Type& a, const Type& b, const Type& c, const Type& d, const Type& e)
{
    return Max(Max(a, b), Max(c, d, e));
}

/// Return the maximum of six values
template<typename Type>
inline const Type&
Max(const Type& a, const Type& b, const Type& c, const Type& d, const Type& e, const Type& f)
{
    return Max(Max(a, b, c), Max(d, e, f));
}

/// Return the maximum of seven values
template<typename Type>
inline const Type&
Max(const Type& a, const Type& b, const Type& c, const Type& d,
    const Type& e, const Type& f, const Type& g)
{
    return Max(Max(a, b, c, d), Max(e, f, g));
}

/// Return the maximum of eight values
template<typename Type>
inline const Type&
Max(const Type& a, const Type& b, const Type& c, const Type& d,
    const Type& e, const Type& f, const Type& g, const Type& h)
{
    return Max(Max(a, b, c, d), Max(e, f, g, h));
}
// ==========> Min <==================

/// Return the minimum of two values
/// @note Like std::min, the first argument is returned when both compare equal.
template<typename Type>
inline const Type&
Min(const Type& a, const Type& b)
{
    return (b < a) ? b : a;
}

/// Return the minimum of three values
template<typename Type>
inline const Type&
Min(const Type& a, const Type& b, const Type& c)
{
    return Min(Min(a, b), c);
}

/// Return the minimum of four values
template<typename Type>
inline const Type&
Min(const Type& a, const Type& b, const Type& c, const Type& d)
{
    return Min(Min(a, b), Min(c, d));
}

/// Return the minimum of five values
template<typename Type>
inline const Type&
Min(const Type& a, const Type& b, const Type& c, const Type& d, const Type& e)
{
    return Min(Min(a, b), Min(c, d, e));
}

/// Return the minimum of six values
template<typename Type>
inline const Type&
Min(const Type& a, const Type& b, const Type& c, const Type& d, const Type& e, const Type& f)
{
    return Min(Min(a, b, c), Min(d, e, f));
}

/// Return the minimum of seven values
template<typename Type>
inline const Type&
Min(const Type& a, const Type& b, const Type& c, const Type& d,
    const Type& e, const Type& f, const Type& g)
{
    return Min(Min(a, b, c, d), Min(e, f, g));
}

/// Return the minimum of eight values
template<typename Type>
inline const Type&
Min(const Type& a, const Type& b, const Type& c, const Type& d,
    const Type& e, const Type& f, const Type& g, const Type& h)
{
    return Min(Min(a, b, c, d), Min(e, f, g, h));
}
// ============> Exp <==================

/// Return @a e<sup>@a x</sup>.
/// @details Thin wrapper around std::exp.
template<typename Type>
inline Type Exp(const Type& x) { return std::exp(x); }

// ============> Sin <==================

//@{
/// Return sin @a x.
/// @note @a x is in radians (std::sin semantics).
inline float Sin(const float& x) { return std::sin(x); }
inline double Sin(const double& x) { return std::sin(x); }
//@}

// ============> Cos <==================

//@{
/// Return cos @a x.
/// @note @a x is in radians (std::cos semantics).
inline float Cos(const float& x) { return std::cos(x); }
inline double Cos(const double& x) { return std::cos(x); }
//@}
////////////////////////////////////////
/// Return the sign of the given value as an integer (either -1, 0 or 1).
template <typename Type>
inline int Sign(const Type &x) { return (zeroVal<Type>() < x) - (x < zeroVal<Type>()); }
/// @brief Return @c true if @a a and @a b have different signs.
/// @note Zero is considered a positive number.
template <typename Type>
inline bool
SignChange(const Type& a, const Type& b)
{
    const bool aIsNegative = a < zeroVal<Type>();
    const bool bIsNegative = b < zeroVal<Type>();
    return aIsNegative != bIsNegative;
}
/// @brief Return @c true if the interval [@a a, @a b] includes zero,
/// i.e., if either @a a or @a b is zero or if they have different signs.
template <typename Type>
inline bool
ZeroCrossing(const Type& a, const Type& b)
{
return a * b <= zeroVal<Type>();
}
//@{
/// Return the square root of a floating-point value.
inline float Sqrt(float x) { return std::sqrt(x); }
inline double Sqrt(double x) { return std::sqrt(x); }
inline long double Sqrt(long double x) { return std::sqrt(x); }
//@}

//@{
/// Return the cube root of a floating-point value.
inline float Cbrt(float x) { return std::cbrt(x); }
inline double Cbrt(double x) { return std::cbrt(x); }
inline long double Cbrt(long double x) { return std::cbrt(x); }
//@}

//@{
/// Return the remainder of @a x / @a y.
/// @note The result carries the sign of @a x (C++ operator% / std::fmod
/// semantics), e.g. Mod(-7, 3) == -1.
/// @warning The integer overload has undefined behavior for y == 0.
inline int Mod(int x, int y) { return (x % y); }
inline float Mod(float x, float y) { return std::fmod(x, y); }
inline double Mod(double x, double y) { return std::fmod(x, y); }
inline long double Mod(long double x, long double y) { return std::fmod(x, y); }
/// Alias for Mod().
template<typename Type> inline Type Remainder(Type x, Type y) { return Mod(x, y); }
//@}
//@{
/// Return @a x rounded up to the nearest integer.
/// @details Thin wrappers around std::ceil; values that are already
/// integral are returned unchanged.
inline float RoundUp(float x) { return std::ceil(x); }
inline double RoundUp(double x) { return std::ceil(x); }
inline long double RoundUp(long double x) { return std::ceil(x); }
//@}
/// @brief Return @a x rounded up to the nearest multiple of @a base.
/// @details The result is the smallest multiple of @a base that is greater
/// than or equal to @a x, now for negative @a x as well: the remainder
/// produced by Remainder()/std::fmod carries the sign of @a x, so a
/// negative remainder must not be advanced by an extra @a base.
/// (The previous one-liner returned e.g. RoundUp(-3.5, 2.0) == 0
/// instead of -2.)
/// @note @a base is assumed to be positive and nonzero.
template<typename Type>
inline Type
RoundUp(Type x, Type base)
{
    Type remainder = Remainder(x, base);
    if (!remainder) return x; // x is already an exact multiple of base
    // remainder > 0: x lies above a multiple, so step up to the next one.
    // remainder < 0 (negative x): stripping the remainder already rounds up.
    return (remainder > 0) ? x - remainder + base : x - remainder;
}
//@{
/// Return @a x rounded down to the nearest integer.
/// @details Thin wrappers around std::floor; values that are already
/// integral are returned unchanged.
inline float RoundDown(float x) { return std::floor(x); }
inline double RoundDown(double x) { return std::floor(x); }
inline long double RoundDown(long double x) { return std::floor(x); }
//@}
/// @brief Return @a x rounded down to the nearest multiple of @a base.
/// @details The result is the largest multiple of @a base that is less
/// than or equal to @a x, now for negative @a x as well: the remainder
/// produced by Remainder()/std::fmod carries the sign of @a x, so for
/// negative @a x stripping the remainder alone would round toward zero,
/// i.e. up.  (The previous one-liner returned e.g.
/// RoundDown(-3.5, 2.0) == -2 instead of -4.)
/// @note @a base is assumed to be positive and nonzero.
template<typename Type>
inline Type
RoundDown(Type x, Type base)
{
    Type remainder = Remainder(x, base);
    if (!remainder) return x; // x is already an exact multiple of base
    // remainder > 0: stripping the remainder rounds down.
    // remainder < 0 (negative x): also step back by one full base.
    return (remainder > 0) ? x - remainder : x - remainder - base;
}
//@{
/// Return @a x rounded to the nearest integer.
/// @note Implemented as floor(x + 0.5), i.e. halfway cases round up
/// (toward +infinity) even for negative inputs: Round(-0.5) == 0.
/// This differs from std::round, which rounds halves away from zero.
inline float Round(float x) { return RoundDown(x + 0.5f); }
inline double Round(double x) { return RoundDown(x + 0.5); }
inline long double Round(long double x) { return RoundDown(x + 0.5l); }
//@}

/// Return the euclidean remainder of @a x.
/// Note unlike % operator this will always return a positive result
/// @details Equivalent to x - floor(x); the result lies in [0, 1)
/// (the implicit divisor is 1).
template<typename Type>
inline Type
EuclideanRemainder(Type x) { return x - RoundDown(x); }

/// Return the integer part of @a x.
/// @details Rounds toward zero: floor for positive @a x, ceil otherwise.
template<typename Type>
inline Type
IntegerPart(Type x)
{
    return (x > 0 ? RoundDown(x) : RoundUp(x));
}

/// Return the fractional part of @a x.
/// @note The result keeps the sign of @a x (fmod semantics):
/// FractionalPart(-1.25) == -0.25, unlike EuclideanRemainder().
template<typename Type>
inline Type
FractionalPart(Type x) { return Mod(x,Type(1)); }
//@{
/// Return the floor of @a x.
/// @note The result is truncated to int, which can overflow for values
/// outside the int range.
inline int Floor(float x) { return int(RoundDown(x)); }
inline int Floor(double x) { return int(RoundDown(x)); }
inline int Floor(long double x) { return int(RoundDown(x)); }
//@}

//@{
/// Return the ceiling of @a x.
/// @note The result is truncated to int, which can overflow for values
/// outside the int range.
inline int Ceil(float x) { return int(RoundUp(x)); }
inline int Ceil(double x) { return int(RoundUp(x)); }
inline int Ceil(long double x) { return int(RoundUp(x)); }
//@}
/// Return @a x if it is greater or equal in magnitude than @a delta. Otherwise, return zero.
template<typename Type>
inline Type Chop(Type x, Type delta) { return (Abs(x) < delta ? zeroVal<Type>() : x); }

/// Return @a x truncated to the given number of decimal digits.
/// @note Despite the name, the +0.5 below makes this a round-half-up to
/// @a digits decimal places rather than a pure truncation.
template<typename Type>
inline Type
Truncate(Type x, unsigned int digits)
{
    // tenth = 10^digits: shift the target digits left of the decimal
    // point, round down, then shift back.
    Type tenth = Pow(10,digits);
    return RoundDown(x*tenth+0.5)/tenth;
}
////////////////////////////////////////

/// @brief 8-bit integer values print to std::ostreams as characters.
/// Cast them so that they print as integers instead.
/// @details The generic overload is SFINAE-disabled for the two 8-bit
/// types, which are widened by the dedicated overloads below.
template<typename T>
inline auto PrintCast(const T& val) -> typename std::enable_if<!std::is_same<T, int8_t>::value
    && !std::is_same<T, uint8_t>::value, const T&>::type { return val; }
inline int32_t PrintCast(int8_t val) { return static_cast<int32_t>(val); }
inline uint32_t PrintCast(uint8_t val) { return static_cast<uint32_t>(val); }
////////////////////////////////////////

/// Return the inverse of @a x.
/// @note Asserts (in debug builds) that @a x is nonzero.
template<typename Type>
inline Type
Inv(Type x)
{
    assert(x);
    const Type one(1);
    return one / x;
}
/// Cartesian axis indices, usable directly as subscripts into Vec3/Coord.
enum Axis {
    X_AXIS = 0,
    Y_AXIS = 1,
    Z_AXIS = 2
};

// enum values are consistent with their historical mx analogs.
enum RotationOrder {
    XYZ_ROTATION = 0,
    XZY_ROTATION,
    YXZ_ROTATION,
    YZX_ROTATION,
    ZXY_ROTATION,
    ZYX_ROTATION,
    XZX_ROTATION,
    ZXZ_ROTATION
};

/// @brief Arithmetic promotion trait: @c type is the "supertype" capable
/// of representing values of both @a S and @a T.
/// @note Delegates to boost::numeric::conversion_traits.
template <typename S, typename T>
struct promote {
    using type = typename boost::numeric::conversion_traits<S, T>::supertype;
};
/// @brief Return the index [0,1,2] of the smallest value in a 3D vector.
/// @note This methods assumes operator[] exists and avoids branching.
/// @details If two components of the input vector are equal and smaller than the
/// third component, the largest index of the two is always returned.
/// If all three vector components are equal the largest index, i.e. 2, is
/// returned. In other words the return value corresponds to the largest index
/// of the of the smallest vector components.
template<typename Vec3T>
size_t
MinIndex(const Vec3T& v)
{
    // Encode the three pairwise "<" comparisons as a 3-bit key, then map
    // the key to an index through a lookup table (9 marks impossible keys).
    const size_t key = ((v[0] < v[1]) ? 4u : 0u)
                     | ((v[0] < v[2]) ? 2u : 0u)
                     | ((v[1] < v[2]) ? 1u : 0u);
    static const size_t lut[8] = { 2, 1, 9, 1, 2, 9, 0, 0 };
    return lut[key];
}
/// @brief Return the index [0,1,2] of the largest value in a 3D vector.
/// @note This methods assumes operator[] exists and avoids branching.
/// @details If two components of the input vector are equal and larger than the
/// third component, the largest index of the two is always returned.
/// If all three vector components are equal the largest index, i.e. 2, is
/// returned. In other words the return value corresponds to the largest index
/// of the largest vector components.
template<typename Vec3T>
size_t
MaxIndex(const Vec3T& v)
{
    // Encode the three pairwise ">" comparisons as a 3-bit key, then map
    // the key to an index through a lookup table (9 marks impossible keys).
    const size_t key = ((v[0] > v[1]) ? 4u : 0u)
                     | ((v[0] > v[2]) ? 2u : 0u)
                     | ((v[1] > v[2]) ? 1u : 0u);
    static const size_t lut[8] = { 2, 1, 9, 1, 2, 9, 0, 0 };
    return lut[key];
}
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_MATH_MATH_HAS_BEEN_INCLUDED
| 28,968 | C | 28.895769 | 101 | 0.663594 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/Coord.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_MATH_COORD_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_COORD_HAS_BEEN_INCLUDED
#include <functional>// for std::hash
#include <algorithm> // for std::min(), std::max()
#include <array> // for std::array
#include <iostream>
#include <limits>
#include <openvdb/Platform.h>
#include "Math.h"
#include "Vec3.h"
namespace tbb { class split; } // forward declaration
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
/// @brief Signed (x, y, z) 32-bit integer coordinates
class Coord
{
public:
    using Int32 = int32_t;
    using Index32 = uint32_t;
    using Vec3i = Vec3<Int32>;
    using Vec3I = Vec3<Index32>;
    using ValueType = Int32;
    using Limits = std::numeric_limits<ValueType>;

    /// Default constructor: (0, 0, 0).
    Coord(): mVec{{0, 0, 0}} {}
    /// Construct with all three components set to @a xyz.
    explicit Coord(Int32 xyz): mVec{{xyz, xyz, xyz}} {}
    Coord(Int32 x, Int32 y, Int32 z): mVec{{x, y, z}} {}
    explicit Coord(const Vec3i& v): mVec{{v[0], v[1], v[2]}} {}
    explicit Coord(const Vec3I& v): mVec{{Int32(v[0]), Int32(v[1]), Int32(v[2])}} {}
    /// Construct from the first three elements of @a v.
    explicit Coord(const Int32* v): mVec{{v[0], v[1], v[2]}} {}

    /// @brief Return the smallest possible coordinate
    static Coord min() { return Coord(Limits::min()); }

    /// @brief Return the largest possible coordinate
    static Coord max() { return Coord(Limits::max()); }

    /// @brief Return @a xyz rounded to the closest integer coordinates
    /// (cell centered conversion).
    template<typename T> static Coord round(const Vec3<T>& xyz)
    {
        return Coord(Int32(Round(xyz[0])), Int32(Round(xyz[1])), Int32(Round(xyz[2])));
    }
    /// @brief Return the largest integer coordinates that are not greater
    /// than @a xyz (node centered conversion).
    template<typename T> static Coord floor(const Vec3<T>& xyz)
    {
        return Coord(Int32(Floor(xyz[0])), Int32(Floor(xyz[1])), Int32(Floor(xyz[2])));
    }

    /// @brief Return the largest integer coordinates that are not greater
    /// than @a xyz+1 (node centered conversion).
    template<typename T> static Coord ceil(const Vec3<T>& xyz)
    {
        return Coord(Int32(Ceil(xyz[0])), Int32(Ceil(xyz[1])), Int32(Ceil(xyz[2])));
    }

    /// @brief Reset all three coordinates with the specified arguments
    Coord& reset(Int32 x, Int32 y, Int32 z)
    {
        mVec[0] = x;
        mVec[1] = y;
        mVec[2] = z;
        return *this;
    }
    /// @brief Reset all three coordinates with the same specified argument
    Coord& reset(Int32 xyz) { return this->reset(xyz, xyz, xyz); }

    // Single-component setters; all return *this for chaining.
    Coord& setX(Int32 x) { mVec[0] = x; return *this; }
    Coord& setY(Int32 y) { mVec[1] = y; return *this; }
    Coord& setZ(Int32 z) { mVec[2] = z; return *this; }

    /// In-place translation by (dx, dy, dz).
    Coord& offset(Int32 dx, Int32 dy, Int32 dz)
    {
        mVec[0] += dx;
        mVec[1] += dy;
        mVec[2] += dz;
        return *this;
    }
    /// In-place translation by (n, n, n).
    Coord& offset(Int32 n) { return this->offset(n, n, n); }
    /// Return a copy translated by (dx, dy, dz); *this is unchanged.
    Coord offsetBy(Int32 dx, Int32 dy, Int32 dz) const
    {
        return Coord(mVec[0] + dx, mVec[1] + dy, mVec[2] + dz);
    }
    Coord offsetBy(Int32 n) const { return offsetBy(n, n, n); }

    // Component-wise arithmetic.
    Coord& operator+=(const Coord& rhs)
    {
        mVec[0] += rhs[0];
        mVec[1] += rhs[1];
        mVec[2] += rhs[2];
        return *this;
    }
    Coord& operator-=(const Coord& rhs)
    {
        mVec[0] -= rhs[0];
        mVec[1] -= rhs[1];
        mVec[2] -= rhs[2];
        return *this;
    }
    Coord operator+(const Coord& rhs) const
    {
        return Coord(mVec[0] + rhs[0], mVec[1] + rhs[1], mVec[2] + rhs[2]);
    }
    Coord operator-(const Coord& rhs) const
    {
        return Coord(mVec[0] - rhs[0], mVec[1] - rhs[1], mVec[2] - rhs[2]);
    }
    Coord operator-() const { return Coord(-mVec[0], -mVec[1], -mVec[2]); }

    // Component-wise bit shifts and masks (used e.g. for tree-level
    // index arithmetic, where coordinates are expected non-negative).
    Coord  operator>> (size_t n) const { return Coord(mVec[0]>>n, mVec[1]>>n, mVec[2]>>n); }
    Coord  operator<< (size_t n) const { return Coord(mVec[0]<<n, mVec[1]<<n, mVec[2]<<n); }
    Coord& operator<<=(size_t n) { mVec[0]<<=n; mVec[1]<<=n; mVec[2]<<=n; return *this; }
    Coord& operator>>=(size_t n) { mVec[0]>>=n; mVec[1]>>=n; mVec[2]>>=n; return *this; }
    Coord  operator&  (Int32 n) const { return Coord(mVec[0] & n, mVec[1] & n, mVec[2] & n); }
    Coord  operator|  (Int32 n) const { return Coord(mVec[0] | n, mVec[1] | n, mVec[2] | n); }
    Coord& operator&= (Int32 n) { mVec[0]&=n; mVec[1]&=n; mVec[2]&=n; return *this; }
    Coord& operator|= (Int32 n) { mVec[0]|=n; mVec[1]|=n; mVec[2]|=n; return *this; }

    // Read-only component access.
    Int32 x() const { return mVec[0]; }
    Int32 y() const { return mVec[1]; }
    Int32 z() const { return mVec[2]; }
    Int32 operator[](size_t i) const { assert(i < 3); return mVec[i]; }
    // Mutable component access.
    Int32& x() { return mVec[0]; }
    Int32& y() { return mVec[1]; }
    Int32& z() { return mVec[2]; }
    Int32& operator[](size_t i) { assert(i < 3); return mVec[i]; }

    const Int32* data() const { return mVec.data(); }
    Int32* data() { return mVec.data(); }
    const Int32* asPointer() const { return mVec.data(); }
    Int32* asPointer() { return mVec.data(); }
    // Conversions to the various Vec3 flavors.
    Vec3d asVec3d() const { return Vec3d(double(mVec[0]), double(mVec[1]), double(mVec[2])); }
    Vec3s asVec3s() const { return Vec3s(float(mVec[0]), float(mVec[1]), float(mVec[2])); }
    Vec3i asVec3i() const { return Vec3i(mVec.data()); }
    Vec3I asVec3I() const { return Vec3I(Index32(mVec[0]), Index32(mVec[1]), Index32(mVec[2])); }
    void asXYZ(Int32& x, Int32& y, Int32& z) const { x = mVec[0]; y = mVec[1]; z = mVec[2]; }

    bool operator==(const Coord& rhs) const
    {
        return (mVec[0] == rhs.mVec[0] && mVec[1] == rhs.mVec[1] && mVec[2] == rhs.mVec[2]);
    }
    bool operator!=(const Coord& rhs) const { return !(*this == rhs); }

    /// Lexicographic less than
    bool operator<(const Coord& rhs) const
    {
        return this->x() < rhs.x() ? true : this->x() > rhs.x() ? false
             : this->y() < rhs.y() ? true : this->y() > rhs.y() ? false
             : this->z() < rhs.z() ? true : false;
    }
    /// Lexicographic less than or equal to
    bool operator<=(const Coord& rhs) const
    {
        return this->x() < rhs.x() ? true : this->x() > rhs.x() ? false
             : this->y() < rhs.y() ? true : this->y() > rhs.y() ? false
             : this->z() <=rhs.z() ? true : false;
    }
    /// Lexicographic greater than
    bool operator>(const Coord& rhs) const { return !(*this <= rhs); }
    /// Lexicographic greater than or equal to
    bool operator>=(const Coord& rhs) const { return !(*this < rhs); }

    /// Perform a component-wise minimum with the other Coord.
    void minComponent(const Coord& other)
    {
        mVec[0] = std::min(mVec[0], other.mVec[0]);
        mVec[1] = std::min(mVec[1], other.mVec[1]);
        mVec[2] = std::min(mVec[2], other.mVec[2]);
    }

    /// Perform a component-wise maximum with the other Coord.
    void maxComponent(const Coord& other)
    {
        mVec[0] = std::max(mVec[0], other.mVec[0]);
        mVec[1] = std::max(mVec[1], other.mVec[1]);
        mVec[2] = std::max(mVec[2], other.mVec[2]);
    }

    /// Return the component-wise minimum of the two Coords.
    static inline Coord minComponent(const Coord& lhs, const Coord& rhs)
    {
        return Coord(std::min(lhs.x(), rhs.x()),
                     std::min(lhs.y(), rhs.y()),
                     std::min(lhs.z(), rhs.z()));
    }

    /// Return the component-wise maximum of the two Coords.
    static inline Coord maxComponent(const Coord& lhs, const Coord& rhs)
    {
        return Coord(std::max(lhs.x(), rhs.x()),
                     std::max(lhs.y(), rhs.y()),
                     std::max(lhs.z(), rhs.z()));
    }

    /// Return true if any of the components of @a a are smaller than the
    /// corresponding components of @a b.
    static inline bool lessThan(const Coord& a, const Coord& b)
    {
        return (a[0] < b[0] || a[1] < b[1] || a[2] < b[2]);
    }

    /// @brief Return the index (0, 1 or 2) with the smallest value.
    size_t minIndex() const { return MinIndex(mVec); }

    /// @brief Return the index (0, 1 or 2) with the largest value.
    size_t maxIndex() const { return MaxIndex(mVec); }

    // Raw binary (de)serialization of the three Int32 components;
    // no endianness conversion is performed here.
    void read(std::istream& is) { is.read(reinterpret_cast<char*>(mVec.data()), sizeof(mVec)); }
    void write(std::ostream& os) const
    {
        os.write(reinterpret_cast<const char*>(mVec.data()), sizeof(mVec));
    }

    /// @brief Return a hash value for this coordinate
    /// @note Log2N is the binary logarithm of the hash table size.
    /// @details The hash function is taken from the SIGGRAPH paper:
    /// "VDB: High-resolution sparse volumes with dynamic topology"
    template<int Log2N = 20>
    size_t hash() const
    {
        return ((1<<Log2N)-1) & (mVec[0]*73856093 ^ mVec[1]*19349663 ^ mVec[2]*83492791);
    }

private:
    std::array<Int32, 3> mVec; // the (x, y, z) components
}; // class Coord
////////////////////////////////////////
/// @brief Axis-aligned bounding box of signed integer coordinates
/// @note The range of the integer coordinates, [min, max], is inclusive.
/// Thus, a bounding box with min = max is not empty but rather encloses
/// a single coordinate.
class CoordBBox
{
public:
    using Index64 = uint64_t;
    using ValueType = Coord::ValueType;

    /// @brief Iterator over the Coord domain covered by a CoordBBox
    /// @note If ZYXOrder is @c true, @e z is the fastest-moving coordinate,
    /// otherwise the traversal is in XYZ order (i.e., @e x is fastest-moving).
    template<bool ZYXOrder>
    class Iterator
    {
    public:
        /// @brief C-tor from a bounding box
        Iterator(const CoordBBox& b): mPos(b.min()), mMin(b.min()), mMax(b.max()) {}
        /// @brief Increment the iterator to point to the next coordinate.
        /// @details Iteration stops one past the maximum coordinate
        /// along the axis determined by the template parameter.
        Iterator& operator++() { ZYXOrder ? next<2,1,0>() : next<0,1,2>(); return *this; }
        /// @brief Return @c true if the iterator still points to a valid coordinate.
        /// @details Only the slowest-moving axis can run past its bound.
        operator bool() const { return ZYXOrder ? (mPos[0] <= mMax[0]) : (mPos[2] <= mMax[2]); }
        /// @brief Return a const reference to the coordinate currently pointed to.
        const Coord& operator*() const { return mPos; }
        /// Return @c true if this iterator and the given iterator point to the same coordinate.
        bool operator==(const Iterator& other) const
        {
            return ((mPos == other.mPos) && (mMin == other.mMin) && (mMax == other.mMax));
        }
        /// Return @c true if this iterator and the given iterator point to different coordinates.
        bool operator!=(const Iterator& other) const { return !(*this == other); }
    private:
        // Odometer-style advance: axis a is fastest, then b, then c.
        template<size_t a, size_t b, size_t c>
        void next()
        {
            if (mPos[a] < mMax[a]) { ++mPos[a]; } // this is the most common case
            else if (mPos[b] < mMax[b]) { mPos[a] = mMin[a]; ++mPos[b]; }
            else if (mPos[c] <= mMax[c]) { mPos[a] = mMin[a]; mPos[b] = mMin[b]; ++mPos[c]; }
        }
        Coord mPos, mMin, mMax;
        friend class CoordBBox; // for CoordBBox::end()
    };// CoordBBox::Iterator

    using ZYXIterator = Iterator</*ZYX=*/true>;
    using XYZIterator = Iterator</*ZYX=*/false>;

    /// @brief The default constructor produces an empty bounding box.
    CoordBBox(): mMin(Coord::max()), mMax(Coord::min()) {}
    /// @brief Construct a bounding box with the given @a min and @a max bounds.
    CoordBBox(const Coord& min, const Coord& max): mMin(min), mMax(max) {}
    /// @brief Construct from individual components of the min and max bounds.
    CoordBBox(ValueType xMin, ValueType yMin, ValueType zMin,
              ValueType xMax, ValueType yMax, ValueType zMax)
        : mMin(xMin, yMin, zMin), mMax(xMax, yMax, zMax)
    {
    }
    /// @brief Splitting constructor for use in TBB ranges
    /// @note The other bounding box is assumed to be divisible.
    /// @details Splits @a other in half along its longest axis; this box
    /// keeps the lower half, @a other keeps the upper half.
    CoordBBox(CoordBBox& other, const tbb::split&): mMin(other.mMin), mMax(other.mMax)
    {
        assert(this->is_divisible());
        const size_t n = this->maxExtent();
        mMax[n] = (mMin[n] + mMax[n]) >> 1;
        other.mMin[n] = mMax[n] + 1;
    }

    /// Return a cube of edge length @a dim whose minimum corner is @a min.
    static CoordBBox createCube(const Coord& min, ValueType dim)
    {
        return CoordBBox(min, min.offsetBy(dim - 1));
    }

    /// Return an "infinite" bounding box, as defined by the Coord value range.
    static CoordBBox inf() { return CoordBBox(Coord::min(), Coord::max()); }

    const Coord& min() const { return mMin; }
    const Coord& max() const { return mMax; }

    Coord& min() { return mMin; }
    Coord& max() { return mMax; }

    /// Reset to the empty state (min > max).
    void reset() { mMin = Coord::max(); mMax = Coord::min(); }
    void reset(const Coord& min, const Coord& max) { mMin = min; mMax = max; }
    void resetToCube(const Coord& min, ValueType dim) { mMin = min; mMax = min.offsetBy(dim - 1); }

    /// @brief Return the minimum coordinate.
    /// @note The start coordinate is inclusive.
    Coord getStart() const { return mMin; }
    /// @brief Return the maximum coordinate plus one.
    /// @note This end coordinate is exclusive.
    Coord getEnd() const { return mMax.offsetBy(1); }

    /// @brief Return a ZYX-order iterator that points to the minimum coordinate.
    ZYXIterator begin() const { return ZYXIterator{*this}; }
    /// @brief Return a ZYX-order iterator that points to the minimum coordinate.
    ZYXIterator beginZYX() const { return ZYXIterator{*this}; }
    /// @brief Return an XYZ-order iterator that points to the minimum coordinate.
    XYZIterator beginXYZ() const { return XYZIterator{*this}; }

    /// @brief Return a ZYX-order iterator that points past the maximum coordinate.
    ZYXIterator end() const { ZYXIterator it{*this}; it.mPos[0] = mMax[0] + 1; return it; }
    /// @brief Return a ZYX-order iterator that points past the maximum coordinate.
    ZYXIterator endZYX() const { return end(); }
    /// @brief Return an XYZ-order iterator that points past the maximum coordinate.
    XYZIterator endXYZ() const { XYZIterator it{*this}; it.mPos[2] = mMax[2] + 1; return it; }

    bool operator==(const CoordBBox& rhs) const { return mMin == rhs.mMin && mMax == rhs.mMax; }
    bool operator!=(const CoordBBox& rhs) const { return !(*this == rhs); }

    /// @brief Return @c true if this bounding box is empty (i.e., encloses no coordinates).
    bool empty() const
    {
// The comparisons below can trip GCC's strict-overflow warning for
// extreme coordinate values; silence it locally.
#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-overflow"
#endif
        return (mMin[0] > mMax[0] || mMin[1] > mMax[1] || mMin[2] > mMax[2]);
#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
#pragma GCC diagnostic pop
#endif
    }
    /// @brief Return @c true if this bounding box is nonempty
    /// (i.e., encloses at least one coordinate).
    operator bool() const { return !this->empty(); }
    /// @brief Return @c true if this bounding box is nonempty
    /// (i.e., encloses at least one coordinate).
    bool hasVolume() const { return !this->empty(); }

    /// @brief Return the floating-point position of the center of this bounding box.
    Vec3d getCenter() const { return 0.5 * Vec3d((mMin + mMax).asPointer()); }

    /// @brief Return the dimensions of the coordinates spanned by this bounding box.
    /// @note Since coordinates are inclusive, a bounding box with min = max
    /// has dimensions of (1, 1, 1).
    Coord dim() const { return empty() ? Coord(0) : (mMax.offsetBy(1) - mMin); }
    /// @todo deprecate - use dim instead
    Coord extents() const { return this->dim(); }
    /// @brief Return the integer volume of coordinates spanned by this bounding box.
    /// @note Since coordinates are inclusive, a bounding box with min = max has volume one.
    Index64 volume() const
    {
        const Coord d = this->dim();
        return Index64(d[0]) * Index64(d[1]) * Index64(d[2]);
    }
    /// @brief Return @c true if this bounding box can be subdivided [mainly for use by TBB].
    bool is_divisible() const { return mMin[0]<mMax[0] && mMin[1]<mMax[1] && mMin[2]<mMax[2]; }

    /// @brief Return the index (0, 1 or 2) of the shortest axis.
    size_t minExtent() const { return this->dim().minIndex(); }

    /// @brief Return the index (0, 1 or 2) of the longest axis.
    size_t maxExtent() const { return this->dim().maxIndex(); }

    /// @brief Return @c true if point (x, y, z) is inside this bounding box.
    bool isInside(const Coord& xyz) const
    {
        return !(Coord::lessThan(xyz,mMin) || Coord::lessThan(mMax,xyz));
    }

    /// @brief Return @c true if the given bounding box is inside this bounding box.
    bool isInside(const CoordBBox& b) const
    {
        return !(Coord::lessThan(b.mMin,mMin) || Coord::lessThan(mMax,b.mMax));
    }

    /// @brief Return @c true if the given bounding box overlaps with this bounding box.
    bool hasOverlap(const CoordBBox& b) const
    {
        return !(Coord::lessThan(mMax,b.mMin) || Coord::lessThan(b.mMax,mMin));
    }

    /// @brief Pad this bounding box with the specified padding.
    void expand(ValueType padding)
    {
        mMin.offset(-padding);
        mMax.offset( padding);
    }

    /// @brief Return a new instance that is expanded by the specified padding.
    CoordBBox expandBy(ValueType padding) const
    {
        return CoordBBox(mMin.offsetBy(-padding),mMax.offsetBy(padding));
    }

    /// @brief Expand this bounding box to enclose point (x, y, z).
    void expand(const Coord& xyz)
    {
        mMin.minComponent(xyz);
        mMax.maxComponent(xyz);
    }

    /// @brief Union this bounding box with the given bounding box.
    void expand(const CoordBBox& bbox)
    {
        mMin.minComponent(bbox.min());
        mMax.maxComponent(bbox.max());
    }
    /// @brief Intersect this bounding box with the given bounding box.
    /// @note The result may be empty if the boxes do not overlap.
    void intersect(const CoordBBox& bbox)
    {
        mMin.maxComponent(bbox.min());
        mMax.minComponent(bbox.max());
    }
    /// @brief Union this bounding box with the cubical bounding box
    /// of the given size and with the given minimum coordinates.
    void expand(const Coord& min, Coord::ValueType dim)
    {
        mMin.minComponent(min);
        mMax.maxComponent(min.offsetBy(dim-1));
    }
    /// @brief Translate this bounding box by
    /// (<i>t<sub>x</sub></i>, <i>t<sub>y</sub></i>, <i>t<sub>z</sub></i>).
    void translate(const Coord& t) { mMin += t; mMax += t; }

    /// @brief Move this bounding box to the specified min
    void moveMin(const Coord& min) { mMax += min - mMin; mMin = min; }

    /// @brief Move this bounding box to the specified max
    void moveMax(const Coord& max) { mMin += max - mMax; mMax = max; }

    /// @brief Populates an array with the eight corner points of this bounding box.
    /// @details The ordering of the corner points is lexicographic.
    /// @warning It is assumed that the pointer can be incremented at
    /// least seven times, i.e. has storage for eight Coord elements!
    void getCornerPoints(Coord *p) const
    {
        assert(p != nullptr);
        p->reset(mMin.x(), mMin.y(), mMin.z()); ++p;
        p->reset(mMin.x(), mMin.y(), mMax.z()); ++p;
        p->reset(mMin.x(), mMax.y(), mMin.z()); ++p;
        p->reset(mMin.x(), mMax.y(), mMax.z()); ++p;
        p->reset(mMax.x(), mMin.y(), mMin.z()); ++p;
        p->reset(mMax.x(), mMin.y(), mMax.z()); ++p;
        p->reset(mMax.x(), mMax.y(), mMin.z()); ++p;
        p->reset(mMax.x(), mMax.y(), mMax.z());
    }

    //@{
    /// @brief Bit-wise operations performed on both the min and max members
    /// @note These delegate to the corresponding component-wise Coord operators.
    CoordBBox  operator>> (size_t n) const { return CoordBBox(mMin>>n, mMax>>n); }
    CoordBBox  operator<< (size_t n) const { return CoordBBox(mMin<<n, mMax<<n); }
    CoordBBox& operator<<=(size_t n) { mMin <<= n; mMax <<= n; return *this; }
    CoordBBox& operator>>=(size_t n) { mMin >>= n; mMax >>= n; return *this; }
    CoordBBox  operator&  (Coord::Int32 n) const { return CoordBBox(mMin & n, mMax & n); }
    CoordBBox  operator|  (Coord::Int32 n) const { return CoordBBox(mMin | n, mMax | n); }
    CoordBBox& operator&= (Coord::Int32 n) { mMin &= n; mMax &= n; return *this; }
    CoordBBox& operator|= (Coord::Int32 n) { mMin |= n; mMax |= n; return *this; }
    //@}

    /// @brief Unserialize this bounding box from the given stream.
    void read(std::istream& is) { mMin.read(is); mMax.read(is); }
    /// @brief Serialize this bounding box to the given stream.
    void write(std::ostream& os) const { mMin.write(os); mMax.write(os); }

private:
    Coord mMin, mMax; // inclusive lower and upper bounds
}; // class CoordBBox
////////////////////////////////////////
/// Stream a Coord by delegating to Vec3i's formatted output.
inline std::ostream& operator<<(std::ostream& os, const Coord& xyz)
{
    return os << xyz.asVec3i();
}
/// Return the component-wise absolute value of the given Coord.
inline Coord
Abs(const Coord& xyz)
{
    return Coord(Abs(xyz.x()), Abs(xyz.y()), Abs(xyz.z()));
}
//@{
/// Allow a Coord to be added to or subtracted from a Vec3.
/// @details The result type is widened via promote<> so that the Vec3's
/// scalar type and Coord's Int32 are both representable.
template<typename T>
inline Vec3<typename promote<T, typename Coord::ValueType>::type>
operator+(const Vec3<T>& v0, const Coord& v1)
{
    Vec3<typename promote<T, typename Coord::ValueType>::type> result(v0);
    result[0] += v1[0];
    result[1] += v1[1];
    result[2] += v1[2];
    return result;
}

// Commuted overload: addition is symmetric, so the same component-wise
// accumulation is reused with the operands swapped.
template<typename T>
inline Vec3<typename promote<T, typename Coord::ValueType>::type>
operator+(const Coord& v1, const Vec3<T>& v0)
{
    Vec3<typename promote<T, typename Coord::ValueType>::type> result(v0);
    result[0] += v1[0];
    result[1] += v1[1];
    result[2] += v1[2];
    return result;
}
//@}
//@{
/// Allow a Coord to be subtracted from a Vec3.
/// @details As with operator+, the result type is widened via promote<>.
template <typename T>
inline Vec3<typename promote<T, Coord::ValueType>::type>
operator-(const Vec3<T>& v0, const Coord& v1)
{
    Vec3<typename promote<T, Coord::ValueType>::type> result(v0);
    result[0] -= v1[0];
    result[1] -= v1[1];
    result[2] -= v1[2];
    return result;
}

// Coord - Vec3: computed as -(v0 - v1), i.e. the component-wise
// difference above is built first and then negated as a whole.
template <typename T>
inline Vec3<typename promote<T, Coord::ValueType>::type>
operator-(const Coord& v1, const Vec3<T>& v0)
{
    Vec3<typename promote<T, Coord::ValueType>::type> result(v0);
    result[0] -= v1[0];
    result[1] -= v1[1];
    result[2] -= v1[2];
    return -result; // negate (v0 - v1) to obtain v1 - v0
}
//@}
/// Stream a CoordBBox as "min -> max".
inline std::ostream&
operator<<(std::ostream& os, const CoordBBox& b)
{
    return os << b.min() << " -> " << b.max();
}
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
////////////////////////////////////////
// template specialization of std::hash with Coord, which
// allows for Coord to be used as the key in std::unordered_map
namespace std {// injected in namespace std

template<>
struct hash<openvdb::math::Coord>
{
    using Coord = openvdb::math::Coord;
    using argument_type = Coord;
    using result_type = std::size_t;
    // Delegates to Coord::hash<>() (the spatial hash from the VDB paper),
    // using its default table size of 2^20 buckets.
    std::size_t operator()(const Coord& ijk) const noexcept { return ijk.Coord::hash<>(); }
};// std::hash<openvdb::math::Coord>

}// namespace std
#endif // OPENVDB_MATH_COORD_HAS_BEEN_INCLUDED
| 23,091 | C | 37.422629 | 99 | 0.603698 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/Operators.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file math/Operators.h
#ifndef OPENVDB_MATH_OPERATORS_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_OPERATORS_HAS_BEEN_INCLUDED
#include "FiniteDifference.h"
#include "Stencils.h"
#include "Maps.h"
#include "Transform.h"
#include <cmath> // for std::sqrt()
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
// Simple tools to help determine when type conversions are needed

// Compile-time predicate: true only for the Vec3d specialization.
template<typename Vec3T> struct is_vec3d { static const bool value = false; };
template<> struct is_vec3d<Vec3d> { static const bool value = true; };

// Compile-time predicate: true only for the double specialization.
template<typename T> struct is_double { static const bool value = false; };
template<> struct is_double<double> { static const bool value = true; };
/// @brief Adapter to associate a map with a world-space operator,
/// giving it the same call signature as an index-space operator
/// @todo For now, the operator's result type must be specified explicitly,
/// but eventually it should be possible, via traits, to derive the result type
/// from the operator type.
template<typename MapType, typename OpType, typename ResultType>
struct MapAdapter {
    /// Capture (by value) the map that world-space evaluations will use.
    MapAdapter(const MapType& m): map(m) {}

    /// Evaluate the wrapped operator at @a ijk via random access into @a grid.
    template<typename AccessorType>
    inline ResultType
    result(const AccessorType& grid, const Coord& ijk) { return OpType::result(map, grid, ijk); }

    /// Evaluate the wrapped operator on a pre-populated stencil.
    template<typename StencilType>
    inline ResultType
    result(const StencilType& stencil) { return OpType::result(map, stencil); }

    const MapType map; // the adapter owns its own copy of the map
};
/// Adapter for vector-valued index-space operators to return the vector magnitude
template<typename OpType>
struct ISOpMagnitude {
    // random access version
    template<typename AccessorType>
    static inline double result(const AccessorType& grid, const Coord& ijk) {
        const auto v = OpType::result(grid, ijk);
        return double(v.length());
    }
    // stencil access version
    template<typename StencilType>
    static inline double result(const StencilType& stencil) {
        const auto v = OpType::result(stencil);
        return double(v.length());
    }
};
/// Adapter for vector-valued world-space operators to return the vector magnitude
template<typename OpType, typename MapT>
struct OpMagnitude {
    // random access version
    template<typename AccessorType>
    static inline double result(const MapT& map, const AccessorType& grid, const Coord& ijk) {
        const auto v = OpType::result(map, grid, ijk);
        return double(v.length());
    }
    // stencil access version
    template<typename StencilType>
    static inline double result(const MapT& map, const StencilType& stencil) {
        const auto v = OpType::result(map, stencil);
        return double(v.length());
    }
};
namespace internal {

// This additional layer is necessary for Visual C++ to compile.
// Maps an accessor or stencil type T to its scalar value type and the
// corresponding 3-vector type; used to spell the return types of the
// world-space operators below.
template<typename T>
struct ReturnValue {
    using ValueType = typename T::ValueType;
    using Vec3Type = math::Vec3<ValueType>;
};

} // namespace internal
// ---- Operators defined in index space
//@{
/// @brief Gradient operators defined in index space of various orders
template<DScheme DiffScheme>
struct ISGradient
{
    /// Random access version: first derivatives along x, y and z at voxel @a ijk.
    template<typename Accessor> static Vec3<typename Accessor::ValueType>
    result(const Accessor& grid, const Coord& ijk)
    {
        using VecT = Vec3<typename Accessor::ValueType>;
        const auto dx = D1<DiffScheme>::inX(grid, ijk);
        const auto dy = D1<DiffScheme>::inY(grid, ijk);
        const auto dz = D1<DiffScheme>::inZ(grid, ijk);
        return VecT(dx, dy, dz);
    }
    /// Stencil access version: first derivatives along x, y and z on the stencil.
    template<typename StencilT> static Vec3<typename StencilT::ValueType>
    result(const StencilT& stencil)
    {
        using VecT = Vec3<typename StencilT::ValueType>;
        const auto dx = D1<DiffScheme>::inX(stencil);
        const auto dy = D1<DiffScheme>::inY(stencil);
        const auto dz = D1<DiffScheme>::inZ(stencil);
        return VecT(dx, dy, dz);
    }
};
//@}
/// struct that relates the BiasedGradientScheme to the
/// forward and backward difference methods used, as well as to
/// the correct stencil type for index space use
template<BiasedGradientScheme bgs>
struct BIAS_SCHEME
{
    static const DScheme BD = BD_1ST;  // backward-difference scheme
    static const DScheme FD = FD_1ST;  // forward-difference scheme

    /// Stencil wide enough to support the differences above.
    template<typename GridType, bool IsSafe = true>
    struct ISStencil { using StencilType = SevenPointStencil<GridType, IsSafe>; };
};
/// First-order bias: one-sided first-order differences, seven-point stencil.
template<> struct BIAS_SCHEME<FIRST_BIAS>
{
    static const DScheme BD = BD_1ST;
    static const DScheme FD = FD_1ST;

    template<typename GridType, bool IsSafe = true>
    struct ISStencil { using StencilType = SevenPointStencil<GridType, IsSafe>; };
};
/// Second-order bias: one-sided second-order differences, thirteen-point stencil.
template<> struct BIAS_SCHEME<SECOND_BIAS>
{
    static const DScheme BD = BD_2ND;
    static const DScheme FD = FD_2ND;

    template<typename GridType, bool IsSafe = true>
    struct ISStencil { using StencilType = ThirteenPointStencil<GridType, IsSafe>; };
};
/// Third-order bias: one-sided third-order differences, nineteen-point stencil.
template<> struct BIAS_SCHEME<THIRD_BIAS>
{
    static const DScheme BD = BD_3RD;
    static const DScheme FD = FD_3RD;

    template<typename GridType, bool IsSafe = true>
    struct ISStencil { using StencilType = NineteenPointStencil<GridType, IsSafe>; };
};
/// Fifth-order WENO bias, nineteen-point stencil.
template<> struct BIAS_SCHEME<WENO5_BIAS>
{
    static const DScheme BD = BD_WENO5;
    static const DScheme FD = FD_WENO5;

    template<typename GridType, bool IsSafe = true>
    struct ISStencil { using StencilType = NineteenPointStencil<GridType, IsSafe>; };
};
/// Fifth-order Hamilton-Jacobi WENO bias, nineteen-point stencil.
template<> struct BIAS_SCHEME<HJWENO5_BIAS>
{
    static const DScheme BD = BD_HJWENO5;
    static const DScheme FD = FD_HJWENO5;

    template<typename GridType, bool IsSafe = true>
    struct ISStencil { using StencilType = NineteenPointStencil<GridType, IsSafe>; };
};
//@{
/// @brief Biased Gradient Operators, using upwinding defined by the @c Vec3Bias input
template<BiasedGradientScheme GradScheme, typename Vec3Bias>
struct ISGradientBiased
{
    static const DScheme FD = BIAS_SCHEME<GradScheme>::FD;
    static const DScheme BD = BIAS_SCHEME<GradScheme>::BD;

    /// Random access version: per-axis, pick the forward difference where the
    /// bias component is negative and the backward difference otherwise.
    template<typename Accessor>
    static Vec3<typename Accessor::ValueType>
    result(const Accessor& grid, const Coord& ijk, const Vec3Bias& V)
    {
        using ValueType = typename Accessor::ValueType;
        const ValueType dx = V[0] < 0 ? D1<FD>::inX(grid, ijk) : D1<BD>::inX(grid, ijk);
        const ValueType dy = V[1] < 0 ? D1<FD>::inY(grid, ijk) : D1<BD>::inY(grid, ijk);
        const ValueType dz = V[2] < 0 ? D1<FD>::inZ(grid, ijk) : D1<BD>::inZ(grid, ijk);
        return Vec3<ValueType>(dx, dy, dz);
    }
    /// Stencil access version of the upwind-biased gradient.
    template<typename StencilT>
    static Vec3<typename StencilT::ValueType>
    result(const StencilT& stencil, const Vec3Bias& V)
    {
        using ValueType = typename StencilT::ValueType;
        const ValueType dx = V[0] < 0 ? D1<FD>::inX(stencil) : D1<BD>::inX(stencil);
        const ValueType dy = V[1] < 0 ? D1<FD>::inY(stencil) : D1<BD>::inY(stencil);
        const ValueType dz = V[2] < 0 ? D1<FD>::inZ(stencil) : D1<BD>::inZ(stencil);
        return Vec3<ValueType>(dx, dy, dz);
    }
};
/// Squared norm of the upwind-biased gradient, combined via Godunov's scheme.
template<BiasedGradientScheme GradScheme>
struct ISGradientNormSqrd
{
    static const DScheme FD = BIAS_SCHEME<GradScheme>::FD;
    static const DScheme BD = BIAS_SCHEME<GradScheme>::BD;

    // random access version
    template<typename Accessor>
    static typename Accessor::ValueType
    result(const Accessor& grid, const Coord& ijk)
    {
        using Vec3Type = math::Vec3<typename Accessor::ValueType>;
        const Vec3Type fwd = ISGradient<FD>::result(grid, ijk);
        const Vec3Type bwd = ISGradient<BD>::result(grid, ijk);
        // sign of the center value selects which one-sided gradients contribute
        return math::GodunovsNormSqrd(grid.getValue(ijk) > 0, bwd, fwd);
    }
    // stencil access version
    template<typename StencilT>
    static typename StencilT::ValueType
    result(const StencilT& stencil)
    {
        using Vec3Type = math::Vec3<typename StencilT::ValueType>;
        const Vec3Type fwd = ISGradient<FD>::result(stencil);
        const Vec3Type bwd = ISGradient<BD>::result(stencil);
        return math::GodunovsNormSqrd(stencil.template getValue<0, 0, 0>() > 0, bwd, fwd);
    }
};
#ifdef DWA_OPENVDB // for SIMD - note will do the computations in float
// SIMD (SSE) specialization of the HJ-WENO5 Godunov norm: the one-sided
// differences along x, y and z are packed into simd::Float4 lanes, so all
// arithmetic is done in float regardless of the grid's value type.
template<>
struct ISGradientNormSqrd<HJWENO5_BIAS>
{
    // random access version
    template<typename Accessor>
    static typename Accessor::ValueType result(const Accessor& grid, const Coord& ijk)
    {
        // Local functor that fetches a grid value converted to float,
        // ready to be placed into a Float4 lane.
        struct GetValue
        {
            const Accessor& acc;
            GetValue(const Accessor& acc_): acc(acc_) {}
            // Return the grid value at ijk converted to simd::Float4::value_type (= float).
            inline simd::Float4::value_type operator()(const Coord& ijk_) {
                return static_cast<simd::Float4::value_type>(acc.getValue(ijk_));
            }
        }
        valueAt(grid);

        // SSE optimized
        // v1..v6 hold adjacent one-sided differences at increasing offsets from
        // ijk; lanes are (x, y, z, 0).  The two WENO5 combinations below produce
        // the `down` and `up` gradients fed to GodunovsNormSqrd.
        const simd::Float4
            v1(valueAt(ijk.offsetBy(-2, 0, 0)) - valueAt(ijk.offsetBy(-3, 0, 0)),
               valueAt(ijk.offsetBy( 0,-2, 0)) - valueAt(ijk.offsetBy( 0,-3, 0)),
               valueAt(ijk.offsetBy( 0, 0,-2)) - valueAt(ijk.offsetBy( 0, 0,-3)), 0),
            v2(valueAt(ijk.offsetBy(-1, 0, 0)) - valueAt(ijk.offsetBy(-2, 0, 0)),
               valueAt(ijk.offsetBy( 0,-1, 0)) - valueAt(ijk.offsetBy( 0,-2, 0)),
               valueAt(ijk.offsetBy( 0, 0,-1)) - valueAt(ijk.offsetBy( 0, 0,-2)), 0),
            v3(valueAt(ijk                   ) - valueAt(ijk.offsetBy(-1, 0, 0)),
               valueAt(ijk                   ) - valueAt(ijk.offsetBy( 0,-1, 0)),
               valueAt(ijk                   ) - valueAt(ijk.offsetBy( 0, 0,-1)), 0),
            v4(valueAt(ijk.offsetBy( 1, 0, 0)) - valueAt(ijk                   ),
               valueAt(ijk.offsetBy( 0, 1, 0)) - valueAt(ijk                   ),
               valueAt(ijk.offsetBy( 0, 0, 1)) - valueAt(ijk                   ), 0),
            v5(valueAt(ijk.offsetBy( 2, 0, 0)) - valueAt(ijk.offsetBy( 1, 0, 0)),
               valueAt(ijk.offsetBy( 0, 2, 0)) - valueAt(ijk.offsetBy( 0, 1, 0)),
               valueAt(ijk.offsetBy( 0, 0, 2)) - valueAt(ijk.offsetBy( 0, 0, 1)), 0),
            v6(valueAt(ijk.offsetBy( 3, 0, 0)) - valueAt(ijk.offsetBy( 2, 0, 0)),
               valueAt(ijk.offsetBy( 0, 3, 0)) - valueAt(ijk.offsetBy( 0, 2, 0)),
               valueAt(ijk.offsetBy( 0, 0, 3)) - valueAt(ijk.offsetBy( 0, 0, 2)), 0),
            down = math::WENO5(v1, v2, v3, v4, v5),
            up = math::WENO5(v6, v5, v4, v3, v2);

        return math::GodunovsNormSqrd(grid.getValue(ijk)>0, down, up);
    }

    // stencil access version
    template<typename StencilT>
    static typename StencilT::ValueType result(const StencilT& s)
    {
        using F4Val = simd::Float4::value_type;

        // SSE optimized
        // Same layout as the random access version, but reading from the
        // pre-populated stencil instead of the grid.
        const simd::Float4
            v1(F4Val(s.template getValue<-2, 0, 0>()) - F4Val(s.template getValue<-3, 0, 0>()),
               F4Val(s.template getValue< 0,-2, 0>()) - F4Val(s.template getValue< 0,-3, 0>()),
               F4Val(s.template getValue< 0, 0,-2>()) - F4Val(s.template getValue< 0, 0,-3>()), 0),
            v2(F4Val(s.template getValue<-1, 0, 0>()) - F4Val(s.template getValue<-2, 0, 0>()),
               F4Val(s.template getValue< 0,-1, 0>()) - F4Val(s.template getValue< 0,-2, 0>()),
               F4Val(s.template getValue< 0, 0,-1>()) - F4Val(s.template getValue< 0, 0,-2>()), 0),
            v3(F4Val(s.template getValue< 0, 0, 0>()) - F4Val(s.template getValue<-1, 0, 0>()),
               F4Val(s.template getValue< 0, 0, 0>()) - F4Val(s.template getValue< 0,-1, 0>()),
               F4Val(s.template getValue< 0, 0, 0>()) - F4Val(s.template getValue< 0, 0,-1>()), 0),
            v4(F4Val(s.template getValue< 1, 0, 0>()) - F4Val(s.template getValue< 0, 0, 0>()),
               F4Val(s.template getValue< 0, 1, 0>()) - F4Val(s.template getValue< 0, 0, 0>()),
               F4Val(s.template getValue< 0, 0, 1>()) - F4Val(s.template getValue< 0, 0, 0>()), 0),
            v5(F4Val(s.template getValue< 2, 0, 0>()) - F4Val(s.template getValue< 1, 0, 0>()),
               F4Val(s.template getValue< 0, 2, 0>()) - F4Val(s.template getValue< 0, 1, 0>()),
               F4Val(s.template getValue< 0, 0, 2>()) - F4Val(s.template getValue< 0, 0, 1>()), 0),
            v6(F4Val(s.template getValue< 3, 0, 0>()) - F4Val(s.template getValue< 2, 0, 0>()),
               F4Val(s.template getValue< 0, 3, 0>()) - F4Val(s.template getValue< 0, 2, 0>()),
               F4Val(s.template getValue< 0, 0, 3>()) - F4Val(s.template getValue< 0, 0, 2>()), 0),
            down = math::WENO5(v1, v2, v3, v4, v5),
            up = math::WENO5(v6, v5, v4, v3, v2);

        return math::GodunovsNormSqrd(s.template getValue<0, 0, 0>()>0, down, up);
    }
};
#endif //DWA_OPENVDB // for SIMD - note will do the computations in float
//@}
//@{
/// @brief Laplacian defined in index space, using various center-difference stencils
template<DDScheme DiffScheme>
struct ISLaplacian
{
    /// Random access version (defined only by the scheme specializations below).
    template<typename Accessor>
    static typename Accessor::ValueType result(const Accessor& grid, const Coord& ijk);

    /// Stencil access version (defined only by the scheme specializations below).
    template<typename StencilT>
    static typename StencilT::ValueType result(const StencilT& stencil);
};
/// Second-order central-difference Laplacian (seven-point stencil).
template<>
struct ISLaplacian<CD_SECOND>
{
    // random access version
    template<typename Accessor>
    static typename Accessor::ValueType result(const Accessor& grid, const Coord& ijk)
    {
        const auto xp = grid.getValue(ijk.offsetBy( 1, 0, 0));
        const auto xm = grid.getValue(ijk.offsetBy(-1, 0, 0));
        const auto yp = grid.getValue(ijk.offsetBy( 0, 1, 0));
        const auto ym = grid.getValue(ijk.offsetBy( 0,-1, 0));
        const auto zp = grid.getValue(ijk.offsetBy( 0, 0, 1));
        const auto zm = grid.getValue(ijk.offsetBy( 0, 0,-1));
        return xp + xm + yp + ym + zp + zm - 6*grid.getValue(ijk);
    }
    // stencil access version
    template<typename StencilT>
    static typename StencilT::ValueType result(const StencilT& stencil)
    {
        const auto xp = stencil.template getValue< 1, 0, 0>();
        const auto xm = stencil.template getValue<-1, 0, 0>();
        const auto yp = stencil.template getValue< 0, 1, 0>();
        const auto ym = stencil.template getValue< 0,-1, 0>();
        const auto zp = stencil.template getValue< 0, 0, 1>();
        const auto zm = stencil.template getValue< 0, 0,-1>();
        return xp + xm + yp + ym + zp + zm - 6*stencil.template getValue< 0, 0, 0>();
    }
};
/// Fourth-order central-difference Laplacian (thirteen-point stencil).
template<>
struct ISLaplacian<CD_FOURTH>
{
    // random access version
    template<typename Accessor>
    static typename Accessor::ValueType result(const Accessor& grid, const Coord& ijk)
    {
        using ValueT = typename Accessor::ValueType;
        // sum of the six neighbors two voxels away along each axis
        const ValueT ring2 =
            grid.getValue(ijk.offsetBy(2,0,0)) + grid.getValue(ijk.offsetBy(-2, 0, 0)) +
            grid.getValue(ijk.offsetBy(0,2,0)) + grid.getValue(ijk.offsetBy( 0,-2, 0)) +
            grid.getValue(ijk.offsetBy(0,0,2)) + grid.getValue(ijk.offsetBy( 0, 0,-2));
        // sum of the six immediate neighbors
        const ValueT ring1 =
            grid.getValue(ijk.offsetBy(1,0,0)) + grid.getValue(ijk.offsetBy(-1, 0, 0)) +
            grid.getValue(ijk.offsetBy(0,1,0)) + grid.getValue(ijk.offsetBy( 0,-1, 0)) +
            grid.getValue(ijk.offsetBy(0,0,1)) + grid.getValue(ijk.offsetBy( 0, 0,-1));
        return static_cast<ValueT>(
            (-1./12.)*ring2 + (4./3.)*ring1 - 7.5*grid.getValue(ijk));
    }
    // stencil access version
    template<typename StencilT>
    static typename StencilT::ValueType result(const StencilT& stencil)
    {
        using ValueT = typename StencilT::ValueType;
        const ValueT ring2 =
            stencil.template getValue< 2, 0, 0>() + stencil.template getValue<-2, 0, 0>() +
            stencil.template getValue< 0, 2, 0>() + stencil.template getValue< 0,-2, 0>() +
            stencil.template getValue< 0, 0, 2>() + stencil.template getValue< 0, 0,-2>();
        const ValueT ring1 =
            stencil.template getValue< 1, 0, 0>() + stencil.template getValue<-1, 0, 0>() +
            stencil.template getValue< 0, 1, 0>() + stencil.template getValue< 0,-1, 0>() +
            stencil.template getValue< 0, 0, 1>() + stencil.template getValue< 0, 0,-1>();
        return static_cast<ValueT>(
            (-1./12.)*ring2 + (4./3.)*ring1 - 7.5*stencil.template getValue< 0, 0, 0>());
    }
};
/// Sixth-order central-difference Laplacian (nineteen-point stencil).
template<>
struct ISLaplacian<CD_SIXTH>
{
    // random access version
    template<typename Accessor>
    static typename Accessor::ValueType result(const Accessor& grid, const Coord& ijk)
    {
        using ValueT = typename Accessor::ValueType;
        // sums of the six neighbors at distances three, two and one along each axis
        const ValueT ring3 =
            grid.getValue(ijk.offsetBy(3,0,0)) + grid.getValue(ijk.offsetBy(-3, 0, 0)) +
            grid.getValue(ijk.offsetBy(0,3,0)) + grid.getValue(ijk.offsetBy( 0,-3, 0)) +
            grid.getValue(ijk.offsetBy(0,0,3)) + grid.getValue(ijk.offsetBy( 0, 0,-3));
        const ValueT ring2 =
            grid.getValue(ijk.offsetBy(2,0,0)) + grid.getValue(ijk.offsetBy(-2, 0, 0)) +
            grid.getValue(ijk.offsetBy(0,2,0)) + grid.getValue(ijk.offsetBy( 0,-2, 0)) +
            grid.getValue(ijk.offsetBy(0,0,2)) + grid.getValue(ijk.offsetBy( 0, 0,-2));
        const ValueT ring1 =
            grid.getValue(ijk.offsetBy(1,0,0)) + grid.getValue(ijk.offsetBy(-1, 0, 0)) +
            grid.getValue(ijk.offsetBy(0,1,0)) + grid.getValue(ijk.offsetBy( 0,-1, 0)) +
            grid.getValue(ijk.offsetBy(0,0,1)) + grid.getValue(ijk.offsetBy( 0, 0,-1));
        return static_cast<ValueT>(
            (1./90.)*ring3 - (3./20.)*ring2 + 1.5*ring1 - (3*49/18.)*grid.getValue(ijk));
    }
    // stencil access version
    template<typename StencilT>
    static typename StencilT::ValueType result(const StencilT& stencil)
    {
        using ValueT = typename StencilT::ValueType;
        const ValueT ring3 =
            stencil.template getValue< 3, 0, 0>() + stencil.template getValue<-3, 0, 0>() +
            stencil.template getValue< 0, 3, 0>() + stencil.template getValue< 0,-3, 0>() +
            stencil.template getValue< 0, 0, 3>() + stencil.template getValue< 0, 0,-3>();
        const ValueT ring2 =
            stencil.template getValue< 2, 0, 0>() + stencil.template getValue<-2, 0, 0>() +
            stencil.template getValue< 0, 2, 0>() + stencil.template getValue< 0,-2, 0>() +
            stencil.template getValue< 0, 0, 2>() + stencil.template getValue< 0, 0,-2>();
        const ValueT ring1 =
            stencil.template getValue< 1, 0, 0>() + stencil.template getValue<-1, 0, 0>() +
            stencil.template getValue< 0, 1, 0>() + stencil.template getValue< 0,-1, 0>() +
            stencil.template getValue< 0, 0, 1>() + stencil.template getValue< 0, 0,-1>();
        return static_cast<ValueT>(
            (1./90.)*ring3 - (3./20.)*ring2 + 1.5*ring1
            - (3*49/18.)*stencil.template getValue< 0, 0, 0>());
    }
};
//@}
//@{
/// Divergence operator defined in index space using various first derivative schemes
template<DScheme DiffScheme>
struct ISDivergence
{
    /// Random access version: sum of the per-component derivatives at @a ijk.
    template<typename Accessor> static typename Accessor::ValueType::value_type
    result(const Accessor& grid, const Coord& ijk)
    {
        const auto dudx = D1Vec<DiffScheme>::inX(grid, ijk, 0);
        const auto dvdy = D1Vec<DiffScheme>::inY(grid, ijk, 1);
        const auto dwdz = D1Vec<DiffScheme>::inZ(grid, ijk, 2);
        return dudx + dvdy + dwdz;
    }
    /// Stencil access version.
    template<typename StencilT> static typename StencilT::ValueType::value_type
    result(const StencilT& stencil)
    {
        const auto dudx = D1Vec<DiffScheme>::inX(stencil, 0);
        const auto dvdy = D1Vec<DiffScheme>::inY(stencil, 1);
        const auto dwdz = D1Vec<DiffScheme>::inZ(stencil, 2);
        return dudx + dvdy + dwdz;
    }
};
//@}
//@{
/// Curl operator defined in index space using various first derivative schemes
template<DScheme DiffScheme>
struct ISCurl
{
    // random access version
    template<typename Accessor>
    static typename Accessor::ValueType result(const Accessor& grid, const Coord& ijk)
    {
        using Vec3Type = typename Accessor::ValueType;
        const auto dwdy = D1Vec<DiffScheme>::inY(grid, ijk, 2);
        const auto dvdz = D1Vec<DiffScheme>::inZ(grid, ijk, 1);
        const auto dudz = D1Vec<DiffScheme>::inZ(grid, ijk, 0);
        const auto dwdx = D1Vec<DiffScheme>::inX(grid, ijk, 2);
        const auto dvdx = D1Vec<DiffScheme>::inX(grid, ijk, 1);
        const auto dudy = D1Vec<DiffScheme>::inY(grid, ijk, 0);
        // curl = (dw/dy - dv/dz, du/dz - dw/dx, dv/dx - du/dy)
        return Vec3Type(dwdy - dvdz, dudz - dwdx, dvdx - dudy);
    }
    // stencil access version
    template<typename StencilT>
    static typename StencilT::ValueType result(const StencilT& stencil)
    {
        using Vec3Type = typename StencilT::ValueType;
        const auto dwdy = D1Vec<DiffScheme>::inY(stencil, 2);
        const auto dvdz = D1Vec<DiffScheme>::inZ(stencil, 1);
        const auto dudz = D1Vec<DiffScheme>::inZ(stencil, 0);
        const auto dwdx = D1Vec<DiffScheme>::inX(stencil, 2);
        const auto dvdx = D1Vec<DiffScheme>::inX(stencil, 1);
        const auto dudy = D1Vec<DiffScheme>::inY(stencil, 0);
        return Vec3Type(dwdy - dvdz, dudz - dwdx, dvdx - dudy);
    }
};
//@}
//@{
/// Compute the mean curvature in index space
template<DDScheme DiffScheme2, DScheme DiffScheme1>
struct ISMeanCurvature
{
    /// @brief Random access version
    /// @return @c true if the gradient is nonzero, in which case the mean curvature
    /// is returned in two parts, @a alpha and @a beta, where @a alpha is the numerator
    /// in ∇ · (∇Φ / |∇Φ|) and @a beta is |∇Φ|.
    template<typename Accessor>
    static bool result(const Accessor& grid, const Coord& ijk,
                       typename Accessor::ValueType& alpha,
                       typename Accessor::ValueType& beta)
    {
        using ValueType = typename Accessor::ValueType;
        // first derivatives
        const ValueType gx = D1<DiffScheme1>::inX(grid, ijk);
        const ValueType gy = D1<DiffScheme1>::inY(grid, ijk);
        const ValueType gz = D1<DiffScheme1>::inZ(grid, ijk);
        const ValueType gx2 = gx*gx;
        const ValueType gy2 = gy*gy;
        const ValueType gz2 = gz*gz;
        const ValueType normGrad = gx2 + gy2 + gz2;
        if (normGrad <= math::Tolerance<ValueType>::value()) {
            // gradient is numerically zero: curvature is undefined here
            alpha = beta = 0;
            return false;
        }
        // second derivatives
        const ValueType dxx = D2<DiffScheme2>::inX(grid, ijk);
        const ValueType dyy = D2<DiffScheme2>::inY(grid, ijk);
        const ValueType dzz = D2<DiffScheme2>::inZ(grid, ijk);
        const ValueType dxy = D2<DiffScheme2>::inXandY(grid, ijk);
        const ValueType dyz = D2<DiffScheme2>::inYandZ(grid, ijk);
        const ValueType dxz = D2<DiffScheme2>::inXandZ(grid, ijk);
        // for return
        alpha = (gx2*(dyy+dzz)+gy2*(dxx+dzz)+gz2*(dxx+dyy)-2*(gx*(gy*dxy+gz*dxz)+gy*gz*dyz));
        beta = ValueType(std::sqrt(double(normGrad))); // * 1/dx
        return true;
    }
    /// @brief Stencil access version
    /// @return @c true if the gradient is nonzero, in which case the mean curvature
    /// is returned in two parts, @a alpha and @a beta, where @a alpha is the numerator
    /// in ∇ · (∇Φ / |∇Φ|) and @a beta is |∇Φ|.
    template<typename StencilT>
    static bool result(const StencilT& stencil,
                       typename StencilT::ValueType& alpha,
                       typename StencilT::ValueType& beta)
    {
        using ValueType = typename StencilT::ValueType;
        // first derivatives
        const ValueType gx = D1<DiffScheme1>::inX(stencil);
        const ValueType gy = D1<DiffScheme1>::inY(stencil);
        const ValueType gz = D1<DiffScheme1>::inZ(stencil);
        const ValueType gx2 = gx*gx;
        const ValueType gy2 = gy*gy;
        const ValueType gz2 = gz*gz;
        const ValueType normGrad = gx2 + gy2 + gz2;
        if (normGrad <= math::Tolerance<ValueType>::value()) {
            alpha = beta = 0;
            return false;
        }
        // second derivatives
        const ValueType dxx = D2<DiffScheme2>::inX(stencil);
        const ValueType dyy = D2<DiffScheme2>::inY(stencil);
        const ValueType dzz = D2<DiffScheme2>::inZ(stencil);
        const ValueType dxy = D2<DiffScheme2>::inXandY(stencil);
        const ValueType dyz = D2<DiffScheme2>::inYandZ(stencil);
        const ValueType dxz = D2<DiffScheme2>::inXandZ(stencil);
        // for return
        alpha = (gx2*(dyy+dzz)+gy2*(dxx+dzz)+gz2*(dxx+dyy)-2*(gx*(gy*dxy+gz*dxz)+gy*gz*dyz));
        beta = ValueType(std::sqrt(double(normGrad))); // * 1/dx
        return true;
    }
};
////////////////////////////////////////////////////////
// --- Operators defined in the Range of a given map
//@{
/// @brief Center difference gradient operators, defined with respect to
/// the range-space of the @c map
/// @note This will need to be divided by two in the case of CD_2NDT
template<typename MapType, DScheme DiffScheme>
struct Gradient
{
    // random access version
    template<typename Accessor>
    static typename internal::ReturnValue<Accessor>::Vec3Type
    result(const MapType& map, const Accessor& grid, const Coord& ijk)
    {
        using Vec3Type = typename internal::ReturnValue<Accessor>::Vec3Type;
        // index-space gradient, pushed through the map's inverse-Jacobian transpose
        const Vec3d indexGrad(ISGradient<DiffScheme>::result(grid, ijk));
        return Vec3Type(map.applyIJT(indexGrad, ijk.asVec3d()));
    }
    // stencil access version
    template<typename StencilT>
    static typename internal::ReturnValue<StencilT>::Vec3Type
    result(const MapType& map, const StencilT& stencil)
    {
        using Vec3Type = typename internal::ReturnValue<StencilT>::Vec3Type;
        const Vec3d indexGrad(ISGradient<DiffScheme>::result(stencil));
        return Vec3Type(map.applyIJT(indexGrad, stencil.getCenterCoord().asVec3d()));
    }
};
// Partial template specialization of Gradient
// translation, any order: a translation leaves index-space derivatives unchanged
template<DScheme DiffScheme>
struct Gradient<TranslationMap, DiffScheme>
{
    // random access version
    template<typename Accessor>
    static typename internal::ReturnValue<Accessor>::Vec3Type
    result(const TranslationMap&, const Accessor& grid, const Coord& ijk)
    {
        const auto indexGrad = ISGradient<DiffScheme>::result(grid, ijk);
        return indexGrad;
    }
    // stencil access version
    template<typename StencilT>
    static typename internal::ReturnValue<StencilT>::Vec3Type
    result(const TranslationMap&, const StencilT& stencil)
    {
        const auto indexGrad = ISGradient<DiffScheme>::result(stencil);
        return indexGrad;
    }
};
/// Full template specialization of Gradient
/// uniform scale, 2nd order
template<>
struct Gradient<UniformScaleMap, CD_2ND>
{
    // random access version
    template<typename Accessor>
    static typename internal::ReturnValue<Accessor>::Vec3Type
    result(const UniformScaleMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename internal::ReturnValue<Accessor>::ValueType;
        using Vec3Type = typename internal::ReturnValue<Accessor>::Vec3Type;
        // CD_2NDT yields undivided differences; scale them by 1/(2*dx)
        const Vec3Type indexGrad(ISGradient<CD_2NDT>::result(grid, ijk));
        const ValueType inv2dx = ValueType(map.getInvTwiceScale()[0]);
        return indexGrad * inv2dx;
    }
    // stencil access version
    template<typename StencilT>
    static typename internal::ReturnValue<StencilT>::Vec3Type
    result(const UniformScaleMap& map, const StencilT& stencil)
    {
        using ValueType = typename internal::ReturnValue<StencilT>::ValueType;
        using Vec3Type = typename internal::ReturnValue<StencilT>::Vec3Type;
        const Vec3Type indexGrad(ISGradient<CD_2NDT>::result(stencil));
        const ValueType inv2dx = ValueType(map.getInvTwiceScale()[0]);
        return indexGrad * inv2dx;
    }
};
/// Full template specialization of Gradient
/// uniform scale translate, 2nd order
template<>
struct Gradient<UniformScaleTranslateMap, CD_2ND>
{
    // random access version
    template<typename Accessor>
    static typename internal::ReturnValue<Accessor>::Vec3Type
    result(const UniformScaleTranslateMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename internal::ReturnValue<Accessor>::ValueType;
        using Vec3Type = typename internal::ReturnValue<Accessor>::Vec3Type;
        // translation has no effect on derivatives; only the uniform scale matters
        const Vec3Type indexGrad(ISGradient<CD_2NDT>::result(grid, ijk));
        const ValueType inv2dx = ValueType(map.getInvTwiceScale()[0]);
        return indexGrad * inv2dx;
    }
    // stencil access version
    template<typename StencilT>
    static typename internal::ReturnValue<StencilT>::Vec3Type
    result(const UniformScaleTranslateMap& map, const StencilT& stencil)
    {
        using ValueType = typename internal::ReturnValue<StencilT>::ValueType;
        using Vec3Type = typename internal::ReturnValue<StencilT>::Vec3Type;
        const Vec3Type indexGrad(ISGradient<CD_2NDT>::result(stencil));
        const ValueType inv2dx = ValueType(map.getInvTwiceScale()[0]);
        return indexGrad * inv2dx;
    }
};
/// Full template specialization of Gradient
/// scale, 2nd order
template<>
struct Gradient<ScaleMap, CD_2ND>
{
    // random access version
    template<typename Accessor>
    static typename internal::ReturnValue<Accessor>::Vec3Type
    result(const ScaleMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename internal::ReturnValue<Accessor>::ValueType;
        using Vec3Type = typename internal::ReturnValue<Accessor>::Vec3Type;
        // non-uniform scale: each axis gets its own 1/(2*dx) factor
        const Vec3Type indexGrad(ISGradient<CD_2NDT>::result(grid, ijk));
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        const auto gx = indexGrad[0] * map.getInvTwiceScale()[0];
        const auto gy = indexGrad[1] * map.getInvTwiceScale()[1];
        const auto gz = indexGrad[2] * map.getInvTwiceScale()[2];
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END
        return Vec3Type(ValueType(gx), ValueType(gy), ValueType(gz));
    }
    // stencil access version
    template<typename StencilT>
    static typename internal::ReturnValue<StencilT>::Vec3Type
    result(const ScaleMap& map, const StencilT& stencil)
    {
        using ValueType = typename internal::ReturnValue<StencilT>::ValueType;
        using Vec3Type = typename internal::ReturnValue<StencilT>::Vec3Type;
        const Vec3Type indexGrad(ISGradient<CD_2NDT>::result(stencil));
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        const auto gx = indexGrad[0] * map.getInvTwiceScale()[0];
        const auto gy = indexGrad[1] * map.getInvTwiceScale()[1];
        const auto gz = indexGrad[2] * map.getInvTwiceScale()[2];
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END
        return Vec3Type(ValueType(gx), ValueType(gy), ValueType(gz));
    }
};
/// Full template specialization of Gradient
/// scale translate, 2nd order
template<>
struct Gradient<ScaleTranslateMap, CD_2ND>
{
    // random access version
    template<typename Accessor>
    static typename internal::ReturnValue<Accessor>::Vec3Type
    result(const ScaleTranslateMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename internal::ReturnValue<Accessor>::ValueType;
        using Vec3Type = typename internal::ReturnValue<Accessor>::Vec3Type;
        // translation drops out of derivatives; apply per-axis 1/(2*dx) factors
        const Vec3Type indexGrad(ISGradient<CD_2NDT>::result(grid, ijk));
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        const auto gx = indexGrad[0] * map.getInvTwiceScale()[0];
        const auto gy = indexGrad[1] * map.getInvTwiceScale()[1];
        const auto gz = indexGrad[2] * map.getInvTwiceScale()[2];
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END
        return Vec3Type(ValueType(gx), ValueType(gy), ValueType(gz));
    }
    // stencil access version
    template<typename StencilT>
    static typename internal::ReturnValue<StencilT>::Vec3Type
    result(const ScaleTranslateMap& map, const StencilT& stencil)
    {
        using ValueType = typename internal::ReturnValue<StencilT>::ValueType;
        using Vec3Type = typename internal::ReturnValue<StencilT>::Vec3Type;
        const Vec3Type indexGrad(ISGradient<CD_2NDT>::result(stencil));
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        const auto gx = indexGrad[0] * map.getInvTwiceScale()[0];
        const auto gy = indexGrad[1] * map.getInvTwiceScale()[1];
        const auto gz = indexGrad[2] * map.getInvTwiceScale()[2];
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END
        return Vec3Type(ValueType(gx), ValueType(gy), ValueType(gz));
    }
};
//@}
//@{
/// @brief Biased gradient operators, defined with respect to the range-space of the map
/// @note This will need to be divided by two in the case of CD_2NDT
template<typename MapType, BiasedGradientScheme GradScheme>
struct GradientBiased
{
    // random access version
    template<typename Accessor> static math::Vec3<typename Accessor::ValueType>
    result(const MapType& map, const Accessor& grid, const Coord& ijk,
           const Vec3<typename Accessor::ValueType>& V)
    {
        using Vec3Type = math::Vec3<typename Accessor::ValueType>;
        // upwind index-space gradient, mapped into world space
        const Vec3d indexGrad(ISGradientBiased<GradScheme, Vec3Type>::result(grid, ijk, V));
        return Vec3Type(map.applyIJT(indexGrad, ijk.asVec3d()));
    }
    // stencil access version
    template<typename StencilT> static math::Vec3<typename StencilT::ValueType>
    result(const MapType& map, const StencilT& stencil,
           const Vec3<typename StencilT::ValueType>& V)
    {
        using Vec3Type = math::Vec3<typename StencilT::ValueType>;
        const Vec3d indexGrad(ISGradientBiased<GradScheme, Vec3Type>::result(stencil, V));
        return Vec3Type(map.applyIJT(indexGrad, stencil.getCenterCoord().asVec3d()));
    }
};
//@}
////////////////////////////////////////////////////////
// Computes |Grad[Phi]| using upwinding
template<typename MapType, BiasedGradientScheme GradScheme>
struct GradientNormSqrd
{
    static const DScheme FD = BIAS_SCHEME<GradScheme>::FD;
    static const DScheme BD = BIAS_SCHEME<GradScheme>::BD;

    // random access version
    template<typename Accessor>
    static typename Accessor::ValueType
    result(const MapType& map, const Accessor& grid, const Coord& ijk)
    {
        using Vec3Type = math::Vec3<typename Accessor::ValueType>;
        const Vec3Type fwd = Gradient<MapType, FD>::result(map, grid, ijk);
        const Vec3Type bwd = Gradient<MapType, BD>::result(map, grid, ijk);
        // center-value sign selects the upwind contributions
        return math::GodunovsNormSqrd(grid.getValue(ijk) > 0, bwd, fwd);
    }
    // stencil access version
    template<typename StencilT>
    static typename StencilT::ValueType
    result(const MapType& map, const StencilT& stencil)
    {
        using Vec3Type = math::Vec3<typename StencilT::ValueType>;
        const Vec3Type fwd = Gradient<MapType, FD>::result(map, stencil);
        const Vec3Type bwd = Gradient<MapType, BD>::result(map, stencil);
        return math::GodunovsNormSqrd(stencil.template getValue<0, 0, 0>() > 0, bwd, fwd);
    }
};
/// Partial template specialization of GradientNormSqrd
template<BiasedGradientScheme GradScheme>
struct GradientNormSqrd<UniformScaleMap, GradScheme>
{
    // random access version
    template<typename Accessor>
    static typename Accessor::ValueType
    result(const UniformScaleMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        // uniform scale: squared world-space norm is the index-space result / dx^2
        const ValueType invDx2 = ValueType(map.getInvScaleSqr()[0]);
        return invDx2 * ISGradientNormSqrd<GradScheme>::result(grid, ijk);
    }
    // stencil access version
    template<typename StencilT>
    static typename StencilT::ValueType
    result(const UniformScaleMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType;
        const ValueType invDx2 = ValueType(map.getInvScaleSqr()[0]);
        return invDx2 * ISGradientNormSqrd<GradScheme>::result(stencil);
    }
};
/// Partial template specialization of GradientNormSqrd
template<BiasedGradientScheme GradScheme>
struct GradientNormSqrd<UniformScaleTranslateMap, GradScheme>
{
    // random access version
    template<typename Accessor>
    static typename Accessor::ValueType
    result(const UniformScaleTranslateMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        // translation drops out; only the uniform 1/dx^2 factor applies
        const ValueType invDx2 = ValueType(map.getInvScaleSqr()[0]);
        return invDx2 * ISGradientNormSqrd<GradScheme>::result(grid, ijk);
    }
    // stencil access version
    template<typename StencilT>
    static typename StencilT::ValueType
    result(const UniformScaleTranslateMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType;
        const ValueType invDx2 = ValueType(map.getInvScaleSqr()[0]);
        return invDx2 * ISGradientNormSqrd<GradScheme>::result(stencil);
    }
};
//@{
/// @brief Compute the divergence of a vector-valued grid using differencing
/// of various orders, the result defined with respect to the range-space of the map.
template<typename MapType, DScheme DiffScheme>
struct Divergence
{
    // random access version
    template<typename Accessor> static typename Accessor::ValueType::value_type
    result(const MapType& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType::value_type;
        ValueType sum(0);
        // accumulate the i-th component of the mapped derivative of component i
        for (int i = 0; i < 3; ++i) {
            const Vec3d deriv(D1Vec<DiffScheme>::inX(grid, ijk, i),
                              D1Vec<DiffScheme>::inY(grid, ijk, i),
                              D1Vec<DiffScheme>::inZ(grid, ijk, i));
            sum += ValueType(map.applyIJT(deriv, ijk.asVec3d())[i]);
        }
        return sum;
    }
    // stencil access version
    template<typename StencilT> static typename StencilT::ValueType::value_type
    result(const MapType& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType::value_type;
        ValueType sum(0);
        for (int i = 0; i < 3; ++i) {
            const Vec3d deriv(D1Vec<DiffScheme>::inX(stencil, i),
                              D1Vec<DiffScheme>::inY(stencil, i),
                              D1Vec<DiffScheme>::inZ(stencil, i));
            sum += ValueType(map.applyIJT(deriv, stencil.getCenterCoord().asVec3d())[i]);
        }
        return sum;
    }
};
/// Partial template specialization of Divergence
/// translation, any scheme
template<DScheme DiffScheme>
struct Divergence<TranslationMap, DiffScheme>
{
    /// Random access version: a translation map leaves derivatives unchanged,
    /// so the index-space divergence is returned directly.
    /// (Removed a redundant zero-initialized local that was immediately overwritten.)
    template<typename Accessor> static typename Accessor::ValueType::value_type
    result(const TranslationMap&, const Accessor& grid, const Coord& ijk)
    {
        return ISDivergence<DiffScheme>::result(grid, ijk);
    }
    /// Stencil access version.
    template<typename StencilT> static typename StencilT::ValueType::value_type
    result(const TranslationMap&, const StencilT& stencil)
    {
        return ISDivergence<DiffScheme>::result(stencil);
    }
};
/// Partial template specialization of Divergence
/// uniform scale, any scheme
template<DScheme DiffScheme>
struct Divergence<UniformScaleMap, DiffScheme>
{
    /// Random access version: index-space divergence scaled by 1/dx.
    /// (Removed a redundant zero-initialized local that was immediately overwritten.)
    template<typename Accessor> static typename Accessor::ValueType::value_type
    result(const UniformScaleMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType::value_type;
        const ValueType div = ISDivergence<DiffScheme>::result(grid, ijk);
        return div * ValueType(map.getInvScale()[0]);
    }
    /// Stencil access version.
    template<typename StencilT> static typename StencilT::ValueType::value_type
    result(const UniformScaleMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType::value_type;
        const ValueType div = ISDivergence<DiffScheme>::result(stencil);
        return div * ValueType(map.getInvScale()[0]);
    }
};
/// Partial template specialization of Divergence
/// uniform scale and translation, any scheme
template<DScheme DiffScheme>
struct Divergence<UniformScaleTranslateMap, DiffScheme>
{
    /// @brief Random-access version.
    /// @details The translation part is irrelevant for derivatives; only the
    /// uniform 1/dx rescaling of the index-space divergence remains.
    template<typename Accessor> static typename Accessor::ValueType::value_type
    result(const UniformScaleTranslateMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType::value_type;
        return ISDivergence<DiffScheme>::result(grid, ijk)
            * ValueType(map.getInvScale()[0]); // 1/dx
    }
    /// @brief Stencil-access version; see the random-access overload.
    template<typename StencilT> static typename StencilT::ValueType::value_type
    result(const UniformScaleTranslateMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType::value_type;
        return ISDivergence<DiffScheme>::result(stencil)
            * ValueType(map.getInvScale()[0]); // 1/dx
    }
};
/// Full template specialization of Divergence
/// uniform scale 2nd order
template<>
struct Divergence<UniformScaleMap, CD_2ND>
{
    /// @brief Random-access version.
    /// @details Uses the "T" variant of 2nd-order central differencing
    /// (no halving in index space) and folds the 1/2 into the 1/(2dx) factor.
    template<typename Accessor> static typename Accessor::ValueType::value_type
    result(const UniformScaleMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType::value_type;
        return ISDivergence<CD_2NDT>::result(grid, ijk)
            * ValueType(map.getInvTwiceScale()[0]); // 1/(2dx)
    }
    /// @brief Stencil-access version; see the random-access overload.
    template<typename StencilT> static typename StencilT::ValueType::value_type
    result(const UniformScaleMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType::value_type;
        return ISDivergence<CD_2NDT>::result(stencil)
            * ValueType(map.getInvTwiceScale()[0]); // 1/(2dx)
    }
};
/// Full template specialization of Divergence
/// uniform scale translate 2nd order
template<>
struct Divergence<UniformScaleTranslateMap, CD_2ND>
{
    /// @brief Random-access version.
    /// @details Same as the UniformScaleMap specialization — the translation
    /// does not affect derivatives; the 1/2 of CD_2ND lives in 1/(2dx).
    template<typename Accessor> static typename Accessor::ValueType::value_type
    result(const UniformScaleTranslateMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType::value_type;
        return ISDivergence<CD_2NDT>::result(grid, ijk)
            * ValueType(map.getInvTwiceScale()[0]); // 1/(2dx)
    }
    /// @brief Stencil-access version; see the random-access overload.
    template<typename StencilT> static typename StencilT::ValueType::value_type
    result(const UniformScaleTranslateMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType::value_type;
        return ISDivergence<CD_2NDT>::result(stencil)
            * ValueType(map.getInvTwiceScale()[0]); // 1/(2dx)
    }
};
/// Partial template specialization of Divergence
/// scale, any scheme
template<DScheme DiffScheme>
struct Divergence<ScaleMap, DiffScheme>
{
    /// @brief Random-access version.
    /// @details For an axis-aligned (possibly nonuniform) scale, each index-space
    /// derivative dF_i/dx_i is weighted by 1/scale_i; the sum is cast to the
    /// grid's scalar type only after accumulating in double.
    template<typename Accessor> static typename Accessor::ValueType::value_type
    result(const ScaleMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType::value_type;
        const Vec3d& invScale = map.getInvScale();
        return ValueType(
            D1Vec<DiffScheme>::inX(grid, ijk, 0) * invScale[0] +
            D1Vec<DiffScheme>::inY(grid, ijk, 1) * invScale[1] +
            D1Vec<DiffScheme>::inZ(grid, ijk, 2) * invScale[2]);
    }
    /// @brief Stencil-access version; see the random-access overload.
    template<typename StencilT> static typename StencilT::ValueType::value_type
    result(const ScaleMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType::value_type;
        const Vec3d& invScale = map.getInvScale();
        return ValueType(
            D1Vec<DiffScheme>::inX(stencil, 0) * invScale[0] +
            D1Vec<DiffScheme>::inY(stencil, 1) * invScale[1] +
            D1Vec<DiffScheme>::inZ(stencil, 2) * invScale[2]);
    }
};
/// Partial template specialization of Divergence
/// scale translate, any scheme
template<DScheme DiffScheme>
struct Divergence<ScaleTranslateMap, DiffScheme>
{
    /// @brief Random-access version.
    /// @details Identical to the ScaleMap specialization: the translation part
    /// drops out of the derivative, leaving per-axis 1/scale weighting.
    template<typename Accessor> static typename Accessor::ValueType::value_type
    result(const ScaleTranslateMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType::value_type;
        const Vec3d& invScale = map.getInvScale();
        return ValueType(
            D1Vec<DiffScheme>::inX(grid, ijk, 0) * invScale[0] +
            D1Vec<DiffScheme>::inY(grid, ijk, 1) * invScale[1] +
            D1Vec<DiffScheme>::inZ(grid, ijk, 2) * invScale[2]);
    }
    /// @brief Stencil-access version; see the random-access overload.
    template<typename StencilT> static typename StencilT::ValueType::value_type
    result(const ScaleTranslateMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType::value_type;
        const Vec3d& invScale = map.getInvScale();
        return ValueType(
            D1Vec<DiffScheme>::inX(stencil, 0) * invScale[0] +
            D1Vec<DiffScheme>::inY(stencil, 1) * invScale[1] +
            D1Vec<DiffScheme>::inZ(stencil, 2) * invScale[2]);
    }
};
/// Full template specialization Divergence
/// scale 2nd order
template<>
struct Divergence<ScaleMap, CD_2ND>
{
// random access version
template<typename Accessor> static typename Accessor::ValueType::value_type
result(const ScaleMap& map, const Accessor& grid, const Coord& ijk)
{
using ValueType = typename Accessor::ValueType::value_type;
ValueType div = ValueType(
D1Vec<CD_2NDT>::inX(grid, ijk, 0) * (map.getInvTwiceScale()[0]) +
D1Vec<CD_2NDT>::inY(grid, ijk, 1) * (map.getInvTwiceScale()[1]) +
D1Vec<CD_2NDT>::inZ(grid, ijk, 2) * (map.getInvTwiceScale()[2]) );
return div;
}
// stencil access version
template<typename StencilT> static typename StencilT::ValueType::value_type
result(const ScaleMap& map, const StencilT& stencil)
{
using ValueType = typename StencilT::ValueType::value_type;
ValueType div = ValueType(
D1Vec<CD_2NDT>::inX(stencil, 0) * (map.getInvTwiceScale()[0]) +
D1Vec<CD_2NDT>::inY(stencil, 1) * (map.getInvTwiceScale()[1]) +
D1Vec<CD_2NDT>::inZ(stencil, 2) * (map.getInvTwiceScale()[2]) );
return div;
}
};
/// Full template specialization of Divergence
/// scale and translate, 2nd order
template<>
struct Divergence<ScaleTranslateMap, CD_2ND>
{
// random access version
template<typename Accessor> static typename Accessor::ValueType::value_type
result(const ScaleTranslateMap& map, const Accessor& grid, const Coord& ijk)
{
using ValueType = typename Accessor::ValueType::value_type;
ValueType div = ValueType(
D1Vec<CD_2NDT>::inX(grid, ijk, 0) * (map.getInvTwiceScale()[0]) +
D1Vec<CD_2NDT>::inY(grid, ijk, 1) * (map.getInvTwiceScale()[1]) +
D1Vec<CD_2NDT>::inZ(grid, ijk, 2) * (map.getInvTwiceScale()[2]) );
return div;
}
// stencil access version
template<typename StencilT> static typename StencilT::ValueType::value_type
result(const ScaleTranslateMap& map, const StencilT& stencil)
{
using ValueType = typename StencilT::ValueType::value_type;
ValueType div = ValueType(
D1Vec<CD_2NDT>::inX(stencil, 0) * (map.getInvTwiceScale()[0]) +
D1Vec<CD_2NDT>::inY(stencil, 1) * (map.getInvTwiceScale()[1]) +
D1Vec<CD_2NDT>::inZ(stencil, 2) * (map.getInvTwiceScale()[2]) );
return div;
}
};
//@}
//@{
/// @brief Compute the curl of a vector-valued grid using differencing
/// of various orders in the space defined by the range of the map.
template<typename MapType, DScheme DiffScheme>
struct Curl
{
    /// @brief Random-access version.
    /// @details Builds the world-space Jacobian row by row, then assembles
    /// the curl from its off-diagonal entries.
    template<typename Accessor> static typename Accessor::ValueType
    result(const MapType& map, const Accessor& grid, const Coord& ijk)
    {
        using Vec3Type = typename Accessor::ValueType;
        // jac[i] holds dF_i/dx_j (x_1 = x, x_2 = y, x_3 = z) in range space.
        Vec3Type jac[3];
        for (int n = 0; n < 3; ++n) {
            // index-space gradient of vector component n
            const Vec3d isGrad(
                D1Vec<DiffScheme>::inX(grid, ijk, n),
                D1Vec<DiffScheme>::inY(grid, ijk, n),
                D1Vec<DiffScheme>::inZ(grid, ijk, n));
            jac[n] = Vec3Type(map.applyIJT(isGrad, ijk.asVec3d()));
        }
        return Vec3Type(jac[2][1] - jac[1][2],  // dF_3/dx_2 - dF_2/dx_3
                        jac[0][2] - jac[2][0],  // dF_1/dx_3 - dF_3/dx_1
                        jac[1][0] - jac[0][1]); // dF_2/dx_1 - dF_1/dx_2
    }
    /// @brief Stencil-access version; see the random-access overload.
    template<typename StencilT> static typename StencilT::ValueType
    result(const MapType& map, const StencilT& stencil)
    {
        using Vec3Type = typename StencilT::ValueType;
        // jac[i] holds dF_i/dx_j (x_1 = x, x_2 = y, x_3 = z) in range space.
        Vec3Type jac[3];
        for (int n = 0; n < 3; ++n) {
            // index-space gradient of vector component n
            const Vec3d isGrad(
                D1Vec<DiffScheme>::inX(stencil, n),
                D1Vec<DiffScheme>::inY(stencil, n),
                D1Vec<DiffScheme>::inZ(stencil, n));
            jac[n] = Vec3Type(map.applyIJT(isGrad, stencil.getCenterCoord().asVec3d()));
        }
        return Vec3Type(jac[2][1] - jac[1][2],  // dF_3/dx_2 - dF_2/dx_3
                        jac[0][2] - jac[2][0],  // dF_1/dx_3 - dF_3/dx_1
                        jac[1][0] - jac[0][1]); // dF_2/dx_1 - dF_1/dx_2
    }
};
/// Partial template specialization of Curl
template<DScheme DiffScheme>
struct Curl<UniformScaleMap, DiffScheme>
{
    /// @brief Random-access version: index-space curl rescaled by 1/dx.
    template<typename Accessor> static typename Accessor::ValueType
    result(const UniformScaleMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType::value_type;
        const ValueType invDx = ValueType(map.getInvScale()[0]); // 1/dx
        return ISCurl<DiffScheme>::result(grid, ijk) * invDx;
    }
    /// @brief Stencil-access version; see the random-access overload.
    template<typename StencilT> static typename StencilT::ValueType
    result(const UniformScaleMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType::value_type;
        const ValueType invDx = ValueType(map.getInvScale()[0]); // 1/dx
        return ISCurl<DiffScheme>::result(stencil) * invDx;
    }
};
/// Partial template specialization of Curl
template<DScheme DiffScheme>
struct Curl<UniformScaleTranslateMap, DiffScheme>
{
    /// @brief Random-access version: the translation is derivative-neutral,
    /// so this matches the UniformScaleMap specialization.
    template<typename Accessor> static typename Accessor::ValueType
    result(const UniformScaleTranslateMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType::value_type;
        const ValueType invDx = ValueType(map.getInvScale()[0]); // 1/dx
        return ISCurl<DiffScheme>::result(grid, ijk) * invDx;
    }
    /// @brief Stencil-access version; see the random-access overload.
    template<typename StencilT> static typename StencilT::ValueType
    result(const UniformScaleTranslateMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType::value_type;
        const ValueType invDx = ValueType(map.getInvScale()[0]); // 1/dx
        return ISCurl<DiffScheme>::result(stencil) * invDx;
    }
};
/// Full template specialization of Curl
template<>
struct Curl<UniformScaleMap, CD_2ND>
{
    /// @brief Random-access version: 2nd-order central differencing with the
    /// factor 1/2 folded into the 1/(2dx) scale.
    template<typename Accessor> static typename Accessor::ValueType
    result(const UniformScaleMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType::value_type;
        const ValueType inv2Dx = ValueType(map.getInvTwiceScale()[0]); // 1/(2dx)
        return ISCurl<CD_2NDT>::result(grid, ijk) * inv2Dx;
    }
    /// @brief Stencil-access version; see the random-access overload.
    template<typename StencilT> static typename StencilT::ValueType
    result(const UniformScaleMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType::value_type;
        const ValueType inv2Dx = ValueType(map.getInvTwiceScale()[0]); // 1/(2dx)
        return ISCurl<CD_2NDT>::result(stencil) * inv2Dx;
    }
};
/// Full template specialization of Curl
template<>
struct Curl<UniformScaleTranslateMap, CD_2ND>
{
    /// @brief Random-access version: identical weights to the UniformScaleMap
    /// 2nd-order specialization (translation is derivative-neutral).
    template<typename Accessor> static typename Accessor::ValueType
    result(const UniformScaleTranslateMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType::value_type;
        const ValueType inv2Dx = ValueType(map.getInvTwiceScale()[0]); // 1/(2dx)
        return ISCurl<CD_2NDT>::result(grid, ijk) * inv2Dx;
    }
    /// @brief Stencil-access version; see the random-access overload.
    template<typename StencilT> static typename StencilT::ValueType
    result(const UniformScaleTranslateMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType::value_type;
        const ValueType inv2Dx = ValueType(map.getInvTwiceScale()[0]); // 1/(2dx)
        return ISCurl<CD_2NDT>::result(stencil) * inv2Dx;
    }
};
//@}
//@{
/// @brief Compute the Laplacian at a given location in a grid using finite differencing
/// of various orders. The result is defined in the range of the map.
template<typename MapType, DDScheme DiffScheme>
struct Laplacian
{
    /// @brief Random-access version.
    /// @param map  the transform from index space to the range (world) space
    /// @param grid value accessor for the scalar grid
    /// @param ijk  index-space coordinate at which to evaluate
    /// @return the trace of the range-space second-derivative (Hessian) matrix,
    ///         i.e., the Laplacian in the space defined by @a map
    template<typename Accessor>
    static typename Accessor::ValueType result(const MapType& map,
        const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        // all the second derivatives in index space
        ValueType iddx  = D2<DiffScheme>::inX(grid, ijk);
        ValueType iddy  = D2<DiffScheme>::inY(grid, ijk);
        ValueType iddz  = D2<DiffScheme>::inZ(grid, ijk);
        ValueType iddxy = D2<DiffScheme>::inXandY(grid, ijk);
        ValueType iddyz = D2<DiffScheme>::inYandZ(grid, ijk);
        ValueType iddxz = D2<DiffScheme>::inXandZ(grid, ijk);
        // symmetric second-derivative (Hessian) matrix in index space
        Mat3d d2_is(iddx,  iddxy, iddxz,
                    iddxy, iddy,  iddyz,
                    iddxz, iddyz, iddz);
        Mat3d d2_rs; // to hold the second derivative matrix in range space
        if (is_linear<MapType>::value) { // resolved at compile time
            // linear maps transform the Hessian without first-derivative terms
            d2_rs = map.applyIJC(d2_is);
        } else {
            // nonlinear maps also need the first derivatives (2nd-order accurate)
            Vec3d d1_is(static_cast<double>(D1<CD_2ND>::inX(grid, ijk)),
                        static_cast<double>(D1<CD_2ND>::inY(grid, ijk)),
                        static_cast<double>(D1<CD_2ND>::inZ(grid, ijk)));
            d2_rs = map.applyIJC(d2_is, d1_is, ijk.asVec3d());
        }
        // the trace of the second derivative (range space) matrix is the Laplacian
        return ValueType(d2_rs(0,0) + d2_rs(1,1) + d2_rs(2,2));
    }
    /// @brief Stencil-access version; see the random-access overload.
    template<typename StencilT>
    static typename StencilT::ValueType result(const MapType& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType;
        // all the second derivatives in index space
        ValueType iddx  = D2<DiffScheme>::inX(stencil);
        ValueType iddy  = D2<DiffScheme>::inY(stencil);
        ValueType iddz  = D2<DiffScheme>::inZ(stencil);
        ValueType iddxy = D2<DiffScheme>::inXandY(stencil);
        ValueType iddyz = D2<DiffScheme>::inYandZ(stencil);
        ValueType iddxz = D2<DiffScheme>::inXandZ(stencil);
        // symmetric second-derivative (Hessian) matrix in index space
        Mat3d d2_is(iddx,  iddxy, iddxz,
                    iddxy, iddy,  iddyz,
                    iddxz, iddyz, iddz);
        Mat3d d2_rs; // to hold the second derivative matrix in range space
        if (is_linear<MapType>::value) { // resolved at compile time
            d2_rs = map.applyIJC(d2_is);
        } else {
            // compute the first derivatives with 2nd order accuracy
            // (explicit casts for consistency with the random-access overload
            // and to avoid implicit conversion warnings)
            Vec3d d1_is(static_cast<double>(D1<CD_2ND>::inX(stencil)),
                        static_cast<double>(D1<CD_2ND>::inY(stencil)),
                        static_cast<double>(D1<CD_2ND>::inZ(stencil)));
            d2_rs = map.applyIJC(d2_is, d1_is, stencil.getCenterCoord().asVec3d());
        }
        // the trace of the second derivative (range space) matrix is the Laplacian
        return ValueType(d2_rs(0,0) + d2_rs(1,1) + d2_rs(2,2));
    }
};
// A pure translation leaves all derivatives unchanged, so the Laplacian
// reduces to the index-space Laplacian.
template<DDScheme DiffScheme>
struct Laplacian<TranslationMap, DiffScheme>
{
    // random access version
    template<typename Accessor>
    static typename Accessor::ValueType result(const TranslationMap&,
        const Accessor& grid, const Coord& ijk)
    {
        return ISLaplacian<DiffScheme>::result(grid, ijk);
    }
    // stencil access version
    template<typename StencilT>
    static typename StencilT::ValueType result(const TranslationMap&, const StencilT& stencil)
    {
        return ISLaplacian<DiffScheme>::result(stencil);
    }
};
// The Laplacian is invariant to rotation or reflection.
// Hence, for a unitary map the index-space Laplacian can be returned directly
// with no rescaling.
template<DDScheme DiffScheme>
struct Laplacian<UnitaryMap, DiffScheme>
{
    // random access version
    template<typename Accessor>
    static typename Accessor::ValueType result(const UnitaryMap&,
        const Accessor& grid, const Coord& ijk)
    {
        return ISLaplacian<DiffScheme>::result(grid, ijk);
    }
    // stencil access version
    template<typename StencilT>
    static typename StencilT::ValueType result(const UnitaryMap&, const StencilT& stencil)
    {
        return ISLaplacian<DiffScheme>::result(stencil);
    }
};
// Uniform scale: second derivatives pick up a factor of 1/dx^2.
template<DDScheme DiffScheme>
struct Laplacian<UniformScaleMap, DiffScheme>
{
    /// @brief Random-access version: index-space Laplacian scaled by 1/dx^2.
    template<typename Accessor> static typename Accessor::ValueType
    result(const UniformScaleMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        return ISLaplacian<DiffScheme>::result(grid, ijk)
            * ValueType(map.getInvScaleSqr()[0]); // 1/dx^2
    }
    /// @brief Stencil-access version; see the random-access overload.
    template<typename StencilT> static typename StencilT::ValueType
    result(const UniformScaleMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType;
        return ISLaplacian<DiffScheme>::result(stencil)
            * ValueType(map.getInvScaleSqr()[0]); // 1/dx^2
    }
};
// Uniform scale plus translation: translation is derivative-neutral, so the
// index-space Laplacian is simply scaled by 1/dx^2.
template<DDScheme DiffScheme>
struct Laplacian<UniformScaleTranslateMap, DiffScheme>
{
    /// @brief Random-access version.
    template<typename Accessor> static typename Accessor::ValueType
    result(const UniformScaleTranslateMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        return ISLaplacian<DiffScheme>::result(grid, ijk)
            * ValueType(map.getInvScaleSqr()[0]); // 1/dx^2
    }
    /// @brief Stencil-access version; see the random-access overload.
    template<typename StencilT> static typename StencilT::ValueType
    result(const UniformScaleTranslateMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType;
        return ISLaplacian<DiffScheme>::result(stencil)
            * ValueType(map.getInvScaleSqr()[0]); // 1/dx^2
    }
};
// Axis-aligned (possibly nonuniform) scale: the mixed second derivatives
// vanish, so the Laplacian is the sum of the three axis-aligned second
// derivatives, each weighted by 1/scale^2 for its axis.
template<DDScheme DiffScheme>
struct Laplacian<ScaleMap, DiffScheme>
{
    // random access version
    template<typename Accessor> static typename Accessor::ValueType
    result(const ScaleMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        // compute the second derivatives in index space
        ValueType iddx = D2<DiffScheme>::inX(grid, ijk);
        ValueType iddy = D2<DiffScheme>::inY(grid, ijk);
        ValueType iddz = D2<DiffScheme>::inZ(grid, ijk);
        const Vec3d& invScaleSqr = map.getInvScaleSqr();
        // the double weights mix with ValueType; the macro pair suppresses
        // the resulting implicit-conversion warnings
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        // scale them by the appropriate 1/dx^2, 1/dy^2, 1/dz^2 and sum
        const ValueType value = iddx * invScaleSqr[0] + iddy * invScaleSqr[1] + iddz * invScaleSqr[2];
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END
        return value;
    }
    // stencil access version
    template<typename StencilT> static typename StencilT::ValueType
    result(const ScaleMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType;
        // compute the second derivatives in index space
        ValueType iddx = D2<DiffScheme>::inX(stencil);
        ValueType iddy = D2<DiffScheme>::inY(stencil);
        ValueType iddz = D2<DiffScheme>::inZ(stencil);
        const Vec3d& invScaleSqr = map.getInvScaleSqr();
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        // scale them by the appropriate 1/dx^2, 1/dy^2, 1/dz^2 and sum
        const ValueType value = iddx * invScaleSqr[0] + iddy * invScaleSqr[1] + iddz * invScaleSqr[2];
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END
        return value;
    }
};
// Scale plus translation: identical to the ScaleMap specialization, since the
// translation component does not affect derivatives.
template<DDScheme DiffScheme>
struct Laplacian<ScaleTranslateMap, DiffScheme>
{
    // random access version
    template<typename Accessor> static typename Accessor::ValueType
    result(const ScaleTranslateMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        // compute the second derivatives in index space
        ValueType iddx = D2<DiffScheme>::inX(grid, ijk);
        ValueType iddy = D2<DiffScheme>::inY(grid, ijk);
        ValueType iddz = D2<DiffScheme>::inZ(grid, ijk);
        const Vec3d& invScaleSqr = map.getInvScaleSqr();
        // the double weights mix with ValueType; the macro pair suppresses
        // the resulting implicit-conversion warnings
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        // scale them by the appropriate 1/dx^2, 1/dy^2, 1/dz^2 and sum
        const ValueType value = iddx * invScaleSqr[0] + iddy * invScaleSqr[1] + iddz * invScaleSqr[2];
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END
        return value;
    }
    // stencil access version
    template<typename StencilT> static typename StencilT::ValueType
    result(const ScaleTranslateMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType;
        // compute the second derivatives in index space
        ValueType iddx = D2<DiffScheme>::inX(stencil);
        ValueType iddy = D2<DiffScheme>::inY(stencil);
        ValueType iddz = D2<DiffScheme>::inZ(stencil);
        const Vec3d& invScaleSqr = map.getInvScaleSqr();
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        // scale them by the appropriate 1/dx^2, 1/dy^2, 1/dz^2 and sum
        const ValueType value = iddx * invScaleSqr[0] + iddy * invScaleSqr[1] + iddz * invScaleSqr[2];
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END
        return value;
    }
};
/// @brief Compute the closest-point transform to a level set.
/// @return the closest point to the surface from which the level set was derived,
/// in the domain space of the map (e.g., voxel space).
template<typename MapType, DScheme DiffScheme>
struct CPT
{
    // random access version
    template<typename Accessor> static math::Vec3<typename Accessor::ValueType>
    result(const MapType& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        using Vec3Type = Vec3<ValueType>;
        // current distance
        ValueType d = grid.getValue(ijk);
        // compute gradient in physical space where it is a unit normal
        // since the grid holds a distance level set.
        // d * normal is then the world-space vector from the surface to ijk.
        Vec3d vectorFromSurface(d*Gradient<MapType,DiffScheme>::result(map, grid, ijk));
        if (is_linear<MapType>::value) {
            // For a linear map the inverse distributes over subtraction, so
            // the closest point can be formed directly in index space.
            Vec3d result = ijk.asVec3d() - map.applyInverseMap(vectorFromSurface);
            return Vec3Type(result);
        } else {
            // General case: step back to the surface in world space, then
            // map that world-space point into the map's domain space.
            Vec3d location = map.applyMap(ijk.asVec3d());
            Vec3d result = map.applyInverseMap(location - vectorFromSurface);
            return Vec3Type(result);
        }
    }
    // stencil access version
    template<typename StencilT> static math::Vec3<typename StencilT::ValueType>
    result(const MapType& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType;
        using Vec3Type = Vec3<ValueType>;
        // current distance
        ValueType d = stencil.template getValue<0, 0, 0>();
        // compute gradient in physical space where it is a unit normal
        // since the grid holds a distance level set.
        // d * normal is then the world-space vector from the surface here.
        Vec3d vectorFromSurface(d*Gradient<MapType, DiffScheme>::result(map, stencil));
        if (is_linear<MapType>::value) {
            // linear map: form the closest point directly in index space
            Vec3d result = stencil.getCenterCoord().asVec3d()
                - map.applyInverseMap(vectorFromSurface);
            return Vec3Type(result);
        } else {
            // general case: step back onto the surface in world space first
            Vec3d location = map.applyMap(stencil.getCenterCoord().asVec3d());
            Vec3d result = map.applyInverseMap(location - vectorFromSurface);
            return Vec3Type(result);
        }
    }
};
/// @brief Compute the closest-point transform to a level set.
/// @return the closest point to the surface from which the level set was derived,
/// in the range space of the map (e.g., in world space)
template<typename MapType, DScheme DiffScheme>
struct CPT_RANGE
{
    /// @brief Random-access version.
    /// @details For a signed-distance level set, the world-space gradient is a
    /// unit surface normal; distance * normal is the offset from the surface,
    /// so subtracting it from the voxel's world position lands on the surface.
    template<typename Accessor> static Vec3<typename Accessor::ValueType>
    result(const MapType& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        using Vec3Type = Vec3<ValueType>;
        // signed distance at this voxel
        ValueType dist = grid.getValue(ijk);
        // world-space offset vector pointing from the surface to this voxel
        Vec3Type fromSurface = dist*Gradient<MapType,DiffScheme>::result(map, grid, ijk);
        // step from the voxel's world-space position back onto the surface
        Vec3d closest = map.applyMap(ijk.asVec3d()) - fromSurface;
        return Vec3Type(closest);
    }
    /// @brief Stencil-access version; see the random-access overload.
    template<typename StencilT> static Vec3<typename StencilT::ValueType>
    result(const MapType& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType;
        using Vec3Type = Vec3<ValueType>;
        // signed distance at the stencil center
        ValueType dist = stencil.template getValue<0, 0, 0>();
        // world-space offset vector pointing from the surface to the center
        Vec3Type fromSurface = dist*Gradient<MapType, DiffScheme>::result(map, stencil);
        // step from the center's world-space position back onto the surface
        Vec3d closest = map.applyMap(stencil.getCenterCoord().asVec3d()) - fromSurface;
        return Vec3Type(closest);
    }
};
/// @brief Compute the mean curvature.
/// @details The mean curvature is returned in two parts, @a alpha and @a beta,
/// where @a alpha is the numerator in ∇ · (∇Φ / |∇Φ|)
/// and @a beta is |∇Φ|.
template<typename MapType, DDScheme DiffScheme2, DScheme DiffScheme1>
struct MeanCurvature
{
    /// @brief Random access version
    /// @return @c true if the gradient is nonzero, in which case the mean curvature
    /// is returned in two parts, @a alpha and @a beta, where @a alpha is the numerator
    /// in ∇ · (∇Φ / |∇Φ|) and @a beta is |∇Φ|.
    template<typename Accessor>
    static bool compute(const MapType& map, const Accessor& grid, const Coord& ijk,
                        double& alpha, double& beta)
    {
        using ValueType = typename Accessor::ValueType;

        // compute the gradient in index and world space
        Vec3d d1_is(static_cast<double>(D1<DiffScheme1>::inX(grid, ijk)),
                    static_cast<double>(D1<DiffScheme1>::inY(grid, ijk)),
                    static_cast<double>(D1<DiffScheme1>::inZ(grid, ijk))), d1_ws;
        if (is_linear<MapType>::value) { // resolved at compile time
            d1_ws = map.applyIJT(d1_is);
        } else {
            d1_ws = map.applyIJT(d1_is, ijk.asVec3d());
        }
        const double Dx2 = d1_ws(0)*d1_ws(0);
        const double Dy2 = d1_ws(1)*d1_ws(1);
        const double Dz2 = d1_ws(2)*d1_ws(2);
        const double normGrad = Dx2 + Dy2 + Dz2;
        // a (near-)zero gradient means the curvature is undefined here
        if (normGrad <= math::Tolerance<double>::value()) {
            alpha = beta = 0;
            return false;
        }

        // all the second derivatives in index space
        ValueType iddx  = D2<DiffScheme2>::inX(grid, ijk);
        ValueType iddy  = D2<DiffScheme2>::inY(grid, ijk);
        ValueType iddz  = D2<DiffScheme2>::inZ(grid, ijk);
        ValueType iddxy = D2<DiffScheme2>::inXandY(grid, ijk);
        ValueType iddyz = D2<DiffScheme2>::inYandZ(grid, ijk);
        ValueType iddxz = D2<DiffScheme2>::inXandZ(grid, ijk);
        // symmetric second-derivative matrix in index space
        Mat3d d2_is(iddx,  iddxy, iddxz,
                    iddxy, iddy,  iddyz,
                    iddxz, iddyz, iddz);
        // convert second derivatives to world space
        Mat3d d2_ws;
        if (is_linear<MapType>::value) { // resolved at compile time
            d2_ws = map.applyIJC(d2_is);
        } else {
            d2_ws = map.applyIJC(d2_is, d1_is, ijk.asVec3d());
        }

        // assemble the numerator and denominator for mean curvature
        alpha = (Dx2*(d2_ws(1,1)+d2_ws(2,2))+Dy2*(d2_ws(0,0)+d2_ws(2,2))
                 +Dz2*(d2_ws(0,0)+d2_ws(1,1))
                 -2*(d1_ws(0)*(d1_ws(1)*d2_ws(0,1)+d1_ws(2)*d2_ws(0,2))
                     +d1_ws(1)*d1_ws(2)*d2_ws(1,2)));
        beta = std::sqrt(normGrad); // * 1/dx
        return true;
    }

    /// @brief Random-access mean curvature: alpha / (2 * beta^3), or 0 when
    /// the gradient vanishes.
    template<typename Accessor>
    static typename Accessor::ValueType result(const MapType& map,
        const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        double alpha, beta;
        return compute(map, grid, ijk, alpha, beta) ?
            ValueType(alpha/(2.0*math::Pow3(beta))) : 0;
    }

    /// @brief Random-access mean curvature times |∇Φ|: alpha / (2 * beta^2),
    /// or 0 when the gradient vanishes.
    template<typename Accessor>
    static typename Accessor::ValueType normGrad(const MapType& map,
        const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        double alpha, beta;
        return compute(map, grid, ijk, alpha, beta) ?
            ValueType(alpha/(2.0*math::Pow2(beta))) : 0;
    }

    /// @brief Stencil access version
    /// @return @c true if the gradient is nonzero, in which case the mean curvature
    /// is returned in two parts, @a alpha and @a beta, where @a alpha is the numerator
    /// in ∇ · (∇Φ / |∇Φ|) and @a beta is |∇Φ|.
    template<typename StencilT>
    static bool compute(const MapType& map, const StencilT& stencil,
                        double& alpha, double& beta)
    {
        using ValueType = typename StencilT::ValueType;

        // compute the gradient in index and world space
        // (explicit casts for consistency with the random-access overload)
        Vec3d d1_is(static_cast<double>(D1<DiffScheme1>::inX(stencil)),
                    static_cast<double>(D1<DiffScheme1>::inY(stencil)),
                    static_cast<double>(D1<DiffScheme1>::inZ(stencil))), d1_ws;
        if (is_linear<MapType>::value) { // resolved at compile time
            d1_ws = map.applyIJT(d1_is);
        } else {
            d1_ws = map.applyIJT(d1_is, stencil.getCenterCoord().asVec3d());
        }
        const double Dx2 = d1_ws(0)*d1_ws(0);
        const double Dy2 = d1_ws(1)*d1_ws(1);
        const double Dz2 = d1_ws(2)*d1_ws(2);
        const double normGrad = Dx2 + Dy2 + Dz2;
        // a (near-)zero gradient means the curvature is undefined here
        if (normGrad <= math::Tolerance<double>::value()) {
            alpha = beta = 0;
            return false;
        }

        // all the second derivatives in index space
        ValueType iddx  = D2<DiffScheme2>::inX(stencil);
        ValueType iddy  = D2<DiffScheme2>::inY(stencil);
        ValueType iddz  = D2<DiffScheme2>::inZ(stencil);
        ValueType iddxy = D2<DiffScheme2>::inXandY(stencil);
        ValueType iddyz = D2<DiffScheme2>::inYandZ(stencil);
        ValueType iddxz = D2<DiffScheme2>::inXandZ(stencil);
        // symmetric second-derivative matrix in index space
        Mat3d d2_is(iddx,  iddxy, iddxz,
                    iddxy, iddy,  iddyz,
                    iddxz, iddyz, iddz);
        // convert second derivatives to world space
        Mat3d d2_ws;
        if (is_linear<MapType>::value) { // resolved at compile time
            d2_ws = map.applyIJC(d2_is);
        } else {
            d2_ws = map.applyIJC(d2_is, d1_is, stencil.getCenterCoord().asVec3d());
        }

        // assemble the numerator and denominator for mean curvature
        alpha = (Dx2*(d2_ws(1,1)+d2_ws(2,2))+Dy2*(d2_ws(0,0)+d2_ws(2,2))
                 +Dz2*(d2_ws(0,0)+d2_ws(1,1))
                 -2*(d1_ws(0)*(d1_ws(1)*d2_ws(0,1)+d1_ws(2)*d2_ws(0,2))
                     +d1_ws(1)*d1_ws(2)*d2_ws(1,2)));
        beta = std::sqrt(normGrad); // * 1/dx
        return true;
    }

    /// @brief Stencil-access mean curvature; see the random-access overload.
    /// @note The stencil is now taken by const reference (it was previously
    /// copied by value on every call).
    template<typename StencilT>
    static typename StencilT::ValueType
    result(const MapType& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType;
        double alpha, beta;
        return compute(map, stencil, alpha, beta) ?
            ValueType(alpha/(2.0*math::Pow3(beta))) : 0;
    }

    /// @brief Stencil-access mean curvature times |∇Φ|; see the
    /// random-access overload.
    template<typename StencilT>
    static typename StencilT::ValueType normGrad(const MapType& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType;
        double alpha, beta;
        return compute(map, stencil, alpha, beta) ?
            ValueType(alpha/(2.0*math::Pow2(beta))) : 0;
    }
};
// Translation specialization: derivatives are unchanged, so the index-space
// mean-curvature helper provides alpha and beta directly.
template<DDScheme DiffScheme2, DScheme DiffScheme1>
struct MeanCurvature<TranslationMap, DiffScheme2, DiffScheme1>
{
    /// @brief Random-access mean curvature: alpha / (2 * beta^3).
    template<typename Accessor>
    static typename Accessor::ValueType result(const TranslationMap&,
        const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        ValueType alpha, beta;
        if (!ISMeanCurvature<DiffScheme2, DiffScheme1>::result(grid, ijk, alpha, beta)) {
            return 0; // gradient vanished; curvature undefined
        }
        return ValueType(alpha/(2*math::Pow3(beta)));
    }
    /// @brief Random-access mean curvature times the gradient norm.
    template<typename Accessor>
    static typename Accessor::ValueType normGrad(const TranslationMap&,
        const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        ValueType alpha, beta;
        if (!ISMeanCurvature<DiffScheme2, DiffScheme1>::result(grid, ijk, alpha, beta)) {
            return 0; // gradient vanished; curvature undefined
        }
        return ValueType(alpha/(2*math::Pow2(beta)));
    }
    /// @brief Stencil-access mean curvature; see the random-access overload.
    template<typename StencilT>
    static typename StencilT::ValueType result(const TranslationMap&, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType;
        ValueType alpha, beta;
        if (!ISMeanCurvature<DiffScheme2, DiffScheme1>::result(stencil, alpha, beta)) {
            return 0; // gradient vanished; curvature undefined
        }
        return ValueType(alpha/(2*math::Pow3(beta)));
    }
    /// @brief Stencil-access mean curvature times the gradient norm.
    template<typename StencilT>
    static typename StencilT::ValueType normGrad(const TranslationMap&, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType;
        ValueType alpha, beta;
        if (!ISMeanCurvature<DiffScheme2, DiffScheme1>::result(stencil, alpha, beta)) {
            return 0; // gradient vanished; curvature undefined
        }
        return ValueType(alpha/(2*math::Pow2(beta)));
    }
};
// Uniform-scale specialization: the index-space alpha/beta pair only needs the
// appropriate power of 1/dx applied.
template<DDScheme DiffScheme2, DScheme DiffScheme1>
struct MeanCurvature<UniformScaleMap, DiffScheme2, DiffScheme1>
{
    /// @brief Random-access mean curvature: alpha * (1/2dx) / beta^3.
    template<typename Accessor>
    static typename Accessor::ValueType result(const UniformScaleMap& map,
        const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        ValueType alpha, beta;
        if (!ISMeanCurvature<DiffScheme2, DiffScheme1>::result(grid, ijk, alpha, beta)) {
            return 0; // gradient vanished; curvature undefined
        }
        const ValueType inv2dx = ValueType(map.getInvTwiceScale()[0]);
        return ValueType(alpha*inv2dx/math::Pow3(beta));
    }
    /// @brief Random-access mean curvature times the gradient norm.
    template<typename Accessor>
    static typename Accessor::ValueType normGrad(const UniformScaleMap& map,
        const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        ValueType alpha, beta;
        if (!ISMeanCurvature<DiffScheme2, DiffScheme1>::result(grid, ijk, alpha, beta)) {
            return 0; // gradient vanished; curvature undefined
        }
        const ValueType invdxdx = ValueType(map.getInvScaleSqr()[0]);
        return ValueType(alpha*invdxdx/(2*math::Pow2(beta)));
    }
    /// @brief Stencil-access mean curvature; see the random-access overload.
    template<typename StencilT>
    static typename StencilT::ValueType result(const UniformScaleMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType;
        ValueType alpha, beta;
        if (!ISMeanCurvature<DiffScheme2, DiffScheme1>::result(stencil, alpha, beta)) {
            return 0; // gradient vanished; curvature undefined
        }
        const ValueType inv2dx = ValueType(map.getInvTwiceScale()[0]);
        return ValueType(alpha*inv2dx/math::Pow3(beta));
    }
    /// @brief Stencil-access mean curvature times the gradient norm.
    template<typename StencilT>
    static typename StencilT::ValueType normGrad(const UniformScaleMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType;
        ValueType alpha, beta;
        if (!ISMeanCurvature<DiffScheme2, DiffScheme1>::result(stencil, alpha, beta)) {
            return 0; // gradient vanished; curvature undefined
        }
        const ValueType invdxdx = ValueType(map.getInvScaleSqr()[0]);
        return ValueType(alpha*invdxdx/(2*math::Pow2(beta)));
    }
};
// Partial specialization for uniform scale + translation: the translation
// does not affect derivatives, so the rescaling is identical to the
// UniformScaleMap specialization above.
template<DDScheme DiffScheme2, DScheme DiffScheme1>
struct MeanCurvature<UniformScaleTranslateMap, DiffScheme2, DiffScheme1>
{
    // random access version
    template<typename Accessor> static typename Accessor::ValueType
    result(const UniformScaleTranslateMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        ValueType alpha, beta;
        if (ISMeanCurvature<DiffScheme2, DiffScheme1>::result(grid, ijk, alpha, beta)) {
            // inverse of twice the voxel scale, i.e. 1/(2*dx)
            ValueType inv2dx = ValueType(map.getInvTwiceScale()[0]);
            return ValueType(alpha*inv2dx/math::Pow3(beta));
        }
        // Computation failed (e.g. degenerate gradient) -- report zero curvature.
        return 0;
    }
    template<typename Accessor> static typename Accessor::ValueType
    normGrad(const UniformScaleTranslateMap& map, const Accessor& grid, const Coord& ijk)
    {
        using ValueType = typename Accessor::ValueType;
        ValueType alpha, beta;
        if (ISMeanCurvature<DiffScheme2, DiffScheme1>::result(grid, ijk, alpha, beta)) {
            // inverse of the squared voxel scale, i.e. 1/(dx*dx)
            ValueType invdxdx = ValueType(map.getInvScaleSqr()[0]);
            return ValueType(alpha*invdxdx/(2*math::Pow2(beta)));
        }
        return 0;
    }
    // stencil access version
    template<typename StencilT> static typename StencilT::ValueType
    result(const UniformScaleTranslateMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType;
        ValueType alpha, beta;
        if (ISMeanCurvature<DiffScheme2, DiffScheme1>::result(stencil, alpha, beta)) {
            ValueType inv2dx = ValueType(map.getInvTwiceScale()[0]);
            return ValueType(alpha*inv2dx/math::Pow3(beta));
        }
        return 0;
    }
    template<typename StencilT> static typename StencilT::ValueType
    normGrad(const UniformScaleTranslateMap& map, const StencilT& stencil)
    {
        using ValueType = typename StencilT::ValueType;
        ValueType alpha, beta;
        if (ISMeanCurvature<DiffScheme2, DiffScheme1>::result(stencil, alpha, beta)) {
            ValueType invdxdx = ValueType(map.getInvScaleSqr()[0]);
            return ValueType(alpha*invdxdx/(2*math::Pow2(beta)));
        }
        return 0;
    }
};
/// @brief A wrapper that holds a MapBase::ConstPtr and exposes a reduced set
/// of functionality needed by the mathematical operators
/// @details This may be used in some <tt>Map</tt>-templated code, when the overhead of
/// actually resolving the @c Map type is large compared to the map work to be done.
class GenericMap
{
public:
    /// Wrap the map underlying the given grid's transform.
    template<typename GridType>
    GenericMap(const GridType& g): mMap(g.transform().baseMap()) {}
    /// Wrap the map underlying the given transform.
    GenericMap(const Transform& t): mMap(t.baseMap()) {}
    /// Wrap the given map (held as a const pointer; the map is never mutated).
    GenericMap(MapBase::Ptr map): mMap(ConstPtrCast<const MapBase>(map)) {}
    GenericMap(MapBase::ConstPtr map): mMap(map) {}
    ~GenericMap() {}
    // All operations forward to the wrapped map's virtual interface.
    Vec3d applyMap(const Vec3d& in) const { return mMap->applyMap(in); }
    Vec3d applyInverseMap(const Vec3d& in) const { return mMap->applyInverseMap(in); }
    Vec3d applyIJT(const Vec3d& in) const { return mMap->applyIJT(in); }
    Vec3d applyIJT(const Vec3d& in, const Vec3d& pos) const { return mMap->applyIJT(in, pos); }
    Mat3d applyIJC(const Mat3d& m) const { return mMap->applyIJC(m); }
    Mat3d applyIJC(const Mat3d& m, const Vec3d& v, const Vec3d& pos) const
        { return mMap->applyIJC(m,v,pos); }
    double determinant() const { return mMap->determinant(); }
    double determinant(const Vec3d& in) const { return mMap->determinant(in); }
    Vec3d voxelSize() const { return mMap->voxelSize(); }
    Vec3d voxelSize(const Vec3d&v) const { return mMap->voxelSize(v); }
private:
    MapBase::ConstPtr mMap;  // the wrapped map (shared, read-only)
};
} // end math namespace
} // namespace OPENVDB_VERSION_NAME
} // end openvdb namespace
#endif // OPENVDB_MATH_OPERATORS_HAS_BEEN_INCLUDED
| 81,715 | C | 37.41843 | 102 | 0.636921 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/Transform.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#include "Transform.h"
#include "LegacyFrustum.h"
#include <openvdb/version.h>
#include <sstream>
#include <string>
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
////////////////////////////////////////
// Construct from an existing map.  Linear maps that are not already uniform
// scales are re-simplified so the transform always holds the most compact
// map representation available.
Transform::Transform(const MapBase::Ptr& map):
    mMap(ConstPtrCast</*to=*/MapBase, /*from=*/const MapBase>(map))
{
    // auto-convert to simplest type
    if (!mMap->isType<UniformScaleMap>() && mMap->isLinear()) {
        AffineMap::Ptr affine = mMap->getAffineMap();
        mMap = simplify(affine);
    }
}
// Copy constructor: shares the other transform's map rather than cloning it.
// This is safe because mutators (preRotate() etc.) replace mMap with a new
// map instead of modifying the existing one in place.
Transform::Transform(const Transform& other):
    mMap(ConstPtrCast</*to=*/MapBase, /*from=*/const MapBase>(other.baseMap()))
{
}
////////////////////////////////////////
// Factory methods
// Return a transform whose map is a uniform scale of @a voxelDim.
Transform::Ptr
Transform::createLinearTransform(double voxelDim)
{
    MapBase::Ptr scaleMap(new UniformScaleMap(voxelDim));
    return Transform::Ptr(new Transform(scaleMap));
}
// Return a transform whose map is the affine map given by the 4x4 matrix.
// (The Transform constructor simplifies the map where possible.)
Transform::Ptr
Transform::createLinearTransform(const Mat4R& m)
{
    MapBase::Ptr affineMap(new AffineMap(m));
    return Transform::Ptr(new Transform(affineMap));
}
// Return a frustum transform with the given bounding box, taper and depth,
// pre-scaled by the voxel size.
Transform::Ptr
Transform::createFrustumTransform(const BBoxd& bbox, double taper,
    double depth, double voxelDim)
{
    const Vec3d scale(voxelDim, voxelDim, voxelDim);
    return Transform::Ptr(new Transform(
        NonlinearFrustumMap(bbox, taper, depth).preScale(scale)));
}
////////////////////////////////////////
// Deserialize this transform: the map type name is read first; legacy
// (pre-OPENVDB_FILE_VERSION_NEW_TRANSFORM) transforms are converted to an
// equivalent modern map, otherwise a registered map of that type reads itself.
void
Transform::read(std::istream& is)
{
    // Read the type name.
    Name type = readString(is);
    if (io::getFormatVersion(is) < OPENVDB_FILE_VERSION_NEW_TRANSFORM) {
        // Handle old-style transforms.
        if (type == "LinearTransform") {
            // First read in the old transform's base class.
            // (The old bounding box is consumed from the stream but discarded.)
            Coord tmpMin, tmpMax;
            is.read(reinterpret_cast<char*>(&tmpMin), sizeof(Coord::ValueType) * 3);
            is.read(reinterpret_cast<char*>(&tmpMax), sizeof(Coord::ValueType) * 3);
            // Second read in the old linear transform
            // (All four matrices must be consumed to keep the stream aligned,
            // even though only two are used below.)
            Mat4d tmpLocalToWorld, tmpWorldToLocal, tmpVoxelToLocal, tmpLocalToVoxel;
            tmpLocalToWorld.read(is);
            tmpWorldToLocal.read(is);
            tmpVoxelToLocal.read(is);
            tmpLocalToVoxel.read(is);
            // Convert and simplify
            AffineMap::Ptr affineMap(new AffineMap(tmpVoxelToLocal*tmpLocalToWorld));
            mMap = simplify(affineMap);
        } else if (type == "FrustumTransform") {
            internal::LegacyFrustum legacyFrustum(is);
            CoordBBox bb = legacyFrustum.getBBox();
            BBoxd bbox(bb.min().asVec3d(), bb.max().asVec3d()
                /* -Vec3d(1,1,1) */
                );
            double taper = legacyFrustum.getTaper();
            double depth = legacyFrustum.getDepth();
            double nearPlaneWidth = legacyFrustum.getNearPlaneWidth();
            double nearPlaneDist = legacyFrustum.getNearPlaneDist();
            const Mat4d& camxform = legacyFrustum.getCamXForm();
            // create the new frustum with these parameters
            Mat4d xform(Mat4d::identity());
            xform.setToTranslation(Vec3d(0,0, -nearPlaneDist));
            xform.preScale(Vec3d(nearPlaneWidth, nearPlaneWidth, -nearPlaneWidth));
            // create the linear part of the frustum (the second map)
            Mat4d second = xform * camxform;
            // we might have precision problems, the constructor for the
            // affine map is not forgiving (so we fix here).
            const Vec4d col3 = second.col(3);
            const Vec4d ref(0, 0, 0, 1);
            if (ref.eq(col3) ) {
                second.setCol(3, ref);
            }
            MapBase::Ptr linearMap(simplify(AffineMap(second).getAffineMap()));
            // note that the depth is scaled on the nearPlaneSize.
            // the linearMap will uniformly scale the frustum to the correct size
            // and rotate to align with the camera
            mMap = MapBase::Ptr(new NonlinearFrustumMap(
                bbox, taper, depth/nearPlaneWidth, linearMap));
        } else {
            OPENVDB_THROW(IoError, "Transforms of type " + type + " are no longer supported");
        }
    } else {
        // Check if the map has been registered.
        if (!MapRegistry::isRegistered(type)) {
            OPENVDB_THROW(KeyError, "Map " << type << " is not registered");
        }
        // Create the map of the type and then read it in.
        mMap = math::MapRegistry::createMap(type);
        mMap->read(is);
    }
}
// Serialize this transform: the map's registered type name is written first
// so that read() can reconstruct the correct map subclass.
void
Transform::write(std::ostream& os) const
{
    if (mMap == nullptr) {
        OPENVDB_THROW(IoError, "Transform does not have a map");
    }
    // Tag the stream with the map type, then let the map serialize itself.
    writeString(os, mMap->type());
    mMap->write(os);
}
////////////////////////////////////////
bool
Transform::isIdentity() const
{
if (mMap->isLinear()) {
return mMap->getAffineMap()->isIdentity();
} else if ( mMap->isType<NonlinearFrustumMap>() ) {
NonlinearFrustumMap::Ptr frustum =
StaticPtrCast<NonlinearFrustumMap, MapBase>(mMap);
return frustum->isIdentity();
}
// unknown nonlinear map type
return false;
}
////////////////////////////////////////
// Compose a rotation with the current map.  The map is not mutated in place;
// it returns a new (possibly simplified) replacement.
void
Transform::preRotate(double radians, const Axis axis)
{
    mMap = mMap->preRotate(radians, axis);
}
// Compose a translation with the current map (map replaced, never mutated).
void
Transform::preTranslate(const Vec3d& t)
{
    mMap = mMap->preTranslate(t);
}
// Compose a per-axis scale with the current map (map replaced, never mutated).
void
Transform::preScale(const Vec3d& s)
{
    mMap = mMap->preScale(s);
}
void
Transform::preScale(double s)
{
const Vec3d vec(s,s,s);
mMap = mMap->preScale(vec);
}
// Compose a shear (in the plane of the two given axes) with the current map.
void
Transform::preShear(double shear, Axis axis0, Axis axis1)
{
    mMap = mMap->preShear(shear, axis0, axis1);
}
// Pre-multiply the current map by the given 4x4 matrix.
void
Transform::preMult(const Mat4d& m)
{
    if (mMap->isLinear()) {
        // Linear case: fold the matrix into the affine representation and
        // re-simplify to the most compact map type.
        const Mat4d currentMat4 = mMap->getAffineMap()->getMat4();
        const Mat4d newMat4 = m * currentMat4;
        AffineMap::Ptr affineMap( new AffineMap( newMat4) );
        mMap = simplify(affineMap);
    } else if (mMap->isType<NonlinearFrustumMap>() ) {
        // Frustum case: pre-multiply only the frustum's linear "second map",
        // preserving the nonlinear bounding box, taper and depth parameters.
        NonlinearFrustumMap::Ptr currentFrustum =
            StaticPtrCast<NonlinearFrustumMap, MapBase>(mMap);
        const Mat4d currentMat4 = currentFrustum->secondMap().getMat4();
        const Mat4d newMat4 = m * currentMat4;
        AffineMap affine{newMat4};
        NonlinearFrustumMap::Ptr frustum{new NonlinearFrustumMap{
            currentFrustum->getBBox(),
            currentFrustum->getTaper(),
            currentFrustum->getDepth(),
            affine.copy()
        }};
        mMap = StaticPtrCast<MapBase, NonlinearFrustumMap>(frustum);
    }
    // NOTE(review): other nonlinear map types are silently left unchanged.
}
void
Transform::preMult(const Mat3d& m)
{
Mat4d mat4 = Mat4d::identity();
mat4.setMat3(m);
preMult(mat4);
}
// Append a rotation after the current map (map replaced, never mutated).
void
Transform::postRotate(double radians, const Axis axis)
{
    mMap = mMap->postRotate(radians, axis);
}
// Append a translation after the current map (map replaced, never mutated).
void
Transform::postTranslate(const Vec3d& t)
{
    mMap = mMap->postTranslate(t);
}
// Append a per-axis scale after the current map (map replaced, never mutated).
void
Transform::postScale(const Vec3d& s)
{
    mMap = mMap->postScale(s);
}
void
Transform::postScale(double s)
{
const Vec3d vec(s,s,s);
mMap = mMap->postScale(vec);
}
// Append a shear (in the plane of the two given axes) after the current map.
void
Transform::postShear(double shear, Axis axis0, Axis axis1)
{
    mMap = mMap->postShear(shear, axis0, axis1);
}
// Post-multiply the current map by the given 4x4 matrix.
// Mirrors preMult(); only the multiplication order differs.
void
Transform::postMult(const Mat4d& m)
{
    if (mMap->isLinear()) {
        // Linear case: fold the matrix into the affine representation and
        // re-simplify to the most compact map type.
        const Mat4d currentMat4 = mMap->getAffineMap()->getMat4();
        const Mat4d newMat4 = currentMat4 * m;
        AffineMap::Ptr affineMap{new AffineMap{newMat4}};
        mMap = simplify(affineMap);
    } else if (mMap->isType<NonlinearFrustumMap>()) {
        // Frustum case: post-multiply only the frustum's linear "second map",
        // preserving the nonlinear bounding box, taper and depth parameters.
        NonlinearFrustumMap::Ptr currentFrustum =
            StaticPtrCast<NonlinearFrustumMap, MapBase>(mMap);
        const Mat4d currentMat4 = currentFrustum->secondMap().getMat4();
        const Mat4d newMat4 = currentMat4 * m;
        AffineMap affine{newMat4};
        NonlinearFrustumMap::Ptr frustum{new NonlinearFrustumMap{
            currentFrustum->getBBox(),
            currentFrustum->getTaper(),
            currentFrustum->getDepth(),
            affine.copy()
        }};
        mMap = StaticPtrCast<MapBase, NonlinearFrustumMap>(frustum);
    }
    // NOTE(review): other nonlinear map types are silently left unchanged.
}
void
Transform::postMult(const Mat3d& m)
{
Mat4d mat4 = Mat4d::identity();
mat4.setMat3(m);
postMult(mat4);
}
////////////////////////////////////////
// Convert an integer-coordinate bounding box by promoting its bounds to
// doubles and deferring to the BBoxd overload.
BBoxd
Transform::indexToWorld(const CoordBBox& indexBBox) const
{
    const BBoxd box(indexBBox.min().asVec3d(), indexBBox.max().asVec3d());
    return this->indexToWorld(box);
}
// Map all eight corners of the index-space box into world space and return
// their axis-aligned bounds.  (For nonlinear maps, transforming only the
// min and max corners would not be sufficient.)
BBoxd
Transform::indexToWorld(const BBoxd& indexBBox) const
{
    const Vec3d &imin = indexBBox.min(), &imax = indexBBox.max();

    BBoxd worldBBox;
    Vec3d &wmin = worldBBox.min(), &wmax = worldBBox.max();
    // Seed the result with the image of the minimum corner, then fold in
    // the remaining seven corners (selected per axis from the bits of i).
    wmin = wmax = this->indexToWorld(imin);
    for (int i = 1; i < 8; ++i) {
        const Vec3d corner(
            (i & 1) ? imax(0) : imin(0),
            (i & 2) ? imax(1) : imin(1),
            (i & 4) ? imax(2) : imin(2));
        const Vec3d image = this->indexToWorld(corner);
        wmin = minComponent(wmin, image);
        wmax = maxComponent(wmax, image);
    }
    return worldBBox;
}
// Return the index-space bounding box of the world-space box's pre-image.
BBoxd
Transform::worldToIndex(const BBoxd& worldBBox) const
{
    Vec3d imin, imax;
    calculateBounds(*this, worldBBox.min(), worldBBox.max(), imin, imax);
    return BBoxd(imin, imax);
}
// As worldToIndex(), but round each bound to the nearest integer coordinate
// (cell-centered convention).
CoordBBox
Transform::worldToIndexCellCentered(const BBoxd& worldBBox) const
{
    Vec3d imin, imax;
    calculateBounds(*this, worldBBox.min(), worldBBox.max(), imin, imax);
    return CoordBBox(Coord::round(imin), Coord::round(imax));
}
// As worldToIndex(), but take the floor of each bound
// (node-centered convention).
CoordBBox
Transform::worldToIndexNodeCentered(const BBoxd& worldBBox) const
{
    Vec3d imin, imax;
    calculateBounds(*this, worldBBox.min(), worldBBox.max(), imin, imax);
    return CoordBBox(Coord::floor(imin), Coord::floor(imax));
}
////////////////////////////////////////
// Utility methods
void
calculateBounds(const Transform& t,
const Vec3d& minWS,
const Vec3d& maxWS,
Vec3d& minIS,
Vec3d& maxIS)
{
/// the pre-image of the 8 corners of the box
Vec3d corners[8];
corners[0] = minWS;
corners[1] = Vec3d(maxWS(0), minWS(1), minWS(2));
corners[2] = Vec3d(maxWS(0), maxWS(1), minWS(2));
corners[3] = Vec3d(minWS(0), maxWS(1), minWS(2));
corners[4] = Vec3d(minWS(0), minWS(1), maxWS(2));
corners[5] = Vec3d(maxWS(0), minWS(1), maxWS(2));
corners[6] = maxWS;
corners[7] = Vec3d(minWS(0), maxWS(1), maxWS(2));
Vec3d pre_image;
minIS = t.worldToIndex(corners[0]);
maxIS = minIS;
for (int i = 1; i < 8; ++i) {
pre_image = t.worldToIndex(corners[i]);
for (int j = 0; j < 3; ++j) {
minIS(j) = std::min(minIS(j), pre_image(j));
maxIS(j) = std::max(maxIS(j), pre_image(j));
}
}
}
////////////////////////////////////////
// Two transforms are equal if their voxel sizes match and their maps are
// equivalent (possibly after promotion to a common affine representation).
bool
Transform::operator==(const Transform& other) const
{
    // Different voxel sizes can never be equivalent.
    if (!this->voxelSize().eq(other.voxelSize())) return false;
    // Same map type: compare the maps directly.
    if (this->mapType() == other.mapType()) {
        return this->baseMap()->isEqual(*other.baseMap());
    }
    // Different map types, but both linear: compare their affine forms.
    if (this->isLinear() && other.isLinear()) {
        return *(this->baseMap()->getAffineMap()) == *(other.baseMap()->getAffineMap());
    }
    // Mixed linear/nonlinear: fall back to the maps' own equality test.
    return this->baseMap()->isEqual(*other.baseMap());
}
////////////////////////////////////////
// Pretty-print this transform: the voxel size, then either the 4x4
// index-to-world matrix (linear maps) or the linear component side-by-side
// with the frustum parameters (frustum maps).
void
Transform::print(std::ostream& os, const std::string& indent) const
{
    struct Local {
        // Print a Vec4d more compactly than Vec4d::str() does.
        static std::string rowAsString(const Vec4d& row)
        {
            std::ostringstream ostr;
            ostr << "[" << std::setprecision(3) << row[0] << ", "
                << row[1] << ", " << row[2] << ", " << row[3] << "] ";
            return ostr.str();
        }
    };
    // Write to a string stream so that I/O manipulators don't affect the output stream.
    std::ostringstream ostr;
    {
        // A uniform voxel size is reported as a single number.
        Vec3d dim = this->voxelSize();
        if (dim.eq(Vec3d(dim[0]))) {
            ostr << indent << std::left << "voxel size: " << std::setprecision(3) << dim[0];
        } else {
            ostr << indent << std::left << "voxel dimensions: [" << std::setprecision(3)
                << dim[0] << ", " << dim[1] << ", " << dim[2] << "]";
        }
        ostr << "\n";
    }
    if (this->isLinear()) {
        openvdb::Mat4R v2w = this->baseMap()->getAffineMap()->getMat4();
        ostr << indent << std::left << "index to world:\n";
        for (int row = 0; row < 4; ++row) {
            ostr << indent << "   " << std::left << Local::rowAsString(v2w[row]) << "\n";
        }
    } else if (this->mapType() == NonlinearFrustumMap::mapType()) {
        const NonlinearFrustumMap& frustum =
            static_cast<const NonlinearFrustumMap&>(*this->baseMap());
        const openvdb::Mat4R linear = this->baseMap()->getAffineMap()->getMat4();
        std::vector<std::string> linearRow;
        size_t w = 0;
        // Format the matrix rows first so the frustum column can be aligned
        // to the widest row (but never narrower than 30 characters).
        for (int row = 0; row < 4; ++row) {
            std::string str = Local::rowAsString(linear[row]);
            w = std::max(w, str.size());
            linearRow.push_back(str);
        }
        w = std::max<size_t>(w, 30);
        const int iw = int(w);
        // Print rows of the linear component matrix side-by-side with frustum parameters.
        ostr << indent << std::left << std::setw(iw) << "linear:"
            << "  frustum:\n";
        ostr << indent << "   " << std::left << std::setw(iw) << linearRow[0]
            << "  taper:  " << frustum.getTaper() << "\n";
        ostr << indent << "   " << std::left << std::setw(iw) << linearRow[1]
            << "  depth:  " << frustum.getDepth() << "\n";
        std::ostringstream ostmp;
        ostmp << indent << "   " << std::left << std::setw(iw) << linearRow[2]
            << "  bounds: " << frustum.getBBox();
        if (ostmp.str().size() < 79) {
            ostr << ostmp.str() << "\n";
            ostr << indent << "   " << std::left << std::setw(iw) << linearRow[3] << "\n";
        } else {
            // If the frustum bounding box doesn't fit on one line, split it into two lines.
            ostr << indent << "   " << std::left << std::setw(iw) << linearRow[2]
                << "  bounds: " << frustum.getBBox().min() << " ->\n";
            ostr << indent << "   " << std::left << std::setw(iw) << linearRow[3]
                << "      " << frustum.getBBox().max() << "\n";
        }
    } else {
        /// @todo Handle other map types.
    }
    os << ostr.str();
}
////////////////////////////////////////
// Stream the map's registered type name followed by its own description.
std::ostream&
operator<<(std::ostream& os, const Transform& t)
{
    return os << "Transform type: " << t.baseMap()->type() << std::endl
        << t.baseMap()->str() << std::endl;
}
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
| 15,271 | C++ | 26.868613 | 96 | 0.573309 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/ConjGradient.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file ConjGradient.h
/// @authors D.J. Hill, Peter Cucka
/// @brief Preconditioned conjugate gradient solver (solves @e Ax = @e b using
/// the conjugate gradient method with one of a selection of preconditioners)
#ifndef OPENVDB_MATH_CONJGRADIENT_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_CONJGRADIENT_HAS_BEEN_INCLUDED
#include <openvdb/Exceptions.h>
#include <openvdb/Types.h>
#include <openvdb/util/logging.h>
#include <openvdb/util/NullInterrupter.h>
#include "Math.h" // for Abs(), isZero(), Max(), Sqrt()
#include <tbb/parallel_for.h>
#include <tbb/parallel_reduce.h>
#include <algorithm> // for std::lower_bound()
#include <cassert>
#include <cmath> // for std::isfinite()
#include <limits>
#include <sstream>
#include <string>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
namespace pcg {
using SizeType = Index32;
using SizeRange = tbb::blocked_range<SizeType>;
template<typename ValueType> class Vector;
template<typename ValueType, SizeType STENCIL_SIZE> class SparseStencilMatrix;
template<typename ValueType> class Preconditioner;
template<typename MatrixType> class JacobiPreconditioner;
template<typename MatrixType> class IncompleteCholeskyPreconditioner;
/// Information about the state of a conjugate gradient solution
struct State {
    bool success;         ///< @c true if the solver converged
    int iterations;       ///< iterations performed (as output) or allowed (as input)
    double relativeError; ///< ||b - Ax|| / ||b|| (see solve() for details)
    double absoluteError; ///< ||b - Ax||
};
/// Return default termination conditions for a conjugate gradient solver.
template<typename ValueType>
inline State
terminationDefaults()
{
    // Defaults: failure flag cleared, up to 50 iterations, relative error
    // 1e-6, and an absolute error scaled from the value type's epsilon.
    const State s{
        /*success=*/false,
        /*iterations=*/50,
        /*relativeError=*/1.0e-6,
        /*absoluteError=*/std::numeric_limits<ValueType>::epsilon() * 100.0};
    return s;
}
////////////////////////////////////////
/// @brief Solve @e Ax = @e b via the preconditioned conjugate gradient method.
///
/// @param A a symmetric, positive-definite, @e N x @e N matrix
/// @param b a vector of size @e N
/// @param x a vector of size @e N
/// @param preconditioner a Preconditioner matrix
/// @param termination termination conditions given as a State object with the following fields:
/// <dl>
/// <dt><i>success</i>
/// <dd>ignored
/// <dt><i>iterations</i>
/// <dd>the maximum number of iterations, with or without convergence
/// <dt><i>relativeError</i>
/// <dd>the relative error ||<i>b</i> − <i>Ax</i>|| / ||<i>b</i>||
/// that denotes convergence
/// <dt><i>absoluteError</i>
/// <dd>the absolute error ||<i>b</i> − <i>Ax</i>|| that denotes convergence
///
/// @throw ArithmeticError if either @a x or @a b is not of the appropriate size.
template<typename PositiveDefMatrix>
inline State
solve(
const PositiveDefMatrix& A,
const Vector<typename PositiveDefMatrix::ValueType>& b,
Vector<typename PositiveDefMatrix::ValueType>& x,
Preconditioner<typename PositiveDefMatrix::ValueType>& preconditioner,
const State& termination = terminationDefaults<typename PositiveDefMatrix::ValueType>());
/// @brief Solve @e Ax = @e b via the preconditioned conjugate gradient method.
///
/// @param A a symmetric, positive-definite, @e N x @e N matrix
/// @param b a vector of size @e N
/// @param x a vector of size @e N
/// @param preconditioner a Preconditioner matrix
/// @param termination termination conditions given as a State object with the following fields:
/// <dl>
/// <dt><i>success</i>
/// <dd>ignored
/// <dt><i>iterations</i>
/// <dd>the maximum number of iterations, with or without convergence
/// <dt><i>relativeError</i>
/// <dd>the relative error ||<i>b</i> − <i>Ax</i>|| / ||<i>b</i>||
/// that denotes convergence
/// <dt><i>absoluteError</i>
/// <dd>the absolute error ||<i>b</i> − <i>Ax</i>|| that denotes convergence
/// @param interrupter an object adhering to the util::NullInterrupter interface
/// with which computation can be interrupted
///
/// @throw ArithmeticError if either @a x or @a b is not of the appropriate size.
/// @throw RuntimeError if the computation is interrupted.
template<typename PositiveDefMatrix, typename Interrupter>
inline State
solve(
const PositiveDefMatrix& A,
const Vector<typename PositiveDefMatrix::ValueType>& b,
Vector<typename PositiveDefMatrix::ValueType>& x,
Preconditioner<typename PositiveDefMatrix::ValueType>& preconditioner,
Interrupter& interrupter,
const State& termination = terminationDefaults<typename PositiveDefMatrix::ValueType>());
////////////////////////////////////////
/// Lightweight, variable-length vector
template<typename T>
class Vector
{
public:
    using ValueType = T;
    using Ptr = SharedPtr<Vector>;
    /// Construct an empty vector.
    Vector(): mData(nullptr), mSize(0) {}
    /// Construct a vector of @a n elements, with uninitialized values.
    Vector(SizeType n): mData(new T[n]), mSize(n) {}
    /// Construct a vector of @a n elements and initialize each element to the given value.
    Vector(SizeType n, const ValueType& val): mData(new T[n]), mSize(n) { this->fill(val); }
    ~Vector() { mSize = 0; delete[] mData; mData = nullptr; }
    /// Deep copy the given vector.
    Vector(const Vector&);
    /// Deep copy the given vector.
    Vector& operator=(const Vector&);
    /// Return the number of elements in this vector.
    SizeType size() const { return mSize; }
    /// Return @c true if this vector has no elements.
    bool empty() const { return (mSize == 0); }
    /// @brief Reset this vector to have @a n elements, with uninitialized values.
    /// @warning All of this vector's existing values will be lost.
    void resize(SizeType n);
    /// Swap internal storage with another vector, which need not be the same size.
    void swap(Vector& other) { std::swap(mData, other.mData); std::swap(mSize, other.mSize); }
    /// Set all elements of this vector to @a value.
    void fill(const ValueType& value);
    //@{
    /// @brief Multiply each element of this vector by @a s.
    template<typename Scalar> void scale(const Scalar& s);
    template<typename Scalar> Vector& operator*=(const Scalar& s) { this->scale(s); return *this; }
    //@}
    /// Return the dot product of this vector with the given vector, which must be the same size.
    ValueType dot(const Vector&) const;
    /// Return the infinity norm of this vector.
    ValueType infNorm() const;
    /// Return the L2 norm of this vector.
    ValueType l2Norm() const { return Sqrt(this->dot(*this)); }
    /// Return @c true if every element of this vector has a finite value.
    bool isFinite() const;
    /// @brief Return @c true if this vector is equivalent to the given vector
    /// to within the specified tolerance.
    template<typename OtherValueType>
    bool eq(const Vector<OtherValueType>& other,
        ValueType eps = Tolerance<ValueType>::value()) const;
    /// Return a string representation of this vector.
    std::string str() const;
    //@{
    /// @brief Return the value of this vector's ith element.
    /// @warning No bounds checking is performed.
    inline T& at(SizeType i) { return mData[i]; }
    inline const T& at(SizeType i) const { return mData[i]; }
    inline T& operator[](SizeType i) { return this->at(i); }
    inline const T& operator[](SizeType i) const { return this->at(i); }
    //@}
    //@{
    /// @brief Return a pointer to this vector's elements.
    inline T* data() { return mData; }
    inline const T* data() const { return mData; }
    inline const T* constData() const { return mData; }
    //@}
private:
    // Functor for use with tbb::parallel_for()
    template<typename Scalar> struct ScaleOp;
    struct DeterministicDotProductOp;
    // Functors for use with tbb::parallel_reduce()
    template<typename OtherValueType> struct EqOp;
    struct InfNormOp;
    struct IsFiniteOp;
    T* mData;       // heap-allocated element storage (owned; freed in the destructor)
    SizeType mSize; // number of elements
};
using VectorS = Vector<float>;
using VectorD = Vector<double>;
////////////////////////////////////////
/// @brief Sparse, square matrix representing a 3D stencil operator of size @a STENCIL_SIZE
/// @details The implementation is a variation on compressed row storage (CRS).
template<typename ValueType_, SizeType STENCIL_SIZE>
class SparseStencilMatrix
{
public:
    using ValueType = ValueType_;
    using VectorType = Vector<ValueType>;
    using Ptr = SharedPtr<SparseStencilMatrix>;
    class ConstValueIter;
    class ConstRow;
    class RowEditor;
    // Canonical zero, returned by accessors for entries that are not stored.
    static const ValueType sZeroValue;
    /// Construct an @a n x @a n matrix with at most @a STENCIL_SIZE nonzero elements per row.
    SparseStencilMatrix(SizeType n);
    /// Deep copy the given matrix.
    SparseStencilMatrix(const SparseStencilMatrix&);
    //@{
    /// Return the number of rows in this matrix.
    SizeType numRows() const { return mNumRows; }
    SizeType size() const { return mNumRows; }
    //@}
    /// @brief Set the value at the given coordinates.
    /// @warning It is not safe to set values in the same row simultaneously
    /// from multiple threads.
    void setValue(SizeType row, SizeType col, const ValueType&);
    //@{
    /// @brief Return the value at the given coordinates.
    /// @warning It is not safe to get values from a row while another thread
    /// is setting values in that row.
    const ValueType& getValue(SizeType row, SizeType col) const;
    const ValueType& operator()(SizeType row, SizeType col) const;
    //@}
    /// Return a read-only view onto the given row of this matrix.
    ConstRow getConstRow(SizeType row) const;
    /// Return a read/write view onto the given row of this matrix.
    RowEditor getRowEditor(SizeType row);
    //@{
    /// @brief Multiply all elements in the matrix by @a s;
    template<typename Scalar> void scale(const Scalar& s);
    template<typename Scalar>
    SparseStencilMatrix& operator*=(const Scalar& s) { this->scale(s); return *this; }
    //@}
    /// @brief Multiply this matrix by @a inVec and return the result in @a resultVec.
    /// @throw ArithmeticError if either @a inVec or @a resultVec is not of size @e N,
    /// where @e N x @e N is the size of this matrix.
    template<typename VecValueType>
    void vectorMultiply(const Vector<VecValueType>& inVec, Vector<VecValueType>& resultVec) const;
    /// @brief Multiply this matrix by the vector represented by the array @a inVec
    /// and return the result in @a resultVec.
    /// @warning Both @a inVec and @a resultVec must have at least @e N elements,
    /// where @e N x @e N is the size of this matrix.
    template<typename VecValueType>
    void vectorMultiply(const VecValueType* inVec, VecValueType* resultVec) const;
    /// @brief Return @c true if this matrix is equivalent to the given matrix
    /// to within the specified tolerance.
    template<typename OtherValueType>
    bool eq(const SparseStencilMatrix<OtherValueType, STENCIL_SIZE>& other,
        ValueType eps = Tolerance<ValueType>::value()) const;
    /// Return @c true if every element of this matrix has a finite value.
    bool isFinite() const;
    /// Return a string representation of this matrix.
    std::string str() const;
private:
    // Lightweight mutable view onto one row's values, column indices and size.
    struct RowData {
        RowData(ValueType* v, SizeType* c, SizeType& s): mVals(v), mCols(c), mSize(s) {}
        ValueType* mVals; SizeType* mCols; SizeType& mSize;
    };
    // Read-only counterpart of RowData.
    struct ConstRowData {
        ConstRowData(const ValueType* v, const SizeType* c, const SizeType& s):
            mVals(v), mCols(c), mSize(s) {}
        const ValueType* mVals; const SizeType* mCols; const SizeType& mSize;
    };
    /// Base class for row accessors
    template<typename DataType_ = RowData>
    class RowBase
    {
    public:
        using DataType = DataType_;
        // Maximum number of stored entries per row.
        static SizeType capacity() { return STENCIL_SIZE; }
        RowBase(const DataType& data): mData(data) {}
        bool empty() const { return (mData.mSize == 0); }
        const SizeType& size() const { return mData.mSize; }
        const ValueType& getValue(SizeType columnIdx, bool& active) const;
        const ValueType& getValue(SizeType columnIdx) const;
        /// Return an iterator over the stored values in this row.
        ConstValueIter cbegin() const;
        /// @brief Return @c true if this row is equivalent to the given row
        /// to within the specified tolerance.
        template<typename OtherDataType>
        bool eq(const RowBase<OtherDataType>& other,
            ValueType eps = Tolerance<ValueType>::value()) const;
        /// @brief Return the dot product of this row with the first
        /// @a vecSize elements of @a inVec.
        /// @warning @a inVec must have at least @a vecSize elements.
        template<typename VecValueType>
        VecValueType dot(const VecValueType* inVec, SizeType vecSize) const;
        /// Return the dot product of this row with the given vector.
        template<typename VecValueType>
        VecValueType dot(const Vector<VecValueType>& inVec) const;
        /// Return a string representation of this row.
        std::string str() const;
    protected:
        friend class ConstValueIter;
        const ValueType& value(SizeType i) const { return mData.mVals[i]; }
        SizeType column(SizeType i) const { return mData.mCols[i]; }
        /// @brief Return the array index of the first column index that is
        /// equal to <i>or greater than</i> the given column index.
        /// @note If @a columnIdx is larger than any existing column index,
        /// the return value will point beyond the end of the array.
        SizeType find(SizeType columnIdx) const;
        DataType mData;
    };
    using ConstRowBase = RowBase<ConstRowData>;
public:
    /// Iterator over the stored values in a row of this matrix
    class ConstValueIter
    {
    public:
        const ValueType& operator*() const
        {
            // An empty row has no stored entries; report the canonical zero.
            if (mData.mSize == 0) return SparseStencilMatrix::sZeroValue;
            return mData.mVals[mCursor];
        }
        SizeType column() const { return mData.mCols[mCursor]; }
        void increment() { mCursor++; }
        ConstValueIter& operator++() { increment(); return *this; }
        // Iterator is valid while the cursor is within the row's stored entries.
        operator bool() const { return (mCursor < mData.mSize); }
        void reset() { mCursor = 0; }
    private:
        friend class SparseStencilMatrix;
        ConstValueIter(const RowData& d): mData(d.mVals, d.mCols, d.mSize), mCursor(0) {}
        ConstValueIter(const ConstRowData& d): mData(d), mCursor(0) {}
        const ConstRowData mData;
        SizeType mCursor;
    };
    /// Read-only accessor to a row of this matrix
    class ConstRow: public ConstRowBase
    {
    public:
        ConstRow(const ValueType* valueHead, const SizeType* columnHead, const SizeType& rowSize);
    }; // class ConstRow
    /// Read/write accessor to a row of this matrix
    class RowEditor: public RowBase<>
    {
    public:
        RowEditor(ValueType* valueHead, SizeType* columnHead, SizeType& rowSize, SizeType colSize);
        /// Set the number of entries in this row to zero.
        void clear();
        /// @brief Set the value of the entry in the specified column.
        /// @return the current number of entries stored in this row.
        SizeType setValue(SizeType column, const ValueType& value);
        //@{
        /// @brief Scale all of the entries in this row.
        template<typename Scalar> void scale(const Scalar&);
        template<typename Scalar>
        RowEditor& operator*=(const Scalar& s) { this->scale(s); return *this; }
        //@}
    private:
        const SizeType mNumColumns; // used only for bounds checking
    }; // class RowEditor
private:
    // Functors for use with tbb::parallel_for()
    struct MatrixCopyOp;
    template<typename VecValueType> struct VecMultOp;
    template<typename Scalar> struct RowScaleOp;
    // Functors for use with tbb::parallel_reduce()
    struct IsFiniteOp;
    template<typename OtherValueType> struct EqOp;
    // Flat storage for all rows (a variation on compressed row storage;
    // see the class-level documentation).
    const SizeType mNumRows;
    std::unique_ptr<ValueType[]> mValueArray;
    std::unique_ptr<SizeType[]> mColumnIdxArray;
    std::unique_ptr<SizeType[]> mRowSizeArray;
}; // class SparseStencilMatrix
////////////////////////////////////////
/// Base class for conjugate gradient preconditioners
template<typename T>
class Preconditioner
{
public:
    using ValueType = T;
    using Ptr = SharedPtr<Preconditioner>;
    // The base class ignores the matrix; subclasses use it to build their
    // preconditioning data.
    template<SizeType STENCIL_SIZE> Preconditioner(const SparseStencilMatrix<T, STENCIL_SIZE>&) {}
    virtual ~Preconditioner() = default;
    // Subclasses may report failure to construct a usable preconditioner;
    // the base class is always valid.
    virtual bool isValid() const { return true; }
    /// @brief Apply this preconditioner to a residue vector:
    /// @e z = <i>M</i><sup><small>&minus;1</small></sup><i>r</i>
    /// @param r residue vector
    /// @param[out] z preconditioned residue vector
    virtual void apply(const Vector<T>& r, Vector<T>& z) = 0;
};
////////////////////////////////////////
namespace internal {
// Functor for use with tbb::parallel_for() to copy data from one array to another
template<typename T>
struct CopyOp
{
CopyOp(const T* from_, T* to_): from(from_), to(to_) {}
void operator()(const SizeRange& range) const {
for (SizeType n = range.begin(), N = range.end(); n < N; ++n) to[n] = from[n];
}
const T* from;
T* to;
};
// Functor for use with tbb::parallel_for() to fill an array with a constant value
template<typename T>
struct FillOp
{
FillOp(T* data_, const T& val_): data(data_), val(val_) {}
void operator()(const SizeRange& range) const {
for (SizeType n = range.begin(), N = range.end(); n < N; ++n) data[n] = val;
}
T* data;
const T val;
};
// Functor for use with tbb::parallel_for() that computes a * x + y
template<typename T>
struct LinearOp
{
LinearOp(const T& a_, const T* x_, const T* y_, T* out_): a(a_), x(x_), y(y_), out(out_) {}
void operator()(const SizeRange& range) const {
if (isExactlyEqual(a, T(1))) {
for (SizeType n = range.begin(), N = range.end(); n < N; ++n) out[n] = x[n] + y[n];
} else if (isExactlyEqual(a, T(-1))) {
for (SizeType n = range.begin(), N = range.end(); n < N; ++n) out[n] = -x[n] + y[n];
} else {
for (SizeType n = range.begin(), N = range.end(); n < N; ++n) out[n] = a * x[n] + y[n];
}
}
const T a, *x, *y;
T* out;
};
} // namespace internal
////////////////////////////////////////
/// Pretty-print a solver State (error norms and iteration count) to a stream.
inline std::ostream&
operator<<(std::ostream& os, const State& state)
{
    if (state.success) os << "succeeded with ";
    os << "rel. err. " << state.relativeError
       << ", abs. err. " << state.absoluteError
       << " after " << state.iterations << " iteration";
    if (state.iterations != 1) os << "s";
    return os;
}
////////////////////////////////////////
// Copy constructor: allocate fresh storage and deep-copy the other vector's
// elements in parallel.
template<typename T>
inline
Vector<T>::Vector(const Vector& other): mData(new T[other.mSize]), mSize(other.mSize)
{
    tbb::parallel_for(SizeRange(0, mSize),
        internal::CopyOp<T>(/*from=*/other.mData, /*to=*/mData));
}
// Copy assignment: resize this vector's storage if necessary,
// then deep-copy the other vector's elements in parallel.
template<typename T>
inline
Vector<T>& Vector<T>::operator=(const Vector<T>& other)
{
    // Guard against self-assignment, which would otherwise trigger
    // a redundant parallel self-copy.
    if (this == &other) return *this;
    // Update the internal storage to the correct size.
    if (mSize != other.mSize) {
        // Allocate the new buffer before freeing the old one so that,
        // if the allocation throws, this vector is left unchanged rather
        // than holding a dangling pointer (which would double-delete
        // in the destructor).
        T* newData = new T[other.mSize];
        delete[] mData;
        mData = newData;
        mSize = other.mSize;
    }
    // Deep copy the data
    tbb::parallel_for(SizeRange(0, mSize),
        internal::CopyOp<T>(/*from=*/other.mData, /*to=*/mData));
    return *this;
}
// Resize this vector's storage to n elements.
// Note: the existing contents are NOT preserved, and the new elements
// are not initialized.
template<typename T>
inline void
Vector<T>::resize(SizeType n)
{
    if (n != mSize) {
        // Allocate before freeing so that a throwing allocation does not
        // leave mData dangling (which would cause a double delete in the
        // destructor).  delete[] on a null pointer is a no-op, so no
        // separate null check is needed.
        T* newData = new T[n];
        delete[] mData;
        mData = newData;
        mSize = n;
    }
}
// Assign the given value to every element of this vector, in parallel.
template<typename T>
inline void
Vector<T>::fill(const ValueType& value)
{
    tbb::parallel_for(SizeRange(0, mSize), internal::FillOp<T>(mData, value));
}
// Functor for use with tbb::parallel_for(): multiply each element by a scalar in place.
template<typename T>
template<typename Scalar>
struct Vector<T>::ScaleOp
{
    ScaleOp(T* data_, const Scalar& s_): data(data_), s(s_) {}

    void operator()(const SizeRange& range) const
    {
        SizeType i = range.begin();
        const SizeType last = range.end();
        while (i < last) {
            data[i] *= s;
            ++i;
        }
    }

    T* data;
    const Scalar s;
};
// Multiply every element of this vector by s, in parallel.
template<typename T>
template<typename Scalar>
inline void
Vector<T>::scale(const Scalar& s)
{
    tbb::parallel_for(SizeRange(0, mSize), ScaleOp<Scalar>(mData, s));
}
// Functor for use with tbb::parallel_for(): computes per-bin partial sums of
// a dot product.  Fixing the bin boundaries (rather than letting TBB choose
// the partitioning) makes the floating-point summation order, and hence the
// result, deterministic across runs.
template<typename T>
struct Vector<T>::DeterministicDotProductOp
{
    DeterministicDotProductOp(const T* a_, const T* b_,
        const SizeType binCount_, const SizeType arraySize_, T* reducetmp_):
        a(a_), b(b_), binCount(binCount_), arraySize(arraySize_), reducetmp(reducetmp_) {}

    void operator()(const SizeRange& range) const
    {
        const SizeType binSize = arraySize / binCount;
        // Iterate over bins (array segments)
        for (SizeType n = range.begin(), N = range.end(); n < N; ++n) {
            const SizeType begin = n * binSize;
            // The last bin absorbs any remainder from the integer division.
            const SizeType end = (n == binCount-1) ? arraySize : begin + binSize;
            // Compute the partial sum for this array segment
            T sum = zeroVal<T>();
            for (SizeType i = begin; i < end; ++i) { sum += a[i] * b[i]; }
            // Store the partial sum
            reducetmp[n] = sum;
        }
    }

    const T* a;            // first input array
    const T* b;            // second input array
    const SizeType binCount;   // number of fixed-size segments
    const SizeType arraySize;  // total number of elements
    T* reducetmp;          // output array of binCount partial sums
};
// Return the dot product of this vector with another vector of the same size.
// The parallel path uses fixed bins so the summation order (and therefore the
// floating-point result) is deterministic.
template<typename T>
inline T
Vector<T>::dot(const Vector<T>& other) const
{
    assert(this->size() == other.size());
    const T* aData = this->data();
    const T* bData = other.data();
    SizeType arraySize = this->size();
    T result = zeroVal<T>();
    if (arraySize < 1024) {
        // Compute the dot product in serial for small arrays
        for (SizeType n = 0; n < arraySize; ++n) {
            result += aData[n] * bData[n];
        }
    } else {
        // Compute the dot product by segmenting the arrays into
        // a predetermined number of sub arrays in parallel and
        // accumulate the finial result in series.
        const SizeType binCount = 100;
        T partialSums[100];
        tbb::parallel_for(SizeRange(0, binCount),
            DeterministicDotProductOp(aData, bData, binCount, arraySize, partialSums));
        // Serial accumulation of the partial sums preserves determinism.
        for (SizeType n = 0; n < binCount; ++n) {
            result += partialSums[n];
        }
    }
    return result;
}
// Functor for use with tbb::parallel_reduce(): running maximum of absolute values.
template<typename T>
struct Vector<T>::InfNormOp
{
    InfNormOp(const T* data_): data(data_) {}

    /// Fold the given index range into the running maximum magnitude.
    T operator()(const SizeRange& range, T maxValue) const
    {
        for (SizeType i = range.begin(), last = range.end(); i < last; ++i) {
            const T magnitude = Abs(data[i]);
            maxValue = Max(maxValue, magnitude);
        }
        return maxValue;
    }

    const T* data;
};
// Return the infinity norm (maximum absolute value) of this vector.
template<typename T>
inline T
Vector<T>::infNorm() const
{
    // Parallelize over the elements of this vector.
    T result = tbb::parallel_reduce(SizeRange(0, this->size()), /*seed=*/zeroVal<T>(),
        InfNormOp(this->data()), /*join=*/[](T max1, T max2) { return Max(max1, max2); });
    return result;
}
// Functor for use with tbb::parallel_reduce(): check that all elements are finite.
template<typename T>
struct Vector<T>::IsFiniteOp
{
    IsFiniteOp(const T* data_): data(data_) {}

    /// Return false as soon as a non-finite element is found in the range;
    /// short-circuit immediately if another range already failed.
    bool operator()(const SizeRange& range, bool finite) const
    {
        if (!finite) return false;
        for (SizeType i = range.begin(), last = range.end(); i < last; ++i) {
            if (!std::isfinite(data[i])) return false;
        }
        return true;
    }

    const T* data;
};
// Return true if every element of this vector is finite (no NaNs or infinities).
template<typename T>
inline bool
Vector<T>::isFinite() const
{
    // Parallelize over the elements of this vector.
    bool finite = tbb::parallel_reduce(SizeRange(0, this->size()), /*seed=*/true,
        IsFiniteOp(this->data()),
        /*join=*/[](bool finite1, bool finite2) { return (finite1 && finite2); });
    return finite;
}
// Functor for use with tbb::parallel_reduce(): elementwise approximate comparison
// of two arrays within a tolerance.
template<typename T>
template<typename OtherValueType>
struct Vector<T>::EqOp
{
    EqOp(const T* a_, const OtherValueType* b_, T e): a(a_), b(b_), eps(e) {}

    /// Return false as soon as two corresponding elements differ by more than eps;
    /// short-circuit immediately if another range already failed.
    bool operator()(const SizeRange& range, bool equal) const
    {
        if (!equal) return false;
        for (SizeType i = range.begin(), last = range.end(); i < last; ++i) {
            if (!isApproxEqual(a[i], b[i], eps)) return false;
        }
        return true;
    }

    const T* a;
    const OtherValueType* b;
    const T eps;
};
// Return true if this vector and the other have the same size and all
// corresponding elements are approximately equal within eps.
template<typename T>
template<typename OtherValueType>
inline bool
Vector<T>::eq(const Vector<OtherValueType>& other, ValueType eps) const
{
    if (this->size() != other.size()) return false;
    bool equal = tbb::parallel_reduce(SizeRange(0, this->size()), /*seed=*/true,
        EqOp<OtherValueType>(this->data(), other.data(), eps),
        /*join=*/[](bool eq1, bool eq2) { return (eq1 && eq2); });
    return equal;
}
// Return a string representation of this vector, e.g. "[1, 2, 3]".
template<typename T>
inline std::string
Vector<T>::str() const
{
    std::ostringstream ostr;
    ostr << "[";
    for (SizeType n = 0, N = this->size(); n < N; ++n) {
        if (n > 0) ostr << ", ";
        ostr << (*this)[n];
    }
    ostr << "]";
    return ostr.str();
}
////////////////////////////////////////
// Definition of the static zero constant returned (by reference) for
// matrix entries that have no explicitly stored value.
template<typename ValueType, SizeType STENCIL_SIZE>
const ValueType SparseStencilMatrix<ValueType, STENCIL_SIZE>::sZeroValue = zeroVal<ValueType>();
// Construct an empty numRows x numRows matrix with room for
// STENCIL_SIZE entries per row.
template<typename ValueType, SizeType STENCIL_SIZE>
inline
SparseStencilMatrix<ValueType, STENCIL_SIZE>::SparseStencilMatrix(SizeType numRows)
    : mNumRows(numRows)
    , mValueArray(new ValueType[mNumRows * STENCIL_SIZE])
    , mColumnIdxArray(new SizeType[mNumRows * STENCIL_SIZE])
    , mRowSizeArray(new SizeType[mNumRows])
{
    // Initialize the matrix to a null state by setting the size of each row to zero.
    tbb::parallel_for(SizeRange(0, mNumRows),
        internal::FillOp<SizeType>(mRowSizeArray.get(), /*value=*/0));
}
// Functor for use with tbb::parallel_for(): copy the raw value and column-index
// storage from one matrix to another over a flat index range.
template<typename ValueType, SizeType STENCIL_SIZE>
struct SparseStencilMatrix<ValueType, STENCIL_SIZE>::MatrixCopyOp
{
    MatrixCopyOp(const SparseStencilMatrix& from_, SparseStencilMatrix& to_):
        from(&from_), to(&to_) {}

    void operator()(const SizeRange& range) const
    {
        const ValueType* srcVal = from->mValueArray.get();
        const SizeType* srcCol = from->mColumnIdxArray.get();
        ValueType* dstVal = to->mValueArray.get();
        SizeType* dstCol = to->mColumnIdxArray.get();
        for (SizeType i = range.begin(), last = range.end(); i < last; ++i) {
            dstVal[i] = srcVal[i];
            dstCol[i] = srcCol[i];
        }
    }

    const SparseStencilMatrix* from;
    SparseStencilMatrix* to;
};
// Copy constructor: allocate fresh storage and deep-copy the other matrix's
// values, column indices, and per-row sizes in parallel.
template<typename ValueType, SizeType STENCIL_SIZE>
inline
SparseStencilMatrix<ValueType, STENCIL_SIZE>::SparseStencilMatrix(const SparseStencilMatrix& other)
    : mNumRows(other.mNumRows)
    , mValueArray(new ValueType[mNumRows * STENCIL_SIZE])
    , mColumnIdxArray(new SizeType[mNumRows * STENCIL_SIZE])
    , mRowSizeArray(new SizeType[mNumRows])
{
    SizeType size = mNumRows * STENCIL_SIZE;
    // Copy the value and column index arrays from the other matrix to this matrix.
    tbb::parallel_for(SizeRange(0, size), MatrixCopyOp(/*from=*/other, /*to=*/*this));
    // Copy the row size array from the other matrix to this matrix.
    tbb::parallel_for(SizeRange(0, mNumRows),
        internal::CopyOp<SizeType>(/*from=*/other.mRowSizeArray.get(), /*to=*/mRowSizeArray.get()));
}
// Set the entry at (row, col) by delegating to the row's editor.
template<typename ValueType, SizeType STENCIL_SIZE>
inline void
SparseStencilMatrix<ValueType, STENCIL_SIZE>::setValue(SizeType row, SizeType col,
    const ValueType& val)
{
    assert(row < mNumRows);
    this->getRowEditor(row).setValue(col, val);
}
// Return the entry at (row, col), or a reference to zero if no entry is stored.
template<typename ValueType, SizeType STENCIL_SIZE>
inline const ValueType&
SparseStencilMatrix<ValueType, STENCIL_SIZE>::getValue(SizeType row, SizeType col) const
{
    assert(row < mNumRows);
    return this->getConstRow(row).getValue(col);
}
// Function-call syntax for element access: equivalent to getValue(row, col).
template<typename ValueType, SizeType STENCIL_SIZE>
inline const ValueType&
SparseStencilMatrix<ValueType, STENCIL_SIZE>::operator()(SizeType row, SizeType col) const
{
    return this->getValue(row,col);
}
// Functor for use with tbb::parallel_for(): scale each row in the index range.
template<typename ValueType, SizeType STENCIL_SIZE>
template<typename Scalar>
struct SparseStencilMatrix<ValueType, STENCIL_SIZE>::RowScaleOp
{
    RowScaleOp(SparseStencilMatrix& m, const Scalar& s_): mat(&m), s(s_) {}

    void operator()(const SizeRange& range) const
    {
        for (SizeType i = range.begin(), last = range.end(); i < last; ++i) {
            RowEditor editor = mat->getRowEditor(i);
            editor.scale(s);
        }
    }

    SparseStencilMatrix* mat;
    const Scalar s;
};
// Multiply every stored entry of this matrix by s.
template<typename ValueType, SizeType STENCIL_SIZE>
template<typename Scalar>
inline void
SparseStencilMatrix<ValueType, STENCIL_SIZE>::scale(const Scalar& s)
{
    // Parallelize over the rows in the matrix.
    tbb::parallel_for(SizeRange(0, mNumRows), RowScaleOp<Scalar>(*this, s));
}
// Functor for use with tbb::parallel_for(): sparse matrix-vector product,
// computing one output element (one row dot product) per index.
template<typename ValueType, SizeType STENCIL_SIZE>
template<typename VecValueType>
struct SparseStencilMatrix<ValueType, STENCIL_SIZE>::VecMultOp
{
    VecMultOp(const SparseStencilMatrix& m, const VecValueType* i, VecValueType* o):
        mat(&m), in(i), out(o) {}

    void operator()(const SizeRange& range) const
    {
        const SizeType numCols = mat->numRows(); // loop-invariant; the matrix is square
        for (SizeType i = range.begin(), last = range.end(); i < last; ++i) {
            out[i] = mat->getConstRow(i).dot(in, numCols);
        }
    }

    const SparseStencilMatrix* mat;
    const VecValueType* in;
    VecValueType* out;
};
// Compute resultVec = this * inVec, validating that both vectors match the
// matrix dimension before delegating to the raw-pointer overload.
template<typename ValueType, SizeType STENCIL_SIZE>
template<typename VecValueType>
inline void
SparseStencilMatrix<ValueType, STENCIL_SIZE>::vectorMultiply(
    const Vector<VecValueType>& inVec, Vector<VecValueType>& resultVec) const
{
    if (inVec.size() != mNumRows) {
        OPENVDB_THROW(ArithmeticError, "matrix and input vector have incompatible sizes ("
            << mNumRows << "x" << mNumRows << " vs. " << inVec.size() << ")");
    }
    if (resultVec.size() != mNumRows) {
        OPENVDB_THROW(ArithmeticError, "matrix and result vector have incompatible sizes ("
            << mNumRows << "x" << mNumRows << " vs. " << resultVec.size() << ")");
    }
    vectorMultiply(inVec.data(), resultVec.data());
}
// Compute resultVec = this * inVec over raw arrays (no bounds checking;
// both arrays must have at least numRows() elements).
template<typename ValueType, SizeType STENCIL_SIZE>
template<typename VecValueType>
inline void
SparseStencilMatrix<ValueType, STENCIL_SIZE>::vectorMultiply(
    const VecValueType* inVec, VecValueType* resultVec) const
{
    // Parallelize over the rows in the matrix.
    tbb::parallel_for(SizeRange(0, mNumRows),
        VecMultOp<VecValueType>(*this, inVec, resultVec));
}
// Functor for use with tbb::parallel_reduce(): row-by-row approximate
// comparison of two matrices within a tolerance.
template<typename ValueType, SizeType STENCIL_SIZE>
template<typename OtherValueType>
struct SparseStencilMatrix<ValueType, STENCIL_SIZE>::EqOp
{
    EqOp(const SparseStencilMatrix& a_,
        const SparseStencilMatrix<OtherValueType, STENCIL_SIZE>& b_, ValueType e):
        a(&a_), b(&b_), eps(e) {}

    /// Return false as soon as a pair of corresponding rows differs;
    /// short-circuit immediately if another range already failed.
    bool operator()(const SizeRange& range, bool equal) const
    {
        if (!equal) return false;
        for (SizeType i = range.begin(), last = range.end(); i < last; ++i) {
            if (!a->getConstRow(i).eq(b->getConstRow(i), eps)) return false;
        }
        return true;
    }

    const SparseStencilMatrix* a;
    const SparseStencilMatrix<OtherValueType, STENCIL_SIZE>* b;
    const ValueType eps;
};
// Return true if the two matrices have the same number of rows and all
// corresponding rows are approximately equal within eps.
template<typename ValueType, SizeType STENCIL_SIZE>
template<typename OtherValueType>
inline bool
SparseStencilMatrix<ValueType, STENCIL_SIZE>::eq(
    const SparseStencilMatrix<OtherValueType, STENCIL_SIZE>& other, ValueType eps) const
{
    if (this->numRows() != other.numRows()) return false;
    bool equal = tbb::parallel_reduce(SizeRange(0, this->numRows()), /*seed=*/true,
        EqOp<OtherValueType>(*this, other, eps),
        /*join=*/[](bool eq1, bool eq2) { return (eq1 && eq2); });
    return equal;
}
// Functor for use with tbb::parallel_reduce(): check that every stored entry
// in each row of the range is finite.
template<typename ValueType, SizeType STENCIL_SIZE>
struct SparseStencilMatrix<ValueType, STENCIL_SIZE>::IsFiniteOp
{
    IsFiniteOp(const SparseStencilMatrix& m): mat(&m) {}

    /// Return false as soon as a non-finite entry is found;
    /// short-circuit immediately if another range already failed.
    bool operator()(const SizeRange& range, bool finite) const
    {
        if (!finite) return false;
        for (SizeType i = range.begin(), last = range.end(); i < last; ++i) {
            const ConstRow row = mat->getConstRow(i);
            for (ConstValueIter it = row.cbegin(); it; ++it) {
                if (!std::isfinite(*it)) return false;
            }
        }
        return true;
    }

    const SparseStencilMatrix* mat;
};
// Return true if every stored entry of this matrix is finite.
template<typename ValueType, SizeType STENCIL_SIZE>
inline bool
SparseStencilMatrix<ValueType, STENCIL_SIZE>::isFinite() const
{
    // Parallelize over the rows of this matrix.
    bool finite = tbb::parallel_reduce(SizeRange(0, this->numRows()), /*seed=*/true,
        IsFiniteOp(*this), /*join=*/[](bool finite1, bool finite2) { return (finite1&&finite2); });
    return finite;
}
// Return a string representation of this matrix, one row per line.
template<typename ValueType, SizeType STENCIL_SIZE>
inline std::string
SparseStencilMatrix<ValueType, STENCIL_SIZE>::str() const
{
    std::ostringstream ostr;
    // Iterate over numRows() (as every other member of this class does):
    // each index must be valid for getConstRow(), which asserts i < mNumRows.
    for (SizeType n = 0, N = this->numRows(); n < N; ++n) {
        ostr << n << ": " << this->getConstRow(n).str() << "\n";
    }
    return ostr.str();
}
// Return a read/write accessor for row i.  The editor aliases this matrix's
// storage, so edits through it modify the matrix directly.
template<typename ValueType, SizeType STENCIL_SIZE>
inline typename SparseStencilMatrix<ValueType, STENCIL_SIZE>::RowEditor
SparseStencilMatrix<ValueType, STENCIL_SIZE>::getRowEditor(SizeType i)
{
    assert(i < mNumRows);
    const SizeType head = i * STENCIL_SIZE; // index for this row into main storage
    return RowEditor(&mValueArray[head], &mColumnIdxArray[head], mRowSizeArray[i], mNumRows);
}
// Return a read-only accessor for row i.
template<typename ValueType, SizeType STENCIL_SIZE>
inline typename SparseStencilMatrix<ValueType, STENCIL_SIZE>::ConstRow
SparseStencilMatrix<ValueType, STENCIL_SIZE>::getConstRow(SizeType i) const
{
    assert(i < mNumRows);
    const SizeType head = i * STENCIL_SIZE; // index for this row into main storage
    return ConstRow(&mValueArray[head], &mColumnIdxArray[head], mRowSizeArray[i]);
}
// Binary-search this row for the given column index.  Returns the offset of
// the first stored entry whose column is >= columnIdx, or size() if there is
// none (callers must check that the entry at the returned offset actually
// matches columnIdx).
template<typename ValueType, SizeType STENCIL_SIZE>
template<typename DataType>
inline SizeType
SparseStencilMatrix<ValueType, STENCIL_SIZE>::RowBase<DataType>::find(SizeType columnIdx) const
{
    if (this->empty()) return mData.mSize;
    // Get a pointer to the first column index that is equal to or greater than the given index.
    // (This assumes that the data is sorted by column.)
    const SizeType* colPtr = std::lower_bound(mData.mCols, mData.mCols + mData.mSize, columnIdx);
    // Return the offset of the pointer from the beginning of the array.
    return static_cast<SizeType>(colPtr - mData.mCols);
}
// Return the entry stored for the given column, or a reference to zero if the
// column has no stored entry.  @a active reports whether an entry was found.
template<typename ValueType, SizeType STENCIL_SIZE>
template<typename DataType>
inline const ValueType&
SparseStencilMatrix<ValueType, STENCIL_SIZE>::RowBase<DataType>::getValue(
    SizeType columnIdx, bool& active) const
{
    const SizeType idx = this->find(columnIdx);
    if (idx < this->size() && this->column(idx) == columnIdx) {
        active = true;
        return this->value(idx);
    }
    active = false;
    return SparseStencilMatrix::sZeroValue;
}
// Return the entry stored for the given column, or a reference to zero if the
// column has no stored entry.
template<typename ValueType, SizeType STENCIL_SIZE>
template<typename DataType>
inline const ValueType&
SparseStencilMatrix<ValueType, STENCIL_SIZE>::RowBase<DataType>::getValue(SizeType columnIdx) const
{
    const SizeType idx = this->find(columnIdx);
    const bool stored = (idx < this->size()) && (this->column(idx) == columnIdx);
    return stored ? this->value(idx) : SparseStencilMatrix::sZeroValue;
}
// Return an iterator over this row's stored (column, value) entries.
template<typename ValueType, SizeType STENCIL_SIZE>
template<typename DataType>
inline typename SparseStencilMatrix<ValueType, STENCIL_SIZE>::ConstValueIter
SparseStencilMatrix<ValueType, STENCIL_SIZE>::RowBase<DataType>::cbegin() const
{
    return ConstValueIter(mData);
}
// Return true if two rows store the same columns with approximately equal
// values (within eps).
template<typename ValueType, SizeType STENCIL_SIZE>
template<typename DataType>
template<typename OtherDataType>
inline bool
SparseStencilMatrix<ValueType, STENCIL_SIZE>::RowBase<DataType>::eq(
    const RowBase<OtherDataType>& other, ValueType eps) const
{
    if (this->size() != other.size()) return false;
    // The sizes match, so the two iterators advance in lockstep and are
    // exhausted together.
    for (ConstValueIter it = cbegin(), oit = other.cbegin(); it || oit; ++it, ++oit) {
        if (it.column() != oit.column()) return false;
        if (!isApproxEqual(*it, *oit, eps)) return false;
    }
    return true;
}
// Sparse dot product of this row with a dense vector: accumulate
// value(i) * inVec[column(i)] over at most min(vecSize, size()) stored entries.
template<typename ValueType, SizeType STENCIL_SIZE>
template<typename DataType>
template<typename VecValueType>
inline VecValueType
SparseStencilMatrix<ValueType, STENCIL_SIZE>::RowBase<DataType>::dot(
    const VecValueType* inVec, SizeType vecSize) const
{
    VecValueType sum = zeroVal<VecValueType>();
    const SizeType count = std::min(vecSize, this->size());
    for (SizeType i = 0; i < count; ++i) {
        sum += static_cast<VecValueType>(this->value(i) * inVec[this->column(i)]);
    }
    return sum;
}
// Sparse dot product of this row with a Vector (delegates to the raw-pointer
// overload).
template<typename ValueType, SizeType STENCIL_SIZE>
template<typename DataType>
template<typename VecValueType>
inline VecValueType
SparseStencilMatrix<ValueType, STENCIL_SIZE>::RowBase<DataType>::dot(
    const Vector<VecValueType>& inVec) const
{
    return dot(inVec.data(), inVec.size());
}
// Return this row as a comma-separated list of "(column, value)" pairs.
template<typename ValueType, SizeType STENCIL_SIZE>
template<typename DataType>
inline std::string
SparseStencilMatrix<ValueType, STENCIL_SIZE>::RowBase<DataType>::str() const
{
    std::ostringstream ostr;
    for (SizeType n = 0, N = this->size(); n < N; ++n) {
        if (n > 0) ostr << ", ";
        ostr << "(" << this->column(n) << ", " << this->value(n) << ")";
    }
    return ostr.str();
}
// Construct a read-only row view.  The const_casts are needed only because
// ConstRowData shares its layout with the mutable RowData; ConstRow itself
// exposes no mutating operations.
template<typename ValueType, SizeType STENCIL_SIZE>
inline
SparseStencilMatrix<ValueType, STENCIL_SIZE>::ConstRow::ConstRow(
    const ValueType* valueHead, const SizeType* columnHead, const SizeType& rowSize):
    ConstRowBase(ConstRowData(const_cast<ValueType*>(valueHead),
        const_cast<SizeType*>(columnHead), const_cast<SizeType&>(rowSize)))
{
}
// Construct a mutable row view; colSize is retained for bounds checking in
// setValue().
template<typename ValueType, SizeType STENCIL_SIZE>
inline
SparseStencilMatrix<ValueType, STENCIL_SIZE>::RowEditor::RowEditor(
    ValueType* valueHead, SizeType* columnHead, SizeType& rowSize, SizeType colSize):
    RowBase<>(RowData(valueHead, columnHead, rowSize)), mNumColumns(colSize)
{
}
// Remove all entries from this row.
template<typename ValueType, SizeType STENCIL_SIZE>
inline void
SparseStencilMatrix<ValueType, STENCIL_SIZE>::RowEditor::clear()
{
    // Note: since mSize is a reference, this modifies the underlying matrix.
    RowBase<>::mData.mSize = 0;
}
// Set the value of the entry in the given column, inserting a new entry if the
// column has none, and keeping the row's entries sorted by column index.
// Returns the row's entry count after the operation.
template<typename ValueType, SizeType STENCIL_SIZE>
inline SizeType
SparseStencilMatrix<ValueType, STENCIL_SIZE>::RowEditor::setValue(
    SizeType column, const ValueType& value)
{
    assert(column < mNumColumns);
    RowData& data = RowBase<>::mData;
    // Get the offset of the first column index that is equal to or greater than
    // the column to be modified.
    SizeType offset = this->find(column);
    if (offset < data.mSize && data.mCols[offset] == column) {
        // If the column already exists, just update its value.
        data.mVals[offset] = value;
        return data.mSize;
    }
    // Check that it is safe to add a new column (each row can hold at most
    // STENCIL_SIZE entries).
    assert(data.mSize < this->capacity());
    if (offset >= data.mSize) {
        // The new column's index is larger than any existing index. Append the new column.
        data.mVals[data.mSize] = value;
        data.mCols[data.mSize] = column;
    } else {
        // Insert the new column at the computed offset after shifting subsequent columns.
        // (Shift from the back so no entry is overwritten before it is moved.)
        for (SizeType i = data.mSize; i > offset; --i) {
            data.mVals[i] = data.mVals[i - 1];
            data.mCols[i] = data.mCols[i - 1];
        }
        data.mVals[offset] = value;
        data.mCols[offset] = column;
    }
    // mSize is a reference into the matrix's row-size array, so this updates
    // the matrix itself.
    ++data.mSize;
    return data.mSize;
}
// Multiply every stored entry in this row by the given scalar.
template<typename ValueType, SizeType STENCIL_SIZE>
template<typename Scalar>
inline void
SparseStencilMatrix<ValueType, STENCIL_SIZE>::RowEditor::scale(const Scalar& s)
{
    // Use SizeType for the loop counter (as everywhere else in this class):
    // size() returns SizeType, so declaring these as int both narrows the
    // value and mixes signed/unsigned arithmetic in the comparison.
    for (SizeType idx = 0, N = this->size(); idx < N; ++idx) {
        RowBase<>::mData.mVals[idx] *= s;
    }
}
////////////////////////////////////////
/// Diagonal preconditioner
template<typename MatrixType>
class JacobiPreconditioner: public Preconditioner<typename MatrixType::ValueType>
{
private:
    struct InitOp;
    struct ApplyOp;
public:
    using ValueType = typename MatrixType::ValueType;
    using BaseType = Preconditioner<ValueType>;
    using VectorType = Vector<ValueType>;
    using Ptr = SharedPtr<JacobiPreconditioner>;

    /// @brief Store the reciprocal of each diagonal entry of @a A.
    JacobiPreconditioner(const MatrixType& A): BaseType(A), mDiag(A.numRows())
    {
        // Initialize vector mDiag with the values from the matrix diagonal.
        tbb::parallel_for(SizeRange(0, A.numRows()), InitOp(A, mDiag.data()));
    }

    ~JacobiPreconditioner() override = default;

    /// Apply: z = D^-1 r, i.e., multiply r componentwise by the stored
    /// reciprocal diagonal.
    void apply(const Vector<ValueType>& r, Vector<ValueType>& z) override
    {
        const SizeType size = mDiag.size();
        assert(r.size() == z.size());
        assert(r.size() == size);
        tbb::parallel_for(SizeRange(0, size), ApplyOp(mDiag.data(), r.data(), z.data()));
    }

    /// Return @c true if all values along the diagonal are finite.
    bool isFinite() const { return mDiag.isFinite(); }

private:
    // Functor for use with tbb::parallel_for(): store 1/A(n,n) for each row n.
    struct InitOp
    {
        InitOp(const MatrixType& m, ValueType* v): mat(&m), vec(v) {}
        void operator()(const SizeRange& range) const {
            for (SizeType n = range.begin(), N = range.end(); n < N; ++n) {
                const ValueType val = mat->getValue(n, n);
                // A (near-)zero diagonal entry cannot be inverted.
                assert(!isApproxZero(val, ValueType(0.0001)));
                vec[n] = static_cast<ValueType>(1.0 / val);
            }
        }
        const MatrixType* mat; ValueType* vec;
    };

    // Functor for use with tbb::parallel_for(): componentwise product out = x * y.
    struct ApplyOp
    {
        ApplyOp(const ValueType* x_, const ValueType* y_, ValueType* out_):
            x(x_), y(y_), out(out_) {}
        void operator()(const SizeRange& range) const {
            for (SizeType n = range.begin(), N = range.end(); n < N; ++n) out[n] = x[n] * y[n];
        }
        const ValueType *x, *y; ValueType* out;
    };

    // The Jacobi preconditioner is a diagonal matrix
    VectorType mDiag;
}; // class JacobiPreconditioner
/// Preconditioner using incomplete Cholesky factorization
template<typename MatrixType>
class IncompleteCholeskyPreconditioner: public Preconditioner<typename MatrixType::ValueType>
{
private:
    struct CopyToLowerOp;
    struct TransposeOp;

public:
    using ValueType = typename MatrixType::ValueType;
    using BaseType = Preconditioner<ValueType>;
    using VectorType = Vector<ValueType>;
    using Ptr = SharedPtr<IncompleteCholeskyPreconditioner>;
    // The triangular factors are stored with a stencil of 4 entries per row.
    using TriangularMatrix = SparseStencilMatrix<ValueType, 4>;
    using TriangleConstRow = typename TriangularMatrix::ConstRow;
    using TriangleRowEditor = typename TriangularMatrix::RowEditor;

    /// @brief Compute the incomplete Cholesky factorization of @a matrix.
    /// If a (near-)nonpositive pivot is encountered, the factorization is
    /// abandoned and isValid() subsequently returns @c false.
    IncompleteCholeskyPreconditioner(const MatrixType& matrix)
        : BaseType(matrix)
        , mLowerTriangular(matrix.numRows())
        , mUpperTriangular(matrix.numRows())
        , mTempVec(matrix.numRows())
    {
        // Size of matrix
        const SizeType numRows = mLowerTriangular.numRows();
        // Copy the upper triangular part to the lower triangular part.
        tbb::parallel_for(SizeRange(0, numRows), CopyToLowerOp(matrix, mLowerTriangular));
        // Build the Incomplete Cholesky Matrix
        //
        // Algorithm:
        //
        // for (k = 0; k < size; ++k) {
        //     A(k,k) = sqrt(A(k,k));
        //     for (i = k +1, i < size; ++i) {
        //         if (A(i,k) == 0) continue;
        //         A(i,k) = A(i,k) / A(k,k);
        //     }
        //     for (j = k+1; j < size; ++j) {
        //         for (i = j; i < size; ++i) {
        //             if (A(i,j) == 0) continue;
        //             A(i,j) -= A(i,k)*A(j,k);
        //         }
        //     }
        // }
        mPassedCompatibilityCondition = true;
        for (SizeType k = 0; k < numRows; ++k) {
            TriangleConstRow crow_k = mLowerTriangular.getConstRow(k);
            ValueType diagonalValue = crow_k.getValue(k);
            // Test if the matrix build has failed.
            // (A valid Cholesky pivot must be strictly positive.)
            if (diagonalValue < 1.e-5) {
                mPassedCompatibilityCondition = false;
                break;
            }
            diagonalValue = Sqrt(diagonalValue);
            TriangleRowEditor row_k = mLowerTriangular.getRowEditor(k);
            row_k.setValue(k, diagonalValue);
            // Exploit the fact that the matrix is symmetric.
            typename MatrixType::ConstRow srcRow = matrix.getConstRow(k);
            typename MatrixType::ConstValueIter citer = srcRow.cbegin();
            // Scale column k below the diagonal by the new pivot.
            for ( ; citer; ++citer) {
                SizeType ii = citer.column();
                if (ii < k+1) continue; // skip entries at or above the diagonal
                TriangleRowEditor row_ii = mLowerTriangular.getRowEditor(ii);
                row_ii.setValue(k, *citer / diagonalValue);
            }
            // for (j = k+1; j < size; ++j) replaced by row iter below
            citer.reset(); // k,j entries
            for ( ; citer; ++citer) {
                SizeType j = citer.column();
                if (j < k+1) continue;
                TriangleConstRow row_j = mLowerTriangular.getConstRow(j);
                ValueType a_jk = row_j.getValue(k); // a_jk is non zero if a_kj is non zero
                // Entry (i,j) is non-zero if matrix(j,i) is nonzero
                typename MatrixType::ConstRow mask = matrix.getConstRow(j);
                typename MatrixType::ConstValueIter maskIter = mask.cbegin();
                for ( ; maskIter; ++maskIter) {
                    SizeType i = maskIter.column();
                    if (i < j) continue;
                    TriangleConstRow crow_i = mLowerTriangular.getConstRow(i);
                    ValueType a_ij = crow_i.getValue(j);
                    ValueType a_ik = crow_i.getValue(k);
                    TriangleRowEditor row_i = mLowerTriangular.getRowEditor(i);
                    a_ij -= a_ik * a_jk;
                    row_i.setValue(j, a_ij);
                }
            }
        }
        // Build the transpose of the IC matrix: mUpperTriangular
        tbb::parallel_for(SizeRange(0, numRows),
            TransposeOp(matrix, mLowerTriangular, mUpperTriangular));
    }

    ~IncompleteCholeskyPreconditioner() override = default;

    /// Return @c true if the factorization succeeded.
    bool isValid() const override { return mPassedCompatibilityCondition; }

    /// @brief Apply the preconditioner: solve L (L^T z) = r by forward
    /// substitution with the lower factor, then back substitution with the
    /// upper factor.
    /// @throw ArithmeticError if the factorization previously failed.
    void apply(const Vector<ValueType>& rVec, Vector<ValueType>& zVec) override
    {
        if (!mPassedCompatibilityCondition) {
            OPENVDB_THROW(ArithmeticError, "invalid Cholesky decomposition");
        }
        // Solve L * tmp = r, then U * z = tmp (U = L^T, stored in mUpperTriangular).
        SizeType size = mLowerTriangular.numRows();
        zVec.fill(zeroVal<ValueType>());
        ValueType* zData = zVec.data();
        if (size == 0) return;
        assert(rVec.size() == size);
        assert(zVec.size() == size);
        // Allocate a temp vector
        mTempVec.fill(zeroVal<ValueType>());
        ValueType* tmpData = mTempVec.data();
        const ValueType* rData = rVec.data();
        // Solve mLowerTriangular * tmp = rVec;  (forward substitution)
        for (SizeType i = 0; i < size; ++i) {
            typename TriangularMatrix::ConstRow row = mLowerTriangular.getConstRow(i);
            ValueType diagonal = row.getValue(i);
            // The row dot product only picks up already-solved components,
            // because mTempVec entries beyond i are still zero.
            ValueType dot = row.dot(mTempVec);
            tmpData[i] = (rData[i] - dot) / diagonal;
            if (!std::isfinite(tmpData[i])) {
                OPENVDB_LOG_DEBUG_RUNTIME("1 diagonal was " << diagonal);
                OPENVDB_LOG_DEBUG_RUNTIME("1a diagonal " << row.getValue(i));
            }
        }
        // Solve mUpperTriangular * zVec = tmp;  (back substitution, bottom row up)
        for (SizeType ii = 0; ii < size; ++ii) {
            SizeType i = size - 1 - ii;
            typename TriangularMatrix::ConstRow row = mUpperTriangular.getConstRow(i);
            ValueType diagonal = row.getValue(i);
            ValueType dot = row.dot(zVec);
            zData[i] = (tmpData[i] - dot) / diagonal;
            if (!std::isfinite(zData[i])) {
                OPENVDB_LOG_DEBUG_RUNTIME("2 diagonal was " << diagonal);
            }
        }
    }

    /// Return the lower-triangular Cholesky factor.
    const TriangularMatrix& lowerMatrix() const { return mLowerTriangular; }
    /// Return the upper-triangular factor (the transpose of the lower factor).
    const TriangularMatrix& upperMatrix() const { return mUpperTriangular; }

private:
    // Functor for use with tbb::parallel_for(): copy each row's at-or-below-
    // diagonal entries of the source matrix into the lower triangular matrix.
    struct CopyToLowerOp
    {
        CopyToLowerOp(const MatrixType& m, TriangularMatrix& l): mat(&m), lower(&l) {}
        void operator()(const SizeRange& range) const {
            for (SizeType n = range.begin(), N = range.end(); n < N; ++n) {
                typename TriangularMatrix::RowEditor outRow = lower->getRowEditor(n);
                outRow.clear();
                typename MatrixType::ConstRow inRow = mat->getConstRow(n);
                for (typename MatrixType::ConstValueIter it = inRow.cbegin(); it; ++it) {
                    if (it.column() > n) continue; // skip above diagonal
                    outRow.setValue(it.column(), *it);
                }
            }
        }
        const MatrixType* mat; TriangularMatrix* lower;
    };

    // Functor for use with tbb::parallel_for(): build the upper triangular
    // matrix as the transpose of the lower one, using the source matrix's
    // symmetric sparsity pattern to find the nonzero columns.
    struct TransposeOp
    {
        TransposeOp(const MatrixType& m, const TriangularMatrix& l, TriangularMatrix& u):
            mat(&m), lower(&l), upper(&u) {}
        void operator()(const SizeRange& range) const {
            for (SizeType n = range.begin(), N = range.end(); n < N; ++n) {
                typename TriangularMatrix::RowEditor outRow = upper->getRowEditor(n);
                outRow.clear();
                // Use the fact that matrix is symmetric.
                typename MatrixType::ConstRow inRow = mat->getConstRow(n);
                for (typename MatrixType::ConstValueIter it = inRow.cbegin(); it; ++it) {
                    const SizeType column = it.column();
                    if (column < n) continue; // only set upper triangle
                    outRow.setValue(column, lower->getValue(column, n));
                }
            }
        }
        const MatrixType* mat; const TriangularMatrix* lower; TriangularMatrix* upper;
    };

    TriangularMatrix mLowerTriangular;  // the factor L
    TriangularMatrix mUpperTriangular;  // L^T, precomputed for back substitution
    Vector<ValueType> mTempVec;         // scratch vector reused by apply()
    bool mPassedCompatibilityCondition; // false if the factorization failed
}; // class IncompleteCholeskyPreconditioner
////////////////////////////////////////
namespace internal {
/// Compute @e ax + @e y over raw arrays of the given size, writing into
/// @a resultVec (which may alias @a xVec or @a yVec, since the update is
/// elementwise).
template<typename T>
inline void
axpy(const T& a, const T* xVec, const T* yVec, T* resultVec, SizeType size)
{
    tbb::parallel_for(SizeRange(0, size), LinearOp<T>(a, xVec, yVec, resultVec));
}
/// Compute @e ax + @e y for Vectors of equal size (delegates to the
/// raw-pointer overload).
template<typename T>
inline void
axpy(const T& a, const Vector<T>& xVec, const Vector<T>& yVec, Vector<T>& result)
{
    assert(xVec.size() == yVec.size());
    assert(xVec.size() == result.size());
    axpy(a, xVec.data(), yVec.data(), result.data(), xVec.size());
}
/// Compute @e r = @e b − @e Ax.
template<typename MatrixOperator, typename VecValueType>
inline void
computeResidual(const MatrixOperator& A, const VecValueType* x,
    const VecValueType* b, VecValueType* r)
{
    // Compute r = A * x.
    A.vectorMultiply(x, r);
    // Compute r = b - r.  (r appears as both input and output, which is safe
    // because LinearOp updates each element independently.)
    tbb::parallel_for(SizeRange(0, A.numRows()), LinearOp<VecValueType>(-1.0, r, b, r));
}
/// Compute @e r = @e b − @e Ax for Vectors whose sizes match the matrix
/// dimension (delegates to the raw-pointer overload).
template<typename MatrixOperator, typename T>
inline void
computeResidual(const MatrixOperator& A, const Vector<T>& x, const Vector<T>& b, Vector<T>& r)
{
    assert(x.size() == b.size());
    assert(x.size() == r.size());
    assert(x.size() == A.numRows());
    computeResidual(A, x.data(), b.data(), r.data());
}
} // namespace internal
////////////////////////////////////////
// Convenience overload of the preconditioned CG solver: delegates to the
// interruptible overload with a null interrupter (the solve cannot be
// interrupted).
template<typename PositiveDefMatrix>
inline State
solve(
    const PositiveDefMatrix& Amat,
    const Vector<typename PositiveDefMatrix::ValueType>& bVec,
    Vector<typename PositiveDefMatrix::ValueType>& xVec,
    Preconditioner<typename PositiveDefMatrix::ValueType>& precond,
    const State& termination)
{
    util::NullInterrupter interrupter;
    return solve(Amat, bVec, xVec, precond, interrupter, termination);
}
/// @brief Solve @e Ax = @e b via the preconditioned conjugate gradient method.
/// @param Amat         the positive-definite matrix @e A
/// @param bVec         the source vector @e b
/// @param xVec         on input, the initial guess for the solution @e x;
///                     on output, the computed solution
/// @param precond      the preconditioner applied to the residual each iteration
/// @param interrupter  an object with a wasInterrupted() method
/// @param termination  iteration count and error tolerances at which to stop
/// @throw ArithmeticError if the matrix and vector sizes are incompatible
/// @throw RuntimeError if the solve is interrupted
template<typename PositiveDefMatrix, typename Interrupter>
inline State
solve(
    const PositiveDefMatrix& Amat,
    const Vector<typename PositiveDefMatrix::ValueType>& bVec,
    Vector<typename PositiveDefMatrix::ValueType>& xVec,
    Preconditioner<typename PositiveDefMatrix::ValueType>& precond,
    Interrupter& interrupter,
    const State& termination)
{
    using ValueType = typename PositiveDefMatrix::ValueType;
    using VectorType = Vector<ValueType>;
    // Start from a "not converged, no work done" state.
    State result;
    result.success = false;
    result.iterations = 0;
    result.relativeError = 0.0;
    result.absoluteError = 0.0;
    const SizeType size = Amat.numRows();
    if (size == 0) {
        OPENVDB_LOG_WARN("pcg::solve(): matrix has dimension zero");
        return result;
    }
    if (size != bVec.size()) {
        // Bug fix: the message previously lacked the opening parenthesis.
        OPENVDB_THROW(ArithmeticError, "A and b have incompatible sizes ("
            << size << "x" << size << " vs. " << bVec.size() << ")");
    }
    if (size != xVec.size()) {
        // Bug fix: the message previously lacked the opening parenthesis.
        OPENVDB_THROW(ArithmeticError, "A and x have incompatible sizes ("
            << size << "x" << size << " vs. " << xVec.size() << ")");
    }
    // Temp vectors
    VectorType zVec(size); // transformed residual (M^-1 r)
    VectorType pVec(size); // search direction
    VectorType qVec(size); // A * p
    // Compute norm of B (the source); guard against a zero source so the
    // relative error below never divides by zero.
    const ValueType tmp = bVec.infNorm();
    const ValueType infNormOfB = isZero(tmp) ? ValueType(1) : tmp;
    // Compute rVec: residual = b - Ax.
    VectorType rVec(size); // vector of residuals
    internal::computeResidual(Amat, xVec, bVec, rVec);
    assert(rVec.isFinite());
    // Normalize the residual norm with the source norm and look for early out.
    result.absoluteError = static_cast<double>(rVec.infNorm());
    result.relativeError = static_cast<double>(result.absoluteError / infNormOfB);
    if (result.relativeError <= termination.relativeError) {
        result.success = true;
        return result;
    }
    // Iterations of the CG solve
    ValueType rDotZPrev(1); // inner product of <z,r>
    // Keep track of the minimum error to monitor convergence.
    ValueType minL2Error = std::numeric_limits<ValueType>::max();
    ValueType l2Error;
    int iteration = 0;
    for ( ; iteration < termination.iterations; ++iteration) {
        if (interrupter.wasInterrupted()) {
            OPENVDB_THROW(RuntimeError, "conjugate gradient solver was interrupted");
        }
        OPENVDB_LOG_DEBUG_RUNTIME("pcg::solve() " << result);
        result.iterations = iteration + 1;
        // Apply preconditioner to residual
        // z_{k} = M^-1 r_{k}
        precond.apply(rVec, zVec);
        // <r,z>
        const ValueType rDotZ = rVec.dot(zVec);
        assert(std::isfinite(rDotZ));
        if (0 == iteration) {
            // Initialize
            pVec = zVec;
        } else {
            const ValueType beta = rDotZ / rDotZPrev;
            // p = beta * p + z
            internal::axpy(beta, pVec, zVec, /*result */pVec);
        }
        // q_{k} = A p_{k}
        Amat.vectorMultiply(pVec, qVec);
        // alpha = <r_{k-1}, z_{k-1}> / <p_{k},q_{k}>
        const ValueType pAp = pVec.dot(qVec);
        assert(std::isfinite(pAp));
        const ValueType alpha = rDotZ / pAp;
        rDotZPrev = rDotZ;
        // x_{k} = x_{k-1} + alpha * p_{k}
        internal::axpy(alpha, pVec, xVec, /*result=*/xVec);
        // r_{k} = r_{k-1} - alpha_{k-1} A p_{k}
        internal::axpy(-alpha, qVec, rVec, /*result=*/rVec);
        // update tolerances
        l2Error = rVec.l2Norm();
        minL2Error = Min(l2Error, minL2Error);
        result.absoluteError = static_cast<double>(rVec.infNorm());
        result.relativeError = static_cast<double>(result.absoluteError / infNormOfB);
        if (l2Error > 2 * minL2Error) {
            // The solution started to diverge.
            result.success = false;
            break;
        }
        if (!std::isfinite(result.absoluteError)) {
            // Total divergence of solution
            result.success = false;
            break;
        }
        if (result.absoluteError <= termination.absoluteError) {
            // Convergence
            result.success = true;
            break;
        }
        if (result.relativeError <= termination.relativeError) {
            // Convergence
            result.success = true;
            break;
        }
    }
    OPENVDB_LOG_DEBUG_RUNTIME("pcg::solve() " << result);
    return result;
}
} // namespace pcg
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_MATH_CONJGRADIENT_HAS_BEEN_INCLUDED
| 56,872 | C | 31.095372 | 100 | 0.630855 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/Stats.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file Stats.h
///
/// @author Ken Museth
///
/// @brief Classes to compute statistics and histograms
#ifndef OPENVDB_MATH_STATS_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_STATS_HAS_BEEN_INCLUDED
#include <iosfwd> // for ostringstream
#include <openvdb/version.h>
#include <openvdb/Exceptions.h>
#include <iostream>
#include <iomanip>
#include <sstream>
#include <vector>
#include <functional>// for std::less
#include "Math.h"
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
/// @brief Templated class to compute the minimum and maximum values.
/// @brief Templated class to compute the minimum and maximum values.
template <typename ValueType, typename Less = std::less<ValueType> >
class MinMax
{
    using Limits = std::numeric_limits<ValueType>;
public:
    /// @brief Empty constructor
    ///
    /// @warning Only use this constructor with POD types
    MinMax() : mMin(Limits::max()), mMax(Limits::lowest())
    {
        static_assert(std::numeric_limits<ValueType>::is_specialized,
            "openvdb::math::MinMax default constructor requires a std::numeric_limits specialization");
    }
    /// @brief Construct with explicit initial bounds.
    MinMax(const ValueType &min, const ValueType &max) : mMin(min), mMax(max)
    {
    }
    /// @brief Default copy constructor
    MinMax(const MinMax &other) = default;
    /// @brief Update the bounds with a single sample.
    inline void add(const ValueType &val, const Less &less = Less())
    {
        if (less(val, mMin)) { mMin = val; }
        if (less(mMax, val)) { mMax = val; }
    }
    /// @brief Return the smallest value seen so far.
    inline const ValueType& min() const { return mMin; }
    /// @brief Return the largest value seen so far.
    inline const ValueType& max() const { return mMax; }
    /// @brief Merge the bounds tracked by @a other into this instance.
    inline void add(const MinMax& other, const Less &less = Less())
    {
        if (less(other.mMin, mMin)) { mMin = other.mMin; }
        if (less(mMax, other.mMax)) { mMax = other.mMax; }
    }
    /// @brief Print MinMax to the specified output stream.
    void print(const std::string &name= "", std::ostream &strm=std::cout, int precision=3) const
    {
        // Format into a local buffer so the caller's stream state
        // (precision, field width, etc.) is left untouched.
        std::ostringstream buffer;
        buffer << std::setprecision(precision) << std::setiosflags(std::ios::fixed);
        buffer << "MinMax ";
        if (!name.empty()) buffer << "for \"" << name << "\" ";
        buffer << "  Min=" << mMin << ", Max=" << mMax << std::endl;
        strm << buffer.str();
    }
protected:
    ValueType mMin, mMax;
};//end MinMax
/// @brief This class computes the minimum and maximum values of a population
/// of floating-point values.
/// @brief Tracks the minimum and maximum of a population of double-precision
/// samples, along with the total sample count.
class Extrema
{
public:
    /// @brief Constructor
    /// @warning The min/max values are initiated to extreme values
    Extrema(): mSize(0), mMin(std::numeric_limits<double>::max()), mMax(-mMin)
    {
    }
    /// Add a single sample.
    void add(double val)
    {
        ++mSize;
        if (val < mMin) { mMin = val; }
        if (mMax < val) { mMax = val; }
    }
    /// Add @a n samples with constant value @a val.
    void add(double val, uint64_t n)
    {
        mSize += n;
        if (val < mMin) { mMin = val; }
        if (mMax < val) { mMax = val; }
    }
    /// Return the size of the population, i.e., the total number of samples.
    inline uint64_t size() const { return mSize; }
    /// Return the minimum value.
    inline double min() const { return mMin; }
    /// Return the maximum value.
    inline double max() const { return mMax; }
    /// Return the range defined as the maximum value minus the minimum value.
    inline double range() const { return mMax - mMin; }
    /// Add the samples from the other Extrema instance (no-op if it is empty).
    void add(const Extrema& other)
    {
        if (other.mSize > 0) this->join(other);
    }
    /// @brief Print extrema to the specified output stream.
    void print(const std::string &name= "", std::ostream &strm=std::cout, int precision=3) const
    {
        // Format into a local buffer so the caller's stream state
        // (precision, field width, etc.) is left untouched.
        std::ostringstream buffer;
        buffer << std::setprecision(precision) << std::setiosflags(std::ios::fixed);
        buffer << "Extrema ";
        if (!name.empty()) buffer << "for \"" << name << "\" ";
        if (mSize > 0) {
            buffer << "with " << mSize << " samples:\n"
                   << "  Min=" << mMin
                   << ", Max=" << mMax
                   << ", Range=" << this->range() << std::endl;
        } else {
            buffer << ": no samples were added." << std::endl;
        }
        strm << buffer.str();
    }
protected:
    /// Merge a non-empty population into this one.
    inline void join(const Extrema& other)
    {
        assert(other.mSize > 0);
        mSize += other.mSize;
        if (other.mMin < mMin) { mMin = other.mMin; }
        if (mMax < other.mMax) { mMax = other.mMax; }
    }
    uint64_t mSize;
    double mMin, mMax;
};//end Extrema
/// @brief This class computes statistics (minimum value, maximum
/// value, mean, variance and standard deviation) of a population
/// of floating-point values.
///
/// @details variance = Mean[ (X-Mean[X])^2 ] = Mean[X^2] - Mean[X]^2,
/// standard deviation = sqrt(variance)
///
/// @note This class employs incremental computation and double precision.
/// @brief This class computes statistics (minimum value, maximum
/// value, mean, variance and standard deviation) of a population
/// of floating-point values.
///
/// @details variance = Mean[ (X-Mean[X])^2 ] = Mean[X^2] - Mean[X]^2,
/// standard deviation = sqrt(variance)
///
/// @note This class employs incremental computation and double precision.
class Stats : public Extrema
{
public:
    Stats(): Extrema(), mAvg(0.0), mAux(0.0)
    {
    }
    /// @brief Add a single sample, incrementally updating the running mean
    /// and the running sum of squared deviations (Welford's method).
    void add(double val)
    {
        Extrema::add(val);
        const double diff = val - mAvg;
        mAvg += diff/double(mSize);
        mAux += diff*(val - mAvg);
    }
    /// Add @a n samples with constant value @a val.
    void add(double val, uint64_t n)
    {
        const double weight = 1.0/double(mSize + n);
        const double diff = val - mAvg;
        mAvg += weight * diff * double(n);
        mAux += weight * diff * diff * double(mSize) * double(n);
        Extrema::add(val, n);
    }
    /// @brief Merge the samples from another Stats instance into this one
    /// (parallel combination of means and squared deviations).
    void add(const Stats& other)
    {
        if (other.mSize > 0) {
            const double weight = 1.0/double(mSize + other.mSize);
            const double diff = other.mAvg - mAvg;
            mAvg += weight * diff * double(other.mSize);
            mAux += other.mAux + weight * diff * diff * double(mSize) * double(other.mSize);
            Extrema::join(other);
        }
    }
    //@{
    /// Return the arithmetic mean, i.e. average, value.
    inline double avg() const { return mAvg; }
    inline double mean() const { return mAvg; }
    //@}
    //@{
    /// @brief Return the population variance.
    /// @note The unbiased sample variance = population variance * num/(num-1)
    inline double var() const
    {
        if (mSize < 2) return 0.0;
        return mAux/double(mSize);
    }
    inline double variance() const { return this->var(); }
    //@}
    //@{
    /// @brief Return the standard deviation (=Sqrt(variance)) as
    /// defined from the (biased) population variance.
    inline double std() const { return sqrt(this->var()); }
    inline double stdDev() const { return this->std(); }
    //@}
    /// @brief Print statistics to the specified output stream.
    void print(const std::string &name= "", std::ostream &strm=std::cout, int precision=3) const
    {
        // Format into a local buffer so the caller's stream state
        // (precision, field width, etc.) is left untouched.
        std::ostringstream buffer;
        buffer << std::setprecision(precision) << std::setiosflags(std::ios::fixed);
        buffer << "Statistics ";
        if (!name.empty()) buffer << "for \"" << name << "\" ";
        if (mSize > 0) {
            buffer << "with " << mSize << " samples:\n"
                   << "  Min=" << mMin
                   << ", Max=" << mMax
                   << ", Ave=" << mAvg
                   << ", Std=" << this->stdDev()
                   << ", Var=" << this->variance() << std::endl;
        } else {
            buffer << ": no samples were added." << std::endl;
        }
        strm << buffer.str();
    }
protected:
    using Extrema::mSize;
    using Extrema::mMin;
    using Extrema::mMax;
    double mAvg, mAux;
}; // end Stats
////////////////////////////////////////
/// @brief This class computes a histogram, with a fixed interval width,
/// of a population of floating-point values.
/// @brief This class computes a histogram, with a fixed interval width,
/// of a population of floating-point values.
class Histogram
{
public:
    /// @brief Construct with given minimum and maximum values and the given bin count.
    /// @throws ValueError if the padded range is empty or @a numBins is zero.
    Histogram(double min, double max, size_t numBins = 10)
        : mSize(0), mMin(min), mMax(max + 1e-10),
          // Bug fix: mDelta was previously computed from the unpadded range
          // (max-min), which is inconsistent with the Stats-based constructor
          // below and divides by zero when min == max.
          mDelta(double(numBins)/(mMax-mMin)), mBins(numBins)
    {
        if ( mMax <= mMin ) {
            OPENVDB_THROW(ValueError, "Histogram: expected min < max");
        } else if ( numBins == 0 ) {
            OPENVDB_THROW(ValueError, "Histogram: expected at least one bin");
        }
        for (size_t i=0; i<numBins; ++i) mBins[i]=0;
    }
    /// @brief Construct with the given bin count and with minimum and maximum values
    /// taken from a Stats object.
    /// @throws ValueError if the padded range is empty or @a numBins is zero.
    Histogram(const Stats& s, size_t numBins = 10):
        mSize(0), mMin(s.min()), mMax(s.max()+1e-10),
        mDelta(double(numBins)/(mMax-mMin)), mBins(numBins)
    {
        if ( mMax <= mMin ) {
            OPENVDB_THROW(ValueError, "Histogram: expected min < max");
        } else if ( numBins == 0 ) {
            OPENVDB_THROW(ValueError, "Histogram: expected at least one bin");
        }
        for (size_t i=0; i<numBins; ++i) mBins[i]=0;
    }
    /// @brief Add @a n samples with constant value @a val, provided that the
    /// @a val falls within this histogram's value range.
    /// @return @c true if the sample value falls within this histogram's value range.
    inline bool add(double val, uint64_t n = 1)
    {
        if (val<mMin || val>mMax) return false;
        // Bug fix: clamp the bin index.  A value equal to mMax previously
        // computed an index one past the end of mBins (out-of-bounds write).
        size_t bin = size_t(mDelta*(val-mMin));
        if (bin >= mBins.size()) bin = mBins.size() - 1;
        mBins[bin] += n;
        mSize += n;
        return true;
    }
    /// @brief Add all the contributions from the other histogram, provided that
    /// it has the same configuration as this histogram.
    /// @return @c false (and change nothing) if the configurations differ.
    bool add(const Histogram& other)
    {
        if (!isApproxEqual(mMin, other.mMin) || !isApproxEqual(mMax, other.mMax) ||
            mBins.size() != other.mBins.size()) return false;
        for (size_t i=0, e=mBins.size(); i!=e; ++i) mBins[i] += other.mBins[i];
        mSize += other.mSize;
        return true;
    }
    /// Return the number of bins in this histogram.
    inline size_t numBins() const { return mBins.size(); }
    /// Return the lower bound of this histogram's value range.
    inline double min() const { return mMin; }
    /// Return the upper bound of this histogram's value range.
    inline double max() const { return mMax; }
    /// Return the minimum value in the <i>n</i>th bin.
    inline double min(int n) const { return mMin+n/mDelta; }
    /// Return the maximum value in the <i>n</i>th bin.
    inline double max(int n) const { return mMin+(n+1)/mDelta; }
    /// Return the number of samples in the <i>n</i>th bin.
    inline uint64_t count(int n) const { return mBins[n]; }
    /// Return the population size, i.e., the total number of samples.
    inline uint64_t size() const { return mSize; }
    /// Print the histogram to the specified output stream.
    void print(const std::string& name = "", std::ostream& strm = std::cout) const
    {
        // Write to a temporary string stream so as not to affect the state
        // (precision, field width, etc.) of the output stream.
        std::ostringstream os;
        os << std::setprecision(6) << std::setiosflags(std::ios::fixed) << std::endl;
        os << "Histogram ";
        if (!name.empty()) os << "for \"" << name << "\" ";
        if (mSize > 0) {
            os << "with " << mSize << " samples:\n";
            os << "==============================================================\n";
            os << "||  #   |       Min      |       Max      | Frequency |  %  ||\n";
            os << "==============================================================\n";
            for (int i = 0, e = int(mBins.size()); i != e; ++i) {
                os << "|| " << std::setw(4) << i << " | " << std::setw(14) << this->min(i) << " | "
                   << std::setw(14) << this->max(i) << " | " << std::setw(9) << mBins[i] << " | "
                   << std::setw(3) << (100*mBins[i]/mSize) << " ||\n";
            }
            os << "==============================================================\n";
        } else {
            os << ": no samples were added." << std::endl;
        }
        strm << os.str();
    }
private:
    uint64_t mSize;              // total number of samples added
    double mMin, mMax, mDelta;   // mDelta = bins per unit of value (1/binWidth)
    std::vector<uint64_t> mBins; // per-bin sample counts
};// end Histogram
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_MATH_STATS_HAS_BEEN_INCLUDED
| 12,988 | C | 32.913838 | 113 | 0.559747 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/LegacyFrustum.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file math/LegacyFrustum.h
#ifndef OPENVDB_MATH_LEGACYFRUSTUM_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_LEGACYFRUSTUM_HAS_BEEN_INCLUDED
#include <iostream>
#include <openvdb/Types.h> // for Real typedef
#include "Coord.h"
#include "Mat4.h"
#include "Vec3.h"
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
namespace internal {
/// @brief LegacyFrustum class used at DreamWorks for converting old vdb files.
class LegacyFrustum
{
public:
    /// @brief Deserialize a legacy frustum transform from a binary stream.
    /// @details The reads below must occur in exactly this order, since the
    /// legacy file layout is a fixed sequence of raw structs: two Vec3i
    /// extents, a series of 4x4 double matrices, six window values and a
    /// padding value.  Several of the values are read only to advance the
    /// stream and are then discarded.
    /// @note NOTE(review): the raw reinterpret_cast reads assume the stream
    /// uses the native endianness and struct layout -- verify for the
    /// platforms on which old files are converted.
    LegacyFrustum(std::istream& is)
    {
        // First read in the old transform's base class.
        // the "extents"
        Vec3i tmpMin, tmpMax;
        is.read(reinterpret_cast<char*>(&tmpMin), sizeof(Vec3i::ValueType) * 3);
        is.read(reinterpret_cast<char*>(&tmpMax), sizeof(Vec3i::ValueType) * 3);
        Coord tmpMinCoord(tmpMin);
        Coord tmpMaxCoord(tmpMax);
        // set the extents
        mExtents = CoordBBox(tmpMinCoord, tmpMaxCoord);
        // read the old-frustum class member data
        //Mat4d tmpW2C;
        Mat4d tmpW2C, tmpC2S, tmpS2C, tmpWorldToLocal;
        Mat4d tmpS2U, tmpXYLocalToUnit, tmpZLocalToUnit;
        Real tmpWindow[6];
        Real tmpPadding;
        //Mat4d tmpXYUnitToLocal, tmpZUnitToLocal
        // read in each matrix (16 doubles each); only mC2W, mLocalToWorld,
        // mXYUnitToLocal and mZUnitToLocal are retained as members.
        is.read(reinterpret_cast<char*>(&tmpW2C),
            sizeof(Mat4d::value_type) * Mat4d::size * Mat4d::size);
        is.read(reinterpret_cast<char*>(&mC2W),
            sizeof(Mat4d::value_type) * Mat4d::size * Mat4d::size);
        is.read(reinterpret_cast<char*>(&tmpC2S),
            sizeof(Mat4d::value_type) * Mat4d::size * Mat4d::size);
        is.read(reinterpret_cast<char*>(&tmpS2C),
            sizeof(Mat4d::value_type) * Mat4d::size * Mat4d::size);
        is.read(reinterpret_cast<char*>(&tmpWorldToLocal),
            sizeof(Mat4d::value_type) * Mat4d::size * Mat4d::size);
        is.read(reinterpret_cast<char*>(&mLocalToWorld),
            sizeof(Mat4d::value_type) * Mat4d::size * Mat4d::size);
        // the six "window" values; indices 4 and 5 are the near/far planes
        is.read(reinterpret_cast<char*>(&tmpWindow[0]), sizeof(Real));
        is.read(reinterpret_cast<char*>(&tmpWindow[1]), sizeof(Real));
        is.read(reinterpret_cast<char*>(&tmpWindow[2]), sizeof(Real));
        is.read(reinterpret_cast<char*>(&tmpWindow[3]), sizeof(Real));
        is.read(reinterpret_cast<char*>(&tmpWindow[4]), sizeof(Real));
        is.read(reinterpret_cast<char*>(&tmpWindow[5]), sizeof(Real));
        is.read(reinterpret_cast<char*>(&tmpPadding), sizeof(Real));
        is.read(reinterpret_cast<char*>(&tmpS2U),
            sizeof(Mat4d::value_type) * Mat4d::size * Mat4d::size);
        is.read(reinterpret_cast<char*>(&mXYUnitToLocal),
            sizeof(Mat4d::value_type) * Mat4d::size * Mat4d::size);
        is.read(reinterpret_cast<char*>(&tmpXYLocalToUnit),
            sizeof(Mat4d::value_type) * Mat4d::size * Mat4d::size);
        is.read(reinterpret_cast<char*>(&mZUnitToLocal),
            sizeof(Mat4d::value_type) * Mat4d::size * Mat4d::size);
        is.read(reinterpret_cast<char*>(&tmpZLocalToUnit),
            sizeof(Mat4d::value_type) * Mat4d::size * Mat4d::size);
        mNearPlane = tmpWindow[4];
        mFarPlane = tmpWindow[5];
        // Look up the world space corners of the
        // frustum grid.
        mFrNearOrigin = unitToLocalFrustum(Vec3R(0,0,0));
        mFrFarOrigin = unitToLocalFrustum(Vec3R(0,0,1));
        // Basis vectors spanning the near and far planes in local space.
        Vec3d frNearXTip = unitToLocalFrustum(Vec3R(1,0,0));
        Vec3d frNearYTip = unitToLocalFrustum(Vec3R(0,1,0));
        mFrNearXBasis = frNearXTip - mFrNearOrigin;
        mFrNearYBasis = frNearYTip - mFrNearOrigin;
        Vec3R frFarXTip = unitToLocalFrustum(Vec3R(1,0,1));
        Vec3R frFarYTip = unitToLocalFrustum(Vec3R(0,1,1));
        mFrFarXBasis = frFarXTip - mFrFarOrigin;
        mFrFarYBasis = frFarYTip - mFrFarOrigin;
    }
    ~LegacyFrustum() {}
    /// Return the camera-to-world matrix read from the stream.
    const Mat4d& getCamXForm() const {return mC2W; }
    /// Return the distance between the near and far planes.
    double getDepth() const {return (mFarPlane - mNearPlane); }
    /// Return the ratio of the near-plane width to the far-plane width.
    double getTaper() const {
        return getNearPlaneWidth() / getFarPlaneWidth();
    }
    /// Return the world-space width of the frustum's near plane.
    double getNearPlaneWidth() const {
        double nearPlaneWidth = (unitToWorld(Vec3d(0,0,0)) - unitToWorld(Vec3d(1,0,0))).length();
        return nearPlaneWidth;
    }
    /// Return the world-space width of the frustum's far plane.
    double getFarPlaneWidth() const {
        double farPlaneWidth = (unitToWorld(Vec3d(0,0,1)) - unitToWorld(Vec3d(1,0,1))).length();
        return farPlaneWidth;
    }
    /// Return the distance to the near plane.
    double getNearPlaneDist() const { return mNearPlane; }
    /// Return the index-space extents read from the stream.
    const CoordBBox& getBBox() const {return mExtents; }
    /// Map a point in the unit cube [0,1]^3 to world space.
    Vec3d unitToWorld(const Vec3d& in) const {return mLocalToWorld.transform( unitToLocal(in) ); }
private:
    // Not constructible without a stream to read from.
    LegacyFrustum() {}
    /// Map a point in the unit cube to frustum local space.
    Vec3d unitToLocal(const Vec3d& U) const {
        // We first find the local space coordinates
        // of the unit point projected onto the near
        // and far planes of the frustum by using a
        // linear combination of the planes basis vectors
        Vec3d nearLS = ( U[0] * mFrNearXBasis ) + ( U[1] * mFrNearYBasis ) + mFrNearOrigin;
        Vec3d farLS  = ( U[0] * mFrFarXBasis  ) + ( U[1] * mFrFarYBasis  ) + mFrFarOrigin;
        // then we lerp the two ws points in frustum z space
        return U[2] * farLS + ( 1.0 - U[2] ) * nearLS;
    }
    /// Map a unit-cube point through the serialized z and xy unit-to-local
    /// matrices (homogeneous transform via transformH).
    Vec3d unitToLocalFrustum(const Vec3d& u) const {
        Vec3d fzu = mZUnitToLocal.transformH(u);
        Vec3d fu = u;
        fu[2] = fzu.z();
        return mXYUnitToLocal.transformH(fu);
    }
private:
    Mat4d mC2W, mLocalToWorld, mXYUnitToLocal, mZUnitToLocal;
    CoordBBox mExtents;
    Vec3d mFrNearXBasis, mFrNearYBasis, mFrFarXBasis, mFrFarYBasis;
    Vec3d mFrNearOrigin, mFrFarOrigin;
    double mNearPlane, mFarPlane;
};
} // namespace internal
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_MATH_LEGACYFRUSTUM_HAS_BEEN_INCLUDED
| 5,948 | C | 34.837349 | 98 | 0.64341 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/Transform.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_MATH_TRANSFORM_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_TRANSFORM_HAS_BEEN_INCLUDED
#include "Maps.h"
#include <openvdb/Types.h>
#include <iosfwd>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
// Forward declaration
class Transform;
// Utility methods
/// @brief Calculate an axis-aligned bounding box in index space from an
/// axis-aligned bounding box in world space.
/// @see Transform::worldToIndex(const BBoxd&) const
OPENVDB_API void
calculateBounds(const Transform& t, const Vec3d& minWS, const Vec3d& maxWS,
Vec3d& minIS, Vec3d& maxIS);
/// @todo Calculate an axis-aligned bounding box in index space from a
/// bounding sphere in world space.
//void calculateBounds(const Transform& t, const Vec3d& center, const Real radius,
// Vec3d& minIS, Vec3d& maxIS);
////////////////////////////////////////
/// @class Transform
/// @brief A Transform relates index space (discrete voxel coordinates) to
/// world space through a shared MapBase implementation.
class OPENVDB_API Transform
{
public:
    using Ptr = SharedPtr<Transform>;
    using ConstPtr = SharedPtr<const Transform>;
    /// Construct with a unit-scale linear (ScaleMap) transform by default.
    Transform(): mMap(MapBase::Ptr(new ScaleMap())) {}
    /// Construct around an existing transformation map.
    Transform(const MapBase::Ptr&);
    Transform(const Transform&);
    ~Transform() {}
    /// Return a deep copy of this transform (the underlying map is copied).
    Ptr copy() const { return Ptr(new Transform(mMap->copy())); }
    //@{
    /// @brief Create and return a shared pointer to a new transform.
    static Transform::Ptr createLinearTransform(double voxelSize = 1.0);
    static Transform::Ptr createLinearTransform(const Mat4R&);
    static Transform::Ptr createFrustumTransform(const BBoxd&, double taper,
        double depth, double voxelSize = 1.0);
    //@}
    /// Return @c true if the transformation map is exclusively linear/affine.
    bool isLinear() const { return mMap->isLinear(); }
    /// Return @c true if the transform is equivalent to an identity.
    bool isIdentity() const ;
    /// Return the transformation map's type-name
    Name mapType() const { return mMap->type(); }
    //@{
    /// @brief Update the linear (affine) map by prepending or
    /// postfixing the appropriate operation. In the case of
    /// a frustum, the pre-operations apply to the linear part
    /// of the transform and not the entire transform, while the
    /// post-operations are always applied last.
    void preRotate(double radians, const Axis axis = X_AXIS);
    void preTranslate(const Vec3d&);
    void preScale(const Vec3d&);
    void preScale(double);
    void preShear(double shear, Axis axis0, Axis axis1);
    void preMult(const Mat4d&);
    void preMult(const Mat3d&);
    void postRotate(double radians, const Axis axis = X_AXIS);
    void postTranslate(const Vec3d&);
    void postScale(const Vec3d&);
    void postScale(double);
    void postShear(double shear, Axis axis0, Axis axis1);
    void postMult(const Mat4d&);
    void postMult(const Mat3d&);
    //@}
    /// Return the size of a voxel using the linear component of the map.
    Vec3d voxelSize() const { return mMap->voxelSize(); }
    /// @brief Return the size of a voxel at position (x, y, z).
    /// @note Maps that have a nonlinear component (e.g., perspective and frustum maps)
    /// have position-dependent voxel sizes.
    Vec3d voxelSize(const Vec3d& xyz) const { return mMap->voxelSize(xyz); }
    /// Return the voxel volume of the linear component of the map.
    double voxelVolume() const { return mMap->determinant(); }
    /// Return the voxel volume at position (x, y, z).
    double voxelVolume(const Vec3d& xyz) const { return mMap->determinant(xyz); }
    /// Return true if the voxels in world space are uniformly sized cubes
    bool hasUniformScale() const { return mMap->hasUniformScale(); }
    //@{
    /// @brief Apply this transformation to the given coordinates.
    Vec3d indexToWorld(const Vec3d& xyz) const { return mMap->applyMap(xyz); }
    Vec3d indexToWorld(const Coord& ijk) const { return mMap->applyMap(ijk.asVec3d()); }
    Vec3d worldToIndex(const Vec3d& xyz) const { return mMap->applyInverseMap(xyz); }
    // Cell-centered snaps to the nearest voxel; node-centered takes the floor.
    Coord worldToIndexCellCentered(const Vec3d& xyz) const {return Coord::round(worldToIndex(xyz));}
    Coord worldToIndexNodeCentered(const Vec3d& xyz) const {return Coord::floor(worldToIndex(xyz));}
    //@}
    //@{
    /// @brief Apply this transformation to the given index-space bounding box.
    /// @return an axis-aligned world-space bounding box
    BBoxd indexToWorld(const CoordBBox&) const;
    BBoxd indexToWorld(const BBoxd&) const;
    //@}
    //@{
    /// @brief Apply the inverse of this transformation to the given world-space bounding box.
    /// @return an axis-aligned index-space bounding box
    BBoxd worldToIndex(const BBoxd&) const;
    CoordBBox worldToIndexCellCentered(const BBoxd&) const;
    CoordBBox worldToIndexNodeCentered(const BBoxd&) const;
    //@}
    //@{
    /// Return a base pointer to the transformation map.
    MapBase::ConstPtr baseMap() const { return mMap; }
    MapBase::Ptr baseMap() { return mMap; }
    //@}
    //@{
    /// @brief Return the result of downcasting the base map pointer to a
    /// @c MapType pointer, or return a null pointer if the types are incompatible.
    template<typename MapType> typename MapType::Ptr map();
    template<typename MapType> typename MapType::ConstPtr map() const;
    template<typename MapType> typename MapType::ConstPtr constMap() const;
    //@}
    /// Unserialize this transform from the given stream.
    void read(std::istream&);
    /// Serialize this transform to the given stream.
    void write(std::ostream&) const;
    /// @brief Print a description of this transform.
    /// @param os a stream to which to write textual information
    /// @param indent a string with which to prefix each line of text
    void print(std::ostream& os = std::cout, const std::string& indent = "") const;
    /// Two transforms are equal when their underlying maps compare equal.
    bool operator==(const Transform& other) const;
    inline bool operator!=(const Transform& other) const { return !(*this == other); }
private:
    MapBase::Ptr mMap;
}; // class Transform
OPENVDB_API std::ostream& operator<<(std::ostream&, const Transform&);
////////////////////////////////////////
/// @brief Downcast the stored map to @c MapType.
/// @return a @c MapType pointer, or a null pointer if the stored map's
/// runtime type name does not match @c MapType.
template<typename MapType>
inline typename MapType::Ptr
Transform::map()
{
    const bool typeMatches = (mMap->type() == MapType::mapType());
    if (!typeMatches) return typename MapType::Ptr();
    return StaticPtrCast<MapType>(mMap);
}
template<typename MapType>
inline typename MapType::ConstPtr
Transform::map() const
{
return ConstPtrCast<const MapType>(
const_cast<Transform*>(this)->map<MapType>());
}
template<typename MapType>
inline typename MapType::ConstPtr
Transform::constMap() const
{
return map<MapType>();
}
////////////////////////////////////////
/// Helper function used internally by processTypedMap()
/// @brief Helper used internally by processTypedMap(): resolve the
/// transform's map to its concrete type and invoke the functor on it.
template<typename ResolvedMapType, typename OpType>
inline void
doProcessTypedMap(Transform& transform, OpType& op)
{
    op.template operator()<ResolvedMapType>(*transform.map<ResolvedMapType>());
}
/// Helper function used internally by processTypedMap()
/// @brief Const-transform overload of the processTypedMap() helper.
template<typename ResolvedMapType, typename OpType>
inline void
doProcessTypedMap(const Transform& transform, OpType& op)
{
    op.template operator()<ResolvedMapType>(*transform.map<ResolvedMapType>());
}
/// @brief Utility function that, given a generic map pointer,
/// calls a functor on the fully-resolved map
///
/// Usage:
/// @code
/// struct Foo {
/// template<typename MapT>
/// void operator()(const MapT& map) const { blah }
/// };
///
/// processTypedMap(myMap, Foo());
/// @endcode
///
/// @return @c false if the grid type is unknown or unhandled.
template<typename TransformType, typename OpType>
bool
processTypedMap(TransformType& transform, OpType& op)
{
using namespace openvdb;
const Name mapType = transform.mapType();
if (mapType == UniformScaleMap::mapType()) {
doProcessTypedMap<UniformScaleMap, OpType>(transform, op);
} else if (mapType == UniformScaleTranslateMap::mapType()) {
doProcessTypedMap<UniformScaleTranslateMap, OpType>(transform, op);
} else if (mapType == ScaleMap::mapType()) {
doProcessTypedMap<ScaleMap, OpType>(transform, op);
} else if (mapType == ScaleTranslateMap::mapType()) {
doProcessTypedMap<ScaleTranslateMap, OpType>(transform, op);
} else if (mapType == UnitaryMap::mapType()) {
doProcessTypedMap<UnitaryMap, OpType>(transform, op);
} else if (mapType == AffineMap::mapType()) {
doProcessTypedMap<AffineMap, OpType>(transform, op);
} else if (mapType == TranslationMap::mapType()) {
doProcessTypedMap<TranslationMap, OpType>(transform, op);
} else if (mapType == NonlinearFrustumMap::mapType()) {
doProcessTypedMap<NonlinearFrustumMap, OpType>(transform, op);
} else {
return false;
}
return true;
}
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_MATH_TRANSFORM_HAS_BEEN_INCLUDED
| 9,098 | C | 32.452206 | 100 | 0.685425 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/Maps.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#include "Maps.h"
#include <tbb/mutex.h>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
namespace {
using Mutex = tbb::mutex;
using Lock = Mutex::scoped_lock;
// Global mutex guarding construction of and all access to the map registry.
// Declare this at file scope to ensure thread-safe initialization.
// NOTE: Do *NOT* move this into Maps.h or else we will need to pull in
// Windows.h with things like 'rad2' defined!
Mutex sInitMapRegistryMutex;
} // unnamed namespace
////////////////////////////////////////
// Caller is responsible for calling this function serially.
// Meyers singleton: the registry is lazily constructed on first use.
// Caller is responsible for serializing calls (see instance(), which
// wraps this in a lock).
MapRegistry*
MapRegistry::staticInstance()
{
    static MapRegistry registry;
    return &registry;
}
// Thread-safe accessor for the registry singleton.
MapRegistry*
MapRegistry::instance()
{
    Lock lock(sInitMapRegistryMutex);
    MapRegistry* const registry = staticInstance();
    return registry;
}
// Instantiate a map of the given registered type name.
// Throws LookupError if no factory has been registered for the name.
MapBase::Ptr
MapRegistry::createMap(const Name& name)
{
    Lock lock(sInitMapRegistryMutex);
    const MapDictionary& dict = staticInstance()->mMap;
    MapDictionary::const_iterator iter = dict.find(name);
    if (iter == dict.end()) {
        OPENVDB_THROW(LookupError, "Cannot create map of unregistered type " << name);
    }
    // Invoke the registered factory.
    return (iter->second)();
}
bool
MapRegistry::isRegistered(const Name& name)
{
Lock lock(sInitMapRegistryMutex);
return (staticInstance()->mMap.find(name) != staticInstance()->mMap.end());
}
void
MapRegistry::registerMap(const Name& name, MapBase::MapFactory factory)
{
Lock lock(sInitMapRegistryMutex);
if (staticInstance()->mMap.find(name) != staticInstance()->mMap.end()) {
OPENVDB_THROW(KeyError, "Map type " << name << " is already registered");
}
staticInstance()->mMap[name] = factory;
}
void
MapRegistry::unregisterMap(const Name& name)
{
Lock lock(sInitMapRegistryMutex);
staticInstance()->mMap.erase(name);
}
void
MapRegistry::clear()
{
Lock lock(sInitMapRegistryMutex);
staticInstance()->mMap.clear();
}
////////////////////////////////////////
// Utility methods for decomposition
// Build a SymmetricMap from a symmetric 3x3 matrix by diagonalizing it
// and expressing the result as rotation * scale * inverse rotation.
SymmetricMap::Ptr
createSymmetricMap(const Mat3d& m)
{
    // A symmetric map can only be constructed from a symmetric matrix
    // (the previous comment here incorrectly described a rotation/reflection test).
    if (!isSymmetric(m)) {
        OPENVDB_THROW(ArithmeticError,
            "3x3 Matrix initializing symmetric map was not symmetric");
    }
    // Diagonalize: m = U * diag(eigenValues) * U^T.
    Vec3d eigenValues;
    Mat3d Umatrix;
    bool converged = math::diagonalizeSymmetricMatrix(m, Umatrix, eigenValues);
    if (!converged) {
        OPENVDB_THROW(ArithmeticError, "Diagonalization of the symmetric matrix failed");
    }
    // Compose rotation-then-scale, followed by the inverse rotation
    // (for a unitary matrix the inverse is the transpose).
    UnitaryMap rotation(Umatrix);
    ScaleMap diagonal(eigenValues);
    CompoundMap<UnitaryMap, ScaleMap> first(rotation, diagonal);
    UnitaryMap rotationInv(Umatrix.transpose());
    return SymmetricMap::Ptr( new SymmetricMap(first, rotationInv));
}
// Build a PolarDecomposedMap (symmetric * unitary) from a general 3x3 matrix.
// Throws ArithmeticError if the polar decomposition fails.
PolarDecomposedMap::Ptr
createPolarDecomposedMap(const Mat3d& m)
{
    // Because our internal library left-multiplies vectors against matrices
    // we are constructing M = Symmetric * Unitary instead of the more
    // standard M = Unitary * Symmetric
    Mat3d unitary, symmetric, mat3 = m.transpose();
    // factor mat3 = U * S where U is unitary and S is symmetric
    bool gotPolar = math::polarDecomposition(mat3, unitary, symmetric);
    if (!gotPolar) {
        OPENVDB_THROW(ArithmeticError, "Polar decomposition of transform failed");
    }
    // put the result in a polar map and then copy it into the output polar
    UnitaryMap unitary_map(unitary.transpose());
    SymmetricMap::Ptr symmetric_map = createSymmetricMap(symmetric);
    return PolarDecomposedMap::Ptr(new PolarDecomposedMap(*symmetric_map, unitary_map));
}
// Decompose an affine 4x4 matrix into symmetric * (unitary + translation).
// Throws ArithmeticError if the matrix is not affine.
FullyDecomposedMap::Ptr
createFullyDecomposedMap(const Mat4d& m)
{
    if (!isAffine(m)) {
        OPENVDB_THROW(ArithmeticError,
            "4x4 Matrix initializing Decomposition map was not affine");
    }
    // Split off the translation, polar-decompose the 3x3 part, then
    // recombine the unitary factor with the translation.
    TranslationMap translate(m.getTranslation());
    PolarDecomposedMap::Ptr polar = createPolarDecomposedMap(m.getMat3());
    UnitaryAndTranslationMap rotationAndTranslate(polar->secondMap(), translate);
    return FullyDecomposedMap::Ptr(new FullyDecomposedMap(polar->firstMap(), rotationAndTranslate));
}
// Reduce an AffineMap to the cheapest equivalent specialized map:
// a (Uniform)ScaleMap, a (Uniform)ScaleTranslateMap, or, failing that,
// the original affine map itself.
MapBase::Ptr
simplify(AffineMap::Ptr affine)
{
    if (affine->isScale()) {
        // Pure scale: probe the scale by mapping the unit vector.
        const Vec3d s = affine->applyMap(Vec3d(1,1,1));
        const bool uniform = isApproxEqual(s[0], s[1]) && isApproxEqual(s[0], s[2]);
        return uniform ? MapBase::Ptr(new UniformScaleMap(s[0]))
                       : MapBase::Ptr(new ScaleMap(s));
    }
    if (affine->isScaleTranslate()) {
        // Scale plus translation: probe both by mapping the origin and unit vector.
        const Vec3d t = affine->applyMap(Vec3d(0,0,0));
        const Vec3d s = affine->applyMap(Vec3d(1,1,1)) - t;
        const bool uniform = isApproxEqual(s[0], s[1]) && isApproxEqual(s[0], s[2]);
        return uniform ? MapBase::Ptr(new UniformScaleTranslateMap(s[0], t))
                       : MapBase::Ptr(new ScaleTranslateMap(s, t));
    }
    // Could not simplify the general affine map.
    return StaticPtrCast<MapBase, AffineMap>(affine);
}
Mat4d
approxInverse(const Mat4d& mat4d)
{
    // Attempt the exact inverse first, when the determinant is comfortably nonzero.
    if (std::abs(mat4d.det()) >= 3 * math::Tolerance<double>::value()) {
        try {
            return mat4d.inverse();
        } catch (ArithmeticError&) {
            // Mat4 code couldn't invert; fall through to the approximate path.
        }
    }
    const Mat3d mat3 = mat4d.getMat3();
    const Mat3d mat3T = mat3.transpose();
    const Vec3d trans = mat4d.getTranslation();
    // Absolute tolerance used for the symmetry test below.
    const double tol = 1.e-6;
    // The pseudo-inverse is only constructed for a symmetric 3x3 part.
    bool symmetric = true;
    for (int i = 0; i < 3; ++i) {
        for (int j = 0; j < 3; ++j) {
            if (!isApproxEqual(mat3[i][j], mat3T[i][j], tol)) {
                symmetric = false;
            }
        }
    }
    if (!symmetric) {
        // Not symmetric: zero out the 3x3 block and just reverse the translation.
        Mat4d result = Mat4d::zero();
        result.setTranslation(-trans);
        result[3][3] = 1.f;
        return result;
    }
    // Symmetric: build the pseudo-inverse from the eigen-decomposition.
    Mat3d eigenVectors;
    Vec3d eigenValues;
    diagonalizeSymmetricMatrix(mat3, eigenVectors, eigenValues);
    Mat3d d = Mat3d::identity();
    for (int i = 0; i < 3; ++i) {
        // Invert only well-conditioned eigenvalues; zero out near-singular ones.
        d[i][i] = (std::abs(eigenValues[i]) < 10.0 * math::Tolerance<double>::value())
            ? 0.f
            : 1.f / eigenValues[i];
    }
    // Assemble the pseudo-inverse and the reversed translation.
    const Mat3d pseudoInv = eigenVectors * d * eigenVectors.transpose();
    Mat4d result = Mat4d::identity();
    result.setMat3(pseudoInv);
    result.setTranslation(-trans * pseudoInv);
    return result;
}
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
| 7,031 | C++ | 25.737642 | 100 | 0.643863 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/Maps.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file math/Maps.h
#ifndef OPENVDB_MATH_MAPS_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_MAPS_HAS_BEEN_INCLUDED
#include "Math.h"
#include "Mat4.h"
#include "Vec3.h"
#include "BBox.h"
#include "Coord.h"
#include <openvdb/io/io.h> // for io::getFormatVersion()
#include <openvdb/util/Name.h>
#include <openvdb/Types.h>
#include <cmath> // for std::abs()
#include <iostream>
#include <map>
#include <string>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
////////////////////////////////////////
/// Forward declarations of the different map types
class MapBase;
class ScaleMap;
class TranslationMap;
class ScaleTranslateMap;
class UniformScaleMap;
class UniformScaleTranslateMap;
class AffineMap;
class UnitaryMap;
class NonlinearFrustumMap;
template<typename T1, typename T2> class CompoundMap;
using UnitaryAndTranslationMap = CompoundMap<UnitaryMap, TranslationMap>;
using SpectralDecomposedMap = CompoundMap<CompoundMap<UnitaryMap, ScaleMap>, UnitaryMap>;
using SymmetricMap = SpectralDecomposedMap;
using FullyDecomposedMap = CompoundMap<SymmetricMap, UnitaryAndTranslationMap>;
using PolarDecomposedMap = CompoundMap<SymmetricMap, UnitaryMap>;
////////////////////////////////////////
/// Map traits
/// @brief @c true for map types whose transform is linear.
template<typename T> struct is_linear { static const bool value = false; };
template<> struct is_linear<AffineMap> { static const bool value = true; };
template<> struct is_linear<ScaleMap> { static const bool value = true; };
template<> struct is_linear<UniformScaleMap> { static const bool value = true; };
template<> struct is_linear<UnitaryMap> { static const bool value = true; };
template<> struct is_linear<TranslationMap> { static const bool value = true; };
template<> struct is_linear<ScaleTranslateMap> { static const bool value = true; };
template<> struct is_linear<UniformScaleTranslateMap> { static const bool value = true; };
// A compound map is linear only if both of its components are linear.
template<typename T1, typename T2> struct is_linear<CompoundMap<T1, T2> > {
    static const bool value = is_linear<T1>::value && is_linear<T2>::value;
};
/// @brief @c true for map types that scale uniformly (no translation).
template<typename T> struct is_uniform_scale { static const bool value = false; };
template<> struct is_uniform_scale<UniformScaleMap> { static const bool value = true; };
/// @brief @c true for map types that combine a uniform scale with a translation
/// (a pure translation counts, with unit scale).
template<typename T> struct is_uniform_scale_translate { static const bool value = false; };
template<> struct is_uniform_scale_translate<TranslationMap> { static const bool value = true; };
template<> struct is_uniform_scale_translate<UniformScaleTranslateMap> {
    static const bool value = true;
};
/// @brief @c true for map types that scale, possibly non-uniformly.
template<typename T> struct is_scale { static const bool value = false; };
template<> struct is_scale<ScaleMap> { static const bool value = true; };
/// @brief @c true for map types that combine a (possibly non-uniform) scale
/// with a translation.
template<typename T> struct is_scale_translate { static const bool value = false; };
template<> struct is_scale_translate<ScaleTranslateMap> { static const bool value = true; };
/// @brief @c true for map types whose Jacobian is a uniform diagonal matrix.
template<typename T> struct is_uniform_diagonal_jacobian {
    static const bool value = is_uniform_scale<T>::value || is_uniform_scale_translate<T>::value;
};
/// @brief @c true for map types whose Jacobian is a diagonal matrix.
template<typename T> struct is_diagonal_jacobian {
    static const bool value = is_scale<T>::value || is_scale_translate<T>::value;
};
////////////////////////////////////////
/// Utility methods
/// @brief Create a SymmetricMap from a symmetric matrix.
/// Decomposes the map into Rotation Diagonal Rotation^T
OPENVDB_API SharedPtr<SymmetricMap> createSymmetricMap(const Mat3d& m);
/// @brief General decomposition of a Matrix into a Unitary (e.g. rotation)
/// following a Symmetric (e.g. stretch & shear)
OPENVDB_API SharedPtr<FullyDecomposedMap> createFullyDecomposedMap(const Mat4d& m);
/// @brief Decomposes a general linear into translation following polar decomposition.
///
/// T U S where:
///
/// T: Translation
/// U: Unitary (rotation or reflection)
/// S: Symmetric
///
/// @note: the Symmetric is automatically decomposed into Q D Q^T, where
/// Q is rotation and D is diagonal.
OPENVDB_API SharedPtr<PolarDecomposedMap> createPolarDecomposedMap(const Mat3d& m);
/// @brief reduces an AffineMap to a ScaleMap or a ScaleTranslateMap when it can
OPENVDB_API SharedPtr<MapBase> simplify(SharedPtr<AffineMap> affine);
/// @brief Returns the left pseudoInverse of the input matrix when the 3x3 part is symmetric
/// otherwise it zeros the 3x3 and reverses the translation.
OPENVDB_API Mat4d approxInverse(const Mat4d& mat);
////////////////////////////////////////
/// @brief Abstract base class for maps
class OPENVDB_API MapBase
{
public:
    using Ptr = SharedPtr<MapBase>;
    using ConstPtr = SharedPtr<const MapBase>;
    /// Factory function signature used by the MapRegistry.
    using MapFactory = Ptr (*)();

    MapBase(const MapBase&) = default;
    virtual ~MapBase() = default;

    /// Return an AffineMap equivalent to this map.
    virtual SharedPtr<AffineMap> getAffineMap() const = 0;

    /// Return the name of this map's concrete type (e.g., @c "AffineMap").
    virtual Name type() const = 0;

    /// Return @c true if this map is of concrete type @c MapT (e.g., AffineMap).
    template<typename MapT> bool isType() const { return this->type() == MapT::mapType(); }

    /// Return @c true if this map is equal to the given map.
    virtual bool isEqual(const MapBase& other) const = 0;

    /// Return @c true if this map is linear.
    virtual bool isLinear() const = 0;
    /// Return @c true if the spacing between the image of the lattice is uniform in all directions
    virtual bool hasUniformScale() const = 0;

    /// Return the image of @a in under the map.
    virtual Vec3d applyMap(const Vec3d& in) const = 0;
    /// Return the pre-image of @a in under the map.
    virtual Vec3d applyInverseMap(const Vec3d& in) const = 0;

    //@{
    /// @brief Apply the Inverse Jacobian Transpose of this map to a vector.
    /// For a linear map this is equivalent to applying the transpose of
    /// inverse map excluding translation.
    virtual Vec3d applyIJT(const Vec3d& in) const = 0;
    virtual Vec3d applyIJT(const Vec3d& in, const Vec3d& domainPos) const = 0;
    //@}
    /// Apply the Jacobian-curvature transform of this map to a matrix.
    virtual Mat3d applyIJC(const Mat3d& m) const = 0;
    virtual Mat3d applyIJC(const Mat3d& m, const Vec3d& v, const Vec3d& domainPos) const = 0;

    /// Return the determinant of the Jacobian of this map.
    virtual double determinant() const = 0;
    /// Return the determinant of the Jacobian at the given domain position.
    virtual double determinant(const Vec3d&) const = 0;

    //@{
    /// @brief Method to return the local size of a voxel.
    /// When a location is specified as an argument, it is understood to be
    /// be in the domain of the map (i.e. index space)
    virtual Vec3d voxelSize() const = 0;
    virtual Vec3d voxelSize(const Vec3d&) const = 0;
    //@}

    /// Read the map's state from a stream (deserialization).
    virtual void read(std::istream&) = 0;
    /// Write the map's state to a stream (serialization).
    virtual void write(std::ostream&) const = 0;

    /// Return a string description of the map, useful for debugging.
    virtual std::string str() const = 0;

    /// Return a deep copy of this map.
    virtual MapBase::Ptr copy() const = 0;

    //@{
    /// @brief Methods to update the map
    virtual MapBase::Ptr preRotate(double radians, Axis axis = X_AXIS) const = 0;
    virtual MapBase::Ptr preTranslate(const Vec3d&) const = 0;
    virtual MapBase::Ptr preScale(const Vec3d&) const = 0;
    virtual MapBase::Ptr preShear(double shear, Axis axis0, Axis axis1) const = 0;
    virtual MapBase::Ptr postRotate(double radians, Axis axis = X_AXIS) const = 0;
    virtual MapBase::Ptr postTranslate(const Vec3d&) const = 0;
    virtual MapBase::Ptr postScale(const Vec3d&) const = 0;
    virtual MapBase::Ptr postShear(double shear, Axis axis0, Axis axis1) const = 0;
    //@}

    //@{
    /// @brief Apply the Jacobian of this map to a vector.
    /// For a linear map this is equivalent to applying the map excluding translation.
    /// @warning Houdini 12.5 uses an earlier version of OpenVDB, and maps created
    /// with that version lack a virtual table entry for this method. Do not call
    /// this method from Houdini 12.5.
    virtual Vec3d applyJacobian(const Vec3d& in) const = 0;
    virtual Vec3d applyJacobian(const Vec3d& in, const Vec3d& domainPos) const = 0;
    //@}
    //@{
    /// @brief Apply the InverseJacobian of this map to a vector.
    /// For a linear map this is equivalent to applying the map inverse excluding translation.
    /// @warning Houdini 12.5 uses an earlier version of OpenVDB, and maps created
    /// with that version lack a virtual table entry for this method. Do not call
    /// this method from Houdini 12.5.
    virtual Vec3d applyInverseJacobian(const Vec3d& in) const = 0;
    virtual Vec3d applyInverseJacobian(const Vec3d& in, const Vec3d& domainPos) const = 0;
    //@}
    //@{
    /// @brief Apply the Jacobian transpose of this map to a vector.
    /// For a linear map this is equivalent to applying the transpose of the map
    /// excluding translation.
    /// @warning Houdini 12.5 uses an earlier version of OpenVDB, and maps created
    /// with that version lack a virtual table entry for this method. Do not call
    /// this method from Houdini 12.5.
    virtual Vec3d applyJT(const Vec3d& in) const = 0;
    virtual Vec3d applyJT(const Vec3d& in, const Vec3d& domainPos) const = 0;
    //@}

    /// @brief Return a new map representing the inverse of this map.
    /// @throw NotImplementedError if the map is a NonlinearFrustumMap.
    /// @warning Houdini 12.5 uses an earlier version of OpenVDB, and maps created
    /// with that version lack a virtual table entry for this method. Do not call
    /// this method from Houdini 12.5.
    virtual MapBase::Ptr inverseMap() const = 0;

protected:
    MapBase() {}

    // Shared helper for derived-class isEqual() implementations: checks the
    // concrete type first, then delegates to the derived operator==.
    template<typename MapT>
    static bool isEqualBase(const MapT& self, const MapBase& other)
    {
        return other.isType<MapT>() && (self == *static_cast<const MapT*>(&other));
    }
};
////////////////////////////////////////
/// @brief Threadsafe singleton object for accessing the map type-name dictionary.
/// Associates a map type-name with a factory function.
class OPENVDB_API MapRegistry
{
public:
    using MapDictionary = std::map<Name, MapBase::MapFactory>;

    /// Return the singleton registry instance.
    static MapRegistry* instance();

    /// Create a new map of the given (registered) type name.
    static MapBase::Ptr createMap(const Name&);

    /// Return @c true if the given map type name is registered.
    static bool isRegistered(const Name&);

    /// Register a map type along with a factory function.
    static void registerMap(const Name&, MapBase::MapFactory);

    /// Remove a map type from the registry.
    static void unregisterMap(const Name&);

    /// Clear the map type registry.
    static void clear();

private:
    MapRegistry() {}

    static MapRegistry* staticInstance();

    // The type-name-to-factory dictionary.
    MapDictionary mMap;
};
////////////////////////////////////////
/// @note Macro to use a final specifier from ABI=8 onwards.
#if OPENVDB_ABI_VERSION_NUMBER >= 8
#define OPENVDB_MAP_CLASS_SPECIFIER final
#define OPENVDB_MAP_FUNC_SPECIFIER final
#else
#define OPENVDB_MAP_CLASS_SPECIFIER
#define OPENVDB_MAP_FUNC_SPECIFIER override
#endif
/// @brief A general linear transform using homogeneous coordinates to perform
/// rotation, scaling, shear and translation
/// @note This class is marked final with ABI=8
class OPENVDB_API AffineMap OPENVDB_MAP_CLASS_SPECIFIER: public MapBase
{
public:
    using Ptr = SharedPtr<AffineMap>;
    using ConstPtr = SharedPtr<const AffineMap>;

    AffineMap():
        mMatrix(Mat4d::identity()),
        mMatrixInv(Mat4d::identity()),
        mJacobianInv(Mat3d::identity()),
        mDeterminant(1),
        mVoxelSize(Vec3d(1,1,1)),
        mIsDiagonal(true),
        mIsIdentity(true)
        // the default constructor for translation is zero
    {
    }

    /// Construct from a 3x3 matrix (no translation).
    AffineMap(const Mat3d& m)
    {
        Mat4d mat4(Mat4d::identity());
        mat4.setMat3(m);
        mMatrix = mat4;
        updateAcceleration();
    }

    /// Construct from a 4x4 matrix, which must be affine.
    AffineMap(const Mat4d& m): mMatrix(m)
    {
        if (!isAffine(m)) {
            OPENVDB_THROW(ArithmeticError,
                "Tried to initialize an affine transform from a non-affine 4x4 matrix");
        }
        updateAcceleration();
    }

    AffineMap(const AffineMap& other):
        MapBase(other),
        mMatrix(other.mMatrix),
        mMatrixInv(other.mMatrixInv),
        mJacobianInv(other.mJacobianInv),
        mDeterminant(other.mDeterminant),
        mVoxelSize(other.mVoxelSize),
        mIsDiagonal(other.mIsDiagonal),
        mIsIdentity(other.mIsIdentity)
    {
    }

    /// @brief constructor that merges the matrices for two affine maps
    AffineMap(const AffineMap& first, const AffineMap& second):
        mMatrix(first.mMatrix * second.mMatrix)
    {
        updateAcceleration();
    }

    ~AffineMap() override = default;

    /// Return a MapBase::Ptr to a new AffineMap
    static MapBase::Ptr create() { return MapBase::Ptr(new AffineMap()); }
    /// Return a MapBase::Ptr to a deep copy of this map
    MapBase::Ptr copy() const override { return MapBase::Ptr(new AffineMap(*this)); }

    /// Return a new AffineMap constructed from the cached inverse matrix.
    MapBase::Ptr inverseMap() const override { return MapBase::Ptr(new AffineMap(mMatrixInv)); }

    static bool isRegistered() { return MapRegistry::isRegistered(AffineMap::mapType()); }

    static void registerMap()
    {
        MapRegistry::registerMap(
            AffineMap::mapType(),
            AffineMap::create);
    }

    Name type() const override { return mapType(); }
    static Name mapType() { return Name("AffineMap"); }

    /// Return @c true (an AffineMap is always linear).
    bool isLinear() const override { return true; }

    /// @brief Return @c true if the scale-normalized 3x3 part is unitary
    /// (i.e. the map is a rotation/reflection with uniform scale and translation).
    bool hasUniformScale() const override
    {
        Mat3d mat = mMatrix.getMat3();
        const double det = mat.det();
        if (isApproxEqual(det, double(0))) {
            return false;
        } else {
            // Normalize out the uniform scale (cube root of |det|) before the unitary test.
            mat *= (1.0 / pow(std::abs(det), 1.0/3.0));
            return isUnitary(mat);
        }
    }

    bool isEqual(const MapBase& other) const override { return isEqualBase(*this, other); }

    bool operator==(const AffineMap& other) const
    {
        // the Mat.eq() is approximate
        if (!mMatrix.eq(other.mMatrix)) { return false; }
        if (!mMatrixInv.eq(other.mMatrixInv)) { return false; }
        return true;
    }

    bool operator!=(const AffineMap& other) const { return !(*this == other); }

    AffineMap& operator=(const AffineMap& other)
    {
        mMatrix = other.mMatrix;
        mMatrixInv = other.mMatrixInv;
        mJacobianInv = other.mJacobianInv;
        mDeterminant = other.mDeterminant;
        mVoxelSize = other.mVoxelSize;
        mIsDiagonal = other.mIsDiagonal;
        mIsIdentity = other.mIsIdentity;
        return *this;
    }

    /// Return the image of @c in under the map
    Vec3d applyMap(const Vec3d& in) const override { return in * mMatrix; }
    /// Return the pre-image of @c in under the map
    Vec3d applyInverseMap(const Vec3d& in) const override {return in * mMatrixInv; }

    /// Return the Jacobian of the map applied to @a in.
    Vec3d applyJacobian(const Vec3d& in, const Vec3d&) const override { return applyJacobian(in); }
    /// Return the Jacobian of the map applied to @a in.
    Vec3d applyJacobian(const Vec3d& in) const override { return mMatrix.transform3x3(in); }

    /// @brief Return the Inverse Jacobian of the map applied to @a in
    /// (i.e. inverse map with out translation)
    Vec3d applyInverseJacobian(const Vec3d& in, const Vec3d&) const override {
        return applyInverseJacobian(in);
    }
    /// @brief Return the Inverse Jacobian of the map applied to @a in
    /// (i.e. inverse map with out translation)
    Vec3d applyInverseJacobian(const Vec3d& in) const override {
        return mMatrixInv.transform3x3(in);
    }

    /// Return the Jacobian Transpose of the map applied to @a in.
    /// This transforms range-space gradients to domain-space gradients
    Vec3d applyJT(const Vec3d& in, const Vec3d&) const override { return applyJT(in); }
    /// Return the Jacobian Transpose of the map applied to @a in.
    Vec3d applyJT(const Vec3d& in) const override {
        // Row-major 4x4 layout: indices 0-2, 4-6, 8-10 are the 3x3 rows,
        // so this multiplies by the transpose of the 3x3 part.
        const double* m = mMatrix.asPointer();
        return Vec3d( m[ 0] * in[0] + m[ 1] * in[1] + m[ 2] * in[2],
                      m[ 4] * in[0] + m[ 5] * in[1] + m[ 6] * in[2],
                      m[ 8] * in[0] + m[ 9] * in[1] + m[10] * in[2] );
    }

    /// Return the transpose of the inverse Jacobian of the map applied to @a in.
    Vec3d applyIJT(const Vec3d& in, const Vec3d&) const override { return applyIJT(in); }
    /// Return the transpose of the inverse Jacobian of the map applied to @c in
    Vec3d applyIJT(const Vec3d& in) const override { return in * mJacobianInv; }

    /// Return the Jacobian Curvature: zero for a linear map
    Mat3d applyIJC(const Mat3d& m) const override {
        return mJacobianInv.transpose()* m * mJacobianInv;
    }
    Mat3d applyIJC(const Mat3d& in, const Vec3d& , const Vec3d& ) const override {
        return applyIJC(in);
    }

    /// Return the determinant of the Jacobian, ignores argument
    double determinant(const Vec3d& ) const override { return determinant(); }
    /// Return the determinant of the Jacobian
    double determinant() const override { return mDeterminant; }

    //@{
    /// @brief Return the lengths of the images of the segments
    /// (0,0,0)-(1,0,0), (0,0,0)-(0,1,0) and (0,0,0)-(0,0,1).
    Vec3d voxelSize() const override { return mVoxelSize; }
    Vec3d voxelSize(const Vec3d&) const override { return voxelSize(); }
    //@}

    /// Return @c true if the underlying matrix is approximately an identity
    bool isIdentity() const { return mIsIdentity; }
    /// Return @c true if the underlying matrix is diagonal
    bool isDiagonal() const { return mIsDiagonal; }
    /// Return @c true if the map is equivalent to a ScaleMap
    bool isScale() const { return isDiagonal(); }
    /// Return @c true if the map is equivalent to a ScaleTranslateMap
    bool isScaleTranslate() const { return math::isDiagonal(mMatrix.getMat3()); }

    // Methods that modify the existing affine map

    //@{
    /// @brief Modify the existing affine map by pre-applying the given operation.
    void accumPreRotation(Axis axis, double radians)
    {
        mMatrix.preRotate(axis, radians);
        updateAcceleration();
    }
    void accumPreScale(const Vec3d& v)
    {
        mMatrix.preScale(v);
        updateAcceleration();
    }
    void accumPreTranslation(const Vec3d& v)
    {
        mMatrix.preTranslate(v);
        updateAcceleration();
    }
    void accumPreShear(Axis axis0, Axis axis1, double shear)
    {
        mMatrix.preShear(axis0, axis1, shear);
        updateAcceleration();
    }
    //@}

    //@{
    /// @brief Modify the existing affine map by post-applying the given operation.
    void accumPostRotation(Axis axis, double radians)
    {
        mMatrix.postRotate(axis, radians);
        updateAcceleration();
    }
    void accumPostScale(const Vec3d& v)
    {
        mMatrix.postScale(v);
        updateAcceleration();
    }
    void accumPostTranslation(const Vec3d& v)
    {
        mMatrix.postTranslate(v);
        updateAcceleration();
    }
    void accumPostShear(Axis axis0, Axis axis1, double shear)
    {
        mMatrix.postShear(axis0, axis1, shear);
        updateAcceleration();
    }
    //@}

    /// read serialization
    void read(std::istream& is) override { mMatrix.read(is); updateAcceleration(); }
    /// write serialization
    void write(std::ostream& os) const override { mMatrix.write(os); }

    /// string serialization, useful for debugging
    std::string str() const override
    {
        std::ostringstream buffer;
        buffer << " - mat4:\n" << mMatrix.str() << std::endl;
        buffer << " - voxel dimensions: " << mVoxelSize << std::endl;
        return buffer.str();
    }

    /// on-demand decomposition of the affine map
    SharedPtr<FullyDecomposedMap> createDecomposedMap()
    {
        return createFullyDecomposedMap(mMatrix);
    }

    /// Return AffineMap::Ptr to a deep copy of the current AffineMap
    AffineMap::Ptr getAffineMap() const override { return AffineMap::Ptr(new AffineMap(*this)); }

    /// Return AffineMap::Ptr to the inverse of this map
    AffineMap::Ptr inverse() const { return AffineMap::Ptr(new AffineMap(mMatrixInv)); }

    //@{
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of prepending the appropriate operation.
    MapBase::Ptr preRotate(double radians, Axis axis = X_AXIS) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPreRotation(axis, radians);
        return simplify(affineMap);
    }
    MapBase::Ptr preTranslate(const Vec3d& t) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPreTranslation(t);
        return StaticPtrCast<MapBase, AffineMap>(affineMap);
    }
    MapBase::Ptr preScale(const Vec3d& s) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPreScale(s);
        return StaticPtrCast<MapBase, AffineMap>(affineMap);
    }
    MapBase::Ptr preShear(double shear, Axis axis0, Axis axis1) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPreShear(axis0, axis1, shear);
        return simplify(affineMap);
    }
    //@}

    //@{
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of postfixing the appropriate operation.
    MapBase::Ptr postRotate(double radians, Axis axis = X_AXIS) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPostRotation(axis, radians);
        return simplify(affineMap);
    }
    MapBase::Ptr postTranslate(const Vec3d& t) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPostTranslation(t);
        return StaticPtrCast<MapBase, AffineMap>(affineMap);
    }
    MapBase::Ptr postScale(const Vec3d& s) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPostScale(s);
        return StaticPtrCast<MapBase, AffineMap>(affineMap);
    }
    MapBase::Ptr postShear(double shear, Axis axis0, Axis axis1) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPostShear(axis0, axis1, shear);
        return simplify(affineMap);
    }
    //@}

    /// Return the matrix representation of this AffineMap
    Mat4d getMat4() const { return mMatrix;}
    const Mat4d& getConstMat4() const {return mMatrix;}
    const Mat3d& getConstJacobianInv() const {return mJacobianInv;}

private:
    // Recompute all cached quantities (inverse, Jacobian inverse, determinant,
    // voxel size, diagonal/identity flags) after any change to mMatrix.
    void updateAcceleration() {
        Mat3d mat3 = mMatrix.getMat3();
        mDeterminant = mat3.det();
        if (std::abs(mDeterminant) < (3.0 * math::Tolerance<double>::value())) {
            OPENVDB_THROW(ArithmeticError,
                "Tried to initialize an affine transform from a nearly singular matrix");
        }
        mMatrixInv = mMatrix.inverse();
        mJacobianInv = mat3.inverse().transpose();
        mIsDiagonal = math::isDiagonal(mMatrix);
        mIsIdentity = math::isIdentity(mMatrix);
        Vec3d pos = applyMap(Vec3d(0,0,0));
        mVoxelSize(0) = (applyMap(Vec3d(1,0,0)) - pos).length();
        mVoxelSize(1) = (applyMap(Vec3d(0,1,0)) - pos).length();
        mVoxelSize(2) = (applyMap(Vec3d(0,0,1)) - pos).length();
    }

    // the underlying matrix
    Mat4d mMatrix;
    // stored for acceleration
    Mat4d mMatrixInv;
    Mat3d mJacobianInv;
    double mDeterminant;
    Vec3d mVoxelSize;
    bool mIsDiagonal, mIsIdentity;
}; // class AffineMap
////////////////////////////////////////
/// @brief A specialized Affine transform that scales along the principal axis
/// the scaling need not be uniform in the three-directions
/// @note This class is not marked final because UniformScaleMap inherits from it,
/// so some of the member methods are marked final instead.
class OPENVDB_API ScaleMap: public MapBase
{
public:
    using Ptr = SharedPtr<ScaleMap>;
    using ConstPtr = SharedPtr<const ScaleMap>;

    ScaleMap(): MapBase(), mScaleValues(Vec3d(1,1,1)), mVoxelSize(Vec3d(1,1,1)),
        mScaleValuesInverse(Vec3d(1,1,1)),
        mInvScaleSqr(1,1,1), mInvTwiceScale(0.5,0.5,0.5){}

    /// Construct from per-axis scale values, all of which must be nonzero.
    ScaleMap(const Vec3d& scale):
        MapBase(),
        mScaleValues(scale),
        mVoxelSize(Vec3d(std::abs(scale(0)),std::abs(scale(1)), std::abs(scale(2))))
    {
        double determinant = scale[0]* scale[1] * scale[2];
        if (std::abs(determinant) < 3.0 * math::Tolerance<double>::value()) {
            OPENVDB_THROW(ArithmeticError, "Non-zero scale values required");
        }
        // Cache the inverse scales and derived quantities used by
        // finite-difference computations.
        mScaleValuesInverse = 1.0 / mScaleValues;
        mInvScaleSqr = mScaleValuesInverse * mScaleValuesInverse;
        mInvTwiceScale = mScaleValuesInverse / 2;
    }

    ScaleMap(const ScaleMap& other):
        MapBase(),
        mScaleValues(other.mScaleValues),
        mVoxelSize(other.mVoxelSize),
        mScaleValuesInverse(other.mScaleValuesInverse),
        mInvScaleSqr(other.mInvScaleSqr),
        mInvTwiceScale(other.mInvTwiceScale)
    {
    }

    ~ScaleMap() override = default;

    /// Return a MapBase::Ptr to a new ScaleMap
    static MapBase::Ptr create() { return MapBase::Ptr(new ScaleMap()); }
    /// Return a MapBase::Ptr to a deep copy of this map
    MapBase::Ptr copy() const override { return MapBase::Ptr(new ScaleMap(*this)); }

    /// Return a ScaleMap built from the cached inverse scale values.
    MapBase::Ptr inverseMap() const override {
        return MapBase::Ptr(new ScaleMap(mScaleValuesInverse));
    }

    static bool isRegistered() { return MapRegistry::isRegistered(ScaleMap::mapType()); }

    static void registerMap()
    {
        MapRegistry::registerMap(
            ScaleMap::mapType(),
            ScaleMap::create);
    }

    Name type() const override { return mapType(); }
    static Name mapType() { return Name("ScaleMap"); }

    /// Return @c true (a ScaleMap is always linear).
    bool isLinear() const OPENVDB_MAP_FUNC_SPECIFIER { return true; }

    /// Return @c true if the values have the same magnitude (eg. -1, 1, -1 would be a rotation).
    bool hasUniformScale() const OPENVDB_MAP_FUNC_SPECIFIER
    {
        bool value = isApproxEqual(
            std::abs(mScaleValues.x()), std::abs(mScaleValues.y()), double(5e-7));
        value = value && isApproxEqual(
            std::abs(mScaleValues.x()), std::abs(mScaleValues.z()), double(5e-7));
        return value;
    }

    /// Return the image of @c in under the map
    Vec3d applyMap(const Vec3d& in) const OPENVDB_MAP_FUNC_SPECIFIER
    {
        return Vec3d(
            in.x() * mScaleValues.x(),
            in.y() * mScaleValues.y(),
            in.z() * mScaleValues.z());
    }
    /// Return the pre-image of @c in under the map
    Vec3d applyInverseMap(const Vec3d& in) const OPENVDB_MAP_FUNC_SPECIFIER
    {
        return Vec3d(
            in.x() * mScaleValuesInverse.x(),
            in.y() * mScaleValuesInverse.y(),
            in.z() * mScaleValuesInverse.z());
    }

    /// Return the Jacobian of the map applied to @a in.
    Vec3d applyJacobian(const Vec3d& in, const Vec3d&) const OPENVDB_MAP_FUNC_SPECIFIER {
        return applyJacobian(in);
    }
    /// Return the Jacobian of the map applied to @a in.
    Vec3d applyJacobian(const Vec3d& in) const OPENVDB_MAP_FUNC_SPECIFIER { return applyMap(in); }

    /// @brief Return the Inverse Jacobian of the map applied to @a in
    /// (i.e. inverse map with out translation)
    Vec3d applyInverseJacobian(const Vec3d& in, const Vec3d&) const OPENVDB_MAP_FUNC_SPECIFIER {
        return applyInverseJacobian(in);
    }
    /// @brief Return the Inverse Jacobian of the map applied to @a in
    /// (i.e. inverse map with out translation)
    Vec3d applyInverseJacobian(const Vec3d& in) const OPENVDB_MAP_FUNC_SPECIFIER {
        return applyInverseMap(in);
    }

    /// @brief Return the Jacobian Transpose of the map applied to @a in.
    /// @details This transforms range-space gradients to domain-space gradients
    Vec3d applyJT(const Vec3d& in, const Vec3d&) const OPENVDB_MAP_FUNC_SPECIFIER { return applyJT(in); }
    /// Return the Jacobian Transpose of the map applied to @a in.
    Vec3d applyJT(const Vec3d& in) const OPENVDB_MAP_FUNC_SPECIFIER { return applyMap(in); }

    /// @brief Return the transpose of the inverse Jacobian of the map applied to @a in.
    /// @details Ignores second argument
    Vec3d applyIJT(const Vec3d& in, const Vec3d&) const OPENVDB_MAP_FUNC_SPECIFIER {
        return applyIJT(in);
    }
    /// Return the transpose of the inverse Jacobian of the map applied to @c in
    Vec3d applyIJT(const Vec3d& in) const OPENVDB_MAP_FUNC_SPECIFIER { return applyInverseMap(in); }

    /// Return the Jacobian Curvature: zero for a linear map
    Mat3d applyIJC(const Mat3d& in) const OPENVDB_MAP_FUNC_SPECIFIER
    {
        // Scale each row and each column by the corresponding inverse scale,
        // i.e. D^-1 * in * D^-1 for the diagonal Jacobian D.
        Mat3d tmp;
        for (int i = 0; i < 3; i++) {
            tmp.setRow(i, in.row(i) * mScaleValuesInverse(i));
        }
        for (int i = 0; i < 3; i++) {
            tmp.setCol(i, tmp.col(i) * mScaleValuesInverse(i));
        }
        return tmp;
    }
    Mat3d applyIJC(const Mat3d& in, const Vec3d&, const Vec3d&) const OPENVDB_MAP_FUNC_SPECIFIER {
        return applyIJC(in);
    }

    /// Return the product of the scale values, ignores argument
    double determinant(const Vec3d&) const OPENVDB_MAP_FUNC_SPECIFIER { return determinant(); }
    /// Return the product of the scale values
    double determinant() const OPENVDB_MAP_FUNC_SPECIFIER {
        return mScaleValues.x() * mScaleValues.y() * mScaleValues.z();
    }

    /// Return the scale values that define the map
    const Vec3d& getScale() const {return mScaleValues;}

    /// Return the square of the scale. Used to optimize some finite difference calculations
    const Vec3d& getInvScaleSqr() const { return mInvScaleSqr; }
    /// Return 1/(2 scale). Used to optimize some finite difference calculations
    const Vec3d& getInvTwiceScale() const { return mInvTwiceScale; }
    /// Return 1/(scale)
    const Vec3d& getInvScale() const { return mScaleValuesInverse; }

    //@{
    /// @brief Return the lengths of the images of the segments
    /// (0,0,0)-(1,0,0), (0,0,0)-(0,1,0) and (0,0,0)-(0,0,1).
    /// @details This is equivalent to the absolute values of the scale values
    Vec3d voxelSize() const OPENVDB_MAP_FUNC_SPECIFIER { return mVoxelSize; }
    Vec3d voxelSize(const Vec3d&) const OPENVDB_MAP_FUNC_SPECIFIER { return voxelSize(); }
    //@}

    /// read serialization
    void read(std::istream& is) override
    {
        mScaleValues.read(is);
        mVoxelSize.read(is);
        mScaleValuesInverse.read(is);
        mInvScaleSqr.read(is);
        mInvTwiceScale.read(is);
    }
    /// write serialization
    void write(std::ostream& os) const override
    {
        mScaleValues.write(os);
        mVoxelSize.write(os);
        mScaleValuesInverse.write(os);
        mInvScaleSqr.write(os);
        mInvTwiceScale.write(os);
    }
    /// string serialization, useful for debugging
    std::string str() const override
    {
        std::ostringstream buffer;
        buffer << " - scale: " << mScaleValues << std::endl;
        buffer << " - voxel dimensions: " << mVoxelSize << std::endl;
        return buffer.str();
    }

    bool isEqual(const MapBase& other) const override { return isEqualBase(*this, other); }

    bool operator==(const ScaleMap& other) const
    {
        // ::eq() uses a tolerance
        if (!mScaleValues.eq(other.mScaleValues)) { return false; }
        return true;
    }

    bool operator!=(const ScaleMap& other) const { return !(*this == other); }

    /// Return a AffineMap equivalent to this map
    AffineMap::Ptr getAffineMap() const override
    {
        return AffineMap::Ptr(new AffineMap(math::scale<Mat4d>(mScaleValues)));
    }

    //@{
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of prepending the appropriate operation to the existing map
    MapBase::Ptr preRotate(double radians, Axis axis) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPreRotation(axis, radians);
        return simplify(affineMap);
    }
    MapBase::Ptr preTranslate(const Vec3d&) const override;
    MapBase::Ptr preScale(const Vec3d&) const override;
    MapBase::Ptr preShear(double shear, Axis axis0, Axis axis1) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPreShear(axis0, axis1, shear);
        return simplify(affineMap);
    }
    //@}

    //@{
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of postfixing the appropriate operation to the existing map.
    MapBase::Ptr postRotate(double radians, Axis axis) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPostRotation(axis, radians);
        return simplify(affineMap);
    }
    MapBase::Ptr postTranslate(const Vec3d&) const override;
    MapBase::Ptr postScale(const Vec3d&) const override;
    MapBase::Ptr postShear(double shear, Axis axis0, Axis axis1) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPostShear(axis0, axis1, shear);
        return simplify(affineMap);
    }
    //@}

private:
    // Cached scale values and derived quantities (see the Vec3d constructor).
    Vec3d mScaleValues, mVoxelSize, mScaleValuesInverse, mInvScaleSqr, mInvTwiceScale;
}; // class ScaleMap
/// @brief A specialized Affine transform that scales along the principal axis
/// the scaling is uniform in the three-directions
/// @note This class is marked final with ABI=8
class OPENVDB_API UniformScaleMap OPENVDB_MAP_CLASS_SPECIFIER: public ScaleMap
{
public:
    using Ptr = SharedPtr<UniformScaleMap>;
    using ConstPtr = SharedPtr<const UniformScaleMap>;

    UniformScaleMap(): ScaleMap(Vec3d(1,1,1)) {}
    /// Construct with the same scale on all three axes.
    UniformScaleMap(double scale): ScaleMap(Vec3d(scale, scale, scale)) {}
    UniformScaleMap(const UniformScaleMap& other): ScaleMap(other) {}
    ~UniformScaleMap() override = default;

    /// Return a MapBase::Ptr to a new UniformScaleMap
    static MapBase::Ptr create() { return MapBase::Ptr(new UniformScaleMap()); }
    /// Return a MapBase::Ptr to a deep copy of this map
    MapBase::Ptr copy() const override { return MapBase::Ptr(new UniformScaleMap(*this)); }

    /// Return a UniformScaleMap built from the cached inverse scale.
    MapBase::Ptr inverseMap() const override
    {
        const Vec3d& invScale = getInvScale();
        return MapBase::Ptr(new UniformScaleMap( invScale[0]));
    }

    static bool isRegistered() { return MapRegistry::isRegistered(UniformScaleMap::mapType()); }
    static void registerMap()
    {
        MapRegistry::registerMap(
            UniformScaleMap::mapType(),
            UniformScaleMap::create);
    }

    Name type() const override { return mapType(); }
    static Name mapType() { return Name("UniformScaleMap"); }

    bool isEqual(const MapBase& other) const override { return isEqualBase(*this, other); }

    bool operator==(const UniformScaleMap& other) const { return ScaleMap::operator==(other); }
    bool operator!=(const UniformScaleMap& other) const { return !(*this == other); }

    /// @brief Return a MapBase::Ptr to a UniformScaleTranslateMap that is the result of
    /// pre-translation on this map
    MapBase::Ptr preTranslate(const Vec3d&) const override;
    /// @brief Return a MapBase::Ptr to a UniformScaleTranslateMap that is the result of
    /// post-translation on this map
    MapBase::Ptr postTranslate(const Vec3d&) const override;
}; // class UniformScaleMap
////////////////////////////////////////
/// Compose an additional scale onto this map.  Produces the more specialized
/// UniformScaleMap when the combined scale is (approximately) equal in all
/// three directions.
inline MapBase::Ptr
ScaleMap::preScale(const Vec3d& v) const
{
    const Vec3d combined(v * mScaleValues);
    const bool uniform =
        isApproxEqual(combined[0], combined[1]) && isApproxEqual(combined[0], combined[2]);
    if (uniform) return MapBase::Ptr(new UniformScaleMap(combined[0]));
    return MapBase::Ptr(new ScaleMap(combined));
}
/// For a pure scale map, scaling before or after the map is identical,
/// so post-scaling simply delegates to preScale().
inline MapBase::Ptr
ScaleMap::postScale(const Vec3d& v) const
{
    return this->preScale(v);
}
/// @brief A specialized linear transform that performs a translation.
/// @note This class is marked final with ABI=8
class OPENVDB_API TranslationMap OPENVDB_MAP_CLASS_SPECIFIER: public MapBase
{
public:
    using Ptr = SharedPtr<TranslationMap>;
    using ConstPtr = SharedPtr<const TranslationMap>;
    // The default constructor is a translation by zero.
    TranslationMap(): MapBase(), mTranslation(Vec3d(0,0,0)) {}
    TranslationMap(const Vec3d& t): MapBase(), mTranslation(t) {}
    TranslationMap(const TranslationMap& other): MapBase(), mTranslation(other.mTranslation) {}
    ~TranslationMap() override = default;
    /// Return a MapBase::Ptr to a new TranslationMap
    static MapBase::Ptr create() { return MapBase::Ptr(new TranslationMap()); }
    /// Return a MapBase::Ptr to a deep copy of this map
    MapBase::Ptr copy() const override { return MapBase::Ptr(new TranslationMap(*this)); }
    /// Return a MapBase::Ptr to the inverse map, i.e. translation by -mTranslation.
    MapBase::Ptr inverseMap() const override {
        return MapBase::Ptr(new TranslationMap(-mTranslation));
    }
    /// Return @c true if this map type has been registered with the MapRegistry.
    static bool isRegistered() { return MapRegistry::isRegistered(TranslationMap::mapType()); }
    /// Register this map type with the MapRegistry so that it can be created by name.
    static void registerMap()
    {
        MapRegistry::registerMap(
            TranslationMap::mapType(),
            TranslationMap::create);
    }
    Name type() const override { return mapType(); }
    static Name mapType() { return Name("TranslationMap"); }
    /// Return @c true (a TranslationMap is always linear).
    bool isLinear() const override { return true; }
    /// Return @c true (a pure translation does not alter scale, so the
    /// scale is trivially uniform).
    bool hasUniformScale() const override { return true; }
    /// Return the image of @c in under the map
    Vec3d applyMap(const Vec3d& in) const override { return in + mTranslation; }
    /// Return the pre-image of @c in under the map
    Vec3d applyInverseMap(const Vec3d& in) const override { return in - mTranslation; }
    /// Return the Jacobian of the map applied to @a in (second argument ignored).
    Vec3d applyJacobian(const Vec3d& in, const Vec3d&) const override { return applyJacobian(in); }
    /// Return the Jacobian of the map applied to @a in (the identity for a translation).
    Vec3d applyJacobian(const Vec3d& in) const override { return in; }
    /// @brief Return the Inverse Jacobian of the map applied to @a in
    /// (i.e. inverse map without translation)
    Vec3d applyInverseJacobian(const Vec3d& in, const Vec3d&) const override {
        return applyInverseJacobian(in);
    }
    /// @brief Return the Inverse Jacobian of the map applied to @a in
    /// (i.e. inverse map without translation)
    Vec3d applyInverseJacobian(const Vec3d& in) const override { return in; }
    /// @brief Return the Jacobian Transpose of the map applied to @a in.
    /// @details This transforms range-space gradients to domain-space gradients
    Vec3d applyJT(const Vec3d& in, const Vec3d&) const override { return applyJT(in); }
    /// Return the Jacobian Transpose of the map applied to @a in.
    Vec3d applyJT(const Vec3d& in) const override { return in; }
    /// @brief Return the transpose of the inverse Jacobian (Identity for TranslationMap)
    /// of the map applied to @c in, ignores second argument
    Vec3d applyIJT(const Vec3d& in, const Vec3d& ) const override { return applyIJT(in);}
    /// @brief Return the transpose of the inverse Jacobian (Identity for TranslationMap)
    /// of the map applied to @c in
    Vec3d applyIJT(const Vec3d& in) const override {return in;}
    /// Return the Jacobian Curvature: zero for a linear map
    Mat3d applyIJC(const Mat3d& mat) const override {return mat;}
    Mat3d applyIJC(const Mat3d& mat, const Vec3d&, const Vec3d&) const override {
        return applyIJC(mat);
    }
    /// Return @c 1 (a translation preserves volume); the argument is ignored.
    double determinant(const Vec3d& ) const override { return determinant(); }
    /// Return @c 1
    double determinant() const override { return 1.0; }
    /// Return (1,1,1).
    Vec3d voxelSize() const override { return Vec3d(1,1,1);}
    /// Return (1,1,1).
    Vec3d voxelSize(const Vec3d&) const override { return voxelSize();}
    /// Return the translation vector
    const Vec3d& getTranslation() const { return mTranslation; }
    /// read serialization
    void read(std::istream& is) override { mTranslation.read(is); }
    /// write serialization
    void write(std::ostream& os) const override { mTranslation.write(os); }
    /// string serialization, useful for debugging
    std::string str() const override
    {
        std::ostringstream buffer;
        buffer << " - translation: " << mTranslation << std::endl;
        return buffer.str();
    }
    bool isEqual(const MapBase& other) const override { return isEqualBase(*this, other); }
    bool operator==(const TranslationMap& other) const
    {
        // ::eq() compares with a tolerance
        return mTranslation.eq(other.mTranslation);
    }
    bool operator!=(const TranslationMap& other) const { return !(*this == other); }
    /// Return AffineMap::Ptr to an AffineMap equivalent to *this
    AffineMap::Ptr getAffineMap() const override
    {
        Mat4d matrix(Mat4d::identity());
        matrix.setTranslation(mTranslation);
        AffineMap::Ptr affineMap(new AffineMap(matrix));
        return affineMap;
    }
    //@{
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of prepending the appropriate operation.
    MapBase::Ptr preRotate(double radians, Axis axis) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPreRotation(axis, radians);
        return simplify(affineMap);
    }
    MapBase::Ptr preTranslate(const Vec3d& t) const override
    {
        return MapBase::Ptr(new TranslationMap(t + mTranslation));
    }
    MapBase::Ptr preScale(const Vec3d& v) const override;
    MapBase::Ptr preShear(double shear, Axis axis0, Axis axis1) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPreShear(axis0, axis1, shear);
        return simplify(affineMap);
    }
    //@}
    //@{
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of postfixing the appropriate operation.
    MapBase::Ptr postRotate(double radians, Axis axis) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPostRotation(axis, radians);
        return simplify(affineMap);
    }
    MapBase::Ptr postTranslate(const Vec3d& t) const override
    { // pre- and post-translation are the same for a pure translation
        return MapBase::Ptr(new TranslationMap(t + mTranslation));
    }
    MapBase::Ptr postScale(const Vec3d& v) const override;
    MapBase::Ptr postShear(double shear, Axis axis0, Axis axis1) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPostShear(axis0, axis1, shear);
        return simplify(affineMap);
    }
    //@}
private:
    // The translation vector applied by this map.
    Vec3d mTranslation;
}; // class TranslationMap
////////////////////////////////////////
/// @brief A specialized Affine transform that scales along the principal axes
/// (the scaling need not be uniform in the three directions) and then
/// translates the result.
/// @note This class is not marked final because UniformScaleMap inherits from it,
/// so some of the member methods are marked final instead.
class OPENVDB_API ScaleTranslateMap: public MapBase
{
public:
    using Ptr = SharedPtr<ScaleTranslateMap>;
    using ConstPtr = SharedPtr<const ScaleTranslateMap>;
    // Default constructor: unit scale and zero translation (the identity map).
    ScaleTranslateMap():
        MapBase(),
        mTranslation(Vec3d(0,0,0)),
        mScaleValues(Vec3d(1,1,1)),
        mVoxelSize(Vec3d(1,1,1)),
        mScaleValuesInverse(Vec3d(1,1,1)),
        mInvScaleSqr(1,1,1),
        mInvTwiceScale(0.5,0.5,0.5)
    {
    }
    /// @throw ArithmeticError if any scale component is (nearly) zero,
    /// since the map must remain invertible.
    ScaleTranslateMap(const Vec3d& scale, const Vec3d& translate):
        MapBase(),
        mTranslation(translate),
        mScaleValues(scale),
        mVoxelSize(std::abs(scale(0)), std::abs(scale(1)), std::abs(scale(2)))
    {
        const double determinant = scale[0]* scale[1] * scale[2];
        if (std::abs(determinant) < 3.0 * math::Tolerance<double>::value()) {
            OPENVDB_THROW(ArithmeticError, "Non-zero scale values required");
        }
        // Cache reciprocal quantities used by the inverse map and by
        // finite-difference computations.
        mScaleValuesInverse = 1.0 / mScaleValues;
        mInvScaleSqr = mScaleValuesInverse * mScaleValuesInverse;
        mInvTwiceScale = mScaleValuesInverse / 2;
    }
    /// Compose a ScaleMap and a TranslationMap into a single map.
    ScaleTranslateMap(const ScaleMap& scale, const TranslationMap& translate):
        MapBase(),
        mTranslation(translate.getTranslation()),
        mScaleValues(scale.getScale()),
        mVoxelSize(std::abs(mScaleValues(0)),
                   std::abs(mScaleValues(1)),
                   std::abs(mScaleValues(2))),
        mScaleValuesInverse(1.0 / scale.getScale())
    {
        mInvScaleSqr = mScaleValuesInverse * mScaleValuesInverse;
        mInvTwiceScale = mScaleValuesInverse / 2;
    }
    ScaleTranslateMap(const ScaleTranslateMap& other):
        MapBase(),
        mTranslation(other.mTranslation),
        mScaleValues(other.mScaleValues),
        mVoxelSize(other.mVoxelSize),
        mScaleValuesInverse(other.mScaleValuesInverse),
        mInvScaleSqr(other.mInvScaleSqr),
        mInvTwiceScale(other.mInvTwiceScale)
    {}
    ~ScaleTranslateMap() override = default;
    /// Return a MapBase::Ptr to a new ScaleTranslateMap
    static MapBase::Ptr create() { return MapBase::Ptr(new ScaleTranslateMap()); }
    /// Return a MapBase::Ptr to a deep copy of this map
    MapBase::Ptr copy() const override { return MapBase::Ptr(new ScaleTranslateMap(*this)); }
    /// @brief Return a MapBase::Ptr to the inverse map: scale by 1/s and
    /// translate by -t/s.
    MapBase::Ptr inverseMap() const override
    {
        return MapBase::Ptr(new ScaleTranslateMap(
            mScaleValuesInverse, -mScaleValuesInverse * mTranslation));
    }
    /// Return @c true if this map type has been registered with the MapRegistry.
    static bool isRegistered() { return MapRegistry::isRegistered(ScaleTranslateMap::mapType()); }
    /// Register this map type with the MapRegistry so that it can be created by name.
    static void registerMap()
    {
        MapRegistry::registerMap(
            ScaleTranslateMap::mapType(),
            ScaleTranslateMap::create);
    }
    Name type() const override { return mapType(); }
    static Name mapType() { return Name("ScaleTranslateMap"); }
    /// Return @c true (a ScaleTranslateMap is always linear).
    bool isLinear() const OPENVDB_MAP_FUNC_SPECIFIER { return true; }
    /// @brief Return @c true if the scale values have the same magnitude
    /// (eg. -1, 1, -1 would be a rotation).
    bool hasUniformScale() const OPENVDB_MAP_FUNC_SPECIFIER
    {
        bool value = isApproxEqual(
            std::abs(mScaleValues.x()), std::abs(mScaleValues.y()), double(5e-7));
        value = value && isApproxEqual(
            std::abs(mScaleValues.x()), std::abs(mScaleValues.z()), double(5e-7));
        return value;
    }
    /// Return the image of @c in under the map
    Vec3d applyMap(const Vec3d& in) const OPENVDB_MAP_FUNC_SPECIFIER
    {
        return Vec3d(
            in.x() * mScaleValues.x() + mTranslation.x(),
            in.y() * mScaleValues.y() + mTranslation.y(),
            in.z() * mScaleValues.z() + mTranslation.z());
    }
    /// Return the pre-image of @c in under the map
    Vec3d applyInverseMap(const Vec3d& in) const OPENVDB_MAP_FUNC_SPECIFIER
    {
        return Vec3d(
            (in.x() - mTranslation.x() ) * mScaleValuesInverse.x(),
            (in.y() - mTranslation.y() ) * mScaleValuesInverse.y(),
            (in.z() - mTranslation.z() ) * mScaleValuesInverse.z());
    }
    /// Return the Jacobian of the map applied to @a in (second argument ignored).
    Vec3d applyJacobian(const Vec3d& in, const Vec3d&) const OPENVDB_MAP_FUNC_SPECIFIER {
        return applyJacobian(in);
    }
    /// Return the Jacobian of the map applied to @a in.
    Vec3d applyJacobian(const Vec3d& in) const OPENVDB_MAP_FUNC_SPECIFIER { return in * mScaleValues; }
    /// @brief Return the Inverse Jacobian of the map applied to @a in
    /// (i.e. inverse map without translation)
    Vec3d applyInverseJacobian(const Vec3d& in, const Vec3d&) const OPENVDB_MAP_FUNC_SPECIFIER {
        return applyInverseJacobian(in);
    }
    /// @brief Return the Inverse Jacobian of the map applied to @a in
    /// (i.e. inverse map without translation)
    Vec3d applyInverseJacobian(const Vec3d& in) const OPENVDB_MAP_FUNC_SPECIFIER {
        return in * mScaleValuesInverse;
    }
    /// @brief Return the Jacobian Transpose of the map applied to @a in.
    /// @details This transforms range-space gradients to domain-space gradients
    Vec3d applyJT(const Vec3d& in, const Vec3d&) const OPENVDB_MAP_FUNC_SPECIFIER { return applyJT(in); }
    /// Return the Jacobian Transpose of the map applied to @a in.
    Vec3d applyJT(const Vec3d& in) const OPENVDB_MAP_FUNC_SPECIFIER { return applyJacobian(in); }
    /// @brief Return the transpose of the inverse Jacobian of the map applied to @a in
    /// @details Ignores second argument
    Vec3d applyIJT(const Vec3d& in, const Vec3d&) const OPENVDB_MAP_FUNC_SPECIFIER {
        return applyIJT(in);
    }
    /// Return the transpose of the inverse Jacobian of the map applied to @c in
    Vec3d applyIJT(const Vec3d& in) const OPENVDB_MAP_FUNC_SPECIFIER
    {
        return Vec3d(
            in.x() * mScaleValuesInverse.x(),
            in.y() * mScaleValuesInverse.y(),
            in.z() * mScaleValuesInverse.z());
    }
    /// Return the Jacobian Curvature: zero for a linear map
    Mat3d applyIJC(const Mat3d& in) const OPENVDB_MAP_FUNC_SPECIFIER
    {
        Mat3d tmp;
        // Scale the rows and then the columns by the inverse scale values.
        for (int i=0; i<3; i++){
            tmp.setRow(i, in.row(i)*mScaleValuesInverse(i));
        }
        for (int i=0; i<3; i++){
            tmp.setCol(i, tmp.col(i)*mScaleValuesInverse(i));
        }
        return tmp;
    }
    Mat3d applyIJC(const Mat3d& in, const Vec3d&, const Vec3d& ) const OPENVDB_MAP_FUNC_SPECIFIER {
        return applyIJC(in);
    }
    /// Return the product of the scale values, ignores argument
    double determinant(const Vec3d&) const OPENVDB_MAP_FUNC_SPECIFIER { return determinant(); }
    /// Return the product of the scale values
    double determinant() const OPENVDB_MAP_FUNC_SPECIFIER {
        return mScaleValues.x() * mScaleValues.y() * mScaleValues.z();
    }
    /// Return the absolute values of the scale values
    Vec3d voxelSize() const OPENVDB_MAP_FUNC_SPECIFIER { return mVoxelSize;}
    /// Return the absolute values of the scale values, ignores argument
    Vec3d voxelSize(const Vec3d&) const OPENVDB_MAP_FUNC_SPECIFIER { return voxelSize();}
    /// Returns the scale values
    const Vec3d& getScale() const { return mScaleValues; }
    /// Returns the translation
    const Vec3d& getTranslation() const { return mTranslation; }
    /// Return the square of the inverse scale (1/s^2). Used to optimize some finite difference calculations
    const Vec3d& getInvScaleSqr() const {return mInvScaleSqr;}
    /// Return 1/(2 scale). Used to optimize some finite difference calculations
    const Vec3d& getInvTwiceScale() const {return mInvTwiceScale;}
    /// Return 1/(scale)
    const Vec3d& getInvScale() const {return mScaleValuesInverse; }
    /// read serialization
    void read(std::istream& is) override
    {
        mTranslation.read(is);
        mScaleValues.read(is);
        mVoxelSize.read(is);
        mScaleValuesInverse.read(is);
        mInvScaleSqr.read(is);
        mInvTwiceScale.read(is);
    }
    /// write serialization
    void write(std::ostream& os) const override
    {
        mTranslation.write(os);
        mScaleValues.write(os);
        mVoxelSize.write(os);
        mScaleValuesInverse.write(os);
        mInvScaleSqr.write(os);
        mInvTwiceScale.write(os);
    }
    /// string serialization, useful for debugging
    std::string str() const override
    {
        std::ostringstream buffer;
        buffer << " - translation: " << mTranslation << std::endl;
        buffer << " - scale: " << mScaleValues << std::endl;
        buffer << " - voxel dimensions: " << mVoxelSize << std::endl;
        return buffer.str();
    }
    bool isEqual(const MapBase& other) const override { return isEqualBase(*this, other); }
    bool operator==(const ScaleTranslateMap& other) const
    {
        // ::eq() compares with a tolerance
        if (!mScaleValues.eq(other.mScaleValues)) { return false; }
        if (!mTranslation.eq(other.mTranslation)) { return false; }
        return true;
    }
    bool operator!=(const ScaleTranslateMap& other) const { return !(*this == other); }
    /// Return AffineMap::Ptr to an AffineMap equivalent to *this
    AffineMap::Ptr getAffineMap() const override
    {
        AffineMap::Ptr affineMap(new AffineMap(math::scale<Mat4d>(mScaleValues)));
        affineMap->accumPostTranslation(mTranslation);
        return affineMap;
    }
    //@{
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of prepending the appropriate operation.
    MapBase::Ptr preRotate(double radians, Axis axis) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPreRotation(axis, radians);
        return simplify(affineMap);
    }
    MapBase::Ptr preTranslate(const Vec3d& t) const override
    {
        // Translating before the scale is equivalent to translating by the
        // scaled offset after it.
        const Vec3d& s = mScaleValues;
        const Vec3d scaled_trans( t.x() * s.x(),
                                  t.y() * s.y(),
                                  t.z() * s.z() );
        return MapBase::Ptr( new ScaleTranslateMap(mScaleValues, mTranslation + scaled_trans));
    }
    MapBase::Ptr preScale(const Vec3d& v) const override;
    MapBase::Ptr preShear(double shear, Axis axis0, Axis axis1) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPreShear(axis0, axis1, shear);
        return simplify(affineMap);
    }
    //@}
    //@{
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of postfixing the appropriate operation.
    MapBase::Ptr postRotate(double radians, Axis axis) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPostRotation(axis, radians);
        return simplify(affineMap);
    }
    MapBase::Ptr postTranslate(const Vec3d& t) const override
    {
        return MapBase::Ptr( new ScaleTranslateMap(mScaleValues, mTranslation + t));
    }
    MapBase::Ptr postScale(const Vec3d& v) const override;
    MapBase::Ptr postShear(double shear, Axis axis0, Axis axis1) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPostShear(axis0, axis1, shear);
        return simplify(affineMap);
    }
    //@}
private:
    // mTranslation:        translation applied after the scale
    // mScaleValues:        per-axis scale factors
    // mVoxelSize:          absolute values of the scale factors
    // mScaleValuesInverse: 1/scale, cached for the inverse map
    // mInvScaleSqr:        1/scale^2, used by finite-difference schemes
    // mInvTwiceScale:      1/(2*scale), used by finite-difference schemes
    Vec3d mTranslation, mScaleValues, mVoxelSize, mScaleValuesInverse,
        mInvScaleSqr, mInvTwiceScale;
}; // class ScaleTranslateMap
/// Append a translation to this scale map; the translation is applied after
/// the scale, so the resulting ScaleTranslateMap carries @a t unchanged.
inline MapBase::Ptr
ScaleMap::postTranslate(const Vec3d& t) const
{
    MapBase::Ptr composed(new ScaleTranslateMap(mScaleValues, t));
    return composed;
}
/// Prepend a translation to this scale map.  Translating first and scaling
/// second is equivalent to scaling first and translating by the
/// componentwise-scaled offset s * t.
inline MapBase::Ptr
ScaleMap::preTranslate(const Vec3d& t) const
{
    const Vec3d shifted(t * mScaleValues);
    return MapBase::Ptr(new ScaleTranslateMap(mScaleValues, shifted));
}
/// @brief A specialized Affine transform that uniformly scales along the
/// principal axes and then translates the result.
/// @note This class is marked final with ABI=8
class OPENVDB_API UniformScaleTranslateMap OPENVDB_MAP_CLASS_SPECIFIER: public ScaleTranslateMap
{
public:
    using Ptr = SharedPtr<UniformScaleTranslateMap>;
    using ConstPtr = SharedPtr<const UniformScaleTranslateMap>;
    UniformScaleTranslateMap():ScaleTranslateMap(Vec3d(1,1,1), Vec3d(0,0,0)) {}
    UniformScaleTranslateMap(double scale, const Vec3d& translate):
        ScaleTranslateMap(Vec3d(scale,scale,scale), translate) {}
    /// Compose a UniformScaleMap and a TranslationMap into a single map.
    UniformScaleTranslateMap(const UniformScaleMap& scale, const TranslationMap& translate):
        ScaleTranslateMap(scale.getScale(), translate.getTranslation()) {}
    UniformScaleTranslateMap(const UniformScaleTranslateMap& other):ScaleTranslateMap(other) {}
    ~UniformScaleTranslateMap() override = default;
    /// Return a MapBase::Ptr to a new UniformScaleTranslateMap
    static MapBase::Ptr create() { return MapBase::Ptr(new UniformScaleTranslateMap()); }
    /// Return a MapBase::Ptr to a deep copy of this map
    MapBase::Ptr copy() const override { return MapBase::Ptr(new UniformScaleTranslateMap(*this)); }
    /// @brief Return a MapBase::Ptr to the inverse map: scale by 1/s and
    /// translate by -t/s.
    MapBase::Ptr inverseMap() const override
    {
        const Vec3d& scaleInv = getInvScale();
        const Vec3d& trans = getTranslation();
        return MapBase::Ptr(new UniformScaleTranslateMap(scaleInv[0], -scaleInv[0] * trans));
    }
    /// Return @c true if this map type has been registered with the MapRegistry.
    static bool isRegistered()
    {
        return MapRegistry::isRegistered(UniformScaleTranslateMap::mapType());
    }
    /// Register this map type with the MapRegistry so that it can be created by name.
    static void registerMap()
    {
        MapRegistry::registerMap(
            UniformScaleTranslateMap::mapType(), UniformScaleTranslateMap::create);
    }
    Name type() const override { return mapType(); }
    static Name mapType() { return Name("UniformScaleTranslateMap"); }
    bool isEqual(const MapBase& other) const override { return isEqualBase(*this, other); }
    bool operator==(const UniformScaleTranslateMap& other) const
    {
        return ScaleTranslateMap::operator==(other);
    }
    bool operator!=(const UniformScaleTranslateMap& other) const { return !(*this == other); }
    /// @brief Return a MapBase::Ptr to a UniformScaleTranslateMap that is
    /// the result of prepending translation on this map.
    MapBase::Ptr preTranslate(const Vec3d& t) const override
    {
        const double scale = this->getScale().x();
        // Translating before the scale is equivalent to translating by
        // scale * t after it.
        const Vec3d new_trans = this->getTranslation() + scale * t;
        return MapBase::Ptr( new UniformScaleTranslateMap(scale, new_trans));
    }
    /// @brief Return a MapBase::Ptr to a UniformScaleTranslateMap that is
    /// the result of postfixing translation on this map.
    MapBase::Ptr postTranslate(const Vec3d& t) const override
    {
        const double scale = this->getScale().x();
        return MapBase::Ptr( new UniformScaleTranslateMap(scale, this->getTranslation() + t));
    }
}; // class UniformScaleTranslateMap
/// Append a translation to this uniform scale map; the translation acts
/// after the scale, so it carries over unchanged.
inline MapBase::Ptr
UniformScaleMap::postTranslate(const Vec3d& t) const
{
    const double s = this->getScale().x();
    return MapBase::Ptr(new UniformScaleTranslateMap(s, t));
}
/// Prepend a translation to this uniform scale map: translating by @a t and
/// then scaling by s equals scaling first and translating by s*t.
inline MapBase::Ptr
UniformScaleMap::preTranslate(const Vec3d& t) const
{
    const double s = this->getScale().x();
    return MapBase::Ptr(new UniformScaleTranslateMap(s, s * t));
}
/// Prepend a scale to this translation.  The scale acts before the
/// translation, so the translation is unchanged; an (approximately) uniform
/// scale yields the more specialized UniformScaleTranslateMap.
inline MapBase::Ptr
TranslationMap::preScale(const Vec3d& v) const
{
    const bool uniform = isApproxEqual(v[0], v[1]) && isApproxEqual(v[0], v[2]);
    if (uniform) {
        return MapBase::Ptr(new UniformScaleTranslateMap(v[0], mTranslation));
    }
    return MapBase::Ptr(new ScaleTranslateMap(v, mTranslation));
}
/// Append a scale to this translation.  The scale acts after the
/// translation, so the stored translation must itself be scaled.
inline MapBase::Ptr
TranslationMap::postScale(const Vec3d& v) const
{
    if (isApproxEqual(v[0], v[1]) && isApproxEqual(v[0], v[2])) {
        return MapBase::Ptr(new UniformScaleTranslateMap(v[0], v[0]*mTranslation));
    }
    const Vec3d scaledTrans(mTranslation.x()*v.x(),
                            mTranslation.y()*v.y(),
                            mTranslation.z()*v.z());
    return MapBase::Ptr(new ScaleTranslateMap(v, scaledTrans));
}
/// Prepend a scale: the new scale multiplies componentwise into the existing
/// scale while the translation is unaffected; an (approximately) uniform
/// combined scale yields the more specialized UniformScaleTranslateMap.
inline MapBase::Ptr
ScaleTranslateMap::preScale(const Vec3d& v) const
{
    const Vec3d combined(v * mScaleValues);
    if (isApproxEqual(combined[0], combined[1]) && isApproxEqual(combined[0], combined[2])) {
        return MapBase::Ptr(new UniformScaleTranslateMap(combined[0], mTranslation));
    }
    return MapBase::Ptr(new ScaleTranslateMap(combined, mTranslation));
}
/// Append a scale: both the existing scale and the existing translation are
/// multiplied componentwise by @a v, since the scale acts after the
/// translation.
inline MapBase::Ptr
ScaleTranslateMap::postScale(const Vec3d& v) const
{
    const Vec3d combined(v * mScaleValues);
    const Vec3d shifted(mTranslation.x()*v.x(),
                        mTranslation.y()*v.y(),
                        mTranslation.z()*v.z());
    if (isApproxEqual(combined[0], combined[1]) && isApproxEqual(combined[0], combined[2])) {
        return MapBase::Ptr(new UniformScaleTranslateMap(combined[0], shifted));
    }
    return MapBase::Ptr(new ScaleTranslateMap(combined, shifted));
}
////////////////////////////////////////
/// @brief A specialized linear transform that performs a unitary mapping,
/// i.e. a rotation and/or reflection.
/// @note This class is marked final with ABI=8
class OPENVDB_API UnitaryMap OPENVDB_MAP_CLASS_SPECIFIER: public MapBase
{
public:
    using Ptr = SharedPtr<UnitaryMap>;
    using ConstPtr = SharedPtr<const UnitaryMap>;
    /// The default constructor makes an identity map.
    UnitaryMap(): mAffineMap(Mat4d::identity())
    {
    }
    /// Construct a rotation of @a radians about the given arbitrary @a axis.
    UnitaryMap(const Vec3d& axis, double radians)
    {
        Mat3d matrix;
        matrix.setToRotation(axis, radians);
        mAffineMap = AffineMap(matrix);
    }
    /// Construct a rotation of @a radians about the given principal @a axis.
    UnitaryMap(Axis axis, double radians)
    {
        Mat4d matrix;
        matrix.setToRotation(axis, radians);
        mAffineMap = AffineMap(matrix);
    }
    /// @brief Construct from a 3x3 matrix.
    /// @throw ArithmeticError if the matrix is not unitary.
    UnitaryMap(const Mat3d& m)
    {
        // test that the mat3 is a rotation || reflection
        if (!isUnitary(m)) {
            OPENVDB_THROW(ArithmeticError, "Matrix initializing unitary map was not unitary");
        }
        Mat4d matrix(Mat4d::identity());
        matrix.setMat3(m);
        mAffineMap = AffineMap(matrix);
    }
    /// @brief Construct from a 4x4 matrix.
    /// @throw ArithmeticError if the matrix is not invertible, is not affine,
    /// has a translation component, or its 3x3 part is not unitary.
    UnitaryMap(const Mat4d& m)
    {
        if (!isInvertible(m)) {
            OPENVDB_THROW(ArithmeticError,
                "4x4 Matrix initializing unitary map was not unitary: not invertible");
        }
        if (!isAffine(m)) {
            OPENVDB_THROW(ArithmeticError,
                "4x4 Matrix initializing unitary map was not unitary: not affine");
        }
        if (hasTranslation(m)) {
            OPENVDB_THROW(ArithmeticError,
                "4x4 Matrix initializing unitary map was not unitary: had translation");
        }
        if (!isUnitary(m.getMat3())) {
            OPENVDB_THROW(ArithmeticError,
                "4x4 Matrix initializing unitary map was not unitary");
        }
        mAffineMap = AffineMap(m);
    }
    UnitaryMap(const UnitaryMap& other):
        MapBase(other),
        mAffineMap(other.mAffineMap)
    {
    }
    /// Construct the composition of two unitary maps (@a first, then @a second).
    UnitaryMap(const UnitaryMap& first, const UnitaryMap& second):
        mAffineMap(*(first.getAffineMap()), *(second.getAffineMap()))
    {
    }
    ~UnitaryMap() override = default;
    /// Return a MapBase::Ptr to a new UnitaryMap
    static MapBase::Ptr create() { return MapBase::Ptr(new UnitaryMap()); }
    /// Returns a MapBase::Ptr to a deep copy of *this
    MapBase::Ptr copy() const override { return MapBase::Ptr(new UnitaryMap(*this)); }
    /// Return a MapBase::Ptr to the inverse map.
    MapBase::Ptr inverseMap() const override
    {
        return MapBase::Ptr(new UnitaryMap(mAffineMap.getMat4().inverse()));
    }
    /// Return @c true if this map type has been registered with the MapRegistry.
    static bool isRegistered() { return MapRegistry::isRegistered(UnitaryMap::mapType()); }
    /// Register this map type with the MapRegistry so that it can be created by name.
    static void registerMap()
    {
        MapRegistry::registerMap(
            UnitaryMap::mapType(),
            UnitaryMap::create);
    }
    /// Return @c UnitaryMap
    Name type() const override { return mapType(); }
    /// Return @c UnitaryMap
    static Name mapType() { return Name("UnitaryMap"); }
    /// Return @c true (a UnitaryMap is always linear).
    bool isLinear() const override { return true; }
    /// Return @c true (rotations and reflections preserve lengths, so the
    /// scale is trivially uniform).
    bool hasUniformScale() const override { return true; }
    bool isEqual(const MapBase& other) const override { return isEqualBase(*this, other); }
    bool operator==(const UnitaryMap& other) const
    {
        // compare underlying linear map.
        if (mAffineMap!=other.mAffineMap) return false;
        return true;
    }
    bool operator!=(const UnitaryMap& other) const { return !(*this == other); }
    /// Return the image of @c in under the map
    Vec3d applyMap(const Vec3d& in) const override { return mAffineMap.applyMap(in); }
    /// Return the pre-image of @c in under the map
    Vec3d applyInverseMap(const Vec3d& in) const override { return mAffineMap.applyInverseMap(in); }
    /// Return the Jacobian of the map applied to @a in (second argument ignored).
    Vec3d applyJacobian(const Vec3d& in, const Vec3d&) const override { return applyJacobian(in); }
    /// Return the Jacobian of the map applied to @a in.
    Vec3d applyJacobian(const Vec3d& in) const override { return mAffineMap.applyJacobian(in); }
    /// @brief Return the Inverse Jacobian of the map applied to @a in
    /// (i.e. inverse map without translation)
    Vec3d applyInverseJacobian(const Vec3d& in, const Vec3d&) const override {
        return applyInverseJacobian(in);
    }
    /// @brief Return the Inverse Jacobian of the map applied to @a in
    /// (i.e. inverse map without translation)
    Vec3d applyInverseJacobian(const Vec3d& in) const override {
        return mAffineMap.applyInverseJacobian(in);
    }
    /// @brief Return the Jacobian Transpose of the map applied to @a in.
    /// @details This transforms range-space gradients to domain-space gradients
    Vec3d applyJT(const Vec3d& in, const Vec3d&) const override { return applyJT(in); }
    /// Return the Jacobian Transpose of the map applied to @a in.
    Vec3d applyJT(const Vec3d& in) const override {
        return applyInverseMap(in); // the transpose of the unitary map is its inverse
    }
    /// @brief Return the transpose of the inverse Jacobian of the map applied to @a in
    /// @details Ignores second argument
    Vec3d applyIJT(const Vec3d& in, const Vec3d& ) const override { return applyIJT(in);}
    /// Return the transpose of the inverse Jacobian of the map applied to @c in
    Vec3d applyIJT(const Vec3d& in) const override { return mAffineMap.applyIJT(in); }
    /// Return the Jacobian Curvature: zero for a linear map
    Mat3d applyIJC(const Mat3d& in) const override { return mAffineMap.applyIJC(in); }
    Mat3d applyIJC(const Mat3d& in, const Vec3d&, const Vec3d& ) const override {
        return applyIJC(in);
    }
    /// Return the determinant of the Jacobian, ignores argument
    double determinant(const Vec3d&) const override { return determinant(); }
    /// Return the determinant of the Jacobian
    double determinant() const override { return mAffineMap.determinant(); }
    /// @{
    /// @brief Returns the lengths of the images of the segments
    /// (0,0,0) − (1,0,0), (0,0,0) − (0,1,0) and (0,0,0) − (0,0,1).
    Vec3d voxelSize() const override { return mAffineMap.voxelSize();}
    Vec3d voxelSize(const Vec3d&) const override { return voxelSize();}
    /// @}
    /// read serialization
    void read(std::istream& is) override
    {
        mAffineMap.read(is);
    }
    /// write serialization
    void write(std::ostream& os) const override
    {
        mAffineMap.write(os);
    }
    /// string serialization, useful for debugging
    std::string str() const override
    {
        std::ostringstream buffer;
        buffer << mAffineMap.str();
        return buffer.str();
    }
    /// Return AffineMap::Ptr to an AffineMap equivalent to *this
    AffineMap::Ptr getAffineMap() const override {
        return AffineMap::Ptr(new AffineMap(mAffineMap));
    }
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of prepending the given rotation.  The composition of two unitary
    /// maps is unitary, so no simplification is needed.
    MapBase::Ptr preRotate(double radians, Axis axis) const override
    {
        UnitaryMap first(axis, radians);
        UnitaryMap::Ptr unitaryMap(new UnitaryMap(first, *this));
        return StaticPtrCast<MapBase, UnitaryMap>(unitaryMap);
    }
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of prepending the given translation.
    MapBase::Ptr preTranslate(const Vec3d& t) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPreTranslation(t);
        return simplify(affineMap);
    }
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of prepending the given scale.
    MapBase::Ptr preScale(const Vec3d& v) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPreScale(v);
        return simplify(affineMap);
    }
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of prepending the given shear.
    MapBase::Ptr preShear(double shear, Axis axis0, Axis axis1) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPreShear(axis0, axis1, shear);
        return simplify(affineMap);
    }
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of appending the given rotation.  The composition of two unitary
    /// maps is unitary, so no simplification is needed.
    MapBase::Ptr postRotate(double radians, Axis axis) const override
    {
        UnitaryMap second(axis, radians);
        UnitaryMap::Ptr unitaryMap(new UnitaryMap(*this, second));
        return StaticPtrCast<MapBase, UnitaryMap>(unitaryMap);
    }
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of appending the given translation.
    MapBase::Ptr postTranslate(const Vec3d& t) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPostTranslation(t);
        return simplify(affineMap);
    }
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of appending the given scale.
    MapBase::Ptr postScale(const Vec3d& v) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPostScale(v);
        return simplify(affineMap);
    }
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of appending the given shear.
    MapBase::Ptr postShear(double shear, Axis axis0, Axis axis1) const override
    {
        AffineMap::Ptr affineMap = getAffineMap();
        affineMap->accumPostShear(axis0, axis1, shear);
        return simplify(affineMap);
    }
private:
    // The unitary transform, stored as a general affine map.
    AffineMap mAffineMap;
}; // class UnitaryMap
////////////////////////////////////////
/// @brief This map is composed of three steps.
/// First it will take a box of size (Lx X Ly X Lz) defined by a member data bounding box
/// and map it into a frustum with near plane (1 X Ly/Lx) and prescribed depth
/// Then this frustum is transformed by an internal second map: most often a uniform scale,
/// but other effects can be achieved by accumulating translation, shear and rotation: these
/// are all applied to the second map
/// @note This class is marked final with ABI=8
class OPENVDB_API NonlinearFrustumMap OPENVDB_MAP_CLASS_SPECIFIER: public MapBase
{
public:
    using Ptr = SharedPtr<NonlinearFrustumMap>;
    using ConstPtr = SharedPtr<const NonlinearFrustumMap>;
    /// @brief Default constructor: unit index-space box, unit taper and unit depth.
    NonlinearFrustumMap():
        MapBase(),
        mBBox(Vec3d(0), Vec3d(1)),
        mTaper(1),
        mDepth(1)
    {
        init();
    }
    /// @brief Constructor that takes an index-space bounding box
    /// to be mapped into a frustum with a given @a depth and @a taper
    /// (defined as ratio of nearplane/farplane).
    NonlinearFrustumMap(const BBoxd& bb, double taper, double depth):
        MapBase(),mBBox(bb), mTaper(taper), mDepth(depth)
    {
        init();
    }
    /// @brief Constructor that takes an index-space bounding box
    /// to be mapped into a frustum with a given @a depth and @a taper
    /// (defined as ratio of nearplane/farplane).
    /// @details This frustum is further modified by the @a secondMap,
    /// intended to be a simple translation, rotation and uniform scale.
    /// @throw ArithmeticError if @a secondMap is not linear.
    NonlinearFrustumMap(const BBoxd& bb, double taper, double depth,
        const MapBase::Ptr& secondMap):
        mBBox(bb), mTaper(taper), mDepth(depth)
    {
        if (!secondMap->isLinear() ) {
            OPENVDB_THROW(ArithmeticError,
                "The second map in the Frustum transfrom must be linear");
        }
        mSecondMap = *( secondMap->getAffineMap() );
        init();
    }
    /// Copy constructor.  init() recomputes the derived quantities.
    NonlinearFrustumMap(const NonlinearFrustumMap& other):
        MapBase(),
        mBBox(other.mBBox),
        mTaper(other.mTaper),
        mDepth(other.mDepth),
        mSecondMap(other.mSecondMap),
        mHasSimpleAffine(other.mHasSimpleAffine)
    {
        init();
    }
    /// @brief Constructor from a camera frustum
    ///
    /// @param position the tip of the frustum (i.e., the camera's position).
    /// @param direction a vector pointing from @a position toward the near plane.
    /// @param up a non-unit vector describing the direction and extent of
    ///     the frustum's intersection on the near plane.  Together,
    ///     @a up must be orthogonal to @a direction.
    /// @param aspect the aspect ratio of the frustum intersection with near plane
    ///     defined as width / height
    /// @param z_near,depth the distance from @a position along @a direction to the
    ///     near and far planes of the frustum.
    /// @param x_count the number of voxels, aligned with the near plane's
    ///     horizontal axis, across the face of the frustum
    /// @param z_count the number of voxels, aligned with @a direction,
    ///     between the near and far planes
    NonlinearFrustumMap(const Vec3d& position,
                        const Vec3d& direction,
                        const Vec3d& up,
                        double aspect /* width / height */,
                        double z_near, double depth,
                        Coord::ValueType x_count, Coord::ValueType z_count) {
        /// @todo check that depth > 0
        /// @todo check up.length > 0
        /// @todo check that direction dot up = 0
        if (!(depth > 0)) {
            OPENVDB_THROW(ArithmeticError,
                "The frustum depth must be non-zero and positive");
        }
        if (!(up.length() > 0)) {
            OPENVDB_THROW(ArithmeticError,
                "The frustum height must be non-zero and positive");
        }
        if (!(aspect > 0)) {
            OPENVDB_THROW(ArithmeticError,
                "The frustum aspect ratio must be non-zero and positive");
        }
        if (!(isApproxEqual(up.dot(direction), 0.))) {
            OPENVDB_THROW(ArithmeticError,
                "The frustum up orientation must be perpendicular to into-frustum direction");
        }
        double near_plane_height = 2 * up.length();
        double near_plane_width = aspect * near_plane_height;
        Coord::ValueType y_count = static_cast<int>(Round(x_count / aspect));
        mBBox = BBoxd(Vec3d(0,0,0), Vec3d(x_count, y_count, z_count));
        mDepth = depth / near_plane_width; // depth non-dimensionalized on width
        double gamma = near_plane_width / z_near;
        mTaper = 1./(mDepth*gamma + 1.);
        Vec3d direction_unit = direction;
        direction_unit.normalize();
        Mat4d r1(Mat4d::identity());
        r1.setToRotation(/*from*/Vec3d(0,0,1), /*to */direction_unit);
        Mat4d r2(Mat4d::identity());
        Vec3d temp = r1.inverse().transform(up);
        r2.setToRotation(/*from*/Vec3d(0,1,0), /*to*/temp );
        Mat4d scale = math::scale<Mat4d>(
            Vec3d(near_plane_width, near_plane_width, near_plane_width));
        // move the near plane to origin, rotate to align with axis, and scale down
        // T_inv * R1_inv * R2_inv * scale_inv
        Mat4d mat = scale * r2 * r1;
        mat.setTranslation(position + z_near*direction_unit);
        mSecondMap = AffineMap(mat);
        init();
    }
    ~NonlinearFrustumMap() override = default;
    /// Return a MapBase::Ptr to a new NonlinearFrustumMap
    static MapBase::Ptr create() { return MapBase::Ptr(new NonlinearFrustumMap()); }
    /// Return a MapBase::Ptr to a deep copy of this map
    MapBase::Ptr copy() const override { return MapBase::Ptr(new NonlinearFrustumMap(*this)); }
    /// @brief Not implemented, since there is currently no map type that can
    /// represent the inverse of a frustum
    /// @throw NotImplementedError
    MapBase::Ptr inverseMap() const override
    {
        OPENVDB_THROW(NotImplementedError,
            "inverseMap() is not implemented for NonlinearFrustumMap");
    }
    /// Return @c true if this map type has been registered with the MapRegistry.
    static bool isRegistered() { return MapRegistry::isRegistered(NonlinearFrustumMap::mapType()); }
    /// Register this map type's factory with the MapRegistry.
    static void registerMap()
    {
        MapRegistry::registerMap(
            NonlinearFrustumMap::mapType(),
            NonlinearFrustumMap::create);
    }
    /// Return @c NonlinearFrustumMap
    Name type() const override { return mapType(); }
    /// Return @c NonlinearFrustumMap
    static Name mapType() { return Name("NonlinearFrustumMap"); }
    /// Return @c false (a NonlinearFrustumMap is never linear).
    bool isLinear() const override { return false; }
    /// Return @c false (by convention false)
    bool hasUniformScale() const override { return false; }
    /// Return @c true if the map is equivalent to an identity
    bool isIdentity() const
    {
        // The frustum can only be consistent with a linear map if the taper value is 1
        if (!isApproxEqual(mTaper, double(1)) ) return false;
        // There are various ways an identity can be decomposed between the two parts
        // of the map.  Best to just check that the principal vectors are stationary.
        const Vec3d e1(1,0,0);
        if (!applyMap(e1).eq(e1)) return false;
        const Vec3d e2(0,1,0);
        if (!applyMap(e2).eq(e2)) return false;
        const Vec3d e3(0,0,1);
        if (!applyMap(e3).eq(e3)) return false;
        return true;
    }
    /// Type-checked equality comparison against another MapBase.
    bool isEqual(const MapBase& other) const override { return isEqualBase(*this, other); }
    bool operator==(const NonlinearFrustumMap& other) const
    {
        if (mBBox!=other.mBBox) return false;
        if (!isApproxEqual(mTaper, other.mTaper)) return false;
        if (!isApproxEqual(mDepth, other.mDepth)) return false;
        // Two linear transforms are equivalent iff they have the same translation
        // and the same action on an orthogonal spanning basis.  First check translation:
        Vec3d e(0,0,0);
        if (!mSecondMap.applyMap(e).eq(other.mSecondMap.applyMap(e))) return false;
        /// check spanning vectors
        e(0) = 1;
        if (!mSecondMap.applyMap(e).eq(other.mSecondMap.applyMap(e))) return false;
        e(0) = 0;
        e(1) = 1;
        if (!mSecondMap.applyMap(e).eq(other.mSecondMap.applyMap(e))) return false;
        e(1) = 0;
        e(2) = 1;
        if (!mSecondMap.applyMap(e).eq(other.mSecondMap.applyMap(e))) return false;
        return true;
    }
    bool operator!=(const NonlinearFrustumMap& other) const { return !(*this == other); }
    /// Return the image of @c in under the map
    Vec3d applyMap(const Vec3d& in) const override
    {
        return mSecondMap.applyMap(applyFrustumMap(in));
    }
    /// Return the pre-image of @c in under the map
    Vec3d applyInverseMap(const Vec3d& in) const override
    {
        return applyFrustumInverseMap(mSecondMap.applyInverseMap(in));
    }
    /// Return the Jacobian of the linear second map applied to @c in
    Vec3d applyJacobian(const Vec3d& in) const override { return mSecondMap.applyJacobian(in); }
    /// Return the Jacobian defined at @c isloc applied to @c in
    Vec3d applyJacobian(const Vec3d& in, const Vec3d& isloc) const override
    {
        // Move the (x,y)-center of the bbox's near face to the origin in index space.
        Vec3d centered(isloc);
        centered = centered - mBBox.min();
        centered.x() -= mXo;
        centered.y() -= mYo;
        // scale the z-direction by depth / Lz (the z voxel count)
        const double zprime = centered.z()*mDepthOnLz;
        const double scale = (mGamma * zprime + 1.) / mLx;
        const double scale2 = mGamma * mDepthOnLz / mLx;
        const Vec3d tmp(scale * in.x() + scale2 * centered.x()* in.z(),
            scale * in.y() + scale2 * centered.y()* in.z(),
            mDepthOnLz * in.z());
        return mSecondMap.applyJacobian(tmp);
    }
    /// @brief Return the Inverse Jacobian of the map applied to @a in
    /// (i.e. inverse map without translation)
    Vec3d applyInverseJacobian(const Vec3d& in) const override {
        return mSecondMap.applyInverseJacobian(in);
    }
    /// Return the Inverse Jacobian defined at @c isloc of the map applied to @a in.
    Vec3d applyInverseJacobian(const Vec3d& in, const Vec3d& isloc) const override {
        // Move the (x,y)-center of the bbox's near face to the origin in index space.
        Vec3d centered(isloc);
        centered = centered - mBBox.min();
        centered.x() -= mXo;
        centered.y() -= mYo;
        // scale the z-direction by depth / Lz (the z voxel count)
        const double zprime = centered.z()*mDepthOnLz;
        const double scale = (mGamma * zprime + 1.) / mLx;
        const double scale2 = mGamma * mDepthOnLz / mLx;
        Vec3d out = mSecondMap.applyInverseJacobian(in);
        out.x() = (out.x() - scale2 * centered.x() * out.z() / mDepthOnLz) / scale;
        out.y() = (out.y() - scale2 * centered.y() * out.z() / mDepthOnLz) / scale;
        out.z() = out.z() / mDepthOnLz;
        return out;
    }
    /// @brief Return the Jacobian Transpose of the map applied to vector @c in at @c indexloc.
    /// @details This transforms range-space gradients to domain-space gradients.
    Vec3d applyJT(const Vec3d& in, const Vec3d& isloc) const override {
        const Vec3d tmp = mSecondMap.applyJT(in);
        // Move the (x,y)-center of the bbox's near face to the origin in index space.
        Vec3d centered(isloc);
        centered = centered - mBBox.min();
        centered.x() -= mXo;
        centered.y() -= mYo;
        // scale the z-direction by depth / Lz (the z voxel count)
        const double zprime = centered.z()*mDepthOnLz;
        const double scale = (mGamma * zprime + 1.) / mLx;
        const double scale2 = mGamma * mDepthOnLz / mLx;
        return Vec3d(scale * tmp.x(),
            scale * tmp.y(),
            scale2 * centered.x()* tmp.x() +
            scale2 * centered.y()* tmp.y() +
            mDepthOnLz * tmp.z());
    }
    /// Return the Jacobian Transpose of the second map applied to @c in.
    Vec3d applyJT(const Vec3d& in) const override {
        return mSecondMap.applyJT(in);
    }
    /// Return the transpose of the inverse Jacobian of the linear second map applied to @c in
    Vec3d applyIJT(const Vec3d& in) const override { return mSecondMap.applyIJT(in); }
    // the Jacobian of the nonlinear part of the transform is a sparse matrix
    // Jacobian^(-T) =
    //
    //    (Lx)( 1/s               0              0 )
    //        ( 0                 1/s            0 )
    //        ( -(x-xo)g/(sLx)   -(y-yo)g/(sLx)  Lz/(Depth Lx) )
    /// Return the transpose of the inverse Jacobian (evaluated at @a ijk) applied to @a d1_is.
    /// @c ijk is the location in the pre-image space (e.g. index space)
    Vec3d applyIJT(const Vec3d& d1_is, const Vec3d& ijk) const override
    {
        const Vec3d loc = applyFrustumMap(ijk);
        const double s = mGamma * loc.z() + 1.;
        // verify that we aren't at the singularity
        if (isApproxEqual(s, 0.)) {
            OPENVDB_THROW(ArithmeticError, "Tried to evaluate the frustum transform"
                " at the singular focal point (e.g. camera)");
        }
        const double sinv = 1.0/s;        // 1/(z*gamma + 1)
        const double pt0 = mLx * sinv;    // Lx / (z*gamma +1)
        const double pt1 = mGamma * pt0;  // gamma * Lx / ( z*gamma +1)
        const double pt2 = pt1 * sinv;    // gamma * Lx / ( z*gamma +1)**2
        const Mat3d& jacinv = mSecondMap.getConstJacobianInv();
        // compute \frac{\partial E_i}{\partial x_j}
        Mat3d gradE(Mat3d::zero());
        for (int j = 0; j < 3; ++j ) {
            gradE(0,j) = pt0 * jacinv(0,j) - pt2 * loc.x()*jacinv(2,j);
            gradE(1,j) = pt0 * jacinv(1,j) - pt2 * loc.y()*jacinv(2,j);
            gradE(2,j) = (1./mDepthOnLz) * jacinv(2,j);
        }
        Vec3d result;
        for (int i = 0; i < 3; ++i) {
            result(i) = d1_is(0) * gradE(0,i) + d1_is(1) * gradE(1,i) + d1_is(2) * gradE(2,i);
        }
        return result;
    }
    /// Return the Jacobian Curvature for the linear second map
    Mat3d applyIJC(const Mat3d& in) const override { return mSecondMap.applyIJC(in); }
    /// Return the Jacobian Curvature: all the second derivatives in range space
    /// @param d2_is second derivative matrix computed in index space
    /// @param d1_is gradient computed in index space
    /// @param ijk the index space location where the result is computed
    Mat3d applyIJC(const Mat3d& d2_is, const Vec3d& d1_is, const Vec3d& ijk) const override
    {
        const Vec3d loc = applyFrustumMap(ijk);
        const double s =  mGamma * loc.z()  + 1.;
        // verify that we aren't at the singularity
        if (isApproxEqual(s, 0.)) {
            OPENVDB_THROW(ArithmeticError, "Tried to evaluate the frustum transform"
                " at the singular focal point (e.g. camera)");
        }
        // precompute
        const double sinv = 1.0/s;        // 1/(z*gamma + 1)
        const double pt0 = mLx * sinv;    // Lx / (z*gamma +1)
        const double pt1 = mGamma * pt0;  // gamma * Lx / ( z*gamma +1)
        const double pt2 = pt1 * sinv;    // gamma * Lx / ( z*gamma +1)**2
        const double pt3 = pt2 * sinv;    // gamma * Lx / ( z*gamma +1)**3
        const Mat3d& jacinv = mSecondMap.getConstJacobianInv();
        // compute \frac{\partial^2 E_i}{\partial x_j \partial x_k}
        Mat3d matE0(Mat3d::zero());
        Mat3d matE1(Mat3d::zero()); // matE2 = 0
        for(int j = 0; j < 3; j++) {
            for (int k = 0; k < 3; k++) {
                const double pt4 =  2. * jacinv(2,j) * jacinv(2,k) * pt3;
                matE0(j,k) = -(jacinv(0,j) * jacinv(2,k) + jacinv(2,j) * jacinv(0,k)) * pt2 +
                    pt4 * loc.x();
                matE1(j,k) = -(jacinv(1,j) * jacinv(2,k) + jacinv(2,j) * jacinv(1,k)) * pt2 +
                    pt4 * loc.y();
            }
        }
        // compute \frac{\partial E_i}{\partial x_j}
        Mat3d gradE(Mat3d::zero());
        for (int j = 0; j < 3; ++j ) {
            gradE(0,j) = pt0 * jacinv(0,j) - pt2 * loc.x()*jacinv(2,j);
            gradE(1,j) = pt0 * jacinv(1,j) - pt2 * loc.y()*jacinv(2,j);
            gradE(2,j) = (1./mDepthOnLz) * jacinv(2,j);
        }
        Mat3d result(Mat3d::zero());
        // compute \fac{\partial E_j}{\partial x_m} \fac{\partial E_i}{\partial x_n}
        // \frac{\partial^2 input}{\partial E_i \partial E_j}
        for (int m = 0; m < 3; ++m ) {
            for ( int n = 0; n < 3; ++n) {
                for (int i = 0; i < 3; ++i ) {
                    for (int j = 0; j < 3; ++j) {
                        result(m, n) += gradE(j, m) * gradE(i, n) * d2_is(i, j);
                    }
                }
            }
        }
        for (int m = 0; m < 3; ++m ) {
            for ( int n = 0; n < 3; ++n) {
                result(m, n) +=
                    matE0(m, n) * d1_is(0) + matE1(m, n) * d1_is(1);// + matE2(m, n) * d1_is(2);
            }
        }
        return result;
    }
    /// Return the determinant of the Jacobian of the linear second map only
    double determinant() const override {return mSecondMap.determinant();} // second map only
    /// Return the determinant of the Jacobian evaluated at @c loc
    /// @c loc is a location in the pre-image space (e.g., index space)
    double determinant(const Vec3d& loc) const override
    {
        double s = mGamma * loc.z() + 1.0;
        double frustum_determinant = s * s * mDepthOnLzLxLx;
        return mSecondMap.determinant() * frustum_determinant;
    }
    /// Return the size of a voxel at the center of the near plane
    Vec3d voxelSize() const override
    {
        const Vec3d loc( 0.5*(mBBox.min().x() + mBBox.max().x()),
            0.5*(mBBox.min().y() + mBBox.max().y()),
            mBBox.min().z());
        return voxelSize(loc);
    }
    /// @brief Returns the lengths of the images of the three segments
    /// from @a loc to @a loc + (1,0,0), from @a loc to @a loc + (0,1,0)
    /// and from @a loc to @a loc + (0,0,1)
    /// @param loc  a location in the pre-image space (e.g., index space)
    Vec3d voxelSize(const Vec3d& loc) const override
    {
        Vec3d out, pos = applyMap(loc);
        out(0) = (applyMap(loc + Vec3d(1,0,0)) - pos).length();
        out(1) = (applyMap(loc + Vec3d(0,1,0)) - pos).length();
        out(2) = (applyMap(loc + Vec3d(0,0,1)) - pos).length();
        return out;
    }
    /// Return an AffineMap copy of the (linear) second map.
    AffineMap::Ptr getAffineMap() const override { return mSecondMap.getAffineMap(); }
    /// set the taper value, the ratio of nearplane width / far plane width
    void setTaper(double t) { mTaper = t; init();}
    /// Return the taper value.
    double getTaper() const { return mTaper; }
    /// set the frustum depth: distance between near and far plane = frustum depth * frustum x-width
    void setDepth(double d) { mDepth = d; init();}
    /// Return the unscaled frustum depth
    double getDepth() const { return mDepth; }
    // gamma a non-dimensional number: nearplane x-width / camera to near plane distance
    double getGamma() const { return mGamma; }
    /// Return the bounding box that defines the frustum in pre-image space
    const BBoxd& getBBox() const { return mBBox; }
    /// Return MapBase::Ptr& to the second map
    const AffineMap& secondMap() const { return mSecondMap; }
    /// Return @c true if the bounding box in index space that defines the region that
    /// is mapped into the frustum is non-zero, otherwise @c false
    bool isValid() const { return !mBBox.empty();}
    /// Return @c true if the second map is a uniform scale, Rotation and translation
    bool hasSimpleAffine() const { return mHasSimpleAffine; }
    /// read serialization
    void read(std::istream& is) override
    {
        // for backward compatibility with earlier version
        if (io::getFormatVersion(is) < OPENVDB_FILE_VERSION_FLOAT_FRUSTUM_BBOX ) {
            CoordBBox bb;
            bb.read(is);
            mBBox = BBoxd(bb.min().asVec3d(), bb.max().asVec3d());
        } else {
            mBBox.read(is);
        }
        is.read(reinterpret_cast<char*>(&mTaper), sizeof(double));
        is.read(reinterpret_cast<char*>(&mDepth), sizeof(double));
        // Read the second map's type.
        Name type = readString(is);
        // Check if the map has been registered.
        if(!MapRegistry::isRegistered(type)) {
            OPENVDB_THROW(KeyError, "Map " << type << " is not registered");
        }
        // Create the second map of the type and then read it in.
        MapBase::Ptr proxy =  math::MapRegistry::createMap(type);
        proxy->read(is);
        mSecondMap = *(proxy->getAffineMap());
        init();
    }
    /// write serialization
    void write(std::ostream& os) const override
    {
        mBBox.write(os);
        os.write(reinterpret_cast<const char*>(&mTaper), sizeof(double));
        os.write(reinterpret_cast<const char*>(&mDepth), sizeof(double));
        writeString(os, mSecondMap.type());
        mSecondMap.write(os);
    }
    /// string serialization, useful for debugging
    std::string str() const override
    {
        std::ostringstream buffer;
        buffer << " - taper: " << mTaper << std::endl;
        buffer << " - depth: " << mDepth << std::endl;
        buffer << " SecondMap: "<< mSecondMap.type() << std::endl;
        buffer << mSecondMap.str() << std::endl;
        return buffer.str();
    }
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of prepending the given rotation to the linear part of this map
    MapBase::Ptr preRotate(double radians, Axis axis = X_AXIS) const override
    {
        return MapBase::Ptr(
            new NonlinearFrustumMap(mBBox, mTaper, mDepth, mSecondMap.preRotate(radians, axis)));
    }
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of prepending the given translation to the linear part of this map
    MapBase::Ptr preTranslate(const Vec3d& t) const override
    {
        return MapBase::Ptr(
            new NonlinearFrustumMap(mBBox, mTaper, mDepth, mSecondMap.preTranslate(t)));
    }
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of prepending the given scale to the linear part of this map
    MapBase::Ptr preScale(const Vec3d& s) const override
    {
        return MapBase::Ptr(
            new NonlinearFrustumMap(mBBox, mTaper, mDepth, mSecondMap.preScale(s)));
    }
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of prepending the given shear to the linear part of this map
    MapBase::Ptr preShear(double shear, Axis axis0, Axis axis1) const override
    {
        return MapBase::Ptr(new NonlinearFrustumMap(
            mBBox, mTaper, mDepth, mSecondMap.preShear(shear, axis0, axis1)));
    }
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of appending the given rotation to the linear part of this map.
    MapBase::Ptr postRotate(double radians, Axis axis = X_AXIS) const override
    {
        return MapBase::Ptr(
            new NonlinearFrustumMap(mBBox, mTaper, mDepth, mSecondMap.postRotate(radians, axis)));
    }
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of appending the given translation to the linear part of this map.
    MapBase::Ptr postTranslate(const Vec3d& t) const override
    {
        return MapBase::Ptr(
            new NonlinearFrustumMap(mBBox, mTaper, mDepth, mSecondMap.postTranslate(t)));
    }
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of appending the given scale to the linear part of this map.
    MapBase::Ptr postScale(const Vec3d& s) const override
    {
        return MapBase::Ptr(
            new NonlinearFrustumMap(mBBox, mTaper, mDepth, mSecondMap.postScale(s)));
    }
    /// @brief Return a MapBase::Ptr to a new map that is the result
    /// of appending the given shear to the linear part of this map.
    MapBase::Ptr postShear(double shear, Axis axis0, Axis axis1) const override
    {
        return MapBase::Ptr(new NonlinearFrustumMap(
            mBBox, mTaper, mDepth, mSecondMap.postShear(shear, axis0, axis1)));
    }
private:
    /// Recompute all the derived constants (mLx..mDepthOnLzLxLx, mHasSimpleAffine)
    /// from mBBox, mTaper, mDepth and mSecondMap.
    /// @throw ArithmeticError if the bounding box is degenerate in any direction.
    void init()
    {
        // set up as a frustum
        mLx = mBBox.extents().x();
        mLy = mBBox.extents().y();
        mLz = mBBox.extents().z();
        if (isApproxEqual(mLx,0.) || isApproxEqual(mLy,0.) || isApproxEqual(mLz,0.) ) {
            OPENVDB_THROW(ArithmeticError, "The index space bounding box"
                " must have at least two index points in each direction.");
        }
        mXo = 0.5* mLx;
        mYo = 0.5* mLy;
        // mDepth is non-dimensionalized on near
        mGamma = (1./mTaper - 1) / mDepth;
        mDepthOnLz = mDepth/mLz;
        mDepthOnLzLxLx = mDepthOnLz/(mLx * mLx);
        /// test for shear and non-uniform scale
        mHasSimpleAffine = true;
        Vec3d tmp = mSecondMap.voxelSize();
        /// false if there is non-uniform scale
        if (!isApproxEqual(tmp(0), tmp(1))) { mHasSimpleAffine = false; return; }
        if (!isApproxEqual(tmp(0), tmp(2))) { mHasSimpleAffine = false; return; }
        Vec3d trans = mSecondMap.applyMap(Vec3d(0,0,0));
        /// look for shear
        Vec3d tmp1 = mSecondMap.applyMap(Vec3d(1,0,0)) - trans;
        Vec3d tmp2 = mSecondMap.applyMap(Vec3d(0,1,0)) - trans;
        Vec3d tmp3 = mSecondMap.applyMap(Vec3d(0,0,1)) - trans;
        /// false if there is shear
        if (!isApproxEqual(tmp1.dot(tmp2), 0., 1.e-7)) { mHasSimpleAffine = false; return; }
        if (!isApproxEqual(tmp2.dot(tmp3), 0., 1.e-7)) { mHasSimpleAffine = false; return; }
        if (!isApproxEqual(tmp3.dot(tmp1), 0., 1.e-7)) { mHasSimpleAffine = false; return; }
    }
    /// Apply only the nonlinear (box-to-frustum) part of the map.
    Vec3d applyFrustumMap(const Vec3d& in) const
    {
        // Move the (x,y)-center of the bbox's near face
        // to the origin in index space.
        Vec3d out(in);
        out = out - mBBox.min();
        out.x() -= mXo;
        out.y() -= mYo;
        // scale the z-direction on depth / K count
        out.z() *= mDepthOnLz;
        double scale = (mGamma * out.z() + 1.)/ mLx;
        // scale the x-y by the x-length and apply taper
        out.x() *= scale ;
        out.y() *= scale ;
        return out;
    }
    /// Apply the inverse of the nonlinear (box-to-frustum) part of the map.
    Vec3d applyFrustumInverseMap(const Vec3d& in) const
    {
        // invert taper and resize:  scale = 1/( (z+1)/2 (mt-1) + 1)
        Vec3d out(in);
        double invScale = mLx / (mGamma * out.z() + 1.);
        out.x() *= invScale;
        out.y() *= invScale;
        out.x() += mXo;
        out.y() += mYo;
        out.z() /= mDepthOnLz;
        // move back
        out = out +  mBBox.min();
        return out;
    }
    // bounding box in index space used in Frustum transforms.
    BBoxd mBBox;
    // taper value used in constructing Frustums.
    double mTaper;
    double mDepth;
    // defines the second map
    AffineMap mSecondMap;
    // these are derived from the above.
    double mLx, mLy, mLz;
    double mXo, mYo, mGamma, mDepthOnLz, mDepthOnLzLxLx;
    // true: if the mSecondMap is linear and has no shear, and has no non-uniform scale
    bool mHasSimpleAffine;
}; // class NonlinearFrustumMap
////////////////////////////////////////
/// @brief Creates the composition of two maps, each of which could itself be a composition.
/// In the case that each component of the composition is classified as linear, an
/// accelerating AffineMap representation of the full composition is stored.
template<typename FirstMapType, typename SecondMapType>
class CompoundMap
{
public:
    using MyType = CompoundMap<FirstMapType, SecondMapType>;
    using Ptr = SharedPtr<MyType>;
    using ConstPtr = SharedPtr<const MyType>;
    CompoundMap() { updateAffineMatrix(); }
    CompoundMap(const FirstMapType& f, const SecondMapType& s): mFirstMap(f), mSecondMap(s)
    {
        updateAffineMatrix();
    }
    CompoundMap(const MyType& other):
        mFirstMap(other.mFirstMap),
        mSecondMap(other.mSecondMap),
        mAffineMap(other.mAffineMap)
    {}
    /// Return the composite name, e.g. "ScaleMap:TranslationMap".
    Name type() const { return mapType(); }
    static Name mapType()
    {
        return (FirstMapType::mapType() + Name(":") + SecondMapType::mapType());
    }
    bool operator==(const MyType& other) const
    {
        if (mFirstMap != other.mFirstMap)   return false;
        if (mSecondMap != other.mSecondMap) return false;
        if (mAffineMap != other.mAffineMap) return false;
        return true;
    }
    bool operator!=(const MyType& other) const { return !(*this == other); }
    MyType& operator=(const MyType& other)
    {
        mFirstMap = other.mFirstMap;
        mSecondMap = other.mSecondMap;
        mAffineMap = other.mAffineMap;
        return *this;
    }
    bool isIdentity() const
    {
        // For linear compositions the cached affine matrix is authoritative;
        // otherwise fall back to testing each component.
        if (is_linear<MyType>::value) {
            return mAffineMap.isIdentity();
        } else {
            return mFirstMap.isIdentity()&&mSecondMap.isIdentity();
        }
    }
    bool isDiagonal() const {
        if (is_linear<MyType>::value) {
            return mAffineMap.isDiagonal();
        } else {
            return mFirstMap.isDiagonal()&&mSecondMap.isDiagonal();
        }
    }
    /// @brief Return an AffineMap equivalent to this composition.
    /// @throw ArithmeticError if either component is nonlinear, since no
    /// constant matrix can represent the composition in that case.
    AffineMap::Ptr getAffineMap() const
    {
        if (is_linear<MyType>::value) {
            AffineMap::Ptr affine(new AffineMap(mAffineMap));
            return affine;
        } else {
            OPENVDB_THROW(ArithmeticError,
                "Constant affine matrix representation not possible for this nonlinear map");
        }
    }
    // direct decomposition
    const FirstMapType& firstMap() const { return mFirstMap; }
    const SecondMapType& secondMap() const {return mSecondMap; }
    void setFirstMap(const FirstMapType& first) { mFirstMap = first; updateAffineMatrix(); }
    void setSecondMap(const SecondMapType& second) { mSecondMap = second; updateAffineMatrix(); }
    void read(std::istream& is)
    {
        mAffineMap.read(is);
        mFirstMap.read(is);
        mSecondMap.read(is);
    }
    void write(std::ostream& os) const
    {
        mAffineMap.write(os);
        mFirstMap.write(os);
        mSecondMap.write(os);
    }
private:
    /// Rebuild the cached acceleration matrix; a no-op for nonlinear compositions.
    void updateAffineMatrix()
    {
        if (is_linear<MyType>::value) {
            // both maps need to be linear, these methods are only defined for linear maps
            AffineMap::Ptr first = mFirstMap.getAffineMap();
            AffineMap::Ptr second= mSecondMap.getAffineMap();
            mAffineMap = AffineMap(*first, *second);
        }
    }
    FirstMapType   mFirstMap;
    SecondMapType  mSecondMap;
    // used for acceleration
    AffineMap      mAffineMap;
}; // class CompoundMap
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_MATH_MAPS_HAS_BEEN_INCLUDED
| 100,968 | C | 36.025669 | 105 | 0.635637 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/Proximity.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#include "Proximity.h"
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
/// @brief Return the point on triangle (@a a, @a b, @a c) closest to point @a p,
/// and store the barycentric coordinates of that point in @a uvw, so that the
/// result equals uvw[0]*a + uvw[1]*b + uvw[2]*c.
/// @note Case analysis over the triangle's Voronoi regions, following Ericson,
/// "Real-Time Collision Detection", section 5.1.5.
OPENVDB_API Vec3d
closestPointOnTriangleToPoint(
    const Vec3d& a, const Vec3d& b, const Vec3d& c, const Vec3d& p, Vec3d& uvw)
{
    uvw.setZero();
    // Fully degenerate triangle (all vertices coincide): closest point is a.
    if ((isApproxEqual(a, b) && isApproxEqual(a, c))) {
        uvw[0] = 1.0;
        return a;
    }
    Vec3d ab = b - a, ac = c - a, ap = p - a;
    double d1 = ab.dot(ap), d2 = ac.dot(ap);
    // Degenerate triangle edges: fall back to the remaining non-degenerate segment.
    if (isApproxEqual(a, b)) {
        double t = 0.0;
        Vec3d cp = closestPointOnSegmentToPoint(a, c, p, t);
        uvw[0] = 1.0 - t;
        uvw[2] = t;
        return cp;
    } else if (isApproxEqual(a, c) || isApproxEqual(b, c)) {
        double t = 0.0;
        Vec3d cp = closestPointOnSegmentToPoint(a, b, p, t);
        uvw[0] = 1.0 - t;
        uvw[1] = t;
        return cp;
    }
    // Check if P in vertex region outside A
    if (d1 <= 0.0 && d2 <= 0.0) {
        uvw[0] = 1.0;
        return a; // barycentric coordinates (1,0,0)
    }
    // Check if P in vertex region outside B
    Vec3d bp = p - b;
    double d3 = ab.dot(bp), d4 = ac.dot(bp);
    if (d3 >= 0.0 && d4 <= d3) {
        uvw[1] = 1.0;
        return b; // barycentric coordinates (0,1,0)
    }
    // Check if P in edge region of AB, if so return projection of P onto AB
    double vc = d1 * d4 - d3 * d2;
    if (vc <= 0.0 && d1 >= 0.0 && d3 <= 0.0) {
        uvw[1] = d1 / (d1 - d3);
        uvw[0] = 1.0 - uvw[1];
        return a + uvw[1] * ab; // barycentric coordinates (1-v,v,0)
    }
    // Check if P in vertex region outside C
    Vec3d cp = p - c;
    double d5 = ab.dot(cp), d6 = ac.dot(cp);
    if (d6 >= 0.0 && d5 <= d6) {
        uvw[2] = 1.0;
        return c; // barycentric coordinates (0,0,1)
    }
    // Check if P in edge region of AC, if so return projection of P onto AC
    double vb = d5 * d2 - d1 * d6;
    if (vb <= 0.0 && d2 >= 0.0 && d6 <= 0.0) {
        uvw[2] = d2 / (d2 - d6);
        uvw[0] = 1.0 - uvw[2];
        return a + uvw[2] * ac; // barycentric coordinates (1-w,0,w)
    }
    // Check if P in edge region of BC, if so return projection of P onto BC
    double va = d3*d6 - d5*d4;
    if (va <= 0.0 && (d4 - d3) >= 0.0 && (d5 - d6) >= 0.0) {
        uvw[2] = (d4 - d3) / ((d4 - d3) + (d5 - d6));
        uvw[1] = 1.0 - uvw[2];
        return b + uvw[2] * (c - b); // barycentric coordinates (0,1-w,w)
    }
    // P inside face region. Compute Q through its barycentric coordinates (u,v,w)
    double denom = 1.0 / (va + vb + vc);
    uvw[2] = vc * denom;
    uvw[1] = vb * denom;
    uvw[0] = 1.0 - uvw[1] - uvw[2];
    return a + ab*uvw[1] + ac*uvw[2]; // = u*a + v*b + w*c , u= va*denom = 1.0-v-w
}
/// @brief Return the point on segment [@a a, @a b] closest to point @a p, and
/// store the normalized parameter of that point along the segment in @a t
/// (0 at @a a, 1 at @a b).
OPENVDB_API Vec3d
closestPointOnSegmentToPoint(const Vec3d& a, const Vec3d& b, const Vec3d& p, double& t)
{
    const Vec3d ab = b - a;
    // Unnormalized projection of (p - a) onto ab.
    t = (p - a).dot(ab);
    if (t <= 0.0) {
        // p projects before a: clamp to the a endpoint.
        t = 0.0;
        return a;
    }
    const double denom = ab.dot(ab); // ||ab||^2, always nonnegative
    if (t >= denom) {
        // p projects beyond b: clamp to the b endpoint.
        t = 1.0;
        return b;
    }
    // p projects strictly inside the segment.
    t /= denom;
    return a + (ab * t);
}
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
| 3,578 | C++ | 26.113636 | 87 | 0.508944 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/Vec2.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_MATH_VEC2_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_VEC2_HAS_BEEN_INCLUDED
#include <openvdb/Exceptions.h>
#include "Math.h"
#include "Tuple.h"
#include <algorithm>
#include <cmath>
#include <type_traits>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
template<typename T> class Mat2;
template<typename T>
class Vec2: public Tuple<2, T>
{
public:
using value_type = T;
using ValueType = T;
    /// Trivial constructor, the vector is NOT initialized
#if OPENVDB_ABI_VERSION_NUMBER >= 8
    /// @note destructor, copy constructor, assignment operator and
    ///   move constructor are left to be defined by the compiler (default)
    Vec2() = default;
#else
    Vec2() {}
#endif
    /// @brief Construct a vector all of whose components have the given value.
    explicit Vec2(T val) { this->mm[0] = this->mm[1] = val; }
    /// Constructor with two arguments, e.g.   Vec2f v(1,2);
    Vec2(T x, T y)
    {
        this->mm[0] = x;
        this->mm[1] = y;
    }
    /// Constructor with array argument, e.g.   float a[2]; Vec2f v(a);
    /// @warning No bounds checking: @a a must point to at least two elements.
    template <typename Source>
    Vec2(Source *a)
    {
        this->mm[0] = static_cast<T>(a[0]);
        this->mm[1] = static_cast<T>(a[1]);
    }
    /// Conversion constructor from a 2-tuple of a possibly different value type.
    template<typename Source>
    explicit Vec2(const Tuple<2, Source> &t)
    {
        this->mm[0] = static_cast<T>(t[0]);
        this->mm[1] = static_cast<T>(t[1]);
    }
    /// @brief Construct a vector all of whose components have the given value,
    /// which may be of an arithmetic type different from this vector's value type.
    /// @details Type conversion warnings are suppressed.
    template<typename Other>
    explicit Vec2(Other val,
        typename std::enable_if<std::is_arithmetic<Other>::value, Conversion>::type = Conversion{})
    {
        this->mm[0] = this->mm[1] = static_cast<T>(val);
    }
    /// Reference to the component, e.g.   v.x() = 4.5f;
    T& x() {return this->mm[0];}
    T& y() {return this->mm[1];}
    /// Get the component, e.g.   float f = v.y();
    T x() const {return this->mm[0];}
    T y() const {return this->mm[1];}
    /// Alternative indexed reference to the elements
    T& operator()(int i) {return this->mm[i];}
    /// Alternative indexed constant reference to the elements
    T operator()(int i) const {return this->mm[i];}
    /// Direct access to the underlying two-element array.
    T* asPointer() {return this->mm;}
    const T* asPointer() const {return this->mm;}
    /// "this" vector gets initialized to [x, y];
    /// calling v.init() has the same effect as calling v = Vec2::zero()
    const Vec2<T>& init(T x=0, T y=0)
    {
        this->mm[0] = x; this->mm[1] = y;
        return *this;
    }
    /// Set "this" vector to zero
    const Vec2<T>& setZero()
    {
        this->mm[0] = 0; this->mm[1] = 0;
        return *this;
    }
    /// Assignment operator (possibly converting from a different value type)
    template<typename Source>
    const Vec2<T>& operator=(const Vec2<Source> &v)
    {
        // note: don't static_cast because that suppresses warnings
        this->mm[0] = v[0];
        this->mm[1] = v[1];
        return *this;
    }
    /// Equality operator, does exact floating point comparisons
    bool operator==(const Vec2<T> &v) const
    {
        return (isExactlyEqual(this->mm[0], v.mm[0]) && isExactlyEqual(this->mm[1], v.mm[1]));
    }
    /// Inequality operator, does exact floating point comparisons
    bool operator!=(const Vec2<T> &v) const { return !(*this==v); }
    /// Test if "this" vector is equivalent to vector v with tolerance of eps
    bool eq(const Vec2<T> &v, T eps = static_cast<T>(1.0e-7)) const
    {
        return isApproxEqual(this->mm[0], v.mm[0], eps) &&
            isApproxEqual(this->mm[1], v.mm[1], eps);
    }
/// Negation operator, for e.g. v1 = -v2;
Vec2<T> operator-() const {return Vec2<T>(-this->mm[0], -this->mm[1]);}
/// @brief Set this vector to the componentwise sum v1 + v2.
/// @note "this", @a v1 and @a v2 need not be distinct objects,
/// e.g. v.add(v1,v);
template <typename T0, typename T1>
const Vec2<T>& add(const Vec2<T0> &v1, const Vec2<T1> &v2)
{
    for (int i = 0; i < 2; ++i) this->mm[i] = v1[i] + v2[i];
    return *this;
}
/// @brief Set this vector to the componentwise difference v1 - v2.
/// @note "this", @a v1 and @a v2 need not be distinct objects,
/// e.g. v.sub(v1,v);
template <typename T0, typename T1>
const Vec2<T>& sub(const Vec2<T0> &v1, const Vec2<T1> &v2)
{
    for (int i = 0; i < 2; ++i) this->mm[i] = v1[i] - v2[i];
    return *this;
}
/// this = scalar*v, v need not be a distinct object from "this",
/// e.g. v.scale(1.5,v1);
template <typename T0, typename T1>
const Vec2<T>& scale(T0 scalar, const Vec2<T1> &v)
{
this->mm[0] = scalar * v[0];
this->mm[1] = scalar * v[1];
return *this;
}
/// this = v / scalar; note that the scalar is the FIRST argument
template <typename T0, typename T1>
const Vec2<T> &div(T0 scalar, const Vec2<T1> &v)
{
this->mm[0] = v[0] / scalar;
this->mm[1] = v[1] / scalar;
return *this;
}
/// Dot product
T dot(const Vec2<T> &v) const { return this->mm[0]*v[0] + this->mm[1]*v[1]; } // trivial
/// @brief Return the Euclidean length of the vector.
/// @details The squared length is accumulated in T and the square root
/// is taken in double precision.
T length() const
{
    const double len2 = double(this->mm[0]*this->mm[0] + this->mm[1]*this->mm[1]);
    return static_cast<T>(sqrt(len2));
}
/// Squared length of the vector, much faster than length() as it
/// does not involve square root
T lengthSqr() const { return (this->mm[0]*this->mm[0] + this->mm[1]*this->mm[1]); }
/// Return a reference to itself after the exponent has been
/// applied to all the vector components.
inline const Vec2<T>& exp()
{
this->mm[0] = std::exp(this->mm[0]);
this->mm[1] = std::exp(this->mm[1]);
return *this;
}
/// Return a reference to itself after log has been
/// applied to all the vector components.
inline const Vec2<T>& log()
{
this->mm[0] = std::log(this->mm[0]);
this->mm[1] = std::log(this->mm[1]);
return *this;
}
/// Return the sum of all the vector components.
inline T sum() const
{
return this->mm[0] + this->mm[1];
}
/// Return the product of all the vector components.
inline T product() const
{
return this->mm[0] * this->mm[1];
}
/// @brief Scale this vector to unit length.
/// @return false (leaving the vector unchanged) if the length is within
/// @a eps of zero, true otherwise.
bool normalize(T eps = static_cast<T>(1.0e-8))
{
    const T len = length();
    if (isApproxEqual(len, T(0), eps)) return false;
    *this *= (T(1) / len);
    return true;
}
/// return normalized this, throws if null vector
Vec2<T> unit(T eps=0) const
{
T d;
return unit(eps, d); // length is discarded
}
/// @brief return normalized this and length, throws if null vector
/// @throw ArithmeticError if the length is within @a eps of zero
Vec2<T> unit(T eps, T& len) const
{
len = length();
if (isApproxEqual(len, T(0), eps)) {
OPENVDB_THROW(ArithmeticError, "Normalizing null 2-vector");
}
return *this / len;
}
/// return normalized this, or (1, 0) if this is null vector
Vec2<T> unitSafe() const
{
T l2 = lengthSqr();
return l2 ? *this/static_cast<T>(sqrt(l2)) : Vec2<T>(1,0);
}
/// Multiply each element of this vector by @a scalar.
template <typename S>
const Vec2<T> &operator*=(S scalar)
{
this->mm[0] *= scalar;
this->mm[1] *= scalar;
return *this;
}
/// Multiply each element of this vector by the corresponding element of the given vector.
template <typename S>
const Vec2<T> &operator*=(const Vec2<S> &v1)
{
this->mm[0] *= v1[0];
this->mm[1] *= v1[1];
return *this;
}
/// Divide each element of this vector by @a scalar.
template <typename S>
const Vec2<T> &operator/=(S scalar)
{
this->mm[0] /= scalar;
this->mm[1] /= scalar;
return *this;
}
/// Divide each element of this vector by the corresponding element of the given vector.
template <typename S>
const Vec2<T> &operator/=(const Vec2<S> &v1)
{
this->mm[0] /= v1[0];
this->mm[1] /= v1[1];
return *this;
}
/// Add @a scalar to each element of this vector.
template <typename S>
const Vec2<T> &operator+=(S scalar)
{
this->mm[0] += scalar;
this->mm[1] += scalar;
return *this;
}
/// Add each element of the given vector to the corresponding element of this vector.
template <typename S>
const Vec2<T> &operator+=(const Vec2<S> &v1)
{
this->mm[0] += v1[0];
this->mm[1] += v1[1];
return *this;
}
/// Subtract @a scalar from each element of this vector.
template <typename S>
const Vec2<T> &operator-=(S scalar)
{
this->mm[0] -= scalar;
this->mm[1] -= scalar;
return *this;
}
/// Subtract each element of the given vector from the corresponding element of this vector.
template <typename S>
const Vec2<T> &operator-=(const Vec2<S> &v1)
{
this->mm[0] -= v1[0];
this->mm[1] -= v1[1];
return *this;
}
// Number of cols, rows, elements
// (a Vec2 is treated as a 1 x 2 row vector)
static unsigned numRows() { return 1; }
static unsigned numColumns() { return 2; }
static unsigned numElements() { return 2; }
/// Returns the scalar component of v in the direction of onto, onto need
/// not be unit. e.g float c = Vec2f::component(v1,v2);
/// Returns 0 if the length of @a onto is within @a eps of zero.
T component(const Vec2<T> &onto, T eps = static_cast<T>(1.0e-8)) const
{
T l = onto.length();
if (isApproxEqual(l, T(0), eps)) return 0;
return dot(onto)*(T(1)/l);
}
/// Return the projection of v onto the vector, onto need not be unit
/// e.g. Vec2f v = Vec2f::projection(v,n);
/// Returns the zero vector if the squared length of @a onto is within @a eps of zero.
Vec2<T> projection(const Vec2<T> &onto, T eps = static_cast<T>(1.0e-8)) const
{
T l = onto.lengthSqr();
if (isApproxEqual(l, T(0), eps)) return Vec2::zero();
return onto*(dot(onto)*(T(1)/l));
}
/// Return an arbitrary unit vector perpendicular to v
/// Vector v must be a unit vector
/// e.g. v.normalize(); Vec2f n = Vec2f::getArbPerpendicular(v);
/// ((-y, x) is this vector rotated 90 degrees counterclockwise)
Vec2<T> getArbPerpendicular() const { return Vec2<T>(-this->mm[1], this->mm[0]); }
/// Predefined constants, e.g. Vec2f v = Vec2f::xNegAxis();
static Vec2<T> zero() { return Vec2<T>(0, 0); }
static Vec2<T> ones() { return Vec2<T>(1, 1); }
};
/// Multiply each element of the given vector by @a scalar and return the result.
template <typename S, typename T>
inline Vec2<typename promote<S, T>::type> operator*(S scalar, const Vec2<T> &v)
{
return v * scalar; // delegate to the (vector, scalar) overload below
}
/// Multiply each element of the given vector by @a scalar and return the result.
template <typename S, typename T>
inline Vec2<typename promote<S, T>::type> operator*(const Vec2<T> &v, S scalar)
{
Vec2<typename promote<S, T>::type> result(v);
result *= scalar;
return result;
}
/// Multiply corresponding elements of @a v0 and @a v1 and return the result.
template <typename T0, typename T1>
inline Vec2<typename promote<T0, T1>::type> operator*(const Vec2<T0> &v0, const Vec2<T1> &v1)
{
Vec2<typename promote<T0, T1>::type> result(v0[0] * v1[0], v0[1] * v1[1]);
return result;
}
/// Divide @a scalar by each element of the given vector and return the result.
template <typename S, typename T>
inline Vec2<typename promote<S, T>::type> operator/(S scalar, const Vec2<T> &v)
{
return Vec2<typename promote<S, T>::type>(scalar/v[0], scalar/v[1]);
}
/// Divide each element of the given vector by @a scalar and return the result.
template <typename S, typename T>
inline Vec2<typename promote<S, T>::type> operator/(const Vec2<T> &v, S scalar)
{
Vec2<typename promote<S, T>::type> result(v);
result /= scalar;
return result;
}
/// Divide corresponding elements of @a v0 and @a v1 and return the result.
template <typename T0, typename T1>
inline Vec2<typename promote<T0, T1>::type> operator/(const Vec2<T0> &v0, const Vec2<T1> &v1)
{
Vec2<typename promote<T0, T1>::type> result(v0[0] / v1[0], v0[1] / v1[1]);
return result;
}
/// Add corresponding elements of @a v0 and @a v1 and return the result.
template <typename T0, typename T1>
inline Vec2<typename promote<T0, T1>::type> operator+(const Vec2<T0> &v0, const Vec2<T1> &v1)
{
Vec2<typename promote<T0, T1>::type> result(v0);
result += v1;
return result;
}
/// Add @a scalar to each element of the given vector and return the result.
template <typename S, typename T>
inline Vec2<typename promote<S, T>::type> operator+(const Vec2<T> &v, S scalar)
{
Vec2<typename promote<S, T>::type> result(v);
result += scalar;
return result;
}
/// Subtract corresponding elements of @a v0 and @a v1 and return the result.
template <typename T0, typename T1>
inline Vec2<typename promote<T0, T1>::type> operator-(const Vec2<T0> &v0, const Vec2<T1> &v1)
{
Vec2<typename promote<T0, T1>::type> result(v0);
result -= v1;
return result;
}
/// Subtract @a scalar from each element of the given vector and return the result.
template <typename S, typename T>
inline Vec2<typename promote<S, T>::type> operator-(const Vec2<T> &v, S scalar)
{
Vec2<typename promote<S, T>::type> result(v);
result -= scalar;
return result;
}
/// @brief Angle between two vectors, the result is between [0, pi],
/// e.g. float a = Vec2f::angle(v1,v2);
/// @note Both vectors are expected to be unit length.  The dot product
/// is clamped to [-1, 1] so that floating-point rounding on (nearly)
/// parallel inputs cannot push it outside the domain of acos and
/// produce a NaN.
template <typename T>
inline T angle(const Vec2<T> &v1, const Vec2<T> &v2)
{
    T c = v1.dot(v2);
    // Guard against |c| marginally exceeding 1 due to rounding error.
    if (c > T(1)) c = T(1);
    else if (c < T(-1)) c = T(-1);
    return acos(c);
}
/// @brief Componentwise approximate equality via Vec2::eq() with its default tolerance.
template <typename T>
inline bool
isApproxEqual(const Vec2<T>& a, const Vec2<T>& b)
{
return a.eq(b);
}
/// @brief Componentwise approximate equality with a per-component tolerance @a eps.
template <typename T>
inline bool
isApproxEqual(const Vec2<T>& a, const Vec2<T>& b, const Vec2<T>& eps)
{
return isApproxEqual(a.x(), b.x(), eps.x()) &&
isApproxEqual(a.y(), b.y(), eps.y());
}
/// @brief Return a vector whose components are the absolute values of @a v's components.
template<typename T>
inline Vec2<T>
Abs(const Vec2<T>& v)
{
return Vec2<T>(Abs(v[0]), Abs(v[1]));
}
/// Orthonormalize vectors v1 and v2 and store back the resulting basis
/// e.g. Vec2f::orthonormalize(v1,v2);
template <typename T>
inline void orthonormalize(Vec2<T> &v1, Vec2<T> &v2)
{
// If the input vectors are v0 and v1, then the Gram-Schmidt
// orthonormalization produces vectors u0 and u1 as follows,
//
// u0 = v0/|v0|
// u1 = (v1-(u0*v1)u0)/|v1-(u0*v1)u0|
//
// where |A| indicates length of vector A and A*B indicates dot
// product of vectors A and B.
// compute u0
v1.normalize();
// compute u1: remove the component of v2 along v1, then normalize
T d0 = v1.dot(v2);
v2 -= v1*d0;
v2.normalize();
}
/// \remark We are switching to a more explicit name because the semantics
/// are different from std::min/max. In that case, the function returns a
/// reference to one of the objects based on a comparator. Here, we must
/// fabricate a new object which might not match either of the inputs.
/// Return component-wise minimum of the two vectors.
template <typename T>
inline Vec2<T> minComponent(const Vec2<T> &v1, const Vec2<T> &v2)
{
return Vec2<T>(
std::min(v1.x(), v2.x()),
std::min(v1.y(), v2.y()));
}
/// Return component-wise maximum of the two vectors.
template <typename T>
inline Vec2<T> maxComponent(const Vec2<T> &v1, const Vec2<T> &v2)
{
return Vec2<T>(
std::max(v1.x(), v2.x()),
std::max(v1.y(), v2.y()))
;
}
/// @brief Return a vector with the exponent applied to each of
/// the components of the input vector.
/// @note Operates on a by-value copy; the caller's vector is unchanged.
template <typename T>
inline Vec2<T> Exp(Vec2<T> v) { return v.exp(); }
/// @brief Return a vector with log applied to each of
/// the components of the input vector.
/// @note Operates on a by-value copy; the caller's vector is unchanged.
template <typename T>
inline Vec2<T> Log(Vec2<T> v) { return v.log(); }
using Vec2i = Vec2<int32_t>;
using Vec2ui = Vec2<uint32_t>;
using Vec2s = Vec2<float>;
using Vec2d = Vec2<double>;
#if OPENVDB_ABI_VERSION_NUMBER >= 8
OPENVDB_IS_POD(Vec2i)
OPENVDB_IS_POD(Vec2ui)
OPENVDB_IS_POD(Vec2s)
OPENVDB_IS_POD(Vec2d)
#endif
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_MATH_VEC2_HAS_BEEN_INCLUDED
| 16,195 | C | 28.393829 | 99 | 0.600494 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/Mat.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file Mat.h
/// @author Joshua Schpok
#ifndef OPENVDB_MATH_MAT_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_MAT_HAS_BEEN_INCLUDED
#include "Math.h"
#include <openvdb/Exceptions.h>
#include <algorithm> // for std::max()
#include <cmath>
#include <iostream>
#include <string>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
/// @class Mat "Mat.h"
/// A base class for square matrices.
template<unsigned SIZE, typename T>
class Mat
{
public:
using value_type = T;
using ValueType = T;
enum SIZE_ { size = SIZE };
#if OPENVDB_ABI_VERSION_NUMBER >= 8
/// Trivial constructor, the matrix is NOT initialized
/// @note destructor, copy constructor, assignment operator and
/// move constructor are left to be defined by the compiler (default)
Mat() = default;
#else
/// Default ctor. Does nothing. Required because declaring a copy (or
/// other) constructor means the default constructor gets left out.
Mat() { }
/// Copy constructor. Used when the class signature matches exactly.
Mat(Mat const &src) {
for (unsigned i(0); i < numElements(); ++i) {
mm[i] = src.mm[i];
}
}
Mat& operator=(Mat const& src) {
if (&src != this) {
for (unsigned i = 0; i < numElements(); ++i) {
mm[i] = src.mm[i];
}
}
return *this;
}
#endif
// Number of cols, rows, elements
static unsigned numRows() { return SIZE; }
static unsigned numColumns() { return SIZE; }
static unsigned numElements() { return SIZE*SIZE; }
/// @return string representation of matrix
/// Since output is multiline, optional indentation argument prefixes
/// each newline with that much white space. It does not indent
/// the first line, since you might be calling this inline:
///
/// cout << "matrix: " << mat.str(7)
///
/// matrix: [[1 2]
/// [3 4]]
std::string
str(unsigned indentation = 0) const {
std::string ret;
std::string indent;
// We add +1 since we're indenting one for the first '['
indent.append(indentation+1, ' ');
ret.append("[");
// For each row,
for (unsigned i(0); i < SIZE; i++) {
ret.append("[");
// For each column
for (unsigned j(0); j < SIZE; j++) {
// Put a comma after everything except the last
if (j) ret.append(", ");
ret.append(std::to_string(mm[(i*SIZE)+j]));
}
ret.append("]");
// At the end of every row (except the last)...
if (i < SIZE - 1) {
// ...suffix the row bracket with a comma, newline, and advance indentation.
ret.append(",\n");
ret.append(indent);
}
}
ret.append("]");
return ret;
}
/// Write a Mat to an output stream
friend std::ostream& operator<<(
std::ostream& ostr,
const Mat<SIZE, T>& m)
{
ostr << m.str();
return ostr;
}
/// Direct access to the internal data
T* asPointer() { return mm; }
const T* asPointer() const { return mm; }
//@{
/// Array style reference to ith row
T* operator[](int i) { return &(mm[i*SIZE]); }
const T* operator[](int i) const { return &(mm[i*SIZE]); }
//@}
void write(std::ostream& os) const {
os.write(reinterpret_cast<const char*>(&mm), sizeof(T)*SIZE*SIZE);
}
void read(std::istream& is) {
is.read(reinterpret_cast<char*>(&mm), sizeof(T)*SIZE*SIZE);
}
/// Return the maximum of the absolute of all elements in this matrix
T absMax() const {
T x = static_cast<T>(std::fabs(mm[0]));
for (unsigned i = 1; i < numElements(); ++i) {
x = std::max(x, static_cast<T>(std::fabs(mm[i])));
}
return x;
}
/// True if a Nan is present in this matrix
bool isNan() const {
for (unsigned i = 0; i < numElements(); ++i) {
if (math::isNan(mm[i])) return true;
}
return false;
}
/// True if an Inf is present in this matrix
bool isInfinite() const {
for (unsigned i = 0; i < numElements(); ++i) {
if (math::isInfinite(mm[i])) return true;
}
return false;
}
/// True if no Nan or Inf values are present
bool isFinite() const {
for (unsigned i = 0; i < numElements(); ++i) {
if (!math::isFinite(mm[i])) return false;
}
return true;
}
/// True if all elements are exactly zero
bool isZero() const {
for (unsigned i = 0; i < numElements(); ++i) {
if (!math::isZero(mm[i])) return false;
}
return true;
}
protected:
T mm[SIZE*SIZE];
};
template<typename T> class Quat;
template<typename T> class Vec3;
/// @brief Return the rotation matrix specified by the given quaternion.
/// @details The quaternion is normalized and used to construct the matrix.
/// Note that the matrix is transposed to match post-multiplication semantics.
template<class MatType>
MatType
rotation(const Quat<typename MatType::value_type> &q,
typename MatType::value_type eps = static_cast<typename MatType::value_type>(1.0e-8))
{
using T = typename MatType::value_type;
T qdot(q.dot(q));
T s(0);
// s = 2/|q|^2 folds normalization of q into the matrix terms below;
// if |q|^2 is within eps of zero, s stays 0 and the result is the identity.
if (!isApproxEqual(qdot, T(0.0),eps)) {
s = T(2.0 / qdot);
}
T x = s*q.x();
T y = s*q.y();
T z = s*q.z();
T wx = x*q.w();
T wy = y*q.w();
T wz = z*q.w();
T xx = x*q.x();
T xy = y*q.x();
T xz = z*q.x();
T yy = y*q.y();
T yz = z*q.y();
T zz = z*q.z();
MatType r;
r[0][0]=T(1) - (yy+zz); r[0][1]=xy + wz; r[0][2]=xz - wy;
r[1][0]=xy - wz; r[1][1]=T(1) - (xx+zz); r[1][2]=yz + wx;
r[2][0]=xz + wy; r[2][1]=yz - wx; r[2][2]=T(1) - (xx+yy);
// For 4x4 matrices, fill the last row/column with 0s and a 1 on the diagonal.
if(MatType::numColumns() == 4) padMat4(r);
return r;
}
/// @brief Return a matrix for rotation by @a angle radians about the given @a axis.
/// @param axis The axis (one of X, Y, Z) to rotate about.
/// @param angle The rotation angle, in radians.
/// @throw ValueError if @a axis is not one of the three principal axes.
template<class MatType>
MatType
rotation(Axis axis, typename MatType::value_type angle)
{
    using T = typename MatType::value_type;
    const T cosAngle = static_cast<T>(cos(angle));
    const T sinAngle = static_cast<T>(sin(angle));
    MatType result;
    result.setIdentity();
    switch (axis) {
    case X_AXIS:
        // Rotation in the y-z plane.
        result[1][1] = cosAngle;  result[1][2] = sinAngle;
        result[2][1] = -sinAngle; result[2][2] = cosAngle;
        break;
    case Y_AXIS:
        // Rotation in the z-x plane.
        result[0][0] = cosAngle;  result[0][2] = -sinAngle;
        result[2][0] = sinAngle;  result[2][2] = cosAngle;
        break;
    case Z_AXIS:
        // Rotation in the x-y plane.
        result[0][0] = cosAngle;  result[0][1] = sinAngle;
        result[1][0] = -sinAngle; result[1][1] = cosAngle;
        break;
    default:
        throw ValueError("Unrecognized rotation axis");
    }
    return result;
}
/// @brief Return a matrix for rotation by @a angle radians about the given @a axis.
/// @note The axis is normalized internally (via Vec3::unit()), so it need
/// not be unit length.
template<class MatType>
MatType
rotation(const Vec3<typename MatType::value_type> &_axis, typename MatType::value_type angle)
{
using T = typename MatType::value_type;
T txy, txz, tyz, sx, sy, sz;
Vec3<T> axis(_axis.unit());
// compute trig properties of angle:
T c(cos(double(angle)));
T s(sin(double(angle)));
T t(1 - c);
MatType result;
// handle diagonal elements
result[0][0] = axis[0]*axis[0] * t + c;
result[1][1] = axis[1]*axis[1] * t + c;
result[2][2] = axis[2]*axis[2] * t + c;
txy = axis[0]*axis[1] * t;
sz = axis[2] * s;
txz = axis[0]*axis[2] * t;
sy = axis[1] * s;
tyz = axis[1]*axis[2] * t;
sx = axis[0] * s;
// right handed space
// Contribution from rotation about 'z'
result[0][1] = txy + sz;
result[1][0] = txy - sz;
// Contribution from rotation about 'y'
result[0][2] = txz - sy;
result[2][0] = txz + sy;
// Contribution from rotation about 'x'
result[1][2] = tyz + sx;
result[2][1] = tyz - sx;
if(MatType::numColumns() == 4) padMat4(result);
return MatType(result);
}
/// @brief Return the Euler angles composing the given rotation matrix.
/// @details Optional axes arguments describe in what order elementary rotations
/// are applied. Note that in our convention, XYZ means Rz * Ry * Rx.
/// Because we are using rows rather than columns to represent the
/// local axes of a coordinate frame, the interpretation from a local
/// reference point of view is to first rotate about the x axis, then
/// about the newly rotated y axis, and finally by the new local z axis.
/// From a fixed reference point of view, the interpretation is to
/// rotate about the stationary world z, y, and x axes respectively.
///
/// Irrespective of the Euler angle convention, in the case of distinct
/// axes, eulerAngles() returns the x, y, and z angles in the corresponding
/// x, y, z components of the returned Vec3. For the XZX convention, the
/// left X value is returned in Vec3.x, and the right X value in Vec3.y.
/// For the ZXZ convention the left Z value is returned in Vec3.z and
/// the right Z value in Vec3.y
///
/// Examples of reconstructing r from its Euler angle decomposition
///
/// v = eulerAngles(r, ZYX_ROTATION);
/// rx.setToRotation(Vec3d(1,0,0), v[0]);
/// ry.setToRotation(Vec3d(0,1,0), v[1]);
/// rz.setToRotation(Vec3d(0,0,1), v[2]);
/// r = rx * ry * rz;
///
/// v = eulerAngles(r, ZXZ_ROTATION);
/// rz1.setToRotation(Vec3d(0,0,1), v[2]);
/// rx.setToRotation (Vec3d(1,0,0), v[0]);
/// rz2.setToRotation(Vec3d(0,0,1), v[1]);
/// r = rz2 * rx * rz1;
///
/// v = eulerAngles(r, XZX_ROTATION);
/// rx1.setToRotation (Vec3d(1,0,0), v[0]);
/// rx2.setToRotation (Vec3d(1,0,0), v[1]);
/// rz.setToRotation (Vec3d(0,0,1), v[2]);
/// r = rx2 * rz * rx1;
///
template<class MatType>
Vec3<typename MatType::value_type>
eulerAngles(
const MatType& mat,
RotationOrder rotationOrder,
typename MatType::value_type eps = static_cast<typename MatType::value_type>(1.0e-8))
{
using ValueType = typename MatType::value_type;
using V = Vec3<ValueType>;
ValueType phi, theta, psi;
// Each case first detects the two degenerate (gimbal-lock) configurations,
// where one matrix entry is +/-1 within eps, before the general extraction.
switch(rotationOrder)
{
case XYZ_ROTATION:
if (isApproxEqual(mat[2][0], ValueType(1.0), eps)) {
theta = ValueType(M_PI_2);
phi = ValueType(0.5 * atan2(mat[1][2], mat[1][1]));
psi = phi;
} else if (isApproxEqual(mat[2][0], ValueType(-1.0), eps)) {
theta = ValueType(-M_PI_2);
phi = ValueType(0.5 * atan2(mat[1][2], mat[1][1]));
psi = -phi;
} else {
psi = ValueType(atan2(-mat[1][0],mat[0][0]));
phi = ValueType(atan2(-mat[2][1],mat[2][2]));
theta = ValueType(atan2(mat[2][0],
sqrt( mat[2][1]*mat[2][1] +
mat[2][2]*mat[2][2])));
}
return V(phi, theta, psi);
case ZXY_ROTATION:
if (isApproxEqual(mat[1][2], ValueType(1.0), eps)) {
theta = ValueType(M_PI_2);
phi = ValueType(0.5 * atan2(mat[0][1], mat[0][0]));
psi = phi;
} else if (isApproxEqual(mat[1][2], ValueType(-1.0), eps)) {
theta = ValueType(-M_PI/2);
phi = ValueType(0.5 * atan2(mat[0][1],mat[2][1]));
psi = -phi;
} else {
psi = ValueType(atan2(-mat[0][2], mat[2][2]));
phi = ValueType(atan2(-mat[1][0], mat[1][1]));
theta = ValueType(atan2(mat[1][2],
sqrt(mat[0][2] * mat[0][2] +
mat[2][2] * mat[2][2])));
}
return V(theta, psi, phi);
case YZX_ROTATION:
if (isApproxEqual(mat[0][1], ValueType(1.0), eps)) {
theta = ValueType(M_PI_2);
phi = ValueType(0.5 * atan2(mat[2][0], mat[2][2]));
psi = phi;
} else if (isApproxEqual(mat[0][1], ValueType(-1.0), eps)) {
theta = ValueType(-M_PI/2);
phi = ValueType(0.5 * atan2(mat[2][0], mat[1][0]));
psi = -phi;
} else {
psi = ValueType(atan2(-mat[2][1], mat[1][1]));
phi = ValueType(atan2(-mat[0][2], mat[0][0]));
theta = ValueType(atan2(mat[0][1],
sqrt(mat[0][0] * mat[0][0] +
mat[0][2] * mat[0][2])));
}
return V(psi, phi, theta);
case XZX_ROTATION:
if (isApproxEqual(mat[0][0], ValueType(1.0), eps)) {
theta = ValueType(0.0);
phi = ValueType(0.5 * atan2(mat[1][2], mat[1][1]));
psi = phi;
} else if (isApproxEqual(mat[0][0], ValueType(-1.0), eps)) {
theta = ValueType(M_PI);
psi = ValueType(0.5 * atan2(mat[2][1], -mat[1][1]));
phi = - psi;
} else {
psi = ValueType(atan2(mat[2][0], -mat[1][0]));
phi = ValueType(atan2(mat[0][2], mat[0][1]));
theta = ValueType(atan2(sqrt(mat[0][1] * mat[0][1] +
mat[0][2] * mat[0][2]),
mat[0][0]));
}
return V(phi, psi, theta);
case ZXZ_ROTATION:
if (isApproxEqual(mat[2][2], ValueType(1.0), eps)) {
theta = ValueType(0.0);
phi = ValueType(0.5 * atan2(mat[0][1], mat[0][0]));
psi = phi;
} else if (isApproxEqual(mat[2][2], ValueType(-1.0), eps)) {
theta = ValueType(M_PI);
phi = ValueType(0.5 * atan2(mat[0][1], mat[0][0]));
psi = -phi;
} else {
psi = ValueType(atan2(mat[0][2], mat[1][2]));
phi = ValueType(atan2(mat[2][0], -mat[2][1]));
theta = ValueType(atan2(sqrt(mat[0][2] * mat[0][2] +
mat[1][2] * mat[1][2]),
mat[2][2]));
}
return V(theta, psi, phi);
case YXZ_ROTATION:
if (isApproxEqual(mat[2][1], ValueType(1.0), eps)) {
theta = ValueType(-M_PI_2);
phi = ValueType(0.5 * atan2(-mat[1][0], mat[0][0]));
psi = phi;
} else if (isApproxEqual(mat[2][1], ValueType(-1.0), eps)) {
theta = ValueType(M_PI_2);
phi = ValueType(0.5 * atan2(mat[1][0], mat[0][0]));
psi = -phi;
} else {
psi = ValueType(atan2(mat[0][1], mat[1][1]));
phi = ValueType(atan2(mat[2][0], mat[2][2]));
theta = ValueType(atan2(-mat[2][1],
sqrt(mat[0][1] * mat[0][1] +
mat[1][1] * mat[1][1])));
}
return V(theta, phi, psi);
case ZYX_ROTATION:
if (isApproxEqual(mat[0][2], ValueType(1.0), eps)) {
theta = ValueType(-M_PI_2);
phi = ValueType(0.5 * atan2(-mat[1][0], mat[1][1]));
psi = phi;
} else if (isApproxEqual(mat[0][2], ValueType(-1.0), eps)) {
theta = ValueType(M_PI_2);
phi = ValueType(0.5 * atan2(mat[2][1], mat[2][0]));
psi = -phi;
} else {
psi = ValueType(atan2(mat[1][2], mat[2][2]));
phi = ValueType(atan2(mat[0][1], mat[0][0]));
theta = ValueType(atan2(-mat[0][2],
sqrt(mat[0][1] * mat[0][1] +
mat[0][0] * mat[0][0])));
}
return V(psi, theta, phi);
case XZY_ROTATION:
if (isApproxEqual(mat[1][0], ValueType(-1.0), eps)) {
theta = ValueType(M_PI_2);
psi = ValueType(0.5 * atan2(mat[2][1], mat[2][2]));
phi = -psi;
} else if (isApproxEqual(mat[1][0], ValueType(1.0), eps)) {
theta = ValueType(-M_PI_2);
psi = ValueType(0.5 * atan2(- mat[2][1], mat[2][2]));
phi = psi;
} else {
psi = ValueType(atan2(mat[2][0], mat[0][0]));
phi = ValueType(atan2(mat[1][2], mat[1][1]));
theta = ValueType(atan2(- mat[1][0],
sqrt(mat[1][1] * mat[1][1] +
mat[1][2] * mat[1][2])));
}
return V(phi, psi, theta);
}
// Unreached for the conventions above; other orders are not implemented.
OPENVDB_THROW(NotImplementedError, "Euler extraction sequence not implemented");
}
/// @brief Return a rotation matrix that maps @a v1 onto @a v2
/// about the cross product of @a v1 and @a v2.
/// <a name="rotation_v1_v2"></a>
template<typename MatType, typename ValueType1, typename ValueType2>
inline MatType
rotation(
const Vec3<ValueType1>& _v1,
const Vec3<ValueType2>& _v2,
typename MatType::value_type eps = static_cast<typename MatType::value_type>(1.0e-8))
{
using T = typename MatType::value_type;
Vec3<T> v1(_v1);
Vec3<T> v2(_v2);
// Check if v1 and v2 are unit length
if (!isApproxEqual(T(1), v1.dot(v1), eps)) {
v1.normalize();
}
if (!isApproxEqual(T(1), v2.dot(v2), eps)) {
v2.normalize();
}
Vec3<T> cross;
cross.cross(v1, v2);
// A (near-)zero cross product means the vectors are (nearly) parallel.
if (isApproxEqual(cross[0], zeroVal<T>(), eps) &&
isApproxEqual(cross[1], zeroVal<T>(), eps) &&
isApproxEqual(cross[2], zeroVal<T>(), eps)) {
// Given two unit vectors v1 and v2 that are nearly parallel, build a
// rotation matrix that maps v1 onto v2. First find which principal axis
// p is closest to perpendicular to v1. Find a reflection that exchanges
// v1 and p, and find a reflection that exchanges p2 and v2. The desired
// rotation matrix is the composition of these two reflections. See the
// paper "Efficiently Building a Matrix to Rotate One Vector to
// Another" by Tomas Moller and John Hughes in Journal of Graphics
// Tools Vol 4, No 4 for details.
Vec3<T> u, v, p(0.0, 0.0, 0.0);
double x = Abs(v1[0]);
double y = Abs(v1[1]);
double z = Abs(v1[2]);
// Select the principal axis with the smallest |component| of v1.
if (x < y) {
if (z < x) {
p[2] = 1;
} else {
p[0] = 1;
}
} else {
if (z < y) {
p[2] = 1;
} else {
p[1] = 1;
}
}
u = p - v1;
v = p - v2;
double udot = u.dot(u);
double vdot = v.dot(v);
double a = -2 / udot;
double b = -2 / vdot;
double c = 4 * u.dot(v) / (udot * vdot);
// Compose the two Householder-style reflections into one matrix.
MatType result;
result.setIdentity();
for (int j = 0; j < 3; j++) {
for (int i = 0; i < 3; i++)
result[i][j] = static_cast<T>(
a * u[i] * u[j] + b * v[i] * v[j] + c * v[j] * u[i]);
}
result[0][0] += 1.0;
result[1][1] += 1.0;
result[2][2] += 1.0;
if(MatType::numColumns() == 4) padMat4(result);
return result;
} else {
// General case: build the rotation from the cross product and the
// cosine of the angle (c = v1 . v2).
double c = v1.dot(v2);
double a = (1.0 - c) / cross.dot(cross);
double a0 = a * cross[0];
double a1 = a * cross[1];
double a2 = a * cross[2];
double a01 = a0 * cross[1];
double a02 = a0 * cross[2];
double a12 = a1 * cross[2];
MatType r;
r[0][0] = static_cast<T>(c + a0 * cross[0]);
r[0][1] = static_cast<T>(a01 + cross[2]);
r[0][2] = static_cast<T>(a02 - cross[1]);
r[1][0] = static_cast<T>(a01 - cross[2]);
r[1][1] = static_cast<T>(c + a1 * cross[1]);
r[1][2] = static_cast<T>(a12 + cross[0]);
r[2][0] = static_cast<T>(a02 + cross[1]);
r[2][1] = static_cast<T>(a12 - cross[0]);
r[2][2] = static_cast<T>(c + a2 * cross[2]);
if(MatType::numColumns() == 4) padMat4(r);
return r;
}
}
/// @brief Return a matrix that scales by @a s.
/// @details Starts from the identity and overwrites the upper 3x3 diagonal.
template<class MatType>
MatType
scale(const Vec3<typename MatType::value_type>& s)
{
    MatType result;
    result.setIdentity();
    for (int i = 0; i < 3; ++i) {
        result[i][i] = s[i];
    }
    return result;
}
/// Return a Vec3 representing the lengths of the passed matrix's upper 3×3's rows.
template<class MatType>
Vec3<typename MatType::value_type>
getScale(const MatType &mat)
{
using V = Vec3<typename MatType::value_type>;
// One length per row of the upper 3x3 block.
return V(
V(mat[0][0], mat[0][1], mat[0][2]).length(),
V(mat[1][0], mat[1][1], mat[1][2]).length(),
V(mat[2][0], mat[2][1], mat[2][2]).length());
}
/// @brief Return a copy of the given matrix with its upper 3×3 rows normalized.
/// @details This can be geometrically interpreted as a matrix with no scaling
/// along its major axes.
template<class MatType>
MatType
unit(const MatType &mat, typename MatType::value_type eps = 1.0e-8)
{
Vec3<typename MatType::value_type> dud;
return unit(mat, eps, dud); // per-row lengths are discarded
}
/// @brief Return a copy of the given matrix with its upper 3×3 rows normalized,
/// and return the length of each of these rows in @a scaling.
/// @details This can be geometrically interpreted as a matrix with no scaling
/// along its major axes, and the scaling in the input vector.
/// @note A row whose length is within @a eps of zero is replaced with zeros.
template<class MatType>
MatType
unit(
const MatType &in,
typename MatType::value_type eps,
Vec3<typename MatType::value_type>& scaling)
{
using T = typename MatType::value_type;
MatType result(in);
for (int i(0); i < 3; i++) {
try {
const Vec3<T> u(
Vec3<T>(in[i][0], in[i][1], in[i][2]).unit(eps, scaling[i]));
for (int j=0; j<3; j++) result[i][j] = u[j];
} catch (ArithmeticError&) {
// Null row: cannot normalize, so zero it out.
for (int j=0; j<3; j++) result[i][j] = 0;
}
}
return result;
}
/// @brief Set the matrix to a shear along @a axis0 by a fraction of @a axis1.
/// @param axis0 The fixed axis of the shear.
/// @param axis1 The shear axis.
/// @param shear The shear factor.
template <class MatType>
MatType
shear(Axis axis0, Axis axis1, typename MatType::value_type shear)
{
int index0 = static_cast<int>(axis0);
int index1 = static_cast<int>(axis1);
MatType result;
result.setIdentity();
if (axis0 == axis1) {
// Degenerate case: both axes coincide, so the shear term lands on the
// diagonal on top of the identity's 1.
result[index1][index0] = shear + 1;
} else {
result[index1][index0] = shear;
}
return result;
}
/// @brief Return the skew-symmetric (cross-product) matrix of the given vector.
/// @details For w = (x, y, z) the rows are (0, z, -y), (-z, 0, x), (y, -x, 0).
/// For a 4x4 MatType the last row/column are padded via padMat4().
template<class MatType>
MatType
skew(const Vec3<typename MatType::value_type> &skew)
{
    using T = typename MatType::value_type;
    MatType r;
    r[0][0] = T(0);      r[0][1] = skew.z();  r[0][2] = -skew.y();
    // Bug fix: the middle row previously assigned r[2][1] (twice, with the
    // second assignment on the next line overwriting it) and never set
    // r[1][2], leaving that element uninitialized.
    r[1][0] = -skew.z(); r[1][1] = T(0);      r[1][2] = skew.x();
    r[2][0] = skew.y();  r[2][1] = -skew.x(); r[2][2] = T(0);
    if(MatType::numColumns() == 4) padMat4(r);
    return r;
}
/// @brief Return an orientation matrix such that z points along @a direction,
/// and y is along the @a direction / @a vertical plane.
/// @details The rows of the result are the orthonormal basis
/// (horizontal, up, forward) built from the two inputs.
template<class MatType>
MatType
aim(const Vec3<typename MatType::value_type>& direction,
const Vec3<typename MatType::value_type>& vertical)
{
using T = typename MatType::value_type;
Vec3<T> forward(direction.unit());
Vec3<T> horizontal(vertical.unit().cross(forward).unit());
Vec3<T> up(forward.cross(horizontal).unit());
MatType r;
r[0][0]=horizontal.x(); r[0][1]=horizontal.y(); r[0][2]=horizontal.z();
r[1][0]=up.x(); r[1][1]=up.y(); r[1][2]=up.z();
r[2][0]=forward.x(); r[2][1]=forward.y(); r[2][2]=forward.z();
if(MatType::numColumns() == 4) padMat4(r);
return r;
}
/// @brief This function snaps a specific axis to a specific direction,
/// preserving scaling.
/// @details It does this using minimum energy, thus posing a unique solution if
/// basis & direction aren't parallel.
/// @note @a direction need not be unit.
/// @throw ValueError if the basis axis points exactly opposite @a direction.
template<class MatType>
inline MatType
snapMatBasis(const MatType& source, Axis axis, const Vec3<typename MatType::value_type>& direction)
{
using T = typename MatType::value_type;
Vec3<T> unitDir(direction.unit());
Vec3<T> ourUnitAxis(source.row(axis).unit());
// Are the two parallel?
T parallel = unitDir.dot(ourUnitAxis);
// Already snapped!
if (isApproxEqual(parallel, T(1.0))) return source;
if (isApproxEqual(parallel, T(-1.0))) {
OPENVDB_THROW(ValueError, "Cannot snap to inverse axis");
}
// Find angle between our basis and the one specified
T angleBetween(angle(unitDir, ourUnitAxis));
// Calculate axis to rotate along
Vec3<T> rotationAxis = unitDir.cross(ourUnitAxis);
MatType rotation;
rotation.setToRotation(rotationAxis, angleBetween);
return source * rotation;
}
/// @brief Write 0s along Mat4's last row and column, and a 1 on its diagonal.
/// @details Useful initialization when we're initializing just the 3x3 block.
template<class MatType>
inline MatType&
padMat4(MatType& dest)
{
    for (int i = 0; i < 3; ++i) {
        dest[i][3] = 0; // last column
        dest[3][i] = 0; // last row
    }
    dest[3][3] = 1;
    return dest;
}
/// @brief Solve for A=B*B, given A.
/// @details Denman-Beavers square root iteration
template<typename MatType>
inline void
sqrtSolve(const MatType& aA, MatType& aB, double aTol=0.01)
{
// Iteration count chosen so that 0.5^iterations <= aTol.
unsigned int iterations = static_cast<unsigned int>(log(aTol)/log(0.5));
// Y converges toward sqrt(A) and Z toward its inverse; each pair
// ping-pongs between two buffers indexed by 'current'/'last'.
MatType Y[2], Z[2];
Y[0] = aA;
Z[0] = MatType::identity();
unsigned int current = 0;
for (unsigned int iteration=0; iteration < iterations; iteration++) {
unsigned int last = current;
current = !current;
MatType invY = Y[last].inverse();
MatType invZ = Z[last].inverse();
Y[current] = 0.5 * (Y[last] + invZ);
Z[current] = 0.5 * (Z[last] + invY);
}
aB = Y[current];
}
/// @brief Solve for B = A^power, given A and a scalar exponent.
/// @details The integer part of the exponent is handled by binary
/// (square-and-multiply) exponentiation, and the fractional part by
/// greedily accumulating repeated square roots (computed via sqrtSolve())
/// that approximate the fraction as a binary expansion.
/// @param aA     the input matrix
/// @param aB     output: the computed power of @a aA
/// @param aPower the exponent; a negative value inverts the result
/// @param aTol   tolerance controlling how many square-root terms are used
template<typename MatType>
inline void
powSolve(const MatType& aA, MatType& aB, double aPower, double aTol=0.01)
{
    // Number of binary-fraction digits to resolve for the given tolerance.
    unsigned int iterations = static_cast<unsigned int>(log(aTol)/log(0.5));
    // Work with a non-negative exponent and invert at the very end if needed.
    const bool inverted = (aPower < 0.0);
    if (inverted) { aPower = -aPower; }
    unsigned int whole = static_cast<unsigned int>(aPower);
    double fraction = aPower - whole;
    MatType R = MatType::identity();
    MatType partial = aA;
    double contribution = 1.0;
    // Fractional part: partial successively holds A^(1/2), A^(1/4), ...;
    // multiply in each term whose binary digit appears in the fraction.
    for (unsigned int iteration = 0; iteration < iterations; iteration++) {
        sqrtSolve(partial, partial, aTol);
        contribution *= 0.5;
        if (fraction >= contribution) {
            R *= partial;
            fraction -= contribution;
        }
    }
    // Integer part: standard square-and-multiply on the original matrix.
    partial = aA;
    while (whole) {
        if (whole & 1) { R *= partial; }
        whole >>= 1;
        if (whole) { partial *= partial; }
    }
    if (inverted) { aB = R.inverse(); }
    else { aB = R; }
}
/// @brief Determine if a matrix is an identity matrix.
template<typename MatType>
inline bool
isIdentity(const MatType& m)
{
    // Compare componentwise against a freshly constructed identity.
    const MatType eye = MatType::identity();
    return m.eq(eye);
}
/// @brief Determine if a matrix is invertible.
/// @details A matrix is treated as invertible when its determinant is
/// not approximately zero.
template<typename MatType>
inline bool
isInvertible(const MatType& m)
{
    using ValueType = typename MatType::ValueType;
    const ValueType det = m.det();
    return !isApproxEqual(det, ValueType(0));
}
/// @brief Determine if a matrix is symmetric.
/// @details This implicitly uses math::isApproxEqual() to determine equality.
template<typename MatType>
inline bool
isSymmetric(const MatType& m)
{
    // A matrix is symmetric iff it equals its own transpose.
    const MatType transposed = m.transpose();
    return m.eq(transposed);
}
/// Determine if a matrix is unitary (i.e., rotation or reflection).
template<typename MatType>
inline bool
isUnitary(const MatType& m)
{
    using ValueType = typename MatType::ValueType;
    // A unitary matrix has determinant +1 (rotation) or -1 (reflection)...
    if (!isApproxEqual(std::abs(m.det()), ValueType(1.0))) return false;
    // ...and its transpose acts as its inverse.
    const MatType product = m * m.transpose();
    return product.eq(MatType::identity());
}
/// Determine if a matrix is diagonal.
template<typename MatType>
inline bool
isDiagonal(const MatType& mat)
{
    // Sum the magnitudes of every off-diagonal component; the matrix is
    // diagonal iff that sum vanishes (to within isApproxEqual tolerance).
    const int n = MatType::size;
    typename MatType::ValueType offDiagSum(0);
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < n; ++col) {
            if (row != col) {
                offDiagSum += std::abs(mat(row, col));
            }
        }
    }
    return isApproxEqual(offDiagSum, typename MatType::ValueType(0.0));
}
/// Return the <i>L</i><sub>∞</sub> norm of an <i>N</i>×<i>N</i> matrix.
// NOTE(review): this computes the maximum absolute *column* sum, which is
// conventionally the induced L1 norm (lOneNorm() below computes the row
// sums).  polarDecomposition() uses the two symmetrically, so it is
// unaffected — but confirm the naming before relying on it elsewhere.
template<typename MatType>
typename MatType::ValueType
lInfinityNorm(const MatType& matrix)
{
    const int n = MatType::size;
    typename MatType::ValueType result = 0;
    for (int col = 0; col < n; ++col) {
        typename MatType::ValueType colSum = 0;
        for (int row = 0; row < n; ++row) {
            colSum += std::fabs(matrix(row, col));
        }
        result = std::max(result, colSum);
    }
    return result;
}
/// Return the <i>L</i><sub>1</sub> norm of an <i>N</i>×<i>N</i> matrix.
// NOTE(review): this computes the maximum absolute *row* sum, which is
// conventionally the induced L-infinity norm; see the matching note on
// lInfinityNorm() above.
template<typename MatType>
typename MatType::ValueType
lOneNorm(const MatType& matrix)
{
    const int n = MatType::size;
    typename MatType::ValueType result = 0;
    for (int row = 0; row < n; ++row) {
        typename MatType::ValueType rowSum = 0;
        for (int col = 0; col < n; ++col) {
            rowSum += std::fabs(matrix(row, col));
        }
        result = std::max(result, rowSum);
    }
    return result;
}
/// @brief Decompose an invertible 3×3 matrix into a unitary matrix
/// followed by a symmetric matrix (positive semi-definite Hermitian),
/// i.e., M = U * S.
/// @details If det(U) = 1 it is a rotation, otherwise det(U) = -1,
/// meaning there is some part reflection.
/// See "Computing the polar decomposition with applications"
/// Higham, N.J. - SIAM J. Sc. Stat Comput 7(4):1160-1174
/// @param input              the matrix to decompose
/// @param unitary            output: the unitary factor U
/// @param positive_hermitian output: the symmetric factor S, with M = U * S
/// @param MAX_ITERATIONS     cap on the number of refinement iterations
/// @return false if @a input is (nearly) singular or the iteration fails
/// to converge within @a MAX_ITERATIONS iterations; true on success
template<typename MatType>
bool
polarDecomposition(const MatType& input, MatType& unitary,
    MatType& positive_hermitian, unsigned int MAX_ITERATIONS=100)
{
    unitary = input;
    MatType new_unitary(input);
    MatType unitary_inv;
    // A near-zero determinant means no unique decomposition exists.
    if (fabs(unitary.det()) < math::Tolerance<typename MatType::ValueType>::value()) return false;
    unsigned int iteration(0);
    typename MatType::ValueType linf_of_u;
    typename MatType::ValueType l1nm_of_u;
    typename MatType::ValueType linf_of_u_inv;
    typename MatType::ValueType l1nm_of_u_inv;
    typename MatType::ValueType l1_error = 100;
    double gamma;
    do {
        unitary_inv = unitary.inverse();
        linf_of_u = lInfinityNorm(unitary);
        l1nm_of_u = lOneNorm(unitary);
        linf_of_u_inv = lInfinityNorm(unitary_inv);
        l1nm_of_u_inv = lOneNorm(unitary_inv);
        // Norm-based scale factor (Higham's acceleration) that balances
        // the norms of the matrix and its inverse.
        gamma = sqrt( sqrt( (l1nm_of_u_inv * linf_of_u_inv ) / (l1nm_of_u * linf_of_u) ));
        // Scaled Newton step: average the scaled matrix with the scaled
        // inverse transpose, driving the iterate toward orthogonality.
        new_unitary = 0.5*(gamma * unitary + (1./gamma) * unitary_inv.transpose() );
        // Convergence is measured by how much the iterate moved.
        l1_error = lInfinityNorm(unitary - new_unitary);
        unitary = new_unitary;
        // this generally converges in less than ten iterations
        if (iteration > MAX_ITERATIONS) return false;
        iteration++;
    } while (l1_error > math::Tolerance<typename MatType::ValueType>::value());
    // With U unitary, S = U^T * M recovers the symmetric factor.
    positive_hermitian = unitary.transpose() * input;
    return true;
}
////////////////////////////////////////
/// @return true if m0 < m1, comparing components in order of significance.
template<unsigned SIZE, typename T>
inline bool
cwiseLessThan(const Mat<SIZE, T>& m0, const Mat<SIZE, T>& m1)
{
const T* m0p = m0.asPointer();
const T* m1p = m1.asPointer();
constexpr unsigned size = SIZE*SIZE;
for (unsigned i = 0; i < size-1; ++i, ++m0p, ++m1p) {
if (!math::isExactlyEqual(*m0p, *m1p)) return *m0p < *m1p;
}
return *m0p < *m1p;
}
/// @return true if m0 > m1, comparing components in order of significance.
template<unsigned SIZE, typename T>
inline bool
cwiseGreaterThan(const Mat<SIZE, T>& m0, const Mat<SIZE, T>& m1)
{
const T* m0p = m0.asPointer();
const T* m1p = m1.asPointer();
constexpr unsigned size = SIZE*SIZE;
for (unsigned i = 0; i < size-1; ++i, ++m0p, ++m1p) {
if (!math::isExactlyEqual(*m0p, *m1p)) return *m0p > *m1p;
}
return *m0p > *m1p;
}
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_MATH_MAT_HAS_BEEN_INCLUDED
| 32,171 | C | 29.151828 | 99 | 0.558049 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/QuantizedUnitVec.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#include "QuantizedUnitVec.h"
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
// The table below is generated as follows:
//
// uint16_t xbits, ybits;
// double x, y, z, w;
// for (uint16_t b = 0; b < 8192; ++b) {
// xbits = uint16_t((b & MASK_XSLOT) >> 7);
// ybits = b & MASK_YSLOT;
// if ((xbits + ybits) > 126) {
// xbits = uint16_t(127 - xbits);
// ybits = uint16_t(127 - ybits);
// }
// x = double(xbits);
// y = double(ybits);
// z = double(126 - xbits - ybits);
// w = 1.0 / std::sqrt(x*x + y*y + z*z);
// sNormalizationWeights[b] = float(w);
// }
float QuantizedUnitVec::sNormalizationWeights[8192] = {
0.007936508395f, 0.007999744266f, 0.008063467219f, 0.008127664216f,
0.008192319423f, 0.008257416077f, 0.008322936483f, 0.008388860151f,
0.008455166593f, 0.008521833457f, 0.008588834666f, 0.008656143211f,
0.008723732084f, 0.00879156962f, 0.008859624155f, 0.008927859366f,
0.008996240795f, 0.009064726532f, 0.009133277461f, 0.009201847948f,
0.00927039329f, 0.009338863194f, 0.009407208301f, 0.009475374594f,
0.009543305263f, 0.009610942565f, 0.009678225033f, 0.009745089337f,
0.009811468422f, 0.009877296165f, 0.009942499921f, 0.01000700705f,
0.01007074397f, 0.01013363153f, 0.01019559242f, 0.01025654469f,
0.01031640731f, 0.01037509646f, 0.01043252647f, 0.01048861258f,
0.01054326911f, 0.01059640758f, 0.01064794231f, 0.01069778763f,
0.01074585691f, 0.01079206541f, 0.01083632838f, 0.01087856572f,
0.01091869641f, 0.01095664315f, 0.01099232957f, 0.01102568675f,
0.01105664484f, 0.01108513959f, 0.01111111138f, 0.01113450434f,
0.01115526911f, 0.01117335912f, 0.01118873432f, 0.01120136213f,
0.01121121366f, 0.01121826563f, 0.01122250315f, 0.0112239169f,
0.01122250315f, 0.01121826563f, 0.01121121366f, 0.01120136213f,
0.01118873432f, 0.01117335912f, 0.01115526911f, 0.01113450434f,
0.01111111138f, 0.01108513959f, 0.01105664484f, 0.01102568675f,
0.01099232957f, 0.01095664315f, 0.01091869641f, 0.01087856572f,
0.01083632838f, 0.01079206541f, 0.01074585691f, 0.01069778763f,
0.01064794231f, 0.01059640758f, 0.01054326911f, 0.01048861258f,
0.01043252647f, 0.01037509646f, 0.01031640731f, 0.01025654469f,
0.01019559242f, 0.01013363153f, 0.01007074397f, 0.01000700705f,
0.009942499921f, 0.009877296165f, 0.009811468422f, 0.009745089337f,
0.009678225033f, 0.009610942565f, 0.009543305263f, 0.009475374594f,
0.009407208301f, 0.009338863194f, 0.00927039329f, 0.009201847948f,
0.009133277461f, 0.009064726532f, 0.008996240795f, 0.008927859366f,
0.008859624155f, 0.00879156962f, 0.008723732084f, 0.008656143211f,
0.008588834666f, 0.008521833457f, 0.008455166593f, 0.008388860151f,
0.008322936483f, 0.008257416077f, 0.008192319423f, 0.008127664216f,
0.008063467219f, 0.007999744266f, 0.007936508395f, 0.007873771712f,
0.007999744266f, 0.008063991554f, 0.008128738031f, 0.008193968795f,
0.008259668946f, 0.008325820789f, 0.008392404765f, 0.008459401317f,
0.008526788093f, 0.00859454181f, 0.008662636392f, 0.008731043898f,
0.008799735457f, 0.008868678473f, 0.008937839419f, 0.009007181972f,
0.00907666795f, 0.009146256372f, 0.009215905331f, 0.009285567328f,
0.009355195798f, 0.009424740449f, 0.009494146332f, 0.009563359432f,
0.009632320143f, 0.009700968862f, 0.009769240394f, 0.00983707048f,
0.00990438927f, 0.009971125983f, 0.01003720704f, 0.01010255609f,
0.01016709674f, 0.01023074798f, 0.01029342785f, 0.01035505254f,
0.01041553635f, 0.01047479361f, 0.01053273678f, 0.01058927551f,
0.01064432319f, 0.01069778763f, 0.01074958127f, 0.01079961471f,
0.01084779948f, 0.01089404803f, 0.01093827467f, 0.0109803956f,
0.01102032885f, 0.01105799619f, 0.01109332126f, 0.0111262314f,
0.01115665678f, 0.01118453499f, 0.01120980456f, 0.01123241056f,
0.01125230361f, 0.01126943901f, 0.01128377859f, 0.01129528973f,
0.01130394638f, 0.01130972803f, 0.01131262258f, 0.01131262258f,
0.01130972803f, 0.01130394638f, 0.01129528973f, 0.01128377859f,
0.01126943901f, 0.01125230361f, 0.01123241056f, 0.01120980456f,
0.01118453499f, 0.01115665678f, 0.0111262314f, 0.01109332126f,
0.01105799619f, 0.01102032885f, 0.0109803956f, 0.01093827467f,
0.01089404803f, 0.01084779948f, 0.01079961471f, 0.01074958127f,
0.01069778763f, 0.01064432319f, 0.01058927551f, 0.01053273678f,
0.01047479361f, 0.01041553635f, 0.01035505254f, 0.01029342785f,
0.01023074798f, 0.01016709674f, 0.01010255609f, 0.01003720704f,
0.009971125983f, 0.00990438927f, 0.00983707048f, 0.009769240394f,
0.009700968862f, 0.009632320143f, 0.009563359432f, 0.009494146332f,
0.009424740449f, 0.009355195798f, 0.009285567328f, 0.009215905331f,
0.009146256372f, 0.00907666795f, 0.009007181972f, 0.008937839419f,
0.008868678473f, 0.008799735457f, 0.008731043898f, 0.008662636392f,
0.00859454181f, 0.008526788093f, 0.008459401317f, 0.008392404765f,
0.008325820789f, 0.008259668946f, 0.008193968795f, 0.008128738031f,
0.008063991554f, 0.007999744266f, 0.007936008275f, 0.007936508395f,
0.008063467219f, 0.008128738031f, 0.008194519207f, 0.008260795847f,
0.008327552117f, 0.008394770324f, 0.008462429978f, 0.008530510589f,
0.008598989807f, 0.008667841554f, 0.008737040684f, 0.008806557395f,
0.008876360953f, 0.008946419694f, 0.009016696364f, 0.009087154642f,
0.009157754481f, 0.009228453971f, 0.009299207479f, 0.009369968437f,
0.009440686554f, 0.009511308745f, 0.009581780061f, 0.00965204183f,
0.009722034447f, 0.009791694582f, 0.009860955179f, 0.009929747321f,
0.009998000227f, 0.01006564032f, 0.01013259124f, 0.01019877382f,
0.01026410609f, 0.01032850612f, 0.01039188914f, 0.01045416761f,
0.01051525306f, 0.01057505608f, 0.01063348539f, 0.01069044974f,
0.01074585691f, 0.01079961471f, 0.01085163094f, 0.0109018134f,
0.01095007174f, 0.01099631656f, 0.01104046032f, 0.0110824164f,
0.01112210099f, 0.01115943585f, 0.01119434182f, 0.01122674625f,
0.01125658024f, 0.01128377859f, 0.01130828168f, 0.01133003552f,
0.01134899072f, 0.01136510447f, 0.01137833856f, 0.01138866507f,
0.0113960579f, 0.01140050031f, 0.01140198205f, 0.01140050031f,
0.0113960579f, 0.01138866507f, 0.01137833856f, 0.01136510447f,
0.01134899072f, 0.01133003552f, 0.01130828168f, 0.01128377859f,
0.01125658024f, 0.01122674625f, 0.01119434182f, 0.01115943585f,
0.01112210099f, 0.0110824164f, 0.01104046032f, 0.01099631656f,
0.01095007174f, 0.0109018134f, 0.01085163094f, 0.01079961471f,
0.01074585691f, 0.01069044974f, 0.01063348539f, 0.01057505608f,
0.01051525306f, 0.01045416761f, 0.01039188914f, 0.01032850612f,
0.01026410609f, 0.01019877382f, 0.01013259124f, 0.01006564032f,
0.009998000227f, 0.009929747321f, 0.009860955179f, 0.009791694582f,
0.009722034447f, 0.00965204183f, 0.009581780061f, 0.009511308745f,
0.009440686554f, 0.009369968437f, 0.009299207479f, 0.009228453971f,
0.009157754481f, 0.009087154642f, 0.009016696364f, 0.008946419694f,
0.008876360953f, 0.008806557395f, 0.008737040684f, 0.008667841554f,
0.008598989807f, 0.008530510589f, 0.008462429978f, 0.008394770324f,
0.008327552117f, 0.008260795847f, 0.008194519207f, 0.008128738031f,
0.008063467219f, 0.007998720743f, 0.007999744266f, 0.007999744266f,
0.008127664216f, 0.008193968795f, 0.008260795847f, 0.008328129537f,
0.008395953104f, 0.008464248851f, 0.008532994427f, 0.008602170274f,
0.008671752177f, 0.008741713129f, 0.008812026121f, 0.008882662281f,
0.008953588083f, 0.00902477093f, 0.009096172638f, 0.009167755023f,
0.009239477105f, 0.009311294183f, 0.009383158758f, 0.009455023333f,
0.009526834823f, 0.00959853828f, 0.009670076892f, 0.009741389193f,
0.009812413715f, 0.009883082472f, 0.009953328408f, 0.01002307981f,
0.01009226125f, 0.01016079728f, 0.01022860687f, 0.01029560994f,
0.01036172081f, 0.01042685378f, 0.01049092133f, 0.01055383217f,
0.01061549596f, 0.01067581866f, 0.01073470619f, 0.01079206541f,
0.01084779948f, 0.0109018134f, 0.01095401309f, 0.01100430358f,
0.01105259173f, 0.01109878626f, 0.01114279591f, 0.01118453499f,
0.0112239169f, 0.01126086153f, 0.01129528973f, 0.01132712793f,
0.01135630626f, 0.01138276048f, 0.01140643191f, 0.01142726559f,
0.01144521404f, 0.01146023627f, 0.01147229597f, 0.01148136612f,
0.0114874253f, 0.01149045862f, 0.01149045862f, 0.0114874253f,
0.01148136612f, 0.01147229597f, 0.01146023627f, 0.01144521404f,
0.01142726559f, 0.01140643191f, 0.01138276048f, 0.01135630626f,
0.01132712793f, 0.01129528973f, 0.01126086153f, 0.0112239169f,
0.01118453499f, 0.01114279591f, 0.01109878626f, 0.01105259173f,
0.01100430358f, 0.01095401309f, 0.0109018134f, 0.01084779948f,
0.01079206541f, 0.01073470619f, 0.01067581866f, 0.01061549596f,
0.01055383217f, 0.01049092133f, 0.01042685378f, 0.01036172081f,
0.01029560994f, 0.01022860687f, 0.01016079728f, 0.01009226125f,
0.01002307981f, 0.009953328408f, 0.009883082472f, 0.009812413715f,
0.009741389193f, 0.009670076892f, 0.00959853828f, 0.009526834823f,
0.009455023333f, 0.009383158758f, 0.009311294183f, 0.009239477105f,
0.009167755023f, 0.009096172638f, 0.00902477093f, 0.008953588083f,
0.008882662281f, 0.008812026121f, 0.008741713129f, 0.008671752177f,
0.008602170274f, 0.008532994427f, 0.008464248851f, 0.008395953104f,
0.008328129537f, 0.008260795847f, 0.008193968795f, 0.008127664216f,
0.008061895147f, 0.008063467219f, 0.008063991554f, 0.008063467219f,
0.008192319423f, 0.008259668946f, 0.008327552117f, 0.008395953104f,
0.008464855142f, 0.008534237742f, 0.008604080416f, 0.008674361743f,
0.008745054714f, 0.008816135116f, 0.008887572214f, 0.008959336206f,
0.009031393565f, 0.009103707969f, 0.009176243097f, 0.009248957038f,
0.009321806021f, 0.009394746274f, 0.009467727505f, 0.009540699422f,
0.009613607079f, 0.009686394595f, 0.009759000503f, 0.009831363335f,
0.0099034179f, 0.009975093417f, 0.01004632004f, 0.01011702232f,
0.0101871239f, 0.01025654469f, 0.01032520272f, 0.01039301138f,
0.010459885f, 0.0105257323f, 0.01059046388f, 0.0106539838f,
0.01071619987f, 0.01077701338f, 0.01083632838f, 0.01089404803f,
0.01095007174f, 0.01100430358f, 0.01105664484f, 0.01110699773f,
0.01115526911f, 0.01120136213f, 0.01124518644f, 0.01128665265f,
0.01132567506f, 0.01136216894f, 0.0113960579f, 0.01142726559f,
0.01145572308f, 0.01148136612f, 0.01150413603f, 0.01152398065f,
0.01154085156f, 0.0115547115f, 0.01156552508f, 0.0115732681f,
0.01157792099f, 0.0115794735f, 0.01157792099f, 0.0115732681f,
0.01156552508f, 0.0115547115f, 0.01154085156f, 0.01152398065f,
0.01150413603f, 0.01148136612f, 0.01145572308f, 0.01142726559f,
0.0113960579f, 0.01136216894f, 0.01132567506f, 0.01128665265f,
0.01124518644f, 0.01120136213f, 0.01115526911f, 0.01110699773f,
0.01105664484f, 0.01100430358f, 0.01095007174f, 0.01089404803f,
0.01083632838f, 0.01077701338f, 0.01071619987f, 0.0106539838f,
0.01059046388f, 0.0105257323f, 0.010459885f, 0.01039301138f,
0.01032520272f, 0.01025654469f, 0.0101871239f, 0.01011702232f,
0.01004632004f, 0.009975093417f, 0.0099034179f, 0.009831363335f,
0.009759000503f, 0.009686394595f, 0.009613607079f, 0.009540699422f,
0.009467727505f, 0.009394746274f, 0.009321806021f, 0.009248957038f,
0.009176243097f, 0.009103707969f, 0.009031393565f, 0.008959336206f,
0.008887572214f, 0.008816135116f, 0.008745054714f, 0.008674361743f,
0.008604080416f, 0.008534237742f, 0.008464855142f, 0.008395953104f,
0.008327552117f, 0.008259668946f, 0.008192319423f, 0.008125517517f,
0.008127664216f, 0.008128738031f, 0.008128738031f, 0.008127664216f,
0.008257416077f, 0.008325820789f, 0.008394770324f, 0.008464248851f,
0.008534237742f, 0.008604717441f, 0.008675667457f, 0.008747061715f,
0.00881887693f, 0.008891084231f, 0.008963654749f, 0.009036554955f,
0.00910975039f, 0.009183204733f, 0.009256878868f, 0.009330729023f,
0.009404712357f, 0.00947877951f, 0.00955288019f, 0.009626962245f,
0.009700968862f, 0.009774839506f, 0.009848512709f, 0.009921924211f,
0.009995004162f, 0.01006768085f, 0.01013988163f, 0.01021152735f,
0.01028253883f, 0.01035283227f, 0.0104223229f, 0.01049092133f,
0.01055853814f, 0.01062507927f, 0.01069044974f, 0.0107545536f,
0.01081729215f, 0.01087856572f, 0.01093827467f, 0.01099631656f,
0.01105259173f, 0.01110699773f, 0.01115943585f, 0.01120980456f,
0.01125800703f, 0.01130394638f, 0.01134752948f, 0.01138866507f,
0.01142726559f, 0.01146324724f, 0.01149653178f, 0.01152704284f,
0.0115547115f, 0.0115794735f, 0.01160127111f, 0.01162005402f,
0.01163577568f, 0.01164839976f, 0.01165789459f, 0.0116642369f,
0.01166741177f, 0.01166741177f, 0.0116642369f, 0.01165789459f,
0.01164839976f, 0.01163577568f, 0.01162005402f, 0.01160127111f,
0.0115794735f, 0.0115547115f, 0.01152704284f, 0.01149653178f,
0.01146324724f, 0.01142726559f, 0.01138866507f, 0.01134752948f,
0.01130394638f, 0.01125800703f, 0.01120980456f, 0.01115943585f,
0.01110699773f, 0.01105259173f, 0.01099631656f, 0.01093827467f,
0.01087856572f, 0.01081729215f, 0.0107545536f, 0.01069044974f,
0.01062507927f, 0.01055853814f, 0.01049092133f, 0.0104223229f,
0.01035283227f, 0.01028253883f, 0.01021152735f, 0.01013988163f,
0.01006768085f, 0.009995004162f, 0.009921924211f, 0.009848512709f,
0.009774839506f, 0.009700968862f, 0.009626962245f, 0.00955288019f,
0.00947877951f, 0.009404712357f, 0.009330729023f, 0.009256878868f,
0.009183204733f, 0.00910975039f, 0.009036554955f, 0.008963654749f,
0.008891084231f, 0.00881887693f, 0.008747061715f, 0.008675667457f,
0.008604717441f, 0.008534237742f, 0.008464248851f, 0.008394770324f,
0.008325820789f, 0.008257416077f, 0.00818957109f, 0.008192319423f,
0.008193968795f, 0.008194519207f, 0.008193968795f, 0.008192319423f,
0.008322936483f, 0.008392404765f, 0.008462429978f, 0.008532994427f,
0.008604080416f, 0.008675667457f, 0.008747731335f, 0.008820248768f,
0.008893193677f, 0.008966536261f, 0.009040246718f, 0.009114289656f,
0.009188630618f, 0.009263231419f, 0.009338049218f, 0.009413041174f,
0.009488161653f, 0.009563359432f, 0.009638582356f, 0.009713775478f,
0.009788879193f, 0.009863832965f, 0.009938570671f, 0.01001302525f,
0.01008712593f, 0.01016079728f, 0.01023396198f, 0.01030653995f,
0.01037844829f, 0.01044960041f, 0.01051990688f, 0.01058927551f,
0.0106576141f, 0.01072482392f, 0.01079080813f, 0.01085546613f,
0.01091869641f, 0.0109803956f, 0.01104046032f, 0.01109878626f,
0.01115526911f, 0.01120980456f, 0.01126228925f, 0.01131262258f,
0.01136070304f, 0.01140643191f, 0.01144971419f, 0.01149045862f,
0.01152857486f, 0.01156397816f, 0.01159659028f, 0.01162633486f,
0.01165314391f, 0.01167695317f, 0.01169770677f, 0.0117153544f,
0.0117298523f, 0.011741166f, 0.01174926758f, 0.0117541356f,
0.01175575983f, 0.0117541356f, 0.01174926758f, 0.011741166f,
0.0117298523f, 0.0117153544f, 0.01169770677f, 0.01167695317f,
0.01165314391f, 0.01162633486f, 0.01159659028f, 0.01156397816f,
0.01152857486f, 0.01149045862f, 0.01144971419f, 0.01140643191f,
0.01136070304f, 0.01131262258f, 0.01126228925f, 0.01120980456f,
0.01115526911f, 0.01109878626f, 0.01104046032f, 0.0109803956f,
0.01091869641f, 0.01085546613f, 0.01079080813f, 0.01072482392f,
0.0106576141f, 0.01058927551f, 0.01051990688f, 0.01044960041f,
0.01037844829f, 0.01030653995f, 0.01023396198f, 0.01016079728f,
0.01008712593f, 0.01001302525f, 0.009938570671f, 0.009863832965f,
0.009788879193f, 0.009713775478f, 0.009638582356f, 0.009563359432f,
0.009488161653f, 0.009413041174f, 0.009338049218f, 0.009263231419f,
0.009188630618f, 0.009114289656f, 0.009040246718f, 0.008966536261f,
0.008893193677f, 0.008820248768f, 0.008747731335f, 0.008675667457f,
0.008604080416f, 0.008532994427f, 0.008462429978f, 0.008392404765f,
0.008322936483f, 0.008254040033f, 0.008257416077f, 0.008259668946f,
0.008260795847f, 0.008260795847f, 0.008259668946f, 0.008257416077f,
0.008388860151f, 0.008459401317f, 0.008530510589f, 0.008602170274f,
0.008674361743f, 0.008747061715f, 0.008820248768f, 0.008893896826f,
0.00896797888f, 0.009042463265f, 0.00911732018f, 0.009192512371f,
0.009268003516f, 0.0093437545f, 0.00941972062f, 0.009495858103f,
0.009572117589f, 0.009648446925f, 0.009724793024f, 0.009801096283f,
0.009877296165f, 0.009953328408f, 0.01002912689f, 0.01010461897f,
0.01017973199f, 0.01025438774f, 0.01032850612f, 0.01040200423f,
0.01047479361f, 0.01054678671f, 0.01061788946f, 0.01068800688f,
0.0107570421f, 0.01082489453f, 0.01089146268f, 0.01095664315f,
0.01102032885f, 0.0110824164f, 0.01114279591f, 0.01120136213f,
0.01125800703f, 0.01131262258f, 0.01136510447f, 0.01141534653f,
0.01146324724f, 0.01150870696f, 0.01155162696f, 0.01159191411f,
0.01162947994f, 0.0116642369f, 0.01169610675f, 0.01172501314f,
0.01175088901f, 0.01177367195f, 0.0117933061f, 0.01180974208f,
0.01182294171f, 0.01183286961f, 0.01183950249f, 0.01184282266f,
0.01184282266f, 0.01183950249f, 0.01183286961f, 0.01182294171f,
0.01180974208f, 0.0117933061f, 0.01177367195f, 0.01175088901f,
0.01172501314f, 0.01169610675f, 0.0116642369f, 0.01162947994f,
0.01159191411f, 0.01155162696f, 0.01150870696f, 0.01146324724f,
0.01141534653f, 0.01136510447f, 0.01131262258f, 0.01125800703f,
0.01120136213f, 0.01114279591f, 0.0110824164f, 0.01102032885f,
0.01095664315f, 0.01089146268f, 0.01082489453f, 0.0107570421f,
0.01068800688f, 0.01061788946f, 0.01054678671f, 0.01047479361f,
0.01040200423f, 0.01032850612f, 0.01025438774f, 0.01017973199f,
0.01010461897f, 0.01002912689f, 0.009953328408f, 0.009877296165f,
0.009801096283f, 0.009724793024f, 0.009648446925f, 0.009572117589f,
0.009495858103f, 0.00941972062f, 0.0093437545f, 0.009268003516f,
0.009192512371f, 0.00911732018f, 0.009042463265f, 0.00896797888f,
0.008893896826f, 0.008820248768f, 0.008747061715f, 0.008674361743f,
0.008602170274f, 0.008530510589f, 0.008459401317f, 0.008388860151f,
0.008318902925f, 0.008322936483f, 0.008325820789f, 0.008327552117f,
0.008328129537f, 0.008327552117f, 0.008325820789f, 0.008322936483f,
0.008455166593f, 0.008526788093f, 0.008598989807f, 0.008671752177f,
0.008745054714f, 0.00881887693f, 0.008893193677f, 0.00896797888f,
0.009043202735f, 0.009118835442f, 0.009194843471f, 0.00927118957f,
0.009347835556f, 0.009424740449f, 0.009501857683f, 0.009579141624f,
0.009656541049f, 0.009734002873f, 0.009811468422f, 0.009888879955f,
0.009966172278f, 0.01004327927f, 0.01012013014f, 0.01019665226f,
0.01027276739f, 0.01034839638f, 0.01042345539f, 0.01049785595f,
0.0105715096f, 0.01064432319f, 0.01071619987f, 0.01078704093f,
0.01085674576f, 0.01092521101f, 0.01099232957f, 0.01105799619f,
0.01112210099f, 0.01118453499f, 0.01124518644f, 0.01130394638f,
0.01136070304f, 0.01141534653f, 0.01146776881f, 0.01151786372f,
0.01156552508f, 0.01161065139f, 0.01165314391f, 0.01169290766f,
0.0117298523f, 0.01176389214f, 0.01179494616f, 0.01182294171f,
0.01184780896f, 0.01186948828f, 0.01188792568f, 0.0119030755f,
0.01191489771f, 0.01192336436f, 0.01192845311f, 0.01193015091f,
0.01192845311f, 0.01192336436f, 0.01191489771f, 0.0119030755f,
0.01188792568f, 0.01186948828f, 0.01184780896f, 0.01182294171f,
0.01179494616f, 0.01176389214f, 0.0117298523f, 0.01169290766f,
0.01165314391f, 0.01161065139f, 0.01156552508f, 0.01151786372f,
0.01146776881f, 0.01141534653f, 0.01136070304f, 0.01130394638f,
0.01124518644f, 0.01118453499f, 0.01112210099f, 0.01105799619f,
0.01099232957f, 0.01092521101f, 0.01085674576f, 0.01078704093f,
0.01071619987f, 0.01064432319f, 0.0105715096f, 0.01049785595f,
0.01042345539f, 0.01034839638f, 0.01027276739f, 0.01019665226f,
0.01012013014f, 0.01004327927f, 0.009966172278f, 0.009888879955f,
0.009811468422f, 0.009734002873f, 0.009656541049f, 0.009579141624f,
0.009501857683f, 0.009424740449f, 0.009347835556f, 0.00927118957f,
0.009194843471f, 0.009118835442f, 0.009043202735f, 0.00896797888f,
0.008893193677f, 0.00881887693f, 0.008745054714f, 0.008671752177f,
0.008598989807f, 0.008526788093f, 0.008455166593f, 0.00838414114f,
0.008388860151f, 0.008392404765f, 0.008394770324f, 0.008395953104f,
0.008395953104f, 0.008394770324f, 0.008392404765f, 0.008388860151f,
0.008521833457f, 0.00859454181f, 0.008667841554f, 0.008741713129f,
0.008816135116f, 0.008891084231f, 0.008966536261f, 0.009042463265f,
0.009118835442f, 0.009195621125f, 0.009272783995f, 0.009350287728f,
0.009428090416f, 0.009506150149f, 0.009584420361f, 0.00966285076f,
0.009741389193f, 0.009819980711f, 0.009898564778f, 0.009977078997f,
0.01005545817f, 0.01013363153f, 0.01021152735f, 0.0102890674f,
0.01036617346f, 0.01044276077f, 0.01051874273f, 0.01059402898f,
0.01066852547f, 0.01074213628f, 0.01081476174f, 0.01088629849f,
0.01095664315f, 0.01102568675f, 0.01109332126f, 0.01115943585f,
0.0112239169f, 0.01128665265f, 0.01134752948f, 0.01140643191f,
0.01146324724f, 0.01151786372f, 0.01157016866f, 0.01162005402f,
0.01166741177f, 0.01171213947f, 0.0117541356f, 0.0117933061f,
0.0118295569f, 0.01186280511f, 0.01189296879f, 0.01191997528f,
0.01194375753f, 0.01196425594f, 0.01198141929f, 0.01199520286f,
0.01200557221f, 0.01201249938f, 0.01201596763f, 0.01201596763f,
0.01201249938f, 0.01200557221f, 0.01199520286f, 0.01198141929f,
0.01196425594f, 0.01194375753f, 0.01191997528f, 0.01189296879f,
0.01186280511f, 0.0118295569f, 0.0117933061f, 0.0117541356f,
0.01171213947f, 0.01166741177f, 0.01162005402f, 0.01157016866f,
0.01151786372f, 0.01146324724f, 0.01140643191f, 0.01134752948f,
0.01128665265f, 0.0112239169f, 0.01115943585f, 0.01109332126f,
0.01102568675f, 0.01095664315f, 0.01088629849f, 0.01081476174f,
0.01074213628f, 0.01066852547f, 0.01059402898f, 0.01051874273f,
0.01044276077f, 0.01036617346f, 0.0102890674f, 0.01021152735f,
0.01013363153f, 0.01005545817f, 0.009977078997f, 0.009898564778f,
0.009819980711f, 0.009741389193f, 0.00966285076f, 0.009584420361f,
0.009506150149f, 0.009428090416f, 0.009350287728f, 0.009272783995f,
0.009195621125f, 0.009118835442f, 0.009042463265f, 0.008966536261f,
0.008891084231f, 0.008816135116f, 0.008741713129f, 0.008667841554f,
0.00859454181f, 0.008521833457f, 0.008449732326f, 0.008455166593f,
0.008459401317f, 0.008462429978f, 0.008464248851f, 0.008464855142f,
0.008464248851f, 0.008462429978f, 0.008459401317f, 0.008455166593f,
0.008588834666f, 0.008662636392f, 0.008737040684f, 0.008812026121f,
0.008887572214f, 0.008963654749f, 0.009040246718f, 0.00911732018f,
0.009194843471f, 0.009272783995f, 0.009351104498f, 0.009429766797f,
0.00950872805f, 0.009587943554f, 0.00966736488f, 0.009746940807f,
0.009826615453f, 0.00990633294f, 0.009986029007f, 0.01006564032f,
0.01014509797f, 0.01022432931f, 0.01030325703f, 0.01038180385f,
0.010459885f, 0.01053741388f, 0.01061430015f, 0.01069044974f,
0.01076576579f, 0.01084014867f, 0.01091349311f, 0.01098569483f,
0.01105664484f, 0.0111262314f, 0.01119434182f, 0.01126086153f,
0.01132567506f, 0.01138866507f, 0.01144971419f, 0.01150870696f,
0.01156552508f, 0.01162005402f, 0.01167218015f, 0.01172179077f,
0.01176877879f, 0.01181303803f, 0.01185446698f, 0.01189296879f,
0.01192845311f, 0.0119608324f, 0.01199002843f, 0.01201596763f,
0.01203858573f, 0.01205782313f, 0.01207363233f, 0.01208597142f,
0.01209480781f, 0.01210011914f, 0.01210189145f, 0.01210011914f,
0.01209480781f, 0.01208597142f, 0.01207363233f, 0.01205782313f,
0.01203858573f, 0.01201596763f, 0.01199002843f, 0.0119608324f,
0.01192845311f, 0.01189296879f, 0.01185446698f, 0.01181303803f,
0.01176877879f, 0.01172179077f, 0.01167218015f, 0.01162005402f,
0.01156552508f, 0.01150870696f, 0.01144971419f, 0.01138866507f,
0.01132567506f, 0.01126086153f, 0.01119434182f, 0.0111262314f,
0.01105664484f, 0.01098569483f, 0.01091349311f, 0.01084014867f,
0.01076576579f, 0.01069044974f, 0.01061430015f, 0.01053741388f,
0.010459885f, 0.01038180385f, 0.01030325703f, 0.01022432931f,
0.01014509797f, 0.01006564032f, 0.009986029007f, 0.00990633294f,
0.009826615453f, 0.009746940807f, 0.00966736488f, 0.009587943554f,
0.00950872805f, 0.009429766797f, 0.009351104498f, 0.009272783995f,
0.009194843471f, 0.00911732018f, 0.009040246718f, 0.008963654749f,
0.008887572214f, 0.008812026121f, 0.008737040684f, 0.008662636392f,
0.008588834666f, 0.008515651338f, 0.008521833457f, 0.008526788093f,
0.008530510589f, 0.008532994427f, 0.008534237742f, 0.008534237742f,
0.008532994427f, 0.008530510589f, 0.008526788093f, 0.008521833457f,
0.008656143211f, 0.008731043898f, 0.008806557395f, 0.008882662281f,
0.008959336206f, 0.009036554955f, 0.009114289656f, 0.009192512371f,
0.00927118957f, 0.009350287728f, 0.009429766797f, 0.00950958766f,
0.009589706548f, 0.009670076892f, 0.009750646539f, 0.009831363335f,
0.00991217047f, 0.009993007407f, 0.01007380895f, 0.01015450899f,
0.01023503393f, 0.01031531021f, 0.01039525773f, 0.01047479361f,
0.01055383217f, 0.01063228305f, 0.01071005128f, 0.01078704093f,
0.01086314954f, 0.01093827467f, 0.01101230737f, 0.01108513959f,
0.01115665678f, 0.01122674625f, 0.01129528973f, 0.01136216894f,
0.01142726559f, 0.01149045862f, 0.01155162696f, 0.01161065139f,
0.01166741177f, 0.01172179077f, 0.01177367195f, 0.01182294171f,
0.01186948828f, 0.01191320643f, 0.0119539937f, 0.01199175231f,
0.01202639099f, 0.01205782313f, 0.01208597142f, 0.01211076323f,
0.01213213522f, 0.01215003151f, 0.01216440555f, 0.01217522006f,
0.0121824462f, 0.01218606345f, 0.01218606345f, 0.0121824462f,
0.01217522006f, 0.01216440555f, 0.01215003151f, 0.01213213522f,
0.01211076323f, 0.01208597142f, 0.01205782313f, 0.01202639099f,
0.01199175231f, 0.0119539937f, 0.01191320643f, 0.01186948828f,
0.01182294171f, 0.01177367195f, 0.01172179077f, 0.01166741177f,
0.01161065139f, 0.01155162696f, 0.01149045862f, 0.01142726559f,
0.01136216894f, 0.01129528973f, 0.01122674625f, 0.01115665678f,
0.01108513959f, 0.01101230737f, 0.01093827467f, 0.01086314954f,
0.01078704093f, 0.01071005128f, 0.01063228305f, 0.01055383217f,
0.01047479361f, 0.01039525773f, 0.01031531021f, 0.01023503393f,
0.01015450899f, 0.01007380895f, 0.009993007407f, 0.00991217047f,
0.009831363335f, 0.009750646539f, 0.009670076892f, 0.009589706548f,
0.00950958766f, 0.009429766797f, 0.009350287728f, 0.00927118957f,
0.009192512371f, 0.009114289656f, 0.009036554955f, 0.008959336206f,
0.008882662281f, 0.008806557395f, 0.008731043898f, 0.008656143211f,
0.008581873029f, 0.008588834666f, 0.00859454181f, 0.008598989807f,
0.008602170274f, 0.008604080416f, 0.008604717441f, 0.008604080416f,
0.008602170274f, 0.008598989807f, 0.00859454181f, 0.008588834666f,
0.008723732084f, 0.008799735457f, 0.008876360953f, 0.008953588083f,
0.009031393565f, 0.00910975039f, 0.009188630618f, 0.009268003516f,
0.009347835556f, 0.009428090416f, 0.00950872805f, 0.009589706548f,
0.009670981206f, 0.009752500802f, 0.009834215976f, 0.009916068986f,
0.009998000227f, 0.01007994823f, 0.01016184594f, 0.01024362259f,
0.01032520272f, 0.01040650904f, 0.0104874596f, 0.01056796685f,
0.01064794231f, 0.01072729193f, 0.0108059179f, 0.01088371873f,
0.01096059103f, 0.0110364249f, 0.01111111138f, 0.01118453499f,
0.01125658024f, 0.01132712793f, 0.0113960579f, 0.01146324724f,
0.01152857486f, 0.01159191411f, 0.01165314391f, 0.01171213947f,
0.01176877879f, 0.01182294171f, 0.01187450811f, 0.01192336436f,
0.01196939778f, 0.01201249938f, 0.01205256768f, 0.01208950393f,
0.01212321594f, 0.0121536199f, 0.0121806385f, 0.01220420003f,
0.01222424489f, 0.01224071812f, 0.01225357689f, 0.01226278674f,
0.01226832252f, 0.01227016933f, 0.01226832252f, 0.01226278674f,
0.01225357689f, 0.01224071812f, 0.01222424489f, 0.01220420003f,
0.0121806385f, 0.0121536199f, 0.01212321594f, 0.01208950393f,
0.01205256768f, 0.01201249938f, 0.01196939778f, 0.01192336436f,
0.01187450811f, 0.01182294171f, 0.01176877879f, 0.01171213947f,
0.01165314391f, 0.01159191411f, 0.01152857486f, 0.01146324724f,
0.0113960579f, 0.01132712793f, 0.01125658024f, 0.01118453499f,
0.01111111138f, 0.0110364249f, 0.01096059103f, 0.01088371873f,
0.0108059179f, 0.01072729193f, 0.01064794231f, 0.01056796685f,
0.0104874596f, 0.01040650904f, 0.01032520272f, 0.01024362259f,
0.01016184594f, 0.01007994823f, 0.009998000227f, 0.009916068986f,
0.009834215976f, 0.009752500802f, 0.009670981206f, 0.009589706548f,
0.00950872805f, 0.009428090416f, 0.009347835556f, 0.009268003516f,
0.009188630618f, 0.00910975039f, 0.009031393565f, 0.008953588083f,
0.008876360953f, 0.008799735457f, 0.008723732084f, 0.008648370393f,
0.008656143211f, 0.008662636392f, 0.008667841554f, 0.008671752177f,
0.008674361743f, 0.008675667457f, 0.008675667457f, 0.008674361743f,
0.008671752177f, 0.008667841554f, 0.008662636392f, 0.008656143211f,
0.00879156962f, 0.008868678473f, 0.008946419694f, 0.00902477093f,
0.009103707969f, 0.009183204733f, 0.009263231419f, 0.0093437545f,
0.009424740449f, 0.009506150149f, 0.009587943554f, 0.009670076892f,
0.009752500802f, 0.009835166857f, 0.009918019176f, 0.01000100002f,
0.01008404791f, 0.01016709674f, 0.01025007758f, 0.01033291686f,
0.01041553635f, 0.01049785595f, 0.01057978906f, 0.01066124719f,
0.01074213628f, 0.01082235854f, 0.0109018134f, 0.0109803956f,
0.01105799619f, 0.01113450434f, 0.01120980456f, 0.01128377859f,
0.01135630626f, 0.01142726559f, 0.01149653178f, 0.01156397816f,
0.01162947994f, 0.01169290766f, 0.0117541356f, 0.01181303803f,
0.01186948828f, 0.01192336436f, 0.0119745452f, 0.01202291343f,
0.01206835546f, 0.01211076323f, 0.01215003151f, 0.01218606345f,
0.01221876871f, 0.0122480616f, 0.01227386575f, 0.01229611505f,
0.01231474802f, 0.01232971624f, 0.01234097779f, 0.01234850287f,
0.01235227007f, 0.01235227007f, 0.01234850287f, 0.01234097779f,
0.01232971624f, 0.01231474802f, 0.01229611505f, 0.01227386575f,
0.0122480616f, 0.01221876871f, 0.01218606345f, 0.01215003151f,
0.01211076323f, 0.01206835546f, 0.01202291343f, 0.0119745452f,
0.01192336436f, 0.01186948828f, 0.01181303803f, 0.0117541356f,
0.01169290766f, 0.01162947994f, 0.01156397816f, 0.01149653178f,
0.01142726559f, 0.01135630626f, 0.01128377859f, 0.01120980456f,
0.01113450434f, 0.01105799619f, 0.0109803956f, 0.0109018134f,
0.01082235854f, 0.01074213628f, 0.01066124719f, 0.01057978906f,
0.01049785595f, 0.01041553635f, 0.01033291686f, 0.01025007758f,
0.01016709674f, 0.01008404791f, 0.01000100002f, 0.009918019176f,
0.009835166857f, 0.009752500802f, 0.009670076892f, 0.009587943554f,
0.009506150149f, 0.009424740449f, 0.0093437545f, 0.009263231419f,
0.009183204733f, 0.009103707969f, 0.00902477093f, 0.008946419694f,
0.008868678473f, 0.00879156962f, 0.008715113625f, 0.008723732084f,
0.008731043898f, 0.008737040684f, 0.008741713129f, 0.008745054714f,
0.008747061715f, 0.008747731335f, 0.008747061715f, 0.008745054714f,
0.008741713129f, 0.008737040684f, 0.008731043898f, 0.008723732084f,
0.008859624155f, 0.008937839419f, 0.009016696364f, 0.009096172638f,
0.009176243097f, 0.009256878868f, 0.009338049218f, 0.00941972062f,
0.009501857683f, 0.009584420361f, 0.00966736488f, 0.009750646539f,
0.009834215976f, 0.009918019176f, 0.01000200026f, 0.01008609962f,
0.01017025113f, 0.01025438774f, 0.01033843681f, 0.0104223229f,
0.01050596405f, 0.01058927551f, 0.01067216974f, 0.0107545536f,
0.01083632838f, 0.01091739535f, 0.01099764649f, 0.01107697561f,
0.01115526911f, 0.01123241056f, 0.01130828168f, 0.01138276048f,
0.01145572308f, 0.01152704284f, 0.01159659028f, 0.0116642369f,
0.0117298523f, 0.0117933061f, 0.01185446698f, 0.01191320643f,
0.01196939778f, 0.01202291343f, 0.01207363233f, 0.01212143432f,
0.01216620672f, 0.01220783778f, 0.0122462241f, 0.01228126884f,
0.01231288072f, 0.01234097779f, 0.01236548461f, 0.01238633506f,
0.01240347326f, 0.01241685264f, 0.01242643595f, 0.01243219618f,
0.01243411843f, 0.01243219618f, 0.01242643595f, 0.01241685264f,
0.01240347326f, 0.01238633506f, 0.01236548461f, 0.01234097779f,
0.01231288072f, 0.01228126884f, 0.0122462241f, 0.01220783778f,
0.01216620672f, 0.01212143432f, 0.01207363233f, 0.01202291343f,
0.01196939778f, 0.01191320643f, 0.01185446698f, 0.0117933061f,
0.0117298523f, 0.0116642369f, 0.01159659028f, 0.01152704284f,
0.01145572308f, 0.01138276048f, 0.01130828168f, 0.01123241056f,
0.01115526911f, 0.01107697561f, 0.01099764649f, 0.01091739535f,
0.01083632838f, 0.0107545536f, 0.01067216974f, 0.01058927551f,
0.01050596405f, 0.0104223229f, 0.01033843681f, 0.01025438774f,
0.01017025113f, 0.01008609962f, 0.01000200026f, 0.009918019176f,
0.009834215976f, 0.009750646539f, 0.00966736488f, 0.009584420361f,
0.009501857683f, 0.00941972062f, 0.009338049218f, 0.009256878868f,
0.009176243097f, 0.009096172638f, 0.009016696364f, 0.008937839419f,
0.008859624155f, 0.008782071993f, 0.00879156962f, 0.008799735457f,
0.008806557395f, 0.008812026121f, 0.008816135116f, 0.00881887693f,
0.008820248768f, 0.008820248768f, 0.00881887693f, 0.008816135116f,
0.008812026121f, 0.008806557395f, 0.008799735457f, 0.00879156962f,
0.008927859366f, 0.009007181972f, 0.009087154642f, 0.009167755023f,
0.009248957038f, 0.009330729023f, 0.009413041174f, 0.009495858103f,
0.009579141624f, 0.00966285076f, 0.009746940807f, 0.009831363335f,
0.009916068986f, 0.01000100002f, 0.01008609962f, 0.01017130353f,
0.01025654469f, 0.01034175418f, 0.01042685378f, 0.01051176619f,
0.01059640758f, 0.01068068855f, 0.01076451875f, 0.01084779948f,
0.01093043014f, 0.01101230737f, 0.01109332126f, 0.01117335912f,
0.01125230361f, 0.01133003552f, 0.01140643191f, 0.01148136612f,
0.0115547115f, 0.01162633486f, 0.01169610675f, 0.01176389214f,
0.0118295569f, 0.01189296879f, 0.0119539937f, 0.01201249938f,
0.01206835546f, 0.01212143432f, 0.01217161212f, 0.01221876871f,
0.01226278674f, 0.01230355818f, 0.01234097779f, 0.01237494871f,
0.01240538247f, 0.01243219618f, 0.01245531905f, 0.01247468684f,
0.01249024551f, 0.01250195317f, 0.01250977721f, 0.01251369435f,
0.01251369435f, 0.01250977721f, 0.01250195317f, 0.01249024551f,
0.01247468684f, 0.01245531905f, 0.01243219618f, 0.01240538247f,
0.01237494871f, 0.01234097779f, 0.01230355818f, 0.01226278674f,
0.01221876871f, 0.01217161212f, 0.01212143432f, 0.01206835546f,
0.01201249938f, 0.0119539937f, 0.01189296879f, 0.0118295569f,
0.01176389214f, 0.01169610675f, 0.01162633486f, 0.0115547115f,
0.01148136612f, 0.01140643191f, 0.01133003552f, 0.01125230361f,
0.01117335912f, 0.01109332126f, 0.01101230737f, 0.01093043014f,
0.01084779948f, 0.01076451875f, 0.01068068855f, 0.01059640758f,
0.01051176619f, 0.01042685378f, 0.01034175418f, 0.01025654469f,
0.01017130353f, 0.01008609962f, 0.01000100002f, 0.009916068986f,
0.009831363335f, 0.009746940807f, 0.00966285076f, 0.009579141624f,
0.009495858103f, 0.009413041174f, 0.009330729023f, 0.009248957038f,
0.009167755023f, 0.009087154642f, 0.009007181972f, 0.008927859366f,
0.008849211037f, 0.008859624155f, 0.008868678473f, 0.008876360953f,
0.008882662281f, 0.008887572214f, 0.008891084231f, 0.008893193677f,
0.008893896826f, 0.008893193677f, 0.008891084231f, 0.008887572214f,
0.008882662281f, 0.008876360953f, 0.008868678473f, 0.008859624155f,
0.008996240795f, 0.00907666795f, 0.009157754481f, 0.009239477105f,
0.009321806021f, 0.009404712357f, 0.009488161653f, 0.009572117589f,
0.009656541049f, 0.009741389193f, 0.009826615453f, 0.00991217047f,
0.009998000227f, 0.01008404791f, 0.01017025113f, 0.01025654469f,
0.01034285966f, 0.01042912249f, 0.01051525306f, 0.01060117036f,
0.01068678591f, 0.01077201031f, 0.01085674576f, 0.01094089262f,
0.01102434658f, 0.01110699773f, 0.01118873432f, 0.01126943901f,
0.01134899072f, 0.01142726559f, 0.01150413603f, 0.0115794735f,
0.01165314391f, 0.01172501314f, 0.01179494616f, 0.01186280511f,
0.01192845311f, 0.01199175231f, 0.01205256768f, 0.01211076323f,
0.01216620672f, 0.01221876871f, 0.01226832252f, 0.01231474802f,
0.01235792786f, 0.01239775307f, 0.01243411843f, 0.01246692892f,
0.01249609515f, 0.01252153981f, 0.0125431912f, 0.01256099064f,
0.01257488597f, 0.01258483995f, 0.0125908237f, 0.01259282045f,
0.0125908237f, 0.01258483995f, 0.01257488597f, 0.01256099064f,
0.0125431912f, 0.01252153981f, 0.01249609515f, 0.01246692892f,
0.01243411843f, 0.01239775307f, 0.01235792786f, 0.01231474802f,
0.01226832252f, 0.01221876871f, 0.01216620672f, 0.01211076323f,
0.01205256768f, 0.01199175231f, 0.01192845311f, 0.01186280511f,
0.01179494616f, 0.01172501314f, 0.01165314391f, 0.0115794735f,
0.01150413603f, 0.01142726559f, 0.01134899072f, 0.01126943901f,
0.01118873432f, 0.01110699773f, 0.01102434658f, 0.01094089262f,
0.01085674576f, 0.01077201031f, 0.01068678591f, 0.01060117036f,
0.01051525306f, 0.01042912249f, 0.01034285966f, 0.01025654469f,
0.01017025113f, 0.01008404791f, 0.009998000227f, 0.00991217047f,
0.009826615453f, 0.009741389193f, 0.009656541049f, 0.009572117589f,
0.009488161653f, 0.009404712357f, 0.009321806021f, 0.009239477105f,
0.009157754481f, 0.00907666795f, 0.008996240795f, 0.008916495368f,
0.008927859366f, 0.008937839419f, 0.008946419694f, 0.008953588083f,
0.008959336206f, 0.008963654749f, 0.008966536261f, 0.00896797888f,
0.00896797888f, 0.008966536261f, 0.008963654749f, 0.008959336206f,
0.008953588083f, 0.008946419694f, 0.008937839419f, 0.008927859366f,
0.009064726532f, 0.009146256372f, 0.009228453971f, 0.009311294183f,
0.009394746274f, 0.00947877951f, 0.009563359432f, 0.009648446925f,
0.009734002873f, 0.009819980711f, 0.00990633294f, 0.009993007407f,
0.01007994823f, 0.01016709674f, 0.01025438774f, 0.01034175418f,
0.01042912249f, 0.01051641535f, 0.01060355362f, 0.01069044974f,
0.01077701338f, 0.01086314954f, 0.0109487595f, 0.01103373803f,
0.01111797616f, 0.01120136213f, 0.01128377859f, 0.01136510447f,
0.01144521404f, 0.01152398065f, 0.01160127111f, 0.01167695317f,
0.01175088901f, 0.01182294171f, 0.01189296879f, 0.0119608324f,
0.01202639099f, 0.01208950393f, 0.01215003151f, 0.01220783778f,
0.01226278674f, 0.01231474802f, 0.01236359403f, 0.01240920182f,
0.01245145593f, 0.01249024551f, 0.01252546813f, 0.01255702879f,
0.01258483995f, 0.01260882709f, 0.01262892038f, 0.01264506485f,
0.01265721396f, 0.0126653323f, 0.01266939752f, 0.01266939752f,
0.0126653323f, 0.01265721396f, 0.01264506485f, 0.01262892038f,
0.01260882709f, 0.01258483995f, 0.01255702879f, 0.01252546813f,
0.01249024551f, 0.01245145593f, 0.01240920182f, 0.01236359403f,
0.01231474802f, 0.01226278674f, 0.01220783778f, 0.01215003151f,
0.01208950393f, 0.01202639099f, 0.0119608324f, 0.01189296879f,
0.01182294171f, 0.01175088901f, 0.01167695317f, 0.01160127111f,
0.01152398065f, 0.01144521404f, 0.01136510447f, 0.01128377859f,
0.01120136213f, 0.01111797616f, 0.01103373803f, 0.0109487595f,
0.01086314954f, 0.01077701338f, 0.01069044974f, 0.01060355362f,
0.01051641535f, 0.01042912249f, 0.01034175418f, 0.01025438774f,
0.01016709674f, 0.01007994823f, 0.009993007407f, 0.00990633294f,
0.009819980711f, 0.009734002873f, 0.009648446925f, 0.009563359432f,
0.00947877951f, 0.009394746274f, 0.009311294183f, 0.009228453971f,
0.009146256372f, 0.009064726532f, 0.008983888663f, 0.008996240795f,
0.009007181972f, 0.009016696364f, 0.00902477093f, 0.009031393565f,
0.009036554955f, 0.009040246718f, 0.009042463265f, 0.009043202735f,
0.009042463265f, 0.009040246718f, 0.009036554955f, 0.009031393565f,
0.00902477093f, 0.009016696364f, 0.009007181972f, 0.008996240795f,
0.009133277461f, 0.009215905331f, 0.009299207479f, 0.009383158758f,
0.009467727505f, 0.00955288019f, 0.009638582356f, 0.009724793024f,
0.009811468422f, 0.009898564778f, 0.009986029007f, 0.01007380895f,
0.01016184594f, 0.01025007758f, 0.01033843681f, 0.01042685378f,
0.01051525306f, 0.01060355362f, 0.01069167163f, 0.0107795177f,
0.01086699776f, 0.01095401309f, 0.01104046032f, 0.0111262314f,
0.01121121366f, 0.01129528973f, 0.01137833856f, 0.01146023627f,
0.01154085156f, 0.01162005402f, 0.01169770677f, 0.01177367195f,
0.01184780896f, 0.01191997528f, 0.01199002843f, 0.01205782313f,
0.01212321594f, 0.01218606345f, 0.0122462241f, 0.01230355818f,
0.01235792786f, 0.01240920182f, 0.01245725155f, 0.01250195317f,
0.0125431912f, 0.01258085575f, 0.01261484437f, 0.01264506485f,
0.01267143246f, 0.01269387174f, 0.01271232124f, 0.01272672601f,
0.01273704506f, 0.0127432486f, 0.01274531893f, 0.0127432486f,
0.01273704506f, 0.01272672601f, 0.01271232124f, 0.01269387174f,
0.01267143246f, 0.01264506485f, 0.01261484437f, 0.01258085575f,
0.0125431912f, 0.01250195317f, 0.01245725155f, 0.01240920182f,
0.01235792786f, 0.01230355818f, 0.0122462241f, 0.01218606345f,
0.01212321594f, 0.01205782313f, 0.01199002843f, 0.01191997528f,
0.01184780896f, 0.01177367195f, 0.01169770677f, 0.01162005402f,
0.01154085156f, 0.01146023627f, 0.01137833856f, 0.01129528973f,
0.01121121366f, 0.0111262314f, 0.01104046032f, 0.01095401309f,
0.01086699776f, 0.0107795177f, 0.01069167163f, 0.01060355362f,
0.01051525306f, 0.01042685378f, 0.01033843681f, 0.01025007758f,
0.01016184594f, 0.01007380895f, 0.009986029007f, 0.009898564778f,
0.009811468422f, 0.009724793024f, 0.009638582356f, 0.00955288019f,
0.009467727505f, 0.009383158758f, 0.009299207479f, 0.009215905331f,
0.009133277461f, 0.009051349014f, 0.009064726532f, 0.00907666795f,
0.009087154642f, 0.009096172638f, 0.009103707969f, 0.00910975039f,
0.009114289656f, 0.00911732018f, 0.009118835442f, 0.009118835442f,
0.00911732018f, 0.009114289656f, 0.00910975039f, 0.009103707969f,
0.009096172638f, 0.009087154642f, 0.00907666795f, 0.009064726532f,
0.009201847948f, 0.009285567328f, 0.009369968437f, 0.009455023333f,
0.009540699422f, 0.009626962245f, 0.009713775478f, 0.009801096283f,
0.009888879955f, 0.009977078997f, 0.01006564032f, 0.01015450899f,
0.01024362259f, 0.01033291686f, 0.0104223229f, 0.01051176619f,
0.01060117036f, 0.01069044974f, 0.0107795177f, 0.01086828113f,
0.01095664315f, 0.01104449946f, 0.01113174483f, 0.01121826563f,
0.01130394638f, 0.01138866507f, 0.01147229597f, 0.0115547115f,
0.01163577568f, 0.0117153544f, 0.0117933061f, 0.01186948828f,
0.01194375753f, 0.01201596763f, 0.01208597142f, 0.0121536199f,
0.01221876871f, 0.01228126884f, 0.01234097779f, 0.01239775307f,
0.01245145593f, 0.01250195317f, 0.01254911628f, 0.01259282045f,
0.01263295114f, 0.01266939752f, 0.01270206179f, 0.01273085084f,
0.01275568269f, 0.01277648844f, 0.01279320568f, 0.01280578785f,
0.01281419583f, 0.01281840634f, 0.01281840634f, 0.01281419583f,
0.01280578785f, 0.01279320568f, 0.01277648844f, 0.01275568269f,
0.01273085084f, 0.01270206179f, 0.01266939752f, 0.01263295114f,
0.01259282045f, 0.01254911628f, 0.01250195317f, 0.01245145593f,
0.01239775307f, 0.01234097779f, 0.01228126884f, 0.01221876871f,
0.0121536199f, 0.01208597142f, 0.01201596763f, 0.01194375753f,
0.01186948828f, 0.0117933061f, 0.0117153544f, 0.01163577568f,
0.0115547115f, 0.01147229597f, 0.01138866507f, 0.01130394638f,
0.01121826563f, 0.01113174483f, 0.01104449946f, 0.01095664315f,
0.01086828113f, 0.0107795177f, 0.01069044974f, 0.01060117036f,
0.01051176619f, 0.0104223229f, 0.01033291686f, 0.01024362259f,
0.01015450899f, 0.01006564032f, 0.009977078997f, 0.009888879955f,
0.009801096283f, 0.009713775478f, 0.009626962245f, 0.009540699422f,
0.009455023333f, 0.009369968437f, 0.009285567328f, 0.009201847948f,
0.009118835442f, 0.009133277461f, 0.009146256372f, 0.009157754481f,
0.009167755023f, 0.009176243097f, 0.009183204733f, 0.009188630618f,
0.009192512371f, 0.009194843471f, 0.009195621125f, 0.009194843471f,
0.009192512371f, 0.009188630618f, 0.009183204733f, 0.009176243097f,
0.009167755023f, 0.009157754481f, 0.009146256372f, 0.009133277461f,
0.00927039329f, 0.009355195798f, 0.009440686554f, 0.009526834823f,
0.009613607079f, 0.009700968862f, 0.009788879193f, 0.009877296165f,
0.009966172278f, 0.01005545817f, 0.01014509797f, 0.01023503393f,
0.01032520272f, 0.01041553635f, 0.01050596405f, 0.01059640758f,
0.01068678591f, 0.01077701338f, 0.01086699776f, 0.01095664315f,
0.01104584709f, 0.01113450434f, 0.01122250315f, 0.01130972803f,
0.0113960579f, 0.01148136612f, 0.01156552508f, 0.01164839976f,
0.0117298523f, 0.01180974208f, 0.01188792568f, 0.01196425594f,
0.01203858573f, 0.01211076323f, 0.0121806385f, 0.0122480616f,
0.01231288072f, 0.01237494871f, 0.01243411843f, 0.01249024551f,
0.0125431912f, 0.01259282045f, 0.01263900381f, 0.0126816174f,
0.01272054669f, 0.01275568269f, 0.0127869295f, 0.01281419583f,
0.01283740439f, 0.01285648718f, 0.01287138835f, 0.01288206317f,
0.01288848184f, 0.01289062295f, 0.01288848184f, 0.01288206317f,
0.01287138835f, 0.01285648718f, 0.01283740439f, 0.01281419583f,
0.0127869295f, 0.01275568269f, 0.01272054669f, 0.0126816174f,
0.01263900381f, 0.01259282045f, 0.0125431912f, 0.01249024551f,
0.01243411843f, 0.01237494871f, 0.01231288072f, 0.0122480616f,
0.0121806385f, 0.01211076323f, 0.01203858573f, 0.01196425594f,
0.01188792568f, 0.01180974208f, 0.0117298523f, 0.01164839976f,
0.01156552508f, 0.01148136612f, 0.0113960579f, 0.01130972803f,
0.01122250315f, 0.01113450434f, 0.01104584709f, 0.01095664315f,
0.01086699776f, 0.01077701338f, 0.01068678591f, 0.01059640758f,
0.01050596405f, 0.01041553635f, 0.01032520272f, 0.01023503393f,
0.01014509797f, 0.01005545817f, 0.009966172278f, 0.009877296165f,
0.009788879193f, 0.009700968862f, 0.009613607079f, 0.009526834823f,
0.009440686554f, 0.009355195798f, 0.00927039329f, 0.009186304174f,
0.009201847948f, 0.009215905331f, 0.009228453971f, 0.009239477105f,
0.009248957038f, 0.009256878868f, 0.009263231419f, 0.009268003516f,
0.00927118957f, 0.009272783995f, 0.009272783995f, 0.00927118957f,
0.009268003516f, 0.009263231419f, 0.009256878868f, 0.009248957038f,
0.009239477105f, 0.009228453971f, 0.009215905331f, 0.009201847948f,
0.009338863194f, 0.009424740449f, 0.009511308745f, 0.00959853828f,
0.009686394595f, 0.009774839506f, 0.009863832965f, 0.009953328408f,
0.01004327927f, 0.01013363153f, 0.01022432931f, 0.01031531021f,
0.01040650904f, 0.01049785595f, 0.01058927551f, 0.01068068855f,
0.01077201031f, 0.01086314954f, 0.01095401309f, 0.01104449946f,
0.01113450434f, 0.0112239169f, 0.01131262258f, 0.01140050031f,
0.0114874253f, 0.0115732681f, 0.01165789459f, 0.011741166f,
0.01182294171f, 0.0119030755f, 0.01198141929f, 0.01205782313f,
0.01213213522f, 0.01220420003f, 0.01227386575f, 0.01234097779f,
0.01240538247f, 0.01246692892f, 0.01252546813f, 0.01258085575f,
0.01263295114f, 0.0126816174f, 0.01272672601f, 0.01276815403f,
0.01280578785f, 0.01283952035f, 0.01286925655f, 0.0128949089f,
0.01291640475f, 0.01293367799f, 0.01294667926f, 0.01295536757f,
0.0129597187f, 0.0129597187f, 0.01295536757f, 0.01294667926f,
0.01293367799f, 0.01291640475f, 0.0128949089f, 0.01286925655f,
0.01283952035f, 0.01280578785f, 0.01276815403f, 0.01272672601f,
0.0126816174f, 0.01263295114f, 0.01258085575f, 0.01252546813f,
0.01246692892f, 0.01240538247f, 0.01234097779f, 0.01227386575f,
0.01220420003f, 0.01213213522f, 0.01205782313f, 0.01198141929f,
0.0119030755f, 0.01182294171f, 0.011741166f, 0.01165789459f,
0.0115732681f, 0.0114874253f, 0.01140050031f, 0.01131262258f,
0.0112239169f, 0.01113450434f, 0.01104449946f, 0.01095401309f,
0.01086314954f, 0.01077201031f, 0.01068068855f, 0.01058927551f,
0.01049785595f, 0.01040650904f, 0.01031531021f, 0.01022432931f,
0.01013363153f, 0.01004327927f, 0.009953328408f, 0.009863832965f,
0.009774839506f, 0.009686394595f, 0.00959853828f, 0.009511308745f,
0.009424740449f, 0.009338863194f, 0.009253707714f, 0.00927039329f,
0.009285567328f, 0.009299207479f, 0.009311294183f, 0.009321806021f,
0.009330729023f, 0.009338049218f, 0.0093437545f, 0.009347835556f,
0.009350287728f, 0.009351104498f, 0.009350287728f, 0.009347835556f,
0.0093437545f, 0.009338049218f, 0.009330729023f, 0.009321806021f,
0.009311294183f, 0.009299207479f, 0.009285567328f, 0.00927039329f,
0.009407208301f, 0.009494146332f, 0.009581780061f, 0.009670076892f,
0.009759000503f, 0.009848512709f, 0.009938570671f, 0.01002912689f,
0.01012013014f, 0.01021152735f, 0.01030325703f, 0.01039525773f,
0.0104874596f, 0.01057978906f, 0.01067216974f, 0.01076451875f,
0.01085674576f, 0.0109487595f, 0.01104046032f, 0.01113174483f,
0.01122250315f, 0.01131262258f, 0.01140198205f, 0.01149045862f,
0.01157792099f, 0.0116642369f, 0.01174926758f, 0.01183286961f,
0.01191489771f, 0.01199520286f, 0.01207363233f, 0.01215003151f,
0.01222424489f, 0.01229611505f, 0.01236548461f, 0.01243219618f,
0.01249609515f, 0.01255702879f, 0.01261484437f, 0.01266939752f,
0.01272054669f, 0.01276815403f, 0.01281209197f, 0.01285223942f,
0.01288848184f, 0.01292071585f, 0.01294884924f, 0.0129727982f,
0.01299249288f, 0.01300787181f, 0.01301889122f, 0.01302551571f,
0.01302772667f, 0.01302551571f, 0.01301889122f, 0.01300787181f,
0.01299249288f, 0.0129727982f, 0.01294884924f, 0.01292071585f,
0.01288848184f, 0.01285223942f, 0.01281209197f, 0.01276815403f,
0.01272054669f, 0.01266939752f, 0.01261484437f, 0.01255702879f,
0.01249609515f, 0.01243219618f, 0.01236548461f, 0.01229611505f,
0.01222424489f, 0.01215003151f, 0.01207363233f, 0.01199520286f,
0.01191489771f, 0.01183286961f, 0.01174926758f, 0.0116642369f,
0.01157792099f, 0.01149045862f, 0.01140198205f, 0.01131262258f,
0.01122250315f, 0.01113174483f, 0.01104046032f, 0.0109487595f,
0.01085674576f, 0.01076451875f, 0.01067216974f, 0.01057978906f,
0.0104874596f, 0.01039525773f, 0.01030325703f, 0.01021152735f,
0.01012013014f, 0.01002912689f, 0.009938570671f, 0.009848512709f,
0.009759000503f, 0.009670076892f, 0.009581780061f, 0.009494146332f,
0.009407208301f, 0.009320996702f, 0.009338863194f, 0.009355195798f,
0.009369968437f, 0.009383158758f, 0.009394746274f, 0.009404712357f,
0.009413041174f, 0.00941972062f, 0.009424740449f, 0.009428090416f,
0.009429766797f, 0.009429766797f, 0.009428090416f, 0.009424740449f,
0.00941972062f, 0.009413041174f, 0.009404712357f, 0.009394746274f,
0.009383158758f, 0.009369968437f, 0.009355195798f, 0.009338863194f,
0.009475374594f, 0.009563359432f, 0.00965204183f, 0.009741389193f,
0.009831363335f, 0.009921924211f, 0.01001302525f, 0.01010461897f,
0.01019665226f, 0.0102890674f, 0.01038180385f, 0.01047479361f,
0.01056796685f, 0.01066124719f, 0.0107545536f, 0.01084779948f,
0.01094089262f, 0.01103373803f, 0.0111262314f, 0.01121826563f,
0.01130972803f, 0.01140050031f, 0.01149045862f, 0.0115794735f,
0.01166741177f, 0.0117541356f, 0.01183950249f, 0.01192336436f,
0.01200557221f, 0.01208597142f, 0.01216440555f, 0.01224071812f,
0.01231474802f, 0.01238633506f, 0.01245531905f, 0.01252153981f,
0.01258483995f, 0.01264506485f, 0.01270206179f, 0.01275568269f,
0.01280578785f, 0.01285223942f, 0.0128949089f, 0.01293367799f,
0.01296843402f, 0.01299907733f, 0.01302551571f, 0.01304767188f,
0.01306547876f, 0.01307888143f, 0.01308783889f, 0.01309232507f,
0.01309232507f, 0.01308783889f, 0.01307888143f, 0.01306547876f,
0.01304767188f, 0.01302551571f, 0.01299907733f, 0.01296843402f,
0.01293367799f, 0.0128949089f, 0.01285223942f, 0.01280578785f,
0.01275568269f, 0.01270206179f, 0.01264506485f, 0.01258483995f,
0.01252153981f, 0.01245531905f, 0.01238633506f, 0.01231474802f,
0.01224071812f, 0.01216440555f, 0.01208597142f, 0.01200557221f,
0.01192336436f, 0.01183950249f, 0.0117541356f, 0.01166741177f,
0.0115794735f, 0.01149045862f, 0.01140050031f, 0.01130972803f,
0.01121826563f, 0.0111262314f, 0.01103373803f, 0.01094089262f,
0.01084779948f, 0.0107545536f, 0.01066124719f, 0.01056796685f,
0.01047479361f, 0.01038180385f, 0.0102890674f, 0.01019665226f,
0.01010461897f, 0.01001302525f, 0.009921924211f, 0.009831363335f,
0.009741389193f, 0.00965204183f, 0.009563359432f, 0.009475374594f,
0.009388119914f, 0.009407208301f, 0.009424740449f, 0.009440686554f,
0.009455023333f, 0.009467727505f, 0.00947877951f, 0.009488161653f,
0.009495858103f, 0.009501857683f, 0.009506150149f, 0.00950872805f,
0.00950958766f, 0.00950872805f, 0.009506150149f, 0.009501857683f,
0.009495858103f, 0.009488161653f, 0.00947877951f, 0.009467727505f,
0.009455023333f, 0.009440686554f, 0.009424740449f, 0.009407208301f,
0.009543305263f, 0.009632320143f, 0.009722034447f, 0.009812413715f,
0.0099034179f, 0.009995004162f, 0.01008712593f, 0.01017973199f,
0.01027276739f, 0.01036617346f, 0.010459885f, 0.01055383217f,
0.01064794231f, 0.01074213628f, 0.01083632838f, 0.01093043014f,
0.01102434658f, 0.01111797616f, 0.01121121366f, 0.01130394638f,
0.0113960579f, 0.0114874253f, 0.01157792099f, 0.01166741177f,
0.01175575983f, 0.01184282266f, 0.01192845311f, 0.01201249938f,
0.01209480781f, 0.01217522006f, 0.01225357689f, 0.01232971624f,
0.01240347326f, 0.01247468684f, 0.0125431912f, 0.01260882709f,
0.01267143246f, 0.01273085084f, 0.0127869295f, 0.01283952035f,
0.01288848184f, 0.01293367799f, 0.01297498215f, 0.01301227603f,
0.0130454516f, 0.01307440922f, 0.01309906319f, 0.01311933808f,
0.01313517336f, 0.01314651966f, 0.01315334067f, 0.01315561775f,
0.01315334067f, 0.01314651966f, 0.01313517336f, 0.01311933808f,
0.01309906319f, 0.01307440922f, 0.0130454516f, 0.01301227603f,
0.01297498215f, 0.01293367799f, 0.01288848184f, 0.01283952035f,
0.0127869295f, 0.01273085084f, 0.01267143246f, 0.01260882709f,
0.0125431912f, 0.01247468684f, 0.01240347326f, 0.01232971624f,
0.01225357689f, 0.01217522006f, 0.01209480781f, 0.01201249938f,
0.01192845311f, 0.01184282266f, 0.01175575983f, 0.01166741177f,
0.01157792099f, 0.0114874253f, 0.0113960579f, 0.01130394638f,
0.01121121366f, 0.01111797616f, 0.01102434658f, 0.01093043014f,
0.01083632838f, 0.01074213628f, 0.01064794231f, 0.01055383217f,
0.010459885f, 0.01036617346f, 0.01027276739f, 0.01017973199f,
0.01008712593f, 0.009995004162f, 0.0099034179f, 0.009812413715f,
0.009722034447f, 0.009632320143f, 0.009543305263f, 0.009455023333f,
0.009475374594f, 0.009494146332f, 0.009511308745f, 0.009526834823f,
0.009540699422f, 0.00955288019f, 0.009563359432f, 0.009572117589f,
0.009579141624f, 0.009584420361f, 0.009587943554f, 0.009589706548f,
0.009589706548f, 0.009587943554f, 0.009584420361f, 0.009579141624f,
0.009572117589f, 0.009563359432f, 0.00955288019f, 0.009540699422f,
0.009526834823f, 0.009511308745f, 0.009494146332f, 0.009475374594f,
0.009610942565f, 0.009700968862f, 0.009791694582f, 0.009883082472f,
0.009975093417f, 0.01006768085f, 0.01016079728f, 0.01025438774f,
0.01034839638f, 0.01044276077f, 0.01053741388f, 0.01063228305f,
0.01072729193f, 0.01082235854f, 0.01091739535f, 0.01101230737f,
0.01110699773f, 0.01120136213f, 0.01129528973f, 0.01138866507f,
0.01148136612f, 0.0115732681f, 0.0116642369f, 0.0117541356f,
0.01184282266f, 0.01193015091f, 0.01201596763f, 0.01210011914f,
0.0121824462f, 0.01226278674f, 0.01234097779f, 0.01241685264f,
0.01249024551f, 0.01256099064f, 0.01262892038f, 0.01269387174f,
0.01275568269f, 0.01281419583f, 0.01286925655f, 0.01292071585f,
0.01296843402f, 0.01301227603f, 0.01305211708f, 0.01308783889f,
0.01311933808f, 0.01314651966f, 0.01316929981f, 0.01318760961f,
0.01320139226f, 0.0132106049f, 0.01321521774f, 0.01321521774f,
0.0132106049f, 0.01320139226f, 0.01318760961f, 0.01316929981f,
0.01314651966f, 0.01311933808f, 0.01308783889f, 0.01305211708f,
0.01301227603f, 0.01296843402f, 0.01292071585f, 0.01286925655f,
0.01281419583f, 0.01275568269f, 0.01269387174f, 0.01262892038f,
0.01256099064f, 0.01249024551f, 0.01241685264f, 0.01234097779f,
0.01226278674f, 0.0121824462f, 0.01210011914f, 0.01201596763f,
0.01193015091f, 0.01184282266f, 0.0117541356f, 0.0116642369f,
0.0115732681f, 0.01148136612f, 0.01138866507f, 0.01129528973f,
0.01120136213f, 0.01110699773f, 0.01101230737f, 0.01091739535f,
0.01082235854f, 0.01072729193f, 0.01063228305f, 0.01053741388f,
0.01044276077f, 0.01034839638f, 0.01025438774f, 0.01016079728f,
0.01006768085f, 0.009975093417f, 0.009883082472f, 0.009791694582f,
0.009700968862f, 0.009610942565f, 0.009521651082f, 0.009543305263f,
0.009563359432f, 0.009581780061f, 0.00959853828f, 0.009613607079f,
0.009626962245f, 0.009638582356f, 0.009648446925f, 0.009656541049f,
0.00966285076f, 0.00966736488f, 0.009670076892f, 0.009670981206f,
0.009670076892f, 0.00966736488f, 0.00966285076f, 0.009656541049f,
0.009648446925f, 0.009638582356f, 0.009626962245f, 0.009613607079f,
0.00959853828f, 0.009581780061f, 0.009563359432f, 0.009543305263f,
0.009678225033f, 0.009769240394f, 0.009860955179f, 0.009953328408f,
0.01004632004f, 0.01013988163f, 0.01023396198f, 0.01032850612f,
0.01042345539f, 0.01051874273f, 0.01061430015f, 0.01071005128f,
0.0108059179f, 0.0109018134f, 0.01099764649f, 0.01109332126f,
0.01118873432f, 0.01128377859f, 0.01137833856f, 0.01147229597f,
0.01156552508f, 0.01165789459f, 0.01174926758f, 0.01183950249f,
0.01192845311f, 0.01201596763f, 0.01210189145f, 0.01218606345f,
0.01226832252f, 0.01234850287f, 0.01242643595f, 0.01250195317f,
0.01257488597f, 0.01264506485f, 0.01271232124f, 0.01277648844f,
0.01283740439f, 0.0128949089f, 0.01294884924f, 0.01299907733f,
0.0130454516f, 0.01308783889f, 0.01312611811f, 0.01316017378f,
0.01318990346f, 0.01321521774f, 0.01323603839f, 0.01325230021f,
0.01326395292f, 0.01327095926f, 0.01327329688f, 0.01327095926f,
0.01326395292f, 0.01325230021f, 0.01323603839f, 0.01321521774f,
0.01318990346f, 0.01316017378f, 0.01312611811f, 0.01308783889f,
0.0130454516f, 0.01299907733f, 0.01294884924f, 0.0128949089f,
0.01283740439f, 0.01277648844f, 0.01271232124f, 0.01264506485f,
0.01257488597f, 0.01250195317f, 0.01242643595f, 0.01234850287f,
0.01226832252f, 0.01218606345f, 0.01210189145f, 0.01201596763f,
0.01192845311f, 0.01183950249f, 0.01174926758f, 0.01165789459f,
0.01156552508f, 0.01147229597f, 0.01137833856f, 0.01128377859f,
0.01118873432f, 0.01109332126f, 0.01099764649f, 0.0109018134f,
0.0108059179f, 0.01071005128f, 0.01061430015f, 0.01051874273f,
0.01042345539f, 0.01032850612f, 0.01023396198f, 0.01013988163f,
0.01004632004f, 0.009953328408f, 0.009860955179f, 0.009769240394f,
0.009678225033f, 0.009587943554f, 0.009610942565f, 0.009632320143f,
0.00965204183f, 0.009670076892f, 0.009686394595f, 0.009700968862f,
0.009713775478f, 0.009724793024f, 0.009734002873f, 0.009741389193f,
0.009746940807f, 0.009750646539f, 0.009752500802f, 0.009752500802f,
0.009750646539f, 0.009746940807f, 0.009741389193f, 0.009734002873f,
0.009724793024f, 0.009713775478f, 0.009700968862f, 0.009686394595f,
0.009670076892f, 0.00965204183f, 0.009632320143f, 0.009610942565f,
0.009745089337f, 0.00983707048f, 0.009929747321f, 0.01002307981f,
0.01011702232f, 0.01021152735f, 0.01030653995f, 0.01040200423f,
0.01049785595f, 0.01059402898f, 0.01069044974f, 0.01078704093f,
0.01088371873f, 0.0109803956f, 0.01107697561f, 0.01117335912f,
0.01126943901f, 0.01136510447f, 0.01146023627f, 0.0115547115f,
0.01164839976f, 0.011741166f, 0.01183286961f, 0.01192336436f,
0.01201249938f, 0.01210011914f, 0.01218606345f, 0.01227016933f,
0.01235227007f, 0.01243219618f, 0.01250977721f, 0.01258483995f,
0.01265721396f, 0.01272672601f, 0.01279320568f, 0.01285648718f,
0.01291640475f, 0.0129727982f, 0.01302551571f, 0.01307440922f,
0.01311933808f, 0.01316017378f, 0.01319679338f, 0.013229087f,
0.01325695775f, 0.01328031812f, 0.01329909544f, 0.01331323106f,
0.01332267933f, 0.01332741138f, 0.01332741138f, 0.01332267933f,
0.01331323106f, 0.01329909544f, 0.01328031812f, 0.01325695775f,
0.013229087f, 0.01319679338f, 0.01316017378f, 0.01311933808f,
0.01307440922f, 0.01302551571f, 0.0129727982f, 0.01291640475f,
0.01285648718f, 0.01279320568f, 0.01272672601f, 0.01265721396f,
0.01258483995f, 0.01250977721f, 0.01243219618f, 0.01235227007f,
0.01227016933f, 0.01218606345f, 0.01210011914f, 0.01201249938f,
0.01192336436f, 0.01183286961f, 0.011741166f, 0.01164839976f,
0.0115547115f, 0.01146023627f, 0.01136510447f, 0.01126943901f,
0.01117335912f, 0.01107697561f, 0.0109803956f, 0.01088371873f,
0.01078704093f, 0.01069044974f, 0.01059402898f, 0.01049785595f,
0.01040200423f, 0.01030653995f, 0.01021152735f, 0.01011702232f,
0.01002307981f, 0.009929747321f, 0.00983707048f, 0.009745089337f,
0.009653841145f, 0.009678225033f, 0.009700968862f, 0.009722034447f,
0.009741389193f, 0.009759000503f, 0.009774839506f, 0.009788879193f,
0.009801096283f, 0.009811468422f, 0.009819980711f, 0.009826615453f,
0.009831363335f, 0.009834215976f, 0.009835166857f, 0.009834215976f,
0.009831363335f, 0.009826615453f, 0.009819980711f, 0.009811468422f,
0.009801096283f, 0.009788879193f, 0.009774839506f, 0.009759000503f,
0.009741389193f, 0.009722034447f, 0.009700968862f, 0.009678225033f,
0.009811468422f, 0.00990438927f, 0.009998000227f, 0.01009226125f,
0.0101871239f, 0.01028253883f, 0.01037844829f, 0.01047479361f,
0.0105715096f, 0.01066852547f, 0.01076576579f, 0.01086314954f,
0.01096059103f, 0.01105799619f, 0.01115526911f, 0.01125230361f,
0.01134899072f, 0.01144521404f, 0.01154085156f, 0.01163577568f,
0.0117298523f, 0.01182294171f, 0.01191489771f, 0.01200557221f,
0.01209480781f, 0.0121824462f, 0.01226832252f, 0.01235227007f,
0.01243411843f, 0.01251369435f, 0.0125908237f, 0.0126653323f,
0.01273704506f, 0.01280578785f, 0.01287138835f, 0.01293367799f,
0.01299249288f, 0.01304767188f, 0.01309906319f, 0.01314651966f,
0.01318990346f, 0.013229087f, 0.01326395292f, 0.0132943932f,
0.01332031563f, 0.01334163733f, 0.01335829217f, 0.01337022707f,
0.01337740291f, 0.01337979734f, 0.01337740291f, 0.01337022707f,
0.01335829217f, 0.01334163733f, 0.01332031563f, 0.0132943932f,
0.01326395292f, 0.013229087f, 0.01318990346f, 0.01314651966f,
0.01309906319f, 0.01304767188f, 0.01299249288f, 0.01293367799f,
0.01287138835f, 0.01280578785f, 0.01273704506f, 0.0126653323f,
0.0125908237f, 0.01251369435f, 0.01243411843f, 0.01235227007f,
0.01226832252f, 0.0121824462f, 0.01209480781f, 0.01200557221f,
0.01191489771f, 0.01182294171f, 0.0117298523f, 0.01163577568f,
0.01154085156f, 0.01144521404f, 0.01134899072f, 0.01125230361f,
0.01115526911f, 0.01105799619f, 0.01096059103f, 0.01086314954f,
0.01076576579f, 0.01066852547f, 0.0105715096f, 0.01047479361f,
0.01037844829f, 0.01028253883f, 0.0101871239f, 0.01009226125f,
0.009998000227f, 0.00990438927f, 0.009811468422f, 0.009719279595f,
0.009745089337f, 0.009769240394f, 0.009791694582f, 0.009812413715f,
0.009831363335f, 0.009848512709f, 0.009863832965f, 0.009877296165f,
0.009888879955f, 0.009898564778f, 0.00990633294f, 0.00991217047f,
0.009916068986f, 0.009918019176f, 0.009918019176f, 0.009916068986f,
0.00991217047f, 0.00990633294f, 0.009898564778f, 0.009888879955f,
0.009877296165f, 0.009863832965f, 0.009848512709f, 0.009831363335f,
0.009812413715f, 0.009791694582f, 0.009769240394f, 0.009745089337f,
0.009877296165f, 0.009971125983f, 0.01006564032f, 0.01016079728f,
0.01025654469f, 0.01035283227f, 0.01044960041f, 0.01054678671f,
0.01064432319f, 0.01074213628f, 0.01084014867f, 0.01093827467f,
0.0110364249f, 0.01113450434f, 0.01123241056f, 0.01133003552f,
0.01142726559f, 0.01152398065f, 0.01162005402f, 0.0117153544f,
0.01180974208f, 0.0119030755f, 0.01199520286f, 0.01208597142f,
0.01217522006f, 0.01226278674f, 0.01234850287f, 0.01243219618f,
0.01251369435f, 0.01259282045f, 0.01266939752f, 0.0127432486f,
0.01281419583f, 0.01288206317f, 0.01294667926f, 0.01300787181f,
0.01306547876f, 0.01311933808f, 0.01316929981f, 0.01321521774f,
0.01325695775f, 0.0132943932f, 0.01332741138f, 0.01335590892f,
0.01337979734f, 0.01339900028f, 0.0134134572f, 0.01342312153f,
0.01342796069f, 0.01342796069f, 0.01342312153f, 0.0134134572f,
0.01339900028f, 0.01337979734f, 0.01335590892f, 0.01332741138f,
0.0132943932f, 0.01325695775f, 0.01321521774f, 0.01316929981f,
0.01311933808f, 0.01306547876f, 0.01300787181f, 0.01294667926f,
0.01288206317f, 0.01281419583f, 0.0127432486f, 0.01266939752f,
0.01259282045f, 0.01251369435f, 0.01243219618f, 0.01234850287f,
0.01226278674f, 0.01217522006f, 0.01208597142f, 0.01199520286f,
0.0119030755f, 0.01180974208f, 0.0117153544f, 0.01162005402f,
0.01152398065f, 0.01142726559f, 0.01133003552f, 0.01123241056f,
0.01113450434f, 0.0110364249f, 0.01093827467f, 0.01084014867f,
0.01074213628f, 0.01064432319f, 0.01054678671f, 0.01044960041f,
0.01035283227f, 0.01025654469f, 0.01016079728f, 0.01006564032f,
0.009971125983f, 0.009877296165f, 0.009784192778f, 0.009811468422f,
0.00983707048f, 0.009860955179f, 0.009883082472f, 0.0099034179f,
0.009921924211f, 0.009938570671f, 0.009953328408f, 0.009966172278f,
0.009977078997f, 0.009986029007f, 0.009993007407f, 0.009998000227f,
0.01000100002f, 0.01000200026f, 0.01000100002f, 0.009998000227f,
0.009993007407f, 0.009986029007f, 0.009977078997f, 0.009966172278f,
0.009953328408f, 0.009938570671f, 0.009921924211f, 0.0099034179f,
0.009883082472f, 0.009860955179f, 0.00983707048f, 0.009811468422f,
0.009942499921f, 0.01003720704f, 0.01013259124f, 0.01022860687f,
0.01032520272f, 0.0104223229f, 0.01051990688f, 0.01061788946f,
0.01071619987f, 0.01081476174f, 0.01091349311f, 0.01101230737f,
0.01111111138f, 0.01120980456f, 0.01130828168f, 0.01140643191f,
0.01150413603f, 0.01160127111f, 0.01169770677f, 0.0117933061f,
0.01188792568f, 0.01198141929f, 0.01207363233f, 0.01216440555f,
0.01225357689f, 0.01234097779f, 0.01242643595f, 0.01250977721f,
0.0125908237f, 0.01266939752f, 0.01274531893f, 0.01281840634f,
0.01288848184f, 0.01295536757f, 0.01301889122f, 0.01307888143f,
0.01313517336f, 0.01318760961f, 0.01323603839f, 0.01328031812f,
0.01332031563f, 0.01335590892f, 0.01338698901f, 0.0134134572f,
0.01343523059f, 0.0134522384f, 0.01346442662f, 0.01347175613f,
0.01347420178f, 0.01347175613f, 0.01346442662f, 0.0134522384f,
0.01343523059f, 0.0134134572f, 0.01338698901f, 0.01335590892f,
0.01332031563f, 0.01328031812f, 0.01323603839f, 0.01318760961f,
0.01313517336f, 0.01307888143f, 0.01301889122f, 0.01295536757f,
0.01288848184f, 0.01281840634f, 0.01274531893f, 0.01266939752f,
0.0125908237f, 0.01250977721f, 0.01242643595f, 0.01234097779f,
0.01225357689f, 0.01216440555f, 0.01207363233f, 0.01198141929f,
0.01188792568f, 0.0117933061f, 0.01169770677f, 0.01160127111f,
0.01150413603f, 0.01140643191f, 0.01130828168f, 0.01120980456f,
0.01111111138f, 0.01101230737f, 0.01091349311f, 0.01081476174f,
0.01071619987f, 0.01061788946f, 0.01051990688f, 0.0104223229f,
0.01032520272f, 0.01022860687f, 0.01013259124f, 0.01003720704f,
0.009942499921f, 0.009848512709f, 0.009877296165f, 0.00990438927f,
0.009929747321f, 0.009953328408f, 0.009975093417f, 0.009995004162f,
0.01001302525f, 0.01002912689f, 0.01004327927f, 0.01005545817f,
0.01006564032f, 0.01007380895f, 0.01007994823f, 0.01008404791f,
0.01008609962f, 0.01008609962f, 0.01008404791f, 0.01007994823f,
0.01007380895f, 0.01006564032f, 0.01005545817f, 0.01004327927f,
0.01002912689f, 0.01001302525f, 0.009995004162f, 0.009975093417f,
0.009953328408f, 0.009929747321f, 0.00990438927f, 0.009877296165f,
0.01000700705f, 0.01010255609f, 0.01019877382f, 0.01029560994f,
0.01039301138f, 0.01049092133f, 0.01058927551f, 0.01068800688f,
0.01078704093f, 0.01088629849f, 0.01098569483f, 0.01108513959f,
0.01118453499f, 0.01128377859f, 0.01138276048f, 0.01148136612f,
0.0115794735f, 0.01167695317f, 0.01177367195f, 0.01186948828f,
0.01196425594f, 0.01205782313f, 0.01215003151f, 0.01224071812f,
0.01232971624f, 0.01241685264f, 0.01250195317f, 0.01258483995f,
0.0126653323f, 0.0127432486f, 0.01281840634f, 0.01289062295f,
0.0129597187f, 0.01302551571f, 0.01308783889f, 0.01314651966f,
0.01320139226f, 0.01325230021f, 0.01329909544f, 0.01334163733f,
0.01337979734f, 0.0134134572f, 0.01344251167f, 0.01346686855f,
0.01348644961f, 0.01350119151f, 0.01351104677f, 0.01351598185f,
0.01351598185f, 0.01351104677f, 0.01350119151f, 0.01348644961f,
0.01346686855f, 0.01344251167f, 0.0134134572f, 0.01337979734f,
0.01334163733f, 0.01329909544f, 0.01325230021f, 0.01320139226f,
0.01314651966f, 0.01308783889f, 0.01302551571f, 0.0129597187f,
0.01289062295f, 0.01281840634f, 0.0127432486f, 0.0126653323f,
0.01258483995f, 0.01250195317f, 0.01241685264f, 0.01232971624f,
0.01224071812f, 0.01215003151f, 0.01205782313f, 0.01196425594f,
0.01186948828f, 0.01177367195f, 0.01167695317f, 0.0115794735f,
0.01148136612f, 0.01138276048f, 0.01128377859f, 0.01118453499f,
0.01108513959f, 0.01098569483f, 0.01088629849f, 0.01078704093f,
0.01068800688f, 0.01058927551f, 0.01049092133f, 0.01039301138f,
0.01029560994f, 0.01019877382f, 0.01010255609f, 0.01000700705f,
0.00991217047f, 0.009942499921f, 0.009971125983f, 0.009998000227f,
0.01002307981f, 0.01004632004f, 0.01006768085f, 0.01008712593f,
0.01010461897f, 0.01012013014f, 0.01013363153f, 0.01014509797f,
0.01015450899f, 0.01016184594f, 0.01016709674f, 0.01017025113f,
0.01017130353f, 0.01017025113f, 0.01016709674f, 0.01016184594f,
0.01015450899f, 0.01014509797f, 0.01013363153f, 0.01012013014f,
0.01010461897f, 0.01008712593f, 0.01006768085f, 0.01004632004f,
0.01002307981f, 0.009998000227f, 0.009971125983f, 0.009942499921f,
0.01007074397f, 0.01016709674f, 0.01026410609f, 0.01036172081f,
0.010459885f, 0.01055853814f, 0.0106576141f, 0.0107570421f,
0.01085674576f, 0.01095664315f, 0.01105664484f, 0.01115665678f,
0.01125658024f, 0.01135630626f, 0.01145572308f, 0.0115547115f,
0.01165314391f, 0.01175088901f, 0.01184780896f, 0.01194375753f,
0.01203858573f, 0.01213213522f, 0.01222424489f, 0.01231474802f,
0.01240347326f, 0.01249024551f, 0.01257488597f, 0.01265721396f,
0.01273704506f, 0.01281419583f, 0.01288848184f, 0.0129597187f,
0.01302772667f, 0.01309232507f, 0.01315334067f, 0.0132106049f,
0.01326395292f, 0.01331323106f, 0.01335829217f, 0.01339900028f,
0.01343523059f, 0.01346686855f, 0.01349381451f, 0.01351598185f,
0.01353329886f, 0.01354570966f, 0.01355317142f, 0.01355566178f,
0.01355317142f, 0.01354570966f, 0.01353329886f, 0.01351598185f,
0.01349381451f, 0.01346686855f, 0.01343523059f, 0.01339900028f,
0.01335829217f, 0.01331323106f, 0.01326395292f, 0.0132106049f,
0.01315334067f, 0.01309232507f, 0.01302772667f, 0.0129597187f,
0.01288848184f, 0.01281419583f, 0.01273704506f, 0.01265721396f,
0.01257488597f, 0.01249024551f, 0.01240347326f, 0.01231474802f,
0.01222424489f, 0.01213213522f, 0.01203858573f, 0.01194375753f,
0.01184780896f, 0.01175088901f, 0.01165314391f, 0.0115547115f,
0.01145572308f, 0.01135630626f, 0.01125658024f, 0.01115665678f,
0.01105664484f, 0.01095664315f, 0.01085674576f, 0.0107570421f,
0.0106576141f, 0.01055853814f, 0.010459885f, 0.01036172081f,
0.01026410609f, 0.01016709674f, 0.01007074397f, 0.009975093417f,
0.01000700705f, 0.01003720704f, 0.01006564032f, 0.01009226125f,
0.01011702232f, 0.01013988163f, 0.01016079728f, 0.01017973199f,
0.01019665226f, 0.01021152735f, 0.01022432931f, 0.01023503393f,
0.01024362259f, 0.01025007758f, 0.01025438774f, 0.01025654469f,
0.01025654469f, 0.01025438774f, 0.01025007758f, 0.01024362259f,
0.01023503393f, 0.01022432931f, 0.01021152735f, 0.01019665226f,
0.01017973199f, 0.01016079728f, 0.01013988163f, 0.01011702232f,
0.01009226125f, 0.01006564032f, 0.01003720704f, 0.01000700705f,
0.01013363153f, 0.01023074798f, 0.01032850612f, 0.01042685378f,
0.0105257323f, 0.01062507927f, 0.01072482392f, 0.01082489453f,
0.01092521101f, 0.01102568675f, 0.0111262314f, 0.01122674625f,
0.01132712793f, 0.01142726559f, 0.01152704284f, 0.01162633486f,
0.01172501314f, 0.01182294171f, 0.01191997528f, 0.01201596763f,
0.01211076323f, 0.01220420003f, 0.01229611505f, 0.01238633506f,
0.01247468684f, 0.01256099064f, 0.01264506485f, 0.01272672601f,
0.01280578785f, 0.01288206317f, 0.01295536757f, 0.01302551571f,
0.01309232507f, 0.01315561775f, 0.01321521774f, 0.01327095926f,
0.01332267933f, 0.01337022707f, 0.0134134572f, 0.0134522384f,
0.01348644961f, 0.01351598185f, 0.01354074106f, 0.01356064621f,
0.01357563306f, 0.01358565222f, 0.01359067019f, 0.01359067019f,
0.01358565222f, 0.01357563306f, 0.01356064621f, 0.01354074106f,
0.01351598185f, 0.01348644961f, 0.0134522384f, 0.0134134572f,
0.01337022707f, 0.01332267933f, 0.01327095926f, 0.01321521774f,
0.01315561775f, 0.01309232507f, 0.01302551571f, 0.01295536757f,
0.01288206317f, 0.01280578785f, 0.01272672601f, 0.01264506485f,
0.01256099064f, 0.01247468684f, 0.01238633506f, 0.01229611505f,
0.01220420003f, 0.01211076323f, 0.01201596763f, 0.01191997528f,
0.01182294171f, 0.01172501314f, 0.01162633486f, 0.01152704284f,
0.01142726559f, 0.01132712793f, 0.01122674625f, 0.0111262314f,
0.01102568675f, 0.01092521101f, 0.01082489453f, 0.01072482392f,
0.01062507927f, 0.0105257323f, 0.01042685378f, 0.01032850612f,
0.01023074798f, 0.01013363153f, 0.01003720704f, 0.01007074397f,
0.01010255609f, 0.01013259124f, 0.01016079728f, 0.0101871239f,
0.01021152735f, 0.01023396198f, 0.01025438774f, 0.01027276739f,
0.0102890674f, 0.01030325703f, 0.01031531021f, 0.01032520272f,
0.01033291686f, 0.01033843681f, 0.01034175418f, 0.01034285966f,
0.01034175418f, 0.01033843681f, 0.01033291686f, 0.01032520272f,
0.01031531021f, 0.01030325703f, 0.0102890674f, 0.01027276739f,
0.01025438774f, 0.01023396198f, 0.01021152735f, 0.0101871239f,
0.01016079728f, 0.01013259124f, 0.01010255609f, 0.01007074397f,
0.01019559242f, 0.01029342785f, 0.01039188914f, 0.01049092133f,
0.01059046388f, 0.01069044974f, 0.01079080813f, 0.01089146268f,
0.01099232957f, 0.01109332126f, 0.01119434182f, 0.01129528973f,
0.0113960579f, 0.01149653178f, 0.01159659028f, 0.01169610675f,
0.01179494616f, 0.01189296879f, 0.01199002843f, 0.01208597142f,
0.0121806385f, 0.01227386575f, 0.01236548461f, 0.01245531905f,
0.0125431912f, 0.01262892038f, 0.01271232124f, 0.01279320568f,
0.01287138835f, 0.01294667926f, 0.01301889122f, 0.01308783889f,
0.01315334067f, 0.01321521774f, 0.01327329688f, 0.01332741138f,
0.01337740291f, 0.01342312153f, 0.01346442662f, 0.01350119151f,
0.01353329886f, 0.01356064621f, 0.01358314604f, 0.01360072289f,
0.01361331902f, 0.0136208944f, 0.01362342201f, 0.0136208944f,
0.01361331902f, 0.01360072289f, 0.01358314604f, 0.01356064621f,
0.01353329886f, 0.01350119151f, 0.01346442662f, 0.01342312153f,
0.01337740291f, 0.01332741138f, 0.01327329688f, 0.01321521774f,
0.01315334067f, 0.01308783889f, 0.01301889122f, 0.01294667926f,
0.01287138835f, 0.01279320568f, 0.01271232124f, 0.01262892038f,
0.0125431912f, 0.01245531905f, 0.01236548461f, 0.01227386575f,
0.0121806385f, 0.01208597142f, 0.01199002843f, 0.01189296879f,
0.01179494616f, 0.01169610675f, 0.01159659028f, 0.01149653178f,
0.0113960579f, 0.01129528973f, 0.01119434182f, 0.01109332126f,
0.01099232957f, 0.01089146268f, 0.01079080813f, 0.01069044974f,
0.01059046388f, 0.01049092133f, 0.01039188914f, 0.01029342785f,
0.01019559242f, 0.01009843498f, 0.01013363153f, 0.01016709674f,
0.01019877382f, 0.01022860687f, 0.01025654469f, 0.01028253883f,
0.01030653995f, 0.01032850612f, 0.01034839638f, 0.01036617346f,
0.01038180385f, 0.01039525773f, 0.01040650904f, 0.01041553635f,
0.0104223229f, 0.01042685378f, 0.01042912249f, 0.01042912249f,
0.01042685378f, 0.0104223229f, 0.01041553635f, 0.01040650904f,
0.01039525773f, 0.01038180385f, 0.01036617346f, 0.01034839638f,
0.01032850612f, 0.01030653995f, 0.01028253883f, 0.01025654469f,
0.01022860687f, 0.01019877382f, 0.01016709674f, 0.01013363153f,
0.01025654469f, 0.01035505254f, 0.01045416761f, 0.01055383217f,
0.0106539838f, 0.0107545536f, 0.01085546613f, 0.01095664315f,
0.01105799619f, 0.01115943585f, 0.01126086153f, 0.01136216894f,
0.01146324724f, 0.01156397816f, 0.0116642369f, 0.01176389214f,
0.01186280511f, 0.0119608324f, 0.01205782313f, 0.0121536199f,
0.0122480616f, 0.01234097779f, 0.01243219618f, 0.01252153981f,
0.01260882709f, 0.01269387174f, 0.01277648844f, 0.01285648718f,
0.01293367799f, 0.01300787181f, 0.01307888143f, 0.01314651966f,
0.0132106049f, 0.01327095926f, 0.01332741138f, 0.01337979734f,
0.01342796069f, 0.01347175613f, 0.01351104677f, 0.01354570966f,
0.01357563306f, 0.01360072289f, 0.0136208944f, 0.01363608148f,
0.01364623569f, 0.01365132071f, 0.01365132071f, 0.01364623569f,
0.01363608148f, 0.0136208944f, 0.01360072289f, 0.01357563306f,
0.01354570966f, 0.01351104677f, 0.01347175613f, 0.01342796069f,
0.01337979734f, 0.01332741138f, 0.01327095926f, 0.0132106049f,
0.01314651966f, 0.01307888143f, 0.01300787181f, 0.01293367799f,
0.01285648718f, 0.01277648844f, 0.01269387174f, 0.01260882709f,
0.01252153981f, 0.01243219618f, 0.01234097779f, 0.0122480616f,
0.0121536199f, 0.01205782313f, 0.0119608324f, 0.01186280511f,
0.01176389214f, 0.0116642369f, 0.01156397816f, 0.01146324724f,
0.01136216894f, 0.01126086153f, 0.01115943585f, 0.01105799619f,
0.01095664315f, 0.01085546613f, 0.0107545536f, 0.0106539838f,
0.01055383217f, 0.01045416761f, 0.01035505254f, 0.01025654469f,
0.01015869901f, 0.01019559242f, 0.01023074798f, 0.01026410609f,
0.01029560994f, 0.01032520272f, 0.01035283227f, 0.01037844829f,
0.01040200423f, 0.01042345539f, 0.01044276077f, 0.010459885f,
0.01047479361f, 0.0104874596f, 0.01049785595f, 0.01050596405f,
0.01051176619f, 0.01051525306f, 0.01051641535f, 0.01051525306f,
0.01051176619f, 0.01050596405f, 0.01049785595f, 0.0104874596f,
0.01047479361f, 0.010459885f, 0.01044276077f, 0.01042345539f,
0.01040200423f, 0.01037844829f, 0.01035283227f, 0.01032520272f,
0.01029560994f, 0.01026410609f, 0.01023074798f, 0.01019559242f,
0.01031640731f, 0.01041553635f, 0.01051525306f, 0.01061549596f,
0.01071619987f, 0.01081729215f, 0.01091869641f, 0.01102032885f,
0.01112210099f, 0.0112239169f, 0.01132567506f, 0.01142726559f,
0.01152857486f, 0.01162947994f, 0.0117298523f, 0.0118295569f,
0.01192845311f, 0.01202639099f, 0.01212321594f, 0.01221876871f,
0.01231288072f, 0.01240538247f, 0.01249609515f, 0.01258483995f,
0.01267143246f, 0.01275568269f, 0.01283740439f, 0.01291640475f,
0.01299249288f, 0.01306547876f, 0.01313517336f, 0.01320139226f,
0.01326395292f, 0.01332267933f, 0.01337740291f, 0.01342796069f,
0.01347420178f, 0.01351598185f, 0.01355317142f, 0.01358565222f,
0.01361331902f, 0.01363608148f, 0.01365386508f, 0.01366661023f,
0.01367427502f, 0.01367683243f, 0.01367427502f, 0.01366661023f,
0.01365386508f, 0.01363608148f, 0.01361331902f, 0.01358565222f,
0.01355317142f, 0.01351598185f, 0.01347420178f, 0.01342796069f,
0.01337740291f, 0.01332267933f, 0.01326395292f, 0.01320139226f,
0.01313517336f, 0.01306547876f, 0.01299249288f, 0.01291640475f,
0.01283740439f, 0.01275568269f, 0.01267143246f, 0.01258483995f,
0.01249609515f, 0.01240538247f, 0.01231288072f, 0.01221876871f,
0.01212321594f, 0.01202639099f, 0.01192845311f, 0.0118295569f,
0.0117298523f, 0.01162947994f, 0.01152857486f, 0.01142726559f,
0.01132567506f, 0.0112239169f, 0.01112210099f, 0.01102032885f,
0.01091869641f, 0.01081729215f, 0.01071619987f, 0.01061549596f,
0.01051525306f, 0.01041553635f, 0.01031640731f, 0.01021792181f,
0.01025654469f, 0.01029342785f, 0.01032850612f, 0.01036172081f,
0.01039301138f, 0.0104223229f, 0.01044960041f, 0.01047479361f,
0.01049785595f, 0.01051874273f, 0.01053741388f, 0.01055383217f,
0.01056796685f, 0.01057978906f, 0.01058927551f, 0.01059640758f,
0.01060117036f, 0.01060355362f, 0.01060355362f, 0.01060117036f,
0.01059640758f, 0.01058927551f, 0.01057978906f, 0.01056796685f,
0.01055383217f, 0.01053741388f, 0.01051874273f, 0.01049785595f,
0.01047479361f, 0.01044960041f, 0.0104223229f, 0.01039301138f,
0.01036172081f, 0.01032850612f, 0.01029342785f, 0.01025654469f,
0.01037509646f, 0.01047479361f, 0.01057505608f, 0.01067581866f,
0.01077701338f, 0.01087856572f, 0.0109803956f, 0.0110824164f,
0.01118453499f, 0.01128665265f, 0.01138866507f, 0.01149045862f,
0.01159191411f, 0.01169290766f, 0.0117933061f, 0.01189296879f,
0.01199175231f, 0.01208950393f, 0.01218606345f, 0.01228126884f,
0.01237494871f, 0.01246692892f, 0.01255702879f, 0.01264506485f,
0.01273085084f, 0.01281419583f, 0.0128949089f, 0.0129727982f,
0.01304767188f, 0.01311933808f, 0.01318760961f, 0.01325230021f,
0.01331323106f, 0.01337022707f, 0.01342312153f, 0.01347175613f,
0.01351598185f, 0.01355566178f, 0.01359067019f, 0.0136208944f,
0.01364623569f, 0.01366661023f, 0.01368195191f, 0.01369220857f,
0.01369734481f, 0.01369734481f, 0.01369220857f, 0.01368195191f,
0.01366661023f, 0.01364623569f, 0.0136208944f, 0.01359067019f,
0.01355566178f, 0.01351598185f, 0.01347175613f, 0.01342312153f,
0.01337022707f, 0.01331323106f, 0.01325230021f, 0.01318760961f,
0.01311933808f, 0.01304767188f, 0.0129727982f, 0.0128949089f,
0.01281419583f, 0.01273085084f, 0.01264506485f, 0.01255702879f,
0.01246692892f, 0.01237494871f, 0.01228126884f, 0.01218606345f,
0.01208950393f, 0.01199175231f, 0.01189296879f, 0.0117933061f,
0.01169290766f, 0.01159191411f, 0.01149045862f, 0.01138866507f,
0.01128665265f, 0.01118453499f, 0.0110824164f, 0.0109803956f,
0.01087856572f, 0.01077701338f, 0.01067581866f, 0.01057505608f,
0.01047479361f, 0.01037509646f, 0.01027602144f, 0.01031640731f,
0.01035505254f, 0.01039188914f, 0.01042685378f, 0.010459885f,
0.01049092133f, 0.01051990688f, 0.01054678671f, 0.0105715096f,
0.01059402898f, 0.01061430015f, 0.01063228305f, 0.01064794231f,
0.01066124719f, 0.01067216974f, 0.01068068855f, 0.01068678591f,
0.01069044974f, 0.01069167163f, 0.01069044974f, 0.01068678591f,
0.01068068855f, 0.01067216974f, 0.01066124719f, 0.01064794231f,
0.01063228305f, 0.01061430015f, 0.01059402898f, 0.0105715096f,
0.01054678671f, 0.01051990688f, 0.01049092133f, 0.010459885f,
0.01042685378f, 0.01039188914f, 0.01035505254f, 0.01031640731f,
0.01043252647f, 0.01053273678f, 0.01063348539f, 0.01073470619f,
0.01083632838f, 0.01093827467f, 0.01104046032f, 0.01114279591f,
0.01124518644f, 0.01134752948f, 0.01144971419f, 0.01155162696f,
0.01165314391f, 0.0117541356f, 0.01185446698f, 0.0119539937f,
0.01205256768f, 0.01215003151f, 0.0122462241f, 0.01234097779f,
0.01243411843f, 0.01252546813f, 0.01261484437f, 0.01270206179f,
0.0127869295f, 0.01286925655f, 0.01294884924f, 0.01302551571f,
0.01309906319f, 0.01316929981f, 0.01323603839f, 0.01329909544f,
0.01335829217f, 0.0134134572f, 0.01346442662f, 0.01351104677f,
0.01355317142f, 0.01359067019f, 0.01362342201f, 0.01365132071f,
0.01367427502f, 0.01369220857f, 0.01370506082f, 0.01371278986f,
0.01371536963f, 0.01371278986f, 0.01370506082f, 0.01369220857f,
0.01367427502f, 0.01365132071f, 0.01362342201f, 0.01359067019f,
0.01355317142f, 0.01351104677f, 0.01346442662f, 0.0134134572f,
0.01335829217f, 0.01329909544f, 0.01323603839f, 0.01316929981f,
0.01309906319f, 0.01302551571f, 0.01294884924f, 0.01286925655f,
0.0127869295f, 0.01270206179f, 0.01261484437f, 0.01252546813f,
0.01243411843f, 0.01234097779f, 0.0122462241f, 0.01215003151f,
0.01205256768f, 0.0119539937f, 0.01185446698f, 0.0117541356f,
0.01165314391f, 0.01155162696f, 0.01144971419f, 0.01134752948f,
0.01124518644f, 0.01114279591f, 0.01104046032f, 0.01093827467f,
0.01083632838f, 0.01073470619f, 0.01063348539f, 0.01053273678f,
0.01043252647f, 0.01033291686f, 0.01037509646f, 0.01041553635f,
0.01045416761f, 0.01049092133f, 0.0105257323f, 0.01055853814f,
0.01058927551f, 0.01061788946f, 0.01064432319f, 0.01066852547f,
0.01069044974f, 0.01071005128f, 0.01072729193f, 0.01074213628f,
0.0107545536f, 0.01076451875f, 0.01077201031f, 0.01077701338f,
0.0107795177f, 0.0107795177f, 0.01077701338f, 0.01077201031f,
0.01076451875f, 0.0107545536f, 0.01074213628f, 0.01072729193f,
0.01071005128f, 0.01069044974f, 0.01066852547f, 0.01064432319f,
0.01061788946f, 0.01058927551f, 0.01055853814f, 0.0105257323f,
0.01049092133f, 0.01045416761f, 0.01041553635f, 0.01037509646f,
0.01048861258f, 0.01058927551f, 0.01069044974f, 0.01079206541f,
0.01089404803f, 0.01099631656f, 0.01109878626f, 0.01120136213f,
0.01130394638f, 0.01140643191f, 0.01150870696f, 0.01161065139f,
0.01171213947f, 0.01181303803f, 0.01191320643f, 0.01201249938f,
0.01211076323f, 0.01220783778f, 0.01230355818f, 0.01239775307f,
0.01249024551f, 0.01258085575f, 0.01266939752f, 0.01275568269f,
0.01283952035f, 0.01292071585f, 0.01299907733f, 0.01307440922f,
0.01314651966f, 0.01321521774f, 0.01328031812f, 0.01334163733f,
0.01339900028f, 0.0134522384f, 0.01350119151f, 0.01354570966f,
0.01358565222f, 0.0136208944f, 0.01365132071f, 0.01367683243f,
0.01369734481f, 0.01371278986f, 0.01372311637f, 0.013728288f,
0.013728288f, 0.01372311637f, 0.01371278986f, 0.01369734481f,
0.01367683243f, 0.01365132071f, 0.0136208944f, 0.01358565222f,
0.01354570966f, 0.01350119151f, 0.0134522384f, 0.01339900028f,
0.01334163733f, 0.01328031812f, 0.01321521774f, 0.01314651966f,
0.01307440922f, 0.01299907733f, 0.01292071585f, 0.01283952035f,
0.01275568269f, 0.01266939752f, 0.01258085575f, 0.01249024551f,
0.01239775307f, 0.01230355818f, 0.01220783778f, 0.01211076323f,
0.01201249938f, 0.01191320643f, 0.01181303803f, 0.01171213947f,
0.01161065139f, 0.01150870696f, 0.01140643191f, 0.01130394638f,
0.01120136213f, 0.01109878626f, 0.01099631656f, 0.01089404803f,
0.01079206541f, 0.01069044974f, 0.01058927551f, 0.01048861258f,
0.01038852427f, 0.01043252647f, 0.01047479361f, 0.01051525306f,
0.01055383217f, 0.01059046388f, 0.01062507927f, 0.0106576141f,
0.01068800688f, 0.01071619987f, 0.01074213628f, 0.01076576579f,
0.01078704093f, 0.0108059179f, 0.01082235854f, 0.01083632838f,
0.01084779948f, 0.01085674576f, 0.01086314954f, 0.01086699776f,
0.01086828113f, 0.01086699776f, 0.01086314954f, 0.01085674576f,
0.01084779948f, 0.01083632838f, 0.01082235854f, 0.0108059179f,
0.01078704093f, 0.01076576579f, 0.01074213628f, 0.01071619987f,
0.01068800688f, 0.0106576141f, 0.01062507927f, 0.01059046388f,
0.01055383217f, 0.01051525306f, 0.01047479361f, 0.01043252647f,
0.01054326911f, 0.01064432319f, 0.01074585691f, 0.01084779948f,
0.01095007174f, 0.01105259173f, 0.01115526911f, 0.01125800703f,
0.01136070304f, 0.01146324724f, 0.01156552508f, 0.01166741177f,
0.01176877879f, 0.01186948828f, 0.01196939778f, 0.01206835546f,
0.01216620672f, 0.01226278674f, 0.01235792786f, 0.01245145593f,
0.0125431912f, 0.01263295114f, 0.01272054669f, 0.01280578785f,
0.01288848184f, 0.01296843402f, 0.0130454516f, 0.01311933808f,
0.01318990346f, 0.01325695775f, 0.01332031563f, 0.01337979734f,
0.01343523059f, 0.01348644961f, 0.01353329886f, 0.01357563306f,
0.01361331902f, 0.01364623569f, 0.01367427502f, 0.01369734481f,
0.01371536963f, 0.013728288f, 0.01373605616f, 0.01373864897f,
0.01373605616f, 0.013728288f, 0.01371536963f, 0.01369734481f,
0.01367427502f, 0.01364623569f, 0.01361331902f, 0.01357563306f,
0.01353329886f, 0.01348644961f, 0.01343523059f, 0.01337979734f,
0.01332031563f, 0.01325695775f, 0.01318990346f, 0.01311933808f,
0.0130454516f, 0.01296843402f, 0.01288848184f, 0.01280578785f,
0.01272054669f, 0.01263295114f, 0.0125431912f, 0.01245145593f,
0.01235792786f, 0.01226278674f, 0.01216620672f, 0.01206835546f,
0.01196939778f, 0.01186948828f, 0.01176877879f, 0.01166741177f,
0.01156552508f, 0.01146324724f, 0.01136070304f, 0.01125800703f,
0.01115526911f, 0.01105259173f, 0.01095007174f, 0.01084779948f,
0.01074585691f, 0.01064432319f, 0.01054326911f, 0.01044276077f,
0.01048861258f, 0.01053273678f, 0.01057505608f, 0.01061549596f,
0.0106539838f, 0.01069044974f, 0.01072482392f, 0.0107570421f,
0.01078704093f, 0.01081476174f, 0.01084014867f, 0.01086314954f,
0.01088371873f, 0.0109018134f, 0.01091739535f, 0.01093043014f,
0.01094089262f, 0.0109487595f, 0.01095401309f, 0.01095664315f,
0.01095664315f, 0.01095401309f, 0.0109487595f, 0.01094089262f,
0.01093043014f, 0.01091739535f, 0.0109018134f, 0.01088371873f,
0.01086314954f, 0.01084014867f, 0.01081476174f, 0.01078704093f,
0.0107570421f, 0.01072482392f, 0.01069044974f, 0.0106539838f,
0.01061549596f, 0.01057505608f, 0.01053273678f, 0.01048861258f,
0.01059640758f, 0.01069778763f, 0.01079961471f, 0.0109018134f,
0.01100430358f, 0.01110699773f, 0.01120980456f, 0.01131262258f,
0.01141534653f, 0.01151786372f, 0.01162005402f, 0.01172179077f,
0.01182294171f, 0.01192336436f, 0.01202291343f, 0.01212143432f,
0.01221876871f, 0.01231474802f, 0.01240920182f, 0.01250195317f,
0.01259282045f, 0.0126816174f, 0.01276815403f, 0.01285223942f,
0.01293367799f, 0.01301227603f, 0.01308783889f, 0.01316017378f,
0.013229087f, 0.0132943932f, 0.01335590892f, 0.0134134572f,
0.01346686855f, 0.01351598185f, 0.01356064621f, 0.01360072289f,
0.01363608148f, 0.01366661023f, 0.01369220857f, 0.01371278986f,
0.013728288f, 0.01373864897f, 0.0137438383f, 0.0137438383f,
0.01373864897f, 0.013728288f, 0.01371278986f, 0.01369220857f,
0.01366661023f, 0.01363608148f, 0.01360072289f, 0.01356064621f,
0.01351598185f, 0.01346686855f, 0.0134134572f, 0.01335590892f,
0.0132943932f, 0.013229087f, 0.01316017378f, 0.01308783889f,
0.01301227603f, 0.01293367799f, 0.01285223942f, 0.01276815403f,
0.0126816174f, 0.01259282045f, 0.01250195317f, 0.01240920182f,
0.01231474802f, 0.01221876871f, 0.01212143432f, 0.01202291343f,
0.01192336436f, 0.01182294171f, 0.01172179077f, 0.01162005402f,
0.01151786372f, 0.01141534653f, 0.01131262258f, 0.01120980456f,
0.01110699773f, 0.01100430358f, 0.0109018134f, 0.01079961471f,
0.01069778763f, 0.01059640758f, 0.01049554255f, 0.01054326911f,
0.01058927551f, 0.01063348539f, 0.01067581866f, 0.01071619987f,
0.0107545536f, 0.01079080813f, 0.01082489453f, 0.01085674576f,
0.01088629849f, 0.01091349311f, 0.01093827467f, 0.01096059103f,
0.0109803956f, 0.01099764649f, 0.01101230737f, 0.01102434658f,
0.01103373803f, 0.01104046032f, 0.01104449946f, 0.01104584709f,
0.01104449946f, 0.01104046032f, 0.01103373803f, 0.01102434658f,
0.01101230737f, 0.01099764649f, 0.0109803956f, 0.01096059103f,
0.01093827467f, 0.01091349311f, 0.01088629849f, 0.01085674576f,
0.01082489453f, 0.01079080813f, 0.0107545536f, 0.01071619987f,
0.01067581866f, 0.01063348539f, 0.01058927551f, 0.01054326911f,
0.01064794231f, 0.01074958127f, 0.01085163094f, 0.01095401309f,
0.01105664484f, 0.01115943585f, 0.01126228925f, 0.01136510447f,
0.01146776881f, 0.01157016866f, 0.01167218015f, 0.01177367195f,
0.01187450811f, 0.0119745452f, 0.01207363233f, 0.01217161212f,
0.01226832252f, 0.01236359403f, 0.01245725155f, 0.01254911628f,
0.01263900381f, 0.01272672601f, 0.01281209197f, 0.0128949089f,
0.01297498215f, 0.01305211708f, 0.01312611811f, 0.01319679338f,
0.01326395292f, 0.01332741138f, 0.01338698901f, 0.01344251167f,
0.01349381451f, 0.01354074106f, 0.01358314604f, 0.0136208944f,
0.01365386508f, 0.01368195191f, 0.01370506082f, 0.01372311637f,
0.01373605616f, 0.0137438383f, 0.01374643482f, 0.0137438383f,
0.01373605616f, 0.01372311637f, 0.01370506082f, 0.01368195191f,
0.01365386508f, 0.0136208944f, 0.01358314604f, 0.01354074106f,
0.01349381451f, 0.01344251167f, 0.01338698901f, 0.01332741138f,
0.01326395292f, 0.01319679338f, 0.01312611811f, 0.01305211708f,
0.01297498215f, 0.0128949089f, 0.01281209197f, 0.01272672601f,
0.01263900381f, 0.01254911628f, 0.01245725155f, 0.01236359403f,
0.01226832252f, 0.01217161212f, 0.01207363233f, 0.0119745452f,
0.01187450811f, 0.01177367195f, 0.01167218015f, 0.01157016866f,
0.01146776881f, 0.01136510447f, 0.01126228925f, 0.01115943585f,
0.01105664484f, 0.01095401309f, 0.01085163094f, 0.01074958127f,
0.01064794231f, 0.01054678671f, 0.01059640758f, 0.01064432319f,
0.01069044974f, 0.01073470619f, 0.01077701338f, 0.01081729215f,
0.01085546613f, 0.01089146268f, 0.01092521101f, 0.01095664315f,
0.01098569483f, 0.01101230737f, 0.0110364249f, 0.01105799619f,
0.01107697561f, 0.01109332126f, 0.01110699773f, 0.01111797616f,
0.0111262314f, 0.01113174483f, 0.01113450434f, 0.01113450434f,
0.01113174483f, 0.0111262314f, 0.01111797616f, 0.01110699773f,
0.01109332126f, 0.01107697561f, 0.01105799619f, 0.0110364249f,
0.01101230737f, 0.01098569483f, 0.01095664315f, 0.01092521101f,
0.01089146268f, 0.01085546613f, 0.01081729215f, 0.01077701338f,
0.01073470619f, 0.01069044974f, 0.01064432319f, 0.01059640758f,
0.01069778763f, 0.01079961471f, 0.0109018134f, 0.01100430358f,
0.01110699773f, 0.01120980456f, 0.01131262258f, 0.01141534653f,
0.01151786372f, 0.01162005402f, 0.01172179077f, 0.01182294171f,
0.01192336436f, 0.01202291343f, 0.01212143432f, 0.01221876871f,
0.01231474802f, 0.01240920182f, 0.01250195317f, 0.01259282045f,
0.0126816174f, 0.01276815403f, 0.01285223942f, 0.01293367799f,
0.01301227603f, 0.01308783889f, 0.01316017378f, 0.013229087f,
0.0132943932f, 0.01335590892f, 0.0134134572f, 0.01346686855f,
0.01351598185f, 0.01356064621f, 0.01360072289f, 0.01363608148f,
0.01366661023f, 0.01369220857f, 0.01371278986f, 0.013728288f,
0.01373864897f, 0.0137438383f, 0.0137438383f, 0.01373864897f,
0.013728288f, 0.01371278986f, 0.01369220857f, 0.01366661023f,
0.01363608148f, 0.01360072289f, 0.01356064621f, 0.01351598185f,
0.01346686855f, 0.0134134572f, 0.01335590892f, 0.0132943932f,
0.013229087f, 0.01316017378f, 0.01308783889f, 0.01301227603f,
0.01293367799f, 0.01285223942f, 0.01276815403f, 0.0126816174f,
0.01259282045f, 0.01250195317f, 0.01240920182f, 0.01231474802f,
0.01221876871f, 0.01212143432f, 0.01202291343f, 0.01192336436f,
0.01182294171f, 0.01172179077f, 0.01162005402f, 0.01151786372f,
0.01141534653f, 0.01131262258f, 0.01120980456f, 0.01110699773f,
0.01100430358f, 0.0109018134f, 0.01079961471f, 0.01069778763f,
0.01059640758f, 0.01064794231f, 0.01069778763f, 0.01074585691f,
0.01079206541f, 0.01083632838f, 0.01087856572f, 0.01091869641f,
0.01095664315f, 0.01099232957f, 0.01102568675f, 0.01105664484f,
0.01108513959f, 0.01111111138f, 0.01113450434f, 0.01115526911f,
0.01117335912f, 0.01118873432f, 0.01120136213f, 0.01121121366f,
0.01121826563f, 0.01122250315f, 0.0112239169f, 0.01122250315f,
0.01121826563f, 0.01121121366f, 0.01120136213f, 0.01118873432f,
0.01117335912f, 0.01115526911f, 0.01113450434f, 0.01111111138f,
0.01108513959f, 0.01105664484f, 0.01102568675f, 0.01099232957f,
0.01095664315f, 0.01091869641f, 0.01087856572f, 0.01083632838f,
0.01079206541f, 0.01074585691f, 0.01069778763f, 0.01064794231f,
0.01074585691f, 0.01084779948f, 0.01095007174f, 0.01105259173f,
0.01115526911f, 0.01125800703f, 0.01136070304f, 0.01146324724f,
0.01156552508f, 0.01166741177f, 0.01176877879f, 0.01186948828f,
0.01196939778f, 0.01206835546f, 0.01216620672f, 0.01226278674f,
0.01235792786f, 0.01245145593f, 0.0125431912f, 0.01263295114f,
0.01272054669f, 0.01280578785f, 0.01288848184f, 0.01296843402f,
0.0130454516f, 0.01311933808f, 0.01318990346f, 0.01325695775f,
0.01332031563f, 0.01337979734f, 0.01343523059f, 0.01348644961f,
0.01353329886f, 0.01357563306f, 0.01361331902f, 0.01364623569f,
0.01367427502f, 0.01369734481f, 0.01371536963f, 0.013728288f,
0.01373605616f, 0.01373864897f, 0.01373605616f, 0.013728288f,
0.01371536963f, 0.01369734481f, 0.01367427502f, 0.01364623569f,
0.01361331902f, 0.01357563306f, 0.01353329886f, 0.01348644961f,
0.01343523059f, 0.01337979734f, 0.01332031563f, 0.01325695775f,
0.01318990346f, 0.01311933808f, 0.0130454516f, 0.01296843402f,
0.01288848184f, 0.01280578785f, 0.01272054669f, 0.01263295114f,
0.0125431912f, 0.01245145593f, 0.01235792786f, 0.01226278674f,
0.01216620672f, 0.01206835546f, 0.01196939778f, 0.01186948828f,
0.01176877879f, 0.01166741177f, 0.01156552508f, 0.01146324724f,
0.01136070304f, 0.01125800703f, 0.01115526911f, 0.01105259173f,
0.01095007174f, 0.01084779948f, 0.01074585691f, 0.01064432319f,
0.01069778763f, 0.01074958127f, 0.01079961471f, 0.01084779948f,
0.01089404803f, 0.01093827467f, 0.0109803956f, 0.01102032885f,
0.01105799619f, 0.01109332126f, 0.0111262314f, 0.01115665678f,
0.01118453499f, 0.01120980456f, 0.01123241056f, 0.01125230361f,
0.01126943901f, 0.01128377859f, 0.01129528973f, 0.01130394638f,
0.01130972803f, 0.01131262258f, 0.01131262258f, 0.01130972803f,
0.01130394638f, 0.01129528973f, 0.01128377859f, 0.01126943901f,
0.01125230361f, 0.01123241056f, 0.01120980456f, 0.01118453499f,
0.01115665678f, 0.0111262314f, 0.01109332126f, 0.01105799619f,
0.01102032885f, 0.0109803956f, 0.01093827467f, 0.01089404803f,
0.01084779948f, 0.01079961471f, 0.01074958127f, 0.01069778763f,
0.01079206541f, 0.01089404803f, 0.01099631656f, 0.01109878626f,
0.01120136213f, 0.01130394638f, 0.01140643191f, 0.01150870696f,
0.01161065139f, 0.01171213947f, 0.01181303803f, 0.01191320643f,
0.01201249938f, 0.01211076323f, 0.01220783778f, 0.01230355818f,
0.01239775307f, 0.01249024551f, 0.01258085575f, 0.01266939752f,
0.01275568269f, 0.01283952035f, 0.01292071585f, 0.01299907733f,
0.01307440922f, 0.01314651966f, 0.01321521774f, 0.01328031812f,
0.01334163733f, 0.01339900028f, 0.0134522384f, 0.01350119151f,
0.01354570966f, 0.01358565222f, 0.0136208944f, 0.01365132071f,
0.01367683243f, 0.01369734481f, 0.01371278986f, 0.01372311637f,
0.013728288f, 0.013728288f, 0.01372311637f, 0.01371278986f,
0.01369734481f, 0.01367683243f, 0.01365132071f, 0.0136208944f,
0.01358565222f, 0.01354570966f, 0.01350119151f, 0.0134522384f,
0.01339900028f, 0.01334163733f, 0.01328031812f, 0.01321521774f,
0.01314651966f, 0.01307440922f, 0.01299907733f, 0.01292071585f,
0.01283952035f, 0.01275568269f, 0.01266939752f, 0.01258085575f,
0.01249024551f, 0.01239775307f, 0.01230355818f, 0.01220783778f,
0.01211076323f, 0.01201249938f, 0.01191320643f, 0.01181303803f,
0.01171213947f, 0.01161065139f, 0.01150870696f, 0.01140643191f,
0.01130394638f, 0.01120136213f, 0.01109878626f, 0.01099631656f,
0.01089404803f, 0.01079206541f, 0.01069044974f, 0.01074585691f,
0.01079961471f, 0.01085163094f, 0.0109018134f, 0.01095007174f,
0.01099631656f, 0.01104046032f, 0.0110824164f, 0.01112210099f,
0.01115943585f, 0.01119434182f, 0.01122674625f, 0.01125658024f,
0.01128377859f, 0.01130828168f, 0.01133003552f, 0.01134899072f,
0.01136510447f, 0.01137833856f, 0.01138866507f, 0.0113960579f,
0.01140050031f, 0.01140198205f, 0.01140050031f, 0.0113960579f,
0.01138866507f, 0.01137833856f, 0.01136510447f, 0.01134899072f,
0.01133003552f, 0.01130828168f, 0.01128377859f, 0.01125658024f,
0.01122674625f, 0.01119434182f, 0.01115943585f, 0.01112210099f,
0.0110824164f, 0.01104046032f, 0.01099631656f, 0.01095007174f,
0.0109018134f, 0.01085163094f, 0.01079961471f, 0.01074585691f,
0.01083632838f, 0.01093827467f, 0.01104046032f, 0.01114279591f,
0.01124518644f, 0.01134752948f, 0.01144971419f, 0.01155162696f,
0.01165314391f, 0.0117541356f, 0.01185446698f, 0.0119539937f,
0.01205256768f, 0.01215003151f, 0.0122462241f, 0.01234097779f,
0.01243411843f, 0.01252546813f, 0.01261484437f, 0.01270206179f,
0.0127869295f, 0.01286925655f, 0.01294884924f, 0.01302551571f,
0.01309906319f, 0.01316929981f, 0.01323603839f, 0.01329909544f,
0.01335829217f, 0.0134134572f, 0.01346442662f, 0.01351104677f,
0.01355317142f, 0.01359067019f, 0.01362342201f, 0.01365132071f,
0.01367427502f, 0.01369220857f, 0.01370506082f, 0.01371278986f,
0.01371536963f, 0.01371278986f, 0.01370506082f, 0.01369220857f,
0.01367427502f, 0.01365132071f, 0.01362342201f, 0.01359067019f,
0.01355317142f, 0.01351104677f, 0.01346442662f, 0.0134134572f,
0.01335829217f, 0.01329909544f, 0.01323603839f, 0.01316929981f,
0.01309906319f, 0.01302551571f, 0.01294884924f, 0.01286925655f,
0.0127869295f, 0.01270206179f, 0.01261484437f, 0.01252546813f,
0.01243411843f, 0.01234097779f, 0.0122462241f, 0.01215003151f,
0.01205256768f, 0.0119539937f, 0.01185446698f, 0.0117541356f,
0.01165314391f, 0.01155162696f, 0.01144971419f, 0.01134752948f,
0.01124518644f, 0.01114279591f, 0.01104046032f, 0.01093827467f,
0.01083632838f, 0.01073470619f, 0.01079206541f, 0.01084779948f,
0.0109018134f, 0.01095401309f, 0.01100430358f, 0.01105259173f,
0.01109878626f, 0.01114279591f, 0.01118453499f, 0.0112239169f,
0.01126086153f, 0.01129528973f, 0.01132712793f, 0.01135630626f,
0.01138276048f, 0.01140643191f, 0.01142726559f, 0.01144521404f,
0.01146023627f, 0.01147229597f, 0.01148136612f, 0.0114874253f,
0.01149045862f, 0.01149045862f, 0.0114874253f, 0.01148136612f,
0.01147229597f, 0.01146023627f, 0.01144521404f, 0.01142726559f,
0.01140643191f, 0.01138276048f, 0.01135630626f, 0.01132712793f,
0.01129528973f, 0.01126086153f, 0.0112239169f, 0.01118453499f,
0.01114279591f, 0.01109878626f, 0.01105259173f, 0.01100430358f,
0.01095401309f, 0.0109018134f, 0.01084779948f, 0.01079206541f,
0.01087856572f, 0.0109803956f, 0.0110824164f, 0.01118453499f,
0.01128665265f, 0.01138866507f, 0.01149045862f, 0.01159191411f,
0.01169290766f, 0.0117933061f, 0.01189296879f, 0.01199175231f,
0.01208950393f, 0.01218606345f, 0.01228126884f, 0.01237494871f,
0.01246692892f, 0.01255702879f, 0.01264506485f, 0.01273085084f,
0.01281419583f, 0.0128949089f, 0.0129727982f, 0.01304767188f,
0.01311933808f, 0.01318760961f, 0.01325230021f, 0.01331323106f,
0.01337022707f, 0.01342312153f, 0.01347175613f, 0.01351598185f,
0.01355566178f, 0.01359067019f, 0.0136208944f, 0.01364623569f,
0.01366661023f, 0.01368195191f, 0.01369220857f, 0.01369734481f,
0.01369734481f, 0.01369220857f, 0.01368195191f, 0.01366661023f,
0.01364623569f, 0.0136208944f, 0.01359067019f, 0.01355566178f,
0.01351598185f, 0.01347175613f, 0.01342312153f, 0.01337022707f,
0.01331323106f, 0.01325230021f, 0.01318760961f, 0.01311933808f,
0.01304767188f, 0.0129727982f, 0.0128949089f, 0.01281419583f,
0.01273085084f, 0.01264506485f, 0.01255702879f, 0.01246692892f,
0.01237494871f, 0.01228126884f, 0.01218606345f, 0.01208950393f,
0.01199175231f, 0.01189296879f, 0.0117933061f, 0.01169290766f,
0.01159191411f, 0.01149045862f, 0.01138866507f, 0.01128665265f,
0.01118453499f, 0.0110824164f, 0.0109803956f, 0.01087856572f,
0.01077701338f, 0.01083632838f, 0.01089404803f, 0.01095007174f,
0.01100430358f, 0.01105664484f, 0.01110699773f, 0.01115526911f,
0.01120136213f, 0.01124518644f, 0.01128665265f, 0.01132567506f,
0.01136216894f, 0.0113960579f, 0.01142726559f, 0.01145572308f,
0.01148136612f, 0.01150413603f, 0.01152398065f, 0.01154085156f,
0.0115547115f, 0.01156552508f, 0.0115732681f, 0.01157792099f,
0.0115794735f, 0.01157792099f, 0.0115732681f, 0.01156552508f,
0.0115547115f, 0.01154085156f, 0.01152398065f, 0.01150413603f,
0.01148136612f, 0.01145572308f, 0.01142726559f, 0.0113960579f,
0.01136216894f, 0.01132567506f, 0.01128665265f, 0.01124518644f,
0.01120136213f, 0.01115526911f, 0.01110699773f, 0.01105664484f,
0.01100430358f, 0.01095007174f, 0.01089404803f, 0.01083632838f,
0.01091869641f, 0.01102032885f, 0.01112210099f, 0.0112239169f,
0.01132567506f, 0.01142726559f, 0.01152857486f, 0.01162947994f,
0.0117298523f, 0.0118295569f, 0.01192845311f, 0.01202639099f,
0.01212321594f, 0.01221876871f, 0.01231288072f, 0.01240538247f,
0.01249609515f, 0.01258483995f, 0.01267143246f, 0.01275568269f,
0.01283740439f, 0.01291640475f, 0.01299249288f, 0.01306547876f,
0.01313517336f, 0.01320139226f, 0.01326395292f, 0.01332267933f,
0.01337740291f, 0.01342796069f, 0.01347420178f, 0.01351598185f,
0.01355317142f, 0.01358565222f, 0.01361331902f, 0.01363608148f,
0.01365386508f, 0.01366661023f, 0.01367427502f, 0.01367683243f,
0.01367427502f, 0.01366661023f, 0.01365386508f, 0.01363608148f,
0.01361331902f, 0.01358565222f, 0.01355317142f, 0.01351598185f,
0.01347420178f, 0.01342796069f, 0.01337740291f, 0.01332267933f,
0.01326395292f, 0.01320139226f, 0.01313517336f, 0.01306547876f,
0.01299249288f, 0.01291640475f, 0.01283740439f, 0.01275568269f,
0.01267143246f, 0.01258483995f, 0.01249609515f, 0.01240538247f,
0.01231288072f, 0.01221876871f, 0.01212321594f, 0.01202639099f,
0.01192845311f, 0.0118295569f, 0.0117298523f, 0.01162947994f,
0.01152857486f, 0.01142726559f, 0.01132567506f, 0.0112239169f,
0.01112210099f, 0.01102032885f, 0.01091869641f, 0.01081729215f,
0.01087856572f, 0.01093827467f, 0.01099631656f, 0.01105259173f,
0.01110699773f, 0.01115943585f, 0.01120980456f, 0.01125800703f,
0.01130394638f, 0.01134752948f, 0.01138866507f, 0.01142726559f,
0.01146324724f, 0.01149653178f, 0.01152704284f, 0.0115547115f,
0.0115794735f, 0.01160127111f, 0.01162005402f, 0.01163577568f,
0.01164839976f, 0.01165789459f, 0.0116642369f, 0.01166741177f,
0.01166741177f, 0.0116642369f, 0.01165789459f, 0.01164839976f,
0.01163577568f, 0.01162005402f, 0.01160127111f, 0.0115794735f,
0.0115547115f, 0.01152704284f, 0.01149653178f, 0.01146324724f,
0.01142726559f, 0.01138866507f, 0.01134752948f, 0.01130394638f,
0.01125800703f, 0.01120980456f, 0.01115943585f, 0.01110699773f,
0.01105259173f, 0.01099631656f, 0.01093827467f, 0.01087856572f,
0.01095664315f, 0.01105799619f, 0.01115943585f, 0.01126086153f,
0.01136216894f, 0.01146324724f, 0.01156397816f, 0.0116642369f,
0.01176389214f, 0.01186280511f, 0.0119608324f, 0.01205782313f,
0.0121536199f, 0.0122480616f, 0.01234097779f, 0.01243219618f,
0.01252153981f, 0.01260882709f, 0.01269387174f, 0.01277648844f,
0.01285648718f, 0.01293367799f, 0.01300787181f, 0.01307888143f,
0.01314651966f, 0.0132106049f, 0.01327095926f, 0.01332741138f,
0.01337979734f, 0.01342796069f, 0.01347175613f, 0.01351104677f,
0.01354570966f, 0.01357563306f, 0.01360072289f, 0.0136208944f,
0.01363608148f, 0.01364623569f, 0.01365132071f, 0.01365132071f,
0.01364623569f, 0.01363608148f, 0.0136208944f, 0.01360072289f,
0.01357563306f, 0.01354570966f, 0.01351104677f, 0.01347175613f,
0.01342796069f, 0.01337979734f, 0.01332741138f, 0.01327095926f,
0.0132106049f, 0.01314651966f, 0.01307888143f, 0.01300787181f,
0.01293367799f, 0.01285648718f, 0.01277648844f, 0.01269387174f,
0.01260882709f, 0.01252153981f, 0.01243219618f, 0.01234097779f,
0.0122480616f, 0.0121536199f, 0.01205782313f, 0.0119608324f,
0.01186280511f, 0.01176389214f, 0.0116642369f, 0.01156397816f,
0.01146324724f, 0.01136216894f, 0.01126086153f, 0.01115943585f,
0.01105799619f, 0.01095664315f, 0.01085546613f, 0.01091869641f,
0.0109803956f, 0.01104046032f, 0.01109878626f, 0.01115526911f,
0.01120980456f, 0.01126228925f, 0.01131262258f, 0.01136070304f,
0.01140643191f, 0.01144971419f, 0.01149045862f, 0.01152857486f,
0.01156397816f, 0.01159659028f, 0.01162633486f, 0.01165314391f,
0.01167695317f, 0.01169770677f, 0.0117153544f, 0.0117298523f,
0.011741166f, 0.01174926758f, 0.0117541356f, 0.01175575983f,
0.0117541356f, 0.01174926758f, 0.011741166f, 0.0117298523f,
0.0117153544f, 0.01169770677f, 0.01167695317f, 0.01165314391f,
0.01162633486f, 0.01159659028f, 0.01156397816f, 0.01152857486f,
0.01149045862f, 0.01144971419f, 0.01140643191f, 0.01136070304f,
0.01131262258f, 0.01126228925f, 0.01120980456f, 0.01115526911f,
0.01109878626f, 0.01104046032f, 0.0109803956f, 0.01091869641f,
0.01099232957f, 0.01109332126f, 0.01119434182f, 0.01129528973f,
0.0113960579f, 0.01149653178f, 0.01159659028f, 0.01169610675f,
0.01179494616f, 0.01189296879f, 0.01199002843f, 0.01208597142f,
0.0121806385f, 0.01227386575f, 0.01236548461f, 0.01245531905f,
0.0125431912f, 0.01262892038f, 0.01271232124f, 0.01279320568f,
0.01287138835f, 0.01294667926f, 0.01301889122f, 0.01308783889f,
0.01315334067f, 0.01321521774f, 0.01327329688f, 0.01332741138f,
0.01337740291f, 0.01342312153f, 0.01346442662f, 0.01350119151f,
0.01353329886f, 0.01356064621f, 0.01358314604f, 0.01360072289f,
0.01361331902f, 0.0136208944f, 0.01362342201f, 0.0136208944f,
0.01361331902f, 0.01360072289f, 0.01358314604f, 0.01356064621f,
0.01353329886f, 0.01350119151f, 0.01346442662f, 0.01342312153f,
0.01337740291f, 0.01332741138f, 0.01327329688f, 0.01321521774f,
0.01315334067f, 0.01308783889f, 0.01301889122f, 0.01294667926f,
0.01287138835f, 0.01279320568f, 0.01271232124f, 0.01262892038f,
0.0125431912f, 0.01245531905f, 0.01236548461f, 0.01227386575f,
0.0121806385f, 0.01208597142f, 0.01199002843f, 0.01189296879f,
0.01179494616f, 0.01169610675f, 0.01159659028f, 0.01149653178f,
0.0113960579f, 0.01129528973f, 0.01119434182f, 0.01109332126f,
0.01099232957f, 0.01089146268f, 0.01095664315f, 0.01102032885f,
0.0110824164f, 0.01114279591f, 0.01120136213f, 0.01125800703f,
0.01131262258f, 0.01136510447f, 0.01141534653f, 0.01146324724f,
0.01150870696f, 0.01155162696f, 0.01159191411f, 0.01162947994f,
0.0116642369f, 0.01169610675f, 0.01172501314f, 0.01175088901f,
0.01177367195f, 0.0117933061f, 0.01180974208f, 0.01182294171f,
0.01183286961f, 0.01183950249f, 0.01184282266f, 0.01184282266f,
0.01183950249f, 0.01183286961f, 0.01182294171f, 0.01180974208f,
0.0117933061f, 0.01177367195f, 0.01175088901f, 0.01172501314f,
0.01169610675f, 0.0116642369f, 0.01162947994f, 0.01159191411f,
0.01155162696f, 0.01150870696f, 0.01146324724f, 0.01141534653f,
0.01136510447f, 0.01131262258f, 0.01125800703f, 0.01120136213f,
0.01114279591f, 0.0110824164f, 0.01102032885f, 0.01095664315f,
0.01102568675f, 0.0111262314f, 0.01122674625f, 0.01132712793f,
0.01142726559f, 0.01152704284f, 0.01162633486f, 0.01172501314f,
0.01182294171f, 0.01191997528f, 0.01201596763f, 0.01211076323f,
0.01220420003f, 0.01229611505f, 0.01238633506f, 0.01247468684f,
0.01256099064f, 0.01264506485f, 0.01272672601f, 0.01280578785f,
0.01288206317f, 0.01295536757f, 0.01302551571f, 0.01309232507f,
0.01315561775f, 0.01321521774f, 0.01327095926f, 0.01332267933f,
0.01337022707f, 0.0134134572f, 0.0134522384f, 0.01348644961f,
0.01351598185f, 0.01354074106f, 0.01356064621f, 0.01357563306f,
0.01358565222f, 0.01359067019f, 0.01359067019f, 0.01358565222f,
0.01357563306f, 0.01356064621f, 0.01354074106f, 0.01351598185f,
0.01348644961f, 0.0134522384f, 0.0134134572f, 0.01337022707f,
0.01332267933f, 0.01327095926f, 0.01321521774f, 0.01315561775f,
0.01309232507f, 0.01302551571f, 0.01295536757f, 0.01288206317f,
0.01280578785f, 0.01272672601f, 0.01264506485f, 0.01256099064f,
0.01247468684f, 0.01238633506f, 0.01229611505f, 0.01220420003f,
0.01211076323f, 0.01201596763f, 0.01191997528f, 0.01182294171f,
0.01172501314f, 0.01162633486f, 0.01152704284f, 0.01142726559f,
0.01132712793f, 0.01122674625f, 0.0111262314f, 0.01102568675f,
0.01092521101f, 0.01099232957f, 0.01105799619f, 0.01112210099f,
0.01118453499f, 0.01124518644f, 0.01130394638f, 0.01136070304f,
0.01141534653f, 0.01146776881f, 0.01151786372f, 0.01156552508f,
0.01161065139f, 0.01165314391f, 0.01169290766f, 0.0117298523f,
0.01176389214f, 0.01179494616f, 0.01182294171f, 0.01184780896f,
0.01186948828f, 0.01188792568f, 0.0119030755f, 0.01191489771f,
0.01192336436f, 0.01192845311f, 0.01193015091f, 0.01192845311f,
0.01192336436f, 0.01191489771f, 0.0119030755f, 0.01188792568f,
0.01186948828f, 0.01184780896f, 0.01182294171f, 0.01179494616f,
0.01176389214f, 0.0117298523f, 0.01169290766f, 0.01165314391f,
0.01161065139f, 0.01156552508f, 0.01151786372f, 0.01146776881f,
0.01141534653f, 0.01136070304f, 0.01130394638f, 0.01124518644f,
0.01118453499f, 0.01112210099f, 0.01105799619f, 0.01099232957f,
0.01105664484f, 0.01115665678f, 0.01125658024f, 0.01135630626f,
0.01145572308f, 0.0115547115f, 0.01165314391f, 0.01175088901f,
0.01184780896f, 0.01194375753f, 0.01203858573f, 0.01213213522f,
0.01222424489f, 0.01231474802f, 0.01240347326f, 0.01249024551f,
0.01257488597f, 0.01265721396f, 0.01273704506f, 0.01281419583f,
0.01288848184f, 0.0129597187f, 0.01302772667f, 0.01309232507f,
0.01315334067f, 0.0132106049f, 0.01326395292f, 0.01331323106f,
0.01335829217f, 0.01339900028f, 0.01343523059f, 0.01346686855f,
0.01349381451f, 0.01351598185f, 0.01353329886f, 0.01354570966f,
0.01355317142f, 0.01355566178f, 0.01355317142f, 0.01354570966f,
0.01353329886f, 0.01351598185f, 0.01349381451f, 0.01346686855f,
0.01343523059f, 0.01339900028f, 0.01335829217f, 0.01331323106f,
0.01326395292f, 0.0132106049f, 0.01315334067f, 0.01309232507f,
0.01302772667f, 0.0129597187f, 0.01288848184f, 0.01281419583f,
0.01273704506f, 0.01265721396f, 0.01257488597f, 0.01249024551f,
0.01240347326f, 0.01231474802f, 0.01222424489f, 0.01213213522f,
0.01203858573f, 0.01194375753f, 0.01184780896f, 0.01175088901f,
0.01165314391f, 0.0115547115f, 0.01145572308f, 0.01135630626f,
0.01125658024f, 0.01115665678f, 0.01105664484f, 0.01095664315f,
0.01102568675f, 0.01109332126f, 0.01115943585f, 0.0112239169f,
0.01128665265f, 0.01134752948f, 0.01140643191f, 0.01146324724f,
0.01151786372f, 0.01157016866f, 0.01162005402f, 0.01166741177f,
0.01171213947f, 0.0117541356f, 0.0117933061f, 0.0118295569f,
0.01186280511f, 0.01189296879f, 0.01191997528f, 0.01194375753f,
0.01196425594f, 0.01198141929f, 0.01199520286f, 0.01200557221f,
0.01201249938f, 0.01201596763f, 0.01201596763f, 0.01201249938f,
0.01200557221f, 0.01199520286f, 0.01198141929f, 0.01196425594f,
0.01194375753f, 0.01191997528f, 0.01189296879f, 0.01186280511f,
0.0118295569f, 0.0117933061f, 0.0117541356f, 0.01171213947f,
0.01166741177f, 0.01162005402f, 0.01157016866f, 0.01151786372f,
0.01146324724f, 0.01140643191f, 0.01134752948f, 0.01128665265f,
0.0112239169f, 0.01115943585f, 0.01109332126f, 0.01102568675f,
0.01108513959f, 0.01118453499f, 0.01128377859f, 0.01138276048f,
0.01148136612f, 0.0115794735f, 0.01167695317f, 0.01177367195f,
0.01186948828f, 0.01196425594f, 0.01205782313f, 0.01215003151f,
0.01224071812f, 0.01232971624f, 0.01241685264f, 0.01250195317f,
0.01258483995f, 0.0126653323f, 0.0127432486f, 0.01281840634f,
0.01289062295f, 0.0129597187f, 0.01302551571f, 0.01308783889f,
0.01314651966f, 0.01320139226f, 0.01325230021f, 0.01329909544f,
0.01334163733f, 0.01337979734f, 0.0134134572f, 0.01344251167f,
0.01346686855f, 0.01348644961f, 0.01350119151f, 0.01351104677f,
0.01351598185f, 0.01351598185f, 0.01351104677f, 0.01350119151f,
0.01348644961f, 0.01346686855f, 0.01344251167f, 0.0134134572f,
0.01337979734f, 0.01334163733f, 0.01329909544f, 0.01325230021f,
0.01320139226f, 0.01314651966f, 0.01308783889f, 0.01302551571f,
0.0129597187f, 0.01289062295f, 0.01281840634f, 0.0127432486f,
0.0126653323f, 0.01258483995f, 0.01250195317f, 0.01241685264f,
0.01232971624f, 0.01224071812f, 0.01215003151f, 0.01205782313f,
0.01196425594f, 0.01186948828f, 0.01177367195f, 0.01167695317f,
0.0115794735f, 0.01148136612f, 0.01138276048f, 0.01128377859f,
0.01118453499f, 0.01108513959f, 0.01098569483f, 0.01105664484f,
0.0111262314f, 0.01119434182f, 0.01126086153f, 0.01132567506f,
0.01138866507f, 0.01144971419f, 0.01150870696f, 0.01156552508f,
0.01162005402f, 0.01167218015f, 0.01172179077f, 0.01176877879f,
0.01181303803f, 0.01185446698f, 0.01189296879f, 0.01192845311f,
0.0119608324f, 0.01199002843f, 0.01201596763f, 0.01203858573f,
0.01205782313f, 0.01207363233f, 0.01208597142f, 0.01209480781f,
0.01210011914f, 0.01210189145f, 0.01210011914f, 0.01209480781f,
0.01208597142f, 0.01207363233f, 0.01205782313f, 0.01203858573f,
0.01201596763f, 0.01199002843f, 0.0119608324f, 0.01192845311f,
0.01189296879f, 0.01185446698f, 0.01181303803f, 0.01176877879f,
0.01172179077f, 0.01167218015f, 0.01162005402f, 0.01156552508f,
0.01150870696f, 0.01144971419f, 0.01138866507f, 0.01132567506f,
0.01126086153f, 0.01119434182f, 0.0111262314f, 0.01105664484f,
0.01111111138f, 0.01120980456f, 0.01130828168f, 0.01140643191f,
0.01150413603f, 0.01160127111f, 0.01169770677f, 0.0117933061f,
0.01188792568f, 0.01198141929f, 0.01207363233f, 0.01216440555f,
0.01225357689f, 0.01234097779f, 0.01242643595f, 0.01250977721f,
0.0125908237f, 0.01266939752f, 0.01274531893f, 0.01281840634f,
0.01288848184f, 0.01295536757f, 0.01301889122f, 0.01307888143f,
0.01313517336f, 0.01318760961f, 0.01323603839f, 0.01328031812f,
0.01332031563f, 0.01335590892f, 0.01338698901f, 0.0134134572f,
0.01343523059f, 0.0134522384f, 0.01346442662f, 0.01347175613f,
0.01347420178f, 0.01347175613f, 0.01346442662f, 0.0134522384f,
0.01343523059f, 0.0134134572f, 0.01338698901f, 0.01335590892f,
0.01332031563f, 0.01328031812f, 0.01323603839f, 0.01318760961f,
0.01313517336f, 0.01307888143f, 0.01301889122f, 0.01295536757f,
0.01288848184f, 0.01281840634f, 0.01274531893f, 0.01266939752f,
0.0125908237f, 0.01250977721f, 0.01242643595f, 0.01234097779f,
0.01225357689f, 0.01216440555f, 0.01207363233f, 0.01198141929f,
0.01188792568f, 0.0117933061f, 0.01169770677f, 0.01160127111f,
0.01150413603f, 0.01140643191f, 0.01130828168f, 0.01120980456f,
0.01111111138f, 0.01101230737f, 0.01108513959f, 0.01115665678f,
0.01122674625f, 0.01129528973f, 0.01136216894f, 0.01142726559f,
0.01149045862f, 0.01155162696f, 0.01161065139f, 0.01166741177f,
0.01172179077f, 0.01177367195f, 0.01182294171f, 0.01186948828f,
0.01191320643f, 0.0119539937f, 0.01199175231f, 0.01202639099f,
0.01205782313f, 0.01208597142f, 0.01211076323f, 0.01213213522f,
0.01215003151f, 0.01216440555f, 0.01217522006f, 0.0121824462f,
0.01218606345f, 0.01218606345f, 0.0121824462f, 0.01217522006f,
0.01216440555f, 0.01215003151f, 0.01213213522f, 0.01211076323f,
0.01208597142f, 0.01205782313f, 0.01202639099f, 0.01199175231f,
0.0119539937f, 0.01191320643f, 0.01186948828f, 0.01182294171f,
0.01177367195f, 0.01172179077f, 0.01166741177f, 0.01161065139f,
0.01155162696f, 0.01149045862f, 0.01142726559f, 0.01136216894f,
0.01129528973f, 0.01122674625f, 0.01115665678f, 0.01108513959f,
0.01113450434f, 0.01123241056f, 0.01133003552f, 0.01142726559f,
0.01152398065f, 0.01162005402f, 0.0117153544f, 0.01180974208f,
0.0119030755f, 0.01199520286f, 0.01208597142f, 0.01217522006f,
0.01226278674f, 0.01234850287f, 0.01243219618f, 0.01251369435f,
0.01259282045f, 0.01266939752f, 0.0127432486f, 0.01281419583f,
0.01288206317f, 0.01294667926f, 0.01300787181f, 0.01306547876f,
0.01311933808f, 0.01316929981f, 0.01321521774f, 0.01325695775f,
0.0132943932f, 0.01332741138f, 0.01335590892f, 0.01337979734f,
0.01339900028f, 0.0134134572f, 0.01342312153f, 0.01342796069f,
0.01342796069f, 0.01342312153f, 0.0134134572f, 0.01339900028f,
0.01337979734f, 0.01335590892f, 0.01332741138f, 0.0132943932f,
0.01325695775f, 0.01321521774f, 0.01316929981f, 0.01311933808f,
0.01306547876f, 0.01300787181f, 0.01294667926f, 0.01288206317f,
0.01281419583f, 0.0127432486f, 0.01266939752f, 0.01259282045f,
0.01251369435f, 0.01243219618f, 0.01234850287f, 0.01226278674f,
0.01217522006f, 0.01208597142f, 0.01199520286f, 0.0119030755f,
0.01180974208f, 0.0117153544f, 0.01162005402f, 0.01152398065f,
0.01142726559f, 0.01133003552f, 0.01123241056f, 0.01113450434f,
0.0110364249f, 0.01111111138f, 0.01118453499f, 0.01125658024f,
0.01132712793f, 0.0113960579f, 0.01146324724f, 0.01152857486f,
0.01159191411f, 0.01165314391f, 0.01171213947f, 0.01176877879f,
0.01182294171f, 0.01187450811f, 0.01192336436f, 0.01196939778f,
0.01201249938f, 0.01205256768f, 0.01208950393f, 0.01212321594f,
0.0121536199f, 0.0121806385f, 0.01220420003f, 0.01222424489f,
0.01224071812f, 0.01225357689f, 0.01226278674f, 0.01226832252f,
0.01227016933f, 0.01226832252f, 0.01226278674f, 0.01225357689f,
0.01224071812f, 0.01222424489f, 0.01220420003f, 0.0121806385f,
0.0121536199f, 0.01212321594f, 0.01208950393f, 0.01205256768f,
0.01201249938f, 0.01196939778f, 0.01192336436f, 0.01187450811f,
0.01182294171f, 0.01176877879f, 0.01171213947f, 0.01165314391f,
0.01159191411f, 0.01152857486f, 0.01146324724f, 0.0113960579f,
0.01132712793f, 0.01125658024f, 0.01118453499f, 0.01111111138f,
0.01115526911f, 0.01125230361f, 0.01134899072f, 0.01144521404f,
0.01154085156f, 0.01163577568f, 0.0117298523f, 0.01182294171f,
0.01191489771f, 0.01200557221f, 0.01209480781f, 0.0121824462f,
0.01226832252f, 0.01235227007f, 0.01243411843f, 0.01251369435f,
0.0125908237f, 0.0126653323f, 0.01273704506f, 0.01280578785f,
0.01287138835f, 0.01293367799f, 0.01299249288f, 0.01304767188f,
0.01309906319f, 0.01314651966f, 0.01318990346f, 0.013229087f,
0.01326395292f, 0.0132943932f, 0.01332031563f, 0.01334163733f,
0.01335829217f, 0.01337022707f, 0.01337740291f, 0.01337979734f,
0.01337740291f, 0.01337022707f, 0.01335829217f, 0.01334163733f,
0.01332031563f, 0.0132943932f, 0.01326395292f, 0.013229087f,
0.01318990346f, 0.01314651966f, 0.01309906319f, 0.01304767188f,
0.01299249288f, 0.01293367799f, 0.01287138835f, 0.01280578785f,
0.01273704506f, 0.0126653323f, 0.0125908237f, 0.01251369435f,
0.01243411843f, 0.01235227007f, 0.01226832252f, 0.0121824462f,
0.01209480781f, 0.01200557221f, 0.01191489771f, 0.01182294171f,
0.0117298523f, 0.01163577568f, 0.01154085156f, 0.01144521404f,
0.01134899072f, 0.01125230361f, 0.01115526911f, 0.01105799619f,
0.01113450434f, 0.01120980456f, 0.01128377859f, 0.01135630626f,
0.01142726559f, 0.01149653178f, 0.01156397816f, 0.01162947994f,
0.01169290766f, 0.0117541356f, 0.01181303803f, 0.01186948828f,
0.01192336436f, 0.0119745452f, 0.01202291343f, 0.01206835546f,
0.01211076323f, 0.01215003151f, 0.01218606345f, 0.01221876871f,
0.0122480616f, 0.01227386575f, 0.01229611505f, 0.01231474802f,
0.01232971624f, 0.01234097779f, 0.01234850287f, 0.01235227007f,
0.01235227007f, 0.01234850287f, 0.01234097779f, 0.01232971624f,
0.01231474802f, 0.01229611505f, 0.01227386575f, 0.0122480616f,
0.01221876871f, 0.01218606345f, 0.01215003151f, 0.01211076323f,
0.01206835546f, 0.01202291343f, 0.0119745452f, 0.01192336436f,
0.01186948828f, 0.01181303803f, 0.0117541356f, 0.01169290766f,
0.01162947994f, 0.01156397816f, 0.01149653178f, 0.01142726559f,
0.01135630626f, 0.01128377859f, 0.01120980456f, 0.01113450434f,
0.01117335912f, 0.01126943901f, 0.01136510447f, 0.01146023627f,
0.0115547115f, 0.01164839976f, 0.011741166f, 0.01183286961f,
0.01192336436f, 0.01201249938f, 0.01210011914f, 0.01218606345f,
0.01227016933f, 0.01235227007f, 0.01243219618f, 0.01250977721f,
0.01258483995f, 0.01265721396f, 0.01272672601f, 0.01279320568f,
0.01285648718f, 0.01291640475f, 0.0129727982f, 0.01302551571f,
0.01307440922f, 0.01311933808f, 0.01316017378f, 0.01319679338f,
0.013229087f, 0.01325695775f, 0.01328031812f, 0.01329909544f,
0.01331323106f, 0.01332267933f, 0.01332741138f, 0.01332741138f,
0.01332267933f, 0.01331323106f, 0.01329909544f, 0.01328031812f,
0.01325695775f, 0.013229087f, 0.01319679338f, 0.01316017378f,
0.01311933808f, 0.01307440922f, 0.01302551571f, 0.0129727982f,
0.01291640475f, 0.01285648718f, 0.01279320568f, 0.01272672601f,
0.01265721396f, 0.01258483995f, 0.01250977721f, 0.01243219618f,
0.01235227007f, 0.01227016933f, 0.01218606345f, 0.01210011914f,
0.01201249938f, 0.01192336436f, 0.01183286961f, 0.011741166f,
0.01164839976f, 0.0115547115f, 0.01146023627f, 0.01136510447f,
0.01126943901f, 0.01117335912f, 0.01107697561f, 0.01115526911f,
0.01123241056f, 0.01130828168f, 0.01138276048f, 0.01145572308f,
0.01152704284f, 0.01159659028f, 0.0116642369f, 0.0117298523f,
0.0117933061f, 0.01185446698f, 0.01191320643f, 0.01196939778f,
0.01202291343f, 0.01207363233f, 0.01212143432f, 0.01216620672f,
0.01220783778f, 0.0122462241f, 0.01228126884f, 0.01231288072f,
0.01234097779f, 0.01236548461f, 0.01238633506f, 0.01240347326f,
0.01241685264f, 0.01242643595f, 0.01243219618f, 0.01243411843f,
0.01243219618f, 0.01242643595f, 0.01241685264f, 0.01240347326f,
0.01238633506f, 0.01236548461f, 0.01234097779f, 0.01231288072f,
0.01228126884f, 0.0122462241f, 0.01220783778f, 0.01216620672f,
0.01212143432f, 0.01207363233f, 0.01202291343f, 0.01196939778f,
0.01191320643f, 0.01185446698f, 0.0117933061f, 0.0117298523f,
0.0116642369f, 0.01159659028f, 0.01152704284f, 0.01145572308f,
0.01138276048f, 0.01130828168f, 0.01123241056f, 0.01115526911f,
0.01118873432f, 0.01128377859f, 0.01137833856f, 0.01147229597f,
0.01156552508f, 0.01165789459f, 0.01174926758f, 0.01183950249f,
0.01192845311f, 0.01201596763f, 0.01210189145f, 0.01218606345f,
0.01226832252f, 0.01234850287f, 0.01242643595f, 0.01250195317f,
0.01257488597f, 0.01264506485f, 0.01271232124f, 0.01277648844f,
0.01283740439f, 0.0128949089f, 0.01294884924f, 0.01299907733f,
0.0130454516f, 0.01308783889f, 0.01312611811f, 0.01316017378f,
0.01318990346f, 0.01321521774f, 0.01323603839f, 0.01325230021f,
0.01326395292f, 0.01327095926f, 0.01327329688f, 0.01327095926f,
0.01326395292f, 0.01325230021f, 0.01323603839f, 0.01321521774f,
0.01318990346f, 0.01316017378f, 0.01312611811f, 0.01308783889f,
0.0130454516f, 0.01299907733f, 0.01294884924f, 0.0128949089f,
0.01283740439f, 0.01277648844f, 0.01271232124f, 0.01264506485f,
0.01257488597f, 0.01250195317f, 0.01242643595f, 0.01234850287f,
0.01226832252f, 0.01218606345f, 0.01210189145f, 0.01201596763f,
0.01192845311f, 0.01183950249f, 0.01174926758f, 0.01165789459f,
0.01156552508f, 0.01147229597f, 0.01137833856f, 0.01128377859f,
0.01118873432f, 0.01109332126f, 0.01117335912f, 0.01125230361f,
0.01133003552f, 0.01140643191f, 0.01148136612f, 0.0115547115f,
0.01162633486f, 0.01169610675f, 0.01176389214f, 0.0118295569f,
0.01189296879f, 0.0119539937f, 0.01201249938f, 0.01206835546f,
0.01212143432f, 0.01217161212f, 0.01221876871f, 0.01226278674f,
0.01230355818f, 0.01234097779f, 0.01237494871f, 0.01240538247f,
0.01243219618f, 0.01245531905f, 0.01247468684f, 0.01249024551f,
0.01250195317f, 0.01250977721f, 0.01251369435f, 0.01251369435f,
0.01250977721f, 0.01250195317f, 0.01249024551f, 0.01247468684f,
0.01245531905f, 0.01243219618f, 0.01240538247f, 0.01237494871f,
0.01234097779f, 0.01230355818f, 0.01226278674f, 0.01221876871f,
0.01217161212f, 0.01212143432f, 0.01206835546f, 0.01201249938f,
0.0119539937f, 0.01189296879f, 0.0118295569f, 0.01176389214f,
0.01169610675f, 0.01162633486f, 0.0115547115f, 0.01148136612f,
0.01140643191f, 0.01133003552f, 0.01125230361f, 0.01117335912f,
0.01120136213f, 0.01129528973f, 0.01138866507f, 0.01148136612f,
0.0115732681f, 0.0116642369f, 0.0117541356f, 0.01184282266f,
0.01193015091f, 0.01201596763f, 0.01210011914f, 0.0121824462f,
0.01226278674f, 0.01234097779f, 0.01241685264f, 0.01249024551f,
0.01256099064f, 0.01262892038f, 0.01269387174f, 0.01275568269f,
0.01281419583f, 0.01286925655f, 0.01292071585f, 0.01296843402f,
0.01301227603f, 0.01305211708f, 0.01308783889f, 0.01311933808f,
0.01314651966f, 0.01316929981f, 0.01318760961f, 0.01320139226f,
0.0132106049f, 0.01321521774f, 0.01321521774f, 0.0132106049f,
0.01320139226f, 0.01318760961f, 0.01316929981f, 0.01314651966f,
0.01311933808f, 0.01308783889f, 0.01305211708f, 0.01301227603f,
0.01296843402f, 0.01292071585f, 0.01286925655f, 0.01281419583f,
0.01275568269f, 0.01269387174f, 0.01262892038f, 0.01256099064f,
0.01249024551f, 0.01241685264f, 0.01234097779f, 0.01226278674f,
0.0121824462f, 0.01210011914f, 0.01201596763f, 0.01193015091f,
0.01184282266f, 0.0117541356f, 0.0116642369f, 0.0115732681f,
0.01148136612f, 0.01138866507f, 0.01129528973f, 0.01120136213f,
0.01110699773f, 0.01118873432f, 0.01126943901f, 0.01134899072f,
0.01142726559f, 0.01150413603f, 0.0115794735f, 0.01165314391f,
0.01172501314f, 0.01179494616f, 0.01186280511f, 0.01192845311f,
0.01199175231f, 0.01205256768f, 0.01211076323f, 0.01216620672f,
0.01221876871f, 0.01226832252f, 0.01231474802f, 0.01235792786f,
0.01239775307f, 0.01243411843f, 0.01246692892f, 0.01249609515f,
0.01252153981f, 0.0125431912f, 0.01256099064f, 0.01257488597f,
0.01258483995f, 0.0125908237f, 0.01259282045f, 0.0125908237f,
0.01258483995f, 0.01257488597f, 0.01256099064f, 0.0125431912f,
0.01252153981f, 0.01249609515f, 0.01246692892f, 0.01243411843f,
0.01239775307f, 0.01235792786f, 0.01231474802f, 0.01226832252f,
0.01221876871f, 0.01216620672f, 0.01211076323f, 0.01205256768f,
0.01199175231f, 0.01192845311f, 0.01186280511f, 0.01179494616f,
0.01172501314f, 0.01165314391f, 0.0115794735f, 0.01150413603f,
0.01142726559f, 0.01134899072f, 0.01126943901f, 0.01118873432f,
0.01121121366f, 0.01130394638f, 0.0113960579f, 0.0114874253f,
0.01157792099f, 0.01166741177f, 0.01175575983f, 0.01184282266f,
0.01192845311f, 0.01201249938f, 0.01209480781f, 0.01217522006f,
0.01225357689f, 0.01232971624f, 0.01240347326f, 0.01247468684f,
0.0125431912f, 0.01260882709f, 0.01267143246f, 0.01273085084f,
0.0127869295f, 0.01283952035f, 0.01288848184f, 0.01293367799f,
0.01297498215f, 0.01301227603f, 0.0130454516f, 0.01307440922f,
0.01309906319f, 0.01311933808f, 0.01313517336f, 0.01314651966f,
0.01315334067f, 0.01315561775f, 0.01315334067f, 0.01314651966f,
0.01313517336f, 0.01311933808f, 0.01309906319f, 0.01307440922f,
0.0130454516f, 0.01301227603f, 0.01297498215f, 0.01293367799f,
0.01288848184f, 0.01283952035f, 0.0127869295f, 0.01273085084f,
0.01267143246f, 0.01260882709f, 0.0125431912f, 0.01247468684f,
0.01240347326f, 0.01232971624f, 0.01225357689f, 0.01217522006f,
0.01209480781f, 0.01201249938f, 0.01192845311f, 0.01184282266f,
0.01175575983f, 0.01166741177f, 0.01157792099f, 0.0114874253f,
0.0113960579f, 0.01130394638f, 0.01121121366f, 0.01111797616f,
0.01120136213f, 0.01128377859f, 0.01136510447f, 0.01144521404f,
0.01152398065f, 0.01160127111f, 0.01167695317f, 0.01175088901f,
0.01182294171f, 0.01189296879f, 0.0119608324f, 0.01202639099f,
0.01208950393f, 0.01215003151f, 0.01220783778f, 0.01226278674f,
0.01231474802f, 0.01236359403f, 0.01240920182f, 0.01245145593f,
0.01249024551f, 0.01252546813f, 0.01255702879f, 0.01258483995f,
0.01260882709f, 0.01262892038f, 0.01264506485f, 0.01265721396f,
0.0126653323f, 0.01266939752f, 0.01266939752f, 0.0126653323f,
0.01265721396f, 0.01264506485f, 0.01262892038f, 0.01260882709f,
0.01258483995f, 0.01255702879f, 0.01252546813f, 0.01249024551f,
0.01245145593f, 0.01240920182f, 0.01236359403f, 0.01231474802f,
0.01226278674f, 0.01220783778f, 0.01215003151f, 0.01208950393f,
0.01202639099f, 0.0119608324f, 0.01189296879f, 0.01182294171f,
0.01175088901f, 0.01167695317f, 0.01160127111f, 0.01152398065f,
0.01144521404f, 0.01136510447f, 0.01128377859f, 0.01120136213f,
0.01121826563f, 0.01130972803f, 0.01140050031f, 0.01149045862f,
0.0115794735f, 0.01166741177f, 0.0117541356f, 0.01183950249f,
0.01192336436f, 0.01200557221f, 0.01208597142f, 0.01216440555f,
0.01224071812f, 0.01231474802f, 0.01238633506f, 0.01245531905f,
0.01252153981f, 0.01258483995f, 0.01264506485f, 0.01270206179f,
0.01275568269f, 0.01280578785f, 0.01285223942f, 0.0128949089f,
0.01293367799f, 0.01296843402f, 0.01299907733f, 0.01302551571f,
0.01304767188f, 0.01306547876f, 0.01307888143f, 0.01308783889f,
0.01309232507f, 0.01309232507f, 0.01308783889f, 0.01307888143f,
0.01306547876f, 0.01304767188f, 0.01302551571f, 0.01299907733f,
0.01296843402f, 0.01293367799f, 0.0128949089f, 0.01285223942f,
0.01280578785f, 0.01275568269f, 0.01270206179f, 0.01264506485f,
0.01258483995f, 0.01252153981f, 0.01245531905f, 0.01238633506f,
0.01231474802f, 0.01224071812f, 0.01216440555f, 0.01208597142f,
0.01200557221f, 0.01192336436f, 0.01183950249f, 0.0117541356f,
0.01166741177f, 0.0115794735f, 0.01149045862f, 0.01140050031f,
0.01130972803f, 0.01121826563f, 0.0111262314f, 0.01121121366f,
0.01129528973f, 0.01137833856f, 0.01146023627f, 0.01154085156f,
0.01162005402f, 0.01169770677f, 0.01177367195f, 0.01184780896f,
0.01191997528f, 0.01199002843f, 0.01205782313f, 0.01212321594f,
0.01218606345f, 0.0122462241f, 0.01230355818f, 0.01235792786f,
0.01240920182f, 0.01245725155f, 0.01250195317f, 0.0125431912f,
0.01258085575f, 0.01261484437f, 0.01264506485f, 0.01267143246f,
0.01269387174f, 0.01271232124f, 0.01272672601f, 0.01273704506f,
0.0127432486f, 0.01274531893f, 0.0127432486f, 0.01273704506f,
0.01272672601f, 0.01271232124f, 0.01269387174f, 0.01267143246f,
0.01264506485f, 0.01261484437f, 0.01258085575f, 0.0125431912f,
0.01250195317f, 0.01245725155f, 0.01240920182f, 0.01235792786f,
0.01230355818f, 0.0122462241f, 0.01218606345f, 0.01212321594f,
0.01205782313f, 0.01199002843f, 0.01191997528f, 0.01184780896f,
0.01177367195f, 0.01169770677f, 0.01162005402f, 0.01154085156f,
0.01146023627f, 0.01137833856f, 0.01129528973f, 0.01121121366f,
0.01122250315f, 0.01131262258f, 0.01140198205f, 0.01149045862f,
0.01157792099f, 0.0116642369f, 0.01174926758f, 0.01183286961f,
0.01191489771f, 0.01199520286f, 0.01207363233f, 0.01215003151f,
0.01222424489f, 0.01229611505f, 0.01236548461f, 0.01243219618f,
0.01249609515f, 0.01255702879f, 0.01261484437f, 0.01266939752f,
0.01272054669f, 0.01276815403f, 0.01281209197f, 0.01285223942f,
0.01288848184f, 0.01292071585f, 0.01294884924f, 0.0129727982f,
0.01299249288f, 0.01300787181f, 0.01301889122f, 0.01302551571f,
0.01302772667f, 0.01302551571f, 0.01301889122f, 0.01300787181f,
0.01299249288f, 0.0129727982f, 0.01294884924f, 0.01292071585f,
0.01288848184f, 0.01285223942f, 0.01281209197f, 0.01276815403f,
0.01272054669f, 0.01266939752f, 0.01261484437f, 0.01255702879f,
0.01249609515f, 0.01243219618f, 0.01236548461f, 0.01229611505f,
0.01222424489f, 0.01215003151f, 0.01207363233f, 0.01199520286f,
0.01191489771f, 0.01183286961f, 0.01174926758f, 0.0116642369f,
0.01157792099f, 0.01149045862f, 0.01140198205f, 0.01131262258f,
0.01122250315f, 0.01113174483f, 0.01121826563f, 0.01130394638f,
0.01138866507f, 0.01147229597f, 0.0115547115f, 0.01163577568f,
0.0117153544f, 0.0117933061f, 0.01186948828f, 0.01194375753f,
0.01201596763f, 0.01208597142f, 0.0121536199f, 0.01221876871f,
0.01228126884f, 0.01234097779f, 0.01239775307f, 0.01245145593f,
0.01250195317f, 0.01254911628f, 0.01259282045f, 0.01263295114f,
0.01266939752f, 0.01270206179f, 0.01273085084f, 0.01275568269f,
0.01277648844f, 0.01279320568f, 0.01280578785f, 0.01281419583f,
0.01281840634f, 0.01281840634f, 0.01281419583f, 0.01280578785f,
0.01279320568f, 0.01277648844f, 0.01275568269f, 0.01273085084f,
0.01270206179f, 0.01266939752f, 0.01263295114f, 0.01259282045f,
0.01254911628f, 0.01250195317f, 0.01245145593f, 0.01239775307f,
0.01234097779f, 0.01228126884f, 0.01221876871f, 0.0121536199f,
0.01208597142f, 0.01201596763f, 0.01194375753f, 0.01186948828f,
0.0117933061f, 0.0117153544f, 0.01163577568f, 0.0115547115f,
0.01147229597f, 0.01138866507f, 0.01130394638f, 0.01121826563f,
0.0112239169f, 0.01131262258f, 0.01140050031f, 0.0114874253f,
0.0115732681f, 0.01165789459f, 0.011741166f, 0.01182294171f,
0.0119030755f, 0.01198141929f, 0.01205782313f, 0.01213213522f,
0.01220420003f, 0.01227386575f, 0.01234097779f, 0.01240538247f,
0.01246692892f, 0.01252546813f, 0.01258085575f, 0.01263295114f,
0.0126816174f, 0.01272672601f, 0.01276815403f, 0.01280578785f,
0.01283952035f, 0.01286925655f, 0.0128949089f, 0.01291640475f,
0.01293367799f, 0.01294667926f, 0.01295536757f, 0.0129597187f,
0.0129597187f, 0.01295536757f, 0.01294667926f, 0.01293367799f,
0.01291640475f, 0.0128949089f, 0.01286925655f, 0.01283952035f,
0.01280578785f, 0.01276815403f, 0.01272672601f, 0.0126816174f,
0.01263295114f, 0.01258085575f, 0.01252546813f, 0.01246692892f,
0.01240538247f, 0.01234097779f, 0.01227386575f, 0.01220420003f,
0.01213213522f, 0.01205782313f, 0.01198141929f, 0.0119030755f,
0.01182294171f, 0.011741166f, 0.01165789459f, 0.0115732681f,
0.0114874253f, 0.01140050031f, 0.01131262258f, 0.0112239169f,
0.01113450434f, 0.01122250315f, 0.01130972803f, 0.0113960579f,
0.01148136612f, 0.01156552508f, 0.01164839976f, 0.0117298523f,
0.01180974208f, 0.01188792568f, 0.01196425594f, 0.01203858573f,
0.01211076323f, 0.0121806385f, 0.0122480616f, 0.01231288072f,
0.01237494871f, 0.01243411843f, 0.01249024551f, 0.0125431912f,
0.01259282045f, 0.01263900381f, 0.0126816174f, 0.01272054669f,
0.01275568269f, 0.0127869295f, 0.01281419583f, 0.01283740439f,
0.01285648718f, 0.01287138835f, 0.01288206317f, 0.01288848184f,
0.01289062295f, 0.01288848184f, 0.01288206317f, 0.01287138835f,
0.01285648718f, 0.01283740439f, 0.01281419583f, 0.0127869295f,
0.01275568269f, 0.01272054669f, 0.0126816174f, 0.01263900381f,
0.01259282045f, 0.0125431912f, 0.01249024551f, 0.01243411843f,
0.01237494871f, 0.01231288072f, 0.0122480616f, 0.0121806385f,
0.01211076323f, 0.01203858573f, 0.01196425594f, 0.01188792568f,
0.01180974208f, 0.0117298523f, 0.01164839976f, 0.01156552508f,
0.01148136612f, 0.0113960579f, 0.01130972803f, 0.01122250315f
};
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_MATH_MAT3_H_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_MAT3_H_HAS_BEEN_INCLUDED
#include <openvdb/Exceptions.h>
#include "Vec3.h"
#include "Mat.h"
#include <algorithm> // for std::copy()
#include <cassert>
#include <cmath>
#include <iomanip>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
template<typename T> class Vec3;
template<typename T> class Mat4;
template<typename T> class Quat;
/// @class Mat3 Mat3.h
/// @brief 3x3 matrix class.
/// @details Elements are stored in row-major order in the inherited
/// MyBase::mm array, i.e. mm[3*i + j] holds row @c i, column @c j.
template<typename T>
class Mat3: public Mat<3, T>
{
public:
    /// Data type held by the matrix.
    using value_type = T;
    using ValueType = T;
    using MyBase = Mat<3, T>;

    /// Trivial constructor, the matrix is NOT initialized
#if OPENVDB_ABI_VERSION_NUMBER >= 8
    /// @note destructor, copy constructor, assignment operator and
    /// move constructor are left to be defined by the compiler (default)
    Mat3() = default;
#else
    Mat3() {}

    /// Copy constructor
    Mat3(const Mat<3, T> &m)
    {
        for (int i=0; i<3; ++i) {
            for (int j=0; j<3; ++j) {
                MyBase::mm[i*3 + j] = m[i][j];
            }
        }
    }
#endif

    /// Constructor given the quaternion rotation, e.g. Mat3f m(q);
    /// The quaternion is normalized and used to construct the matrix
    Mat3(const Quat<T> &q)
    { setToRotation(q); }


    /// Constructor given array of elements, the ordering is in row major form:
    /** @verbatim
        a b c
        d e f
        g h i
        @endverbatim */
    template<typename Source>
    Mat3(Source a, Source b, Source c,
         Source d, Source e, Source f,
         Source g, Source h, Source i)
    {
        MyBase::mm[0] = static_cast<T>(a);
        MyBase::mm[1] = static_cast<T>(b);
        MyBase::mm[2] = static_cast<T>(c);
        MyBase::mm[3] = static_cast<T>(d);
        MyBase::mm[4] = static_cast<T>(e);
        MyBase::mm[5] = static_cast<T>(f);
        MyBase::mm[6] = static_cast<T>(g);
        MyBase::mm[7] = static_cast<T>(h);
        MyBase::mm[8] = static_cast<T>(i);
    } // constructor1Test

    /// Construct matrix from rows or columns vectors (defaults to rows
    /// for historical reasons)
    template<typename Source>
    Mat3(const Vec3<Source> &v1, const Vec3<Source> &v2, const Vec3<Source> &v3, bool rows = true)
    {
        if (rows) {
            this->setRows(v1, v2, v3);
        } else {
            this->setColumns(v1, v2, v3);
        }
    }

    /// Constructor given array of elements, the ordering is in row major form:\n
    /// a[0] a[1] a[2]\n
    /// a[3] a[4] a[5]\n
    /// a[6] a[7] a[8]\n
    template<typename Source>
    Mat3(Source *a)
    {
        MyBase::mm[0] = static_cast<T>(a[0]);
        MyBase::mm[1] = static_cast<T>(a[1]);
        MyBase::mm[2] = static_cast<T>(a[2]);
        MyBase::mm[3] = static_cast<T>(a[3]);
        MyBase::mm[4] = static_cast<T>(a[4]);
        MyBase::mm[5] = static_cast<T>(a[5]);
        MyBase::mm[6] = static_cast<T>(a[6]);
        MyBase::mm[7] = static_cast<T>(a[7]);
        MyBase::mm[8] = static_cast<T>(a[8]);
    } // constructor1Test

    /// Conversion constructor (element-wise static_cast from another value type)
    template<typename Source>
    explicit Mat3(const Mat3<Source> &m)
    {
        for (int i=0; i<3; ++i) {
            for (int j=0; j<3; ++j) {
                MyBase::mm[i*3 + j] = static_cast<T>(m[i][j]);
            }
        }
    }

    /// Conversion from Mat4 (copies top left 3x3 submatrix)
    explicit Mat3(const Mat4<T> &m)
    {
        for (int i=0; i<3; ++i) {
            for (int j=0; j<3; ++j) {
                MyBase::mm[i*3 + j] = m[i][j];
            }
        }
    }

    /// Predefined constant for identity matrix
    static const Mat3<T>& identity() {
        static const Mat3<T> sIdentity = Mat3<T>(
            1, 0, 0,
            0, 1, 0,
            0, 0, 1
        );
        return sIdentity;
    }

    /// Predefined constant for zero matrix
    static const Mat3<T>& zero() {
        static const Mat3<T> sZero = Mat3<T>(
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        );
        return sZero;
    }

    /// Set ith row to vector v
    void setRow(int i, const Vec3<T> &v)
    {
        // assert(i>=0 && i<3);
        int i3 = i * 3;  // row i starts at flat index 3*i
        MyBase::mm[i3+0] = v[0];
        MyBase::mm[i3+1] = v[1];
        MyBase::mm[i3+2] = v[2];
    } // rowColumnTest

    /// Get ith row, e.g. Vec3d v = m.row(1);
    Vec3<T> row(int i) const
    {
        // assert(i>=0 && i<3);
        return Vec3<T>((*this)(i,0), (*this)(i,1), (*this)(i,2));
    } // rowColumnTest

    /// Set jth column to vector v
    void setCol(int j, const Vec3<T>& v)
    {
        // assert(j>=0 && j<3);
        MyBase::mm[0+j] = v[0];
        MyBase::mm[3+j] = v[1];
        MyBase::mm[6+j] = v[2];
    } // rowColumnTest

    /// Get jth column, e.g. Vec3d v = m.col(0);
    Vec3<T> col(int j) const
    {
        // assert(j>=0 && j<3);
        return Vec3<T>((*this)(0,j), (*this)(1,j), (*this)(2,j));
    } // rowColumnTest

    /// Alternative indexed reference to the elements
    /// Note that the indices are row first and column second.
    /// e.g. m(0,0) = 1;
    T& operator()(int i, int j)
    {
        // assert(i>=0 && i<3);
        // assert(j>=0 && j<3);
        return MyBase::mm[3*i+j];
    } // trivial

    /// Alternative indexed constant reference to the elements,
    /// Note that the indices are row first and column second.
    /// e.g. float f = m(1,0);
    T operator()(int i, int j) const
    {
        // assert(i>=0 && i<3);
        // assert(j>=0 && j<3);
        return MyBase::mm[3*i+j];
    } // trivial

    /// Set the rows of this matrix to the vectors v1, v2, v3
    void setRows(const Vec3<T> &v1, const Vec3<T> &v2, const Vec3<T> &v3)
    {
        MyBase::mm[0] = v1[0];
        MyBase::mm[1] = v1[1];
        MyBase::mm[2] = v1[2];
        MyBase::mm[3] = v2[0];
        MyBase::mm[4] = v2[1];
        MyBase::mm[5] = v2[2];
        MyBase::mm[6] = v3[0];
        MyBase::mm[7] = v3[1];
        MyBase::mm[8] = v3[2];
    } // setRows

    /// Set the columns of this matrix to the vectors v1, v2, v3
    void setColumns(const Vec3<T> &v1, const Vec3<T> &v2, const Vec3<T> &v3)
    {
        MyBase::mm[0] = v1[0];
        MyBase::mm[1] = v2[0];
        MyBase::mm[2] = v3[0];
        MyBase::mm[3] = v1[1];
        MyBase::mm[4] = v2[1];
        MyBase::mm[5] = v3[1];
        MyBase::mm[6] = v1[2];
        MyBase::mm[7] = v2[2];
        MyBase::mm[8] = v3[2];
    } // setColumns

    /// Set diagonal and symmetric triangular components
    /// @param vdiag  the three diagonal entries (0,0), (1,1), (2,2)
    /// @param vtri   the off-diagonal entries (0,1), (0,2), (1,2),
    ///               mirrored below the diagonal
    void setSymmetric(const Vec3<T> &vdiag, const Vec3<T> &vtri)
    {
        MyBase::mm[0] = vdiag[0];
        MyBase::mm[1] = vtri[0];
        MyBase::mm[2] = vtri[1];
        MyBase::mm[3] = vtri[0];
        MyBase::mm[4] = vdiag[1];
        MyBase::mm[5] = vtri[2];
        MyBase::mm[6] = vtri[1];
        MyBase::mm[7] = vtri[2];
        MyBase::mm[8] = vdiag[2];
    } // setSymmetricTest

    /// Return a matrix with the prescribed diagonal and symmetric triangular components.
    static Mat3 symmetric(const Vec3<T> &vdiag, const Vec3<T> &vtri)
    {
        return Mat3(
            vdiag[0], vtri[0], vtri[1],
            vtri[0], vdiag[1], vtri[2],
            vtri[1], vtri[2], vdiag[2]
        );
    }

    /// Set the matrix as cross product of the given vector
    void setSkew(const Vec3<T> &v)
    {*this = skew(v);}

    /// @brief Set this matrix to the rotation matrix specified by the quaternion
    /// @details The quaternion is normalized and used to construct the matrix.
    /// Note that the matrix is transposed to match post-multiplication semantics.
    void setToRotation(const Quat<T> &q)
    {*this = rotation<Mat3<T> >(q);}

    /// @brief Set this matrix to the rotation specified by @a axis and @a angle
    /// @details The axis must be unit vector
    void setToRotation(const Vec3<T> &axis, T angle)
    {*this = rotation<Mat3<T> >(axis, angle);}

    /// Set this matrix to zero
    void setZero()
    {
        MyBase::mm[0] = 0;
        MyBase::mm[1] = 0;
        MyBase::mm[2] = 0;
        MyBase::mm[3] = 0;
        MyBase::mm[4] = 0;
        MyBase::mm[5] = 0;
        MyBase::mm[6] = 0;
        MyBase::mm[7] = 0;
        MyBase::mm[8] = 0;
    } // trivial

    /// Set this matrix to identity
    void setIdentity()
    {
        MyBase::mm[0] = 1;
        MyBase::mm[1] = 0;
        MyBase::mm[2] = 0;
        MyBase::mm[3] = 0;
        MyBase::mm[4] = 1;
        MyBase::mm[5] = 0;
        MyBase::mm[6] = 0;
        MyBase::mm[7] = 0;
        MyBase::mm[8] = 1;
    } // trivial

    /// Assignment operator (element-wise conversion from another value type)
    template<typename Source>
    const Mat3& operator=(const Mat3<Source> &m)
    {
        const Source *src = m.asPointer();

        // don't suppress type conversion warnings
        std::copy(src, (src + this->numElements()), MyBase::mm);
        return *this;
    } // opEqualToTest

    /// Return @c true if this matrix is equivalent to @a m within a tolerance of @a eps.
    bool eq(const Mat3 &m, T eps=1.0e-8) const
    {
        return (isApproxEqual(MyBase::mm[0],m.mm[0],eps) &&
                isApproxEqual(MyBase::mm[1],m.mm[1],eps) &&
                isApproxEqual(MyBase::mm[2],m.mm[2],eps) &&
                isApproxEqual(MyBase::mm[3],m.mm[3],eps) &&
                isApproxEqual(MyBase::mm[4],m.mm[4],eps) &&
                isApproxEqual(MyBase::mm[5],m.mm[5],eps) &&
                isApproxEqual(MyBase::mm[6],m.mm[6],eps) &&
                isApproxEqual(MyBase::mm[7],m.mm[7],eps) &&
                isApproxEqual(MyBase::mm[8],m.mm[8],eps));
    } // trivial

    /// Negation operator, for e.g. m1 = -m2;
    Mat3<T> operator-() const
    {
        return Mat3<T>(
            -MyBase::mm[0], -MyBase::mm[1], -MyBase::mm[2],
            -MyBase::mm[3], -MyBase::mm[4], -MyBase::mm[5],
            -MyBase::mm[6], -MyBase::mm[7], -MyBase::mm[8]
        );
    } // trivial

    /// Multiplication operator, e.g. M = scalar * M;
    // friend Mat3 operator*(T scalar, const Mat3& m) {
    //     return m*scalar;
    // }

    /// Multiply each element of this matrix by @a scalar.
    template <typename S>
    const Mat3<T>& operator*=(S scalar)
    {
        MyBase::mm[0] *= scalar;
        MyBase::mm[1] *= scalar;
        MyBase::mm[2] *= scalar;
        MyBase::mm[3] *= scalar;
        MyBase::mm[4] *= scalar;
        MyBase::mm[5] *= scalar;
        MyBase::mm[6] *= scalar;
        MyBase::mm[7] *= scalar;
        MyBase::mm[8] *= scalar;
        return *this;
    }

    /// Add each element of the given matrix to the corresponding element of this matrix.
    template <typename S>
    const Mat3<T> &operator+=(const Mat3<S> &m1)
    {
        const S *s = m1.asPointer();

        MyBase::mm[0] += s[0];
        MyBase::mm[1] += s[1];
        MyBase::mm[2] += s[2];
        MyBase::mm[3] += s[3];
        MyBase::mm[4] += s[4];
        MyBase::mm[5] += s[5];
        MyBase::mm[6] += s[6];
        MyBase::mm[7] += s[7];
        MyBase::mm[8] += s[8];
        return *this;
    }

    /// Subtract each element of the given matrix from the corresponding element of this matrix.
    template <typename S>
    const Mat3<T> &operator-=(const Mat3<S> &m1)
    {
        const S *s = m1.asPointer();

        MyBase::mm[0] -= s[0];
        MyBase::mm[1] -= s[1];
        MyBase::mm[2] -= s[2];
        MyBase::mm[3] -= s[3];
        MyBase::mm[4] -= s[4];
        MyBase::mm[5] -= s[5];
        MyBase::mm[6] -= s[6];
        MyBase::mm[7] -= s[7];
        MyBase::mm[8] -= s[8];
        return *this;
    }

    /// Multiply this matrix by the given matrix (i.e. *this = *this * m1).
    /// A copy of the original value is taken first so the product is
    /// computed from unmodified operands.
    template <typename S>
    const Mat3<T> &operator*=(const Mat3<S> &m1)
    {
        Mat3<T> m0(*this);

        const T* s0 = m0.asPointer();
        const S* s1 = m1.asPointer();

        MyBase::mm[0] = static_cast<T>(s0[0] * s1[0] +
                                       s0[1] * s1[3] +
                                       s0[2] * s1[6]);
        MyBase::mm[1] = static_cast<T>(s0[0] * s1[1] +
                                       s0[1] * s1[4] +
                                       s0[2] * s1[7]);
        MyBase::mm[2] = static_cast<T>(s0[0] * s1[2] +
                                       s0[1] * s1[5] +
                                       s0[2] * s1[8]);

        MyBase::mm[3] = static_cast<T>(s0[3] * s1[0] +
                                       s0[4] * s1[3] +
                                       s0[5] * s1[6]);
        MyBase::mm[4] = static_cast<T>(s0[3] * s1[1] +
                                       s0[4] * s1[4] +
                                       s0[5] * s1[7]);
        MyBase::mm[5] = static_cast<T>(s0[3] * s1[2] +
                                       s0[4] * s1[5] +
                                       s0[5] * s1[8]);

        MyBase::mm[6] = static_cast<T>(s0[6] * s1[0] +
                                       s0[7] * s1[3] +
                                       s0[8] * s1[6]);
        MyBase::mm[7] = static_cast<T>(s0[6] * s1[1] +
                                       s0[7] * s1[4] +
                                       s0[8] * s1[7]);
        MyBase::mm[8] = static_cast<T>(s0[6] * s1[2] +
                                       s0[7] * s1[5] +
                                       s0[8] * s1[8]);

        return *this;
    }

    /// @brief Return the cofactor matrix of this matrix.
    Mat3 cofactor() const
    {
        return Mat3<T>(
            MyBase::mm[4] * MyBase::mm[8] - MyBase::mm[5] * MyBase::mm[7],
            MyBase::mm[5] * MyBase::mm[6] - MyBase::mm[3] * MyBase::mm[8],
            MyBase::mm[3] * MyBase::mm[7] - MyBase::mm[4] * MyBase::mm[6],
            MyBase::mm[2] * MyBase::mm[7] - MyBase::mm[1] * MyBase::mm[8],
            MyBase::mm[0] * MyBase::mm[8] - MyBase::mm[2] * MyBase::mm[6],
            MyBase::mm[1] * MyBase::mm[6] - MyBase::mm[0] * MyBase::mm[7],
            MyBase::mm[1] * MyBase::mm[5] - MyBase::mm[2] * MyBase::mm[4],
            MyBase::mm[2] * MyBase::mm[3] - MyBase::mm[0] * MyBase::mm[5],
            MyBase::mm[0] * MyBase::mm[4] - MyBase::mm[1] * MyBase::mm[3]);
    }

    /// Return the adjoint of this matrix, i.e., the transpose of its cofactor.
    Mat3 adjoint() const
    {
        return Mat3<T>(
            MyBase::mm[4] * MyBase::mm[8] - MyBase::mm[5] * MyBase::mm[7],
            MyBase::mm[2] * MyBase::mm[7] - MyBase::mm[1] * MyBase::mm[8],
            MyBase::mm[1] * MyBase::mm[5] - MyBase::mm[2] * MyBase::mm[4],
            MyBase::mm[5] * MyBase::mm[6] - MyBase::mm[3] * MyBase::mm[8],
            MyBase::mm[0] * MyBase::mm[8] - MyBase::mm[2] * MyBase::mm[6],
            MyBase::mm[2] * MyBase::mm[3] - MyBase::mm[0] * MyBase::mm[5],
            MyBase::mm[3] * MyBase::mm[7] - MyBase::mm[4] * MyBase::mm[6],
            MyBase::mm[1] * MyBase::mm[6] - MyBase::mm[0] * MyBase::mm[7],
            MyBase::mm[0] * MyBase::mm[4] - MyBase::mm[1] * MyBase::mm[3]);
    } // adjointTest

    /// returns transpose of this
    Mat3 transpose() const
    {
        return Mat3<T>(
            MyBase::mm[0], MyBase::mm[3], MyBase::mm[6],
            MyBase::mm[1], MyBase::mm[4], MyBase::mm[7],
            MyBase::mm[2], MyBase::mm[5], MyBase::mm[8]);
    } // transposeTest

    /// returns inverse of this
    /// @throws ArithmeticError if singular (determinant within @a tolerance of zero)
    Mat3 inverse(T tolerance = 0) const
    {
        Mat3<T> inv(this->adjoint());

        // determinant via dot product of the adjoint's first row with
        // this matrix's first column (cofactor expansion)
        const T det = inv.mm[0]*MyBase::mm[0] + inv.mm[1]*MyBase::mm[3] + inv.mm[2]*MyBase::mm[6];

        // If the determinant is 0, m was singular and the result will be invalid.
        if (isApproxEqual(det,T(0.0),tolerance)) {
            OPENVDB_THROW(ArithmeticError, "Inversion of singular 3x3 matrix");
        }
        return inv * (T(1)/det);
    } // invertTest

    /// Determinant of matrix (cofactor expansion along the first row)
    T det() const
    {
        const T co00 = MyBase::mm[4]*MyBase::mm[8] - MyBase::mm[5]*MyBase::mm[7];
        const T co10 = MyBase::mm[5]*MyBase::mm[6] - MyBase::mm[3]*MyBase::mm[8];
        const T co20 = MyBase::mm[3]*MyBase::mm[7] - MyBase::mm[4]*MyBase::mm[6];
        return MyBase::mm[0]*co00 + MyBase::mm[1]*co10 + MyBase::mm[2]*co20;
    } // determinantTest

    /// Trace of matrix (sum of the diagonal elements)
    T trace() const
    {
        return MyBase::mm[0]+MyBase::mm[4]+MyBase::mm[8];
    }

    /// This function snaps a specific axis to a specific direction,
    /// preserving scaling. It does this using minimum energy, thus
    /// posing a unique solution if basis & direction arent parralel.
    /// Direction need not be unit.
    Mat3 snapBasis(Axis axis, const Vec3<T> &direction)
    {
        return snapMatBasis(*this, axis, direction);
    }

    /// Return the transformed vector by this matrix.
    /// This function is equivalent to post-multiplying the matrix.
    template<typename T0>
    Vec3<T0> transform(const Vec3<T0> &v) const
    {
        return static_cast< Vec3<T0> >(v * *this);
    } // xformVectorTest

    /// Return the transformed vector by transpose of this matrix.
    /// This function is equivalent to pre-multiplying the matrix.
    template<typename T0>
    Vec3<T0> pretransform(const Vec3<T0> &v) const
    {
        return static_cast< Vec3<T0> >(*this * v);
    } // xformTVectorTest


    /// @brief Treat @a diag as a diagonal matrix and return the product
    /// of this matrix with @a diag (from the right).
    /// @details Equivalent to scaling column @c j of this matrix by @c diag(j).
    Mat3 timesDiagonal(const Vec3<T>& diag) const
    {
        Mat3 ret(*this);

        ret.mm[0] *= diag(0);
        ret.mm[1] *= diag(1);
        ret.mm[2] *= diag(2);
        ret.mm[3] *= diag(0);
        ret.mm[4] *= diag(1);
        ret.mm[5] *= diag(2);
        ret.mm[6] *= diag(0);
        ret.mm[7] *= diag(1);
        ret.mm[8] *= diag(2);
        return ret;
    }
}; // class Mat3
/// @relates Mat3
/// @brief Equality operator; compares all nine elements with exact
/// floating-point equality (no tolerance).
template <typename T0, typename T1>
bool operator==(const Mat3<T0> &m0, const Mat3<T1> &m1)
{
    const T0 *lhs = m0.asPointer();
    const T1 *rhs = m1.asPointer();
    bool same = true;
    for (int i = 0; i < 9 && same; ++i) {
        same = isExactlyEqual(lhs[i], rhs[i]);
    }
    return same;
}
/// @relates Mat3
/// @brief Inequality operator; the exact logical negation of operator==.
template <typename T0, typename T1>
bool operator!=(const Mat3<T0> &m0, const Mat3<T1> &m1)
{
    const bool equal = (m0 == m1);
    return !equal;
}
/// @relates Mat3
/// @brief Multiply each element of the given matrix by @a scalar and return the result.
template <typename S, typename T>
Mat3<typename promote<S, T>::type> operator*(S scalar, const Mat3<T> &m)
{
    // Scalar multiplication commutes element-wise.
    Mat3<typename promote<S, T>::type> scaled(m);
    scaled *= scalar;
    return scaled;
}
/// @relates Mat3
/// @brief Multiply each element of the given matrix by @a scalar and return the result.
/// The element type of the result is promoted from @c S and @c T.
template <typename S, typename T>
Mat3<typename promote<S, T>::type> operator*(const Mat3<T> &m, S scalar)
{
    Mat3<typename promote<S, T>::type> scaled(m);
    scaled *= scalar;
    return scaled;
}
/// @relates Mat3
/// @brief Add corresponding elements of @a m0 and @a m1 and return the result.
template <typename T0, typename T1>
Mat3<typename promote<T0, T1>::type> operator+(const Mat3<T0> &m0, const Mat3<T1> &m1)
{
    Mat3<typename promote<T0, T1>::type> sum(m0);
    const T1 *rhs = m1.asPointer();
    auto *out = sum.asPointer();
    for (int i = 0; i < 9; ++i) {
        out[i] += rhs[i];
    }
    return sum;
}
/// @relates Mat3
/// @brief Subtract corresponding elements of @a m0 and @a m1 and return the result.
template <typename T0, typename T1>
Mat3<typename promote<T0, T1>::type> operator-(const Mat3<T0> &m0, const Mat3<T1> &m1)
{
    Mat3<typename promote<T0, T1>::type> diff(m0);
    const T1 *rhs = m1.asPointer();
    auto *out = diff.asPointer();
    for (int i = 0; i < 9; ++i) {
        out[i] -= rhs[i];
    }
    return diff;
}
/// @brief Multiply @a m0 by @a m1 and return the resulting matrix.
/// The element type of the result is promoted from @c T0 and @c T1.
template <typename T0, typename T1>
Mat3<typename promote<T0, T1>::type>operator*(const Mat3<T0> &m0, const Mat3<T1> &m1)
{
    Mat3<typename promote<T0, T1>::type> product(m0);
    product *= m1;
    return product;
}
/// @relates Mat3
/// @brief Multiply @a _m by @a _v and return the resulting vector.
/// Each output component is the dot product of the corresponding
/// matrix row with @a _v (row-major storage: row r starts at 3*r).
template<typename T, typename MT>
inline Vec3<typename promote<T, MT>::type>
operator*(const Mat3<MT> &_m, const Vec3<T> &_v)
{
    MT const *p = _m.asPointer();
    Vec3<typename promote<T, MT>::type> result;
    for (int r = 0; r < 3; ++r) {
        result[r] = _v[0]*p[3*r] + _v[1]*p[3*r + 1] + _v[2]*p[3*r + 2];
    }
    return result;
}
/// @relates Mat3
/// @brief Multiply @a _v by @a _m and return the resulting vector.
/// Each output component is the dot product of @a _v with the
/// corresponding matrix column (row-major storage: column c occupies
/// indices c, c+3, c+6).
template<typename T, typename MT>
inline Vec3<typename promote<T, MT>::type>
operator*(const Vec3<T> &_v, const Mat3<MT> &_m)
{
    MT const *p = _m.asPointer();
    Vec3<typename promote<T, MT>::type> result;
    for (int c = 0; c < 3; ++c) {
        result[c] = _v[0]*p[c] + _v[1]*p[c + 3] + _v[2]*p[c + 6];
    }
    return result;
}
/// @relates Mat3
/// @brief Multiply @a _v by @a _m and replace @a _v with the resulting vector.
template<typename T, typename MT>
inline Vec3<T> &operator *= (Vec3<T> &_v, const Mat3<MT> &_m)
{
    // Compute into a temporary first, then assign back (the product
    // must not read from a partially overwritten _v).
    _v = _v * _m;
    return _v;
}
/// Returns outer product of v1, v2, i.e. v1 v2^T if v1 and v2 are
/// column vectors, e.g. M = Mat3f::outerproduct(v1,v2);
template <typename T>
Mat3<T> outerProduct(const Vec3<T>& v1, const Vec3<T>& v2)
{
    Mat3<T> result;
    for (int r = 0; r < 3; ++r) {
        for (int c = 0; c < 3; ++c) {
            result(r, c) = v1[r] * v2[c];
        }
    }
    return result;
}// outerProduct
/// Interpolate the rotation between m1 and m2 using Mat::powSolve.
/// Unlike slerp, translation is not treated independently.
/// This results in smoother animation results.
/// @param m1,m2  the endpoint matrices
/// @param t      interpolation parameter
/// @note m1 must be invertible: Mat3::inverse() throws ArithmeticError
/// if it is singular.
template<typename T, typename T0>
Mat3<T> powLerp(const Mat3<T0> &m1, const Mat3<T0> &m2, T t)
{
    Mat3<T> x = m1.inverse() * m2;
    powSolve(x, x, t);  // presumably raises x to the power t in place -- see powSolve in Mat.h
    Mat3<T> m = m1 * x;
    return m;
}
namespace mat3_internal {

/// @brief Apply one Jacobi rotation that annihilates the off-diagonal
/// entry S(i,j) of a symmetric matrix being diagonalized.
/// @param i,j  indices of the pivot entry (caller passes i < j)
/// @param S    working copy of the matrix; only the strictly upper
///             triangle is read and updated
/// @param D    current diagonal (eigenvalue estimates), updated in place
/// @param Q    accumulated rotations (eigenvector estimates), updated in place
template<typename T>
inline void
pivot(int i, int j, Mat3<T>& S, Vec3<T>& D, Mat3<T>& Q)
{
    const int& n = Mat3<T>::size;  // should be 3
    T temp;
    /// scratch variables used in pivoting
    double cotan_of_2_theta;
    double tan_of_theta;
    double cosin_of_theta;
    double sin_of_theta;
    double z;

    double Sij = S(i,j);

    double Sjj_minus_Sii = D[j] - D[i];

    if (fabs(Sjj_minus_Sii) * (10*math::Tolerance<T>::value()) > fabs(Sij)) {
        // Sij is tiny relative to the diagonal gap: use the small-angle
        // approximation tan(theta) ~= Sij / (Sjj - Sii)
        tan_of_theta = Sij / Sjj_minus_Sii;
    } else {
        /// pivot on Sij
        cotan_of_2_theta = 0.5*Sjj_minus_Sii / Sij ;

        // choose the root of smaller magnitude for numerical stability
        if (cotan_of_2_theta < 0.) {
            tan_of_theta =
                -1./(sqrt(1. + cotan_of_2_theta*cotan_of_2_theta) - cotan_of_2_theta);
        } else {
            tan_of_theta =
                1./(sqrt(1. + cotan_of_2_theta*cotan_of_2_theta) + cotan_of_2_theta);
        }
    }

    cosin_of_theta = 1./sqrt( 1. + tan_of_theta * tan_of_theta);
    sin_of_theta = cosin_of_theta * tan_of_theta;
    z = tan_of_theta * Sij;
    S(i,j) = 0;  // the pivot entry is zeroed by construction of the rotation
    D[i] -= z;
    D[j] += z;
    // Rotate the remaining entries of rows/columns i and j, touching
    // only the stored upper-triangular part of S.
    for (int k = 0; k < i; ++k) {
        temp = S(k,i);
        S(k,i) = cosin_of_theta * temp - sin_of_theta * S(k,j);
        S(k,j)= sin_of_theta * temp + cosin_of_theta * S(k,j);
    }
    for (int k = i+1; k < j; ++k) {
        temp = S(i,k);
        S(i,k) = cosin_of_theta * temp - sin_of_theta * S(k,j);
        S(k,j) = sin_of_theta * temp + cosin_of_theta * S(k,j);
    }
    for (int k = j+1; k < n; ++k) {
        temp = S(i,k);
        S(i,k) = cosin_of_theta * temp - sin_of_theta * S(j,k);
        S(j,k) = sin_of_theta * temp + cosin_of_theta * S(j,k);
    }
    // Accumulate the same rotation into the eigenvector matrix Q.
    for (int k = 0; k < n; ++k)
    {
        temp = Q(k,i);
        Q(k,i) = cosin_of_theta * temp - sin_of_theta*Q(k,j);
        Q(k,j) = sin_of_theta * temp + cosin_of_theta*Q(k,j);
    }
}
} // namespace mat3_internal
/// @brief Use Jacobi iterations to decompose a symmetric 3x3 matrix
/// (diagonalize and compute eigenvectors)
/// @details This is based on the "Efficient numerical diagonalization of Hermitian 3x3 matrices"
/// Joachim Kopp.  arXiv.org preprint: physics/0610206
/// with the addition of largest pivot
/// @param input           symmetric matrix to decompose (assumed symmetric; only
///                        the upper triangle of the working copy is maintained)
/// @param Q               output: accumulated rotations (eigenvectors as columns)
/// @param D               output: the diagonal (eigenvalues)
/// @param MAX_ITERATIONS  upper bound on the number of Jacobi sweeps
/// @return true if the off-diagonals converged to zero within
/// MAX_ITERATIONS, false otherwise
template<typename T>
inline bool
diagonalizeSymmetricMatrix(const Mat3<T>& input, Mat3<T>& Q, Vec3<T>& D,
    unsigned int MAX_ITERATIONS=250)
{
    /// use Givens rotation matrix to eliminate off-diagonal entries.
    /// initialize the rotation matrix as identity
    Q = Mat3<T>::identity();
    int n = Mat3<T>::size;  // should be 3

    /// temp matrix.  Assumed to be symmetric
    Mat3<T> S(input);

    for (int i = 0; i < n; ++i) {
        D[i] = S(i,i);
    }

    unsigned int iterations(0);
    /// Just iterate over all the non-diagonal entries
    /// using the largest as a pivot.
    do {
        /// check for absolute convergence
        /// are symmetric off diagonals all zero
        double er = 0;
        for (int i = 0; i < n; ++i) {
            for (int j = i+1; j < n; ++j) {
                er += fabs(S(i,j));
            }
        }
        if (std::abs(er) < math::Tolerance<T>::value()) {
            return true;
        }
        iterations++;

        T max_element = 0;
        int ip = 0;
        int jp = 0;
        /// loop over all the off-diagonals above the diagonal
        for (int i = 0; i < n; ++i) {
            for (int j = i+1; j < n; ++j){

                if ( fabs(D[i]) * (10*math::Tolerance<T>::value()) > fabs(S(i,j))) {
                    /// value too small to pivot on
                    S(i,j) = 0;
                }
                if (fabs(S(i,j)) > max_element) {
                    max_element = fabs(S(i,j));
                    ip = i;
                    jp = j;
                }
            }
        }
        // annihilate the largest remaining off-diagonal entry
        mat3_internal::pivot(ip, jp, S, D, Q);
    } while (iterations < MAX_ITERATIONS);

    return false;
}
/// @brief Return a matrix whose elements are the absolute values of
/// the corresponding elements of @a m.
template<typename T>
inline Mat3<T>
Abs(const Mat3<T>& m)
{
    Mat3<T> result;
    for (int r = 0; r < 3; ++r) {
        for (int c = 0; c < 3; ++c) {
            result(r, c) = math::Abs(m(r, c));
        }
    }
    return result;
}
/// @brief Return a copy of @a m with the scalar @a s added to every element.
template<typename Type1, typename Type2>
inline Mat3<Type1>
cwiseAdd(const Mat3<Type1>& m, const Type2 s)
{
    Mat3<Type1> out;
    const Type1* ip = m.asPointer();
    Type1* op = out.asPointer();
    for (unsigned i = 0; i < 9; ++i, ++op, ++ip) {
        // the Type2 -> Type1 narrowing is intentional; suppress the warning
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        *op = *ip + s;
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END
    }
    return out;
}
/// @brief Return true if every element of @a m0 compares less than the
/// corresponding element of @a m1 (delegates to the generic Mat overload).
template<typename T>
inline bool
cwiseLessThan(const Mat3<T>& m0, const Mat3<T>& m1)
{
    return cwiseLessThan<3, T>(m0, m1);
}
/// @brief Return true if every element of @a m0 compares greater than the
/// corresponding element of @a m1 (delegates to the generic Mat overload).
template<typename T>
inline bool
cwiseGreaterThan(const Mat3<T>& m0, const Mat3<T>& m1)
{
    return cwiseGreaterThan<3, T>(m0, m1);
}
// Convenience aliases for the common element types.
using Mat3s = Mat3<float>;
using Mat3d = Mat3<double>;
// NOTE(review): despite the "f" suffix, Mat3f aliases the double-precision
// Mat3d here -- confirm this is intentional before relying on it.
using Mat3f = Mat3d;
#if OPENVDB_ABI_VERSION_NUMBER >= 8
// Register these matrix types as PODs (ABI >= 8 only).
OPENVDB_IS_POD(Mat3s)
OPENVDB_IS_POD(Mat3d)
#endif
} // namespace math
// zeroVal specializations: the "zero" of a matrix type is the all-zero
// matrix, not a default-constructed (uninitialized) one.
template<> inline math::Mat3s zeroVal<math::Mat3s>() { return math::Mat3s::zero(); }
template<> inline math::Mat3d zeroVal<math::Mat3d>() { return math::Mat3d::zero(); }
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_MATH_MAT3_H_HAS_BEEN_INCLUDED
| 27,164 | C | 30.36836 | 98 | 0.523008 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/Proximity.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_MATH_PROXIMITY_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_PROXIMITY_HAS_BEEN_INCLUDED
#include <openvdb/Types.h>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
/// @brief Closest Point on Triangle to Point. Given a triangle @c abc and a point @c p,
/// return the point on @c abc closest to @c p and the corresponding barycentric coordinates.
///
/// @details Algorithms from "Real-Time Collision Detection" pg 136 to 142 by Christer Ericson.
/// The closest point is obtained by first determining which of the triangles'
/// Voronoi feature regions @c p is in and then computing the orthogonal projection
/// of @c p onto the corresponding feature.
///
/// @param a The triangle's first vertex point.
/// @param b The triangle's second vertex point.
/// @param c The triangle's third vertex point.
/// @param p Point to compute the closest point on @c abc for.
/// @param uvw Barycentric coordinates, computed and returned.
OPENVDB_API Vec3d
closestPointOnTriangleToPoint(
const Vec3d& a, const Vec3d& b, const Vec3d& c, const Vec3d& p, Vec3d& uvw);
/// @brief Closest Point on Line Segment to Point. Given segment @c ab and point @c p,
/// return the point on @c ab closest to @c p and @c t the parametric distance to @c b.
///
/// @param a The segment's first vertex point.
/// @param b The segment's second vertex point.
/// @param p Point to compute the closest point on @c ab for.
/// @param t Parametric distance to @c b.
OPENVDB_API Vec3d
closestPointOnSegmentToPoint(
const Vec3d& a, const Vec3d& b, const Vec3d& p, double& t);
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_MATH_PROXIMITY_HAS_BEEN_INCLUDED
| 1,871 | C | 37.204081 | 95 | 0.725281 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/DDA.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file DDA.h
///
/// @author Ken Museth
///
/// @brief Digital Differential Analyzers specialized for VDB.
#ifndef OPENVDB_MATH_DDA_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_DDA_HAS_BEEN_INCLUDED
#include "Coord.h"
#include "Math.h"
#include "Vec3.h"
#include <openvdb/Types.h>
#include <iostream> // for std::ostream
#include <limits> // for std::numeric_limits<Type>::max()
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
/// @brief A Digital Differential Analyzer specialized for OpenVDB grids
/// @note Conceptually similar to Bresenham's line algorithm applied
/// to a 3D Ray intersecting OpenVDB nodes or voxels. Log2Dim = 0
/// corresponds to a voxel and Log2Dim a tree node of size 2^Log2Dim.
///
/// @note The Ray template class is expected to have the following
/// methods: test(time), t0(), t1(), invDir(), and operator()(time).
/// See the example Ray class above for their definition.
template<typename RayT, Index Log2Dim = 0>
class DDA
{
public:
    using RealType = typename RayT::RealType;
    using RealT = RealType;
    using Vec3Type = typename RayT::Vec3Type;
    using Vec3T = Vec3Type;
    /// @brief uninitialized constructor
    DDA() {}
    /// @brief Construct and initialize over the ray's full [t0, t1] range.
    DDA(const RayT& ray) { this->init(ray); }
    /// @brief Construct and initialize from @a startTime to the ray's t1.
    DDA(const RayT& ray, RealT startTime) { this->init(ray, startTime); }
    /// @brief Construct and initialize over [startTime, maxTime].
    DDA(const RayT& ray, RealT startTime, RealT maxTime) { this->init(ray, startTime, maxTime); }
    /// @brief (Re)initialize traversal of @a ray over [startTime, maxTime].
    /// Computes, per axis, the step direction, the time of the next grid-plane
    /// crossing (mNext), and the time between crossings (mDelta).
    inline void init(const RayT& ray, RealT startTime, RealT maxTime)
    {
        assert(startTime <= maxTime);
        // Node/voxel edge length covered by one step.
        static const int DIM = 1 << Log2Dim;
        mT0 = startTime;
        mT1 = maxTime;
        const Vec3T &pos = ray(mT0), &dir = ray.dir(), &inv = ray.invDir();
        // Snap the start position down to the origin of its enclosing
        // node/voxel of size DIM (mask off the low Log2Dim bits).
        mVoxel = Coord::floor(pos) & (~(DIM-1));
        for (int axis = 0; axis < 3; ++axis) {
            if (math::isZero(dir[axis])) {//handles dir = +/- 0
                mStep[axis] = 0;//dummy value
                mNext[axis] = std::numeric_limits<RealT>::max();//i.e. disabled!
                mDelta[axis] = std::numeric_limits<RealT>::max();//dummy value
            } else if (inv[axis] > 0) {
                // Ray travels in +axis direction: next crossing is the far face.
                mStep[axis] = DIM;
                mNext[axis] = mT0 + (mVoxel[axis] + DIM - pos[axis]) * inv[axis];
                mDelta[axis] = mStep[axis] * inv[axis];
            } else {
                // Ray travels in -axis direction: next crossing is the near face.
                mStep[axis] = -DIM;
                mNext[axis] = mT0 + (mVoxel[axis] - pos[axis]) * inv[axis];
                mDelta[axis] = mStep[axis] * inv[axis];
            }
        }
    }
    /// @brief Initialize over the ray's full [t0, t1] range.
    inline void init(const RayT& ray) { this->init(ray, ray.t0(), ray.t1()); }
    /// @brief Initialize from @a startTime to the ray's t1.
    inline void init(const RayT& ray, RealT startTime) { this->init(ray, startTime, ray.t1()); }
    /// @brief Increment the voxel index to next intersected voxel or node
    /// and returns true if the step in time does not exceed maxTime.
    inline bool step()
    {
        // Advance along the axis whose next plane crossing is nearest.
        const int stepAxis = static_cast<int>(math::MinIndex(mNext));
        mT0 = mNext[stepAxis];
        mNext[stepAxis] += mDelta[stepAxis];
        mVoxel[stepAxis] += mStep[stepAxis];
        return mT0 <= mT1;
    }
    /// @brief Return the index coordinates of the next node or voxel
    /// intersected by the ray. If Log2Dim = 0 the return value is the
    /// actual signed coordinate of the voxel, else it is the origin
    /// of the corresponding VDB tree node or tile.
    /// @note Incurs no computational overhead.
    inline const Coord& voxel() const { return mVoxel; }
    /// @brief Return the time (parameterized along the Ray) of the
    /// first hit of a tree node of size 2^Log2Dim.
    /// @details This value is initialized to startTime or ray.t0()
    /// depending on the constructor used.
    /// @note Incurs no computational overhead.
    inline RealType time() const { return mT0; }
    /// @brief Return the maximum time (parameterized along the Ray).
    inline RealType maxTime() const { return mT1; }
    /// @brief Return the time (parameterized along the Ray) of the
    /// second (i.e. next) hit of a tree node of size 2^Log2Dim.
    /// @note Incurs a (small) computational overhead.
    inline RealType next() const { return math::Min(mT1, mNext[0], mNext[1], mNext[2]); }
    /// @brief Print information about this DDA for debugging.
    /// @param os a stream to which to write textual information.
    void print(std::ostream& os = std::cout) const
    {
        os << "Dim=" << (1<<Log2Dim) << " time=" << mT0 << " next()="
           << this->next() << " voxel=" << mVoxel << " next=" << mNext
           << " delta=" << mDelta << " step=" << mStep << std::endl;
    }
private:
    RealT mT0, mT1;          // current time and maximum time along the ray
    Coord mVoxel, mStep;     // current node/voxel origin and per-axis signed step
    Vec3T mDelta, mNext;     // per-axis time between crossings and time of next crossing
}; // class DDA
/// @brief Output streaming of the DDA class.
/// @note Primarily intended for debugging.
template<typename RayT, Index Log2Dim>
inline std::ostream& operator<<(std::ostream& os, const DDA<RayT, Log2Dim>& dda)
{
    const int dim = 1 << Log2Dim;
    os << "Dim=" << dim << " time=" << dda.time()
       << " next()=" << dda.next() << " voxel=" << dda.voxel();
    return os;
}
/////////////////////////////////////////// LevelSetHDDA ////////////////////////////////////////////
/// @brief Helper class that implements Hierarchical Digital Differential Analyzers
/// and is specialized for ray intersections with level sets
template<typename TreeT, int NodeLevel>
struct LevelSetHDDA
{
    using ChainT = typename TreeT::RootNodeType::NodeChainType;
    using NodeT = typename ChainT::template Get<NodeLevel>;
    /// @brief March a DDA sized for this node level along the tester's ray,
    /// recursing into the next finer level wherever a NodeT is present.
    /// @return true as soon as any finer level reports a hit.
    template <typename TesterT>
    static bool test(TesterT& tester)
    {
        math::DDA<typename TesterT::RayT, NodeT::TOTAL> dda(tester.ray());
        for (bool more = true; more; more = dda.step()) {
            if (!tester.template hasNode<NodeT>(dda.voxel())) continue;
            tester.setRange(dda.time(), dda.next());
            if (LevelSetHDDA<TreeT, NodeLevel-1>::test(tester)) return true;
        }
        return false;
    }
};
/// @brief Specialization of Hierarchical Digital Differential Analyzer
/// class that intersects a ray against the voxels of a level set
template<typename TreeT>
struct LevelSetHDDA<TreeT, -1>
{
    /// @brief March a voxel-sized DDA along the tester's ray, invoking the
    /// tester on every voxel until it reports a hit.
    template <typename TesterT>
    static bool test(TesterT& tester)
    {
        math::DDA<typename TesterT::RayT, 0> dda(tester.ray());
        tester.init(dda.time());
        for (bool more = true; more; more = dda.step()) {
            if (tester(dda.voxel(), dda.next())) return true;
        }
        return false;
    }
};
//////////////////////////////////////////// VolumeHDDA /////////////////////////////////////////////
/// @brief Helper class that implements Hierarchical Digital Differential Analyzers
/// for ray intersections against a generic volume.
///
/// @details The template argument ChildNodeLevel specifies the entry
/// upper node level used for the hierarchical ray-marching. The final
/// lowest level is always the leaf node level, i.e. not the voxel level!
template <typename TreeT, typename RayT, int ChildNodeLevel>
class VolumeHDDA
{
public:
    using ChainT = typename TreeT::RootNodeType::NodeChainType;
    using NodeT = typename ChainT::template Get<ChildNodeLevel>;
    using TimeSpanT = typename RayT::TimeSpan;
    VolumeHDDA() {}
    /// @brief March along @a ray and return the first contiguous time-span
    /// over which active values are hit; (-1,-1) if nothing active is hit.
    template <typename AccessorT>
    TimeSpanT march(RayT& ray, AccessorT &acc)
    {
        TimeSpanT t(-1, -1);
        if (ray.valid()) this->march(ray, acc, t);
        return t;
    }
    /// @brief Collect into @a times every contiguous active time-span along @a ray.
    /// ListType is a list of RayType::TimeSpan and is required to
    /// have the two methods: clear() and push_back(). Thus, it could
    /// be std::vector<typename RayType::TimeSpan> or
    /// std::deque<typename RayType::TimeSpan>.
    template <typename AccessorT, typename ListT>
    void hits(RayT& ray, AccessorT &acc, ListT& times)
    {
        TimeSpanT t(-1,-1);
        times.clear();
        this->hits(ray, acc, times, t);
        // A span still open at the end of the ray is flushed here.
        if (t.valid()) times.push_back(t);
    }
private:
    // The coarser level recurses into this one through these private overloads.
    friend class VolumeHDDA<TreeT, RayT, ChildNodeLevel+1>;
    // State machine over t: t.t0 < 0 means "no active segment open yet";
    // t.t0 >= 0 means a segment started at t.t0 and is still open.
    // Returns true to terminate marching (a complete valid span was found).
    template <typename AccessorT>
    bool march(RayT& ray, AccessorT &acc, TimeSpanT& t)
    {
        mDDA.init(ray);
        do {
            if (acc.template probeConstNode<NodeT>(mDDA.voxel()) != nullptr) {//child node
                ray.setTimes(mDDA.time(), mDDA.next());
                if (mHDDA.march(ray, acc, t)) return true;//terminate
            } else if (acc.isValueOn(mDDA.voxel())) {//hit an active tile
                if (t.t0<0) t.t0 = mDDA.time();//this is the first hit so set t0
            } else if (t.t0>=0) {//hit an inactive tile after hitting active values
                t.t1 = mDDA.time();//set end of active ray segment
                if (t.valid()) return true;//terminate
                t.set(-1, -1);//reset to an empty and invalid time-span
            }
        } while (mDDA.step());
        // Ray exhausted while a segment was open: close it at maxTime.
        if (t.t0>=0) t.t1 = mDDA.maxTime();
        return false;
    }
    /// ListType is a list of RayType::TimeSpan and is required to
    /// have the two methods: clear() and push_back(). Thus, it could
    /// be std::vector<typename RayType::TimeSpan> or
    /// std::deque<typename RayType::TimeSpan>.
    template <typename AccessorT, typename ListT>
    void hits(RayT& ray, AccessorT &acc, ListT& times, TimeSpanT& t)
    {
        mDDA.init(ray);
        do {
            if (acc.template probeConstNode<NodeT>(mDDA.voxel()) != nullptr) {//child node
                ray.setTimes(mDDA.time(), mDDA.next());
                mHDDA.hits(ray, acc, times, t);
            } else if (acc.isValueOn(mDDA.voxel())) {//hit an active tile
                if (t.t0<0) t.t0 = mDDA.time();//this is the first hit so set t0
            } else if (t.t0>=0) {//hit an inactive tile after hitting active values
                t.t1 = mDDA.time();//set end of active ray segment
                if (t.valid()) times.push_back(t);
                t.set(-1,-1);//reset to an empty and invalid time-span
            }
        } while (mDDA.step());
        if (t.t0>=0) t.t1 = mDDA.maxTime();
    }
    math::DDA<RayT, NodeT::TOTAL> mDDA;                  // DDA sized for this node level
    VolumeHDDA<TreeT, RayT, ChildNodeLevel-1> mHDDA;     // next finer level of the hierarchy
};
/// @brief Specialization of Hierarchical Digital Differential Analyzer
/// class that intersects against the leafs or tiles of a generic volume.
template <typename TreeT, typename RayT>
class VolumeHDDA<TreeT, RayT, 0>
{
public:
    using LeafT = typename TreeT::LeafNodeType;
    using TimeSpanT = typename RayT::TimeSpan;
    VolumeHDDA() {}
    /// @brief March along @a ray and return the first contiguous time-span
    /// over which leafs or active tiles are hit; (-1,-1) if none.
    template <typename AccessorT>
    TimeSpanT march(RayT& ray, AccessorT &acc)
    {
        TimeSpanT t(-1, -1);
        if (ray.valid()) this->march(ray, acc, t);
        return t;
    }
    /// @brief Collect into @a times every contiguous active time-span along @a ray.
    template <typename AccessorT, typename ListT>
    void hits(RayT& ray, AccessorT &acc, ListT& times)
    {
        TimeSpanT t(-1,-1);
        times.clear();
        this->hits(ray, acc, times, t);
        // A span still open at the end of the ray is flushed here.
        if (t.valid()) times.push_back(t);
    }
private:
    // The level-1 analyzer recurses into this one through these overloads.
    friend class VolumeHDDA<TreeT, RayT, 1>;
    // Same t0/t1 state machine as the primary template, but at the finest
    // level a leaf node (or active tile) itself counts as a hit.
    template <typename AccessorT>
    bool march(RayT& ray, AccessorT &acc, TimeSpanT& t)
    {
        mDDA.init(ray);
        do {
            if (acc.template probeConstNode<LeafT>(mDDA.voxel()) ||
                acc.isValueOn(mDDA.voxel())) {//hit a leaf or an active tile
                if (t.t0<0) t.t0 = mDDA.time();//this is the first hit
            } else if (t.t0>=0) {//hit an inactive tile after hitting active values
                t.t1 = mDDA.time();//set end of active ray segment
                if (t.valid()) return true;//terminate
                t.set(-1, -1);//reset to an empty and invalid time-span
            }
        } while (mDDA.step());
        // Ray exhausted while a segment was open: close it at maxTime.
        if (t.t0>=0) t.t1 = mDDA.maxTime();
        return false;
    }
    template <typename AccessorT, typename ListT>
    void hits(RayT& ray, AccessorT &acc, ListT& times, TimeSpanT& t)
    {
        mDDA.init(ray);
        do {
            if (acc.template probeConstNode<LeafT>(mDDA.voxel()) ||
                acc.isValueOn(mDDA.voxel())) {//hit a leaf or an active tile
                if (t.t0<0) t.t0 = mDDA.time();//this is the first hit
            } else if (t.t0>=0) {//hit an inactive tile after hitting active values
                t.t1 = mDDA.time();//set end of active ray segment
                if (t.valid()) times.push_back(t);
                t.set(-1, -1);//reset to an empty and invalid time-span
            }
        } while (mDDA.step());
        if (t.t0>=0) t.t1 = mDDA.maxTime();
    }
    math::DDA<RayT, LeafT::TOTAL> mDDA;  // DDA sized for leaf nodes
};
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_MATH_DDA_HAS_BEEN_INCLUDED
| 12,649 | C | 35.773256 | 101 | 0.596806 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/Mat4.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_MATH_MAT4_H_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_MAT4_H_HAS_BEEN_INCLUDED
#include <openvdb/Exceptions.h>
#include <openvdb/Platform.h>
#include "Math.h"
#include "Mat3.h"
#include "Vec3.h"
#include "Vec4.h"
#include <algorithm> // for std::copy(), std::swap()
#include <cassert>
#include <iomanip>
#include <cmath>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
template<typename T> class Vec4;
/// @class Mat4 Mat4.h
/// @brief 4x4 -matrix class.
template<typename T>
class Mat4: public Mat<4, T>
{
public:
/// Data type held by the matrix.
using value_type = T;
using ValueType = T;
using MyBase = Mat<4, T>;
    /// Trivial constructor, the matrix is NOT initialized
#if OPENVDB_ABI_VERSION_NUMBER >= 8
    /// @note destructor, copy constructor, assignment operator and
    /// move constructor are left to be defined by the compiler (default)
    Mat4() = default;
#else
    Mat4() {}
    /// Copy constructor
    Mat4(const Mat<4, T> &m)
    {
        // Element-wise copy from the generic base-class matrix.
        for (int i = 0; i < 4; ++i) {
            for (int j = 0; j < 4; ++j) {
                MyBase::mm[i*4 + j] = m[i][j];
            }
        }
    }
#endif
/// Constructor given array of elements, the ordering is in row major form:
/** @verbatim
a[ 0] a[1] a[ 2] a[ 3]
a[ 4] a[5] a[ 6] a[ 7]
a[ 8] a[9] a[10] a[11]
a[12] a[13] a[14] a[15]
@endverbatim */
template<typename Source>
Mat4(Source *a)
{
for (int i = 0; i < 16; i++) {
MyBase::mm[i] = static_cast<T>(a[i]);
}
}
/// Constructor given array of elements, the ordering is in row major form:
/** @verbatim
a b c d
e f g h
i j k l
m n o p
@endverbatim */
template<typename Source>
Mat4(Source a, Source b, Source c, Source d,
Source e, Source f, Source g, Source h,
Source i, Source j, Source k, Source l,
Source m, Source n, Source o, Source p)
{
MyBase::mm[ 0] = static_cast<T>(a);
MyBase::mm[ 1] = static_cast<T>(b);
MyBase::mm[ 2] = static_cast<T>(c);
MyBase::mm[ 3] = static_cast<T>(d);
MyBase::mm[ 4] = static_cast<T>(e);
MyBase::mm[ 5] = static_cast<T>(f);
MyBase::mm[ 6] = static_cast<T>(g);
MyBase::mm[ 7] = static_cast<T>(h);
MyBase::mm[ 8] = static_cast<T>(i);
MyBase::mm[ 9] = static_cast<T>(j);
MyBase::mm[10] = static_cast<T>(k);
MyBase::mm[11] = static_cast<T>(l);
MyBase::mm[12] = static_cast<T>(m);
MyBase::mm[13] = static_cast<T>(n);
MyBase::mm[14] = static_cast<T>(o);
MyBase::mm[15] = static_cast<T>(p);
}
/// Construct matrix from rows or columns vectors (defaults to rows
/// for historical reasons)
template<typename Source>
Mat4(const Vec4<Source> &v1, const Vec4<Source> &v2,
const Vec4<Source> &v3, const Vec4<Source> &v4, bool rows = true)
{
if (rows) {
this->setRows(v1, v2, v3, v4);
} else {
this->setColumns(v1, v2, v3, v4);
}
}
/// Conversion constructor
template<typename Source>
explicit Mat4(const Mat4<Source> &m)
{
const Source *src = m.asPointer();
for (int i=0; i<16; ++i) {
MyBase::mm[i] = static_cast<T>(src[i]);
}
}
    /// Predefined constant for identity matrix
    /// @note Returns a reference to a function-local static instance.
    static const Mat4<T>& identity() {
        static const Mat4<T> sIdentity = Mat4<T>(
            1, 0, 0, 0,
            0, 1, 0, 0,
            0, 0, 1, 0,
            0, 0, 0, 1
        );
        return sIdentity;
    }
    /// Predefined constant for zero matrix
    /// @note Returns a reference to a function-local static instance.
    static const Mat4<T>& zero() {
        static const Mat4<T> sZero = Mat4<T>(
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0
        );
        return sZero;
    }
/// Set ith row to vector v
void setRow(int i, const Vec4<T> &v)
{
// assert(i>=0 && i<4);
int i4 = i * 4;
MyBase::mm[i4+0] = v[0];
MyBase::mm[i4+1] = v[1];
MyBase::mm[i4+2] = v[2];
MyBase::mm[i4+3] = v[3];
}
/// Get ith row, e.g. Vec4f v = m.row(1);
Vec4<T> row(int i) const
{
// assert(i>=0 && i<3);
return Vec4<T>((*this)(i,0), (*this)(i,1), (*this)(i,2), (*this)(i,3));
}
/// Set jth column to vector v
void setCol(int j, const Vec4<T>& v)
{
// assert(j>=0 && j<4);
MyBase::mm[ 0+j] = v[0];
MyBase::mm[ 4+j] = v[1];
MyBase::mm[ 8+j] = v[2];
MyBase::mm[12+j] = v[3];
}
/// Get jth column, e.g. Vec4f v = m.col(0);
Vec4<T> col(int j) const
{
// assert(j>=0 && j<4);
return Vec4<T>((*this)(0,j), (*this)(1,j), (*this)(2,j), (*this)(3,j));
}
    /// Alternative indexed reference to the elements
    /// Note that the indices are row first and column second.
    /// e.g. m(0,0) = 1;
    T& operator()(int i, int j)
    {
        // Row-major storage: element (i,j) lives at linear index 4*i+j.
        // assert(i>=0 && i<4);
        // assert(j>=0 && j<4);
        return MyBase::mm[4*i+j];
    }
    /// Alternative indexed constant reference to the elements,
    /// Note that the indices are row first and column second.
    /// e.g. float f = m(1,0);
    T operator()(int i, int j) const
    {
        // assert(i>=0 && i<4);
        // assert(j>=0 && j<4);
        return MyBase::mm[4*i+j];
    }
/// Set the rows of this matrix to the vectors v1, v2, v3, v4
void setRows(const Vec4<T> &v1, const Vec4<T> &v2,
const Vec4<T> &v3, const Vec4<T> &v4)
{
MyBase::mm[ 0] = v1[0];
MyBase::mm[ 1] = v1[1];
MyBase::mm[ 2] = v1[2];
MyBase::mm[ 3] = v1[3];
MyBase::mm[ 4] = v2[0];
MyBase::mm[ 5] = v2[1];
MyBase::mm[ 6] = v2[2];
MyBase::mm[ 7] = v2[3];
MyBase::mm[ 8] = v3[0];
MyBase::mm[ 9] = v3[1];
MyBase::mm[10] = v3[2];
MyBase::mm[11] = v3[3];
MyBase::mm[12] = v4[0];
MyBase::mm[13] = v4[1];
MyBase::mm[14] = v4[2];
MyBase::mm[15] = v4[3];
}
/// Set the columns of this matrix to the vectors v1, v2, v3, v4
void setColumns(const Vec4<T> &v1, const Vec4<T> &v2,
const Vec4<T> &v3, const Vec4<T> &v4)
{
MyBase::mm[ 0] = v1[0];
MyBase::mm[ 1] = v2[0];
MyBase::mm[ 2] = v3[0];
MyBase::mm[ 3] = v4[0];
MyBase::mm[ 4] = v1[1];
MyBase::mm[ 5] = v2[1];
MyBase::mm[ 6] = v3[1];
MyBase::mm[ 7] = v4[1];
MyBase::mm[ 8] = v1[2];
MyBase::mm[ 9] = v2[2];
MyBase::mm[10] = v3[2];
MyBase::mm[11] = v4[2];
MyBase::mm[12] = v1[3];
MyBase::mm[13] = v2[3];
MyBase::mm[14] = v3[3];
MyBase::mm[15] = v4[3];
}
// Set this matrix to zero
void setZero()
{
MyBase::mm[ 0] = 0;
MyBase::mm[ 1] = 0;
MyBase::mm[ 2] = 0;
MyBase::mm[ 3] = 0;
MyBase::mm[ 4] = 0;
MyBase::mm[ 5] = 0;
MyBase::mm[ 6] = 0;
MyBase::mm[ 7] = 0;
MyBase::mm[ 8] = 0;
MyBase::mm[ 9] = 0;
MyBase::mm[10] = 0;
MyBase::mm[11] = 0;
MyBase::mm[12] = 0;
MyBase::mm[13] = 0;
MyBase::mm[14] = 0;
MyBase::mm[15] = 0;
}
/// Set this matrix to identity
void setIdentity()
{
MyBase::mm[ 0] = 1;
MyBase::mm[ 1] = 0;
MyBase::mm[ 2] = 0;
MyBase::mm[ 3] = 0;
MyBase::mm[ 4] = 0;
MyBase::mm[ 5] = 1;
MyBase::mm[ 6] = 0;
MyBase::mm[ 7] = 0;
MyBase::mm[ 8] = 0;
MyBase::mm[ 9] = 0;
MyBase::mm[10] = 1;
MyBase::mm[11] = 0;
MyBase::mm[12] = 0;
MyBase::mm[13] = 0;
MyBase::mm[14] = 0;
MyBase::mm[15] = 1;
}
/// Set upper left to a Mat3
void setMat3(const Mat3<T> &m)
{
for (int i = 0; i < 3; i++)
for (int j=0; j < 3; j++)
MyBase::mm[i*4+j] = m[i][j];
}
Mat3<T> getMat3() const
{
Mat3<T> m;
for (int i = 0; i < 3; i++)
for (int j = 0; j < 3; j++)
m[i][j] = MyBase::mm[i*4+j];
return m;
}
    /// Return the translation component
    /// @note Row-major convention: the translation occupies the fourth row
    /// (linear indices 12..14).
    Vec3<T> getTranslation() const
    {
        return Vec3<T>(MyBase::mm[12], MyBase::mm[13], MyBase::mm[14]);
    }
    /// Set the translation component (fourth row) to @a t, leaving the rest
    /// of the matrix unchanged.
    void setTranslation(const Vec3<T> &t)
    {
        MyBase::mm[12] = t[0];
        MyBase::mm[13] = t[1];
        MyBase::mm[14] = t[2];
    }
    /// Assignment operator
    /// @note Element-wise conversion from a matrix of a different numeric type.
    template<typename Source>
    const Mat4& operator=(const Mat4<Source> &m)
    {
        const Source *src = m.asPointer();
        // don't suppress warnings when assigning from different numerical types
        std::copy(src, (src + this->numElements()), MyBase::mm);
        return *this;
    }
/// Return @c true if this matrix is equivalent to @a m within a tolerance of @a eps.
bool eq(const Mat4 &m, T eps=1.0e-8) const
{
for (int i = 0; i < 16; i++) {
if (!isApproxEqual(MyBase::mm[i], m.mm[i], eps))
return false;
}
return true;
}
/// Negation operator, for e.g. m1 = -m2;
Mat4<T> operator-() const
{
return Mat4<T>(
-MyBase::mm[ 0], -MyBase::mm[ 1], -MyBase::mm[ 2], -MyBase::mm[ 3],
-MyBase::mm[ 4], -MyBase::mm[ 5], -MyBase::mm[ 6], -MyBase::mm[ 7],
-MyBase::mm[ 8], -MyBase::mm[ 9], -MyBase::mm[10], -MyBase::mm[11],
-MyBase::mm[12], -MyBase::mm[13], -MyBase::mm[14], -MyBase::mm[15]
);
} // trivial
/// Multiply each element of this matrix by @a scalar.
template <typename S>
const Mat4<T>& operator*=(S scalar)
{
MyBase::mm[ 0] *= scalar;
MyBase::mm[ 1] *= scalar;
MyBase::mm[ 2] *= scalar;
MyBase::mm[ 3] *= scalar;
MyBase::mm[ 4] *= scalar;
MyBase::mm[ 5] *= scalar;
MyBase::mm[ 6] *= scalar;
MyBase::mm[ 7] *= scalar;
MyBase::mm[ 8] *= scalar;
MyBase::mm[ 9] *= scalar;
MyBase::mm[10] *= scalar;
MyBase::mm[11] *= scalar;
MyBase::mm[12] *= scalar;
MyBase::mm[13] *= scalar;
MyBase::mm[14] *= scalar;
MyBase::mm[15] *= scalar;
return *this;
}
/// Add each element of the given matrix to the corresponding element of this matrix.
template <typename S>
const Mat4<T> &operator+=(const Mat4<S> &m1)
{
const S* s = m1.asPointer();
MyBase::mm[ 0] += s[ 0];
MyBase::mm[ 1] += s[ 1];
MyBase::mm[ 2] += s[ 2];
MyBase::mm[ 3] += s[ 3];
MyBase::mm[ 4] += s[ 4];
MyBase::mm[ 5] += s[ 5];
MyBase::mm[ 6] += s[ 6];
MyBase::mm[ 7] += s[ 7];
MyBase::mm[ 8] += s[ 8];
MyBase::mm[ 9] += s[ 9];
MyBase::mm[10] += s[10];
MyBase::mm[11] += s[11];
MyBase::mm[12] += s[12];
MyBase::mm[13] += s[13];
MyBase::mm[14] += s[14];
MyBase::mm[15] += s[15];
return *this;
}
/// Subtract each element of the given matrix from the corresponding element of this matrix.
template <typename S>
const Mat4<T> &operator-=(const Mat4<S> &m1)
{
const S* s = m1.asPointer();
MyBase::mm[ 0] -= s[ 0];
MyBase::mm[ 1] -= s[ 1];
MyBase::mm[ 2] -= s[ 2];
MyBase::mm[ 3] -= s[ 3];
MyBase::mm[ 4] -= s[ 4];
MyBase::mm[ 5] -= s[ 5];
MyBase::mm[ 6] -= s[ 6];
MyBase::mm[ 7] -= s[ 7];
MyBase::mm[ 8] -= s[ 8];
MyBase::mm[ 9] -= s[ 9];
MyBase::mm[10] -= s[10];
MyBase::mm[11] -= s[11];
MyBase::mm[12] -= s[12];
MyBase::mm[13] -= s[13];
MyBase::mm[14] -= s[14];
MyBase::mm[15] -= s[15];
return *this;
}
/// Multiply this matrix by the given matrix.
template <typename S>
const Mat4<T> &operator*=(const Mat4<S> &m1)
{
Mat4<T> m0(*this);
const T* s0 = m0.asPointer();
const S* s1 = m1.asPointer();
for (int i = 0; i < 4; i++) {
int i4 = 4 * i;
MyBase::mm[i4+0] = static_cast<T>(s0[i4+0] * s1[ 0] +
s0[i4+1] * s1[ 4] +
s0[i4+2] * s1[ 8] +
s0[i4+3] * s1[12]);
MyBase::mm[i4+1] = static_cast<T>(s0[i4+0] * s1[ 1] +
s0[i4+1] * s1[ 5] +
s0[i4+2] * s1[ 9] +
s0[i4+3] * s1[13]);
MyBase::mm[i4+2] = static_cast<T>(s0[i4+0] * s1[ 2] +
s0[i4+1] * s1[ 6] +
s0[i4+2] * s1[10] +
s0[i4+3] * s1[14]);
MyBase::mm[i4+3] = static_cast<T>(s0[i4+0] * s1[ 3] +
s0[i4+1] * s1[ 7] +
s0[i4+2] * s1[11] +
s0[i4+3] * s1[15]);
}
return *this;
}
/// @return transpose of this
Mat4 transpose() const
{
return Mat4<T>(
MyBase::mm[ 0], MyBase::mm[ 4], MyBase::mm[ 8], MyBase::mm[12],
MyBase::mm[ 1], MyBase::mm[ 5], MyBase::mm[ 9], MyBase::mm[13],
MyBase::mm[ 2], MyBase::mm[ 6], MyBase::mm[10], MyBase::mm[14],
MyBase::mm[ 3], MyBase::mm[ 7], MyBase::mm[11], MyBase::mm[15]
);
}
    /// @return inverse of this
    /// @throw ArithmeticError if singular
    Mat4 inverse(T tolerance = 0) const
    {
        //
        // inv [ A | b ] = [ E | f ] A: 3x3, b: 3x1, c': 1x3 d: 1x1
        // [ c' | d ] [ g' | h ]
        //
        // If A is invertible use
        //
        // E = A^-1 + p*h*r
        // p = A^-1 * b
        // f = -p * h
        // g' = -h * c'
        // h = 1 / (d - c'*p)
        // r' = c'*A^-1
        //
        // Otherwise use gauss-jordan elimination
        //
        //
        // We create this alias to ourself so we can easily use own subscript
        // operator.
        const Mat4<T>& m(*this);
        // Products of upper-left 3x3 entries, reused below in detA and in E.
        T m0011 = m[0][0] * m[1][1];
        T m0012 = m[0][0] * m[1][2];
        T m0110 = m[0][1] * m[1][0];
        T m0210 = m[0][2] * m[1][0];
        T m0120 = m[0][1] * m[2][0];
        T m0220 = m[0][2] * m[2][0];
        // Determinant of the upper-left 3x3 block A.
        T detA = m0011 * m[2][2] - m0012 * m[2][1] - m0110 * m[2][2]
            + m0210 * m[2][1] + m0120 * m[1][2] - m0220 * m[1][1];
        // The last column differs from (0,0,0,1)^T, so the matrix has a
        // perspective component and the full 4x4 determinant is needed.
        bool hasPerspective =
            (!isExactlyEqual(m[0][3], T(0.0)) ||
             !isExactlyEqual(m[1][3], T(0.0)) ||
             !isExactlyEqual(m[2][3], T(0.0)) ||
             !isExactlyEqual(m[3][3], T(1.0)));
        T det;
        if (hasPerspective) {
            // Expansion along the fourth column.
            det = m[0][3] * det3(m, 1,2,3, 0,2,1)
                + m[1][3] * det3(m, 2,0,3, 0,2,1)
                + m[2][3] * det3(m, 3,0,1, 0,2,1)
                + m[3][3] * detA;
        } else {
            det = detA * m[3][3];
        }
        Mat4<T> inv;
        bool invertible;
        if (isApproxEqual(det,T(0.0),tolerance)) {
            // Singular within the caller-supplied tolerance.
            invertible = false;
        } else if (isApproxEqual(detA,T(0.0),T(1e-8))) {
            // det is too small to rely on inversion by subblocks
            invertible = m.invert(inv, tolerance);
        } else {
            invertible = true;
            detA = 1.0 / detA;
            //
            // Calculate A^-1
            //
            inv[0][0] = detA * ( m[1][1] * m[2][2] - m[1][2] * m[2][1]);
            inv[0][1] = detA * (-m[0][1] * m[2][2] + m[0][2] * m[2][1]);
            inv[0][2] = detA * ( m[0][1] * m[1][2] - m[0][2] * m[1][1]);
            inv[1][0] = detA * (-m[1][0] * m[2][2] + m[1][2] * m[2][0]);
            inv[1][1] = detA * ( m[0][0] * m[2][2] - m0220);
            inv[1][2] = detA * ( m0210 - m0012);
            inv[2][0] = detA * ( m[1][0] * m[2][1] - m[1][1] * m[2][0]);
            inv[2][1] = detA * ( m0120 - m[0][0] * m[2][1]);
            inv[2][2] = detA * ( m0011 - m0110);
            if (hasPerspective) {
                //
                // Calculate r, p, and h
                //
                Vec3<T> r;
                r[0] = m[3][0] * inv[0][0] + m[3][1] * inv[1][0]
                    + m[3][2] * inv[2][0];
                r[1] = m[3][0] * inv[0][1] + m[3][1] * inv[1][1]
                    + m[3][2] * inv[2][1];
                r[2] = m[3][0] * inv[0][2] + m[3][1] * inv[1][2]
                    + m[3][2] * inv[2][2];
                Vec3<T> p;
                p[0] = inv[0][0] * m[0][3] + inv[0][1] * m[1][3]
                    + inv[0][2] * m[2][3];
                p[1] = inv[1][0] * m[0][3] + inv[1][1] * m[1][3]
                    + inv[1][2] * m[2][3];
                p[2] = inv[2][0] * m[0][3] + inv[2][1] * m[1][3]
                    + inv[2][2] * m[2][3];
                T h = m[3][3] - p.dot(Vec3<T>(m[3][0],m[3][1],m[3][2]));
                if (isApproxEqual(h,T(0.0),tolerance)) {
                    // The Schur complement vanished: not invertible this way.
                    invertible = false;
                } else {
                    h = 1.0 / h;
                    //
                    // Calculate h, g, and f
                    //
                    inv[3][3] = h;
                    inv[3][0] = -h * r[0];
                    inv[3][1] = -h * r[1];
                    inv[3][2] = -h * r[2];
                    inv[0][3] = -h * p[0];
                    inv[1][3] = -h * p[1];
                    inv[2][3] = -h * p[2];
                    //
                    // Calculate E
                    //
                    p *= h;
                    inv[0][0] += p[0] * r[0];
                    inv[0][1] += p[0] * r[1];
                    inv[0][2] += p[0] * r[2];
                    inv[1][0] += p[1] * r[0];
                    inv[1][1] += p[1] * r[1];
                    inv[1][2] += p[1] * r[2];
                    inv[2][0] += p[2] * r[0];
                    inv[2][1] += p[2] * r[1];
                    inv[2][2] += p[2] * r[2];
                }
            } else {
                // Equations are much simpler in the non-perspective case
                inv[3][0] = - (m[3][0] * inv[0][0] + m[3][1] * inv[1][0]
                    + m[3][2] * inv[2][0]);
                inv[3][1] = - (m[3][0] * inv[0][1] + m[3][1] * inv[1][1]
                    + m[3][2] * inv[2][1]);
                inv[3][2] = - (m[3][0] * inv[0][2] + m[3][1] * inv[1][2]
                    + m[3][2] * inv[2][2]);
                inv[0][3] = 0.0;
                inv[1][3] = 0.0;
                inv[2][3] = 0.0;
                inv[3][3] = 1.0;
            }
        }
        if (!invertible) OPENVDB_THROW(ArithmeticError, "Inversion of singular 4x4 matrix");
        return inv;
    }
    /// Determinant of matrix
    /// @details Cofactor expansion along the first row: for each column i,
    /// build the 3x3 minor obtained by deleting row 0 and column i, then
    /// accumulate mm[i] * det(minor) with alternating sign.
    T det() const
    {
        const T *ap;
        Mat3<T> submat;
        T det;
        T *sp;
        int i, j, k, sign;
        det = 0;
        sign = 1;
        for (i = 0; i < 4; i++) {
            ap = &MyBase::mm[ 0];
            sp = submat.asPointer();
            // Copy every element that is neither in row 0 (j == 0) nor in
            // column i into the 3x3 minor.
            for (j = 0; j < 4; j++) {
                for (k = 0; k < 4; k++) {
                    if ((k != i) && (j != 0)) {
                        *sp++ = *ap;
                    }
                    ap++;
                }
            }
            det += T(sign) * MyBase::mm[i] * submat.det();
            sign = -sign;
        }
        return det;
    }
    /// Return a matrix that translates by @a v.
    /// @note Row-major convention: the translation occupies the fourth row.
    static Mat4 translation(const Vec3d& v)
    {
        return Mat4(
            T(1), T(0), T(0), T(0),
            T(0), T(1), T(0), T(0),
            T(0), T(0), T(1), T(0),
            T(v.x()), T(v.y()),T(v.z()), T(1));
    }
/// Sets the matrix to a matrix that translates by v
template <typename T0>
void setToTranslation(const Vec3<T0>& v)
{
MyBase::mm[ 0] = 1;
MyBase::mm[ 1] = 0;
MyBase::mm[ 2] = 0;
MyBase::mm[ 3] = 0;
MyBase::mm[ 4] = 0;
MyBase::mm[ 5] = 1;
MyBase::mm[ 6] = 0;
MyBase::mm[ 7] = 0;
MyBase::mm[ 8] = 0;
MyBase::mm[ 9] = 0;
MyBase::mm[10] = 1;
MyBase::mm[11] = 0;
MyBase::mm[12] = v.x();
MyBase::mm[13] = v.y();
MyBase::mm[14] = v.z();
MyBase::mm[15] = 1;
}
/// Left multiples by the specified translation, i.e. Trans * (*this)
template <typename T0>
void preTranslate(const Vec3<T0>& tr)
{
Vec3<T> tmp(tr.x(), tr.y(), tr.z());
Mat4<T> Tr = Mat4<T>::translation(tmp);
*this = Tr * (*this);
}
/// Right multiplies by the specified translation matrix, i.e. (*this) * Trans
template <typename T0>
void postTranslate(const Vec3<T0>& tr)
{
Vec3<T> tmp(tr.x(), tr.y(), tr.z());
Mat4<T> Tr = Mat4<T>::translation(tmp);
*this = (*this) * Tr;
}
    /// Sets the matrix to a matrix that scales by v
    template <typename T0>
    void setToScale(const Vec3<T0>& v)
    {
        // Identity with the scale factors written onto the diagonal.
        this->setIdentity();
        MyBase::mm[ 0] = v.x();
        MyBase::mm[ 5] = v.y();
        MyBase::mm[10] = v.z();
    }
// Left multiples by the specified scale matrix, i.e. Sc * (*this)
template <typename T0>
void preScale(const Vec3<T0>& v)
{
MyBase::mm[ 0] *= v.x();
MyBase::mm[ 1] *= v.x();
MyBase::mm[ 2] *= v.x();
MyBase::mm[ 3] *= v.x();
MyBase::mm[ 4] *= v.y();
MyBase::mm[ 5] *= v.y();
MyBase::mm[ 6] *= v.y();
MyBase::mm[ 7] *= v.y();
MyBase::mm[ 8] *= v.z();
MyBase::mm[ 9] *= v.z();
MyBase::mm[10] *= v.z();
MyBase::mm[11] *= v.z();
}
// Right multiples by the specified scale matrix, i.e. (*this) * Sc
template <typename T0>
void postScale(const Vec3<T0>& v)
{
MyBase::mm[ 0] *= v.x();
MyBase::mm[ 1] *= v.y();
MyBase::mm[ 2] *= v.z();
MyBase::mm[ 4] *= v.x();
MyBase::mm[ 5] *= v.y();
MyBase::mm[ 6] *= v.z();
MyBase::mm[ 8] *= v.x();
MyBase::mm[ 9] *= v.y();
MyBase::mm[10] *= v.z();
MyBase::mm[12] *= v.x();
MyBase::mm[13] *= v.y();
MyBase::mm[14] *= v.z();
}
    /// @brief Sets the matrix to a rotation about the given axis.
    /// @param axis The axis (one of X, Y, Z) to rotate about.
    /// @param angle The rotation angle, in radians.
    void setToRotation(Axis axis, T angle) {*this = rotation<Mat4<T> >(axis, angle);}
    /// @brief Sets the matrix to a rotation about an arbitrary axis
    /// @param axis The axis of rotation (cannot be zero-length)
    /// @param angle The rotation angle, in radians.
    void setToRotation(const Vec3<T>& axis, T angle) {*this = rotation<Mat4<T> >(axis, angle);}
    /// @brief Sets the matrix to a rotation that maps v1 onto v2 about the cross
    /// product of v1 and v2.
    void setToRotation(const Vec3<T>& v1, const Vec3<T>& v2) {*this = rotation<Mat4<T> >(v1, v2);}
    /// @brief Left multiplies by a clockwise rotation about the given axis into this matrix.
    /// @param axis The axis (one of X, Y, Z) of rotation.
    /// @param angle The clockwise rotation angle, in radians.
    void preRotate(Axis axis, T angle)
    {
        T c = static_cast<T>(cos(angle));
        T s = -static_cast<T>(sin(angle)); // the "-" makes it clockwise
        // For each axis, only the two rows orthogonal to that axis change;
        // they are combined pairwise by the rotation coefficients c and s.
        switch (axis) {
            case X_AXIS:
                {
                    // Rows 1 and 2 are rotated into each other.
                    T a4, a5, a6, a7;
                    a4 = c * MyBase::mm[ 4] - s * MyBase::mm[ 8];
                    a5 = c * MyBase::mm[ 5] - s * MyBase::mm[ 9];
                    a6 = c * MyBase::mm[ 6] - s * MyBase::mm[10];
                    a7 = c * MyBase::mm[ 7] - s * MyBase::mm[11];
                    MyBase::mm[ 8] = s * MyBase::mm[ 4] + c * MyBase::mm[ 8];
                    MyBase::mm[ 9] = s * MyBase::mm[ 5] + c * MyBase::mm[ 9];
                    MyBase::mm[10] = s * MyBase::mm[ 6] + c * MyBase::mm[10];
                    MyBase::mm[11] = s * MyBase::mm[ 7] + c * MyBase::mm[11];
                    MyBase::mm[ 4] = a4;
                    MyBase::mm[ 5] = a5;
                    MyBase::mm[ 6] = a6;
                    MyBase::mm[ 7] = a7;
                }
                break;
            case Y_AXIS:
                {
                    // Rows 0 and 2 are rotated into each other.
                    T a0, a1, a2, a3;
                    a0 = c * MyBase::mm[ 0] + s * MyBase::mm[ 8];
                    a1 = c * MyBase::mm[ 1] + s * MyBase::mm[ 9];
                    a2 = c * MyBase::mm[ 2] + s * MyBase::mm[10];
                    a3 = c * MyBase::mm[ 3] + s * MyBase::mm[11];
                    MyBase::mm[ 8] = -s * MyBase::mm[ 0] + c * MyBase::mm[ 8];
                    MyBase::mm[ 9] = -s * MyBase::mm[ 1] + c * MyBase::mm[ 9];
                    MyBase::mm[10] = -s * MyBase::mm[ 2] + c * MyBase::mm[10];
                    MyBase::mm[11] = -s * MyBase::mm[ 3] + c * MyBase::mm[11];
                    MyBase::mm[ 0] = a0;
                    MyBase::mm[ 1] = a1;
                    MyBase::mm[ 2] = a2;
                    MyBase::mm[ 3] = a3;
                }
                break;
            case Z_AXIS:
                {
                    // Rows 0 and 1 are rotated into each other.
                    T a0, a1, a2, a3;
                    a0 = c * MyBase::mm[ 0] - s * MyBase::mm[ 4];
                    a1 = c * MyBase::mm[ 1] - s * MyBase::mm[ 5];
                    a2 = c * MyBase::mm[ 2] - s * MyBase::mm[ 6];
                    a3 = c * MyBase::mm[ 3] - s * MyBase::mm[ 7];
                    MyBase::mm[ 4] = s * MyBase::mm[ 0] + c * MyBase::mm[ 4];
                    MyBase::mm[ 5] = s * MyBase::mm[ 1] + c * MyBase::mm[ 5];
                    MyBase::mm[ 6] = s * MyBase::mm[ 2] + c * MyBase::mm[ 6];
                    MyBase::mm[ 7] = s * MyBase::mm[ 3] + c * MyBase::mm[ 7];
                    MyBase::mm[ 0] = a0;
                    MyBase::mm[ 1] = a1;
                    MyBase::mm[ 2] = a2;
                    MyBase::mm[ 3] = a3;
                }
                break;
            default:
                assert(axis==X_AXIS || axis==Y_AXIS || axis==Z_AXIS);
        }
    }
/// @brief Right multiplies by a rotation clockwise about the given axis into this matrix.
/// @param axis The axis (one of X, Y, Z) of rotation.
/// @param angle The clockwise rotation angle, in radians.
void postRotate(Axis axis, T angle)
{
    T c = static_cast<T>(cos(angle));
    T s = -static_cast<T>(sin(angle)); // the "-" makes it clockwise
    // Computes (*this) * R in place.  Only the two *columns* spanned by
    // the rotation plane change; one set of old column values is
    // buffered in temporaries before being overwritten.
    switch (axis) {
        case X_AXIS:
            {
                // Columns 1 and 2 (elements 1,5,9,13 and 2,6,10,14) are mixed.
                T a2, a6, a10, a14;
                a2 = c * MyBase::mm[ 2] - s * MyBase::mm[ 1];
                a6 = c * MyBase::mm[ 6] - s * MyBase::mm[ 5];
                a10 = c * MyBase::mm[10] - s * MyBase::mm[ 9];
                a14 = c * MyBase::mm[14] - s * MyBase::mm[13];
                MyBase::mm[ 1] = c * MyBase::mm[ 1] + s * MyBase::mm[ 2];
                MyBase::mm[ 5] = c * MyBase::mm[ 5] + s * MyBase::mm[ 6];
                MyBase::mm[ 9] = c * MyBase::mm[ 9] + s * MyBase::mm[10];
                MyBase::mm[13] = c * MyBase::mm[13] + s * MyBase::mm[14];
                MyBase::mm[ 2] = a2;
                MyBase::mm[ 6] = a6;
                MyBase::mm[10] = a10;
                MyBase::mm[14] = a14;
            }
            break;
        case Y_AXIS:
            {
                // Columns 0 and 2 are mixed; sign pattern flipped
                // relative to X/Z.
                T a2, a6, a10, a14;
                a2 = c * MyBase::mm[ 2] + s * MyBase::mm[ 0];
                a6 = c * MyBase::mm[ 6] + s * MyBase::mm[ 4];
                a10 = c * MyBase::mm[10] + s * MyBase::mm[ 8];
                a14 = c * MyBase::mm[14] + s * MyBase::mm[12];
                MyBase::mm[ 0] = c * MyBase::mm[ 0] - s * MyBase::mm[ 2];
                MyBase::mm[ 4] = c * MyBase::mm[ 4] - s * MyBase::mm[ 6];
                MyBase::mm[ 8] = c * MyBase::mm[ 8] - s * MyBase::mm[10];
                MyBase::mm[12] = c * MyBase::mm[12] - s * MyBase::mm[14];
                MyBase::mm[ 2] = a2;
                MyBase::mm[ 6] = a6;
                MyBase::mm[10] = a10;
                MyBase::mm[14] = a14;
            }
            break;
        case Z_AXIS:
            {
                // Columns 0 and 1 are mixed.
                T a1, a5, a9, a13;
                a1 = c * MyBase::mm[ 1] - s * MyBase::mm[ 0];
                a5 = c * MyBase::mm[ 5] - s * MyBase::mm[ 4];
                a9 = c * MyBase::mm[ 9] - s * MyBase::mm[ 8];
                a13 = c * MyBase::mm[13] - s * MyBase::mm[12];
                MyBase::mm[ 0] = c * MyBase::mm[ 0] + s * MyBase::mm[ 1];
                MyBase::mm[ 4] = c * MyBase::mm[ 4] + s * MyBase::mm[ 5];
                MyBase::mm[ 8] = c * MyBase::mm[ 8] + s * MyBase::mm[ 9];
                MyBase::mm[12] = c * MyBase::mm[12] + s * MyBase::mm[13];
                MyBase::mm[ 1] = a1;
                MyBase::mm[ 5] = a5;
                MyBase::mm[ 9] = a9;
                MyBase::mm[13] = a13;
            }
            break;
        default:
            assert(axis==X_AXIS || axis==Y_AXIS || axis==Z_AXIS);
    }
}
/// @brief Sets the matrix to a shear along axis0 by a fraction of axis1.
/// @param axis0 The fixed axis of the shear.
/// @param axis1 The shear axis.
/// @param shearby The shear factor.
/// @note Delegates to the free function shear<Mat4<T> >().
void setToShear(Axis axis0, Axis axis1, T shearby)
{
    *this = shear<Mat4<T> >(axis0, axis1, shearby);
}
/// @brief Left multiplies a shearing transformation into the matrix.
/// @see setToShear
void preShear(Axis axis0, Axis axis1, T shear)
{
    // Left-multiplying by a shear adds shear * (row axis0) to row axis1.
    const int src = static_cast<int>(axis0) * 4;
    const int dst = static_cast<int>(axis1) * 4;
    for (int k = 0; k < 4; ++k) {
        MyBase::mm[dst + k] += shear * MyBase::mm[src + k];
    }
}
/// @brief Right multiplies a shearing transformation into the matrix.
/// @see setToShear
void postShear(Axis axis0, Axis axis1, T shear)
{
    // Right-multiplying by a shear adds shear * (column axis1) to
    // column axis0.
    const int dst = static_cast<int>(axis0);
    const int src = static_cast<int>(axis1);
    for (int r = 0; r < 16; r += 4) {
        MyBase::mm[dst + r] += shear * MyBase::mm[src + r];
    }
}
/// Transform a Vec4 by post-multiplication (row vector times matrix).
template<typename T0>
Vec4<T0> transform(const Vec4<T0> &v) const
{
    return static_cast< Vec4<T0> >(v * *this);
}
/// Transform a Vec3 by post-multiplication, without homogeneous division.
/// The input is treated as the homogeneous row vector (v, 1), so the
/// translation row is applied (see operator*(Vec3, Mat4)).
template<typename T0>
Vec3<T0> transform(const Vec3<T0> &v) const
{
    return static_cast< Vec3<T0> >(v * *this);
}
/// Transform a Vec4 by pre-multiplication (matrix times column vector).
template<typename T0>
Vec4<T0> pretransform(const Vec4<T0> &v) const
{
    return static_cast< Vec4<T0> >(*this * v);
}
/// Transform a Vec3 by pre-multiplication, without homogeneous division.
/// The input is treated as the homogeneous column vector (v, 1), so the
/// translation column is applied (see operator*(Mat4, Vec3)).
template<typename T0>
Vec3<T0> pretransform(const Vec3<T0> &v) const
{
    return static_cast< Vec3<T0> >(*this * v);
}
/// Transform a Vec3 by post-multiplication, doing homogeneous division.
/// @note If the resulting homogeneous coordinate w is exactly zero
/// (point at infinity), returns the zero vector.
template<typename T0>
Vec3<T0> transformH(const Vec3<T0> &p) const
{
    T0 w;
    // w is the dot product of the homogeneous row vector (p, 1) with
    // the matrix's fourth column.
    // w = p * (*this).col(3);
    w = static_cast<T0>(p[0] * MyBase::mm[ 3] + p[1] * MyBase::mm[ 7]
        + p[2] * MyBase::mm[11] + MyBase::mm[15]);
    if ( !isExactlyEqual(w , 0.0) ) {
        // Dot (p, 1) with the first three columns and divide through by w.
        return Vec3<T0>(static_cast<T0>((p[0] * MyBase::mm[ 0] + p[1] * MyBase::mm[ 4] +
                    p[2] * MyBase::mm[ 8] + MyBase::mm[12]) / w),
            static_cast<T0>((p[0] * MyBase::mm[ 1] + p[1] * MyBase::mm[ 5] +
                    p[2] * MyBase::mm[ 9] + MyBase::mm[13]) / w),
            static_cast<T0>((p[0] * MyBase::mm[ 2] + p[1] * MyBase::mm[ 6] +
                    p[2] * MyBase::mm[10] + MyBase::mm[14]) / w));
    }
    return Vec3<T0>(0, 0, 0);
}
/// Transform a Vec3 by pre-multiplication, doing homogeneous division.
/// @note If the resulting homogeneous coordinate w is exactly zero
/// (point at infinity), returns the zero vector.
template<typename T0>
Vec3<T0> pretransformH(const Vec3<T0> &p) const
{
    T0 w;
    // w is the dot product of the homogeneous column vector (p, 1)
    // with the matrix's fourth row (elements 12..15).
    // w = p * (*this).col(3);
    w = p[0] * MyBase::mm[12] + p[1] * MyBase::mm[13] + p[2] * MyBase::mm[14] + MyBase::mm[15];
    if ( !isExactlyEqual(w , 0.0) ) {
        // Dot (p, 1) with the first three rows and divide through by w.
        return Vec3<T0>(static_cast<T0>((p[0] * MyBase::mm[ 0] + p[1] * MyBase::mm[ 1] +
                    p[2] * MyBase::mm[ 2] + MyBase::mm[ 3]) / w),
            static_cast<T0>((p[0] * MyBase::mm[ 4] + p[1] * MyBase::mm[ 5] +
                    p[2] * MyBase::mm[ 6] + MyBase::mm[ 7]) / w),
            static_cast<T0>((p[0] * MyBase::mm[ 8] + p[1] * MyBase::mm[ 9] +
                    p[2] * MyBase::mm[10] + MyBase::mm[11]) / w));
    }
    return Vec3<T0>(0, 0, 0);
}
/// Transform a Vec3 by post-multiplication, without translation.
template<typename T0>
Vec3<T0> transform3x3(const Vec3<T0> &v) const
{
    // Row vector times the upper-left 3x3 block only; the translation
    // row (elements 12..14) is deliberately excluded.
    const T* m = MyBase::mm;
    return Vec3<T0>(
        static_cast<T0>(v[0]*m[0] + v[1]*m[4] + v[2]*m[ 8]),
        static_cast<T0>(v[0]*m[1] + v[1]*m[5] + v[2]*m[ 9]),
        static_cast<T0>(v[0]*m[2] + v[1]*m[6] + v[2]*m[10]));
}
private:
    /// @brief Invert via Gauss-Jordan elimination; defined out of line below.
    bool invert(Mat4<T> &inverse, T tolerance) const;

    /// @brief 2x2 determinant of the submatrix of @a a formed by
    /// rows i0, i1 and columns j0, j1.
    T det2(const Mat4<T> &a, int i0, int i1, int j0, int j1) const {
        int i0row = i0 * 4;
        int i1row = i1 * 4;
        return a.mm[i0row+j0]*a.mm[i1row+j1] - a.mm[i0row+j1]*a.mm[i1row+j0];
    }

    /// @brief 3x3 determinant of the submatrix of @a a formed by
    /// rows i0, i1, i2 and columns j0, j1, j2, expanded along row i0.
    /// @note The cyclic column orders (j1,j2), (j2,j0), (j0,j1) fold the
    /// cofactor signs into the 2x2 determinants, so all terms are added.
    T det3(const Mat4<T> &a, int i0, int i1, int i2,
           int j0, int j1, int j2) const {
        int i0row = i0 * 4;
        return a.mm[i0row+j0]*det2(a, i1,i2, j1,j2) +
               a.mm[i0row+j1]*det2(a, i1,i2, j2,j0) +
               a.mm[i0row+j2]*det2(a, i1,i2, j0,j1);
    }
}; // class Mat4
/// @relates Mat4
/// @brief Equality operator, does exact floating point comparisons
template <typename T0, typename T1>
bool operator==(const Mat4<T0> &m0, const Mat4<T1> &m1)
{
    // Compare all 16 elements exactly (no tolerance), bailing out on
    // the first mismatch.
    const T0 *p0 = m0.asPointer();
    const T1 *p1 = m1.asPointer();
    for (int i = 0; i < 16; ++i) {
        if (!isExactlyEqual(p0[i], p1[i])) return false;
    }
    return true;
}
/// @relates Mat4
/// @brief Inequality operator, does exact floating point comparisons
/// (simply the negation of operator==).
template <typename T0, typename T1>
bool operator!=(const Mat4<T0> &m0, const Mat4<T1> &m1) { return !(m0 == m1); }
/// @relates Mat4
/// @brief Multiply each element of the given matrix by @a scalar and return the result.
template <typename S, typename T>
Mat4<typename promote<S, T>::type> operator*(S scalar, const Mat4<T> &m)
{
    // Scalar multiplication commutes, so delegate to the
    // matrix-times-scalar overload.
    return m * scalar;
}
/// @relates Mat4
/// @brief Multiply each element of the given matrix by @a scalar and return the result.
template <typename S, typename T>
Mat4<typename promote<S, T>::type> operator*(const Mat4<T> &m, S scalar)
{
    // Widen to the promoted element type first, then scale in place.
    Mat4<typename promote<S, T>::type> product(m);
    product *= scalar;
    return product;
}
/// @relates Mat4
/// @brief Multiply @a _m by @a _v and return the resulting vector.
/// Each output component is the dot product of a matrix row with @a _v
/// (column-vector convention).
template<typename T, typename MT>
inline Vec4<typename promote<T, MT>::type>
operator*(const Mat4<MT> &_m,
    const Vec4<T> &_v)
{
    MT const *m = _m.asPointer();
    return Vec4<typename promote<T, MT>::type>(
        _v[0]*m[0] + _v[1]*m[1] + _v[2]*m[2] + _v[3]*m[3],
        _v[0]*m[4] + _v[1]*m[5] + _v[2]*m[6] + _v[3]*m[7],
        _v[0]*m[8] + _v[1]*m[9] + _v[2]*m[10] + _v[3]*m[11],
        _v[0]*m[12] + _v[1]*m[13] + _v[2]*m[14] + _v[3]*m[15]);
}
/// @relates Mat4
/// @brief Multiply @a _v by @a _m and return the resulting vector.
/// Each output component is the dot product of @a _v with a matrix
/// column (row-vector convention).
template<typename T, typename MT>
inline Vec4<typename promote<T, MT>::type>
operator*(const Vec4<T> &_v,
    const Mat4<MT> &_m)
{
    MT const *m = _m.asPointer();
    return Vec4<typename promote<T, MT>::type>(
        _v[0]*m[0] + _v[1]*m[4] + _v[2]*m[8] + _v[3]*m[12],
        _v[0]*m[1] + _v[1]*m[5] + _v[2]*m[9] + _v[3]*m[13],
        _v[0]*m[2] + _v[1]*m[6] + _v[2]*m[10] + _v[3]*m[14],
        _v[0]*m[3] + _v[1]*m[7] + _v[2]*m[11] + _v[3]*m[15]);
}
/// @relates Mat4
/// @brief Multiply @a _m by @a _v and return the resulting vector.
/// @a _v is treated as the homogeneous column vector (v, 1): the fourth
/// column's entries (m[3], m[7], m[11]) are added as a translation, and
/// no homogeneous division is performed.
template<typename T, typename MT>
inline Vec3<typename promote<T, MT>::type>
operator*(const Mat4<MT> &_m, const Vec3<T> &_v)
{
    MT const *m = _m.asPointer();
    return Vec3<typename promote<T, MT>::type>(
        _v[0]*m[0] + _v[1]*m[1] + _v[2]*m[2] + m[3],
        _v[0]*m[4] + _v[1]*m[5] + _v[2]*m[6] + m[7],
        _v[0]*m[8] + _v[1]*m[9] + _v[2]*m[10] + m[11]);
}
/// @relates Mat4
/// @brief Multiply @a _v by @a _m and return the resulting vector.
/// @a _v is treated as the homogeneous row vector (v, 1): the fourth
/// row's entries (m[12..14]) are added as a translation, and no
/// homogeneous division is performed.
template<typename T, typename MT>
inline Vec3<typename promote<T, MT>::type>
operator*(const Vec3<T> &_v, const Mat4<MT> &_m)
{
    MT const *m = _m.asPointer();
    return Vec3<typename promote<T, MT>::type>(
        _v[0]*m[0] + _v[1]*m[4] + _v[2]*m[8] + m[12],
        _v[0]*m[1] + _v[1]*m[5] + _v[2]*m[9] + m[13],
        _v[0]*m[2] + _v[1]*m[6] + _v[2]*m[10] + m[14]);
}
/// @relates Mat4
/// @brief Add corresponding elements of @a m0 and @a m1 and return the result.
template <typename T0, typename T1>
Mat4<typename promote<T0, T1>::type>
operator+(const Mat4<T0> &m0, const Mat4<T1> &m1)
{
    // Widen m0 to the promoted element type, then accumulate m1 in place.
    Mat4<typename promote<T0, T1>::type> sum(m0);
    sum += m1;
    return sum;
}
/// @relates Mat4
/// @brief Subtract corresponding elements of @a m0 and @a m1 and return the result.
template <typename T0, typename T1>
Mat4<typename promote<T0, T1>::type>
operator-(const Mat4<T0> &m0, const Mat4<T1> &m1)
{
    // Widen m0 to the promoted element type, then subtract m1 in place.
    Mat4<typename promote<T0, T1>::type> difference(m0);
    difference -= m1;
    return difference;
}
/// @relates Mat4
/// @brief Multiply @a m0 by @a m1 and return the resulting matrix.
template <typename T0, typename T1>
Mat4<typename promote<T0, T1>::type>
operator*(const Mat4<T0> &m0, const Mat4<T1> &m1)
{
    // Widen m0 to the promoted element type, then right-multiply by m1.
    Mat4<typename promote<T0, T1>::type> product(m0);
    product *= m1;
    return product;
}
/// Transform a Vec3 by pre-multiplication, without translation.
/// Presumes this matrix is inverse of coordinate transform
/// Synonymous to "pretransform3x3"
/// @note Only the upper-left 3x3 block is used (each output component is
/// a matrix row dotted with @a n); translation is ignored, as required
/// when transforming normals.
template<typename T0, typename T1>
Vec3<T1> transformNormal(const Mat4<T0> &m, const Vec3<T1> &n)
{
    return Vec3<T1>(
        static_cast<T1>(m[0][0]*n[0] + m[0][1]*n[1] + m[0][2]*n[2]),
        static_cast<T1>(m[1][0]*n[0] + m[1][1]*n[1] + m[1][2]*n[2]),
        static_cast<T1>(m[2][0]*n[0] + m[2][1]*n[1] + m[2][2]*n[2]));
}
/// Invert via gauss-jordan elimination. Modified from dreamworks internal mx library
/// @param inverse   Output: receives the inverse on success.
/// @param tolerance The inversion succeeds only if |det| >= |tolerance|.
/// @return false if the matrix is singular or the determinant is below tolerance.
template<typename T>
bool Mat4<T>::invert(Mat4<T> &inverse, T tolerance) const
{
    // Row-reduce a working copy to the identity while applying the same
    // operations to "inverse", which therefore accumulates this->inverse().
    Mat4<T> temp(*this);
    inverse.setIdentity();

    // Forward elimination step
    double det = 1.0;
    for (int i = 0; i < 4; ++i) {
        // Partial pivoting: choose the row (at or below i) with the
        // largest absolute value in column i.
        int row = i;
        double max = fabs(temp[i][i]);
        for (int k = i+1; k < 4; ++k) {
            if (fabs(temp[k][i]) > max) {
                row = k;
                max = fabs(temp[k][i]);
            }
        }
        // An all-zero pivot column means the matrix is singular.
        if (isExactlyEqual(max, 0.0)) return false;
        // must move pivot to row i
        if (row != i) {
            det = -det; // a row swap flips the determinant's sign
            for (int k = 0; k < 4; ++k) {
                std::swap(temp[row][k], temp[i][k]);
                std::swap(inverse[row][k], inverse[i][k]);
            }
        }
        double pivot = temp[i][i];
        det *= pivot; // det accumulates the product of the pivots
        // scale row i
        for (int k = 0; k < 4; ++k) {
            temp[i][k] /= pivot;
            inverse[i][k] /= pivot;
        }
        // eliminate in rows below i
        for (int j = i+1; j < 4; ++j) {
            double t = temp[j][i];
            if (!isExactlyEqual(t, 0.0)) {
                // subtract scaled row i from row j
                for (int k = 0; k < 4; ++k) {
                    temp[j][k] -= temp[i][k] * t;
                    inverse[j][k] -= inverse[i][k] * t;
                }
            }
        }
    }

    // Backward elimination step: clear the entries above the diagonal.
    // (temp is now upper unitriangular, so only "inverse" needs updating.)
    for (int i = 3; i > 0; --i) {
        for (int j = 0; j < i; ++j) {
            double t = temp[j][i];
            if (!isExactlyEqual(t, 0.0)) {
                for (int k = 0; k < 4; ++k) {
                    inverse[j][k] -= inverse[i][k]*t;
                }
            }
        }
    }
    // Squaring both sides compares |det| with |tolerance| without
    // needing absolute values.
    return det*det >= tolerance*tolerance;
}
/// @brief Return @c true if the matrix is affine, i.e. its fourth
/// column is exactly (0, 0, 0, 1).
template <typename T>
inline bool isAffine(const Mat4<T>& m) {
    return (m.col(3) == Vec4<T>(0, 0, 0, 1));
}

/// @brief Return @c true if the matrix has a translation component,
/// i.e. its fourth row differs from (0, 0, 0, 1).
template <typename T>
inline bool hasTranslation(const Mat4<T>& m) {
    return (m.row(3) != Vec4<T>(0, 0, 0, 1));
}
/// @brief Return a copy of @a m with every element replaced by its
/// absolute value.
template<typename T>
inline Mat4<T>
Abs(const Mat4<T>& m)
{
    Mat4<T> result;
    const T* src = m.asPointer();
    T* dst = result.asPointer();
    for (unsigned i = 0; i < 16; ++i) dst[i] = math::Abs(src[i]);
    return result;
}
/// @brief Return a copy of @a m with the scalar @a s added to every element.
template<typename Type1, typename Type2>
inline Mat4<Type1>
cwiseAdd(const Mat4<Type1>& m, const Type2 s)
{
    Mat4<Type1> out;
    const Type1* ip = m.asPointer();
    Type1* op = out.asPointer();
    for (unsigned i = 0; i < 16; ++i, ++op, ++ip) {
        // The macros suppress implicit-conversion warnings for the
        // mixed Type1/Type2 addition.
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        *op = *ip + s;
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END
    }
    return out;
}
/// @brief Componentwise less-than comparison of two Mat4s; delegates to
/// the generic matrix-sized cwiseLessThan<4, T> overload declared
/// elsewhere (see that overload for the exact comparison semantics).
template<typename T>
inline bool
cwiseLessThan(const Mat4<T>& m0, const Mat4<T>& m1)
{
    return cwiseLessThan<4, T>(m0, m1);
}

/// @brief Componentwise greater-than comparison of two Mat4s; delegates
/// to the generic matrix-sized cwiseGreaterThan<4, T> overload declared
/// elsewhere.
template<typename T>
inline bool
cwiseGreaterThan(const Mat4<T>& m0, const Mat4<T>& m1)
{
    return cwiseGreaterThan<4, T>(m0, m1);
}
using Mat4s = Mat4<float>;
using Mat4d = Mat4<double>;
using Mat4f = Mat4d; // NOTE: Mat4f aliases the double-precision Mat4d, not Mat4<float>

#if OPENVDB_ABI_VERSION_NUMBER >= 8
OPENVDB_IS_POD(Mat4s)
OPENVDB_IS_POD(Mat4d)
#endif

} // namespace math

/// Return the zero matrix as the "zero value" for Mat4 types.
template<> inline math::Mat4s zeroVal<math::Mat4s>() { return math::Mat4s::zero(); }
template<> inline math::Mat4d zeroVal<math::Mat4d>() { return math::Mat4d::zero(); }
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_UTIL_MAT4_H_HAS_BEEN_INCLUDED
| 42,893 | C | 29.948052 | 100 | 0.44984 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/math/Ray.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file Ray.h
///
/// @author Ken Museth
///
/// @brief A Ray class.
#ifndef OPENVDB_MATH_RAY_HAS_BEEN_INCLUDED
#define OPENVDB_MATH_RAY_HAS_BEEN_INCLUDED
#include "Math.h"
#include "Vec3.h"
#include "Transform.h"
#include <algorithm> // for std::swap()
#include <iostream> // for std::ostream
#include <limits> // for std::numeric_limits<Type>::max()
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace math {
/// @brief A ray defined by an origin ("eye"), a direction and a valid
/// time interval [t0, t1].  The reciprocal of the direction is cached
/// for use by the slab-based bounding-box intersection test.
template<typename RealT = double>
class Ray
{
public:
    static_assert(std::is_floating_point<RealT>::value,
        "math::Ray requires a floating-point value type");

    using RealType = RealT;
    using Vec3Type = Vec3<RealT>;
    using Vec3T = Vec3Type;

    /// @brief A closed time interval [t0, t1] along a ray.
    struct TimeSpan {
        RealT t0, t1;
        /// @brief Default constructor
        TimeSpan() {}
        /// @brief Constructor
        TimeSpan(RealT _t0, RealT _t1) : t0(_t0), t1(_t1) {}
        /// @brief Set both times
        inline void set(RealT _t0, RealT _t1) { t0=_t0; t1=_t1; }
        /// @brief Get both times
        inline void get(RealT& _t0, RealT& _t1) const { _t0=t0; _t1=t1; }
        /// @brief Return @c true if t1 is larger than t0 by at least eps.
        inline bool valid(RealT eps=math::Delta<RealT>::value()) const { return (t1-t0)>eps; }
        /// @brief Return the midpoint of the ray.
        inline RealT mid() const { return 0.5*(t0 + t1); }
        /// @brief Multiplies both times
        inline void scale(RealT s) {assert(s>0); t0*=s; t1*=s; }
        /// @brief Return @c true if time is inclusive
        inline bool test(RealT t) const { return (t>=t0 && t<=t1); }
    };

    /// @brief Constructor.  Caches 1/direction componentwise in mInvDir.
    Ray(const Vec3Type& eye = Vec3Type(0,0,0),
        const Vec3Type& direction = Vec3Type(1,0,0),
        RealT t0 = math::Delta<RealT>::value(),
        RealT t1 = std::numeric_limits<RealT>::max())
        : mEye(eye), mDir(direction), mInvDir(1/mDir), mTimeSpan(t0, t1)
    {
    }

    /// @brief Set the origin of the ray.
    inline void setEye(const Vec3Type& eye) { mEye = eye; }

    /// @brief Set the direction of the ray and refresh the cached reciprocal.
    inline void setDir(const Vec3Type& dir)
    {
        mDir = dir;
        mInvDir = 1/mDir;
    }

    /// @brief Set the start time (must be positive).
    inline void setMinTime(RealT t0) { assert(t0>0); mTimeSpan.t0 = t0; }

    /// @brief Set the end time (must be positive).
    inline void setMaxTime(RealT t1) { assert(t1>0); mTimeSpan.t1 = t1; }

    /// @brief Set both the start and end times (both must be positive).
    inline void setTimes(
        RealT t0 = math::Delta<RealT>::value(),
        RealT t1 = std::numeric_limits<RealT>::max())
    {
        assert(t0>0 && t1>0);
        mTimeSpan.set(t0, t1);
    }

    /// @brief Scale both times by the given (positive) factor.
    inline void scaleTimes(RealT scale) { mTimeSpan.scale(scale); }

    /// @brief Reset origin, direction and time interval in one call.
    inline void reset(
        const Vec3Type& eye,
        const Vec3Type& direction,
        RealT t0 = math::Delta<RealT>::value(),
        RealT t1 = std::numeric_limits<RealT>::max())
    {
        this->setEye(eye);
        this->setDir(direction);
        this->setTimes(t0, t1);
    }

    inline const Vec3T& eye() const {return mEye;}
    inline const Vec3T& dir() const {return mDir;}
    inline const Vec3T& invDir() const {return mInvDir;}
    inline RealT t0() const {return mTimeSpan.t0;}
    inline RealT t1() const {return mTimeSpan.t1;}

    /// @brief Return the position along the ray at the specified time.
    /// @note Returns Vec3R (not Vec3T) — presumably always double
    /// precision regardless of RealT; confirm against callers.
    inline Vec3R operator()(RealT time) const { return mEye + mDir * time; }

    /// @brief Return the starting point of the ray.
    inline Vec3R start() const { return (*this)(mTimeSpan.t0); }

    /// @brief Return the endpoint of the ray.
    inline Vec3R end() const { return (*this)(mTimeSpan.t1); }

    /// @brief Return the midpoint of the ray.
    inline Vec3R mid() const { return (*this)(mTimeSpan.mid()); }

    /// @brief Return @c true if t1 is larger than t0 by at least eps.
    /// @note NOTE(review): the default eps is Delta<float>, not
    /// Delta<RealT> as in TimeSpan::valid — confirm the coarser default
    /// is intentional for double-precision rays.
    inline bool valid(RealT eps=math::Delta<float>::value()) const { return mTimeSpan.valid(eps); }

    /// @brief Return @c true if @a time is within t0 and t1, both inclusive.
    inline bool test(RealT time) const { return mTimeSpan.test(time); }

    /// @brief Return a new Ray that is transformed with the specified map.
    /// @param map the map from which to construct the new Ray.
    /// @warning Assumes a linear map and a normalized direction.
    /// @details The requirement that the direction is normalized
    /// follows from the transformation of t0 and t1 - and that fact that
    /// we want applyMap and applyInverseMap to be inverse operations.
    template<typename MapType>
    inline Ray applyMap(const MapType& map) const
    {
        assert(map.isLinear());
        assert(math::isRelOrApproxEqual(mDir.length(), RealT(1),
            Tolerance<RealT>::value(), Delta<RealT>::value()));
        const Vec3T eye = map.applyMap(mEye);
        const Vec3T dir = map.applyJacobian(mDir);
        // Re-normalize the mapped direction and rescale the time
        // interval by the same length so world positions are preserved.
        const RealT length = dir.length();
        return Ray(eye, dir/length, length*mTimeSpan.t0, length*mTimeSpan.t1);
    }

    /// @brief Return a new Ray that is transformed with the inverse of the specified map.
    /// @param map the map from which to construct the new Ray by inverse mapping.
    /// @warning Assumes a linear map and a normalized direction.
    /// @details The requirement that the direction is normalized
    /// follows from the transformation of t0 and t1 - and that fact that
    /// we want applyMap and applyInverseMap to be inverse operations.
    template<typename MapType>
    inline Ray applyInverseMap(const MapType& map) const
    {
        assert(map.isLinear());
        assert(math::isRelOrApproxEqual(mDir.length(), RealT(1), Tolerance<RealT>::value(), Delta<RealT>::value()));
        const Vec3T eye = map.applyInverseMap(mEye);
        const Vec3T dir = map.applyInverseJacobian(mDir);
        const RealT length = dir.length();
        return Ray(eye, dir/length, length*mTimeSpan.t0, length*mTimeSpan.t1);
    }

    /// @brief Return a new ray in world space, assuming the existing
    /// ray is represented in the index space of the specified grid.
    template<typename GridType>
    inline Ray indexToWorld(const GridType& grid) const
    {
        return this->applyMap(*(grid.transform().baseMap()));
    }

    /// @brief Return a new ray in the index space of the specified
    /// grid, assuming the existing ray is represented in world space.
    template<typename GridType>
    inline Ray worldToIndex(const GridType& grid) const
    {
        return this->applyInverseMap(*(grid.transform().baseMap()));
    }

    /// @brief Return true if this ray intersects the specified sphere.
    /// @param center The center of the sphere in the same space as this ray.
    /// @param radius The radius of the sphere in the same units as this ray.
    /// @param t0 The first intersection point if an intersection exists.
    /// @param t1 The second intersection point if an intersection exists.
    /// @note If the return value is true, i.e. a hit, and t0 =
    /// this->t0() or t1 == this->t1() only one true intersection exist.
    inline bool intersects(const Vec3T& center, RealT radius, RealT& t0, RealT& t1) const
    {
        // Solve the quadratic |eye + t*dir - center|^2 = radius^2.
        const Vec3T origin = mEye - center;
        const RealT A = mDir.lengthSqr();
        const RealT B = 2 * mDir.dot(origin);
        const RealT C = origin.lengthSqr() - radius * radius;
        const RealT D = B * B - 4 * A * C;

        if (D < 0) return false; // negative discriminant: ray misses the sphere

        // Numerically stable root computation: compute the larger-magnitude
        // root via Q, then derive the other as C/Q (avoids cancellation).
        const RealT Q = RealT(-0.5)*(B<0 ? (B + Sqrt(D)) : (B - Sqrt(D)));

        t0 = Q / A;
        t1 = C / Q;

        if (t0 > t1) std::swap(t0, t1);
        // Clip the hit interval to the ray's own time span.
        if (t0 < mTimeSpan.t0) t0 = mTimeSpan.t0;
        if (t1 > mTimeSpan.t1) t1 = mTimeSpan.t1;
        return t0 <= t1;
    }

    /// @brief Return true if this ray intersects the specified sphere.
    /// @param center The center of the sphere in the same space as this ray.
    /// @param radius The radius of the sphere in the same units as this ray.
    inline bool intersects(const Vec3T& center, RealT radius) const
    {
        RealT t0, t1;
        // NOTE(review): the ">0" is applied to a bool and is redundant;
        // it evaluates to the bool itself.
        return this->intersects(center, radius, t0, t1)>0;
    }

    /// @brief Return true if this ray intersects the specified sphere.
    /// @note For intersection this ray is clipped to the two intersection points.
    /// @param center The center of the sphere in the same space as this ray.
    /// @param radius The radius of the sphere in the same units as this ray.
    inline bool clip(const Vec3T& center, RealT radius)
    {
        RealT t0, t1;
        const bool hit = this->intersects(center, radius, t0, t1);
        if (hit) mTimeSpan.set(t0, t1);
        return hit;
    }

    /// @brief Return true if the Ray intersects the specified
    /// axisaligned bounding box.
    /// @param bbox Axis-aligned bounding box in the same space as the Ray.
    /// @param t0 If an intersection is detected this is assigned
    /// the time for the first intersection point.
    /// @param t1 If an intersection is detected this is assigned
    /// the time for the second intersection point.
    template<typename BBoxT>
    inline bool intersects(const BBoxT& bbox, RealT& t0, RealT& t1) const
    {
        // Slab method: intersect the ray's time span with the entry/exit
        // interval of each axis-aligned slab; uses the cached 1/dir.
        mTimeSpan.get(t0, t1);
        for (int i = 0; i < 3; ++i) {
            RealT a = (bbox.min()[i] - mEye[i]) * mInvDir[i];
            RealT b = (bbox.max()[i] - mEye[i]) * mInvDir[i];
            if (a > b) std::swap(a, b);
            if (a > t0) t0 = a;
            if (b < t1) t1 = b;
            if (t0 > t1) return false; // the running interval became empty
        }
        return true;
    }

    /// @brief Return true if this ray intersects the specified bounding box.
    /// @param bbox Axis-aligned bounding box in the same space as this ray.
    template<typename BBoxT>
    inline bool intersects(const BBoxT& bbox) const
    {
        RealT t0, t1;
        return this->intersects(bbox, t0, t1);
    }

    /// @brief Return true if this ray intersects the specified bounding box.
    /// @note For intersection this ray is clipped to the two intersection points.
    /// @param bbox Axis-aligned bounding box in the same space as this ray.
    template<typename BBoxT>
    inline bool clip(const BBoxT& bbox)
    {
        RealT t0, t1;
        const bool hit = this->intersects(bbox, t0, t1);
        if (hit) mTimeSpan.set(t0, t1);
        return hit;
    }

    /// @brief Return true if the Ray intersects the plane specified
    /// by a normal and distance from the origin.
    /// @param normal Normal of the plane.
    /// @param distance Distance of the plane to the origin.
    /// @param t Time of intersection, if one exists.
    inline bool intersects(const Vec3T& normal, RealT distance, RealT& t) const
    {
        const RealT cosAngle = mDir.dot(normal);
        if (math::isApproxZero(cosAngle)) return false;//parallel
        t = (distance - mEye.dot(normal))/cosAngle;
        return this->test(t);
    }

    /// @brief Return true if the Ray intersects the plane specified
    /// by a normal and point.
    /// @param normal Normal of the plane.
    /// @param point Point in the plane.
    /// @param t Time of intersection, if one exists.
    inline bool intersects(const Vec3T& normal, const Vec3T& point, RealT& t) const
    {
        return this->intersects(normal, point.dot(normal), t);
    }

private:
    Vec3T mEye, mDir, mInvDir; // origin, direction and cached 1/direction
    TimeSpan mTimeSpan;        // valid time interval [t0, t1]

}; // end of Ray class
/// @brief Output streaming of the Ray class.
/// @note Primarily intended for debugging.
template<typename RealT>
inline std::ostream& operator<<(std::ostream& os, const Ray<RealT>& r)
{
    // Stream the origin, direction, cached reciprocal direction and the
    // time interval, returning the stream in a single chained expression.
    return os << "eye=" << r.eye() << " dir=" << r.dir() << " 1/dir="<<r.invDir()
              << " t0=" << r.t0() << " t1=" << r.t1();
}
} // namespace math
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_MATH_RAY_HAS_BEEN_INCLUDED
| 11,761 | C | 36.698718 | 116 | 0.623756 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/io/File.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file File.h
#ifndef OPENVDB_IO_FILE_HAS_BEEN_INCLUDED
#define OPENVDB_IO_FILE_HAS_BEEN_INCLUDED
#include <openvdb/version.h>
#include "io.h" // for MappedFile::Notifier
#include "Archive.h"
#include "GridDescriptor.h"
#include <algorithm> // for std::copy()
#include <iosfwd>
#include <iterator> // for std::back_inserter()
#include <map>
#include <memory>
#include <string>
class TestFile;
class TestStream;
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace io {
/// Grid archive associated with a file on disk
class OPENVDB_API File: public Archive
{
public:
using NameMap = std::multimap<Name, GridDescriptor>;
using NameMapCIter = NameMap::const_iterator;
explicit File(const std::string& filename);
~File() override;
/// @brief Copy constructor
/// @details The copy will be closed and will not reference the same
/// file descriptor as the original.
File(const File& other);
/// @brief Assignment
/// @details After assignment, this File will be closed and will not
/// reference the same file descriptor as the source File.
File& operator=(const File& other);
/// @brief Return a copy of this archive.
/// @details The copy will be closed and will not reference the same
/// file descriptor as the original.
SharedPtr<Archive> copy() const override;
/// @brief Return the name of the file with which this archive is associated.
/// @details The file does not necessarily exist on disk yet.
const std::string& filename() const;
/// @brief Open the file, read the file header and the file-level metadata,
/// and populate the grid descriptors, but do not load any grids into memory.
/// @details If @a delayLoad is true, map the file into memory and enable delayed loading
/// of grids, and if a notifier is provided, call it when the file gets unmapped.
/// @note Define the environment variable @c OPENVDB_DISABLE_DELAYED_LOAD to disable
/// delayed loading unconditionally.
/// @throw IoError if the file is not a valid VDB file.
/// @return @c true if the file's UUID has changed since it was last read.
/// @see setCopyMaxBytes
bool open(bool delayLoad = true, const MappedFile::Notifier& = MappedFile::Notifier());
/// Return @c true if the file has been opened for reading.
bool isOpen() const;
/// Close the file once we are done reading from it.
void close();
/// @brief Return this file's current size on disk in bytes.
/// @throw IoError if the file size cannot be determined.
Index64 getSize() const;
/// @brief Return the size in bytes above which this file will not be
/// automatically copied during delayed loading.
Index64 copyMaxBytes() const;
/// @brief If this file is opened with delayed loading enabled, make a private copy
/// of the file if its size in bytes is less than the specified value.
/// @details Making a private copy ensures that the file can't change on disk
/// before it has been fully read.
/// @warning If the file is larger than this size, it is the user's responsibility
/// to ensure that it does not change on disk before it has been fully read.
/// Undefined behavior and/or a crash might result otherwise.
/// @note Copying is enabled by default, but it can be disabled for individual files
/// by setting the maximum size to zero bytes. A default size limit can be specified
/// by setting the environment variable @c OPENVDB_DELAYED_LOAD_COPY_MAX_BYTES
/// to the desired number of bytes.
void setCopyMaxBytes(Index64 bytes);
/// Return @c true if a grid of the given name exists in this file.
bool hasGrid(const Name&) const;
/// Return (in a newly created MetaMap) the file-level metadata.
MetaMap::Ptr getMetadata() const;
/// Read the entire contents of the file and return a list of grid pointers.
GridPtrVecPtr getGrids() const;
/// @brief Read just the grid metadata and transforms from the file and return a list
/// of pointers to grids that are empty except for their metadata and transforms.
/// @throw IoError if this file is not open for reading.
GridPtrVecPtr readAllGridMetadata();
/// @brief Read a grid's metadata and transform only.
/// @return A pointer to a grid that is empty except for its metadata and transform.
/// @throw IoError if this file is not open for reading.
/// @throw KeyError if no grid with the given name exists in this file.
GridBase::Ptr readGridMetadata(const Name&);
/// Read an entire grid, including all of its data blocks.
GridBase::Ptr readGrid(const Name&);
/// @brief Read a grid, including its data blocks, but only where it
/// intersects the given world-space bounding box.
GridBase::Ptr readGrid(const Name&, const BBoxd&);
/// @todo GridPtrVec readAllGrids(const Name&)
/// @brief Write the grids in the given container to the file whose name
/// was given in the constructor.
void write(const GridCPtrVec&, const MetaMap& = MetaMap()) const override;
/// @brief Write the grids in the given container to the file whose name
/// was given in the constructor.
template<typename GridPtrContainerT>
void write(const GridPtrContainerT&, const MetaMap& = MetaMap()) const;
/// A const iterator that iterates over all names in the file. This is only
/// valid once the file has been opened.
class NameIterator
{
public:
NameIterator(const NameMapCIter& iter): mIter(iter) {}
NameIterator(const NameIterator&) = default;
~NameIterator() {}
NameIterator& operator++() { mIter++; return *this; }
bool operator==(const NameIterator& iter) const { return mIter == iter.mIter; }
bool operator!=(const NameIterator& iter) const { return mIter != iter.mIter; }
Name operator*() const { return this->gridName(); }
Name gridName() const { return GridDescriptor::nameAsString(mIter->second.uniqueName()); }
private:
NameMapCIter mIter;
};
/// @return a NameIterator to iterate over all grid names in the file.
NameIterator beginName() const;
/// @return the ending iterator for all grid names in the file.
NameIterator endName() const;
private:
/// Read in all grid descriptors that are stored in the given stream.
void readGridDescriptors(std::istream&);
/// @brief Return an iterator to the descriptor for the grid with the given name.
/// If the name is non-unique, return an iterator to the first matching descriptor.
NameMapCIter findDescriptor(const Name&) const;
/// Return a newly created, empty grid of the type specified by the given grid descriptor.
GridBase::Ptr createGrid(const GridDescriptor&) const;
/// @brief Read a grid, including its data blocks, but only where it
/// intersects the given world-space bounding box.
GridBase::Ptr readGridByName(const Name&, const BBoxd&);
/// Read in and return the partially-populated grid specified by the given grid descriptor.
GridBase::ConstPtr readGridPartial(const GridDescriptor&, bool readTopology) const;
/// Read in and return the grid specified by the given grid descriptor.
GridBase::Ptr readGrid(const GridDescriptor&) const;
/// Read in and return the region of the grid specified by the given grid descriptor
/// that intersects the given world-space bounding box.
GridBase::Ptr readGrid(const GridDescriptor&, const BBoxd&) const;
/// Read in and return the region of the grid specified by the given grid descriptor
/// that intersects the given index-space bounding box.
GridBase::Ptr readGrid(const GridDescriptor&, const CoordBBox&) const;
/// @brief Partially populate the given grid by reading its metadata and transform and,
/// if the grid is not an instance, its tree structure, but not the tree's leaf nodes.
void readGridPartial(GridBase::Ptr, std::istream&, bool isInstance, bool readTopology) const;
/// @brief Retrieve a grid from @c mNamedGrids. Return a null pointer
/// if @c mNamedGrids was not populated (because this file is random-access).
/// @throw KeyError if no grid with the given name exists in this file.
GridBase::Ptr retrieveCachedGrid(const Name&) const;
void writeGrids(const GridCPtrVec&, const MetaMap&) const;
MetaMap::Ptr fileMetadata();
MetaMap::ConstPtr fileMetadata() const;
const NameMap& gridDescriptors() const;
NameMap& gridDescriptors();
std::istream& inputStream() const;
friend class ::TestFile;
friend class ::TestStream;
struct Impl;
std::unique_ptr<Impl> mImpl;
};
////////////////////////////////////////
// Out-of-line inline definition: forwards directly to writeGrids(), which
// refuses to write while the file is open for reading.
inline void
File::write(const GridCPtrVec& grids, const MetaMap& meta) const
{
this->writeGrids(grids, meta);
}
template<typename GridPtrContainerT>
inline void
File::write(const GridPtrContainerT& container, const MetaMap& meta) const
{
GridCPtrVec grids;
std::copy(container.begin(), container.end(), std::back_inserter(grids));
this->writeGrids(grids, meta);
}
} // namespace io
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_IO_FILE_HAS_BEEN_INCLUDED
| 9,388 | C | 38.616034 | 98 | 0.699936 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/io/DelayedLoadMetadata.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#include "DelayedLoadMetadata.h"
#include <openvdb/points/StreamCompression.h>
#ifdef OPENVDB_USE_BLOSC
#include <blosc.h>
namespace {
// Number of MaskType elements needed to hold (bytes + 1) bytes, i.e.
// ceil((bytes + 1) / sizeof(MaskType)).
inline size_t padMask(size_t bytes)
{
    const size_t elemSize = sizeof(openvdb::io::DelayedLoadMetadata::MaskType);
    // Exact integer ceiling division. The previous float-based
    // std::ceil(float(bytes+1)/elemSize) loses precision once the byte count
    // exceeds the 24-bit float mantissa and could under-pad large buffers.
    return (bytes + elemSize) / elemSize;
}
// Number of CompressedSizeType elements needed to hold (bytes + 1) bytes,
// i.e. ceil((bytes + 1) / sizeof(CompressedSizeType)).
inline size_t padCompressedSize(size_t bytes)
{
    const size_t elemSize = sizeof(openvdb::io::DelayedLoadMetadata::CompressedSizeType);
    // Exact integer ceiling division; avoids the float-precision loss of the
    // previous std::ceil-based computation for very large byte counts.
    return (bytes + elemSize) / elemSize;
}
} // namespace
#endif
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace io {
// Copy constructor: duplicates the per-leaf mask and compressed-size arrays;
// the Metadata base carries no state of its own here.
DelayedLoadMetadata::DelayedLoadMetadata(const DelayedLoadMetadata& other)
: Metadata()
, mMask(other.mMask)
, mCompressedSize(other.mCompressedSize)
{
}
// Return the registered type name for this metadata type (delegates to the
// static name so instance and class report identically).
Name DelayedLoadMetadata::typeName() const
{
return DelayedLoadMetadata::staticTypeName();
}
// Return a deep copy of this metadata as a base-class pointer.
Metadata::Ptr DelayedLoadMetadata::copy() const
{
    auto clone = Metadata::Ptr{new DelayedLoadMetadata{}};
    clone->copy(*this);
    return clone;
}
void DelayedLoadMetadata::copy(const Metadata& other)
{
const DelayedLoadMetadata* t = dynamic_cast<const DelayedLoadMetadata*>(&other);
if (t == nullptr) OPENVDB_THROW(TypeError, "Incompatible type during copy");
mMask = t->mMask;
mCompressedSize = t->mCompressedSize;
}
// This metadata has no human-readable representation: it is an IO-only
// acceleration structure, so str() is intentionally empty.
std::string DelayedLoadMetadata::str() const
{
return "";
}
// Likewise, it carries no truth value.
bool DelayedLoadMetadata::asBool() const
{
return false;
}
// Return the number of bytes that writeValue() will produce for this
// metadata. The accounting here must stay in sync with the layout written
// by writeValue(): count header, then (length header + payload) for the
// mask array, then (length header + payload) for the compressed-size array.
// For each payload, the Blosc-compressed size is used when compression
// succeeds, otherwise the raw array size.
Index32 DelayedLoadMetadata::size() const
{
if (mMask.empty() && mCompressedSize.empty()) return Index32(0);
// count
size_t size = sizeof(Index32);
{ // mask
size += sizeof(Index32);
size_t compressedSize = compression::bloscCompressedSize(
reinterpret_cast<const char*>(mMask.data()), mMask.size()*sizeof(MaskType));
if (compressedSize > 0) size += compressedSize;
else size += mMask.size()*sizeof(MaskType);
}
{ // compressed size
size += sizeof(Index32);
if (!mCompressedSize.empty()) {
size_t compressedSize = compression::bloscCompressedSize(
reinterpret_cast<const char*>(mCompressedSize.data()), mCompressedSize.size()*sizeof(CompressedSizeType));
if (compressedSize > 0) size += compressedSize;
else size += mCompressedSize.size()*sizeof(CompressedSizeType);
}
}
return static_cast<Index32>(size);
}
// Discard both arrays.
void DelayedLoadMetadata::clear()
{
mMask.clear();
mCompressedSize.clear();
}
// True if neither the mask nor the compressed-size array holds any entries.
bool DelayedLoadMetadata::empty() const
{
return mMask.empty() && mCompressedSize.empty();
}
// Resize the per-leaf mask array to hold `size` entries.
void DelayedLoadMetadata::resizeMask(size_t size)
{
mMask.resize(size);
}
// Resize the per-leaf compressed-size array to hold `size` entries.
void DelayedLoadMetadata::resizeCompressedSize(size_t size)
{
mCompressedSize.resize(size);
}
// Unchecked element access (asserts on out-of-range and, for reads, that
// the metadata type has been registered).
DelayedLoadMetadata::MaskType DelayedLoadMetadata::getMask(size_t index) const
{
assert(DelayedLoadMetadata::isRegisteredType());
assert(index < mMask.size());
return mMask[index];
}
void DelayedLoadMetadata::setMask(size_t index, const MaskType& value)
{
assert(index < mMask.size());
mMask[index] = value;
}
DelayedLoadMetadata::CompressedSizeType DelayedLoadMetadata::getCompressedSize(size_t index) const
{
assert(DelayedLoadMetadata::isRegisteredType());
assert(index < mCompressedSize.size());
return mCompressedSize[index];
}
void DelayedLoadMetadata::setCompressedSize(size_t index, const CompressedSizeType& value)
{
assert(index < mCompressedSize.size());
mCompressedSize[index] = value;
}
// Deserialize this metadata from a stream. The on-disk layout (written by
// writeValue()) is: element count, then [byte length, payload] for the mask
// array, then [byte length, payload] for the compressed-size array. A length
// of 0 means an uncompressed payload; a length equal to Index32 max means
// no compressed-size array was stored at all. Any trailing bytes beyond
// what this version understands are read and discarded.
void DelayedLoadMetadata::readValue(std::istream& is, Index32 numBytes)
{
if (numBytes == 0) return;
// initial header size
// NOTE: this initial sizeof(Index32) pre-accounts for the second length
// header read below (before the compressed-size payload), which does not
// increment `total` at its read site.
size_t total = sizeof(Index32);
Index32 count = 0;
is.read(reinterpret_cast<char*>(&count), sizeof(Index32));
total += sizeof(Index32);
Index32 bytes = 0;
is.read(reinterpret_cast<char*>(&bytes), sizeof(Index32));
total += sizeof(Index32);
if (bytes > Index32(0)) {
// Compressed mask payload.
// NOTE(review): in a build without OPENVDB_USE_BLOSC this branch consumes
// the payload but leaves mMask empty — confirm that is the intent.
std::unique_ptr<char[]> compressedBuffer(new char[bytes]);
is.read(reinterpret_cast<char*>(compressedBuffer.get()), bytes);
total += bytes;
#ifdef OPENVDB_USE_BLOSC
// pad to include BLOSC_MAX_OVERHEAD
size_t uncompressedBytes = openvdb::compression::bloscUncompressedSize(compressedBuffer.get());
const size_t paddedCount = padMask(uncompressedBytes + BLOSC_MAX_OVERHEAD);
mMask.reserve(paddedCount);
mMask.resize(count);
// resize should never modify capacity for smaller vector sizes
assert(mMask.capacity() >= paddedCount);
compression::bloscDecompress(reinterpret_cast<char*>(mMask.data()), count*sizeof(MaskType), mMask.capacity()*sizeof(MaskType), compressedBuffer.get());
#endif
} else {
// Uncompressed mask payload: read straight into the array.
mMask.resize(count);
is.read(reinterpret_cast<char*>(mMask.data()), count*sizeof(MaskType));
total += count*sizeof(MaskType);
}
is.read(reinterpret_cast<char*>(&bytes), sizeof(Index32));
if (bytes != std::numeric_limits<Index32>::max()) {
if (bytes > Index32(0)) {
// Compressed compressed-size payload (same scheme as the mask above).
std::unique_ptr<char[]> compressedBuffer(new char[bytes]);
is.read(reinterpret_cast<char*>(compressedBuffer.get()), bytes);
total += size_t(bytes);
#ifdef OPENVDB_USE_BLOSC
// pad to include BLOSC_MAX_OVERHEAD
size_t uncompressedBytes = openvdb::compression::bloscUncompressedSize(compressedBuffer.get());
const size_t paddedCount = padCompressedSize(uncompressedBytes + BLOSC_MAX_OVERHEAD);
mCompressedSize.reserve(paddedCount);
mCompressedSize.resize(count);
// resize should never modify capacity for smaller vector sizes
assert(mCompressedSize.capacity() >= paddedCount);
compression::bloscDecompress(reinterpret_cast<char*>(mCompressedSize.data()), count*sizeof(CompressedSizeType), mCompressedSize.capacity()*sizeof(CompressedSizeType), compressedBuffer.get());
#endif
} else {
mCompressedSize.resize(count);
is.read(reinterpret_cast<char*>(mCompressedSize.data()), count*sizeof(CompressedSizeType));
total += count*sizeof(CompressedSizeType);
}
}
Index32 totalBytes = static_cast<Index32>(total);
if (totalBytes < numBytes) {
// Read and discard any unknown bytes at the end of the metadata for forwards-compatibility
// (without seeking, because the stream might not be seekable).
const size_t BUFFER_SIZE = 1024;
std::vector<char> buffer(BUFFER_SIZE);
for (Index32 bytesRemaining = numBytes - totalBytes; bytesRemaining > 0; ) {
const Index32 bytesToSkip = std::min<Index32>(bytesRemaining, BUFFER_SIZE);
is.read(&buffer[0], bytesToSkip);
bytesRemaining -= bytesToSkip;
}
}
}
// Serialize this metadata to a stream. Layout: element count, then
// [byte length, payload] for the mask array, then [byte length, payload]
// for the compressed-size array. A length of 0 flags an uncompressed
// payload; Index32 max flags that no compressed-size array is stored.
// This layout must stay in sync with readValue() and with the byte
// accounting in size().
void DelayedLoadMetadata::writeValue(std::ostream& os) const
{
// metadata has a limit of 2^32 bytes
assert(mMask.size() < std::numeric_limits<Index32>::max());
assert(mCompressedSize.size() < std::numeric_limits<Index32>::max());
if (mMask.empty() && mCompressedSize.empty()) return;
// When both arrays are present they must be parallel (one entry per leaf).
assert(mCompressedSize.empty() || (mMask.size() == mCompressedSize.size()));
Index32 count = static_cast<Index32>(mMask.size());
os.write(reinterpret_cast<const char*>(&count), sizeof(Index32));
const Index32 zeroSize(0);
const Index32 maxSize(std::numeric_limits<Index32>::max());
{ // mask buffer
size_t compressedBytes(0);
std::unique_ptr<char[]> compressedBuffer;
if (compression::bloscCanCompress()) {
compressedBuffer = compression::bloscCompress(
reinterpret_cast<const char*>(mMask.data()),
mMask.size()*sizeof(MaskType), compressedBytes, /*resize=*/false);
}
if (compressedBuffer) {
assert(compressedBytes < std::numeric_limits<Index32>::max());
Index32 bytes(static_cast<Index32>(compressedBytes));
os.write(reinterpret_cast<const char*>(&bytes), sizeof(Index32));
os.write(reinterpret_cast<const char*>(compressedBuffer.get()), compressedBytes);
}
else {
// Compression unavailable or unprofitable: write length 0, then raw data.
os.write(reinterpret_cast<const char*>(&zeroSize), sizeof(Index32));
os.write(reinterpret_cast<const char*>(mMask.data()),
mMask.size()*sizeof(MaskType));
}
}
// compressed size buffer
if (mCompressedSize.empty()) {
// write out maximum Index32 value to denote no compressed sizes stored
os.write(reinterpret_cast<const char*>(&maxSize), sizeof(Index32));
} else {
size_t compressedBytes(0);
std::unique_ptr<char[]> compressedBuffer;
if (compression::bloscCanCompress()) {
compressedBuffer = compression::bloscCompress(
reinterpret_cast<const char*>(mCompressedSize.data()),
mCompressedSize.size()*sizeof(CompressedSizeType), compressedBytes, /*resize=*/false);
}
if (compressedBuffer) {
assert(compressedBytes < std::numeric_limits<Index32>::max());
Index32 bytes(static_cast<Index32>(compressedBytes));
os.write(reinterpret_cast<const char*>(&bytes), sizeof(Index32));
os.write(reinterpret_cast<const char*>(compressedBuffer.get()), compressedBytes);
}
else {
// Compression unavailable or unprofitable: write length 0, then raw data.
os.write(reinterpret_cast<const char*>(&zeroSize), sizeof(Index32));
os.write(reinterpret_cast<const char*>(mCompressedSize.data()),
mCompressedSize.size()*sizeof(CompressedSizeType));
}
}
}
} // namespace io
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
| 9,838 | C++ | 31.796667 | 203 | 0.658366 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/io/File.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file io/File.cc
#include "File.h"
#include "TempFile.h"
#include <openvdb/Exceptions.h>
#include <openvdb/util/logging.h>
#include <cstdint>
#include <boost/iostreams/copy.hpp>
#ifndef _MSC_VER
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#endif
#include <cassert>
#include <cstdlib> // for getenv(), strtoul()
#include <cstring> // for strerror_r()
#include <fstream>
#include <iostream>
#include <limits>
#include <sstream>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace io {
// Implementation details of the File class (pimpl).
struct File::Impl
{
    // Default threshold below which a file is copied to temporary storage
    // for delayed loading (see File::open()).
    enum { DEFAULT_COPY_MAX_BYTES = 500000000 }; // 500 MB

    // Tag type selecting the "no clipping" unarchive overload.
    struct NoBBox {};

    // Common implementation of the various File::readGrid() overloads,
    // with and without bounding box clipping: create the grid, seek to its
    // data, then dispatch to the matching unarchive() overload.
    template<typename BoxType>
    static GridBase::Ptr readGrid(const File& file, const GridDescriptor& gd, const BoxType& bbox)
    {
        // This method should not be called for files that don't contain grid offsets.
        assert(file.inputHasGridOffsets());
        GridBase::Ptr grid = file.createGrid(gd);
        gd.seekToGrid(file.inputStream());
        unarchive(file, grid, gd, bbox);
        return grid;
    }

    // Read the whole grid, with no clipping.
    static void unarchive(const File& file, GridBase::Ptr& grid,
        const GridDescriptor& gd, NoBBox)
    {
        file.Archive::readGrid(grid, gd, file.inputStream());
    }

    // Read only the region intersecting an index-space bounding box.
    static void unarchive(const File& file, GridBase::Ptr& grid,
        const GridDescriptor& gd, const CoordBBox& indexBBox)
    {
        file.Archive::readGrid(grid, gd, file.inputStream(), indexBBox);
    }

    // Read only the region intersecting a world-space bounding box.
    static void unarchive(const File& file, GridBase::Ptr& grid,
        const GridDescriptor& gd, const BBoxd& worldBBox)
    {
        file.Archive::readGrid(grid, gd, file.inputStream(), worldBBox);
    }

    // Return the copy-size limit, optionally overridden by the
    // OPENVDB_DELAYED_LOAD_COPY_MAX_BYTES environment variable.
    static Index64 getDefaultCopyMaxBytes()
    {
        Index64 result = DEFAULT_COPY_MAX_BYTES;
        if (const char* s = std::getenv("OPENVDB_DELAYED_LOAD_COPY_MAX_BYTES")) {
            char* endptr = nullptr;
            const unsigned long parsed = std::strtoul(s, &endptr, /*base=*/10);
            // Honor the override only if at least one digit was parsed.
            // Previously a malformed value made strtoul() return 0, which
            // silently disabled the temporary-copy optimization.
            if (endptr != s) result = parsed;
        }
        return result;
    }

    std::string mFilename;
    // The file-level metadata
    MetaMap::Ptr mMeta;
    // The memory-mapped file
    MappedFile::Ptr mFileMapping;
    // The buffer for the input stream, if it is a memory-mapped file
    SharedPtr<std::streambuf> mStreamBuf;
    // The file stream that is open for reading
    std::unique_ptr<std::istream> mInStream;
    // File-level stream metadata (file format, compression, etc.)
    StreamMetadata::Ptr mStreamMetadata;
    // Flag indicating if we have read in the global information (header,
    // metadata, and grid descriptors) for this VDB file
    bool mIsOpen;
    // File size limit for copying during delayed loading
    Index64 mCopyMaxBytes;
    // Grid descriptors for all grids stored in the file, indexed by grid name
    NameMap mGridDescriptors;
    // All grids, indexed by unique name (used only when mHasGridOffsets is false)
    Archive::NamedGridMap mNamedGrids;
    // All grids stored in the file (used only when mHasGridOffsets is false)
    GridPtrVecPtr mGrids;
}; // class File::Impl
////////////////////////////////////////
// Construct a File for the given path. The file is not opened here;
// callers must invoke open() before reading.
File::File(const std::string& filename): mImpl(new Impl)
{
mImpl->mFilename = filename;
mImpl->mIsOpen = false;
mImpl->mCopyMaxBytes = Impl::getDefaultCopyMaxBytes();
// Assume the archive supports random access until the header says otherwise.
setInputHasGridOffsets(true);
}
File::~File()
{
// Out-of-line (empty) destructor: required where Impl is a complete type,
// because mImpl is a std::unique_ptr<Impl>.
}
// Copy construction delegates to operator=; note that the copy is left
// closed (operator= sets mIsOpen to false).
File::File(const File& other)
: Archive(other)
, mImpl(new Impl)
{
*this = other;
}
// Copy assignment: duplicates the filename, cached metadata, grid
// descriptors and cached grids, but deliberately leaves the copy closed
// (no stream, buffer or mapping is shared).
File&
File::operator=(const File& other)
{
if (&other != this) {
Archive::operator=(other);
const Impl& otherImpl = *other.mImpl;
mImpl->mFilename = otherImpl.mFilename;
mImpl->mMeta = otherImpl.mMeta;
mImpl->mIsOpen = false; // don't want two file objects reading from the same stream
mImpl->mCopyMaxBytes = otherImpl.mCopyMaxBytes;
mImpl->mGridDescriptors = otherImpl.mGridDescriptors;
mImpl->mNamedGrids = otherImpl.mNamedGrids;
mImpl->mGrids = otherImpl.mGrids;
}
return *this;
}
// Virtual clone: return a copy of this File as an Archive pointer.
// The clone is closed (see operator=).
SharedPtr<Archive>
File::copy() const
{
    SharedPtr<Archive> archive(new File(*this));
    return archive;
}
////////////////////////////////////////
// Return the path that was given to the constructor.
const std::string&
File::filename() const
{
return mImpl->mFilename;
}
// Return the file-level metadata; populated by open(), reset by close().
MetaMap::Ptr
File::fileMetadata()
{
return mImpl->mMeta;
}
MetaMap::ConstPtr
File::fileMetadata() const
{
return mImpl->mMeta;
}
// Access the grid descriptor table (keyed by grid name, allows duplicates).
const File::NameMap&
File::gridDescriptors() const
{
return mImpl->mGridDescriptors;
}
File::NameMap&
File::gridDescriptors()
{
return mImpl->mGridDescriptors;
}
// Return the open input stream, throwing IoError if the file has not
// been opened for reading.
std::istream&
File::inputStream() const
{
    std::istream* strm = mImpl->mInStream.get();
    if (strm == nullptr) {
        OPENVDB_THROW(IoError, filename() << " is not open for reading");
    }
    return *strm;
}
////////////////////////////////////////
// Return the size of this file in bytes, throwing IoError on failure.
Index64
File::getSize() const
{
    /// @internal boost::filesystem::file_size() would be a more portable alternative,
    /// but as of 9/2014, Houdini ships without the Boost.Filesystem library,
    /// which makes it much less convenient to use that library.
    Index64 result = std::numeric_limits<Index64>::max();
    std::string mesg = "could not get size of file " + filename();
#ifdef _MSC_VER
    // Get the file size by seeking to the end of the file.
    // Open in binary mode: in the default text mode on Windows, stream
    // positions do not correspond to byte offsets, so tellg() at the end
    // would not report the true byte size of the file.
    std::ifstream fstrm(filename(), std::ios_base::in | std::ios_base::binary);
    if (fstrm) {
        fstrm.seekg(0, fstrm.end);
        result = static_cast<Index64>(fstrm.tellg());
    } else {
        OPENVDB_THROW(IoError, mesg);
    }
#else
    // Get the file size using the stat() system call.
    struct stat info;
    if (0 != ::stat(filename().c_str(), &info)) {
        std::string s = getErrorString();
        if (!s.empty()) mesg += " (" + s + ")";
        OPENVDB_THROW(IoError, mesg);
    }
    if (!S_ISREG(info.st_mode)) {
        mesg += " (not a regular file)";
        OPENVDB_THROW(IoError, mesg);
    }
    result = static_cast<Index64>(info.st_size);
#endif
    return result;
}
// Return the size threshold (in bytes) below which the file is copied to
// temporary storage for delayed loading (see open()).
Index64
File::copyMaxBytes() const
{
return mImpl->mCopyMaxBytes;
}
// Override the delayed-load copy threshold for this File instance.
void
File::setCopyMaxBytes(Index64 bytes)
{
mImpl->mCopyMaxBytes = bytes;
}
////////////////////////////////////////
// Return true if open() has succeeded and close() has not been called since.
bool
File::isOpen() const
{
return mImpl->mIsOpen;
}
// Open the file for reading: read the header, the file-level metadata and
// the grid descriptors (or, for non-seekable archives, all grids).
// When delayLoad is enabled, the file is memory-mapped (after optionally
// being copied to temporary storage — see copyMaxBytes()) so that leaf
// data can be loaded lazily. Returns the value of Archive::readHeader()
// (true if the file is not identical to the opened file).
bool
File::open(bool delayLoad, const MappedFile::Notifier& notifier)
{
if (isOpen()) {
OPENVDB_THROW(IoError, filename() << " is already open");
}
mImpl->mInStream.reset();
// Open the file.
std::unique_ptr<std::istream> newStream;
SharedPtr<std::streambuf> newStreamBuf;
MappedFile::Ptr newFileMapping;
if (!delayLoad || !Archive::isDelayedLoadingEnabled()) {
// Plain buffered reading — no memory mapping.
newStream.reset(new std::ifstream(
filename().c_str(), std::ios_base::in | std::ios_base::binary));
} else {
bool isTempFile = false;
std::string fname = filename();
if (getSize() < copyMaxBytes()) {
// If the file is not too large, make a temporary private copy of it
// and open the copy instead. The original file can then be modified
// or removed without affecting delayed load.
try {
TempFile tempFile;
std::ifstream fstrm(filename().c_str(),
std::ios_base::in | std::ios_base::binary);
boost::iostreams::copy(fstrm, tempFile);
fname = tempFile.filename();
isTempFile = true;
} catch (std::exception& e) {
// Copying is best-effort: fall back to reading the original file.
std::string mesg;
if (e.what()) mesg = std::string(" (") + e.what() + ")";
OPENVDB_LOG_WARN("failed to create a temporary copy of " << filename()
<< " for delayed loading" << mesg
<< "; will read directly from " << filename() << " instead");
}
}
// While the file is open, its mapping, stream buffer and stream
// must all be maintained. Once the file is closed, the buffer and
// the stream can be discarded, but the mapping needs to persist
// if any grids were lazily loaded.
try {
newFileMapping.reset(new MappedFile(fname, /*autoDelete=*/isTempFile));
newStreamBuf = newFileMapping->createBuffer();
newStream.reset(new std::istream(newStreamBuf.get()));
} catch (std::exception& e) {
std::ostringstream ostr;
ostr << "could not open file " << filename();
if (e.what() != nullptr) ostr << " (" << e.what() << ")";
OPENVDB_THROW(IoError, ostr.str());
}
}
if (newStream->fail()) {
OPENVDB_THROW(IoError, "could not open file " << filename());
}
// Read in the file header.
bool newFile = false;
try {
newFile = Archive::readHeader(*newStream);
} catch (IoError& e) {
if (e.what() && std::string("not a VDB file") == e.what()) {
// Rethrow, adding the filename.
OPENVDB_THROW(IoError, filename() << " is not a VDB file");
}
throw;
}
// Commit the new mapping, buffer and stream to the pimpl only after the
// header has been read successfully.
mImpl->mFileMapping = newFileMapping;
if (mImpl->mFileMapping) mImpl->mFileMapping->setNotifier(notifier);
mImpl->mStreamBuf = newStreamBuf;
mImpl->mInStream.swap(newStream);
// Tag the input stream with the file format and library version numbers
// and other metadata.
mImpl->mStreamMetadata.reset(new StreamMetadata);
mImpl->mStreamMetadata->setSeekable(true);
io::setStreamMetadataPtr(inputStream(), mImpl->mStreamMetadata, /*transfer=*/false);
Archive::setFormatVersion(inputStream());
Archive::setLibraryVersion(inputStream());
Archive::setDataCompression(inputStream());
io::setMappedFilePtr(inputStream(), mImpl->mFileMapping);
// Read in the VDB metadata.
mImpl->mMeta = MetaMap::Ptr(new MetaMap);
mImpl->mMeta->readMeta(inputStream());
if (!inputHasGridOffsets()) {
OPENVDB_LOG_DEBUG_RUNTIME("file " << filename() << " does not support partial reading");
mImpl->mGrids.reset(new GridPtrVec);
mImpl->mNamedGrids.clear();
// Stream in the entire contents of the file and append all grids to mGrids.
const int32_t gridCount = readGridCount(inputStream());
for (int32_t i = 0; i < gridCount; ++i) {
GridDescriptor gd;
gd.read(inputStream());
GridBase::Ptr grid = createGrid(gd);
Archive::readGrid(grid, gd, inputStream());
gridDescriptors().insert(std::make_pair(gd.gridName(), gd));
mImpl->mGrids->push_back(grid);
mImpl->mNamedGrids[gd.uniqueName()] = grid;
}
// Connect instances (grids that share trees with other grids).
for (NameMapCIter it = gridDescriptors().begin(); it != gridDescriptors().end(); ++it) {
Archive::connectInstance(it->second, mImpl->mNamedGrids);
}
} else {
// Read in just the grid descriptors.
readGridDescriptors(inputStream());
}
mImpl->mIsOpen = true;
return newFile; // true if file is not identical to opened file
}
// Close the file and discard all cached state. The stream is released
// before its buffer, and the buffer before the file mapping, since each
// wraps the next (see open()).
void
File::close()
{
// Reset all data.
mImpl->mMeta.reset();
mImpl->mGridDescriptors.clear();
mImpl->mGrids.reset();
mImpl->mNamedGrids.clear();
mImpl->mInStream.reset();
mImpl->mStreamBuf.reset();
mImpl->mStreamMetadata.reset();
mImpl->mFileMapping.reset();
mImpl->mIsOpen = false;
// Restore the default assumption for the next open() (see constructor).
setInputHasGridOffsets(true);
}
////////////////////////////////////////
bool
File::hasGrid(const Name& name) const
{
if (!isOpen()) {
OPENVDB_THROW(IoError, filename() << " is not open for reading");
}
return (findDescriptor(name) != gridDescriptors().end());
}
// Return a deep copy of the file-level metadata (read when the file was
// opened), so callers cannot mutate the cached copy. Throws IoError if
// the file is not open.
MetaMap::Ptr
File::getMetadata() const
{
    if (!isOpen()) {
        OPENVDB_THROW(IoError, filename() << " is not open for reading");
    }
    MetaMap::Ptr result(new MetaMap(*mImpl->mMeta));
    return result;
}
// Read and return all grids in the file. For non-seekable archives the
// already-cached grids are returned; otherwise each grid is read via its
// descriptor, and instanced grids are reconnected to their parents' trees.
GridPtrVecPtr
File::getGrids() const
{
if (!isOpen()) {
OPENVDB_THROW(IoError, filename() << " is not open for reading");
}
GridPtrVecPtr ret;
if (!inputHasGridOffsets()) {
// If the input file doesn't have grid offsets, then all of the grids
// have already been streamed in and stored in mGrids.
ret = mImpl->mGrids;
} else {
ret.reset(new GridPtrVec);
Archive::NamedGridMap namedGrids;
// Read all grids represented by the GridDescriptors.
for (NameMapCIter i = gridDescriptors().begin(), e = gridDescriptors().end(); i != e; ++i) {
const GridDescriptor& gd = i->second;
GridBase::Ptr grid = readGrid(gd);
ret->push_back(grid);
namedGrids[gd.uniqueName()] = grid;
}
// Connect instances (grids that share trees with other grids).
for (NameMapCIter i = gridDescriptors().begin(), e = gridDescriptors().end(); i != e; ++i) {
Archive::connectInstance(i->second, namedGrids);
}
}
return ret;
}
// Retrieve a grid from the cache of fully-streamed grids (mNamedGrids).
// Returns null for random-access files (which read grids on demand);
// throws KeyError if the cache is in use but has no grid with this name.
GridBase::Ptr
File::retrieveCachedGrid(const Name& name) const
{
// If the file has grid offsets, grids are read on demand
// and not cached in mNamedGrids.
if (inputHasGridOffsets()) return GridBase::Ptr();
// If the file does not have grid offsets, mNamedGrids should already
// contain the entire contents of the file.
// Search by unique name.
Archive::NamedGridMap::const_iterator it =
mImpl->mNamedGrids.find(GridDescriptor::stringAsUniqueName(name));
// If not found, search by grid name.
if (it == mImpl->mNamedGrids.end()) it = mImpl->mNamedGrids.find(name);
if (it == mImpl->mNamedGrids.end()) {
OPENVDB_THROW(KeyError, filename() << " has no grid named \"" << name << "\"");
}
return it->second;
}
////////////////////////////////////////
// Return, for every grid in the file, a copy carrying only metadata and
// transform (each copy has an empty tree). Throws IoError if not open.
GridPtrVecPtr
File::readAllGridMetadata()
{
if (!isOpen()) {
OPENVDB_THROW(IoError, filename() << " is not open for reading");
}
GridPtrVecPtr ret(new GridPtrVec);
if (!inputHasGridOffsets()) {
// If the input file doesn't have grid offsets, then all of the grids
// have already been streamed in and stored in mGrids.
for (size_t i = 0, N = mImpl->mGrids->size(); i < N; ++i) {
// Return copies of the grids, but with empty trees.
ret->push_back((*mImpl->mGrids)[i]->copyGridWithNewTree());
}
} else {
// Read just the metadata and transforms for all grids.
for (NameMapCIter i = gridDescriptors().begin(), e = gridDescriptors().end(); i != e; ++i) {
const GridDescriptor& gd = i->second;
GridBase::ConstPtr grid = readGridPartial(gd, /*readTopology=*/false);
// Return copies of the grids, but with empty trees.
// (As of 0.98.0, at least, it would suffice to just const cast
// the grid pointers returned by readGridPartial(), but shallow
// copying the grids helps to ensure future compatibility.)
ret->push_back(grid->copyGridWithNewTree());
}
}
return ret;
}
// Return a copy of the named grid carrying only metadata and transform
// (empty tree). Throws IoError if the file is not open and KeyError if no
// grid with the given name exists.
GridBase::Ptr
File::readGridMetadata(const Name& name)
{
if (!isOpen()) {
OPENVDB_THROW(IoError, filename() << " is not open for reading.");
}
GridBase::ConstPtr ret;
if (!inputHasGridOffsets()) {
// Retrieve the grid from mGrids, which should already contain
// the entire contents of the file.
ret = readGrid(name);
} else {
NameMapCIter it = findDescriptor(name);
if (it == gridDescriptors().end()) {
OPENVDB_THROW(KeyError, filename() << " has no grid named \"" << name << "\"");
}
// Seek to and read in the grid from the file.
const GridDescriptor& gd = it->second;
ret = readGridPartial(gd, /*readTopology=*/false);
}
return ret->copyGridWithNewTree();
}
////////////////////////////////////////
// Read the entire named grid (an empty/unsorted bbox means no clipping).
GridBase::Ptr
File::readGrid(const Name& name)
{
return readGridByName(name, BBoxd());
}
// Read the named grid, clipped to the given world-space bounding box.
GridBase::Ptr
File::readGrid(const Name& name, const BBoxd& bbox)
{
return readGridByName(name, bbox);
}
// Common implementation for the name-based readGrid() overloads: return
// the named grid, optionally clipped to a world-space bounding box (a
// sorted bbox enables clipping). Handles both cached (non-seekable) and
// on-demand (seekable) archives, and reconnects instanced grids to their
// parents' trees.
GridBase::Ptr
File::readGridByName(const Name& name, const BBoxd& bbox)
{
if (!isOpen()) {
OPENVDB_THROW(IoError, filename() << " is not open for reading.");
}
const bool clip = bbox.isSorted();
// If a grid with the given name was already read and cached
// (along with the entire contents of the file, because the file
// doesn't support random access), retrieve and return it.
GridBase::Ptr grid = retrieveCachedGrid(name);
if (grid) {
if (clip) {
// Don't clip the cached copy in place: deep copy first.
grid = grid->deepCopyGrid();
grid->clipGrid(bbox);
}
return grid;
}
NameMapCIter it = findDescriptor(name);
if (it == gridDescriptors().end()) {
OPENVDB_THROW(KeyError, filename() << " has no grid named \"" << name << "\"");
}
// Seek to and read in the grid from the file.
const GridDescriptor& gd = it->second;
grid = (clip ? readGrid(gd, bbox) : readGrid(gd));
if (gd.isInstance()) {
/// @todo Refactor to share code with Archive::connectInstance()?
NameMapCIter parentIt =
findDescriptor(GridDescriptor::nameAsString(gd.instanceParentName()));
if (parentIt == gridDescriptors().end()) {
OPENVDB_THROW(KeyError, "missing instance parent \""
<< GridDescriptor::nameAsString(gd.instanceParentName())
<< "\" for grid " << GridDescriptor::nameAsString(gd.uniqueName())
<< " in file " << filename());
}
GridBase::Ptr parent;
if (clip) {
// Clip the parent in this grid's index space so the shared tree matches.
const CoordBBox indexBBox = grid->constTransform().worldToIndexNodeCentered(bbox);
parent = readGrid(parentIt->second, indexBBox);
} else {
parent = readGrid(parentIt->second);
}
if (parent) grid->setTree(parent->baseTreePtr());
}
return grid;
}
////////////////////////////////////////
// Serialize the given grids (plus file-level metadata) to this File's
// path, truncating any existing file. Throws IoError if this File is
// currently open for reading or if the output file cannot be opened.
void
File::writeGrids(const GridCPtrVec& grids, const MetaMap& meta) const
{
    if (isOpen()) {
        OPENVDB_THROW(IoError,
            filename() << " cannot be written because it is open for reading");
    }
    // Open-on-construction; binary mode, replacing any existing contents.
    std::ofstream file(filename().c_str(),
        std::ios_base::out | std::ios_base::binary | std::ios_base::trunc);
    if (file.fail()) {
        OPENVDB_THROW(IoError, "could not open " << filename() << " for writing");
    }
    // Write out the vdb; request a seekable layout so grid offsets are recorded.
    Archive::write(file, grids, /*seekable=*/true, meta);
    file.close();
}
////////////////////////////////////////
// Populate the grid descriptor table from the stream without reading any
// grid data: each descriptor is read, indexed by grid name, and then the
// stream is advanced past the grid's data blocks.
void
File::readGridDescriptors(std::istream& is)
{
// This method should not be called for files that don't contain grid offsets.
assert(inputHasGridOffsets());
gridDescriptors().clear();
for (int32_t i = 0, N = readGridCount(is); i < N; ++i) {
// Read the grid descriptor.
GridDescriptor gd;
gd.read(is);
// Add the descriptor to the dictionary.
gridDescriptors().insert(std::make_pair(gd.gridName(), gd));
// Skip forward to the next descriptor.
gd.seekToEnd(is);
}
}
////////////////////////////////////////
// Return an iterator to the descriptor for the grid with the given name,
// or gridDescriptors().end() if none matches. The name may carry a "[N]"
// instance suffix, in which case the descriptor with the matching unique
// name is selected; for a non-unique bare name the first match is used
// (with a warning).
File::NameMapCIter
File::findDescriptor(const Name& name) const
{
const Name uniqueName = GridDescriptor::stringAsUniqueName(name);
// Find all descriptors with the given grid name.
std::pair<NameMapCIter, NameMapCIter> range = gridDescriptors().equal_range(name);
if (range.first == range.second) {
// If no descriptors were found with the given grid name, the name might have
// a suffix ("name[N]"). In that case, remove the "[N]" suffix and search again.
range = gridDescriptors().equal_range(GridDescriptor::stripSuffix(uniqueName));
}
const size_t count = size_t(std::distance(range.first, range.second));
// name == uniqueName means the caller supplied a bare name with no "[N]".
if (count > 1 && name == uniqueName) {
OPENVDB_LOG_WARN(filename() << " has more than one grid named \"" << name << "\"");
}
NameMapCIter ret = gridDescriptors().end();
if (count > 0) {
if (name == uniqueName) {
// If the given grid name is unique or if no "[N]" index was given,
// use the first matching descriptor.
ret = range.first;
} else {
// If the given grid name has a "[N]" index, find the descriptor
// with a matching unique name.
for (NameMapCIter it = range.first; it != range.second; ++it) {
const Name candidateName = it->second.uniqueName();
if (candidateName == uniqueName || candidateName == name) {
ret = it;
break;
}
}
}
}
return ret;
}
////////////////////////////////////////
// Return a newly created, empty grid of the type named by the given
// descriptor, configured for half-float serialization as the descriptor
// specifies. Throws KeyError if the grid type is not registered.
GridBase::Ptr
File::createGrid(const GridDescriptor& gd) const
{
    const Name gridType = gd.gridType();
    if (!GridBase::isRegistered(gridType)) {
        OPENVDB_THROW(KeyError, "Cannot read grid "
            << GridDescriptor::nameAsString(gd.uniqueName())
            << " from " << filename() << ": grid type "
            << gridType << " is not registered");
    }
    GridBase::Ptr grid = GridBase::createGrid(gridType);
    if (grid) grid->setSaveFloatAsHalf(gd.saveFloatAsHalf());
    return grid;
}
// Create the grid described by gd and populate only its metadata and
// transform (and, if readTopology is true, its tree topology without leaf
// data). Returned as a const pointer to discourage mutation of the
// partially-populated grid.
GridBase::ConstPtr
File::readGridPartial(const GridDescriptor& gd, bool readTopology) const
{
// This method should not be called for files that don't contain grid offsets.
assert(inputHasGridOffsets());
GridBase::Ptr grid = createGrid(gd);
// Seek to grid.
gd.seekToGrid(inputStream());
// Read the grid partially.
readGridPartial(grid, inputStream(), gd.isInstance(), readTopology);
// Promote to a const grid.
GridBase::ConstPtr constGrid = grid;
return constGrid;
}
// Read the full grid described by gd (no clipping).
GridBase::Ptr
File::readGrid(const GridDescriptor& gd) const
{
return Impl::readGrid(*this, gd, Impl::NoBBox());
}
// Read the grid described by gd, clipped to a world-space bounding box.
GridBase::Ptr
File::readGrid(const GridDescriptor& gd, const BBoxd& bbox) const
{
return Impl::readGrid(*this, gd, bbox);
}
// Read the grid described by gd, clipped to an index-space bounding box.
GridBase::Ptr
File::readGrid(const GridDescriptor& gd, const CoordBBox& bbox) const
{
return Impl::readGrid(*this, gd, bbox);
}
// Partially populate `grid` from the stream: compression settings,
// metadata, transform and — unless the grid is an instance or topology
// was not requested — its tree topology. The read order differs by file
// format version and must match io::Archive::readGrid().
void
File::readGridPartial(GridBase::Ptr grid, std::istream& is,
bool isInstance, bool readTopology) const
{
// This method should not be called for files that don't contain grid offsets.
assert(inputHasGridOffsets());
// This code needs to stay in sync with io::Archive::readGrid(), in terms of
// the order of operations.
readGridCompression(is);
grid->readMeta(is);
// drop DelayedLoadMetadata from the grid as it is only useful for IO
if ((*grid)[GridBase::META_FILE_DELAYED_LOAD]) {
grid->removeMeta(GridBase::META_FILE_DELAYED_LOAD);
}
if (getFormatVersion(is) >= OPENVDB_FILE_VERSION_GRID_INSTANCING) {
// Newer files store the transform before the topology.
grid->readTransform(is);
if (!isInstance && readTopology) {
grid->readTopology(is);
}
} else {
// Older files store the topology before the transform.
if (readTopology) {
grid->readTopology(is);
grid->readTransform(is);
}
}
}
////////////////////////////////////////
// Return an iterator over the names of all grids in the open file.
// Throws IoError if the file is not open.
File::NameIterator
File::beginName() const
{
if (!isOpen()) {
OPENVDB_THROW(IoError, filename() << " is not open for reading");
}
return File::NameIterator(gridDescriptors().begin());
}
// Return the past-the-end iterator for grid names (valid even when closed,
// since it simply wraps the end of the descriptor map).
File::NameIterator
File::endName() const
{
return File::NameIterator(gridDescriptors().end());
}
} // namespace io
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
| 23,893 | C++ | 27.64988 | 100 | 0.604026 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/io/io.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_IO_IO_HAS_BEEN_INCLUDED
#define OPENVDB_IO_IO_HAS_BEEN_INCLUDED
#include <openvdb/Platform.h>
#include <openvdb/Types.h> // for SharedPtr
#include <openvdb/version.h>
#include <boost/any.hpp>
#include <functional>
#include <iosfwd> // for std::ios_base
#include <map>
#include <memory>
#include <string>
class TestMappedFile;
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
class MetaMap;
namespace io {
/// @brief Container for metadata describing how to unserialize grids from and/or
/// serialize grids to a stream (which file format, compression scheme, etc. to use)
/// @details This class is mainly for internal use.
class OPENVDB_API StreamMetadata
{
public:
    using Ptr = SharedPtr<StreamMetadata>;
    using ConstPtr = SharedPtr<const StreamMetadata>;
    StreamMetadata();
    StreamMetadata(const StreamMetadata&);
    /// Construct by capturing the metadata already associated with the given stream.
    explicit StreamMetadata(std::ios_base&);
    ~StreamMetadata();
    StreamMetadata& operator=(const StreamMetadata&);
    /// @brief Transfer metadata items directly to the given stream.
    /// @todo Deprecate direct transfer; use StreamMetadata structs everywhere.
    void transferTo(std::ios_base&) const;
    // Accessors and mutators for the individual serialization settings.
    uint32_t fileVersion() const;
    void setFileVersion(uint32_t);
    VersionId libraryVersion() const;
    void setLibraryVersion(VersionId);
    uint32_t compression() const;
    void setCompression(uint32_t);
    uint32_t gridClass() const;
    void setGridClass(uint32_t);
    const void* backgroundPtr() const;
    void setBackgroundPtr(const void*);
    bool halfFloat() const;
    void setHalfFloat(bool);
    bool writeGridStats() const;
    void setWriteGridStats(bool);
    bool seekable() const;
    void setSeekable(bool);
    bool delayedLoadMeta() const;
    // State used by multi-pass I/O (see struct MultiPass).
    bool countingPasses() const;
    void setCountingPasses(bool);
    uint32_t pass() const;
    void setPass(uint32_t);
    uint64_t leaf() const;
    void setLeaf(uint64_t);
    //@{
    /// @brief Return a (reference to a) copy of the metadata of the grid
    /// currently being read or written.
    /// @details Some grid metadata might duplicate information returned by
    /// gridClass(), backgroundPtr() and other accessors, but those values
    /// are not guaranteed to be kept in sync.
    MetaMap& gridMetadata();
    const MetaMap& gridMetadata() const;
    //@}
    using AuxDataMap = std::map<std::string, boost::any>;
    //@{
    /// @brief Return a map that can be populated with arbitrary user data.
    AuxDataMap& auxData();
    const AuxDataMap& auxData() const;
    //@}
    /// @private
    uint32_t __test() const;
    /// @private
    void __setTest(uint32_t);
    /// Return a string describing this stream metadata.
    std::string str() const;
private:
    struct Impl; // pimpl: keeps the ABI-sensitive state out of the header
    std::unique_ptr<Impl> mImpl;
}; // class StreamMetadata
/// Write a description of the given metadata to an output stream.
std::ostream& operator<<(std::ostream&, const StreamMetadata&);
std::ostream& operator<<(std::ostream&, const StreamMetadata::AuxDataMap&);
////////////////////////////////////////
/// @brief Tag base class: leaf nodes that require multi-pass I/O must inherit
/// from this struct so that the archive machinery can detect them.
/// @sa Grid::hasMultiPassIO()
struct MultiPass {};
////////////////////////////////////////
class File;
/// @brief Handle to control the lifetime of a memory-mapped .vdb file
class OPENVDB_API MappedFile
{
public:
    using Ptr = SharedPtr<MappedFile>;
    ~MappedFile();
    MappedFile(const MappedFile&) = delete; // not copyable
    MappedFile& operator=(const MappedFile&) = delete;
    /// Return the filename of the mapped file.
    std::string filename() const;
    /// @brief Return a new stream buffer for the mapped file.
    /// @details Typical usage is
    /// @code
    /// openvdb::io::MappedFile::Ptr mappedFile = ...;
    /// auto buf = mappedFile->createBuffer();
    /// std::istream istrm{buf.get()};
    /// // Read from istrm...
    /// @endcode
    /// The buffer must persist as long as the stream is open.
    SharedPtr<std::streambuf> createBuffer() const;
    using Notifier = std::function<void(std::string /*filename*/)>;
    /// @brief Register a function that will be called with this file's name
    /// when the file is unmapped.
    void setNotifier(const Notifier&);
    /// Deregister the notifier.
    void clearNotifier();
private:
    // Only File (and the unit test) may construct MappedFile instances.
    friend class File;
    friend class ::TestMappedFile;
    explicit MappedFile(const std::string& filename, bool autoDelete = false);
    class Impl; // pimpl: hides the platform-specific mapping state
    std::unique_ptr<Impl> mImpl;
}; // class MappedFile
////////////////////////////////////////
/// Return a string (possibly empty) describing the given system error code.
std::string getErrorString(int errorNum);
/// Return a string (possibly empty) describing the most recent system error.
std::string getErrorString();
////////////////////////////////////////
/// @brief Return the file format version number associated with the given input stream.
/// @sa File::setFormatVersion()
OPENVDB_API uint32_t getFormatVersion(std::ios_base&);
/// @brief Return the (major, minor) library version number associated with the given input stream.
/// @sa File::setLibraryVersion()
OPENVDB_API VersionId getLibraryVersion(std::ios_base&);
/// @brief Return a string of the form "<major>.<minor>/<format>", giving the library
/// and file format version numbers associated with the given input stream.
OPENVDB_API std::string getVersion(std::ios_base&);
/// Associate the current file format and library version numbers with the given input stream.
OPENVDB_API void setCurrentVersion(std::istream&);
/// @brief Associate specific file format and library version numbers with the given stream.
/// @details This is typically called immediately after reading a header that contains
/// the version numbers. Data read subsequently can then be interpreted appropriately.
OPENVDB_API void setVersion(std::ios_base&, const VersionId& libraryVersion, uint32_t fileVersion);
/// @brief Return a bitwise OR of compression option flags (COMPRESS_ZIP,
/// COMPRESS_ACTIVE_MASK, etc.) specifying whether and how input data is compressed
/// or output data should be compressed.
OPENVDB_API uint32_t getDataCompression(std::ios_base&);
/// @brief Associate with the given stream a bitwise OR of compression option flags
/// (COMPRESS_ZIP, COMPRESS_ACTIVE_MASK, etc.) specifying whether and how input data
/// is compressed or output data should be compressed.
OPENVDB_API void setDataCompression(std::ios_base&, uint32_t compressionFlags);
/// @brief Return the class (GRID_LEVEL_SET, GRID_UNKNOWN, etc.) of the grid
/// currently being read from or written to the given stream.
OPENVDB_API uint32_t getGridClass(std::ios_base&);
/// @brief Associate with the given stream the class (GRID_LEVEL_SET, GRID_UNKNOWN, etc.)
/// of the grid currently being read or written.
OPENVDB_API void setGridClass(std::ios_base&, uint32_t);
/// @brief Return true if floating-point values should be quantized to 16 bits when writing
/// to the given stream or promoted back from 16-bit to full precision when reading from it.
OPENVDB_API bool getHalfFloat(std::ios_base&);
/// @brief Specify whether floating-point values should be quantized to 16 bits when writing
/// to the given stream or promoted back from 16-bit to full precision when reading from it.
OPENVDB_API void setHalfFloat(std::ios_base&, bool);
/// @brief Return a pointer to the background value of the grid
/// currently being read from or written to the given stream.
OPENVDB_API const void* getGridBackgroundValuePtr(std::ios_base&);
/// @brief Specify (a pointer to) the background value of the grid
/// currently being read from or written to the given stream.
/// @note The pointer must remain valid until the entire grid has been read or written.
OPENVDB_API void setGridBackgroundValuePtr(std::ios_base&, const void* background);
/// @brief Return @c true if grid statistics (active voxel count and bounding box, etc.)
/// should be computed and stored as grid metadata when writing to the given stream.
OPENVDB_API bool getWriteGridStatsMetadata(std::ios_base&);
/// @brief Specify whether to compute grid statistics (active voxel count and bounding box, etc.)
/// and store them as grid metadata when writing to the given stream.
OPENVDB_API void setWriteGridStatsMetadata(std::ios_base&, bool writeGridStats);
/// @brief Return a shared pointer to the memory-mapped file with which the given stream
/// is associated, or a null pointer if the stream is not associated with a memory-mapped file.
OPENVDB_API SharedPtr<MappedFile> getMappedFilePtr(std::ios_base&);
/// @brief Associate the given stream with (a shared pointer to) a memory-mapped file.
/// @note The shared pointer object (not just the io::MappedFile object to which it points)
/// must remain valid until the file is closed.
OPENVDB_API void setMappedFilePtr(std::ios_base&, SharedPtr<MappedFile>&);
/// @brief Return a shared pointer to an object that stores metadata (file format,
/// compression scheme, etc.) for use when reading from or writing to the given stream.
OPENVDB_API SharedPtr<StreamMetadata> getStreamMetadataPtr(std::ios_base&);
/// @brief Associate the given stream with (a shared pointer to) an object that stores
/// metadata (file format, compression scheme, etc.) for use when reading from
/// or writing to the stream.
/// @details If @a transfer is true, copy metadata from the object directly to the stream
/// (for backward compatibility with older versions of the library).
/// @note The shared pointer object (not just the io::StreamMetadata object to which it points)
/// must remain valid until the file is closed.
OPENVDB_API void setStreamMetadataPtr(std::ios_base&,
SharedPtr<StreamMetadata>&, bool transfer = true);
/// @brief Dissociate the given stream from its metadata object (if it has one)
/// and return a shared pointer to the object.
OPENVDB_API SharedPtr<StreamMetadata> clearStreamMetadataPtr(std::ios_base&);
} // namespace io
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_IO_IO_HAS_BEEN_INCLUDED
| 10,222 | C | 36.174545 | 99 | 0.716983 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/io/Stream.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#include "Stream.h"
#include "File.h" ///< @todo refactor
#include "GridDescriptor.h"
#include "TempFile.h"
#include <openvdb/Exceptions.h>
#include <cstdint>
#include <boost/iostreams/copy.hpp>
#include <cstdio> // for remove()
#include <functional> // for std::bind()
#include <iostream>
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace io {
// Private implementation state of a Stream archive.
struct Stream::Impl
{
    Impl(): mOutputStream{nullptr} {}
    Impl(const Impl& other) { *this = other; }
    Impl& operator=(const Impl& other)
    {
        if (&other != this) {
            mMeta = other.mMeta; ///< @todo deep copy?
            mGrids = other.mGrids; ///< @todo deep copy?
            mOutputStream = other.mOutputStream;
            // Note: the delayed-load File handle is deliberately NOT copied;
            // the copy starts with no backing file.
            mFile.reset();
        }
        return *this;
    }
    MetaMap::Ptr mMeta;          // file-level metadata read at construction
    GridPtrVecPtr mGrids;        // grids read eagerly from the input stream
    std::ostream* mOutputStream; // non-owning; null unless constructed for output
    std::unique_ptr<File> mFile; // temp-file archive used for delayed loading
};
////////////////////////////////////////
namespace {
/// @todo Use MappedFile auto-deletion instead.
/// @brief Delete the file at @a filename, but only if it matches
/// @a expectedFilename (the temporary file this callback was registered for).
/// @details Used as a MappedFile unmap notifier; failure to remove the file
/// is logged as a warning rather than treated as an error.
/// @todo Use MappedFile auto-deletion instead.
void
removeTempFile(const std::string& expectedFilename, const std::string& filename)
{
    // Pass by const reference: the previous by-value first parameter forced
    // a needless string copy on every invocation. std::bind stores its own
    // copy of the bound argument, so the reference remains valid.
    if (filename == expectedFilename) {
        if (0 != std::remove(filename.c_str())) {
            std::string mesg = getErrorString();
            if (!mesg.empty()) mesg = " (" + mesg + ")";
            OPENVDB_LOG_WARN("failed to remove temporary file " << filename << mesg);
        }
    }
}
}
/// @brief Construct an archive that reads grids from the given input stream.
/// @param is         stream positioned at the start of a VDB archive
/// @param delayLoad  if true (and delayed loading is enabled), spool the stream
///                   to a temporary file and read lazily through a File archive;
///                   otherwise read all grids eagerly, right here.
Stream::Stream(std::istream& is, bool delayLoad): mImpl(new Impl)
{
    if (!is) return;
    if (delayLoad && Archive::isDelayedLoadingEnabled()) {
        // Copy the contents of the stream to a temporary private file
        // and open the file instead.
        std::unique_ptr<TempFile> tempFile;
        try {
            tempFile.reset(new TempFile);
        } catch (std::exception& e) {
            // Temp-file creation failure is non-fatal: fall back to eager reading.
            std::string mesg;
            if (e.what()) mesg = std::string(" (") + e.what() + ")";
            OPENVDB_LOG_WARN("failed to create a temporary file for delayed loading" << mesg
                << "; will read directly from the input stream instead");
        }
        if (tempFile) {
            boost::iostreams::copy(is, *tempFile);
            const std::string& filename = tempFile->filename();
            mImpl->mFile.reset(new File(filename));
            mImpl->mFile->setCopyMaxBytes(0); // don't make a copy of the temporary file
            /// @todo Need to pass auto-deletion flag to MappedFile.
            // The notifier deletes the temp file once it is unmapped.
            mImpl->mFile->open(delayLoad,
                std::bind(&removeTempFile, filename, std::placeholders::_1));
        }
    }
    if (!mImpl->mFile) {
        // Eager path: parse the archive directly from the stream.
        readHeader(is);
        // Tag the input stream with the library and file format version numbers
        // and the compression options specified in the header.
        StreamMetadata::Ptr streamMetadata(new StreamMetadata);
        io::setStreamMetadataPtr(is, streamMetadata, /*transfer=*/false);
        io::setVersion(is, libraryVersion(), fileVersion());
        io::setDataCompression(is, compression());
        // Read in the VDB metadata.
        mImpl->mMeta.reset(new MetaMap);
        mImpl->mMeta->readMeta(is);
        // Read in the number of grids.
        const int32_t gridCount = readGridCount(is);
        // Read in all grids and insert them into mGrids.
        mImpl->mGrids.reset(new GridPtrVec);
        std::vector<GridDescriptor> descriptors;
        descriptors.reserve(gridCount);
        Archive::NamedGridMap namedGrids;
        for (int32_t i = 0; i < gridCount; ++i) {
            GridDescriptor gd;
            gd.read(is);
            descriptors.push_back(gd);
            GridBase::Ptr grid = readGrid(gd, is);
            mImpl->mGrids->push_back(grid);
            namedGrids[gd.uniqueName()] = grid;
        }
        // Connect instances (grids that share trees with other grids).
        for (size_t i = 0, N = descriptors.size(); i < N; ++i) {
            Archive::connectInstance(descriptors[i], namedGrids);
        }
    }
}
/// Construct an empty archive (no input, no output).
Stream::Stream(): mImpl(new Impl)
{
}
/// Construct an archive that will write grids to the given output stream.
Stream::Stream(std::ostream& os): mImpl(new Impl)
{
    mImpl->mOutputStream = &os;
}
Stream::~Stream()
{
}
/// Copy constructor; see Impl::operator=() for what is (and is not) copied.
Stream::Stream(const Stream& other): Archive(other), mImpl(new Impl(*other.mImpl))
{
}
/// Assignment; replaces this archive's state with a copy of @a other's.
Stream&
Stream::operator=(const Stream& other)
{
    // Self-assignment is a no-op.
    if (this == &other) return *this;
    mImpl.reset(new Impl(*other.mImpl));
    return *this;
}
/// Return a polymorphic copy of this archive.
SharedPtr<Archive>
Stream::copy() const
{
    SharedPtr<Archive> archiveCopy(new Stream(*this));
    return archiveCopy;
}
////////////////////////////////////////
/// @brief Instantiate a grid of the type named in @a gd and populate it from @a is.
/// @throw TypeError if the grid type has not been registered.
GridBase::Ptr
Stream::readGrid(const GridDescriptor& gd, std::istream& is) const
{
    // An unregistered grid type cannot be instantiated.
    if (!GridBase::isRegistered(gd.gridType())) {
        OPENVDB_THROW(TypeError, "can't read grid \""
            << GridDescriptor::nameAsString(gd.uniqueName()) <<
            "\" from input stream because grid type " << gd.gridType() << " is unknown");
    }
    GridBase::Ptr grid = GridBase::createGrid(gd.gridType());
    if (grid) grid->setSaveFloatAsHalf(gd.saveFloatAsHalf());
    Archive::readGrid(grid, gd, is);
    return grid;
}
/// @brief Write the given grids and metadata to this archive's output stream.
/// @throw ValueError if this archive was not constructed with an output stream.
void
Stream::write(const GridCPtrVec& grids, const MetaMap& metadata) const
{
    std::ostream* outStream = mImpl->mOutputStream;
    if (!outStream) {
        OPENVDB_THROW(ValueError, "no output stream was specified");
    }
    this->writeGrids(*outStream, grids, metadata);
}
/// Serialize the given grids and metadata to @a os.
void
Stream::writeGrids(std::ostream& os, const GridCPtrVec& grids, const MetaMap& metadata) const
{
    // Plain streams are not seekable, so no grid offsets are recorded.
    Archive::write(os, grids, /*seekable=*/false, metadata);
}
////////////////////////////////////////
/// @brief Return the file-level metadata of this archive,
/// or a null pointer if none is available.
MetaMap::Ptr
Stream::getMetadata() const
{
    // Delayed-load path: the temp-file archive owns the metadata.
    if (mImpl->mFile) {
        return mImpl->mFile->getMetadata();
    }
    // Eager path: return a deep copy of the file-level metadata
    // that was read when this object was constructed.
    if (mImpl->mMeta) {
        return MetaMap::Ptr(new MetaMap(*mImpl->mMeta));
    }
    return MetaMap::Ptr();
}
/// @brief Return the grids stored in this archive: either lazily through the
/// delayed-load temp file, or the ones read eagerly at construction time.
GridPtrVecPtr
Stream::getGrids()
{
    return mImpl->mFile ? mImpl->mFile->getGrids() : mImpl->mGrids;
}
} // namespace io
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
| 6,232 | C++ | 25.079498 | 93 | 0.596919 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/io/TempFile.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file TempFile.h
#ifndef OPENVDB_IO_TEMPFILE_HAS_BEEN_INCLUDED
#define OPENVDB_IO_TEMPFILE_HAS_BEEN_INCLUDED
#include <openvdb/version.h>
#include <memory>
#include <ostream>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace io {
/// Output stream to a unique temporary file
/// Output stream to a unique temporary file
class OPENVDB_API TempFile: public std::ostream
{
public:
    /// @brief Create and open a unique file.
    /// @details On UNIX systems, the file is created in the directory specified by
    /// the environment variable @c OPENVDB_TEMP_DIR, if that variable is defined,
    /// or else in the directory specified by @c TMPDIR, if that variable is defined.
    /// Otherwise (and on non-UNIX systems), the file is created in the system default
    /// temporary directory.
    TempFile();
    ~TempFile();
    /// Return the path to the temporary file.
    /// @note NOTE(review): returns const std::string& but <string> is not included
    /// directly by this header — relies on a transitive include; confirm.
    const std::string& filename() const;
    /// Return @c true if the file is open for writing.
    bool is_open() const;
    /// Close the file.
    void close();
private:
    struct TempFileImpl; // pimpl: hides the platform-specific file state
    std::unique_ptr<TempFileImpl> mImpl;
};
} // namespace io
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_IO_TEMPFILE_HAS_BEEN_INCLUDED
| 1,340 | C | 25.294117 | 86 | 0.705224 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/io/Queue.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file Queue.cc
/// @author Peter Cucka
#include "Queue.h"
#include "File.h"
#include "Stream.h"
#include <openvdb/Exceptions.h>
#include <openvdb/util/logging.h>
#include <tbb/atomic.h>
#include <tbb/concurrent_hash_map.h>
#include <tbb/mutex.h>
#include <tbb/task.h>
#include <tbb/tbb_thread.h> // for tbb::this_tbb_thread::sleep()
#include <tbb/tick_count.h>
#include <algorithm> // for std::max()
#include <iostream>
#include <map>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace io {
namespace {
using Mutex = tbb::mutex;
using Lock = Mutex::scoped_lock;
// Abstract base class for queuable TBB tasks that adds a task completion callback
class Task: public tbb::task
{
public:
    Task(Queue::Id id): mId(id) {}
    ~Task() override {}
    /// Return the queue-assigned id of this task.
    Queue::Id id() const { return mId; }
    /// Register the callback to invoke when this task finishes.
    void setNotifier(Queue::Notifier& notifier) { mNotify = notifier; }
protected:
    // Invoke the registered callback (if any) with this task's id and final status.
    void notify(Queue::Status status) { if (mNotify) mNotify(this->id(), status); }
private:
    Queue::Id mId;
    Queue::Notifier mNotify;
};
// Queuable TBB task that writes one or more grids to a .vdb file or an output stream
class OutputTask: public Task
{
public:
    // Note: the task stores its own copy of the archive (Archive::copy()),
    // along with copies of the grid pointers and the metadata.
    OutputTask(Queue::Id id, const GridCPtrVec& grids, const Archive& archive,
        const MetaMap& metadata)
        : Task(id)
        , mGrids(grids)
        , mArchive(archive.copy())
        , mMetadata(metadata)
    {}
    // Perform the write; report SUCCEEDED or FAILED via the notifier.
    // All exceptions are swallowed (this is a best-effort background task):
    // std::exceptions are logged, anything else fails silently.
    tbb::task* execute() override
    {
        Queue::Status status = Queue::FAILED;
        try {
            mArchive->write(mGrids, mMetadata);
            status = Queue::SUCCEEDED;
        } catch (std::exception& e) {
            if (const char* msg = e.what()) {
                OPENVDB_LOG_ERROR(msg);
            }
        } catch (...) {
        }
        this->notify(status);
        return nullptr; // no successor to this task
    }
private:
    GridCPtrVec mGrids;
    SharedPtr<Archive> mArchive;
    MetaMap mMetadata;
};
} // unnamed namespace
////////////////////////////////////////
// Private implementation details of a Queue
struct Queue::Impl
{
    using NotifierMap = std::map<Queue::Id, Queue::Notifier>;
    /// @todo Provide more information than just "succeeded" or "failed"?
    using StatusMap = tbb::concurrent_hash_map<Queue::Id, Queue::Status>;
    Impl()
        : mTimeout(Queue::DEFAULT_TIMEOUT)
        , mCapacity(Queue::DEFAULT_CAPACITY)
        , mNextId(1)
        , mNextNotifierId(1)
    {
        mNumTasks = 0; // note: must explicitly zero-initialize atomics
    }
    ~Impl() {}
    // Disallow copying of instances of this class.
    // Deleted (rather than declared but left undefined) so that accidental
    // copies are diagnosed at compile time instead of at link time.
    Impl(const Impl&) = delete;
    Impl& operator=(const Impl&) = delete;
    // Record the status of the task with the given id.
    // This method might be called from multiple threads.
    void setStatus(Queue::Id id, Queue::Status status)
    {
        StatusMap::accessor acc;
        mStatus.insert(acc, id);
        acc->second = status;
    }
    // Record the status of the task with the given id, then invoke any
    // registered callbacks. This method might be called from multiple threads.
    void setStatusWithNotification(Queue::Id id, Queue::Status status)
    {
        const bool completed = (status == SUCCEEDED || status == FAILED);
        // Update the task's entry in the status map with the new status.
        this->setStatus(id, status);
        // If the client registered any callbacks, call them now.
        bool didNotify = false;
        {
            // tbb::concurrent_hash_map does not support concurrent iteration
            // (i.e., iteration concurrent with insertion or deletion),
            // so we use a mutex-protected STL map instead. But if a callback
            // invokes a notifier method such as removeNotifier() on this queue,
            // the result will be a deadlock.
            /// @todo Is it worth trying to avoid such deadlocks?
            Lock lock(mNotifierMutex);
            if (!mNotifiers.empty()) {
                didNotify = true;
                for (NotifierMap::const_iterator it = mNotifiers.begin();
                    it != mNotifiers.end(); ++it)
                {
                    it->second(id, status);
                }
            }
        }
        // If the task completed and callbacks were called, remove
        // the task's entry from the status map.
        if (completed) {
            if (didNotify) {
                StatusMap::accessor acc;
                if (mStatus.find(acc, id)) {
                    mStatus.erase(acc);
                }
            }
            --mNumTasks;
        }
    }
    // Return true if there is room in the queue for another task.
    bool canEnqueue() const { return mNumTasks < Int64(mCapacity); }
    // Enqueue the given task, polling (with a timeout) until space is available.
    // Throws RuntimeError if the queue is still full after mTimeout seconds.
    void enqueue(Task& task)
    {
        tbb::tick_count start = tbb::tick_count::now();
        while (!canEnqueue()) {
            tbb::this_tbb_thread::sleep(tbb::tick_count::interval_t(0.5/*sec*/));
            if ((tbb::tick_count::now() - start).seconds() > double(mTimeout)) {
                OPENVDB_THROW(RuntimeError,
                    "unable to queue I/O task; " << mTimeout << "-second time limit expired");
            }
        }
        Queue::Notifier notify = std::bind(&Impl::setStatusWithNotification, this,
            std::placeholders::_1, std::placeholders::_2);
        task.setNotifier(notify);
        this->setStatus(task.id(), Queue::PENDING);
        tbb::task::enqueue(task);
        ++mNumTasks;
    }
    Index32 mTimeout;             ///< seconds to wait for space before enqueue() fails
    Index32 mCapacity;            ///< maximum number of tasks allowed in flight
    tbb::atomic<Int32> mNumTasks; ///< number of tasks currently pending or executing
    Index32 mNextId;              ///< id to assign to the next task
    StatusMap mStatus;            ///< per-task status, queried via Queue::status()
    NotifierMap mNotifiers;       ///< registered completion callbacks
    Index32 mNextNotifierId;      ///< id to assign to the next notifier
    Mutex mNotifierMutex;         ///< guards mNotifiers
};
////////////////////////////////////////
/// Construct a queue that accepts at most @a capacity tasks at a time.
Queue::Queue(Index32 capacity): mImpl(new Impl)
{
    mImpl->mCapacity = capacity;
}
/// Destructor: blocks, polling every half second, until mNumTasks drops to zero.
Queue::~Queue()
{
    // Wait for all queued tasks to complete (successfully or unsuccessfully).
    /// @todo Allow the queue to be destroyed while there are uncompleted tasks
    /// (e.g., by keeping a static registry of queues that also dispatches
    /// or blocks notifications)?
    while (mImpl->mNumTasks > 0) {
        tbb::this_tbb_thread::sleep(tbb::tick_count::interval_t(0.5/*sec*/));
    }
}
////////////////////////////////////////
// Simple accessors for the queue's size, capacity and timeout settings.
bool Queue::empty() const { return (mImpl->mNumTasks == 0); }
// Clamp to zero in case the atomic counter is observed mid-decrement.
Index32 Queue::size() const { return Index32(std::max<Int32>(0, mImpl->mNumTasks)); }
Index32 Queue::capacity() const { return mImpl->mCapacity; }
// A capacity of at least one is enforced.
void Queue::setCapacity(Index32 n) { mImpl->mCapacity = std::max<Index32>(1, n); }
/// @todo void Queue::setCapacity(Index64 bytes);
/// @todo Provide a way to limit the number of tasks in flight
/// (e.g., by enqueueing tbb::tasks that pop Tasks off a concurrent_queue)?
/// @todo Remove any tasks from the queue that are not currently executing.
//void clear() const;
Index32 Queue::timeout() const { return mImpl->mTimeout; }
void Queue::setTimeout(Index32 sec) { mImpl->mTimeout = sec; }
////////////////////////////////////////
/// @brief Return the current status of the task with the given id.
/// @details Once a completed (SUCCEEDED or FAILED) status has been queried,
/// the task's entry is removed, so subsequent queries return UNKNOWN.
Queue::Status
Queue::status(Id id) const
{
    Impl::StatusMap::const_accessor acc;
    if (!mImpl->mStatus.find(acc, id)) {
        return UNKNOWN;
    }
    const Status status = acc->second;
    if (status == SUCCEEDED || status == FAILED) {
        mImpl->mStatus.erase(acc);
    }
    return status;
}
/// Register a completion callback and return its id (for later removal).
Queue::Id
Queue::addNotifier(Notifier notify)
{
    Lock lock(mImpl->mNotifierMutex);
    const Queue::Id notifierId = mImpl->mNextNotifierId++;
    mImpl->mNotifiers[notifierId] = notify;
    return notifierId;
}
/// Deregister the completion callback with the given id, if any.
void
Queue::removeNotifier(Id id)
{
    Lock lock(mImpl->mNotifierMutex);
    // std::map::erase(key) is a no-op if the id is not registered.
    mImpl->mNotifiers.erase(id);
}
/// Deregister all completion callbacks.
void
Queue::clearNotifiers()
{
    // Lock out concurrent registration/removal and in-flight notifications.
    Lock lock(mImpl->mNotifierMutex);
    mImpl->mNotifiers.clear();
}
////////////////////////////////////////
/// Queue a single grid for output; convenience wrapper around writeGridVec().
Queue::Id
Queue::writeGrid(GridBase::ConstPtr grid, const Archive& archive, const MetaMap& metadata)
{
    GridCPtrVec grids(1, grid);
    return writeGridVec(grids, archive, metadata);
}
/// @brief Queue the given grids for output via @a archive and return the task id.
/// @details May block until queue space is available; throws RuntimeError
/// if the configured timeout expires first (see Impl::enqueue()).
Queue::Id
Queue::writeGridVec(const GridCPtrVec& grids, const Archive& archive, const MetaMap& metadata)
{
    const Queue::Id taskId = mImpl->mNextId++;
    // From the "GUI Thread" chapter in the TBB Design Patterns guide
    OutputTask* task =
        new(tbb::task::allocate_root()) OutputTask(taskId, grids, archive, metadata);
    try {
        mImpl->enqueue(*task);
    } catch (openvdb::RuntimeError&) {
        // Destroy the task if it could not be enqueued, then rethrow the exception.
        // (Root tasks must be destroyed explicitly if they are never spawned.)
        tbb::task::destroy(*task);
        throw;
    }
    return taskId;
}
} // namespace io
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
| 8,604 | C++ | 26.404459 | 94 | 0.601929 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/io/Archive.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#include "Archive.h"
#include "GridDescriptor.h"
#include "DelayedLoadMetadata.h"
#include "io.h"
#include <openvdb/Exceptions.h>
#include <openvdb/Metadata.h>
#include <openvdb/tree/LeafManager.h>
#include <openvdb/util/logging.h>
#include <openvdb/openvdb.h>
// Boost.Interprocess uses a header-only portion of Boost.DateTime
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-macros"
#endif
#define BOOST_DATE_TIME_NO_LIB
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#include <boost/interprocess/file_mapping.hpp>
#include <boost/interprocess/mapped_region.hpp>
#include <boost/iostreams/device/array.hpp>
#include <boost/iostreams/stream.hpp>
#include <boost/uuid/uuid_generators.hpp>
#include <boost/uuid/uuid_io.hpp>
#include <tbb/atomic.h>
#ifdef _MSC_VER
#include <boost/interprocess/detail/os_file_functions.hpp> // open_existing_file(), close_file()
extern "C" __declspec(dllimport) bool __stdcall GetFileTime(
void* fh, void* ctime, void* atime, void* mtime);
// boost::interprocess::detail was renamed to boost::interprocess::ipcdetail in Boost 1.48.
// Ensure that both namespaces exist.
namespace boost { namespace interprocess { namespace detail {} namespace ipcdetail {} } }
#else
#include <sys/types.h> // for struct stat
#include <sys/stat.h> // for stat()
#include <unistd.h> // for unlink()
#endif
#include <algorithm> // for std::find_if()
#include <cerrno> // for errno
#include <cstdlib> // for getenv()
#include <cstring> // for std::memcpy()
#include <ctime> // for std::time()
#include <iostream>
#include <map>
#include <random>
#include <set>
#include <sstream>
#include <system_error> // for std::error_code()
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace io {
// Default compression: prefer Blosc when compiled in, otherwise ZLIB;
// in every configuration the active-mask flag is included.
#ifdef OPENVDB_USE_BLOSC
const uint32_t Archive::DEFAULT_COMPRESSION_FLAGS = (COMPRESS_BLOSC | COMPRESS_ACTIVE_MASK);
#else
#ifdef OPENVDB_USE_ZLIB
const uint32_t Archive::DEFAULT_COMPRESSION_FLAGS = (COMPRESS_ZIP | COMPRESS_ACTIVE_MASK);
#else
const uint32_t Archive::DEFAULT_COMPRESSION_FLAGS = (COMPRESS_ACTIVE_MASK);
#endif
#endif
namespace {
// Indices into a stream's internal extensible array of values used by readers and writers.
// A single global instance (sStreamState) reserves the iword/pword slots at startup.
struct StreamState
{
    static const long MAGIC_NUMBER;
    StreamState();
    ~StreamState();
    // Important: The size and order of these member variables must *only* change when
    //            OpenVDB ABI changes to avoid potential segfaults when performing I/O
    //            across two different versions of the library. Adding new member
    //            variables to the end of the struct is allowed provided that they
    //            are only accessed from within an appropriate ABI guard.
    int magicNumber;
    int fileVersion;
    int libraryMajorVersion;
    int libraryMinorVersion;
    int dataCompression;
    int writeGridStatsMetadata;
    int gridBackground;
    int gridClass;
    int halfFloat;
    int mappedFile;
    int metadata;
}
sStreamState;
// The magic number occupies both 32-bit halves of a 64-bit word.
const long StreamState::MAGIC_NUMBER =
    long((uint64_t(OPENVDB_MAGIC) << 32) | (uint64_t(OPENVDB_MAGIC)));
////////////////////////////////////////
// Reserve per-stream storage slots via std::ios_base::xalloc(), sharing slot
// indices with any coexisting copy of the library that registered first
// (detected through a magic number stored in std::cout's iword array).
StreamState::StreamState(): magicNumber(std::ios_base::xalloc())
{
    // Having reserved an entry (the one at index magicNumber) in the extensible array
    // associated with every stream, store a magic number at that location in the
    // array belonging to the cout stream.
    std::cout.iword(magicNumber) = MAGIC_NUMBER;
    std::cout.pword(magicNumber) = this;
    // Search for a lower-numbered entry in cout's array that already contains the magic number.
    /// @todo This assumes that the indices returned by xalloc() increase monotonically.
    int existingArray = -1;
    for (int i = 0; i < magicNumber; ++i) {
        if (std::cout.iword(i) == MAGIC_NUMBER) {
            existingArray = i;
            break;
        }
    }
    if (existingArray >= 0 && std::cout.pword(existingArray) != nullptr) {
        // If a lower-numbered entry was found to contain the magic number,
        // a coexisting version of this library must have registered it.
        // In that case, the corresponding pointer should point to an existing
        // StreamState struct.  Copy the other array indices from that StreamState
        // into this one, so as to share state with the other library.
        const StreamState& other =
            *static_cast<const StreamState*>(std::cout.pword(existingArray));
        fileVersion = other.fileVersion;
        libraryMajorVersion = other.libraryMajorVersion;
        libraryMinorVersion = other.libraryMinorVersion;
        dataCompression = other.dataCompression;
        writeGridStatsMetadata = other.writeGridStatsMetadata;
        gridBackground = other.gridBackground;
        gridClass = other.gridClass;
        if (other.mappedFile != 0) { // memory-mapped file support was added in OpenVDB 3.0.0
            mappedFile = other.mappedFile;
            metadata = other.metadata;
            halfFloat = other.halfFloat;
        } else {
            // The other library predates these slots; reserve fresh ones.
            mappedFile = std::ios_base::xalloc();
            metadata = std::ios_base::xalloc();
            halfFloat = std::ios_base::xalloc();
        }
    } else {
        // Reserve storage for per-stream file format and library version numbers
        // and other values of use to readers and writers.  Each of the following
        // values is an index into the extensible arrays associated with all streams.
        // The indices are common to all streams, but the values stored at those indices
        // are unique to each stream.
        fileVersion = std::ios_base::xalloc();
        libraryMajorVersion = std::ios_base::xalloc();
        libraryMinorVersion = std::ios_base::xalloc();
        dataCompression = std::ios_base::xalloc();
        writeGridStatsMetadata = std::ios_base::xalloc();
        gridBackground = std::ios_base::xalloc();
        gridClass = std::ios_base::xalloc();
        mappedFile = std::ios_base::xalloc();
        metadata = std::ios_base::xalloc();
        halfFloat = std::ios_base::xalloc();
    }
}
StreamState::~StreamState()
{
    // Ensure that this StreamState struct can no longer be accessed
    // (e.g., by a coexisting library that discovered it via the magic number).
    std::cout.iword(magicNumber) = 0;
    std::cout.pword(magicNumber) = nullptr;
}
} // unnamed namespace
////////////////////////////////////////
// Pimpl state for StreamMetadata.
struct StreamMetadata::Impl
{
    // Important: The size and order of these member variables must *only* change when
    //            OpenVDB ABI changes to avoid potential segfaults when performing I/O
    //            across two different versions of the library. Adding new member
    //            variables to the end of the struct is allowed provided that they
    //            are only accessed from within an appropriate ABI guard.
    uint32_t mFileVersion = OPENVDB_FILE_VERSION;
    VersionId mLibraryVersion = { OPENVDB_LIBRARY_MAJOR_VERSION, OPENVDB_LIBRARY_MINOR_VERSION };
    uint32_t mCompression = COMPRESS_NONE;
    uint32_t mGridClass = GRID_UNKNOWN;
    const void* mBackgroundPtr = nullptr; ///< @todo use Metadata::Ptr?
    bool mHalfFloat = false;
    bool mWriteGridStats = false;
    bool mSeekable = false;
    bool mCountingPasses = false;
    uint32_t mPass = 0;
    MetaMap mGridMetadata;  ///< metadata of the grid currently being read/written
    AuxDataMap mAuxData;    ///< arbitrary client data
    bool mDelayedLoadMeta = DelayedLoadMetadata::isRegisteredType();
    uint64_t mLeaf = 0;
    uint32_t mTest = 0; // for testing only
}; // struct StreamMetadata
/// Default-construct with the current library/file version and no compression.
StreamMetadata::StreamMetadata(): mImpl(new Impl)
{
}
/// Copy constructor (deep copy of the pimpl state).
StreamMetadata::StreamMetadata(const StreamMetadata& other): mImpl(new Impl(*other.mImpl))
{
}
/// Construct by capturing the metadata values already tagged onto the given stream.
StreamMetadata::StreamMetadata(std::ios_base& strm): mImpl(new Impl)
{
    mImpl->mFileVersion = getFormatVersion(strm);
    mImpl->mLibraryVersion = getLibraryVersion(strm);
    mImpl->mCompression = getDataCompression(strm);
    mImpl->mGridClass = getGridClass(strm);
    mImpl->mHalfFloat = getHalfFloat(strm);
    mImpl->mWriteGridStats = getWriteGridStatsMetadata(strm);
}
StreamMetadata::~StreamMetadata()
{
}
/// Assignment (deep copy of the pimpl state).
StreamMetadata&
StreamMetadata::operator=(const StreamMetadata& other)
{
    // Self-assignment is a no-op.
    if (this == &other) return *this;
    mImpl.reset(new Impl(*other.mImpl));
    return *this;
}
/// Copy this object's metadata items directly into the given stream's
/// iword/pword slots (legacy direct-transfer mechanism).
void
StreamMetadata::transferTo(std::ios_base& strm) const
{
    io::setVersion(strm, mImpl->mLibraryVersion, mImpl->mFileVersion);
    io::setDataCompression(strm, mImpl->mCompression);
    io::setGridBackgroundValuePtr(strm, mImpl->mBackgroundPtr);
    io::setGridClass(strm, mImpl->mGridClass);
    io::setHalfFloat(strm, mImpl->mHalfFloat);
    io::setWriteGridStatsMetadata(strm, mImpl->mWriteGridStats);
}
// Trivial getters forwarding to the pimpl state.
uint32_t StreamMetadata::fileVersion() const { return mImpl->mFileVersion; }
VersionId StreamMetadata::libraryVersion() const { return mImpl->mLibraryVersion; }
uint32_t StreamMetadata::compression() const { return mImpl->mCompression; }
uint32_t StreamMetadata::gridClass() const { return mImpl->mGridClass; }
const void* StreamMetadata::backgroundPtr() const { return mImpl->mBackgroundPtr; }
bool StreamMetadata::halfFloat() const { return mImpl->mHalfFloat; }
bool StreamMetadata::writeGridStats() const { return mImpl->mWriteGridStats; }
bool StreamMetadata::seekable() const { return mImpl->mSeekable; }
bool StreamMetadata::delayedLoadMeta() const { return mImpl->mDelayedLoadMeta; }
bool StreamMetadata::countingPasses() const { return mImpl->mCountingPasses; }
uint32_t StreamMetadata::pass() const { return mImpl->mPass; }
uint64_t StreamMetadata::leaf() const { return mImpl->mLeaf; }
MetaMap& StreamMetadata::gridMetadata() { return mImpl->mGridMetadata; }
const MetaMap& StreamMetadata::gridMetadata() const { return mImpl->mGridMetadata; }
uint32_t StreamMetadata::__test() const { return mImpl->mTest; } // for testing only
StreamMetadata::AuxDataMap& StreamMetadata::auxData() { return mImpl->mAuxData; }
const StreamMetadata::AuxDataMap& StreamMetadata::auxData() const { return mImpl->mAuxData; }
void StreamMetadata::setFileVersion(uint32_t v) { mImpl->mFileVersion = v; }
void StreamMetadata::setLibraryVersion(VersionId v) { mImpl->mLibraryVersion = v; }
void StreamMetadata::setCompression(uint32_t c) { mImpl->mCompression = c; }
void StreamMetadata::setGridClass(uint32_t c) { mImpl->mGridClass = c; }
void StreamMetadata::setBackgroundPtr(const void* ptr) { mImpl->mBackgroundPtr = ptr; }
void StreamMetadata::setHalfFloat(bool b) { mImpl->mHalfFloat = b; }
void StreamMetadata::setWriteGridStats(bool b) { mImpl->mWriteGridStats = b; }
void StreamMetadata::setSeekable(bool b) { mImpl->mSeekable = b; }
void StreamMetadata::setCountingPasses(bool b) { mImpl->mCountingPasses = b; }
void StreamMetadata::setPass(uint32_t i) { mImpl->mPass = i; }
void StreamMetadata::setLeaf(uint64_t i) { mImpl->mLeaf = i; }
void StreamMetadata::__setTest(uint32_t t) { mImpl->mTest = t; }
std::string
StreamMetadata::str() const
{
std::ostringstream ostr;
ostr << std::boolalpha;
ostr << "version: " << libraryVersion().first << "." << libraryVersion().second
<< "/" << fileVersion() << "\n";
ostr << "class: " << GridBase::gridClassToString(static_cast<GridClass>(gridClass())) << "\n";
ostr << "compression: " << compressionToString(compression()) << "\n";
ostr << "half_float: " << halfFloat() << "\n";
ostr << "seekable: " << seekable() << "\n";
ostr << "delayed_load_meta: " << delayedLoadMeta() << "\n";
ostr << "pass: " << pass() << "\n";
ostr << "counting_passes: " << countingPasses() << "\n";
ostr << "write_grid_stats_metadata: " << writeGridStats() << "\n";
if (!auxData().empty()) ostr << auxData();
if (gridMetadata().metaCount() != 0) {
ostr << "grid_metadata:\n" << gridMetadata().str(/*indent=*/" ");
}
return ostr.str();
}
/// Stream-insertion operator; delegates to StreamMetadata::str().
std::ostream&
operator<<(std::ostream& os, const StreamMetadata& meta)
{
    return os << meta.str();
}
namespace {
/// If @a val holds a T, print it to @a os and return true; otherwise return false.
/// Uses the pointer form of any_cast, which yields null (instead of throwing)
/// when the stored type is not T — equivalent to the explicit type() check.
template<typename T>
inline bool
writeAsType(std::ostream& os, const boost::any& val)
{
    if (const T* typedVal = boost::any_cast<T>(&val)) {
        os << *typedVal;
        return true;
    }
    return false;
}
// Functor applied (via GridBase::apply) to a typed grid to populate a
// DelayedLoadMetadata object with per-leaf mask and compressed-size info,
// which later allows leaf buffers to be skipped/seeked over on read.
struct PopulateDelayedLoadMetadataOp
{
    DelayedLoadMetadata& metadata;  // output: filled per leaf node
    uint32_t compression;           // active compression flags for this grid
    PopulateDelayedLoadMetadataOp(DelayedLoadMetadata& _metadata, uint32_t _compression)
        : metadata(_metadata)
        , compression(_compression) { }
    template<typename GridT>
    void operator()(const GridT& grid) const
    {
        using TreeT = typename GridT::TreeType;
        using ValueT = typename TreeT::ValueType;
        using LeafT = typename TreeT::LeafNodeType;
        using MaskT = typename LeafT::NodeMaskType;
        const TreeT& tree = grid.constTree();
        const Index32 leafCount = tree.leafCount();
        // early exit if not leaf nodes
        if (leafCount == Index32(0)) return;
        metadata.resizeMask(leafCount);
        // Compressed sizes are only meaningful when a compression codec is active.
        if (compression & (COMPRESS_BLOSC | COMPRESS_ZIP)) {
            metadata.resizeCompressedSize(leafCount);
        }
        const auto background = tree.background();
        const bool saveFloatAsHalf = grid.saveFloatAsHalf();
        // LeafManager provides stable per-leaf indices, so each task writes
        // to its own slot of the metadata arrays (thread-safe by construction).
        tree::LeafManager<const TreeT> leafManager(tree);
        leafManager.foreach(
            [&](const LeafT& leaf, size_t idx) {
                // set mask value
                MaskCompress<ValueT, MaskT> maskCompressData(
                    leaf.valueMask(), /*childMask=*/MaskT(), leaf.buffer().data(), background);
                metadata.setMask(idx, maskCompressData.metadata);
                if (compression & (COMPRESS_BLOSC | COMPRESS_ZIP)) {
                    // set compressed size value
                    // NOTE(review): the extra 8 bytes presumably account for a
                    // per-leaf size header written alongside the compressed
                    // buffer — confirm against writeCompressedValues().
                    size_t sizeBytes(8);
                    size_t compressedSize = io::writeCompressedValuesSize(
                        leaf.buffer().data(), LeafT::SIZE,
                        leaf.valueMask(), maskCompressData.metadata, saveFloatAsHalf, compression);
                    metadata.setCompressedSize(idx, compressedSize+sizeBytes);
                }
            }
        );
    }
};
/// Fill @a metadata for @a gridBase if its concrete type is one of the
/// supported numeric/vector grid types; returns false otherwise.
bool populateDelayedLoadMetadata(DelayedLoadMetadata& metadata,
    const GridBase& gridBase, uint32_t compression)
{
    // Only these value types participate in delayed-load metadata.
    using AllowedTypes = TypeList<
        Int32Grid, Int64Grid,
        FloatGrid, DoubleGrid,
        Vec3IGrid, Vec3SGrid, Vec3DGrid>;
    PopulateDelayedLoadMetadataOp op(metadata, compression);
    return gridBase.apply<AllowedTypes>(op);
}
} // unnamed namespace
/// Print each aux-data entry as "key: value", one per line.
/// Values whose type is not in the known list are shown as "<typename>(...)".
std::ostream&
operator<<(std::ostream& os, const StreamMetadata::AuxDataMap& auxData)
{
    for (const auto& entry : auxData) {
        os << entry.first << ": ";
        // Note: boost::any doesn't support serialization.
        const boost::any& val = entry.second;
        if (!writeAsType<int32_t>(os, val)
            && !writeAsType<int64_t>(os, val)
            && !writeAsType<int16_t>(os, val)
            && !writeAsType<int8_t>(os, val)
            && !writeAsType<uint32_t>(os, val)
            && !writeAsType<uint64_t>(os, val)
            && !writeAsType<uint16_t>(os, val)
            && !writeAsType<uint8_t>(os, val)
            && !writeAsType<float>(os, val)
            && !writeAsType<double>(os, val)
            && !writeAsType<long double>(os, val)
            && !writeAsType<bool>(os, val)
            && !writeAsType<std::string>(os, val)
            && !writeAsType<const char*>(os, val))
        {
            os << val.type().name() << "(...)";
        }
        os << "\n";
    }
    return os;
}
////////////////////////////////////////
// Memory-mapping a VDB file permits threaded input (and output, potentially,
// though that might not be practical for compressed files or files containing
// multiple grids). In particular, a memory-mapped file can be loaded lazily,
// meaning that the voxel buffers of the leaf nodes of a grid's tree are not allocated
// until they are actually accessed. When access to its buffer is requested,
// a leaf node allocates memory for the buffer and then streams in (and decompresses)
// its contents from the memory map, starting from a stream offset that was recorded
// at the time the node was constructed. The memory map must persist as long as
// there are unloaded leaf nodes; this is ensured by storing a shared pointer
// to the map in each unloaded node.
// Private implementation of MappedFile: owns the boost.interprocess mapping
// of the file and tracks auto-deletion and modification-time state.
class MappedFile::Impl
{
public:
    // Map the whole file read-only.  If @a autoDelete is set, the file is a
    // private temporary that should disappear when this object is destroyed.
    Impl(const std::string& filename, bool autoDelete)
        : mMap(filename.c_str(), boost::interprocess::read_only)
        , mRegion(mMap, boost::interprocess::read_only)
        , mAutoDelete(autoDelete)
    {
        mLastWriteTime = this->getLastWriteTime();
        if (mAutoDelete) {
#ifndef _MSC_VER
            // On Unix systems, unlink the file so that it gets deleted once it is closed.
            ::unlink(mMap.get_name());
#endif
        }
    }
    ~Impl()
    {
        std::string filename;
        if (const char* s = mMap.get_name()) filename = s;
        OPENVDB_LOG_DEBUG_RUNTIME("closing memory-mapped file " << filename);
        // Invoke the user-supplied close callback, if any, before removal.
        if (mNotifier) mNotifier(filename);
        if (mAutoDelete) {
            // On Windows the file wasn't unlinked at construction, so remove it now.
            if (!boost::interprocess::file_mapping::remove(filename.c_str())) {
                if (errno != ENOENT) {
                    // Warn if the file exists but couldn't be removed.
                    std::string mesg = getErrorString();
                    if (!mesg.empty()) mesg = " (" + mesg + ")";
                    OPENVDB_LOG_WARN("failed to remove temporary file " << filename << mesg);
                }
            }
        }
    }
    // Return the file's last-modification time, or 0 if it can't be determined.
    // On Windows this is a FILETIME packed into 64 bits; on Unix it is st_mtime.
    // The two representations are never compared with each other, only with
    // later values from the same platform.
    Index64 getLastWriteTime() const
    {
        Index64 result = 0;
        const char* filename = mMap.get_name();
#ifdef _MSC_VER
        // boost::interprocess::detail was renamed to boost::interprocess::ipcdetail in Boost 1.48.
        using namespace boost::interprocess::detail;
        using namespace boost::interprocess::ipcdetail;
        if (void* fh = open_existing_file(filename, boost::interprocess::read_only)) {
            struct { unsigned long lo, hi; } mtime; // Windows FILETIME struct
            if (GetFileTime(fh, nullptr, nullptr, &mtime)) {
                result = (Index64(mtime.hi) << 32) | mtime.lo;
            }
            close_file(fh);
        }
#else
        struct stat info;
        if (0 == ::stat(filename, &info)) {
            result = Index64(info.st_mtime);
        }
#endif
        return result;
    }
    boost::interprocess::file_mapping mMap;     // OS-level file mapping handle
    boost::interprocess::mapped_region mRegion; // the mapped address range
    bool mAutoDelete;                           // remove the file on destruction?
    Notifier mNotifier;                         // optional close callback
    // Cached mtime; atomic because createBuffer() may reset it from any thread.
    mutable tbb::atomic<Index64> mLastWriteTime;
private:
    Impl(const Impl&); // not copyable
    Impl& operator=(const Impl&); // not copyable
};
// Public wrapper: all state lives in the Impl.
MappedFile::MappedFile(const std::string& filename, bool autoDelete):
    mImpl(new Impl(filename, autoDelete))
{
}
// Out-of-line destructor so that Impl can remain incomplete in the header.
MappedFile::~MappedFile()
{
}
/// Return the name of the mapped file, or the empty string if unavailable
/// (file_mapping::get_name() may return null).
std::string
MappedFile::filename() const
{
    const char* name = mImpl->mMap.get_name();
    return name ? std::string(name) : std::string();
}
// Create a read-only stream buffer over the mapped region.  Multiple buffers
// may coexist, allowing threaded reads from the same mapping.
SharedPtr<std::streambuf>
MappedFile::createBuffer() const
{
    if (!mImpl->mAutoDelete && mImpl->mLastWriteTime > 0) {
        // Warn if the file has been modified since it was opened
        // (but don't bother checking if it is a private, temporary file).
        if (mImpl->getLastWriteTime() > mImpl->mLastWriteTime) {
            OPENVDB_LOG_WARN("file " << this->filename() << " might have changed on disk"
                << " since it was opened");
            mImpl->mLastWriteTime = 0; // suppress further warnings
        }
    }
    // Wrap the raw mapped bytes in a boost.iostreams array_source; the buffer
    // does not own the memory, so the MappedFile must outlive it.
    return SharedPtr<std::streambuf>{
        new boost::iostreams::stream_buffer<boost::iostreams::array_source>{
            static_cast<const char*>(mImpl->mRegion.get_address()), mImpl->mRegion.get_size()}};
}
// Register a callback to be invoked (with the filename) when the mapping closes.
void
MappedFile::setNotifier(const Notifier& notifier)
{
    mImpl->mNotifier = notifier;
}
// Remove any previously registered close callback.
void
MappedFile::clearNotifier()
{
    mImpl->mNotifier = nullptr;
}
////////////////////////////////////////
/// Return a human-readable description of the given POSIX error number
/// (a portable, thread-safe alternative to strerror()).
std::string
getErrorString(int errorNum)
{
    const std::error_code code(errorNum, std::generic_category());
    return code.message();
}
/// Return a human-readable description of the most recent error (from errno).
std::string
getErrorString()
{
    return getErrorString(errno);
}
////////////////////////////////////////
// Initialize an archive with the current library's format/version defaults.
// The UUID starts nil and is regenerated each time a header is written.
Archive::Archive()
    : mFileVersion(OPENVDB_FILE_VERSION)
    , mLibraryVersion(OPENVDB_LIBRARY_MAJOR_VERSION, OPENVDB_LIBRARY_MINOR_VERSION)
    , mUuid(boost::uuids::nil_uuid())
    , mInputHasGridOffsets(false)
    , mEnableInstancing(true)
    , mCompression(DEFAULT_COMPRESSION_FLAGS)
    , mEnableGridStats(true)
{
}
Archive::~Archive()
{
}
/// Return a new archive that duplicates this one's settings.
/// (Subclasses override this to clone their own state.)
Archive::Ptr
Archive::copy() const
{
    Archive* replica = new Archive(*this);
    return Archive::Ptr(replica);
}
////////////////////////////////////////
/// Return this archive's 128-bit UUID serialized as a string.
std::string
Archive::getUniqueTag() const
{
    return boost::uuids::to_string(mUuid);
}
/// Return true if @a uuidStr matches this archive's UUID string.
bool
Archive::isIdentical(const std::string& uuidStr) const
{
    return getUniqueTag() == uuidStr;
}
////////////////////////////////////////
// Return the file format version number tagged onto @a is.
uint32_t
getFormatVersion(std::ios_base& is)
{
    /// @todo get from StreamMetadata
    return static_cast<uint32_t>(is.iword(sStreamState.fileVersion));
}
// Tag @a is with this archive's file format version (legacy iword slot plus
// the StreamMetadata object, if one is attached).
void
Archive::setFormatVersion(std::istream& is)
{
    is.iword(sStreamState.fileVersion) = mFileVersion; ///< @todo remove
    if (StreamMetadata::Ptr meta = getStreamMetadataPtr(is)) {
        meta->setFileVersion(mFileVersion);
    }
}
// Return the (major, minor) library version tagged onto @a is.
VersionId
getLibraryVersion(std::ios_base& is)
{
    /// @todo get from StreamMetadata
    VersionId version;
    version.first = static_cast<uint32_t>(is.iword(sStreamState.libraryMajorVersion));
    version.second = static_cast<uint32_t>(is.iword(sStreamState.libraryMinorVersion));
    return version;
}
// Tag @a is with this archive's library version (legacy iword slots plus
// the StreamMetadata object, if one is attached).
void
Archive::setLibraryVersion(std::istream& is)
{
    is.iword(sStreamState.libraryMajorVersion) = mLibraryVersion.first; ///< @todo remove
    is.iword(sStreamState.libraryMinorVersion) = mLibraryVersion.second; ///< @todo remove
    if (StreamMetadata::Ptr meta = getStreamMetadataPtr(is)) {
        meta->setLibraryVersion(mLibraryVersion);
    }
}
std::string
getVersion(std::ios_base& is)
{
VersionId version = getLibraryVersion(is);
std::ostringstream ostr;
ostr << version.first << "." << version.second << "/" << getFormatVersion(is);
return ostr.str();
}
// Tag @a is with the current library's version numbers
// (both the legacy iword slots and the StreamMetadata object, if any).
void
setCurrentVersion(std::istream& is)
{
    is.iword(sStreamState.fileVersion) = OPENVDB_FILE_VERSION; ///< @todo remove
    is.iword(sStreamState.libraryMajorVersion) = OPENVDB_LIBRARY_MAJOR_VERSION; ///< @todo remove
    is.iword(sStreamState.libraryMinorVersion) = OPENVDB_LIBRARY_MINOR_VERSION; ///< @todo remove
    if (StreamMetadata::Ptr meta = getStreamMetadataPtr(is)) {
        meta->setFileVersion(OPENVDB_FILE_VERSION);
        meta->setLibraryVersion(VersionId(
            OPENVDB_LIBRARY_MAJOR_VERSION, OPENVDB_LIBRARY_MINOR_VERSION));
    }
}
// Tag @a strm with the given library and file format versions
// (both the legacy iword slots and the StreamMetadata object, if any).
void
setVersion(std::ios_base& strm, const VersionId& libraryVersion, uint32_t fileVersion)
{
    strm.iword(sStreamState.fileVersion) = fileVersion; ///< @todo remove
    strm.iword(sStreamState.libraryMajorVersion) = libraryVersion.first; ///< @todo remove
    strm.iword(sStreamState.libraryMinorVersion) = libraryVersion.second; ///< @todo remove
    if (StreamMetadata::Ptr meta = getStreamMetadataPtr(strm)) {
        meta->setFileVersion(fileVersion);
        meta->setLibraryVersion(libraryVersion);
    }
}
/// Return this archive's version info formatted as "<libMajor>.<libMinor>/<fileVersion>".
std::string
Archive::version() const
{
    return std::to_string(mLibraryVersion.first) + "." + std::to_string(mLibraryVersion.second)
        + "/" + std::to_string(mFileVersion);
}
////////////////////////////////////////
// Return the compression flag bits tagged onto @a strm.
uint32_t
getDataCompression(std::ios_base& strm)
{
    /// @todo get from StreamMetadata
    return uint32_t(strm.iword(sStreamState.dataCompression));
}
// Tag @a strm with the given compression flags: updates both the legacy
// iword slot and the StreamMetadata object, if one is attached.
void
setDataCompression(std::ios_base& strm, uint32_t c)
{
    strm.iword(sStreamState.dataCompression) = c; ///< @todo remove
    if (StreamMetadata::Ptr meta = getStreamMetadataPtr(strm)) {
        meta->setCompression(c);
    }
}
// Tag the input stream with this archive's compression settings.
//
// io::setDataCompression() already updates both the legacy iword slot and the
// stream's StreamMetadata (when one is attached), so the explicit
// meta->setCompression() call that used to follow it here was redundant and
// has been removed.
void
Archive::setDataCompression(std::istream& is)
{
    io::setDataCompression(is, mCompression);
}
// Compile-time capability query: true if this build was configured with Blosc.
//static
bool
Archive::hasBloscCompression()
{
#ifdef OPENVDB_USE_BLOSC
    return true;
#else
    return false;
#endif
}
// Compile-time capability query: true if this build was configured with ZLIB.
//static
bool
Archive::hasZLibCompression()
{
#ifdef OPENVDB_USE_ZLIB
    return true;
#else
    return false;
#endif
}
// Decide the effective compression flags for @a grid, tag @a os with them,
// and write them to the stream (per-grid compression, file version >= 222).
void
Archive::setGridCompression(std::ostream& os, const GridBase& grid) const
{
    // Start with the options that are enabled globally for this archive.
    uint32_t c = compression();
    // Disable options that are inappropriate for the given grid.
    // (The switch deliberately lists every GridClass so the compiler can
    // warn if a new class is added without updating this policy.)
    switch (grid.getGridClass()) {
        case GRID_LEVEL_SET:
        case GRID_FOG_VOLUME:
            // ZLIB compression is not used on level sets or fog volumes.
            c = c & ~COMPRESS_ZIP;
            break;
        case GRID_STAGGERED:
        case GRID_UNKNOWN:
            break;
    }
    io::setDataCompression(os, c);
    // Persist the per-grid flags so readers can restore them.
    os.write(reinterpret_cast<const char*>(&c), sizeof(uint32_t));
}
/// Read a grid's per-grid compression flags from @a is and tag the stream
/// with them.  Files older than the node-mask-compression version store no
/// per-grid flags, so those streams are left untouched.
void
Archive::readGridCompression(std::istream& is)
{
    if (getFormatVersion(is) < OPENVDB_FILE_VERSION_NODE_MASK_COMPRESSION) return;
    uint32_t compressionFlags = COMPRESS_NONE;
    is.read(reinterpret_cast<char*>(&compressionFlags), sizeof(uint32_t));
    io::setDataCompression(is, compressionFlags);
}
////////////////////////////////////////
// Return whether grid-statistics metadata should be written, as tagged on @a strm.
bool
getWriteGridStatsMetadata(std::ios_base& strm)
{
    /// @todo get from StreamMetadata
    return strm.iword(sStreamState.writeGridStatsMetadata) != 0;
}
// Tag @a strm with the grid-stats flag: updates both the legacy iword slot
// and the StreamMetadata object, if one is attached.
void
setWriteGridStatsMetadata(std::ios_base& strm, bool writeGridStats)
{
    strm.iword(sStreamState.writeGridStatsMetadata) = writeGridStats; ///< @todo remove
    if (StreamMetadata::Ptr meta = getStreamMetadataPtr(strm)) {
        meta->setWriteGridStats(writeGridStats);
    }
}
////////////////////////////////////////
// Return the grid class tagged onto @a strm; out-of-range values
// (e.g. a never-initialized slot) are mapped to GRID_UNKNOWN.
uint32_t
getGridClass(std::ios_base& strm)
{
    /// @todo get from StreamMetadata
    const uint32_t val = static_cast<uint32_t>(strm.iword(sStreamState.gridClass));
    if (val >= NUM_GRID_CLASSES) return GRID_UNKNOWN;
    return val;
}
// Tag @a strm with the given grid class: updates both the legacy iword slot
// and the StreamMetadata object, if one is attached.
void
setGridClass(std::ios_base& strm, uint32_t cls)
{
    strm.iword(sStreamState.gridClass) = long(cls); ///< @todo remove
    if (StreamMetadata::Ptr meta = getStreamMetadataPtr(strm)) {
        meta->setGridClass(cls);
    }
}
// Return whether float values are stored as 16-bit halves, as tagged on @a strm.
bool
getHalfFloat(std::ios_base& strm)
{
    /// @todo get from StreamMetadata
    return strm.iword(sStreamState.halfFloat) != 0;
}
// Tag @a strm with the half-float flag: updates both the legacy iword slot
// and the StreamMetadata object, if one is attached.
void
setHalfFloat(std::ios_base& strm, bool halfFloat)
{
    strm.iword(sStreamState.halfFloat) = halfFloat; ///< @todo remove
    if (StreamMetadata::Ptr meta = getStreamMetadataPtr(strm)) {
        meta->setHalfFloat(halfFloat);
    }
}
// Return the (type-erased, non-owning) pointer to the current grid's
// background value, as tagged on @a strm; may be null.
const void*
getGridBackgroundValuePtr(std::ios_base& strm)
{
    /// @todo get from StreamMetadata
    return strm.pword(sStreamState.gridBackground);
}
// Tag @a strm with a pointer to the current grid's background value.
// The caller retains ownership and must keep the value alive while tagged.
void
setGridBackgroundValuePtr(std::ios_base& strm, const void* background)
{
    strm.pword(sStreamState.gridBackground) = const_cast<void*>(background); ///< @todo remove
    if (StreamMetadata::Ptr meta = getStreamMetadataPtr(strm)) {
        meta->setBackgroundPtr(background);
    }
}
// Return the MappedFile shared pointer tagged onto @a strm, or an empty
// pointer if the stream isn't backed by a memory-mapped file.
MappedFile::Ptr
getMappedFilePtr(std::ios_base& strm)
{
    if (const void* ptr = strm.pword(sStreamState.mappedFile)) {
        return *static_cast<const MappedFile::Ptr*>(ptr);
    }
    return MappedFile::Ptr();
}
// Tag @a strm with the address of @a mappedFile.  Note that the stream stores
// a pointer to the caller's shared_ptr, so @a mappedFile must outlive the tag.
void
setMappedFilePtr(std::ios_base& strm, io::MappedFile::Ptr& mappedFile)
{
    strm.pword(sStreamState.mappedFile) = &mappedFile;
}
// Return the StreamMetadata shared pointer tagged onto @a strm,
// or an empty pointer if none is attached.
StreamMetadata::Ptr
getStreamMetadataPtr(std::ios_base& strm)
{
    if (const void* ptr = strm.pword(sStreamState.metadata)) {
        return *static_cast<const StreamMetadata::Ptr*>(ptr);
    }
    return StreamMetadata::Ptr();
}
// Tag @a strm with the address of @a meta.  As with setMappedFilePtr(), the
// caller's shared_ptr must outlive the tag.  If @a transfer is true, also
// copy the metadata's settings into the stream's legacy iword/pword slots.
void
setStreamMetadataPtr(std::ios_base& strm, StreamMetadata::Ptr& meta, bool transfer)
{
    strm.pword(sStreamState.metadata) = &meta;
    if (transfer && meta) meta->transferTo(strm);
}
// Detach and return any StreamMetadata previously tagged onto @a strm.
StreamMetadata::Ptr
clearStreamMetadataPtr(std::ios_base& strm)
{
    StreamMetadata::Ptr result = getStreamMetadataPtr(strm);
    strm.pword(sStreamState.metadata) = nullptr;
    return result;
}
////////////////////////////////////////
// Parse the fixed file-level header of a VDB stream and populate this
// archive's version, compression and UUID state.  Throws IoError if the
// magic number doesn't match.  Returns true if the stream's UUID differs
// from the archive's previous UUID (i.e., this is a different file).
bool
Archive::readHeader(std::istream& is)
{
    // 1) Read the magic number for VDB.
    int64_t magic;
    is.read(reinterpret_cast<char*>(&magic), sizeof(int64_t));
    if (magic != OPENVDB_MAGIC) {
        OPENVDB_THROW(IoError, "not a VDB file");
    }
    // 2) Read the file format version number.
    is.read(reinterpret_cast<char*>(&mFileVersion), sizeof(uint32_t));
    if (mFileVersion > OPENVDB_FILE_VERSION) {
        // Newer file than this library: warn but attempt to read anyway.
        OPENVDB_LOG_WARN("unsupported VDB file format (expected version "
            << OPENVDB_FILE_VERSION << " or earlier, got version " << mFileVersion << ")");
    } else if (mFileVersion < 211) {
        // Versions prior to 211 stored separate major, minor and patch numbers.
        // Collapse them into a single number (major*100 + minor*10 + patch).
        uint32_t version;
        is.read(reinterpret_cast<char*>(&version), sizeof(uint32_t));
        mFileVersion = 100 * mFileVersion + 10 * version;
        is.read(reinterpret_cast<char*>(&version), sizeof(uint32_t));
        mFileVersion += version;
    }
    // 3) Read the library version numbers (not stored prior to file format version 211).
    mLibraryVersion.first = mLibraryVersion.second = 0;
    if (mFileVersion >= 211) {
        uint32_t version;
        is.read(reinterpret_cast<char*>(&version), sizeof(uint32_t));
        mLibraryVersion.first = version; // major version
        is.read(reinterpret_cast<char*>(&version), sizeof(uint32_t));
        mLibraryVersion.second = version; // minor version
    }
    // 4) Read the flag indicating whether the stream supports partial reading.
    //    (Versions prior to 212 have no flag because they always supported partial reading.)
    mInputHasGridOffsets = true;
    if (mFileVersion >= 212) {
        char hasGridOffsets;
        is.read(&hasGridOffsets, sizeof(char));
        mInputHasGridOffsets = hasGridOffsets;
    }
    // 5) Read the flag that indicates whether data is compressed.
    //    (From version 222 on, compression information is stored per grid.)
    mCompression = DEFAULT_COMPRESSION_FLAGS;
    if (mFileVersion < OPENVDB_FILE_VERSION_BLOSC_COMPRESSION) {
        // Prior to the introduction of Blosc, ZLIB was the default compression scheme.
        mCompression = (COMPRESS_ZIP | COMPRESS_ACTIVE_MASK);
    }
    if (mFileVersion >= OPENVDB_FILE_VERSION_SELECTIVE_COMPRESSION &&
        mFileVersion < OPENVDB_FILE_VERSION_NODE_MASK_COMPRESSION)
    {
        char isCompressed;
        is.read(&isCompressed, sizeof(char));
        mCompression = (isCompressed != 0 ? COMPRESS_ZIP : COMPRESS_NONE);
    }
    // 6) Read the 16-byte (128-bit) uuid.
    boost::uuids::uuid oldUuid = mUuid;
    if (mFileVersion >= OPENVDB_FILE_VERSION_BOOST_UUID) {
        // UUID is stored as an ASCII string.
        is >> mUuid;
    } else {
        // Older versions stored the UUID as a byte string.
        char uuidBytes[16];
        is.read(uuidBytes, 16);
        std::memcpy(&mUuid.data[0], uuidBytes, std::min<size_t>(16, mUuid.size()));
    }
    return oldUuid != mUuid; // true if UUID in input stream differs from old UUID
}
// Write the fixed file-level header: magic number, versions, the
// partial-read flag and a freshly generated UUID.  Field order must match
// readHeader() exactly.
void
Archive::writeHeader(std::ostream& os, bool seekable) const
{
    // 1) Write the magic number for VDB.
    int64_t magic = OPENVDB_MAGIC;
    os.write(reinterpret_cast<char*>(&magic), sizeof(int64_t));
    // 2) Write the file format version number.
    uint32_t version = OPENVDB_FILE_VERSION;
    os.write(reinterpret_cast<char*>(&version), sizeof(uint32_t));
    // 3) Write the library version numbers.
    version = OPENVDB_LIBRARY_MAJOR_VERSION;
    os.write(reinterpret_cast<char*>(&version), sizeof(uint32_t));
    version = OPENVDB_LIBRARY_MINOR_VERSION;
    os.write(reinterpret_cast<char*>(&version), sizeof(uint32_t));
    // 4) Write a flag indicating that this stream contains no grid offsets.
    char hasGridOffsets = seekable;
    os.write(&hasGridOffsets, sizeof(char));
    // 5) Write a flag indicating that this stream contains compressed leaf data.
    //    (Omitted as of version 222)
    // 6) Generate a new random 16-byte (128-bit) uuid and write it to the stream.
    // Seed from both the system random device and the clock to reduce the
    // chance of collisions across processes.
    std::mt19937 ran;
    ran.seed(std::mt19937::result_type(std::random_device()() + std::time(nullptr)));
    boost::uuids::basic_random_generator<std::mt19937> gen(&ran);
    mUuid = gen(); // mUuid is mutable
    os << mUuid;
}
////////////////////////////////////////
/// Read and return the grid count stored as a raw binary int32
/// immediately after the file-level metadata.
int32_t
Archive::readGridCount(std::istream& is)
{
    int32_t count = 0;
    is.read(reinterpret_cast<char*>(&count), sizeof(int32_t));
    return count;
}
////////////////////////////////////////
// Resolve a grid instance: look up the instance parent named in @a gd and
// either share or copy its tree into the instance grid.  Throws KeyError if
// the parent is missing from @a grids.
void
Archive::connectInstance(const GridDescriptor& gd, const NamedGridMap& grids) const
{
    if (!gd.isInstance() || grids.empty()) return;
    NamedGridMap::const_iterator it = grids.find(gd.uniqueName());
    if (it == grids.end()) return;
    GridBase::Ptr grid = it->second;
    if (!grid) return;
    it = grids.find(gd.instanceParentName());
    if (it != grids.end()) {
        GridBase::Ptr parent = it->second;
        if (mEnableInstancing) {
            // Share the instance parent's tree.
            grid->setTree(parent->baseTreePtr());
        } else {
            // Copy the instance parent's tree.
            grid->setTree(parent->baseTree().copy());
        }
    } else {
        OPENVDB_THROW(KeyError, "missing instance parent \""
            << GridDescriptor::nameAsString(gd.instanceParentName())
            << "\" for grid " << GridDescriptor::nameAsString(gd.uniqueName()));
    }
}
////////////////////////////////////////
// Delayed (lazy) loading of leaf buffers is on by default; setting the
// OPENVDB_DISABLE_DELAYED_LOAD environment variable turns it off.
//static
bool
Archive::isDelayedLoadingEnabled()
{
    return std::getenv("OPENVDB_DISABLE_DELAYED_LOAD") == nullptr;
}
namespace {
// Tag type selecting the "read everything" overload of Local::readBuffers().
struct NoBBox {};
// Core grid-reading routine shared by the Archive::readGrid() overloads.
// @a bbox is NoBBox (read all), a CoordBBox (clip in index space) or a
// BBoxd (clip in world space); dispatch happens via Local::readBuffers().
template<typename BoxType>
void
doReadGrid(GridBase::Ptr grid, const GridDescriptor& gd, std::istream& is, const BoxType& bbox)
{
    struct Local {
        static void readBuffers(GridBase& g, std::istream& istrm, NoBBox) { g.readBuffers(istrm); }
        static void readBuffers(GridBase& g, std::istream& istrm, const CoordBBox& indexBBox) {
            g.readBuffers(istrm, indexBBox);
        }
        static void readBuffers(GridBase& g, std::istream& istrm, const BBoxd& worldBBox) {
            g.readBuffers(istrm, g.constTransform().worldToIndexNodeCentered(worldBBox));
        }
    };
    // Restore the file-level stream metadata on exit.
    struct OnExit {
        OnExit(std::ios_base& strm_): strm(&strm_), ptr(strm_.pword(sStreamState.metadata)) {}
        ~OnExit() { strm->pword(sStreamState.metadata) = ptr; }
        std::ios_base* strm;
        void* ptr;
    };
    OnExit restore(is);
    // Stream metadata varies per grid, and it needs to persist
    // in case delayed load is in effect.
    io::StreamMetadata::Ptr streamMetadata;
    if (io::StreamMetadata::Ptr meta = io::getStreamMetadataPtr(is)) {
        // Make a grid-level copy of the file-level stream metadata.
        streamMetadata.reset(new StreamMetadata(*meta));
    } else {
        streamMetadata.reset(new StreamMetadata);
    }
    streamMetadata->setHalfFloat(grid->saveFloatAsHalf());
    io::setStreamMetadataPtr(is, streamMetadata, /*transfer=*/false);
    // Reset per-grid stream tags before reading this grid's metadata.
    io::setGridClass(is, GRID_UNKNOWN);
    io::setGridBackgroundValuePtr(is, nullptr);
    grid->readMeta(is);
    // Add a description of the compression settings to the grid as metadata.
    /// @todo Would this be useful?
    //const uint32_t c = getDataCompression(is);
    //grid->insertMeta(GridBase::META_FILE_COMPRESSION,
    //    StringMetadata(compressionToString(c)));
    const VersionId version = getLibraryVersion(is);
    if (version.first < 6 || (version.first == 6 && version.second <= 1)) {
        // If delay load metadata exists, but the file format version does not support
        // delay load metadata, this likely means the original grid was read and then
        // written using a prior version of OpenVDB and ABI>=5 where unknown metadata
        // can be blindly copied. This means that it is possible for the metadata to
        // no longer be in sync with the grid, so we remove it to ensure correctness.
        if ((*grid)[GridBase::META_FILE_DELAYED_LOAD]) {
            grid->removeMeta(GridBase::META_FILE_DELAYED_LOAD);
        }
    }
    streamMetadata->gridMetadata() = static_cast<MetaMap&>(*grid);
    const GridClass gridClass = grid->getGridClass();
    io::setGridClass(is, gridClass);
    // reset leaf value to zero
    streamMetadata->setLeaf(0);
    // drop DelayedLoadMetadata from the grid as it is only useful for IO
    // a stream metadata non-zero value disables this behaviour for testing
    if (streamMetadata->__test() == uint32_t(0)) {
        if ((*grid)[GridBase::META_FILE_DELAYED_LOAD]) {
            grid->removeMeta(GridBase::META_FILE_DELAYED_LOAD);
        }
    }
    if (getFormatVersion(is) >= OPENVDB_FILE_VERSION_GRID_INSTANCING) {
        grid->readTransform(is);
        // Instance grids carry no topology/buffers of their own;
        // their tree is attached later by connectInstance().
        if (!gd.isInstance()) {
            grid->readTopology(is);
            Local::readBuffers(*grid, is, bbox);
        }
    } else {
        // Older versions of the library stored the transform after the topology.
        grid->readTopology(is);
        grid->readTransform(is);
        Local::readBuffers(*grid, is, bbox);
    }
    if (getFormatVersion(is) < OPENVDB_FILE_VERSION_NO_GRIDMAP) {
        // Older versions of the library didn't store grid names as metadata,
        // so when reading older files, copy the grid name from the descriptor
        // to the grid's metadata.
        if (grid->getName().empty()) {
            grid->setName(gd.gridName());
        }
    }
}
} // unnamed namespace
// Read a whole grid (metadata, transform, topology and buffers) from @a is.
void
Archive::readGrid(GridBase::Ptr grid, const GridDescriptor& gd, std::istream& is)
{
    // Read the compression settings for this grid and tag the stream with them
    // so that downstream functions can reference them.
    readGridCompression(is);
    doReadGrid(grid, gd, is, NoBBox());
}
// As above, but clip the read to @a worldBBox (world-space coordinates).
void
Archive::readGrid(GridBase::Ptr grid, const GridDescriptor& gd,
    std::istream& is, const BBoxd& worldBBox)
{
    readGridCompression(is);
    doReadGrid(grid, gd, is, worldBBox);
}
// As above, but clip the read to @a indexBBox (index-space coordinates).
void
Archive::readGrid(GridBase::Ptr grid, const GridDescriptor& gd,
    std::istream& is, const CoordBBox& indexBBox)
{
    readGridCompression(is);
    doReadGrid(grid, gd, is, indexBBox);
}
////////////////////////////////////////
void
Archive::write(std::ostream& os, const GridPtrVec& grids, bool seekable,
const MetaMap& metadata) const
{
this->write(os, GridCPtrVec(grids.begin(), grids.end()), seekable, metadata);
}
// Serialize the archive header, file-level metadata and all non-null grids
// to @a os.  Grids that share a tree are written once and referenced as
// instances (when instancing is enabled).  Grid names are made unique by
// appending numeric suffixes where necessary.
void
Archive::write(std::ostream& os, const GridCPtrVec& grids, bool seekable,
    const MetaMap& metadata) const
{
    // Set stream flags so that downstream functions can reference them.
    io::StreamMetadata::Ptr streamMetadata = io::getStreamMetadataPtr(os);
    if (!streamMetadata) {
        streamMetadata.reset(new StreamMetadata);
        io::setStreamMetadataPtr(os, streamMetadata, /*transfer=*/false);
    }
    io::setDataCompression(os, compression());
    io::setWriteGridStatsMetadata(os, isGridStatsMetadataEnabled());
    this->writeHeader(os, seekable);
    metadata.writeMeta(os);
    // Write the number of non-null grids.
    int32_t gridCount = 0;
    for (GridCPtrVecCIter i = grids.begin(), e = grids.end(); i != e; ++i) {
        if (*i) ++gridCount;
    }
    os.write(reinterpret_cast<char*>(&gridCount), sizeof(int32_t));
    // Maps each already-written tree to its grid descriptor, so that later
    // grids sharing the same tree can be written as instances.
    using TreeMap = std::map<const TreeBase*, GridDescriptor>;
    using TreeMapIter = TreeMap::iterator;
    TreeMap treeMap;
    // Determine which grid names are unique and which are not.
    using NameHistogram = std::map<std::string, int /*count*/>;
    NameHistogram nameCount;
    for (GridCPtrVecCIter i = grids.begin(), e = grids.end(); i != e; ++i) {
        if (const GridBase::ConstPtr& grid = *i) {
            const std::string name = grid->getName();
            NameHistogram::iterator it = nameCount.find(name);
            if (it != nameCount.end()) it->second++;
            else nameCount[name] = 1;
        }
    }
    std::set<std::string> uniqueNames;
    // Write out the non-null grids.
    for (GridCPtrVecCIter i = grids.begin(), e = grids.end(); i != e; ++i) {
        if (const GridBase::ConstPtr& grid = *i) {
            // Ensure that the grid's descriptor has a unique grid name, by appending
            // a number to it if a grid with the same name was already written.
            // Always add a number if the grid name is empty, so that the grid can be
            // properly identified as an instance parent, if necessary.
            std::string name = grid->getName();
            if (name.empty() || nameCount[name] > 1) {
                name = GridDescriptor::addSuffix(name, 0);
            }
            for (int n = 1; uniqueNames.find(name) != uniqueNames.end(); ++n) {
                name = GridDescriptor::addSuffix(grid->getName(), n);
            }
            uniqueNames.insert(name);
            // Create a grid descriptor.
            GridDescriptor gd(name, grid->type(), grid->saveFloatAsHalf());
            // Check if this grid's tree is shared with a grid that has already been written.
            const TreeBase* treePtr = &(grid->baseTree());
            TreeMapIter mapIter = treeMap.find(treePtr);
            // A shared tree only counts as an instance if both grids agree
            // on half-float storage; otherwise the data must be rewritten.
            bool isInstance = ((mapIter != treeMap.end())
                && (mapIter->second.saveFloatAsHalf() == gd.saveFloatAsHalf()));
            if (mEnableInstancing && isInstance) {
                // This grid's tree is shared with another grid that has already been written.
                // Get the name of the other grid.
                gd.setInstanceParentName(mapIter->second.uniqueName());
                // Write out this grid's descriptor and metadata, but not its tree.
                writeGridInstance(gd, grid, os, seekable);
                OPENVDB_LOG_DEBUG_RUNTIME("io::Archive::write(): "
                    << GridDescriptor::nameAsString(gd.uniqueName())
                    << " (" << std::hex << treePtr << std::dec << ")"
                    << " is an instance of "
                    << GridDescriptor::nameAsString(gd.instanceParentName()));
            } else {
                // Write out the grid descriptor and its associated grid.
                writeGrid(gd, grid, os, seekable);
                // Record the grid's tree pointer so that the tree doesn't get written
                // more than once.
                treeMap[treePtr] = gd;
            }
        }
        // Some compression options (e.g., mask compression) are set per grid.
        // Restore the original settings before writing the next grid.
        io::setDataCompression(os, compression());
    }
}
// Serialize one grid: descriptor header, stream offsets, compression flags,
// metadata (with optional stats and delayed-load info), transform, topology
// and buffers.  When @a seekable is true, the offsets written up front are
// patched afterwards with the real stream positions.
void
Archive::writeGrid(GridDescriptor& gd, GridBase::ConstPtr grid,
    std::ostream& os, bool seekable) const
{
    // Restore file-level stream metadata on exit.
    struct OnExit {
        OnExit(std::ios_base& strm_): strm(&strm_), ptr(strm_.pword(sStreamState.metadata)) {}
        ~OnExit() { strm->pword(sStreamState.metadata) = ptr; }
        std::ios_base* strm;
        void* ptr;
    };
    OnExit restore(os);
    // Stream metadata varies per grid, so make a copy of the file-level stream metadata.
    io::StreamMetadata::Ptr streamMetadata;
    if (io::StreamMetadata::Ptr meta = io::getStreamMetadataPtr(os)) {
        streamMetadata.reset(new StreamMetadata(*meta));
    } else {
        streamMetadata.reset(new StreamMetadata);
    }
    streamMetadata->setHalfFloat(grid->saveFloatAsHalf());
    streamMetadata->gridMetadata() = static_cast<const MetaMap&>(*grid);
    io::setStreamMetadataPtr(os, streamMetadata, /*transfer=*/false);
    // Write out the Descriptor's header information (grid name and type)
    gd.writeHeader(os);
    // Save the curent stream position as postion to where the offsets for
    // this GridDescriptor will be written to.
    int64_t offsetPos = (seekable ? int64_t(os.tellp()) : 0);
    // Write out the offset information. At this point it will be incorrect.
    // But we need to write it out to move the stream head forward.
    gd.writeStreamPos(os);
    // Now we know the starting grid storage position.
    if (seekable) gd.setGridPos(os.tellp());
    // Save the compression settings for this grid.
    setGridCompression(os, *grid);
    // copy grid and add delay load metadata
    const auto copyOfGrid = grid->copyGrid(); // shallow copy
    const auto nonConstCopyOfGrid = ConstPtrCast<GridBase>(copyOfGrid);
    nonConstCopyOfGrid->insertMeta(GridBase::META_FILE_DELAYED_LOAD,
        DelayedLoadMetadata());
    DelayedLoadMetadata::Ptr delayLoadMeta =
        nonConstCopyOfGrid->getMetadata<DelayedLoadMetadata>(GridBase::META_FILE_DELAYED_LOAD);
    // If the grid's type doesn't support delayed-load metadata, drop it again.
    if (!populateDelayedLoadMetadata(*delayLoadMeta, *grid, compression())) {
        nonConstCopyOfGrid->removeMeta(GridBase::META_FILE_DELAYED_LOAD);
    }
    // Save the grid's metadata and transform.
    if (getWriteGridStatsMetadata(os)) {
        // Compute and add grid statistics metadata.
        nonConstCopyOfGrid->addStatsMetadata();
        nonConstCopyOfGrid->insertMeta(GridBase::META_FILE_COMPRESSION,
            StringMetadata(compressionToString(getDataCompression(os))));
    }
    copyOfGrid->writeMeta(os);
    grid->writeTransform(os);
    // Save the grid's structure.
    grid->writeTopology(os);
    // Now we know the grid block storage position.
    if (seekable) gd.setBlockPos(os.tellp());
    // Save out the data blocks of the grid.
    grid->writeBuffers(os);
    // Now we know the end position of this grid.
    if (seekable) gd.setEndPos(os.tellp());
    if (seekable) {
        // Now, go back to where the Descriptor's offset information is written
        // and write the offsets again.
        os.seekp(offsetPos, std::ios_base::beg);
        gd.writeStreamPos(os);
        // Now seek back to the end.
        gd.seekToEnd(os);
    }
}
void
Archive::writeGridInstance(GridDescriptor& gd, GridBase::ConstPtr grid,
    std::ostream& os, bool seekable) const
{
    // Descriptor header: grid name, type and instance parent name.
    gd.writeHeader(os);

    // Reserve space for the three stream offsets by writing placeholder
    // values now; remember where they live so they can be rewritten once
    // the real positions are known (only meaningful on a seekable stream).
    const int64_t offsetPos = (seekable ? int64_t(os.tellp()) : 0);
    gd.writeStreamPos(os);

    // The grid's storage begins here.
    if (seekable) gd.setGridPos(os.tellp());

    // Record this grid's compression settings.
    setGridCompression(os, *grid);

    // An instance shares its parent's tree, so only the metadata and the
    // transform are written out — no topology and no data blocks.
    grid->writeMeta(os);
    grid->writeTransform(os);

    if (seekable) {
        // The grid ends here; now that all offsets are known, go back,
        // overwrite the placeholders, and return to the end of the stream.
        gd.setEndPos(os.tellp());
        os.seekp(offsetPos, std::ios_base::beg);
        gd.writeStreamPos(os);
        gd.seekToEnd(os);
    }
}
} // namespace io
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
| 48,051 | C++ | 31.62186 | 99 | 0.640361 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/io/GridDescriptor.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#include "GridDescriptor.h"
#include <openvdb/Exceptions.h>
#include <boost/algorithm/string/predicate.hpp> // for boost::ends_with()
#include <boost/algorithm/string/erase.hpp> // for boost::erase_last()
#include <sstream>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace io {
namespace {

// In order not to break backward compatibility with existing VDB files,
// grids stored using 16-bit half floats are flagged by adding the following
// suffix to the grid's type name on output.  The suffix is removed on input
// and the grid's "save float as half" flag set accordingly.
const char* HALF_FLOAT_TYPENAME_SUFFIX = "_HalfFloat";

// Separator between a grid name and its numeric uniquifying suffix
// (see GridDescriptor::addSuffix()/stripSuffix()).
const char* SEP = "\x1e"; // ASCII "record separator"

}
// Default-construct an empty descriptor: no names, half-float output
// disabled, and all stream offsets at zero.
GridDescriptor::GridDescriptor():
    mSaveFloatAsHalf(false),
    mGridPos(0),
    mBlockPos(0),
    mEndPos(0)
{
}


// Construct a descriptor for a grid with the given unique name and type.
// The display name (mGridName) is the unique name with any numeric
// suffix stripped; stream offsets start at zero until set explicitly.
GridDescriptor::GridDescriptor(const Name &name, const Name &type, bool half):
    mGridName(stripSuffix(name)),
    mUniqueName(name),
    mGridType(type),
    mSaveFloatAsHalf(half),
    mGridPos(0),
    mBlockPos(0),
    mEndPos(0)
{
}


GridDescriptor::~GridDescriptor()
{
}
void
GridDescriptor::writeHeader(std::ostream &os) const
{
writeString(os, mUniqueName);
Name gridType = mGridType;
if (mSaveFloatAsHalf) gridType += HALF_FLOAT_TYPENAME_SUFFIX;
writeString(os, gridType);
writeString(os, mInstanceParentName);
}
void
GridDescriptor::writeStreamPos(std::ostream &os) const
{
    // Emit the three stream offsets in a fixed order (grid, blocks, end)
    // as raw 64-bit values; read() consumes them in the same order.
    const int64_t offsets[3] = { mGridPos, mBlockPos, mEndPos };
    for (const int64_t offset : offsets) {
        os.write(reinterpret_cast<const char*>(&offset), sizeof(int64_t));
    }
}
GridBase::Ptr
GridDescriptor::read(std::istream &is)
{
    // The unique name comes first; the display name is the unique name
    // with any numeric suffix stripped.
    mUniqueName = readString(is);
    mGridName = stripSuffix(mUniqueName);

    // The stored type name may carry the half-float suffix, which is not
    // part of the registered type name: strip it and record the flag.
    // (Note: mSaveFloatAsHalf is left untouched when the suffix is absent.)
    mGridType = readString(is);
    const std::string suffix(HALF_FLOAT_TYPENAME_SUFFIX);
    if (mGridType.size() >= suffix.size()
        && mGridType.compare(mGridType.size() - suffix.size(), suffix.size(), suffix) == 0)
    {
        mSaveFloatAsHalf = true;
        mGridType.erase(mGridType.size() - suffix.size());
    }

    // Instance parent names were introduced with grid instancing.
    if (getFormatVersion(is) >= OPENVDB_FILE_VERSION_GRID_INSTANCING) {
        mInstanceParentName = readString(is);
    }

    // Create an empty grid of this descriptor's type, if it is registered.
    if (!GridBase::isRegistered(mGridType)) {
        OPENVDB_THROW(LookupError, "Cannot read grid." <<
            " Grid type " << mGridType << " is not registered.");
    }
    GridBase::Ptr grid = GridBase::createGrid(mGridType);
    if (grid) grid->setSaveFloatAsHalf(mSaveFloatAsHalf);

    // Finally, the three stream offsets, stored as raw 64-bit values
    // (the same order writeStreamPos() emits them).
    is.read(reinterpret_cast<char*>(&mGridPos), sizeof(int64_t));
    is.read(reinterpret_cast<char*>(&mBlockPos), sizeof(int64_t));
    is.read(reinterpret_cast<char*>(&mEndPos), sizeof(int64_t));
    return grid;
}
// Position the input stream at the start of this grid's storage
// (metadata, transform and topology follow from there).
void
GridDescriptor::seekToGrid(std::istream &is) const
{
    is.seekg(mGridPos, std::ios_base::beg);
}


// Position the input stream at the start of this grid's data blocks.
void
GridDescriptor::seekToBlocks(std::istream &is) const
{
    is.seekg(mBlockPos, std::ios_base::beg);
}


// Position the input stream just past this grid (i.e., where the next
// grid descriptor begins).
void
GridDescriptor::seekToEnd(std::istream &is) const
{
    is.seekg(mEndPos, std::ios_base::beg);
}
// Position the output stream at the start of this grid's storage.
void
GridDescriptor::seekToGrid(std::ostream &os) const
{
    os.seekp(mGridPos, std::ios_base::beg);
}


// Position the output stream at the start of this grid's data blocks.
void
GridDescriptor::seekToBlocks(std::ostream &os) const
{
    os.seekp(mBlockPos, std::ios_base::beg);
}


// Position the output stream just past this grid (i.e., where the next
// grid descriptor will be written).
void
GridDescriptor::seekToEnd(std::ostream &os) const
{
    os.seekp(mEndPos, std::ios_base::beg);
}
////////////////////////////////////////
// static
Name
GridDescriptor::addSuffix(const Name& name, int n)
{
    // "name" + SEP + decimal rendering of n (same text an ostringstream
    // would produce for an int).
    return name + SEP + std::to_string(n);
}
// static
Name
GridDescriptor::stripSuffix(const Name& name)
{
    // Everything before the first separator character; a name without a
    // separator is returned unchanged.
    const std::string::size_type pos = name.find(SEP);
    return (pos == std::string::npos) ? name : name.substr(0, pos);
}
// static
std::string
GridDescriptor::nameAsString(const Name& name)
{
    // Render "name" SEP "N" as the human-readable "name[N]";
    // names without a suffix pass through unchanged.
    const std::string::size_type pos = name.find(SEP);
    if (pos == std::string::npos) return name;
    return name.substr(0, pos) + "[" + name.substr(pos + 1) + "]";
}
//static
Name
GridDescriptor::stringAsUniqueName(const std::string& s)
{
    // Convert the human-readable "name[N]" form back to "name" SEP "N";
    // any other string is returned unchanged.
    Name ret = s;
    if (!ret.empty() && ret.back() == ']') { // found trailing ']'
        // Locate the matching "[" once and reuse the position below
        // (the original searched twice).
        const std::string::size_type pos = ret.find('[');
        if (pos != std::string::npos) {
            ret.resize(ret.size() - 1); // drop trailing ']'
            ret.replace(pos, 1, SEP);   // replace "[" with the separator
        }
    }
    return ret;
}
} // namespace io
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
| 4,601 | C++ | 22.360406 | 78 | 0.661595 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/io/GridDescriptor.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_IO_GRIDDESCRIPTOR_HAS_BEEN_INCLUDED
#define OPENVDB_IO_GRIDDESCRIPTOR_HAS_BEEN_INCLUDED
#include <openvdb/Grid.h>
#include <iostream>
#include <string>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace io {
/// This structure stores useful information that describes a grid on disk.
/// It can be used to retrieve I/O information about the grid such as
/// offsets into the file where the grid is located, its type, etc.
class OPENVDB_API GridDescriptor
{
public:
    GridDescriptor();
    /// @param name            the grid's unique name (may carry a numeric suffix; see addSuffix())
    /// @param gridType        the grid's registered type name
    /// @param saveFloatAsHalf if true, the grid's floats are quantized to 16 bits on disk
    GridDescriptor(const Name& name, const Name& gridType, bool saveFloatAsHalf = false);
    GridDescriptor(const GridDescriptor&) = default;
    GridDescriptor& operator=(const GridDescriptor&) = default;
    ~GridDescriptor();

    /// Return the grid's registered type name.
    const Name& gridType() const { return mGridType; }
    /// Return the grid's display name (unique name with any suffix stripped).
    const Name& gridName() const { return mGridName; }
    /// Return the grid's unique name (possibly suffixed).
    const Name& uniqueName() const { return mUniqueName; }

    /// Return the name of the grid, if any, with which this grid shares a tree.
    const Name& instanceParentName() const { return mInstanceParentName; }
    /// Flag this grid as an instance that shares the named grid's tree.
    void setInstanceParentName(const Name& name) { mInstanceParentName = name; }
    /// Return true if this grid shares another grid's tree.
    bool isInstance() const { return !mInstanceParentName.empty(); }

    /// Return true if this grid's floats are quantized to 16 bits on disk.
    bool saveFloatAsHalf() const { return mSaveFloatAsHalf; }

    // Stream offsets for the three sections of the grid's on-disk record
    void setGridPos(int64_t pos) { mGridPos = pos; }
    int64_t getGridPos() const { return mGridPos; }

    void setBlockPos(int64_t pos) { mBlockPos = pos; }
    int64_t getBlockPos() const { return mBlockPos; }

    void setEndPos(int64_t pos) { mEndPos = pos; }
    int64_t getEndPos() const { return mEndPos; }

    // These methods seek to the right position in the given stream.
    void seekToGrid(std::istream&) const;
    void seekToBlocks(std::istream&) const;
    void seekToEnd(std::istream&) const;

    void seekToGrid(std::ostream&) const;
    void seekToBlocks(std::ostream&) const;
    void seekToEnd(std::ostream&) const;

    /// @brief Write out this descriptor's header information (all data except for
    /// stream offsets).
    void writeHeader(std::ostream&) const;

    /// @brief Since positions into the stream are known at a later time, they are
    /// written out separately.
    void writeStreamPos(std::ostream&) const;

    /// @brief Read a grid descriptor from the given stream.
    /// @return an empty grid of the type specified by the grid descriptor.
    GridBase::Ptr read(std::istream&);

    /// @brief Append the number @a n to the given name (separated by an ASCII
    /// "record separator" character) and return the resulting name.
    static Name addSuffix(const Name&, int n);
    /// @brief Strip from the given name any suffix that is separated by an ASCII
    /// "record separator" character and return the resulting name.
    static Name stripSuffix(const Name&);

    /// @brief Given a name with suffix N, return "name[N]", otherwise just return "name".
    /// Use this to produce a human-readable string from a descriptor's unique name.
    static std::string nameAsString(const Name&);
    /// @brief Given a string of the form "name[N]", return "name" with the suffix N
    /// separated by an ASCII "record separator" character).  Otherwise just return
    /// the string as is.
    static Name stringAsUniqueName(const std::string&);

private:
    /// Name of the grid
    Name mGridName;
    /// Unique name for this descriptor
    Name mUniqueName;
    /// If nonempty, the name of another grid that shares this grid's tree
    Name mInstanceParentName;
    /// The type of the grid
    Name mGridType;
    /// Are floats quantized to 16 bits on disk?
    bool mSaveFloatAsHalf;
    /// Location in the stream where the grid data is stored
    int64_t mGridPos;
    /// Location in the stream where the grid blocks are stored
    int64_t mBlockPos;
    /// Location in the stream where the next grid descriptor begins
    int64_t mEndPos;
};
} // namespace io
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_IO_GRIDDESCRIPTOR_HAS_BEEN_INCLUDED
| 4,065 | C | 37.35849 | 90 | 0.706519 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/io/DelayedLoadMetadata.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_DELAYED_LOAD_METADATA_HAS_BEEN_INCLUDED
#define OPENVDB_DELAYED_LOAD_METADATA_HAS_BEEN_INCLUDED
#include <openvdb/Metadata.h>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace io {
/// @brief Store a buffer of data that can be optionally used
/// during reading for faster delayed-load I/O performance
class OPENVDB_API DelayedLoadMetadata: public Metadata
{
public:
    using Ptr = SharedPtr<DelayedLoadMetadata>;
    using ConstPtr = SharedPtr<const DelayedLoadMetadata>;
    // Per-entry mask indicator code, stored as a single byte
    using MaskType = int8_t;
    // Per-entry compressed buffer size, in bytes
    using CompressedSizeType = int64_t;

    DelayedLoadMetadata() = default;
    DelayedLoadMetadata(const DelayedLoadMetadata& other);
    ~DelayedLoadMetadata() override = default;

    Name typeName() const override;
    Metadata::Ptr copy() const override;
    void copy(const Metadata&) override;
    std::string str() const override;
    bool asBool() const override;
    Index32 size() const override;

    static Name staticTypeName() { return "__delayedload"; }

    /// Factory used by the metadata type registry.
    static Metadata::Ptr createMetadata()
    {
        Metadata::Ptr ret(new DelayedLoadMetadata);
        return ret;
    }

    static void registerType()
    {
        Metadata::registerType(DelayedLoadMetadata::staticTypeName(),
            DelayedLoadMetadata::createMetadata);
    }

    static void unregisterType()
    {
        Metadata::unregisterType(DelayedLoadMetadata::staticTypeName());
    }

    static bool isRegisteredType()
    {
        return Metadata::isRegisteredType(DelayedLoadMetadata::staticTypeName());
    }

    /// @brief Delete the contents of the mask and compressed size arrays
    void clear();

    /// @brief Return @c true if both arrays are empty
    bool empty() const;

    /// @brief Resize the mask array
    void resizeMask(size_t size);

    /// @brief Resize the compressed size array
    void resizeCompressedSize(size_t size);

    /// @brief Return the mask value for a specific index
    /// @note throws if index is out-of-range or DelayedLoadMask not registered
    MaskType getMask(size_t index) const;

    /// @brief Set the mask value for a specific index
    /// @note throws if index is out-of-range
    void setMask(size_t index, const MaskType& value);

    /// @brief Return the compressed size value for a specific index
    /// @note throws if index is out-of-range or DelayedLoadMask not registered
    CompressedSizeType getCompressedSize(size_t index) const;

    /// @brief Set the compressed size value for a specific index
    /// @note throws if index is out-of-range
    void setCompressedSize(size_t index, const CompressedSizeType& value);

protected:
    void readValue(std::istream&, Index32 numBytes) override;
    void writeValue(std::ostream&) const override;

private:
    // NOTE(review): entries appear to be indexed per leaf node — they are
    // consumed with a leaf index in io::readCompressedValues(); confirm.
    std::vector<MaskType> mMask;
    std::vector<CompressedSizeType> mCompressedSize;
}; // class DelayedLoadMetadata
} // namespace io
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_DELAYED_LOAD_METADATA_HAS_BEEN_INCLUDED
| 3,221 | C | 30.281553 | 81 | 0.710338 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/io/Compression.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_IO_COMPRESSION_HAS_BEEN_INCLUDED
#define OPENVDB_IO_COMPRESSION_HAS_BEEN_INCLUDED
#include <openvdb/Types.h>
#include <openvdb/MetaMap.h>
#include <openvdb/math/Math.h> // for negative()
#include "io.h" // for getDataCompression(), etc.
#include "DelayedLoadMetadata.h"
#include <algorithm>
#include <iostream>
#include <memory>
#include <string>
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace io {
/// @brief OR-able bit flags for compression options on input and output streams
/// @details
/// <dl>
/// <dt><tt>COMPRESS_NONE</tt>
/// <dd>On write, don't compress data.<br>
/// On read, the input stream contains uncompressed data.
///
/// <dt><tt>COMPRESS_ZIP</tt>
/// <dd>When writing grids other than level sets or fog volumes, apply
/// ZLIB compression to internal and leaf node value buffers.<br>
/// When reading grids other than level sets or fog volumes, indicate that
/// the value buffers of internal and leaf nodes are ZLIB-compressed.<br>
/// ZLIB compresses well but is slow.
///
/// <dt><tt>COMPRESS_ACTIVE_MASK</tt>
/// <dd>When writing a grid of any class, don't output a node's inactive values
/// if it has two or fewer distinct values. Instead, output minimal information
/// to permit the lossless reconstruction of inactive values.<br>
/// On read, nodes might have been stored without inactive values.
/// Where necessary, reconstruct inactive values from available information.
///
/// <dt><tt>COMPRESS_BLOSC</tt>
/// <dd>When writing grids other than level sets or fog volumes, apply
/// Blosc compression to internal and leaf node value buffers.<br>
/// When reading grids other than level sets or fog volumes, indicate that
/// the value buffers of internal and leaf nodes are Blosc-compressed.<br>
/// Blosc is much faster than ZLIB and produces comparable file sizes.
/// </dl>
enum {
    COMPRESS_NONE = 0,          // no compression
    COMPRESS_ZIP = 0x1,         // ZLIB-compress value buffers (details above)
    COMPRESS_ACTIVE_MASK = 0x2, // omit reconstructible inactive values
    COMPRESS_BLOSC = 0x4        // Blosc-compress value buffers (details above)
};
/// Return a string describing the given compression flags.
OPENVDB_API std::string compressionToString(uint32_t flags);
////////////////////////////////////////
/// @internal Per-node indicator byte that specifies what additional metadata
/// is stored to permit reconstruction of inactive values
enum {
    // Note: a value of this enum is serialized as a single byte (int8_t)
    // per node (see readCompressedValues()).
    /*0*/ NO_MASK_OR_INACTIVE_VALS,     // no inactive vals, or all inactive vals are +background
    /*1*/ NO_MASK_AND_MINUS_BG,         // all inactive vals are -background
    /*2*/ NO_MASK_AND_ONE_INACTIVE_VAL, // all inactive vals have the same non-background val
    /*3*/ MASK_AND_NO_INACTIVE_VALS,    // mask selects between -background and +background
    /*4*/ MASK_AND_ONE_INACTIVE_VAL,    // mask selects between backgd and one other inactive val
    /*5*/ MASK_AND_TWO_INACTIVE_VALS,   // mask selects between two non-background inactive vals
    /*6*/ NO_MASK_AND_ALL_VALS          // > 2 inactive vals, so no mask compression at all
};
/// @brief Analyze a node's inactive values and choose the most compact
/// on-disk representation for them, expressed as one of the per-node
/// indicator codes above plus up to two representative inactive values.
template <typename ValueT, typename MaskT>
struct MaskCompress
{
    // Comparison function for values (exact comparison via math::isExactlyEqual)
    static inline bool eq(const ValueT& a, const ValueT& b) {
        return math::isExactlyEqual(a, b);
    }
    /// @param valueMask  the node's active-value mask
    /// @param childMask  mask of slots that hold child pointers rather than values
    /// @param srcBuf     the node's value buffer
    /// @param background the background value used for classification
    MaskCompress(
        const MaskT& valueMask, const MaskT& childMask,
        const ValueT* srcBuf, const ValueT& background)
    {
        /// @todo Consider all values, not just inactive values?
        inactiveVal[0] = inactiveVal[1] = background;
        int numUniqueInactiveVals = 0;
        // Scan the inactive values, stopping as soon as a third distinct
        // value is seen (beyond two, no mask compression is possible).
        for (typename MaskT::OffIterator it = valueMask.beginOff();
            numUniqueInactiveVals < 3 && it; ++it)
        {
            const Index32 idx = it.pos();
            // Skip inactive values that are actually child node pointers.
            if (childMask.isOn(idx)) continue;
            const ValueT& val = srcBuf[idx];
            const bool unique = !(
                (numUniqueInactiveVals > 0 && MaskCompress::eq(val, inactiveVal[0])) ||
                (numUniqueInactiveVals > 1 && MaskCompress::eq(val, inactiveVal[1]))
            );
            if (unique) {
                // Only the first two distinct values are recorded; the count
                // may still reach three to signal "too many to compress".
                if (numUniqueInactiveVals < 2) inactiveVal[numUniqueInactiveVals] = val;
                ++numUniqueInactiveVals;
            }
        }
        metadata = NO_MASK_OR_INACTIVE_VALS;
        if (numUniqueInactiveVals == 1) {
            if (!MaskCompress::eq(inactiveVal[0], background)) {
                if (MaskCompress::eq(inactiveVal[0], math::negative(background))) {
                    metadata = NO_MASK_AND_MINUS_BG;
                } else {
                    metadata = NO_MASK_AND_ONE_INACTIVE_VAL;
                }
            }
        } else if (numUniqueInactiveVals == 2) {
            metadata = NO_MASK_OR_INACTIVE_VALS;
            if (!MaskCompress::eq(inactiveVal[0], background) && !MaskCompress::eq(inactiveVal[1], background)) {
                // If neither inactive value is equal to the background, both values
                // need to be saved, along with a mask that selects between them.
                metadata = MASK_AND_TWO_INACTIVE_VALS;

            } else if (MaskCompress::eq(inactiveVal[1], background)) {
                if (MaskCompress::eq(inactiveVal[0], math::negative(background))) {
                    // If the second inactive value is equal to the background and
                    // the first is equal to -background, neither value needs to be saved,
                    // but save a mask that selects between -background and +background.
                    metadata = MASK_AND_NO_INACTIVE_VALS;
                } else {
                    // If the second inactive value is equal to the background, only
                    // the first value needs to be saved, along with a mask that selects
                    // between it and the background.
                    metadata = MASK_AND_ONE_INACTIVE_VAL;
                }
            } else if (MaskCompress::eq(inactiveVal[0], background)) {
                if (MaskCompress::eq(inactiveVal[1], math::negative(background))) {
                    // If the first inactive value is equal to the background and
                    // the second is equal to -background, neither value needs to be saved,
                    // but save a mask that selects between -background and +background.
                    metadata = MASK_AND_NO_INACTIVE_VALS;
                    std::swap(inactiveVal[0], inactiveVal[1]);
                } else {
                    // If the first inactive value is equal to the background, swap it
                    // with the second value and save only that value, along with a mask
                    // that selects between it and the background.
                    std::swap(inactiveVal[0], inactiveVal[1]);
                    metadata = MASK_AND_ONE_INACTIVE_VAL;
                }
            }
        } else if (numUniqueInactiveVals > 2) {
            metadata = NO_MASK_AND_ALL_VALS;
        }
    }

    // Indicator code chosen by the constructor (one byte on disk)
    int8_t metadata = NO_MASK_AND_ALL_VALS;
    // Up to two representative inactive values (unused slots hold background)
    ValueT inactiveVal[2];
};
////////////////////////////////////////
/// @brief RealToHalf and its specializations define a mapping from
/// floating-point data types to analogous half float types.
template<typename T>
struct RealToHalf {
    enum { isReal = false }; // unless otherwise specified, type T is not a floating-point type
    using HalfT = T; // type T's half float analogue is T itself
    static HalfT convert(const T& val) { return val; }
};

/// Map float to 16-bit half.
template<> struct RealToHalf<float> {
    enum { isReal = true };
    using HalfT = half;
    static HalfT convert(float val) { return HalfT(val); }
};

/// Map double to 16-bit half (via float).
template<> struct RealToHalf<double> {
    enum { isReal = true };
    using HalfT = half;
    // A half can only be constructed from a float, so cast the value to a float first.
    static HalfT convert(double val) { return HalfT(float(val)); }
};

/// Map Vec2s to a vector of two halves.
template<> struct RealToHalf<Vec2s> {
    enum { isReal = true };
    using HalfT = Vec2H;
    static HalfT convert(const Vec2s& val) { return HalfT(val); }
};

template<> struct RealToHalf<Vec2d> {
    enum { isReal = true };
    using HalfT = Vec2H;
    // A half can only be constructed from a float, so cast the vector's elements to floats first.
    static HalfT convert(const Vec2d& val) { return HalfT(Vec2s(val)); }
};

/// Map Vec3s to a vector of three halves.
template<> struct RealToHalf<Vec3s> {
    enum { isReal = true };
    using HalfT = Vec3H;
    static HalfT convert(const Vec3s& val) { return HalfT(val); }
};

template<> struct RealToHalf<Vec3d> {
    enum { isReal = true };
    using HalfT = Vec3H;
    // A half can only be constructed from a float, so cast the vector's elements to floats first.
    static HalfT convert(const Vec3d& val) { return HalfT(Vec3s(val)); }
};
/// Return the given value truncated to 16-bit float precision.
template<typename T>
inline T
truncateRealToHalf(const T& val)
{
return T(RealToHalf<T>::convert(val));
}
////////////////////////////////////////
OPENVDB_API size_t zipToStreamSize(const char* data, size_t numBytes);
OPENVDB_API void zipToStream(std::ostream&, const char* data, size_t numBytes);
OPENVDB_API void unzipFromStream(std::istream&, char* data, size_t numBytes);
OPENVDB_API size_t bloscToStreamSize(const char* data, size_t valSize, size_t numVals);
OPENVDB_API void bloscToStream(std::ostream&, const char* data, size_t valSize, size_t numVals);
OPENVDB_API void bloscFromStream(std::istream&, char* data, size_t numBytes);
/// @brief Read data from a stream.
/// @param is the input stream
/// @param data the contiguous array of data to read in
/// @param count the number of elements to read in
/// @param compression whether and how the data is compressed (either COMPRESS_NONE,
/// COMPRESS_ZIP, COMPRESS_ACTIVE_MASK or COMPRESS_BLOSC)
/// @param metadata optional pointer to a DelayedLoadMetadata object that stores
/// the size of the compressed buffer
/// @param metadataOffset offset into DelayedLoadMetadata, ignored if pointer is null
/// @throw IoError if @a compression is COMPRESS_BLOSC but OpenVDB was compiled
/// without Blosc support.
/// @details This default implementation is instantiated only for types
/// whose size can be determined by the sizeof() operator.
template<typename T>
inline void
readData(std::istream& is, T* data, Index count, uint32_t compression,
    DelayedLoadMetadata* metadata = nullptr, size_t metadataOffset = size_t(0))
{
    // A null output pointer means "skip over the data" instead of reading
    // it, which is only legal on a seekable stream.
    const bool skipData = (data == nullptr);
    if (skipData) {
        assert(!getStreamMetadataPtr(is) || getStreamMetadataPtr(is)->seekable());
    }
    const bool usesCompression = (compression & (COMPRESS_BLOSC | COMPRESS_ZIP)) != 0;
    if (metadata && skipData && usesCompression) {
        // The compressed buffer size was recorded on write, so the entire
        // buffer can be skipped with a single relative seek.
        const size_t numCompressedBytes = metadata->getCompressedSize(metadataOffset);
        is.seekg(numCompressedBytes, std::ios_base::cur);
    } else if (compression & COMPRESS_BLOSC) {
        bloscFromStream(is, reinterpret_cast<char*>(data), sizeof(T) * count);
    } else if (compression & COMPRESS_ZIP) {
        unzipFromStream(is, reinterpret_cast<char*>(data), sizeof(T) * count);
    } else if (skipData) {
        // Uncompressed data has a known size; skip it directly.
        is.seekg(sizeof(T) * count, std::ios_base::cur);
    } else {
        is.read(reinterpret_cast<char*>(data), sizeof(T) * count);
    }
}
/// Specialization for std::string input
/// Specialization for std::string input
template<>
inline void
readData<std::string>(std::istream& is, std::string* data, Index count, uint32_t /*compression*/,
    DelayedLoadMetadata* /*metadata*/, size_t /*metadataOffset*/)
{
    // Each string is stored as a formatted length followed by len+1 raw
    // bytes (the characters plus a trailing byte); see writeData<std::string>.
    // NOTE(review): the length digits and the character data have no
    // separator — a string whose first character is a digit would be
    // misparsed by "is >> len".  Confirm whether callers guard against this.
    for (Index i = 0; i < count; ++i) {
        size_t len = 0;
        is >> len;
        //data[i].resize(len);
        //is.read(&(data[i][0]), len);

        // Read len+1 bytes (content plus the trailing byte), then copy only
        // the first len characters into the output string.
        std::string buffer(len+1, ' ');
        is.read(&buffer[0], len+1);
        // A null data pointer means "skip" (consume the bytes but store nothing).
        if (data != nullptr) data[i].assign(buffer, 0, len);
    }
}
/// HalfReader wraps a static function, read(), that is analogous to readData(), above,
/// except that it is partially specialized for floating-point types in order to promote
/// 16-bit half float values to full float. A wrapper class is required because
/// only classes, not functions, can be partially specialized.
template<bool IsReal, typename T> struct HalfReader;
/// Partial specialization for non-floating-point types (no half to float promotion)
/// Partial specialization for non-floating-point types (no half to float promotion)
template<typename T>
struct HalfReader</*IsReal=*/false, T> {
    // Non-real types are stored at full width, so simply forward to readData().
    static inline void read(std::istream& is, T* data, Index count, uint32_t compression,
        DelayedLoadMetadata* metadata = nullptr, size_t metadataOffset = size_t(0)) {
        readData(is, data, count, compression, metadata, metadataOffset);
    }
};
/// Partial specialization for floating-point types
template<typename T>
struct HalfReader</*IsReal=*/true, T> {
using HalfT = typename RealToHalf<T>::HalfT;
static inline void read(std::istream& is, T* data, Index count, uint32_t compression,
DelayedLoadMetadata* metadata = nullptr, size_t metadataOffset = size_t(0)) {
if (count < 1) return;
if (data == nullptr) {
// seek mode - pass through null pointer
readData<HalfT>(is, nullptr, count, compression, metadata, metadataOffset);
} else {
std::vector<HalfT> halfData(count); // temp buffer into which to read half float values
readData<HalfT>(is, reinterpret_cast<HalfT*>(&halfData[0]), count, compression,
metadata, metadataOffset);
// Copy half float values from the temporary buffer to the full float output array.
std::copy(halfData.begin(), halfData.end(), data);
}
}
};
/// Return the number of bytes writeData() will emit for this buffer under
/// the given compression scheme (Blosc takes precedence over ZIP).
template<typename T>
inline size_t
writeDataSize(const T *data, Index count, uint32_t compression)
{
    if (compression & COMPRESS_BLOSC) {
        return bloscToStreamSize(reinterpret_cast<const char*>(data), sizeof(T), count);
    }
    if (compression & COMPRESS_ZIP) {
        return zipToStreamSize(reinterpret_cast<const char*>(data), sizeof(T) * count);
    }
    return sizeof(T) * count;
}
/// Specialization for std::string output
/// Specialization for std::string output
template<>
inline size_t
writeDataSize<std::string>(const std::string* data, Index count,
    uint32_t /*compression*/) ///< @todo add compression
{
    // Per string: a length field plus len+1 character bytes.
    // NOTE(review): writeData<std::string> streams the length with formatted
    // operator<< (a variable number of ASCII digits), not sizeof(size_t) raw
    // bytes, so this size is only an estimate — confirm intended semantics.
    size_t size(0);
    for (Index i = 0; i < count; ++i) {
        const size_t len = data[i].size();
        size += sizeof(size_t) + (len+1);
    }
    return size;
}
/// Write data to a stream.
/// @param os the output stream
/// @param data the contiguous array of data to write
/// @param count the number of elements to write out
/// @param compression whether and how to compress the data (either COMPRESS_NONE,
/// COMPRESS_ZIP, COMPRESS_ACTIVE_MASK or COMPRESS_BLOSC)
/// @throw IoError if @a compression is COMPRESS_BLOSC but OpenVDB was compiled
/// without Blosc support.
/// @details This default implementation is instantiated only for types
/// whose size can be determined by the sizeof() operator.
template<typename T>
inline void
writeData(std::ostream &os, const T *data, Index count, uint32_t compression)
{
    // Blosc takes precedence over ZIP when both flag bits are set;
    // with neither set, the buffer is written out raw.
    if (compression & COMPRESS_BLOSC) {
        bloscToStream(os, reinterpret_cast<const char*>(data), sizeof(T), count);
        return;
    }
    if (compression & COMPRESS_ZIP) {
        zipToStream(os, reinterpret_cast<const char*>(data), sizeof(T) * count);
        return;
    }
    os.write(reinterpret_cast<const char*>(data), sizeof(T) * count);
}
/// Specialization for std::string output
/// Specialization for std::string output
template<>
inline void
writeData<std::string>(std::ostream& os, const std::string* data, Index count,
    uint32_t /*compression*/) ///< @todo add compression
{
    // Each string is emitted as its length in formatted decimal digits,
    // immediately followed by len+1 raw bytes (the characters plus the
    // terminating NUL from c_str()); readData<std::string> mirrors this.
    for (Index i = 0; i < count; ++i) {
        const size_t len = data[i].size();
        os << len;
        os.write(data[i].c_str(), len+1);
        //os.write(&(data[i][0]), len );
    }
}
/// HalfWriter wraps a static function, write(), that is analogous to writeData(), above,
/// except that it is partially specialized for floating-point types in order to quantize
/// floating-point values to 16-bit half float. A wrapper class is required because
/// only classes, not functions, can be partially specialized.
template<bool IsReal, typename T> struct HalfWriter;
/// Partial specialization for non-floating-point types (no float to half quantization)
/// Partial specialization for non-floating-point types (no float to half quantization)
template<typename T>
struct HalfWriter</*IsReal=*/false, T> {
    // Non-real types are written at full width, so forward directly.
    static inline size_t writeSize(const T* data, Index count, uint32_t compression) {
        return writeDataSize(data, count, compression);
    }
    static inline void write(std::ostream& os, const T* data, Index count, uint32_t compression) {
        writeData(os, data, count, compression);
    }
};
/// Partial specialization for floating-point types
template<typename T>
struct HalfWriter</*IsReal=*/true, T> {
using HalfT = typename RealToHalf<T>::HalfT;
static inline size_t writeSize(const T* data, Index count, uint32_t compression) {
if (count < 1) return size_t(0);
// Convert full float values to half float, then output the half float array.
std::vector<HalfT> halfData(count);
for (Index i = 0; i < count; ++i) halfData[i] = RealToHalf<T>::convert(data[i]);
return writeDataSize<HalfT>(reinterpret_cast<const HalfT*>(&halfData[0]), count, compression);
}
static inline void write(std::ostream& os, const T* data, Index count, uint32_t compression) {
if (count < 1) return;
// Convert full float values to half float, then output the half float array.
std::vector<HalfT> halfData(count);
for (Index i = 0; i < count; ++i) halfData[i] = RealToHalf<T>::convert(data[i]);
writeData<HalfT>(os, reinterpret_cast<const HalfT*>(&halfData[0]), count, compression);
}
};
#ifdef _MSC_VER
/// Specialization to avoid double to float warnings in MSVC
template<>
struct HalfWriter</*IsReal=*/true, double> {
using HalfT = RealToHalf<double>::HalfT;
static inline size_t writeSize(const double* data, Index count, uint32_t compression)
{
if (count < 1) return size_t(0);
// Convert full float values to half float, then output the half float array.
std::vector<HalfT> halfData(count);
for (Index i = 0; i < count; ++i) halfData[i] = RealToHalf<double>::convert(data[i]);
return writeDataSize<HalfT>(reinterpret_cast<const HalfT*>(&halfData[0]), count, compression);
}
static inline void write(std::ostream& os, const double* data, Index count,
uint32_t compression)
{
if (count < 1) return;
// Convert full float values to half float, then output the half float array.
std::vector<HalfT> halfData(count);
for (Index i = 0; i < count; ++i) halfData[i] = RealToHalf<double>::convert(data[i]);
writeData<HalfT>(os, reinterpret_cast<const HalfT*>(&halfData[0]), count, compression);
}
};
#endif // _MSC_VER
////////////////////////////////////////
/// Populate the given buffer with @a destCount values of type @c ValueT
/// read from the given stream, taking into account that the stream might
/// have been compressed via one of several supported schemes.
/// [Mainly for internal use]
/// @param is a stream from which to read data (possibly compressed,
/// depending on the stream's compression settings)
/// @param destBuf a buffer into which to read values of type @c ValueT
/// @param destCount the number of values to be stored in the buffer
/// @param valueMask a bitmask (typically, a node's value mask) indicating
/// which positions in the buffer correspond to active values
/// @param fromHalf if true, read 16-bit half floats from the input stream
/// and convert them to full floats
template<typename ValueT, typename MaskT>
inline void
readCompressedValues(std::istream& is, ValueT* destBuf, Index destCount,
    const MaskT& valueMask, bool fromHalf)
{
    // Get the stream's compression settings.
    auto meta = getStreamMetadataPtr(is);
    const uint32_t compression = getDataCompression(is);
    const bool maskCompressed = compression & COMPRESS_ACTIVE_MASK;
    // A null destination buffer means the caller only wants the stream
    // advanced past this node's data (delayed loading), not the data itself.
    const bool seek = (destBuf == nullptr);
    assert(!seek || (!meta || meta->seekable()));
    // Get delayed load metadata if it exists
    DelayedLoadMetadata::Ptr delayLoadMeta;
    uint64_t leafIndex(0);
    if (seek && meta && meta->delayedLoadMeta()) {
        delayLoadMeta =
            meta->gridMetadata().getMetadata<DelayedLoadMetadata>("file_delayed_load");
        leafIndex = meta->leaf();
    }
    int8_t metadata = NO_MASK_AND_ALL_VALS;
    if (getFormatVersion(is) >= OPENVDB_FILE_VERSION_NODE_MASK_COMPRESSION) {
        // Read the flag that specifies what, if any, additional metadata
        // (selection mask and/or inactive value(s)) is saved.
        if (seek && !maskCompressed) {
            is.seekg(/*bytes=*/1, std::ios_base::cur);
        } else if (seek && delayLoadMeta) {
            // The flag was cached at write time, so recover it from the
            // delayed-load metadata instead of reading the skipped byte.
            metadata = delayLoadMeta->getMask(leafIndex);
            is.seekg(/*bytes=*/1, std::ios_base::cur);
        } else {
            is.read(reinterpret_cast<char*>(&metadata), /*bytes=*/1);
        }
    }
    // Use the grid's background value (when available) as the basis for
    // reconstructing unsaved inactive voxel values.
    ValueT background = zeroVal<ValueT>();
    if (const void* bgPtr = getGridBackgroundValuePtr(is)) {
        background = *static_cast<const ValueT*>(bgPtr);
    }
    ValueT inactiveVal1 = background;
    ValueT inactiveVal0 =
        ((metadata == NO_MASK_OR_INACTIVE_VALS) ? background : math::negative(background));
    if (metadata == NO_MASK_AND_ONE_INACTIVE_VAL ||
        metadata == MASK_AND_ONE_INACTIVE_VAL ||
        metadata == MASK_AND_TWO_INACTIVE_VALS)
    {
        // Read one of at most two distinct inactive values.
        if (seek) {
            is.seekg(/*bytes=*/sizeof(ValueT), std::ios_base::cur);
        } else {
            is.read(reinterpret_cast<char*>(&inactiveVal0), /*bytes=*/sizeof(ValueT));
        }
        if (metadata == MASK_AND_TWO_INACTIVE_VALS) {
            // Read the second of two distinct inactive values.
            if (seek) {
                is.seekg(/*bytes=*/sizeof(ValueT), std::ios_base::cur);
            } else {
                is.read(reinterpret_cast<char*>(&inactiveVal1), /*bytes=*/sizeof(ValueT));
            }
        }
    }
    MaskT selectionMask;
    if (metadata == MASK_AND_NO_INACTIVE_VALS ||
        metadata == MASK_AND_ONE_INACTIVE_VAL ||
        metadata == MASK_AND_TWO_INACTIVE_VALS)
    {
        // For use in mask compression (only), read the bitmask that selects
        // between two distinct inactive values.
        if (seek) {
            is.seekg(/*bytes=*/selectionMask.memUsage(), std::ios_base::cur);
        } else {
            selectionMask.load(is);
        }
    }
    ValueT* tempBuf = destBuf;
    std::unique_ptr<ValueT[]> scopedTempBuf;
    Index tempCount = destCount;
    if (maskCompressed && metadata != NO_MASK_AND_ALL_VALS
        && getFormatVersion(is) >= OPENVDB_FILE_VERSION_NODE_MASK_COMPRESSION)
    {
        // Only the active values were saved, so only that many get read back.
        tempCount = valueMask.countOn();
        if (!seek && tempCount != destCount) {
            // If this node has inactive voxels, allocate a temporary buffer
            // into which to read just the active values.
            scopedTempBuf.reset(new ValueT[tempCount]);
            tempBuf = scopedTempBuf.get();
        }
    }
    // Read in the buffer.
    if (fromHalf) {
        HalfReader<RealToHalf<ValueT>::isReal, ValueT>::read(
            is, (seek ? nullptr : tempBuf), tempCount, compression, delayLoadMeta.get(), leafIndex);
    } else {
        readData<ValueT>(
            is, (seek ? nullptr : tempBuf), tempCount, compression, delayLoadMeta.get(), leafIndex);
    }
    // If mask compression is enabled and the number of active values read into
    // the temp buffer is smaller than the size of the destination buffer,
    // then there are missing (inactive) values.
    if (!seek && maskCompressed && tempCount != destCount) {
        // Restore inactive values, using the background value and, if available,
        // the inside/outside mask. (For fog volumes, the destination buffer is assumed
        // to be initialized to background value zero, so inactive values can be ignored.)
        for (Index destIdx = 0, tempIdx = 0; destIdx < MaskT::SIZE; ++destIdx) {
            if (valueMask.isOn(destIdx)) {
                // Copy a saved active value into this node's buffer.
                destBuf[destIdx] = tempBuf[tempIdx];
                ++tempIdx;
            } else {
                // Reconstruct an unsaved inactive value and copy it into this node's buffer.
                destBuf[destIdx] = (selectionMask.isOn(destIdx) ? inactiveVal1 : inactiveVal0);
            }
        }
    }
}
template<typename ValueT, typename MaskT>
inline size_t
writeCompressedValuesSize(ValueT* srcBuf, Index srcCount,
const MaskT& valueMask, uint8_t maskMetadata, bool toHalf, uint32_t compress)
{
using NonConstValueT = typename std::remove_const<ValueT>::type;
const bool maskCompress = compress & COMPRESS_ACTIVE_MASK;
Index tempCount = srcCount;
ValueT* tempBuf = srcBuf;
std::unique_ptr<NonConstValueT[]> scopedTempBuf;
if (maskCompress && maskMetadata != NO_MASK_AND_ALL_VALS) {
tempCount = 0;
Index64 onVoxels = valueMask.countOn();
if (onVoxels > Index64(0)) {
// Create a new array to hold just the active values.
scopedTempBuf.reset(new NonConstValueT[onVoxels]);
NonConstValueT* localTempBuf = scopedTempBuf.get();
// Copy active values to a new, contiguous array.
for (typename MaskT::OnIterator it = valueMask.beginOn(); it; ++it, ++tempCount) {
localTempBuf[tempCount] = srcBuf[it.pos()];
}
tempBuf = scopedTempBuf.get();
}
}
// Return the buffer size.
if (toHalf) {
return HalfWriter<RealToHalf<NonConstValueT>::isReal, NonConstValueT>::writeSize(
tempBuf, tempCount, compress);
} else {
return writeDataSize<NonConstValueT>(tempBuf, tempCount, compress);
}
}
/// Write @a srcCount values of type @c ValueT to the given stream, optionally
/// after compressing the values via one of several supported schemes.
/// [Mainly for internal use]
/// @param os a stream to which to write data (possibly compressed, depending
/// on the stream's compression settings)
/// @param srcBuf a buffer containing values of type @c ValueT to be written
/// @param srcCount the number of values stored in the buffer
/// @param valueMask a bitmask (typically, a node's value mask) indicating
/// which positions in the buffer correspond to active values
/// @param childMask a bitmask (typically, a node's child mask) indicating
/// which positions in the buffer correspond to child node pointers
/// @param toHalf if true, convert floating-point values to 16-bit half floats
template<typename ValueT, typename MaskT>
inline void
writeCompressedValues(std::ostream& os, ValueT* srcBuf, Index srcCount,
    const MaskT& valueMask, const MaskT& childMask, bool toHalf)
{
    // Get the stream's compression settings.
    const uint32_t compress = getDataCompression(os);
    const bool maskCompress = compress & COMPRESS_ACTIVE_MASK;
    Index tempCount = srcCount;
    ValueT* tempBuf = srcBuf;
    std::unique_ptr<ValueT[]> scopedTempBuf;
    int8_t metadata = NO_MASK_AND_ALL_VALS;
    if (!maskCompress) {
        // Without mask compression, the one-byte metadata flag is still
        // written so that readCompressedValues() sees a consistent layout.
        os.write(reinterpret_cast<const char*>(&metadata), /*bytes=*/1);
    } else {
        // A valid level set's inactive values are either +background (outside)
        // or -background (inside), and a fog volume's inactive values are all zero.
        // Rather than write out all of these values, we can store just the active values
        // (given that the value mask specifies their positions) and, if necessary,
        // an inside/outside bitmask.
        const ValueT zero = zeroVal<ValueT>();
        ValueT background = zero;
        if (const void* bgPtr = getGridBackgroundValuePtr(os)) {
            background = *static_cast<const ValueT*>(bgPtr);
        }
        // Classify the node's inactive values (at most two distinct ones
        // can be represented) and write the resulting metadata flag.
        MaskCompress<ValueT, MaskT> maskCompressData(valueMask, childMask, srcBuf, background);
        metadata = maskCompressData.metadata;
        os.write(reinterpret_cast<const char*>(&metadata), /*bytes=*/1);
        if (metadata == NO_MASK_AND_ONE_INACTIVE_VAL ||
            metadata == MASK_AND_ONE_INACTIVE_VAL ||
            metadata == MASK_AND_TWO_INACTIVE_VALS)
        {
            if (!toHalf) {
                // Write one of at most two distinct inactive values.
                os.write(reinterpret_cast<const char*>(&maskCompressData.inactiveVal[0]), sizeof(ValueT));
                if (metadata == MASK_AND_TWO_INACTIVE_VALS) {
                    // Write the second of two distinct inactive values.
                    os.write(reinterpret_cast<const char*>(&maskCompressData.inactiveVal[1]), sizeof(ValueT));
                }
            } else {
                // For half-float output, store the inactive values already
                // truncated to half precision so they match the saved data.
                // Write one of at most two distinct inactive values.
                ValueT truncatedVal = static_cast<ValueT>(truncateRealToHalf(maskCompressData.inactiveVal[0]));
                os.write(reinterpret_cast<const char*>(&truncatedVal), sizeof(ValueT));
                if (metadata == MASK_AND_TWO_INACTIVE_VALS) {
                    // Write the second of two distinct inactive values.
                    truncatedVal = truncateRealToHalf(maskCompressData.inactiveVal[1]);
                    os.write(reinterpret_cast<const char*>(&truncatedVal), sizeof(ValueT));
                }
            }
        }
        if (metadata == NO_MASK_AND_ALL_VALS) {
            // If there are more than two unique inactive values, the entire input buffer
            // needs to be saved (both active and inactive values).
            /// @todo Save the selection mask as long as most of the inactive values
            /// are one of two values?
        } else {
            // Create a new array to hold just the active values.
            scopedTempBuf.reset(new ValueT[srcCount]);
            tempBuf = scopedTempBuf.get();
            if (metadata == NO_MASK_OR_INACTIVE_VALS ||
                metadata == NO_MASK_AND_MINUS_BG ||
                metadata == NO_MASK_AND_ONE_INACTIVE_VAL)
            {
                // Copy active values to the contiguous array.
                tempCount = 0;
                for (typename MaskT::OnIterator it = valueMask.beginOn(); it; ++it, ++tempCount) {
                    tempBuf[tempCount] = srcBuf[it.pos()];
                }
            } else {
                // Copy active values to a new, contiguous array and populate a bitmask
                // that selects between two distinct inactive values.
                MaskT selectionMask;
                tempCount = 0;
                for (Index srcIdx = 0; srcIdx < srcCount; ++srcIdx) {
                    if (valueMask.isOn(srcIdx)) { // active value
                        tempBuf[tempCount] = srcBuf[srcIdx];
                        ++tempCount;
                    } else { // inactive value
                        if (MaskCompress<ValueT, MaskT>::eq(srcBuf[srcIdx], maskCompressData.inactiveVal[1])) {
                            selectionMask.setOn(srcIdx); // inactive value 1
                        } // else inactive value 0
                    }
                }
                assert(tempCount == valueMask.countOn());
                // Write out the mask that selects between two inactive values.
                selectionMask.save(os);
            }
        }
    }
    // Write out the buffer.
    if (toHalf) {
        HalfWriter<RealToHalf<ValueT>::isReal, ValueT>::write(os, tempBuf, tempCount, compress);
    } else {
        writeData(os, tempBuf, tempCount, compress);
    }
}
} // namespace io
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_IO_COMPRESSION_HAS_BEEN_INCLUDED | 32,146 | C | 41.578808 | 113 | 0.628352 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/io/TempFile.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file TempFile.cc
#include "TempFile.h"
#include <openvdb/Exceptions.h>
#ifndef _MSC_VER
#include <boost/iostreams/stream.hpp>
#include <boost/iostreams/device/file_descriptor.hpp>
#include <cstdlib> // for std::getenv(), mkstemp()
#include <sys/types.h> // for mode_t
#include <sys/stat.h> // for mkdir(), umask()
#include <unistd.h> // for access()
#else
#include <fstream> // for std::filebuf
#endif
#include <cstdio> // for std::tmpnam(), L_tmpnam, P_tmpdir
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace io {
struct TempFile::TempFileImpl
{
    /// Return the path of the temporary file.
    const std::string& filename() const { return mPath; }

    /// Return @c true if the underlying stream buffer is open.
    bool is_open() const { return mBuffer.is_open(); }

    /// @internal boost::filesystem::unique_path(), etc. might be useful here,
    /// but as of 9/2014, Houdini ships without the Boost.Filesystem library,
    /// which makes it much less convenient to use that library.
#ifndef _MSC_VER
    TempFileImpl(std::ostream& os): mFileDescr(-1) { this->init(os); }

    /// @brief Create a uniquely-named file with mkstemp() and redirect
    /// the given stream's output to it.
    void init(std::ostream& os)
    {
        // mkstemp() requires a modifiable, null-terminated template string.
        std::string fn = this->getTempDir() + "/openvdb_temp_XXXXXX";
        std::vector<char> fnbuf(fn.begin(), fn.end());
        fnbuf.push_back(char(0));
        //const mode_t savedMode = ::umask(~(S_IRUSR | S_IWUSR));
        mFileDescr = ::mkstemp(&fnbuf[0]);
        //::umask(savedMode);
        if (mFileDescr < 0) {
            OPENVDB_THROW(IoError, "failed to generate temporary file");
        }
        mPath.assign(&fnbuf[0]);
        // The descriptor is closed explicitly in close(), not by the device
        // (never_close_handle), to keep ownership in one place.
        mDevice = DeviceType(mFileDescr, boost::iostreams::never_close_handle);
        mBuffer.open(mDevice);
        os.rdbuf(&mBuffer);
        if (!os.good()) {
            OPENVDB_THROW(IoError, "failed to open temporary file " + mPath);
        }
    }

    /// @brief Flush the buffer and close the file.
    /// @details Safe to call more than once (e.g., an explicit close()
    /// followed by the one in TempFile's destructor).
    void close()
    {
        mBuffer.close();
        if (mFileDescr >= 0) {
            ::close(mFileDescr);
            // Reset the descriptor so that a subsequent call doesn't close
            // an unrelated descriptor that the OS may have recycled.
            mFileDescr = -1;
        }
    }

    /// @brief Return the directory in which to create temporary files,
    /// creating the OPENVDB_TEMP_DIR directory if necessary.
    static std::string getTempDir()
    {
        if (const char* dir = std::getenv("OPENVDB_TEMP_DIR")) {
            if (0 != ::access(dir, F_OK)) {
                ::mkdir(dir, S_IRUSR | S_IWUSR | S_IXUSR);
                if (0 != ::access(dir, F_OK)) {
                    OPENVDB_THROW(IoError,
                        "failed to create OPENVDB_TEMP_DIR (" + std::string(dir) + ")");
                }
            }
            return dir;
        }
        if (const char* dir = std::getenv("TMPDIR")) return dir;
        return P_tmpdir;
    }

    using DeviceType = boost::iostreams::file_descriptor_sink;
    using BufferType = boost::iostreams::stream_buffer<boost::iostreams::file_descriptor_sink>;

    std::string mPath;   ///< path of the temporary file
    DeviceType mDevice;  ///< non-owning sink wrapping the file descriptor
    BufferType mBuffer;  ///< stream buffer attached to the device
    int mFileDescr;      ///< POSIX file descriptor, or -1 once closed
#else // _MSC_VER
    // Use only standard library routines; no POSIX.

    TempFileImpl(std::ostream& os) { this->init(os); }

    /// @brief Generate a temporary filename with std::tmpnam() and redirect
    /// the given stream's output to a file with that name.
    void init(std::ostream& os)
    {
        char fnbuf[L_tmpnam];
        const char* filename = std::tmpnam(fnbuf);
        if (!filename) {
            OPENVDB_THROW(IoError, "failed to generate name for temporary file");
        }
        /// @todo This is not safe, since another process could open a file
        /// with this name before we do. Unfortunately, there is no safe,
        /// portable way to create a temporary file.
        mPath = filename;
        const std::ios_base::openmode mode = (std::ios_base::out | std::ios_base::binary);
        os.rdbuf(mBuffer.open(mPath.c_str(), mode));
        if (!os.good()) {
            OPENVDB_THROW(IoError, "failed to open temporary file " + mPath);
        }
    }

    /// Flush the buffer and close the file.  Safe to call more than once.
    void close() { mBuffer.close(); }

    std::string mPath;    ///< path of the temporary file
    std::filebuf mBuffer; ///< stream buffer attached to the file
#endif // _MSC_VER

private:
    TempFileImpl(const TempFileImpl&); // disable copying
    TempFileImpl& operator=(const TempFileImpl&); // disable assignment
};
// Construct with a null stream buffer; TempFileImpl's constructor creates
// the temporary file and installs a buffer bound to it via rdbuf().
TempFile::TempFile(): std::ostream(nullptr), mImpl(new TempFileImpl(*this)) {}
// Ensure the file is flushed and closed when the stream is destroyed.
TempFile::~TempFile() { this->close(); }
/// Return the path of the temporary file.
const std::string& TempFile::filename() const { return mImpl->filename(); }
/// Return @c true if the temporary file is open.
bool TempFile::is_open() const { return mImpl->is_open(); }
/// Flush the stream and close the temporary file.
void TempFile::close() { mImpl->close(); }
} // namespace io
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
| 4,308 | C++ | 30.452555 | 95 | 0.611421 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/io/Queue.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file Queue.h
/// @author Peter Cucka
#ifndef OPENVDB_IO_QUEUE_HAS_BEEN_INCLUDED
#define OPENVDB_IO_QUEUE_HAS_BEEN_INCLUDED
#include <openvdb/Types.h>
#include <openvdb/Grid.h>
#include <algorithm> // for std::copy
#include <functional>
#include <iterator> // for std::back_inserter
#include <memory>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace io {
class Archive;
/// @brief Queue for asynchronous output of grids to files or streams
///
/// @warning The queue holds shared pointers to grids. It is not safe
/// to modify a grid that has been placed in the queue. Instead,
/// make a deep copy of the grid (Grid::deepCopy()).
///
/// @par Example:
/// @code
/// #include <openvdb/openvdb.h>
/// #include <openvdb/io/Queue.h>
/// #include <tbb/concurrent_hash_map.h>
/// #include <functional>
///
/// using openvdb::io::Queue;
///
/// struct MyNotifier
/// {
/// // Use a concurrent container, because queue callback functions
/// // must be thread-safe.
/// using FilenameMap = tbb::concurrent_hash_map<Queue::Id, std::string>;
/// FilenameMap filenames;
///
/// // Callback function that prints the status of a completed task.
/// void callback(Queue::Id id, Queue::Status status)
/// {
/// const bool ok = (status == Queue::SUCCEEDED);
/// FilenameMap::accessor acc;
/// if (filenames.find(acc, id)) {
/// std::cout << (ok ? "wrote " : "failed to write ")
/// << acc->second << std::endl;
/// filenames.erase(acc);
/// }
/// }
/// };
///
/// int main()
/// {
/// // Construct an object to receive notifications from the queue.
/// // The object's lifetime must exceed the queue's.
/// MyNotifier notifier;
///
/// Queue queue;
///
/// // Register the callback() method of the MyNotifier object
/// // to receive notifications of completed tasks.
/// queue.addNotifier(std::bind(&MyNotifier::callback, ¬ifier,
/// std::placeholders::_1, std::placeholders::_2));
///
/// // Queue grids for output (e.g., for each step of a simulation).
/// for (int step = 1; step <= 10; ++step) {
/// openvdb::FloatGrid::Ptr grid = ...;
///
/// std::ostringstream os;
/// os << "mygrid." << step << ".vdb";
/// const std::string filename = os.str();
///
/// Queue::Id id = queue.writeGrid(grid, openvdb::io::File(filename));
///
/// // Associate the filename with the ID of the queued task.
/// MyNotifier::FilenameMap::accessor acc;
/// notifier.filenames.insert(acc, id);
/// acc->second = filename;
/// }
/// }
/// @endcode
/// Output:
/// @code
/// wrote mygrid.1.vdb
/// wrote mygrid.2.vdb
/// wrote mygrid.4.vdb
/// wrote mygrid.3.vdb
/// ...
/// wrote mygrid.10.vdb
/// @endcode
/// Note that tasks do not necessarily complete in the order in which they were queued.
class OPENVDB_API Queue
{
public:
    /// Default maximum queue length (see setCapacity())
    static const Index32 DEFAULT_CAPACITY = 100;
    /// @brief Default maximum time in seconds to wait to queue a task
    /// when the queue is full (see setTimeout())
    static const Index32 DEFAULT_TIMEOUT = 120; // seconds
    /// ID number of a queued task or of a registered notification callback
    using Id = Index32;
    /// Status of a queued task
    enum Status { UNKNOWN, PENDING, SUCCEEDED, FAILED };
    /// Construct a queue with the given capacity.
    explicit Queue(Index32 capacity = DEFAULT_CAPACITY);
    /// Block until all queued tasks complete (successfully or unsuccessfully).
    ~Queue();
    /// @brief Return @c true if the queue is empty.
    bool empty() const;
    /// @brief Return the number of tasks currently in the queue.
    Index32 size() const;
    /// @brief Return the maximum number of tasks allowed in the queue.
    /// @details Once the queue has reached its maximum size, adding
    /// a new task will block until an existing task has executed.
    Index32 capacity() const;
    /// Set the maximum number of tasks allowed in the queue.
    void setCapacity(Index32);
    /// Return the maximum number of seconds to wait to queue a task when the queue is full.
    Index32 timeout() const;
    /// Set the maximum number of seconds to wait to queue a task when the queue is full.
    void setTimeout(Index32 seconds = DEFAULT_TIMEOUT);
    /// @brief Return the status of the task with the given ID.
    /// @note Querying the status of a task that has already completed
    /// (whether successfully or not) removes the task from the status registry.
    /// Subsequent queries of its status will return UNKNOWN.
    Status status(Id) const;
    /// Type of a callback that receives a completed task's ID and final status
    using Notifier = std::function<void (Id, Status)>;
    /// @brief Register a function that will be called with a task's ID
    /// and status when that task completes, whether successfully or not.
    /// @return an ID that can be passed to removeNotifier() to deregister the function
    /// @details When multiple notifiers are registered, they are called
    /// in the order in which they were registered.
    /// @warning Notifiers are called from worker threads, so they must be thread-safe
    /// and their lifetimes must exceed that of the queue. They must also not call,
    /// directly or indirectly, addNotifier(), removeNotifier() or clearNotifiers(),
    /// as that can result in a deadlock.
    Id addNotifier(Notifier);
    /// Deregister the notifier with the given ID.
    void removeNotifier(Id);
    /// Deregister all notifiers.
    void clearNotifiers();
    /// @brief Queue a single grid for output to a file or stream.
    /// @param grid the grid to be serialized
    /// @param archive the io::File or io::Stream to which to output the grid
    /// @param fileMetadata optional file-level metadata
    /// @return an ID with which the status of the queued task can be queried
    /// @throw RuntimeError if the task cannot be queued within the time limit
    /// (see setTimeout()) because the queue is full
    /// @par Example:
    /// @code
    /// openvdb::FloatGrid::Ptr grid = ...;
    ///
    /// openvdb::io::Queue queue;
    ///
    /// // Write the grid to the file mygrid.vdb.
    /// queue.writeGrid(grid, openvdb::io::File("mygrid.vdb"));
    ///
    /// // Stream the grid to a binary string.
    /// std::ostringstream ostr(std::ios_base::binary);
    /// queue.writeGrid(grid, openvdb::io::Stream(ostr));
    /// @endcode
    Id writeGrid(GridBase::ConstPtr grid, const Archive& archive,
        const MetaMap& fileMetadata = MetaMap());
    /// @brief Queue a container of grids for output to a file.
    /// @param grids any iterable container of grid pointers
    /// (e.g., a GridPtrVec or GridPtrSet)
    /// @param archive the io::File or io::Stream to which to output the grids
    /// @param fileMetadata optional file-level metadata
    /// @return an ID with which the status of the queued task can be queried
    /// @throw RuntimeError if the task cannot be queued within the time limit
    /// (see setTimeout()) because the queue is full
    /// @par Example:
    /// @code
    /// openvdb::FloatGrid::Ptr floatGrid = ...;
    /// openvdb::BoolGrid::Ptr boolGrid = ...;
    /// openvdb::GridPtrVec grids;
    /// grids.push_back(floatGrid);
    /// grids.push_back(boolGrid);
    ///
    /// openvdb::io::Queue queue;
    ///
    /// // Write the grids to the file mygrid.vdb.
    /// queue.write(grids, openvdb::io::File("mygrid.vdb"));
    ///
    /// // Stream the grids to a (binary) string.
    /// std::ostringstream ostr(std::ios_base::binary);
    /// queue.write(grids, openvdb::io::Stream(ostr));
    /// @endcode
    template<typename GridPtrContainer>
    Id write(const GridPtrContainer& grids, const Archive& archive,
        const MetaMap& fileMetadata = MetaMap());
private:
    // Disallow copying of instances of this class.
    Queue(const Queue&);
    Queue& operator=(const Queue&);
    // Shared implementation to which the public write() overloads forward.
    Id writeGridVec(const GridCPtrVec&, const Archive&, const MetaMap&);
    // Private implementation (PImpl) hiding the threading machinery.
    struct Impl;
    std::unique_ptr<Impl> mImpl;
}; // class Queue
/// @brief Queue a container of grids for output.
/// @details The grid pointers are copied into a GridCPtrVec before being
/// handed to writeGridVec(), so any iterable container type is accepted.
template<typename GridPtrContainer>
inline Queue::Id
Queue::write(const GridPtrContainer& container,
    const Archive& archive, const MetaMap& metadata)
{
    // Normalize the container to a vector of const grid pointers.
    const GridCPtrVec gridVec(container.begin(), container.end());
    return this->writeGridVec(gridVec, archive, metadata);
}
// Specialization for vectors of const Grid pointers; no copying necessary
template<>
inline Queue::Id
Queue::write<GridCPtrVec>(const GridCPtrVec& grids,
    const Archive& archive, const MetaMap& metadata)
{
    // The container already has the exact type writeGridVec() expects,
    // so forward it directly.
    return this->writeGridVec(grids, archive, metadata);
}
} // namespace io
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_IO_QUEUE_HAS_BEEN_INCLUDED
| 8,964 | C | 35.149193 | 92 | 0.651606 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/io/Archive.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_IO_ARCHIVE_HAS_BEEN_INCLUDED
#define OPENVDB_IO_ARCHIVE_HAS_BEEN_INCLUDED
#include <openvdb/version.h>
#include "Compression.h" // for COMPRESS_ZIP, etc.
#include <openvdb/Grid.h>
#include <openvdb/MetaMap.h>
#include <openvdb/Platform.h>
#include <openvdb/version.h> // for VersionId
#include <boost/uuid/uuid.hpp>
#include <cstdint>
#include <iosfwd>
#include <map>
#include <memory>
#include <string>
class TestFile;
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace io {
class GridDescriptor;
/// Grid serializer/unserializer
class OPENVDB_API Archive
{
public:
    using Ptr = SharedPtr<Archive>;
    using ConstPtr = SharedPtr<const Archive>;
    /// Default bitwise OR of compression option flags (see Compression.h)
    static const uint32_t DEFAULT_COMPRESSION_FLAGS;
    /// Construct an archive with default settings.
    Archive();
    Archive(const Archive&) = default;
    Archive& operator=(const Archive&) = default;
    virtual ~Archive();
    /// @brief Return a copy of this archive.
    virtual Ptr copy() const;
    /// @brief Return the UUID that was most recently written (or read,
    /// if no UUID has been written yet).
    std::string getUniqueTag() const;
    /// @brief Return @c true if the given UUID matches this archive's UUID.
    bool isIdentical(const std::string& uuidStr) const;
    /// @brief Return the file format version number of the input stream.
    uint32_t fileVersion() const { return mFileVersion; }
    /// @brief Return the (major, minor) version number of the library that was
    /// used to write the input stream.
    VersionId libraryVersion() const { return mLibraryVersion; }
    /// @brief Return a string of the form "<major>.<minor>/<format>", giving the
    /// library and file format version numbers associated with the input stream.
    std::string version() const;
    /// @brief Return @c true if trees shared by multiple grids are written out
    /// only once, @c false if they are written out once per grid.
    bool isInstancingEnabled() const { return mEnableInstancing; }
    /// @brief Specify whether trees shared by multiple grids should be
    /// written out only once (@c true) or once per grid (@c false).
    /// @note Instancing is enabled by default.
    void setInstancingEnabled(bool b) { mEnableInstancing = b; }
    /// Return @c true if the OpenVDB library includes support for the Blosc compressor.
    static bool hasBloscCompression();
    /// Return @c true if the OpenVDB library includes support for the ZLib compressor.
    static bool hasZLibCompression();
    /// Return a bit mask specifying compression options for the data stream.
    uint32_t compression() const { return mCompression; }
    /// @brief Specify whether and how the data stream should be compressed.
    /// @param c bitwise OR (e.g., COMPRESS_ZIP | COMPRESS_ACTIVE_MASK) of
    /// compression option flags (see Compression.h for the available flags)
    /// @note Not all combinations of compression options are supported.
    void setCompression(uint32_t c) { mCompression = c; }
    /// @brief Return @c true if grid statistics (active voxel count and
    /// bounding box, etc.) are computed and written as grid metadata.
    bool isGridStatsMetadataEnabled() const { return mEnableGridStats; }
    /// @brief Specify whether grid statistics (active voxel count and
    /// bounding box, etc.) should be computed and written as grid metadata.
    void setGridStatsMetadataEnabled(bool b) { mEnableGridStats = b; }
    /// @brief Write the grids in the given container to this archive's output stream.
    /// @note This base-class implementation is a no-op; subclasses override it.
    virtual void write(const GridCPtrVec&, const MetaMap& = MetaMap()) const {}
    /// @brief Return @c true if delayed loading is enabled.
    /// @details If enabled, delayed loading can be disabled for individual files,
    /// but not vice-versa.
    /// @note Define the environment variable @c OPENVDB_DISABLE_DELAYED_LOAD
    /// to disable delayed loading unconditionally.
    static bool isDelayedLoadingEnabled();
protected:
    /// @brief Return @c true if the input stream contains grid offsets
    /// that allow for random access or partial reading.
    bool inputHasGridOffsets() const { return mInputHasGridOffsets; }
    void setInputHasGridOffsets(bool b) { mInputHasGridOffsets = b; }
    /// @brief Tag the given input stream with the input file format version number.
    ///
    /// The tag can be retrieved with getFormatVersion().
    /// @sa getFormatVersion()
    void setFormatVersion(std::istream&);
    /// @brief Tag the given input stream with the version number of
    /// the library with which the input stream was created.
    ///
    /// The tag can be retrieved with getLibraryVersion().
    /// @sa getLibraryVersion()
    void setLibraryVersion(std::istream&);
    /// @brief Tag the given input stream with flags indicating whether
    /// the input stream contains compressed data and how it is compressed.
    void setDataCompression(std::istream&);
    /// @brief Tag an output stream with flags specifying only those
    /// compression options that are applicable to the given grid.
    void setGridCompression(std::ostream&, const GridBase&) const;
    /// @brief Read in the compression flags for a grid and
    /// tag the given input stream with those flags.
    static void readGridCompression(std::istream&);
    /// Read in and return the number of grids on the input stream.
    static int32_t readGridCount(std::istream&);
    /// Populate the given grid from the input stream.
    static void readGrid(GridBase::Ptr, const GridDescriptor&, std::istream&);
    /// @brief Populate the given grid from the input stream, but only where it
    /// intersects the given world-space bounding box.
    static void readGrid(GridBase::Ptr, const GridDescriptor&, std::istream&, const BBoxd&);
    /// @brief Populate the given grid from the input stream, but only where it
    /// intersects the given index-space bounding box.
    static void readGrid(GridBase::Ptr, const GridDescriptor&, std::istream&, const CoordBBox&);
    /// Map from a grid's unique name to the grid itself
    using NamedGridMap = std::map<Name /*uniqueName*/, GridBase::Ptr>;
    /// @brief If the grid represented by the given grid descriptor
    /// is an instance, connect it with its instance parent.
    void connectInstance(const GridDescriptor&, const NamedGridMap&) const;
    /// Write the given grid descriptor and grid to an output stream
    /// and update the GridDescriptor offsets.
    /// @param seekable if true, the output stream supports seek operations
    void writeGrid(GridDescriptor&, GridBase::ConstPtr, std::ostream&, bool seekable) const;
    /// Write the given grid descriptor and grid metadata to an output stream
    /// and update the GridDescriptor offsets, but don't write the grid's tree,
    /// since it is shared with another grid.
    /// @param seekable if true, the output stream supports seek operations
    void writeGridInstance(GridDescriptor&, GridBase::ConstPtr,
        std::ostream&, bool seekable) const;
    /// @brief Read the magic number, version numbers, UUID, etc. from the given input stream.
    /// @return @c true if the input UUID differs from the previously-read UUID.
    bool readHeader(std::istream&);
    /// @brief Write the magic number, version numbers, UUID, etc. to the given output stream.
    /// @param seekable if true, the output stream supports seek operations
    /// @todo This method should not be const since it actually redefines the UUID!
    void writeHeader(std::ostream&, bool seekable) const;
    //@{
    /// Write the given grids to an output stream.
    void write(std::ostream&, const GridPtrVec&, bool seekable, const MetaMap& = MetaMap()) const;
    void write(std::ostream&, const GridCPtrVec&, bool seekable, const MetaMap& = MetaMap()) const;
    //@}
private:
    friend class ::TestFile;
    /// The version of the file that was read
    uint32_t mFileVersion;
    /// The version of the library that was used to create the file that was read
    VersionId mLibraryVersion;
    /// 16-byte (128-bit) UUID
    mutable boost::uuids::uuid mUuid;// needs to be mutable since writeHeader is const!
    /// Flag indicating whether the input stream contains grid offsets
    /// and therefore supports partial reading
    bool mInputHasGridOffsets;
    /// Flag indicating whether a tree shared by multiple grids should be
    /// written out only once (true) or once per grid (false)
    bool mEnableInstancing;
    /// Flags indicating whether and how the data stream is compressed
    uint32_t mCompression;
    /// Flag indicating whether grid statistics metadata should be written
    bool mEnableGridStats;
}; // class Archive
} // namespace io
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_IO_ARCHIVE_HAS_BEEN_INCLUDED
| 8,848 | C | 43.024875 | 99 | 0.710669 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/io/Stream.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_IO_STREAM_HAS_BEEN_INCLUDED
#define OPENVDB_IO_STREAM_HAS_BEEN_INCLUDED
#include "Archive.h"
#include <iosfwd>
#include <memory>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace io {
class GridDescriptor;
/// Grid archive associated with arbitrary input and output streams (not necessarily files)
class OPENVDB_API Stream: public Archive
{
public:
    /// @brief Read grids from an input stream.
    /// @details If @a delayLoad is true, map the contents of the input stream
    /// into memory and enable delayed loading of grids.
    /// @note Define the environment variable @c OPENVDB_DISABLE_DELAYED_LOAD
    /// to disable delayed loading unconditionally.
    explicit Stream(std::istream&, bool delayLoad = true);
    /// Construct an archive for stream output.
    Stream();
    /// Construct an archive for output to the given stream.
    explicit Stream(std::ostream&);
    Stream(const Stream&);
    Stream& operator=(const Stream&);
    ~Stream() override;
    /// @brief Return a copy of this archive.
    Archive::Ptr copy() const override;
    /// Return the file-level metadata in a newly created MetaMap.
    MetaMap::Ptr getMetadata() const;
    /// Return pointers to the grids that were read from the input stream.
    GridPtrVecPtr getGrids();
    /// @brief Write the grids in the given container to this archive's output stream.
    /// @throw ValueError if this archive was constructed without specifying an output stream.
    void write(const GridCPtrVec&, const MetaMap& = MetaMap()) const override;
    /// @brief Write the grids in the given container to this archive's output stream.
    /// @throw ValueError if this archive was constructed without specifying an output stream.
    template<typename GridPtrContainerT>
    void write(const GridPtrContainerT&, const MetaMap& = MetaMap()) const;
private:
    /// Create a new grid of the type specified by the given descriptor,
    /// then populate the grid from the given input stream.
    /// @return the newly created grid.
    GridBase::Ptr readGrid(const GridDescriptor&, std::istream&) const;
    void writeGrids(std::ostream&, const GridCPtrVec&, const MetaMap&) const;
    // Private implementation (PImpl) hiding the stream state.
    struct Impl;
    std::unique_ptr<Impl> mImpl;
};
////////////////////////////////////////
template<typename GridPtrContainerT>
inline void
Stream::write(const GridPtrContainerT& container, const MetaMap& metadata) const
{
GridCPtrVec grids;
std::copy(container.begin(), container.end(), std::back_inserter(grids));
this->write(grids, metadata);
}
} // namespace io
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_IO_STREAM_HAS_BEEN_INCLUDED
| 2,813 | C | 30.266666 | 94 | 0.709563 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/io/Compression.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#include "Compression.h"
#include <openvdb/Exceptions.h>
#include <openvdb/util/logging.h>
#include <boost/algorithm/string/join.hpp>
#ifdef OPENVDB_USE_ZLIB
#include <zlib.h>
#endif
#ifdef OPENVDB_USE_BLOSC
#include <blosc.h>
#endif
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace io {
/// Return a human-readable description of the given compression-flags
/// bitmask, e.g. "zip + active values", or "none" if no flags are set.
std::string
compressionToString(uint32_t flags)
{
    if (flags == COMPRESS_NONE) return "none";
    std::string descr;
    if (flags & COMPRESS_ZIP) descr += "zip";
    if (flags & COMPRESS_BLOSC) {
        if (!descr.empty()) descr += " + ";
        descr += "blosc";
    }
    if (flags & COMPRESS_ACTIVE_MASK) {
        if (!descr.empty()) descr += " + ";
        descr += "active values";
    }
    return descr;
}
////////////////////////////////////////
#ifdef OPENVDB_USE_ZLIB
namespace {
// Compression level handed to zlib's compress2().
const int ZIP_COMPRESSION_LEVEL = Z_DEFAULT_COMPRESSION; ///< @todo use Z_BEST_SPEED?
}
#endif
#ifndef OPENVDB_USE_ZLIB
// Stub used when the library is built without zlib support.
size_t
zipToStreamSize(const char*, size_t)
{
    OPENVDB_THROW(IoError, "Zip encoding is not supported");
}
#else
// Return the size of the payload that zipToStream() would write for this
// buffer (excluding the 8-byte size header): the deflated size if compression
// succeeds and actually saves space, otherwise the raw input size.
// Note: this performs a full compression pass just to measure the output,
// so it costs roughly as much as the actual encode in zipToStream().
size_t
zipToStreamSize(const char* data, size_t numBytes)
{
    // Get an upper bound on the size of the compressed data.
    uLongf numZippedBytes = compressBound(numBytes);
    // Compress the data.
    std::unique_ptr<Bytef[]> zippedData(new Bytef[numZippedBytes]);
    int status = compress2(
        /*dest=*/zippedData.get(), &numZippedBytes,
        /*src=*/reinterpret_cast<const Bytef*>(data), numBytes,
        /*level=*/ZIP_COMPRESSION_LEVEL);
    if (status == Z_OK && numZippedBytes < numBytes) {
        return size_t(numZippedBytes);
    } else {
        // Compression failed or did not shrink the data: it would be stored raw.
        return size_t(numBytes);
    }
}
#endif
#ifndef OPENVDB_USE_ZLIB
// Stub used when the library is built without zlib support.
void
zipToStream(std::ostream&, const char*, size_t)
{
    OPENVDB_THROW(IoError, "Zip encoding is not supported");
}
#else
// Deflate the given buffer with zlib and write it to the stream, preceded by
// an 8-byte signed size header.  A positive header is the byte count of the
// compressed payload that follows; a negative header indicates that the
// payload was stored uncompressed (compression failed or saved no space) and
// its magnitude is the raw byte count.  This convention must stay in sync
// with unzipFromStream().
void
zipToStream(std::ostream& os, const char* data, size_t numBytes)
{
    // Get an upper bound on the size of the compressed data.
    uLongf numZippedBytes = compressBound(numBytes);
    // Compress the data.
    std::unique_ptr<Bytef[]> zippedData(new Bytef[numZippedBytes]);
    int status = compress2(
        /*dest=*/zippedData.get(), &numZippedBytes,
        /*src=*/reinterpret_cast<const Bytef*>(data), numBytes,
        /*level=*/ZIP_COMPRESSION_LEVEL);
    if (status != Z_OK) {
        // Not fatal: the data is simply written uncompressed below.
        std::string errDescr;
        if (const char* s = zError(status)) errDescr = s;
        if (!errDescr.empty()) errDescr = " (" + errDescr + ")";
        OPENVDB_LOG_DEBUG("zlib compress2() returned error code " << status << errDescr);
    }
    if (status == Z_OK && numZippedBytes < numBytes) {
        // Write the size of the compressed data.
        Int64 outZippedBytes = numZippedBytes;
        os.write(reinterpret_cast<char*>(&outZippedBytes), 8);
        // Write the compressed data.
        os.write(reinterpret_cast<char*>(zippedData.get()), outZippedBytes);
    } else {
        // Write the size of the uncompressed data.
        // numBytes expected to be <= the max value + 1 of a signed int64
        assert(numBytes < size_t(std::numeric_limits<Int64>::max()));
        Int64 negBytes = -Int64(numBytes);
        os.write(reinterpret_cast<char*>(&negBytes), 8);
        // Write the uncompressed data.
        os.write(reinterpret_cast<const char*>(data), numBytes);
    }
}
#endif
#ifndef OPENVDB_USE_ZLIB
// Stub used when the library is built without zlib support.
void
unzipFromStream(std::istream&, char*, size_t)
{
    OPENVDB_THROW(IoError, "Zip decoding is not supported");
}
#else
// Read a zip-compressed chunk from the stream and inflate it into @a data,
// which must hold at least @a numBytes bytes.  The chunk begins with an
// 8-byte signed size header (written by zipToStream()): positive = size of
// the compressed payload, negative = payload stored raw, magnitude is the
// raw byte count.  If @a data is null, the chunk is skipped instead of read.
// @throw RuntimeError if the chunk does not decode to exactly @a numBytes.
void
unzipFromStream(std::istream& is, char* data, size_t numBytes)
{
    // Read the size of the compressed data.
    // A negative size indicates uncompressed data.
    Int64 numZippedBytes;
    is.read(reinterpret_cast<char*>(&numZippedBytes), 8);
    if (numZippedBytes <= 0) {
        // Read the uncompressed data.
        if (data == nullptr) {
            is.seekg(-numZippedBytes, std::ios_base::cur);
        } else {
            is.read(data, -numZippedBytes);
        }
        if (size_t(-numZippedBytes) != numBytes) {
            OPENVDB_THROW(RuntimeError, "Expected to read a " << numBytes
                << "-byte chunk, got a " << -numZippedBytes << "-byte chunk");
        }
    } else {
        if (data == nullptr) {
            // Seek over the compressed data.
            is.seekg(numZippedBytes, std::ios_base::cur);
        } else {
            // Read the compressed data.
            std::unique_ptr<Bytef[]> zippedData(new Bytef[numZippedBytes]);
            is.read(reinterpret_cast<char*>(zippedData.get()), numZippedBytes);
            // Uncompress the data.
            uLongf numUnzippedBytes = numBytes;
            int status = uncompress(
                /*dest=*/reinterpret_cast<Bytef*>(data), &numUnzippedBytes,
                /*src=*/zippedData.get(), static_cast<uLongf>(numZippedBytes));
            if (status != Z_OK) {
                std::string errDescr;
                if (const char* s = zError(status)) errDescr = s;
                if (!errDescr.empty()) errDescr = " (" + errDescr + ")";
                OPENVDB_LOG_DEBUG("zlib uncompress() returned error code " << status << errDescr);
            }
            if (numUnzippedBytes != numBytes) {
                // Report the actual decompressed size.  (This message previously
                // printed the compressed size, numZippedBytes, by mistake;
                // the Blosc counterpart reports the decompressed count.)
                OPENVDB_THROW(RuntimeError, "Expected to decompress " << numBytes
                    << " byte" << (numBytes == 1 ? "" : "s") << ", got "
                    << numUnzippedBytes << " byte" << (numUnzippedBytes == 1 ? "" : "s"));
            }
        }
    }
}
#endif
namespace {
#ifdef OPENVDB_USE_BLOSC
// Thin wrapper around blosc_compress_ctx() with the settings used for all
// OpenVDB Blosc stream compression (LZ4, shuffle enabled, float typesize,
// single-threaded).  A result <= 0 indicates failure or incompressible data;
// callers fall back to storing the data uncompressed in that case.
int bloscCompress(size_t inBytes, const char* data, char* compressedData, int outBytes)
{
    return blosc_compress_ctx(
        /*clevel=*/9, // 0 (no compression) to 9 (maximum compression)
        /*doshuffle=*/true,
        /*typesize=*/sizeof(float), //for optimal float and Vec3f compression
        /*srcsize=*/inBytes,
        /*src=*/data,
        /*dest=*/compressedData,
        /*destsize=*/outBytes,
        BLOSC_LZ4_COMPNAME,
        /*blocksize=*/inBytes,//previously set to 256 (in v3.x)
        /*numthreads=*/1);
}
#endif
} // namespace
#ifndef OPENVDB_USE_BLOSC
// Stub used when the library is built without Blosc support.
size_t
bloscToStreamSize(const char*, size_t, size_t)
{
    OPENVDB_THROW(IoError, "Blosc encoding is not supported");
}
#else
// Return the size of the payload that bloscToStream() would write for this
// buffer (excluding the 8-byte size header): the compressed size if Blosc
// succeeds, otherwise the raw input size.
size_t
bloscToStreamSize(const char* data, size_t valSize, size_t numVals)
{
    // Compress into a scratch buffer solely to learn the compressed size.
    const size_t inBytes = valSize * numVals;
    const int bufSize = int(inBytes) + BLOSC_MAX_OVERHEAD;
    std::unique_ptr<char[]> scratch(new char[bufSize]);
    const int compressedBytes = bloscCompress(inBytes, data, scratch.get(), bufSize);
    // A nonpositive result means compression failed or saved no space,
    // in which case the data would be streamed uncompressed.
    return (compressedBytes <= 0) ? inBytes : size_t(compressedBytes);
}
#endif
#ifndef OPENVDB_USE_BLOSC
// Stub used when the library is built without Blosc support.
void
bloscToStream(std::ostream&, const char*, size_t, size_t)
{
    OPENVDB_THROW(IoError, "Blosc encoding is not supported");
}
#else
// Compress the given buffer with Blosc and write it to the stream, preceded
// by an 8-byte signed size header.  A positive header is the byte count of
// the compressed payload; a negative header indicates the payload was stored
// uncompressed (compression failed) and its magnitude is the raw byte count.
// This convention must stay in sync with bloscFromStream().
void
bloscToStream(std::ostream& os, const char* data, size_t valSize, size_t numVals)
{
    const size_t inBytes = valSize * numVals;
    // inBytes expected to be <= the max value + 1 of a signed int64
    assert(inBytes < size_t(std::numeric_limits<Int64>::max()));
    int outBytes = int(inBytes) + BLOSC_MAX_OVERHEAD;
    std::unique_ptr<char[]> compressedData(new char[outBytes]);
    outBytes = bloscCompress(inBytes, data, compressedData.get(), outBytes);
    if (outBytes <= 0) {
        // Compression failed: log and fall back to writing the data raw.
        std::ostringstream ostr;
        ostr << "Blosc failed to compress " << inBytes << " byte" << (inBytes == 1 ? "" : "s");
        if (outBytes < 0) ostr << " (internal error " << outBytes << ")";
        OPENVDB_LOG_DEBUG(ostr.str());
        // Write the size of the uncompressed data.
        Int64 negBytes = -Int64(inBytes);
        os.write(reinterpret_cast<char*>(&negBytes), 8);
        // Write the uncompressed data.
        os.write(reinterpret_cast<const char*>(data), inBytes);
    } else {
        // Write the size of the compressed data.
        Int64 numBytes = outBytes;
        os.write(reinterpret_cast<char*>(&numBytes), 8);
        // Write the compressed data.
        os.write(reinterpret_cast<char*>(compressedData.get()), outBytes);
    }
}
#endif
#ifndef OPENVDB_USE_BLOSC
// Stub used when the library is built without Blosc support.
void
bloscFromStream(std::istream&, char*, size_t)
{
    OPENVDB_THROW(IoError, "Blosc decoding is not supported");
}
#else
// Read a Blosc-compressed chunk from the stream and decompress it into
// @a data, which must hold at least @a numBytes bytes.  The chunk begins
// with an 8-byte signed size header (written by bloscToStream()): positive =
// compressed payload size, negative = payload stored raw, magnitude is the
// raw byte count.  If @a data is null, the chunk is skipped instead of read.
// @throw RuntimeError if the chunk does not decode to exactly @a numBytes.
void
bloscFromStream(std::istream& is, char* data, size_t numBytes)
{
    // Read the size of the compressed data.
    // A negative size indicates uncompressed data.
    Int64 numCompressedBytes;
    is.read(reinterpret_cast<char*>(&numCompressedBytes), 8);
    if (numCompressedBytes <= 0) {
        // Read the uncompressed data.
        if (data == nullptr) {
            is.seekg(-numCompressedBytes, std::ios_base::cur);
        } else {
            is.read(data, -numCompressedBytes);
        }
        if (size_t(-numCompressedBytes) != numBytes) {
            OPENVDB_THROW(RuntimeError, "Expected to read a " << numBytes
                << "-byte uncompressed chunk, got a " << -numCompressedBytes << "-byte chunk");
        }
    } else {
        if (data == nullptr) {
            // Seek over the compressed data.
            is.seekg(numCompressedBytes, std::ios_base::cur);
        } else {
            // Read the compressed data.
            std::unique_ptr<char[]> compressedData(new char[numCompressedBytes]);
            is.read(reinterpret_cast<char*>(compressedData.get()), numCompressedBytes);
            // Uncompress the data.
            const int numUncompressedBytes = blosc_decompress_ctx(
                /*src=*/compressedData.get(), /*dest=*/data, numBytes, /*numthreads=*/1);
            if (numUncompressedBytes < 1) {
                OPENVDB_LOG_DEBUG("blosc_decompress() returned error code "
                    << numUncompressedBytes);
            }
            if (numUncompressedBytes != Int64(numBytes)) {
                OPENVDB_THROW(RuntimeError, "Expected to decompress " << numBytes
                    << " byte" << (numBytes == 1 ? "" : "s") << ", got "
                    << numUncompressedBytes << " byte" << (numUncompressedBytes == 1 ? "" : "s"));
            }
        }
    }
}
#endif
} // namespace io
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
| 10,182 | C++ | 31.533546 | 98 | 0.60823 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/cmd/openvdb_print.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#include <algorithm>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>
#include <openvdb/openvdb.h>
#include <openvdb/util/logging.h>
namespace {
using StringVec = std::vector<std::string>;
const char* INDENT = " ";
const char* gProgName = "";
/// Print the command-line usage message to stderr, then terminate the
/// program with the given exit status (never returns).
void
usage [[noreturn]] (int exitStatus = EXIT_FAILURE)
{
    std::cerr <<
"Usage: " << gProgName << " in.vdb [in.vdb ...] [options]\n" <<
"Which: prints information about OpenVDB grids\n" <<
"Options:\n" <<
"    -l, -stats     long printout, including grid statistics\n" <<
"    -m, -metadata  print per-file and per-grid metadata\n" <<
"    -version       print version information\n";
    exit(exitStatus);
}
/// Format a count to three significant digits with a metric suffix
/// (K, M or G) followed by @a units, e.g. 1500 with "Vox" -> "1.5KVox".
std::string
sizeAsString(openvdb::Index64 n, const std::string& units)
{
    std::ostringstream ostr;
    ostr << std::setprecision(3);
    // Choose the largest decimal unit that the count reaches.
    if (n >= 1000000000) {
        ostr << (double(n) / 1.0e9) << "G";
    } else if (n >= 1000000) {
        ostr << (double(n) / 1.0e6) << "M";
    } else if (n >= 1000) {
        ostr << (double(n) / 1.0e3) << "K";
    } else {
        ostr << n;
    }
    ostr << units;
    return ostr.str();
}
/// Format a byte count to three significant digits with a binary-prefix
/// suffix (B, KB, MB or GB).
std::string
bytesAsString(openvdb::Index64 n)
{
    // Unit table, largest first: each entry is the power-of-two shift
    // that defines the unit and its printed suffix.
    static const struct { int shift; const char* suffix; } kUnits[] = {
        {30, "GB"}, {20, "MB"}, {10, "KB"}
    };
    std::ostringstream ostr;
    ostr << std::setprecision(3);
    for (const auto& unit : kUnits) {
        // n >> shift is nonzero exactly when n reaches this unit.
        if (n >> unit.shift) {
            ostr << (double(n) / double(uint64_t(1) << unit.shift)) << unit.suffix;
            return ostr.str();
        }
    }
    ostr << n << "B";
    return ostr.str();
}
/// Format a coordinate as "<start>i<sep>j<sep>k<stop>",
/// e.g. coordAsString(c, ",", "(", ")") -> "(1,2,3)".
std::string
coordAsString(const openvdb::Coord ijk, const std::string& sep,
    const std::string& start, const std::string& stop)
{
    std::ostringstream ostr;
    ostr << start;
    for (int axis = 0; axis < 3; ++axis) {
        if (axis > 0) ostr << sep;
        ostr << ijk[axis];
    }
    ostr << stop;
    return ostr.str();
}
/// Return a "background: <value>" description of the grid's tree background
/// value, or the empty string if @a grid is null.
std::string
bkgdValueAsString(const openvdb::GridBase::ConstPtr& grid)
{
    if (!grid) return "";
    std::ostringstream ostr;
    ostr << "background: ";
    // getBackgroundValue() may return a null Metadata pointer, in which
    // case only the label is emitted.
    if (openvdb::Metadata::Ptr background = grid->baseTree().getBackgroundValue()) {
        ostr << background->str();
    }
    return ostr.str();
}
/// @brief Print detailed information about the given VDB files:
/// the file-format version each was written with, any file-level metadata,
/// and a verbose (level 11) printout of every grid.
/// Files that fail to open are reported and skipped.
/// (Note: unlike printShortListing(), this listing is always full;
/// there is no metadata flag.)
void
printLongListing(const StringVec& filenames)
{
    bool oneFile = (filenames.size() == 1), firstFile = true;
    for (size_t i = 0, N = filenames.size(); i < N; ++i, firstFile = false) {
        openvdb::io::File file(filenames[i]);
        std::string version;
        openvdb::GridPtrVecPtr grids;
        openvdb::MetaMap::Ptr meta;
        try {
            file.open();
            grids = file.getGrids();
            meta = file.getMetadata();
            version = file.version();
            file.close();
        } catch (openvdb::Exception& e) {
            // Report the error but continue with the remaining files.
            OPENVDB_LOG_ERROR(e.what() << " (" << filenames[i] << ")");
        }
        if (!grids) continue;
        if (!oneFile) {
            // Separate successive files with a horizontal rule.
            if (!firstFile) {
                std::cout << "\n" << std::string(40, '-') << "\n\n";
            }
            std::cout << filenames[i] << "\n\n";
        }
        // Print file-level metadata.
        std::cout << "VDB version: " << version << "\n";
        if (meta) {
            std::string str = meta->str();
            if (!str.empty()) std::cout << str << "\n";
        }
        std::cout << "\n";
        // For each grid in the file...
        bool firstGrid = true;
        for (openvdb::GridPtrVec::const_iterator it = grids->begin(); it != grids->end(); ++it) {
            if (openvdb::GridBase::ConstPtr grid = *it) {
                if (!firstGrid) std::cout << "\n\n";
                std::cout << "Name: " << grid->getName() << std::endl;
                grid->print(std::cout, /*verboseLevel=*/11);
                firstGrid = false;
            }
        }
    }
}
/// @brief Print condensed, one-line-per-grid information about the given
/// VDB files: grid name, value type, active bounding box, dimensions,
/// active voxel count and in-core size.
/// If @a metadata is true, also print file- and grid-level metadata.
/// Files that fail to open are reported and skipped.
void
printShortListing(const StringVec& filenames, bool metadata)
{
    bool oneFile = (filenames.size() == 1), firstFile = true;
    for (size_t i = 0, N = filenames.size(); i < N; ++i, firstFile = false) {
        // Indent grid lines only when listing more than one file.
        const std::string
            indent(oneFile ? "": INDENT),
            indent2(indent + INDENT);
        if (!oneFile) {
            if (metadata && !firstFile) std::cout << "\n";
            std::cout << filenames[i] << ":\n";
        }
        openvdb::GridPtrVecPtr grids;
        openvdb::MetaMap::Ptr meta;
        openvdb::io::File file(filenames[i]);
        try {
            file.open();
            grids = file.getGrids();
            meta = file.getMetadata();
            file.close();
        } catch (openvdb::Exception& e) {
            // Report the error but continue with the remaining files.
            OPENVDB_LOG_ERROR(e.what() << " (" << filenames[i] << ")");
        }
        if (!grids) continue;
        // Print file-level metadata.  Guard against a null MetaMap pointer
        // (e.g. if getMetadata() failed after getGrids() succeeded);
        // printLongListing() performs the same null check.
        if (metadata && meta) {
            std::string str = meta->str(indent);
            if (!str.empty()) std::cout << str << "\n";
        }
        // For each grid in the file...
        for (openvdb::GridPtrVec::const_iterator it = grids->begin(); it != grids->end(); ++it) {
            const openvdb::GridBase::ConstPtr grid = *it;
            if (!grid) continue;
            // Print the grid name and its voxel value datatype.
            std::cout << indent << std::left << std::setw(11) << grid->getName()
                << " " << std::right << std::setw(6) << grid->valueType();
            // Print the grid's bounding box and dimensions.
            openvdb::CoordBBox bbox = grid->evalActiveVoxelBoundingBox();
            std::string
                boxStr = coordAsString(bbox.min(), ",", "(", ")") + "->" +
                    coordAsString(bbox.max(), ",", "(", ")"),
                dimStr = coordAsString(bbox.extents(), "x", "", "");
            // Pad so that the dimension string is right-aligned in the column.
            boxStr += std::string(
                std::max(1, int(40 - boxStr.size() - dimStr.size())), ' ') + dimStr;
            std::cout << " " << std::left << std::setw(40) << boxStr;
            // Print the number of active voxels.
            std::cout << " " << std::right << std::setw(8)
                << sizeAsString(grid->activeVoxelCount(), "Vox");
            // Print the grid's in-core size, in bytes.
            std::cout << " " << std::right << std::setw(6) << bytesAsString(grid->memUsage());
            std::cout << std::endl;
            // Print grid-specific metadata.
            if (metadata) {
                // Print background value.
                std::string str = bkgdValueAsString(grid);
                if (!str.empty()) {
                    std::cout << indent2 << str << "\n";
                }
                // Print local and world transforms.
                grid->transform().print(std::cout, indent2);
                // Print custom metadata.
                str = grid->str(indent2);
                if (!str.empty()) std::cout << str << "\n";
                std::cout << std::flush;
            }
        }
    }
}
} // unnamed namespace
// Entry point for the openvdb_print tool: parse options, then print either
// a long (per-grid statistics) or short listing of each input file.
int
main(int argc, char *argv[])
{
    // Derive the program name (basename of argv[0]) for usage/error messages.
    OPENVDB_START_THREADSAFE_STATIC_WRITE
    gProgName = argv[0];
    if (const char* ptr = ::strrchr(gProgName, '/')) gProgName = ptr + 1;
    OPENVDB_FINISH_THREADSAFE_STATIC_WRITE
    int exitStatus = EXIT_SUCCESS;
    if (argc == 1) usage();
    openvdb::logging::initialize(argc, argv);
    // Parse command-line options and collect the input filenames.
    bool stats = false, metadata = false, version = false;
    StringVec filenames;
    for (int i = 1; i < argc; ++i) {
        std::string arg = argv[i];
        if (arg[0] == '-') {
            if (arg == "-m" || arg == "-metadata") {
                metadata = true;
            } else if (arg == "-l" || arg == "-stats") {
                stats = true;
            } else if (arg == "-h" || arg == "-help" || arg == "--help") {
                usage(EXIT_SUCCESS);
            } else if (arg == "-version" || arg == "--version") {
                version = true;
            } else {
                OPENVDB_LOG_FATAL("\"" << arg << "\" is not a valid option");
                usage();
            }
        } else if (!arg.empty()) {
            filenames.push_back(arg);
        }
    }
    if (version) {
        std::cout << "OpenVDB library version: "
            << openvdb::getLibraryAbiVersionString() << "\n";
        std::cout << "OpenVDB file format version: "
            << openvdb::OPENVDB_FILE_VERSION << std::endl;
        // -version with no files is a complete, successful invocation.
        if (filenames.empty()) return EXIT_SUCCESS;
    }
    if (filenames.empty()) {
        OPENVDB_LOG_FATAL("expected one or more OpenVDB files");
        usage();
    }
    try {
        openvdb::initialize();
        // Register legacy 4,3,3 tree configurations so that older files
        // can still be read and printed.
        /// @todo Remove the following at some point:
        openvdb::Grid<openvdb::tree::Tree4<bool, 4, 3, 3>::Type>::registerGrid();
        openvdb::Grid<openvdb::tree::Tree4<float, 4, 3, 3>::Type>::registerGrid();
        openvdb::Grid<openvdb::tree::Tree4<double, 4, 3, 3>::Type>::registerGrid();
        openvdb::Grid<openvdb::tree::Tree4<int32_t, 4, 3, 3>::Type>::registerGrid();
        openvdb::Grid<openvdb::tree::Tree4<int64_t, 4, 3, 3>::Type>::registerGrid();
        openvdb::Grid<openvdb::tree::Tree4<openvdb::Vec2i, 4, 3, 3>::Type>::registerGrid();
        openvdb::Grid<openvdb::tree::Tree4<openvdb::Vec2s, 4, 3, 3>::Type>::registerGrid();
        openvdb::Grid<openvdb::tree::Tree4<openvdb::Vec2d, 4, 3, 3>::Type>::registerGrid();
        openvdb::Grid<openvdb::tree::Tree4<openvdb::Vec3i, 4, 3, 3>::Type>::registerGrid();
        openvdb::Grid<openvdb::tree::Tree4<openvdb::Vec3f, 4, 3, 3>::Type>::registerGrid();
        openvdb::Grid<openvdb::tree::Tree4<openvdb::Vec3d, 4, 3, 3>::Type>::registerGrid();
        if (stats) {
            printLongListing(filenames);
        } else {
            printShortListing(filenames, metadata);
        }
    }
    catch (const std::exception& e) {
        OPENVDB_LOG_FATAL(e.what());
        exitStatus = EXIT_FAILURE;
    }
    catch (...) {
        OPENVDB_LOG_FATAL("Exception caught (unexpected type)");
        std::terminate();
    }
    return exitStatus;
}
| 10,295 | C++ | 31.479495 | 97 | 0.518893 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/cmd/openvdb_render.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file main.cc
///
/// @brief Simple ray tracer for OpenVDB volumes
///
/// @note This is intended mainly as an example of how to ray-trace
/// OpenVDB volumes. It is not a production-quality renderer.
#include <algorithm>
#include <iomanip>
#include <iostream>
#include <limits>
#include <memory>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/algorithm/string/split.hpp>
#include <openvdb/PlatformConfig.h> // defines OPENEXR_DLL if required, must come before OpenEXR includes
#include <OpenEXR/ImfChannelList.h>
#include <OpenEXR/ImfFrameBuffer.h>
#include <OpenEXR/ImfHeader.h>
#include <OpenEXR/ImfOutputFile.h>
#include <OpenEXR/ImfPixelType.h>
#include <tbb/task_scheduler_init.h>
#include <tbb/tick_count.h>
#include <openvdb/openvdb.h>
#include <openvdb/tools/RayIntersector.h>
#include <openvdb/tools/RayTracer.h>
namespace {
const char* gProgName = "";
const double LIGHT_DEFAULTS[] = { 0.3, 0.3, 0.0, 0.7, 0.7, 0.7 };
// Aggregates every command-line option accepted by the renderer,
// along with the default value used when an option is not given.
struct RenderOpts
{
    std::string shader; // "diffuse", "matte", "normal" or "position"
    std::string color; // name of an optional vec3s color volume
    openvdb::Vec3SGrid::Ptr colorgrid; // the loaded color volume, if any
    std::string camera; // "persp[ective]" or "ortho[graphic]"
    float aperture, focal, frame, znear, zfar; // aperture/focal in mm (per usage())
    double isovalue; // level set isovalue, in world units
    openvdb::Vec3d rotate; // camera rotation in degrees
    openvdb::Vec3d translate; // camera translation
    openvdb::Vec3d target; // -lookat point (used only when lookat is true)
    openvdb::Vec3d up; // up vector for -lookat
    bool lookat; // true if -lookat was given
    size_t samples; // rays per pixel (level sets)
    openvdb::Vec3d absorb; // absorption coefficients (dense volumes)
    std::vector<double> light; // light direction (x,y,z) and color (r,g,b)
    openvdb::Vec3d scatter; // scattering coefficients (dense volumes)
    double cutoff, gain; // transmittance cutoff and shadow-ray gain
    openvdb::Vec2d step; // [0] primary-ray step, [1] shadow-ray step (voxels)
    size_t width, height; // output image resolution in pixels
    std::string compression; // EXR compression: "none", "rle" or "zip"
    int threads; // 0 = all CPUs, 1 = no threading, N = N threads
    bool verbose; // print timing and diagnostics
    // Defaults, kept in sync with the usage() message.
    RenderOpts():
        shader("diffuse"),
        camera("perspective"),
        aperture(41.2136f),
        focal(50.0f),
        frame(1.0f),
        znear(1.0e-3f),
        zfar(std::numeric_limits<float>::max()),
        isovalue(0.0),
        rotate(0.0),
        translate(0.0),
        target(0.0),
        up(0.0, 1.0, 0.0),
        lookat(false),
        samples(1),
        absorb(0.1),
        light(LIGHT_DEFAULTS, LIGHT_DEFAULTS + 6),
        scatter(1.5),
        cutoff(0.005),
        gain(0.2),
        step(1.0, 3.0),
        width(1920),
        height(1080),
        compression("zip"),
        threads(0),
        verbose(false)
    {}
    // Return an empty string if the options are consistent,
    // otherwise a human-readable description of the first problem found.
    std::string validate() const
    {
        if (shader != "diffuse" && shader != "matte" && shader != "normal" && shader != "position"){
            return "expected diffuse, matte, normal or position shader, got \"" + shader + "\"";
        }
        if (!boost::starts_with(camera, "ortho") && !boost::starts_with(camera, "persp")) {
            return "expected perspective or orthographic camera, got \"" + camera + "\"";
        }
        if (compression != "none" && compression != "rle" && compression != "zip") {
            return "expected none, rle or zip compression, got \"" + compression + "\"";
        }
        if (width < 1 || height < 1) {
            std::ostringstream ostr;
            ostr << "expected width > 0 and height > 0, got " << width << "x" << height;
            return ostr.str();
        }
        return "";
    }
    // Echo the options back in command-line form (used by operator<<).
    std::ostream& put(std::ostream& os) const
    {
        os << " -absorb " << absorb[0] << "," << absorb[1] << "," << absorb[2]
            << " -aperture " << aperture
            << " -camera " << camera;
        if (!color.empty()) os << " -color '" << color << "'";
        os << " -compression " << compression
            << " -cpus " << threads
            << " -cutoff " << cutoff
            << " -far " << zfar
            << " -focal " << focal
            << " -frame " << frame
            << " -gain " << gain
            << " -isovalue " << isovalue
            << " -light " << light[0] << "," << light[1] << "," << light[2]
                << "," << light[3] << "," << light[4] << "," << light[5];
        if (lookat) os << " -lookat " << target[0] << "," << target[1] << "," << target[2];
        os << " -near " << znear
            << " -res " << width << "x" << height;
        if (!lookat) os << " -rotate " << rotate[0] << "," << rotate[1] << "," << rotate[2];
        os << " -shader " << shader
            << " -samples " << samples
            << " -scatter " << scatter[0] << "," << scatter[1] << "," << scatter[2]
            << " -shadowstep " << step[1]
            << " -step " << step[0]
            << " -translate " << translate[0] << "," << translate[1] << "," << translate[2];
        if (lookat) os << " -up " << up[0] << "," << up[1] << "," << up[2];
        if (verbose) os << " -v";
        return os;
    }
};
/// Stream-insertion operator for RenderOpts; delegates to RenderOpts::put().
std::ostream&
operator<<(std::ostream& os, const RenderOpts& opts)
{
    return opts.put(os);
}
/// Print the command-line usage message (populated with the compiled-in
/// default option values) to stderr, then terminate the program with the
/// given exit status (never returns).
void
usage [[noreturn]] (int exitStatus = EXIT_FAILURE)
{
    RenderOpts opts; // default options
    const double fov = openvdb::tools::PerspectiveCamera::focalLengthToFieldOfView(
        opts.focal, opts.aperture);
    std::ostringstream ostr;
    ostr << std::setprecision(3) <<
"Usage: " << gProgName << " in.vdb out.{exr,ppm} [options]\n" <<
"Which: ray-traces OpenVDB volumes\n" <<
"Options:\n" <<
"    -aperture F       perspective camera aperture in mm (default: " << opts.aperture << ")\n" <<
"    -camera S         camera type; either \"persp[ective]\" or \"ortho[graphic]\"\n" <<
"                      (default: " << opts.camera << ")\n" <<
"    -compression S    EXR compression scheme; either \"none\" (uncompressed),\n" <<
"                      \"rle\" or \"zip\" (default: " << opts.compression << ")\n" <<
"    -cpus N           number of rendering threads, or 1 to disable threading,\n" <<
"                      or 0 to use all available CPUs (default: " << opts.threads << ")\n" <<
"    -far F            camera far plane depth (default: " << opts.zfar << ")\n" <<
"    -focal F          perspective camera focal length in mm (default: " << opts.focal << ")\n" <<
"    -fov F            perspective camera field of view in degrees\n" <<
"                      (default: " << fov << ")\n" <<
"    -frame F          ortho camera frame width in world units (default: " <<
    opts.frame << ")\n" <<
"    -lookat X,Y,Z     rotate the camera to point to (X, Y, Z)\n" <<
"    -name S           name of the volume to be rendered (default: render\n" <<
"                      the first floating-point volume found in in.vdb)\n" <<
"    -near F           camera near plane depth (default: " << opts.znear << ")\n" <<
"    -res WxH          image dimensions in pixels (default: " <<
    opts.width << "x" << opts.height << ")\n" <<
"    -r X,Y,Z                    \n" <<
"    -rotate X,Y,Z     camera rotation in degrees\n" <<
"                      (default: look at the center of the volume)\n" <<
"    -t X,Y,Z                    \n" <<
"    -translate X,Y,Z  camera translation\n" <<
"    -up X,Y,Z         vector that should point up after rotation with -lookat\n" <<
"                      (default: " << opts.up << ")\n" <<
"\n" <<
"    -v                verbose (print timing and diagnostics)\n" <<
"    -version          print version information and exit\n" <<
"    -h, -help         print this usage message and exit\n" <<
"\n" <<
"Level set options:\n" <<
"    -color S          name of a vec3s volume to be used to set material colors\n" <<
"    -isovalue F       isovalue in world units for level set ray intersection\n" <<
"                      (default: " << opts.isovalue << ")\n" <<
"    -samples N        number of samples (rays) per pixel (default: " << opts.samples << ")\n" <<
"    -shader S         shader name; either \"diffuse\", \"matte\", \"normal\"\n" <<
"                      or \"position\" (default: " << opts.shader << ")\n" <<
"\n" <<
"Dense volume options:\n" <<
"    -absorb R,G,B     absorption coefficients (default: " << opts.absorb << ")\n" <<
"    -cutoff F         density and transmittance cutoff value (default: " << opts.cutoff << ")\n" <<
"    -gain F           amount of scatter along the shadow ray (default: " << opts.gain << ")\n" <<
"    -light X,Y,Z[,R,G,B]        light source direction and optional color\n" <<
"                      (default: [" << opts.light[0] << ", " << opts.light[1]
    << ", " << opts.light[2] << ", " << opts.light[3] << ", " << opts.light[4]
    << ", " << opts.light[5] << "])\n" <<
"    -scatter R,G,B    scattering coefficients (default: " << opts.scatter << ")\n" <<
"    -shadowstep F     step size in voxels for integration along the shadow ray\n" <<
"                      (default: " << opts.step[1] << ")\n" <<
"    -step F           step size in voxels for integration along the primary ray\n" <<
"                      (default: " << opts.step[0] << ")\n" <<
"\n" <<
"Examples:\n" <<
"    " << gProgName << " crawler.vdb crawler.exr -shader diffuse -res 1920x1080 \\\n" <<
"        -focal 35 -samples 4 -translate 0,210.5,400 -compression rle -v\n" <<
"\n" <<
"    " << gProgName << " bunny_cloud.vdb bunny_cloud.exr -res 1920x1080 \\\n" <<
"        -translate 0,0,110 -absorb 0.4,0.2,0.1 -gain 0.2 -v\n" <<
"\n" <<
"Warning:\n" <<
"     This is not (and is not intended to be) a production-quality renderer.\n" <<
"     Use it for fast previewing or simply as a reference implementation\n" <<
"     for integration into existing ray tracers.\n";
    std::cerr << ostr.str();
    exit(exitStatus);
}
// Write the rendered film to an OpenEXR image file, using the compression
// scheme and thread count given in @a opts.  ".exr" is appended to @a fname
// if it is missing.
// @throw openvdb::ValueError if opts.compression is not "none", "rle" or "zip".
void
saveEXR(const std::string& fname, const openvdb::tools::Film& film, const RenderOpts& opts)
{
    using RGBA = openvdb::tools::Film::RGBA;
    std::string filename = fname;
    if (!boost::iends_with(filename, ".exr")) filename += ".exr";
    if (opts.verbose) {
        std::cout << gProgName << ": writing " << filename << "..." << std::endl;
    }
    const tbb::tick_count start = tbb::tick_count::now();
    // opts.threads == 0 means "all available CPUs" elsewhere; for EXR output
    // it is mapped to a fixed pool of 8 threads.
    int threads = (opts.threads == 0 ? 8 : opts.threads);
    Imf::setGlobalThreadCount(threads);
    Imf::Header header(int(film.width()), int(film.height()));
    if (opts.compression == "none") {
        header.compression() = Imf::NO_COMPRESSION;
    } else if (opts.compression == "rle") {
        header.compression() = Imf::RLE_COMPRESSION;
    } else if (opts.compression == "zip") {
        header.compression() = Imf::ZIP_COMPRESSION;
    } else {
        OPENVDB_THROW(openvdb::ValueError,
            "expected none, rle or zip compression, got \"" << opts.compression << "\"");
    }
    header.channels().insert("R", Imf::Channel(Imf::FLOAT));
    header.channels().insert("G", Imf::Channel(Imf::FLOAT));
    header.channels().insert("B", Imf::Channel(Imf::FLOAT));
    header.channels().insert("A", Imf::Channel(Imf::FLOAT));
    // Expose the film's pixel array to OpenEXR as four strided slices over
    // the same RGBA buffer (one slice per channel).
    const size_t pixelBytes = sizeof(RGBA), rowBytes = pixelBytes * film.width();
    RGBA& pixel0 = const_cast<RGBA*>(film.pixels())[0];
    Imf::FrameBuffer framebuffer;
    framebuffer.insert("R",
        Imf::Slice(Imf::FLOAT, reinterpret_cast<char*>(&pixel0.r), pixelBytes, rowBytes));
    framebuffer.insert("G",
        Imf::Slice(Imf::FLOAT, reinterpret_cast<char*>(&pixel0.g), pixelBytes, rowBytes));
    framebuffer.insert("B",
        Imf::Slice(Imf::FLOAT, reinterpret_cast<char*>(&pixel0.b), pixelBytes, rowBytes));
    framebuffer.insert("A",
        Imf::Slice(Imf::FLOAT, reinterpret_cast<char*>(&pixel0.a), pixelBytes, rowBytes));
    Imf::OutputFile imgFile(filename.c_str(), header);
    imgFile.setFrameBuffer(framebuffer);
    imgFile.writePixels(int(film.height()));
    if (opts.verbose) {
        std::ostringstream ostr;
        ostr << gProgName << ": ...completed in " << std::setprecision(3)
            << (tbb::tick_count::now() - start).seconds() << " sec";
        std::cout << ostr.str() << std::endl;
    }
}
// Render the given grid to an image file.  Level set grids are ray-traced
// with the shader selected in @a opts; all other grids are rendered as
// dense volumes.  The output format is chosen from the image filename
// extension (".ppm" or ".exr").
// @throw openvdb::ValueError on an unrecognized camera type or image format.
template<typename GridType>
void
render(const GridType& grid, const std::string& imgFilename, const RenderOpts& opts)
{
    using namespace openvdb;
    const bool isLevelSet = (grid.getGridClass() == GRID_LEVEL_SET);
    tools::Film film(opts.width, opts.height);
    // Construct the camera selected by the -camera option.
    std::unique_ptr<tools::BaseCamera> camera;
    if (boost::starts_with(opts.camera, "persp")) {
        camera.reset(new tools::PerspectiveCamera(film, opts.rotate, opts.translate,
            opts.focal, opts.aperture, opts.znear, opts.zfar));
    } else if (boost::starts_with(opts.camera, "ortho")) {
        camera.reset(new tools::OrthographicCamera(film, opts.rotate, opts.translate,
            opts.frame, opts.znear, opts.zfar));
    } else {
        OPENVDB_THROW(ValueError,
            "expected perspective or orthographic camera, got \"" << opts.camera << "\"");
    }
    if (opts.lookat) camera->lookAt(opts.target, opts.up);
    // Define the shader for level set rendering. The default shader is a diffuse shader.
    // Each shader has an optional variant that samples a color grid.
    std::unique_ptr<tools::BaseShader> shader;
    if (opts.shader == "matte") {
        if (opts.colorgrid) {
            shader.reset(new tools::MatteShader<openvdb::Vec3SGrid>(*opts.colorgrid));
        } else {
            shader.reset(new tools::MatteShader<>());
        }
    } else if (opts.shader == "normal") {
        if (opts.colorgrid) {
            shader.reset(new tools::NormalShader<Vec3SGrid>(*opts.colorgrid));
        } else {
            shader.reset(new tools::NormalShader<>());
        }
    } else if (opts.shader == "position") {
        // The position shader needs the grid's world-space bounds.
        const CoordBBox bbox = grid.evalActiveVoxelBoundingBox();
        const math::BBox<Vec3d> bboxIndex(bbox.min().asVec3d(), bbox.max().asVec3d());
        const math::BBox<Vec3R> bboxWorld = bboxIndex.applyMap(*(grid.transform().baseMap()));
        if (opts.colorgrid) {
            shader.reset(new tools::PositionShader<Vec3SGrid>(bboxWorld, *opts.colorgrid));
        } else {
            shader.reset(new tools::PositionShader<>(bboxWorld));
        }
    } else /* if (opts.shader == "diffuse") */ { // default
        if (opts.colorgrid) {
            shader.reset(new tools::DiffuseShader<Vec3SGrid>(*opts.colorgrid));
        } else {
            shader.reset(new tools::DiffuseShader<>());
        }
    }
    if (opts.verbose) {
        std::cout << gProgName << ": ray-tracing";
        const std::string gridName = grid.getName();
        if (!gridName.empty()) std::cout << " " << gridName;
        std::cout << "..." << std::endl;
    }
    const tbb::tick_count start = tbb::tick_count::now();
    if (isLevelSet) {
        // Level set: intersect rays against the zero crossing at opts.isovalue.
        tools::LevelSetRayIntersector<GridType> intersector(
            grid, static_cast<typename GridType::ValueType>(opts.isovalue));
        tools::rayTrace(grid, intersector, *shader, *camera, opts.samples,
            /*seed=*/0, (opts.threads != 1));
    } else {
        // Dense volume: integrate absorption/scattering along each ray.
        using IntersectorType = tools::VolumeRayIntersector<GridType>;
        IntersectorType intersector(grid);
        tools::VolumeRender<IntersectorType> renderer(intersector, *camera);
        renderer.setLightDir(opts.light[0], opts.light[1], opts.light[2]);
        renderer.setLightColor(opts.light[3], opts.light[4], opts.light[5]);
        renderer.setPrimaryStep(opts.step[0]);
        renderer.setShadowStep(opts.step[1]);
        renderer.setScattering(opts.scatter[0], opts.scatter[1], opts.scatter[2]);
        renderer.setAbsorption(opts.absorb[0], opts.absorb[1], opts.absorb[2]);
        renderer.setLightGain(opts.gain);
        renderer.setCutOff(opts.cutoff);
        renderer.render(opts.threads != 1);
    }
    if (opts.verbose) {
        std::ostringstream ostr;
        ostr << gProgName << ": ...completed in " << std::setprecision(3)
            << (tbb::tick_count::now() - start).seconds() << " sec";
        std::cout << ostr.str() << std::endl;
    }
    if (boost::iends_with(imgFilename, ".ppm")) {
        // Save as PPM (fast, but large file size).
        std::string filename = imgFilename;
        filename.erase(filename.size() - 4); // strip .ppm extension
        film.savePPM(filename);
    } else if (boost::iends_with(imgFilename, ".exr")) {
        // Save as EXR (slow, but small file size).
        saveEXR(imgFilename, film, opts);
    } else {
        OPENVDB_THROW(ValueError, "unsupported image file format (" + imgFilename + ")");
    }
}
// Parse image dimensions from a string of the form "WxH" (or "W,H") into
// @a x and @a y.  A missing second component leaves @a y unchanged; a
// non-numeric component parses as 0 (atoi semantics); negative values are
// clamped to 0.  Uses only the standard library (previously boost::split);
// like boost::split, an empty input yields one empty token, so "" sets x = 0.
void
strToSize(const std::string& s, size_t& x, size_t& y)
{
    // Split on ',' or 'x', keeping empty tokens.
    std::vector<std::string> elems;
    std::string::size_type start = 0;
    while (true) {
        const std::string::size_type pos = s.find_first_of(",x", start);
        if (pos == std::string::npos) {
            elems.push_back(s.substr(start));
            break;
        }
        elems.push_back(s.substr(start, pos - start));
        start = pos + 1;
    }
    const size_t numElems = elems.size();
    if (numElems > 0) x = size_t(std::max(0, atoi(elems[0].c_str())));
    if (numElems > 1) y = size_t(std::max(0, atoi(elems[1].c_str())));
}
// Parse a comma-separated list of numbers into a vector of doubles.
// Empty or non-numeric fields parse as 0.0 (atof semantics).  Uses only the
// standard library (previously boost::split); like boost::split, the result
// always contains at least one element, so "" yields {0.0}.
std::vector<double>
strToVec(const std::string& s)
{
    std::vector<double> result;
    std::string::size_type start = 0;
    while (true) {
        const std::string::size_type pos = s.find(',', start);
        const std::string elem = s.substr(start,
            (pos == std::string::npos) ? std::string::npos : pos - start);
        result.push_back(atof(elem.c_str()));
        if (pos == std::string::npos) break;
        start = pos + 1;
    }
    return result;
}
// Parse up to three comma-separated components into a Vec3d.  A single value
// is broadcast to all three components (e.g. "2" -> (2, 2, 2)); an empty
// string yields (0, 0, 0).
openvdb::Vec3d
strToVec3d(const std::string& s)
{
    openvdb::Vec3d result(0.0, 0.0, 0.0);
    const std::vector<double> elems = strToVec(s);
    if (elems.empty()) return result;
    // Broadcast the first component, then overwrite any that were supplied.
    result = openvdb::Vec3d(elems[0]);
    const int count = std::min(3, int(elems.size()));
    for (int i = 1; i < count; ++i) {
        result[i] = elems[i];
    }
    return result;
}
// Minimal command-line parsing helper: wraps argc/argv and verifies that an
// option is followed by the required number of arguments.
struct OptParse
{
    int argc;
    char** argv;
    OptParse(int argc_, char* argv_[]): argc(argc_), argv(argv_) {}
    // Return true if argv[idx] equals @a name.  If it matches but fewer than
    // @a numArgs arguments follow it, log a fatal error and exit via usage().
    bool check(int idx, const std::string& name, int numArgs = 1) const
    {
        if (argv[idx] == name) {
            if (idx + numArgs >= argc) {
                OPENVDB_LOG_FATAL("option " << name << " requires "
                    << numArgs << " argument" << (numArgs == 1 ? "" : "s"));
                usage();
            }
            return true;
        }
        return false;
    }
};
} // unnamed namespace
// Entry point for the render tool: parses command-line options into a
// RenderOpts, reads a scalar float grid (and optionally a Vec3s color grid)
// from the input VDB file, then renders it to the output image file.
int
main(int argc, char *argv[])
{
    // Derive the program name (basename of argv[0]) for use in messages.
    OPENVDB_START_THREADSAFE_STATIC_WRITE
    gProgName = argv[0];
    if (const char* ptr = ::strrchr(gProgName, '/')) gProgName = ptr + 1;
    OPENVDB_FINISH_THREADSAFE_STATIC_WRITE
    int retcode = EXIT_SUCCESS;
    if (argc == 1) usage();
    openvdb::logging::initialize(argc, argv);
    std::string vdbFilename, imgFilename, gridName;
    RenderOpts opts;
    // Track which mutually-exclusive camera options were supplied explicitly.
    bool hasFocal = false, hasFov = false, hasRotate = false, hasLookAt = false;
    float fov = 0.0;
    // Parse command-line arguments: options populate opts; the first two
    // non-option arguments are the input VDB and output image filenames.
    OptParse parser(argc, argv);
    for (int i = 1; i < argc; ++i) {
        std::string arg = argv[i];
        if (arg[0] == '-') {
            if (parser.check(i, "-absorb")) {
                ++i;
                opts.absorb = strToVec3d(argv[i]);
            } else if (parser.check(i, "-aperture")) {
                ++i;
                opts.aperture = float(atof(argv[i]));
            } else if (parser.check(i, "-camera")) {
                ++i;
                opts.camera = argv[i];
            } else if (parser.check(i, "-color")) {
                ++i;
                opts.color = argv[i];
            } else if (parser.check(i, "-compression")) {
                ++i;
                opts.compression = argv[i];
            } else if (parser.check(i, "-cpus")) {
                ++i;
                opts.threads = std::max(0, atoi(argv[i]));
            } else if (parser.check(i, "-cutoff")) {
                ++i;
                opts.cutoff = atof(argv[i]);
            } else if (parser.check(i, "-isovalue")) {
                ++i;
                opts.isovalue = atof(argv[i]);
            } else if (parser.check(i, "-far")) {
                ++i;
                opts.zfar = float(atof(argv[i]));
            } else if (parser.check(i, "-focal")) {
                ++i;
                opts.focal = float(atof(argv[i]));
                hasFocal = true;
            } else if (parser.check(i, "-fov")) {
                ++i;
                fov = float(atof(argv[i]));
                hasFov = true;
            } else if (parser.check(i, "-frame")) {
                ++i;
                opts.frame = float(atof(argv[i]));
            } else if (parser.check(i, "-gain")) {
                ++i;
                opts.gain = atof(argv[i]);
            } else if (parser.check(i, "-light")) {
                ++i;
                opts.light = strToVec(argv[i]);
            } else if (parser.check(i, "-lookat")) {
                ++i;
                opts.lookat = true;
                opts.target = strToVec3d(argv[i]);
                hasLookAt = true;
            } else if (parser.check(i, "-name")) {
                ++i;
                gridName = argv[i];
            } else if (parser.check(i, "-near")) {
                ++i;
                opts.znear = float(atof(argv[i]));
            } else if (parser.check(i, "-r") || parser.check(i, "-rotate")) {
                ++i;
                opts.rotate = strToVec3d(argv[i]);
                hasRotate = true;
            } else if (parser.check(i, "-res")) {
                ++i;
                strToSize(argv[i], opts.width, opts.height);
            } else if (parser.check(i, "-scatter")) {
                ++i;
                opts.scatter = strToVec3d(argv[i]);
            } else if (parser.check(i, "-shader")) {
                ++i;
                opts.shader = argv[i];
            } else if (parser.check(i, "-shadowstep")) {
                ++i;
                opts.step[1] = atof(argv[i]);
            } else if (parser.check(i, "-samples")) {
                ++i;
                opts.samples = size_t(std::max(0, atoi(argv[i])));
            } else if (parser.check(i, "-step")) {
                ++i;
                opts.step[0] = atof(argv[i]);
            } else if (parser.check(i, "-t") || parser.check(i, "-translate")) {
                ++i;
                opts.translate = strToVec3d(argv[i]);
            } else if (parser.check(i, "-up")) {
                ++i;
                opts.up = strToVec3d(argv[i]);
            } else if (arg == "-v") {
                opts.verbose = true;
            } else if (arg == "-version" || arg == "--version") {
                std::cout << "OpenVDB library version: "
                    << openvdb::getLibraryAbiVersionString() << "\n";
                std::cout << "OpenVDB file format version: "
                    << openvdb::OPENVDB_FILE_VERSION << std::endl;
                return EXIT_SUCCESS;
            } else if (arg == "-h" || arg == "-help" || arg == "--help") {
                usage(EXIT_SUCCESS);
            } else {
                OPENVDB_LOG_FATAL("\"" << arg << "\" is not a valid option");
                usage();
            }
        } else if (vdbFilename.empty()) {
            vdbFilename = arg;
        } else if (imgFilename.empty()) {
            imgFilename = arg;
        } else {
            usage();
        }
    }
    if (vdbFilename.empty() || imgFilename.empty()) {
        usage();
    }
    // -fov and -focal both determine the focal length; accept only one.
    if (hasFov) {
        if (hasFocal) {
            OPENVDB_LOG_FATAL("specify -focal or -fov, but not both");
            usage();
        }
        opts.focal = float(
            openvdb::tools::PerspectiveCamera::fieldOfViewToFocalLength(fov, opts.aperture));
    }
    // -lookat and -rotate both determine the camera orientation; accept only one.
    if (hasLookAt && hasRotate) {
        OPENVDB_LOG_FATAL("specify -lookat or -r[otate], but not both");
        usage();
    }
    {
        const std::string err = opts.validate();
        if (!err.empty()) {
            OPENVDB_LOG_FATAL(err);
            usage();
        }
    }
    try {
        // A thread count of zero means let TBB pick the number of workers.
        tbb::task_scheduler_init schedulerInit(
            (opts.threads == 0) ? tbb::task_scheduler_init::automatic : opts.threads);
        openvdb::initialize();
        const tbb::tick_count start = tbb::tick_count::now();
        if (opts.verbose) {
            std::cout << gProgName << ": reading ";
            if (!gridName.empty()) std::cout << gridName << " from ";
            std::cout << vdbFilename << "..." << std::endl;
        }
        openvdb::FloatGrid::Ptr grid;
        {
            openvdb::io::File file(vdbFilename);
            if (!gridName.empty()) {
                // Read the named grid, which must be a scalar float grid.
                file.open();
                grid = openvdb::gridPtrCast<openvdb::FloatGrid>(file.readGrid(gridName));
                if (!grid) {
                    OPENVDB_THROW(openvdb::ValueError,
                        gridName + " is not a scalar, floating-point volume");
                }
            } else {
                // If no grid was specified by name, retrieve the first float grid from the file.
                file.open(/*delayLoad=*/false);
                openvdb::io::File::NameIterator it = file.beginName();
                openvdb::GridPtrVecPtr grids = file.readAllGridMetadata();
                for (size_t i = 0; i < grids->size(); ++i, ++it) {
                    grid = openvdb::gridPtrCast<openvdb::FloatGrid>(grids->at(i));
                    if (grid) {
                        gridName = *it;
                        // Reopen the file to read the full grid, not just its metadata.
                        file.close();
                        file.open();
                        grid = openvdb::gridPtrCast<openvdb::FloatGrid>(file.readGrid(gridName));
                        break;
                    }
                }
                if (!grid) {
                    OPENVDB_THROW(openvdb::ValueError,
                        "no scalar, floating-point volumes in file " + vdbFilename);
                }
            }
            if (!opts.color.empty()) {
                // Optionally read a Vec3s grid that supplies colors for the shader.
                opts.colorgrid =
                    openvdb::gridPtrCast<openvdb::Vec3SGrid>(file.readGrid(opts.color));
                if (!opts.colorgrid) {
                    OPENVDB_THROW(openvdb::ValueError,
                        opts.color + " is not a vec3s color volume");
                }
            }
        }
        if (opts.verbose) {
            std::ostringstream ostr;
            ostr << gProgName << ": ...completed in " << std::setprecision(3)
                << (tbb::tick_count::now() - start).seconds() << " sec";
            std::cout << ostr.str() << std::endl;
        }
        if (grid) {
            if (!hasLookAt && !hasRotate) {
                // If the user specified neither the camera rotation nor a target
                // to look at, orient the camera to point to the center of the grid.
                opts.target = grid->evalActiveVoxelBoundingBox().getCenter();
                opts.target = grid->constTransform().indexToWorld(opts.target);
                opts.lookat = true;
            }
            if (opts.verbose) std::cout << opts << std::endl;
            render<openvdb::FloatGrid>(*grid, imgFilename, opts);
        }
    } catch (std::exception& e) {
        OPENVDB_LOG_FATAL(e.what());
        retcode = EXIT_FAILURE;
    } catch (...) {
        OPENVDB_LOG_FATAL("Exception caught (unexpected type)");
        std::terminate();
    }
    return retcode;
}
| 26,506 | C++ | 37.249639 | 105 | 0.519656 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/cmd/openvdb_lod.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#include <openvdb/openvdb.h>
#include <openvdb/tools/MultiResGrid.h>
#include <openvdb/util/CpuTimer.h>
#include <openvdb/util/logging.h>
#include <boost/algorithm/string/classification.hpp> // for boost::is_any_of()
#include <boost/algorithm/string/split.hpp>
#include <cstdlib> // for std::atof()
#include <iomanip> // for std::setprecision()
#include <iostream>
#include <set>
#include <sstream>
#include <stdexcept> // for std::runtime_error
#include <string>
namespace {
const char* gProgName = "";
/// @brief Print a usage message for the mipmap tool to stderr, then exit
/// with the given status.  Marked [[noreturn]] because exit() never returns.
inline void
usage [[noreturn]] (int exitStatus = EXIT_FAILURE)
{
    std::cerr <<
    "Usage: " << gProgName << " in.vdb out.vdb -range FROM[-TO[:STEP]] [options]\n" <<
    "Which: generates a volume mipmap from an OpenVDB grid\n" <<
    "Where:\n" <<
    " FROM is the highest-resolution mip level to be generated\n" <<
    " TO is the lowest-resolution mip level to be generated (default: FROM)\n" <<
    " STEP is the mip level step size (default: 1)\n" <<
    "Options:\n" <<
    " -name S[,S,S,...] name(s) of the grid(s) to be processed\n" <<
    " (default: process all grids of supported types)\n" <<
    " -keep pass through grids that were not processed\n" <<
    " (default: discard grids that were not processed)\n" <<
    " -nokeep cancel an earlier -keep option\n" <<
    " -p, -preserve if only one mip level is generated, give it the same\n" <<
    " name as the original grid (default: name each level\n" <<
    " \"NAME_level_N\", where NAME is the original grid name\n" <<
    " and N is the level number, e.g., \"density_level_0\")\n" <<
    " -nopreserve cancel an earlier -p or -preserve option\n" <<
    " -version print version information\n" <<
    "\n" <<
    "Mip level 0 is the input grid. Each successive integer level is half\n" <<
    "the resolution of the previous level. Fractional levels are supported.\n" <<
    "\n" <<
    "Examples:\n" <<
    " Generate levels 0, 1, and 2 (full resolution, half resolution,\n" <<
    " and quarter resolution, respectively) for all grids of supported types\n" <<
    " and ignore all other grids:\n" <<
    "\n" <<
    " " << gProgName << " in.vdb out.vdb -range 0-2\n" <<
    "\n" <<
    " Generate levels 0, 0.5, and 1 for all grids of supported types\n" <<
    " and pass through all other grids:\n" <<
    "\n" <<
    " " << gProgName << " in.vdb out.vdb -range 0-1:0.5 -keep\n" <<
    "\n" <<
    " Generate level 3 for the first of multiple grids named \"density\":\n" <<
    "\n" <<
    " " << gProgName << " in.vdb out.vdb -range 3 -name 'density[0]'\n" <<
    "\n" <<
    " Generate level 1.5 for the second of multiple unnamed grids and for\n" <<
    " the grid named \"velocity\" and give the resulting grids the same names\n"<<
    " as the original grids:\n" <<
    "\n" <<
    " " << gProgName << " in.vdb out.vdb -range 1.5 -name '[1],velocity' -p\n" <<
    "\n";
    exit(exitStatus);
}
/// @brief Command-line options controlling mipmap generation.
struct Options
{
    Options() = default;

    double from = 0.0;  ///< highest-resolution mip level to be generated
    double to = 0.0;    ///< lowest-resolution mip level to be generated
    double step = 1.0;  ///< mip level step size

    bool keep = false;      ///< pass through grids that were not processed
    bool preserve = false;  ///< reuse the input grid name for single-level output
};
/// @brief Parse a string of the form "from-to:step" and populate the given @a opts
/// with the resulting values.
/// @throw std::runtime_error if parsing fails for any reason
inline void
parseRangeSpec(const std::string& rangeSpec, Options& opts)
{
    // Break the specification into its "from" and "to[:step]" halves;
    // there may be at most one "-" separator.
    std::vector<std::string> tokens;
    boost::split(tokens, rangeSpec, boost::is_any_of("-"));
    if (tokens.empty() || tokens.size() > 2) throw std::runtime_error("");
    // "to" defaults to "from", and "step" defaults to 1.
    opts.from = opts.to = std::atof(tokens[0].c_str());
    opts.step = 1.0;
    if (tokens.size() == 2) {
        // Break the second half into "to" and "step"; there may be at most
        // one ":" separator.
        const std::string tail = tokens[1];
        boost::split(tokens, tail, boost::is_any_of(":"));
        if (tokens.empty() || tokens.size() > 2) throw std::runtime_error("");
        opts.to = std::atof(tokens[0].c_str());
        if (tokens.size() == 2) {
            opts.step = std::atof(tokens[1].c_str());
        }
    }
    // Reject negative, inverted or degenerate ranges.
    if (opts.from < 0.0 || opts.to < opts.from || opts.step <= 0.0) throw std::runtime_error("");
}
/// @brief Mipmap a single grid of a fully-resolved type.
/// @tparam GridType a concrete grid type (e.g., openvdb::FloatGrid)
/// @param inGrid the input grid, which becomes level 0 of the mipmap
/// @param opts range options specifying which levels to generate
/// @return a vector of pointers to the member grids of the mipmap
template<typename GridType>
inline openvdb::GridPtrVec
mip(const GridType& inGrid, const Options& opts)
{
    OPENVDB_LOG_INFO("processing grid \"" << inGrid.getName() << "\"");
    // MultiResGrid requires at least two mipmap levels, starting from level 0.
    const int levels = std::max(2, openvdb::math::Ceil(opts.to) + 1);
    openvdb::util::CpuTimer timer;
    timer.start();
    // Initialize the mipmap.
    typedef typename GridType::TreeType TreeT;
    openvdb::tools::MultiResGrid<TreeT> mrg(levels, inGrid);
    openvdb::GridPtrVec outGrids;
    // Fractional levels are allowed here, hence the double-valued loop.
    for (double level = opts.from; level <= opts.to; level += opts.step) {
        // Request a level from the mipmap; a null result is simply not added.
        if (openvdb::GridBase::Ptr levelGrid =
            mrg.template createGrid</*sampling order=*/1>(static_cast<float>(level)))
        {
            outGrids.push_back(levelGrid);
        }
    }
    if (outGrids.size() == 1 && opts.preserve) {
        // If -preserve is in effect and there is only one output grid,
        // give it the same name as the original grid.
        outGrids[0]->setName(inGrid.getName());
    }
    OPENVDB_LOG_INFO("processed grid \"" << inGrid.getName() << "\" in "
        << std::setprecision(3) << timer.seconds() << " sec");
    return outGrids;
}
/// @brief Mipmap a single grid and append the resulting grids to @a outGrids.
/// @details Dispatches on the grid's value type; grids of unsupported types
/// are either passed through unchanged (if @c opts.keep is set) or skipped,
/// with a warning logged in both cases.  A null @a baseGrid is ignored.
inline void
process(const openvdb::GridBase::Ptr& baseGrid, openvdb::GridPtrVec& outGrids, const Options& opts)
{
    using namespace openvdb;
    if (!baseGrid) return;
    GridPtrVec mipmap;
    // Try each supported scalar and vector grid type in turn.
    if (FloatGrid::Ptr g0 = GridBase::grid<FloatGrid>(baseGrid)) { mipmap = mip(*g0, opts); }
    else if (DoubleGrid::Ptr g1 = GridBase::grid<DoubleGrid>(baseGrid)) { mipmap = mip(*g1, opts); }
    else if (Vec3SGrid::Ptr g2 = GridBase::grid<Vec3SGrid>(baseGrid)) { mipmap = mip(*g2, opts); }
    else if (Vec3DGrid::Ptr g3 = GridBase::grid<Vec3DGrid>(baseGrid)) { mipmap = mip(*g3, opts); }
    else if (Vec3IGrid::Ptr g4 = GridBase::grid<Vec3IGrid>(baseGrid)) { mipmap = mip(*g4, opts); }
    else if (Int32Grid::Ptr g5 = GridBase::grid<Int32Grid>(baseGrid)) { mipmap = mip(*g5, opts); }
    else if (Int64Grid::Ptr g6 = GridBase::grid<Int64Grid>(baseGrid)) { mipmap = mip(*g6, opts); }
    else {
        std::string operation = "skipped";
        if (opts.keep) {
            operation = "passed through";
            outGrids.push_back(baseGrid);
        } // (removed a stray trailing semicolon here)
        OPENVDB_LOG_WARN(operation << " grid \"" << baseGrid->getName()
            << "\" of unsupported type " << baseGrid->type());
    }
    outGrids.insert(outGrids.end(), mipmap.begin(), mipmap.end());
}
} // unnamed namespace
// Entry point for the mipmap tool: parses options, builds mipmaps for the
// selected grids of the input file, and writes all output grids to the
// output file.
int
main(int argc, char *argv[])
{
    // Derive the program name (basename of argv[0]) for use in messages.
    OPENVDB_START_THREADSAFE_STATIC_WRITE
    gProgName = argv[0];
    if (const char* ptr = ::strrchr(gProgName, '/')) gProgName = ptr + 1;
    OPENVDB_FINISH_THREADSAFE_STATIC_WRITE
    int exitStatus = EXIT_SUCCESS;
    if (argc == 1) usage();
    openvdb::logging::initialize(argc, argv);
    openvdb::initialize();
    // Parse command-line arguments.
    Options opts;
    bool version = false;
    std::string inFilename, outFilename, gridNameStr, rangeSpec;
    for (int i = 1; i < argc; ++i) {
        const std::string arg = argv[i];
        if (arg[0] == '-') {
            if (arg == "-name") {
                // -name consumes the next argument (comma-separated grid names).
                if (i + 1 < argc && argv[i + 1]) {
                    gridNameStr = argv[i + 1];
                    ++i;
                } else {
                    OPENVDB_LOG_FATAL("missing grid name(s) after -name");
                    usage();
                }
            } else if (arg == "-keep") {
                opts.keep = true;
            } else if (arg == "-nokeep") {
                opts.keep = false;
            } else if (arg == "-p" || arg == "-preserve") {
                opts.preserve = true;
            } else if (arg == "-nopreserve") {
                opts.preserve = false;
            } else if (arg == "-range") {
                // -range consumes the next argument (the FROM[-TO[:STEP]] spec).
                if (i + 1 < argc && argv[i + 1]) {
                    rangeSpec = argv[i + 1];
                    ++i;
                } else {
                    OPENVDB_LOG_FATAL("missing level range specification after -range");
                    usage();
                }
            } else if (arg == "-h" || arg == "-help" || arg == "--help") {
                usage(EXIT_SUCCESS);
            } else if (arg == "-version" || arg == "--version") {
                version = true;
            } else {
                OPENVDB_LOG_FATAL("\"" << arg << "\" is not a valid option");
                usage();
            }
        } else if (!arg.empty()) {
            // The first two non-option arguments are the input and output filenames.
            if (inFilename.empty()) {
                inFilename = arg;
            } else if (outFilename.empty()) {
                outFilename = arg;
            } else {
                OPENVDB_LOG_FATAL("unrecognized argument \"" << arg << "\"");
                usage();
            }
        }
    }
    if (version) {
        std::cout << "OpenVDB library version: "
            << openvdb::getLibraryAbiVersionString() << "\n";
        std::cout << "OpenVDB file format version: "
            << openvdb::OPENVDB_FILE_VERSION << std::endl;
        // With no output file, -version means print the versions and exit.
        if (outFilename.empty()) return EXIT_SUCCESS;
    }
    if (inFilename.empty()) {
        OPENVDB_LOG_FATAL("missing input OpenVDB filename");
        usage();
    }
    if (outFilename.empty()) {
        OPENVDB_LOG_FATAL("missing output OpenVDB filename");
        usage();
    }
    if (rangeSpec.empty()) {
        OPENVDB_LOG_FATAL("missing level range specification");
        usage();
    }
    try {
        parseRangeSpec(rangeSpec, opts);
    } catch (...) {
        OPENVDB_LOG_FATAL("invalid level range specification \"" << rangeSpec << "\"");
        usage();
    }
    // If -name was specified, generate an accept list of names of grids to be processed.
    // Otherwise (if the accept list is empty), process all grids of supported types.
    std::set<std::string> acceptlist;
    if (!gridNameStr.empty()) {
        boost::split(acceptlist, gridNameStr, boost::is_any_of(","));
    }
    // Process the input file.
    try {
        openvdb::io::File file(inFilename);
        file.open();
        const openvdb::MetaMap::ConstPtr fileMetadata = file.getMetadata();
        openvdb::GridPtrVec outGrids;
        // For each input grid...
        for (openvdb::io::File::NameIterator nameIter = file.beginName();
            nameIter != file.endName(); ++nameIter)
        {
            const std::string& name = nameIter.gridName();
            // If there is an accept list, check if the grid is on the list.
            const bool skip = (!acceptlist.empty() && (acceptlist.find(name) == acceptlist.end()));
            if (skip && !opts.keep) {
                OPENVDB_LOG_INFO("skipped grid \"" << name << "\"");
            } else {
                // If the grid's name is on the accept list or if -keep is in effect, read the grid.
                openvdb::GridBase::Ptr baseGrid = file.readGrid(name);
                if (!baseGrid) {
                    OPENVDB_LOG_WARN("failed to read grid \"" << name << "\"");
                } else {
                    if (skip) {
                        // -keep: pass the unprocessed grid through to the output.
                        OPENVDB_LOG_INFO("passed through grid \"" << name << "\"");
                        outGrids.push_back(baseGrid);
                    } else {
                        process(baseGrid, outGrids, opts);
                    }
                }
            }
        }
        file.close();
        // Write all output grids (mip levels and passed-through grids),
        // carrying over the input file's metadata when available.
        openvdb::util::CpuTimer timer;
        timer.start();
        openvdb::io::File outFile(outFilename);
        if (fileMetadata) {
            outFile.write(outGrids, *fileMetadata);
        } else {
            outFile.write(outGrids);
        }
        const double msec = timer.milliseconds(); // elapsed time
        if (outGrids.empty()) {
            OPENVDB_LOG_WARN("wrote empty file " << outFilename << " in "
                << std::setprecision(3) << (msec / 1000.0) << " sec");
        } else {
            OPENVDB_LOG_INFO("wrote file " << outFilename << " in "
                << std::setprecision(3) << (msec / 1000.0) << " sec");
        }
    }
    catch (const std::exception& e) {
        OPENVDB_LOG_FATAL(e.what());
        exitStatus = EXIT_FAILURE;
    }
    catch (...) {
        OPENVDB_LOG_FATAL("Exception caught (unexpected type)");
        std::terminate();
    }
    return exitStatus;
}
| 13,058 | C++ | 35.579832 | 100 | 0.554449 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/cmd/openvdb_view.cc | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#include "../viewer/Viewer.h"
#include <boost/algorithm/string/classification.hpp> // for boost::is_any_of()
#include <boost/algorithm/string/predicate.hpp> // for boost::starts_with()
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/trim.hpp>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>
/// @brief Print a usage message (including viewer key bindings) and exit
/// with the given status.  Marked [[noreturn]] because exit() never returns.
inline void
usage [[noreturn]] (const char* progName, int status)
{
    // Usage goes to stdout when requested (e.g., -help) and to stderr on error.
    (status == EXIT_SUCCESS ? std::cout : std::cerr) <<
    "Usage: " << progName << " file.vdb [file.vdb ...] [options]\n" <<
    "Which: displays OpenVDB grids\n" <<
    "Options:\n" <<
    " -i print grid information\n" <<
    " -h, -help print this usage message and exit\n" <<
    " -version print version information\n" <<
    "\n" <<
    "Controls:\n" <<
    " Esc exit\n" <<
    " -> (Right) show next grid\n" <<
    " <- (Left) show previous grid\n" <<
    " 1 toggle tree topology view on/off\n" <<
    " 2 toggle surface view on/off\n" <<
    " 3 toggle data view on/off\n" <<
    " G (\"geometry\") look at center of geometry\n" <<
    " H (\"home\") look at origin\n" <<
    " I toggle on-screen grid info on/off\n" <<
    " left mouse tumble\n" <<
    " right mouse pan\n" <<
    " mouse wheel zoom\n" <<
    "\n" <<
    " X + wheel move right cut plane\n" <<
    " Shift + X + wheel move left cut plane\n" <<
    " Y + wheel move top cut plane\n" <<
    " Shift + Y + wheel move bottom cut plane\n" <<
    " Z + wheel move front cut plane\n" <<
    " Shift + Z + wheel move back cut plane\n" <<
    " Ctrl + X + wheel move both X cut planes\n" <<
    " Ctrl + Y + wheel move both Y cut planes\n" <<
    " Ctrl + Z + wheel move both Z cut planes\n";
    exit(status);
}
////////////////////////////////////////
// Entry point for the viewer: parses options, loads the grids from all
// input VDB files, and hands them to the interactive viewer window.
int
main(int argc, char *argv[])
{
    // Derive the program name (basename of argv[0]) for use in messages.
    const char* progName = argv[0];
    if (const char* ptr = ::strrchr(progName, '/')) progName = ptr + 1;
    int status = EXIT_SUCCESS;
    try {
        openvdb::initialize();
        openvdb::logging::initialize(argc, argv);
        bool printInfo = false, printGLInfo = false, printVersionInfo = false;
        // Parse the command line.
        std::vector<std::string> filenames;
        for (int n = 1; n < argc; ++n) {
            std::string str(argv[n]);
            if (str[0] != '-') {
                filenames.push_back(str);
            } else if (str == "-i") {
                printInfo = true;
            } else if (str == "-d") { // deprecated
                printGLInfo = true;
            } else if (str == "-h" || str == "-help" || str == "--help") {
                usage(progName, EXIT_SUCCESS);
            } else if (str == "-version" || str == "--version") {
                printVersionInfo = true;
                printGLInfo = true;
            } else {
                usage(progName, EXIT_FAILURE);
            }
        }
        const size_t numFiles = filenames.size();
        if (printVersionInfo) {
            std::cout << "OpenVDB library version: "
                << openvdb::getLibraryAbiVersionString() << "\n";
            std::cout << "OpenVDB file format version: "
                << openvdb::OPENVDB_FILE_VERSION << std::endl;
            // If there are no files to view, don't print the OpenGL version,
            // since that would require opening a viewer window.
            if (numFiles == 0) return EXIT_SUCCESS;
        }
        if (numFiles == 0 && !printGLInfo) usage(progName, EXIT_FAILURE);
        openvdb_viewer::Viewer viewer = openvdb_viewer::init(progName, /*bg=*/false);
        if (printGLInfo) {
            // Now that the viewer window is open, we can get the OpenGL version, if requested.
            if (!printVersionInfo) {
                // Preserve the behavior of the deprecated -d option.
                std::cout << viewer.getVersionString() << std::endl;
            } else {
                // Print OpenGL and GLFW versions.
                std::ostringstream ostr;
                ostr << viewer.getVersionString(); // returns comma-separated list of versions
                const std::string s = ostr.str();
                std::vector<std::string> elems;
                boost::split(elems, s, boost::algorithm::is_any_of(","));
                for (size_t i = 0; i < elems.size(); ++i) {
                    boost::trim(elems[i]);
                    // Don't print the OpenVDB library version again.
                    if (!boost::starts_with(elems[i], "OpenVDB:")) {
                        std::cout << elems[i] << std::endl;
                    }
                }
            }
            if (numFiles == 0) return EXIT_SUCCESS;
        }
        openvdb::GridCPtrVec allGrids;
        // Load VDB files.
        std::string indent(numFiles == 1 ? "" : " ");
        for (size_t n = 0; n < numFiles; ++n) {
            openvdb::io::File file(filenames[n]);
            file.open();
            openvdb::GridPtrVecPtr grids = file.getGrids();
            if (grids->empty()) {
                OPENVDB_LOG_WARN(filenames[n] << " is empty");
                continue;
            }
            allGrids.insert(allGrids.end(), grids->begin(), grids->end());
            // With -i, print each grid's name and active voxel dimensions.
            if (printInfo) {
                if (numFiles > 1) std::cout << filenames[n] << ":\n";
                for (size_t i = 0; i < grids->size(); ++i) {
                    const std::string name = (*grids)[i]->getName();
                    openvdb::Coord dim = (*grids)[i]->evalActiveVoxelDim();
                    std::cout << indent << (name.empty() ? "<unnamed>" : name)
                        << " (" << dim[0] << " x " << dim[1] << " x " << dim[2]
                        << " voxels)" << std::endl;
                }
            }
        }
        // Run the interactive viewer on all loaded grids.
        viewer.open();
        viewer.view(allGrids);
        openvdb_viewer::exit();
    } catch (const char* s) {
        OPENVDB_LOG_FATAL(s);
        status = EXIT_FAILURE;
    } catch (std::exception& e) {
        OPENVDB_LOG_FATAL(e.what());
        status = EXIT_FAILURE;
    } catch (...) {
        OPENVDB_LOG_FATAL("Exception caught (unexpected type)");
        std::terminate();
    }
    return status;
}
| 6,418 | C++ | 35.890804 | 95 | 0.493456 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/VectorTransformer.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file VectorTransformer.h
#ifndef OPENVDB_TOOLS_VECTORTRANSFORMER_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_VECTORTRANSFORMER_HAS_BEEN_INCLUDED
#include <openvdb/Types.h>
#include <openvdb/math/Mat4.h>
#include <openvdb/math/Vec3.h>
#include "ValueTransformer.h" // for tools::foreach()
#include <type_traits>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Apply an affine transform to the voxel values of a vector-valued grid
/// in accordance with the grid's vector type (covariant, contravariant, etc.).
/// @throw TypeError if the grid is not vector-valued
template<typename GridType>
inline void
transformVectors(GridType&, const Mat4d&);
////////////////////////////////////////
// Functors for use with tools::foreach() to transform vector voxel values
struct HomogeneousMatMul
{
const Mat4d mat;
HomogeneousMatMul(const Mat4d& _mat): mat(_mat) {}
template<typename TreeIterT> void operator()(const TreeIterT& it) const
{
Vec3d v(*it);
it.setValue(mat.transformH(v));
}
};
/// @brief foreach() functor that multiplies each vector voxel value by
/// the upper-left 3x3 portion of the matrix (translation is ignored).
struct MatMul
{
    const Mat4d mat;

    MatMul(const Mat4d& _mat): mat(_mat) {}

    template<typename TreeIterT>
    void operator()(const TreeIterT& it) const
    {
        it.setValue(mat.transform3x3(Vec3d(*it)));
    }
};
struct MatMulNormalize
{
const Mat4d mat;
MatMulNormalize(const Mat4d& _mat): mat(_mat) {}
template<typename TreeIterT>
void operator()(const TreeIterT& it) const
{
Vec3d v(*it);
v = mat.transform3x3(v);
v.normalize();
it.setValue(v);
}
};
//{
/// @cond OPENVDB_VECTOR_TRANSFORMER_INTERNAL
/// @internal This overload is enabled only for scalar-valued grids.
/// It exists solely to report (at runtime) that transforming vectors is
/// meaningless for non-vector value types.
template<typename GridType> inline
typename std::enable_if<!VecTraits<typename GridType::ValueType>::IsVec, void>::type
doTransformVectors(GridType&, const Mat4d&)
{
    OPENVDB_THROW(TypeError, "tools::transformVectors() requires a vector-valued grid");
}
/// @internal This overload is enabled only for vector-valued grids.
/// It selects the appropriate per-voxel transform based on the grid's
/// declared vector type.
template<typename GridType> inline
typename std::enable_if<VecTraits<typename GridType::ValueType>::IsVec, void>::type
doTransformVectors(GridType& grid, const Mat4d& mat)
{
    // Vectors in grids not marked as world-space are left untransformed.
    if (!grid.isInWorldSpace()) return;
    const VecType vecType = grid.getVectorType();
    switch (vecType) {
        case VEC_COVARIANT:
        case VEC_COVARIANT_NORMALIZE:
        {
            // Covariant vectors are transformed by the inverse transpose
            // of the matrix, optionally renormalized afterward.
            Mat4d invmat = mat.inverse();
            invmat = invmat.transpose();
            if (vecType == VEC_COVARIANT_NORMALIZE) {
                foreach(grid.beginValueAll(), MatMulNormalize(invmat));
            } else {
                foreach(grid.beginValueAll(), MatMul(invmat));
            }
            break;
        }
        case VEC_CONTRAVARIANT_RELATIVE:
            // Relative contravariant vectors: apply the 3x3 part only.
            foreach(grid.beginValueAll(), MatMul(mat));
            break;
        case VEC_CONTRAVARIANT_ABSOLUTE:
            // Absolute contravariant vectors: include the translation.
            foreach(grid.beginValueAll(), HomogeneousMatMul(mat));
            break;
        case VEC_INVARIANT:
            // Invariant vectors are unaffected by the transform.
            break;
    }
}
/// @endcond
//}
/// @brief Apply the affine transform @a mat to the voxel values of @a grid,
/// interpreting them according to the grid's vector type (covariant,
/// contravariant, etc.).
/// @throw TypeError if the grid is not vector-valued
template<typename GridType>
inline void
transformVectors(GridType& grid, const Mat4d& mat)
{
    // Overload resolution (via enable_if on the grid's value type) selects
    // the vector or scalar implementation.
    doTransformVectors(grid, mat);
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_VECTORTRANSFORMER_HAS_BEEN_INCLUDED
| 3,472 | C | 24.725926 | 88 | 0.662154 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/MeshToVolume.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file MeshToVolume.h
///
/// @brief Convert polygonal meshes that consist of quads and/or triangles
/// into signed or unsigned distance field volumes.
///
/// @note The signed distance field conversion requires a closed surface
/// but not necessarily a manifold surface. Supports surfaces with
/// self intersections and degenerate faces and is independent of
/// mesh surface normals / polygon orientation.
///
/// @author Mihai Alden
#ifndef OPENVDB_TOOLS_MESH_TO_VOLUME_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_MESH_TO_VOLUME_HAS_BEEN_INCLUDED
#include <openvdb/Platform.h> // for OPENVDB_HAS_CXX11
#include <openvdb/Types.h>
#include <openvdb/math/FiniteDifference.h> // for GodunovsNormSqrd
#include <openvdb/math/Proximity.h> // for closestPointOnTriangleToPoint
#include <openvdb/util/NullInterrupter.h>
#include <openvdb/util/Util.h>
#include "ChangeBackground.h"
#include "Prune.h" // for pruneInactive and pruneLevelSet
#include "SignedFloodFill.h" // for signedFloodFillWithValues
#include <tbb/blocked_range.h>
#include <tbb/enumerable_thread_specific.h>
#include <tbb/parallel_for.h>
#include <tbb/parallel_reduce.h>
#include <tbb/partitioner.h>
#include <tbb/task_group.h>
#include <tbb/task_scheduler_init.h>
#include <algorithm> // for std::sort()
#include <cmath> // for std::isfinite(), std::isnan()
#include <deque>
#include <limits>
#include <memory>
#include <sstream>
#include <type_traits>
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
////////////////////////////////////////
/// @brief Mesh to volume conversion flags
enum MeshToVolumeFlags {
    /// Switch from the default signed distance field conversion that classifies
    /// regions as either inside or outside the mesh boundary to an unsigned distance
    /// field conversion that only computes distance values. This conversion type
    /// does not require a closed watertight mesh.
    UNSIGNED_DISTANCE_FIELD = 0x1,
    /// Disable the cleanup step that removes voxels created by self-intersecting
    /// portions of the mesh.
    DISABLE_INTERSECTING_VOXEL_REMOVAL = 0x2,
    /// Disable the distance renormalization step that smooths out bumps caused
    /// by self-intersecting or overlapping portions of the mesh.
    DISABLE_RENORMALIZATION = 0x4,
    /// Disable the cleanup step that removes active voxels that exceed the
    /// narrow band limits. (Only relevant for small limits.)
    DISABLE_NARROW_BAND_TRIMMING = 0x8
};
/// @brief Convert polygonal meshes that consist of quads and/or triangles into
/// signed or unsigned distance field volumes.
///
/// @note Requires a closed surface but not necessarily a manifold surface.
/// Supports surfaces with self intersections and degenerate faces
/// and is independent of mesh surface normals.
///
/// @interface MeshDataAdapter
/// Expected interface for the MeshDataAdapter class
/// @code
/// struct MeshDataAdapter {
/// size_t polygonCount() const; // Total number of polygons
/// size_t pointCount() const; // Total number of points
/// size_t vertexCount(size_t n) const; // Vertex count for polygon n
///
/// // Return position pos in local grid index space for polygon n and vertex v
/// void getIndexSpacePoint(size_t n, size_t v, openvdb::Vec3d& pos) const;
/// };
/// @endcode
///
/// @param mesh mesh data access class that conforms to the MeshDataAdapter
/// interface
/// @param transform world-to-index-space transform
/// @param exteriorBandWidth exterior narrow band width in voxel units
/// @param interiorBandWidth interior narrow band width in voxel units
/// (set to std::numeric_limits<float>::max() to fill object
/// interior with distance values)
/// @param flags optional conversion flags defined in @c MeshToVolumeFlags
/// @param polygonIndexGrid optional grid output that will contain the closest-polygon
/// index for each voxel in the narrow band region
template <typename GridType, typename MeshDataAdapter>
inline typename GridType::Ptr
meshToVolume(
const MeshDataAdapter& mesh,
const math::Transform& transform,
float exteriorBandWidth = 3.0f,
float interiorBandWidth = 3.0f,
int flags = 0,
typename GridType::template ValueConverter<Int32>::Type * polygonIndexGrid = nullptr);
/// @brief Convert polygonal meshes that consist of quads and/or triangles into
/// signed or unsigned distance field volumes.
///
/// @param interrupter a callback to interrupt the conversion process that conforms
/// to the util::NullInterrupter interface
/// @param mesh mesh data access class that conforms to the MeshDataAdapter
/// interface
/// @param transform world-to-index-space transform
/// @param exteriorBandWidth exterior narrow band width in voxel units
/// @param interiorBandWidth interior narrow band width in voxel units (set this value to
/// std::numeric_limits<float>::max() to fill interior regions
/// with distance values)
/// @param flags optional conversion flags defined in @c MeshToVolumeFlags
/// @param polygonIndexGrid optional grid output that will contain the closest-polygon
/// index for each voxel in the active narrow band region
template <typename GridType, typename MeshDataAdapter, typename Interrupter>
inline typename GridType::Ptr
meshToVolume(
Interrupter& interrupter,
const MeshDataAdapter& mesh,
const math::Transform& transform,
float exteriorBandWidth = 3.0f,
float interiorBandWidth = 3.0f,
int flags = 0,
typename GridType::template ValueConverter<Int32>::Type * polygonIndexGrid = nullptr);
////////////////////////////////////////


/// @brief Contiguous quad and triangle data adapter class
///
/// @details PointType and PolygonType must provide element access
///          through the square brackets operator.
/// @details Points are assumed to be in local grid index space.
/// @details The PolygonType tuple can have either three or four components;
///          this property must be specified in a static member variable
///          named @c size, similar to the math::Tuple class.
/// @details A four component tuple can represent a quad or a triangle
///          if the fourth component is set to @c util::INVALID_IDX
template<typename PointType, typename PolygonType>
struct QuadAndTriangleDataAdapter {

    QuadAndTriangleDataAdapter(const std::vector<PointType>& points,
        const std::vector<PolygonType>& polygons)
        : mPointArray(points.empty() ? nullptr : &points[0])
        , mPointArraySize(points.size())
        , mPolygonArray(polygons.empty() ? nullptr : &polygons[0])
        , mPolygonArraySize(polygons.size())
    {
    }

    QuadAndTriangleDataAdapter(const PointType * pointArray, size_t pointArraySize,
        const PolygonType* polygonArray, size_t polygonArraySize)
        : mPointArray(pointArray)
        , mPointArraySize(pointArraySize)
        , mPolygonArray(polygonArray)
        , mPolygonArraySize(polygonArraySize)
    {
    }

    size_t polygonCount() const { return mPolygonArraySize; }
    size_t pointCount() const { return mPointArraySize; }

    /// @brief Vertex count for polygon @a n
    /// @details A four-component polygon whose last index equals
    ///          @c util::INVALID_IDX is treated as a triangle.
    size_t vertexCount(size_t n) const {
        return (PolygonType::size == 3 || mPolygonArray[n][3] == util::INVALID_IDX) ? 3 : 4;
    }

    /// @brief Returns position @a pos in local grid index space
    ///        for polygon @a n and vertex @a v
    void getIndexSpacePoint(size_t n, size_t v, Vec3d& pos) const {
        const PointType& p = mPointArray[mPolygonArray[n][int(v)]];
        pos[0] = double(p[0]);
        pos[1] = double(p[1]);
        pos[2] = double(p[2]);
    }

private:
    // Non-owning views into caller-provided arrays; the caller must keep
    // the point and polygon data alive for the lifetime of this adapter.
    PointType const * const mPointArray;
    size_t const mPointArraySize;
    PolygonType const * const mPolygonArray;
    size_t const mPolygonArraySize;
}; // struct QuadAndTriangleDataAdapter
////////////////////////////////////////


// Convenience functions for the mesh to volume converter that wrap stl containers.
//
// Note the meshToVolume() method declared above is more flexible and better suited
// for arbitrary data structures.


/// @brief Convert a triangle mesh to a level set volume.
///
/// @return a grid of type @c GridType containing a narrow-band level set
///         representation of the input mesh.
///
/// @throw  TypeError if @c GridType is not scalar or not floating-point
///
/// @note   Requires a closed surface but not necessarily a manifold surface.
///         Supports surfaces with self intersections and degenerate faces
///         and is independent of mesh surface normals.
///
/// @param xform        transform for the output grid
/// @param points       list of world space point positions
/// @param triangles    triangle index list
/// @param halfWidth    half the width of the narrow band, in voxel units
template<typename GridType>
inline typename GridType::Ptr
meshToLevelSet(
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec3I>& triangles,
    float halfWidth = float(LEVEL_SET_HALF_WIDTH));


/// Adds support for an @a interrupter callback used to cancel the conversion.
template<typename GridType, typename Interrupter>
inline typename GridType::Ptr
meshToLevelSet(
    Interrupter& interrupter,
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec3I>& triangles,
    float halfWidth = float(LEVEL_SET_HALF_WIDTH));
/// @brief Convert a quad mesh to a level set volume.
///
/// @return a grid of type @c GridType containing a narrow-band level set
///         representation of the input mesh.
///
/// @throw  TypeError if @c GridType is not scalar or not floating-point
///
/// @note   Requires a closed surface but not necessarily a manifold surface.
///         Supports surfaces with self intersections and degenerate faces
///         and is independent of mesh surface normals.
///
/// @param xform        transform for the output grid
/// @param points       list of world space point positions
/// @param quads        quad index list
/// @param halfWidth    half the width of the narrow band, in voxel units
template<typename GridType>
inline typename GridType::Ptr
meshToLevelSet(
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec4I>& quads,
    float halfWidth = float(LEVEL_SET_HALF_WIDTH));


/// Adds support for an @a interrupter callback used to cancel the conversion.
template<typename GridType, typename Interrupter>
inline typename GridType::Ptr
meshToLevelSet(
    Interrupter& interrupter,
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec4I>& quads,
    float halfWidth = float(LEVEL_SET_HALF_WIDTH));
/// @brief Convert a triangle and quad mesh to a level set volume.
///
/// @return a grid of type @c GridType containing a narrow-band level set
///         representation of the input mesh.
///
/// @throw  TypeError if @c GridType is not scalar or not floating-point
///
/// @note   Requires a closed surface but not necessarily a manifold surface.
///         Supports surfaces with self intersections and degenerate faces
///         and is independent of mesh surface normals.
///
/// @param xform        transform for the output grid
/// @param points       list of world space point positions
/// @param triangles    triangle index list
/// @param quads        quad index list
/// @param halfWidth    half the width of the narrow band, in voxel units
template<typename GridType>
inline typename GridType::Ptr
meshToLevelSet(
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec3I>& triangles,
    const std::vector<Vec4I>& quads,
    float halfWidth = float(LEVEL_SET_HALF_WIDTH));


/// Adds support for an @a interrupter callback used to cancel the conversion.
template<typename GridType, typename Interrupter>
inline typename GridType::Ptr
meshToLevelSet(
    Interrupter& interrupter,
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec3I>& triangles,
    const std::vector<Vec4I>& quads,
    float halfWidth = float(LEVEL_SET_HALF_WIDTH));
/// @brief Convert a triangle and quad mesh to a signed distance field
///        with an asymmetrical narrow band.
///
/// @return a grid of type @c GridType containing a narrow-band signed
///         distance field representation of the input mesh.
///
/// @throw  TypeError if @c GridType is not scalar or not floating-point
///
/// @note   Requires a closed surface but not necessarily a manifold surface.
///         Supports surfaces with self intersections and degenerate faces
///         and is independent of mesh surface normals.
///
/// @param xform        transform for the output grid
/// @param points       list of world space point positions
/// @param triangles    triangle index list
/// @param quads        quad index list
/// @param exBandWidth  the exterior narrow-band width in voxel units
/// @param inBandWidth  the interior narrow-band width in voxel units
template<typename GridType>
inline typename GridType::Ptr
meshToSignedDistanceField(
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec3I>& triangles,
    const std::vector<Vec4I>& quads,
    float exBandWidth,
    float inBandWidth);


/// Adds support for an @a interrupter callback used to cancel the conversion.
template<typename GridType, typename Interrupter>
inline typename GridType::Ptr
meshToSignedDistanceField(
    Interrupter& interrupter,
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec3I>& triangles,
    const std::vector<Vec4I>& quads,
    float exBandWidth,
    float inBandWidth);
/// @brief Convert a triangle and quad mesh to an unsigned distance field.
///
/// @return a grid of type @c GridType containing a narrow-band unsigned
///         distance field representation of the input mesh.
///
/// @throw  TypeError if @c GridType is not scalar or not floating-point
///
/// @note   Does not require a closed surface.
///
/// @param xform        transform for the output grid
/// @param points       list of world space point positions
/// @param triangles    triangle index list
/// @param quads        quad index list
/// @param bandWidth    the width of the narrow band, in voxel units
template<typename GridType>
inline typename GridType::Ptr
meshToUnsignedDistanceField(
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec3I>& triangles,
    const std::vector<Vec4I>& quads,
    float bandWidth);


/// Adds support for an @a interrupter callback used to cancel the conversion.
template<typename GridType, typename Interrupter>
inline typename GridType::Ptr
meshToUnsignedDistanceField(
    Interrupter& interrupter,
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec3I>& triangles,
    const std::vector<Vec4I>& quads,
    float bandWidth);
////////////////////////////////////////


/// @brief Return a grid of type @c GridType containing a narrow-band level set
///        representation of a box.
///
/// @param bbox         a bounding box in world units
/// @param xform        world-to-index-space transform
/// @param halfWidth    half the width of the narrow band, in voxel units
template<typename GridType, typename VecType>
inline typename GridType::Ptr
createLevelSetBox(const math::BBox<VecType>& bbox,
    const openvdb::math::Transform& xform,
    typename VecType::ValueType halfWidth = LEVEL_SET_HALF_WIDTH);
////////////////////////////////////////


/// @brief  Traces the exterior voxel boundary of closed objects in the input
///         volume @a tree. Exterior voxels are marked with a negative sign,
///         voxels with a value below @c 0.75 are left unchanged and act as
///         the boundary layer.
///
/// @note   Does not propagate sign information into tile regions.
template <typename FloatTreeT>
inline void
traceExteriorBoundaries(FloatTreeT& tree);
////////////////////////////////////////


/// @brief  Extracts and stores voxel edge intersection data from a mesh.
class MeshToVoxelEdgeData
{
public:

    //////////

    ///@brief Internal edge data type.
    /// Per-voxel record of the distances (mXDist/mYDist/mZDist) and the
    /// indices of the intersected primitives (mXPrim/mYPrim/mZPrim) along
    /// the voxel's x, y and z edges; populated by convert().
    struct EdgeData {
        EdgeData(float dist = 1.0)
            : mXDist(dist), mYDist(dist), mZDist(dist)
            , mXPrim(util::INVALID_IDX)
            , mYPrim(util::INVALID_IDX)
            , mZPrim(util::INVALID_IDX)
        {
        }

        //@{
        /// Required by several of the tree nodes
        /// @note These methods don't perform meaningful operations.
        bool operator< (const EdgeData&) const { return false; }
        bool operator> (const EdgeData&) const { return false; }
        template<class T> EdgeData operator+(const T&) const { return *this; }
        template<class T> EdgeData operator-(const T&) const { return *this; }
        EdgeData operator-() const { return *this; }
        //@}

        /// @note Equality compares only the primitive indices, not the distances.
        bool operator==(const EdgeData& rhs) const
        {
            return mXPrim == rhs.mXPrim && mYPrim == rhs.mYPrim && mZPrim == rhs.mZPrim;
        }

        float mXDist, mYDist, mZDist;
        Index32 mXPrim, mYPrim, mZPrim;
    };

    using TreeType = tree::Tree4<EdgeData, 5, 4, 3>::Type;
    using Accessor = tree::ValueAccessor<TreeType>;


    //////////


    MeshToVoxelEdgeData();


    /// @brief  Threaded method to extract voxel edge data, the closest
    ///         intersection point and corresponding primitive index,
    ///         from the given mesh.
    ///
    /// @param pointList    List of points in grid index space, preferably unique
    ///                     and shared by different polygons.
    /// @param polygonList  List of triangles and/or quads.
    void convert(const std::vector<Vec3s>& pointList, const std::vector<Vec4I>& polygonList);


    /// @brief  Returns intersection points with corresponding primitive
    ///         indices for the given @c ijk voxel.
    void getEdgeData(Accessor& acc, const Coord& ijk,
        std::vector<Vec3d>& points, std::vector<Index32>& primitives);

    /// @return An accessor of @c MeshToVoxelEdgeData::Accessor type that
    ///         provides random read access to the internal tree.
    Accessor getAccessor() { return Accessor(mTree); }

private:
    // Copy-assignment is intentionally a no-op; instances are not copyable
    // in any meaningful way.
    void operator=(const MeshToVoxelEdgeData&) {}
    TreeType mTree;
    class GenEdgeData;
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Internal utility objects and implementation details
namespace mesh_to_volume_internal {
/// TBB body that maps a range of world-space points into grid index space.
template<typename PointType>
struct TransformPoints {

    TransformPoints(const PointType* pointsIn, PointType* pointsOut,
        const math::Transform& xform)
        : mPointsIn(pointsIn), mPointsOut(pointsOut), mXform(&xform)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        using ElemT = typename PointType::value_type;

        for (size_t i = range.begin(), last = range.end(); i < last; ++i) {

            const PointType& worldPt = mPointsIn[i];

            // Promote to double precision before applying the transform.
            Vec3d indexPt(double(worldPt[0]), double(worldPt[1]), double(worldPt[2]));
            indexPt = mXform->worldToIndex(indexPt);

            PointType& outPt = mPointsOut[i];
            outPt[0] = ElemT(indexPt[0]);
            outPt[1] = ElemT(indexPt[1]);
            outPt[2] = ElemT(indexPt[2]);
        }
    }

    PointType        const * const mPointsIn;
    PointType              * const mPointsOut;
    math::Transform  const * const mXform;
}; // TransformPoints
/// Numerical tolerance constants used by the mesh-to-volume conversion.
template<typename ValueType>
struct Tolerance
{
    /// A small epsilon value expressed in @c ValueType precision.
    static ValueType epsilon() { return static_cast<ValueType>(1e-7); }
    /// The minimum narrow-band width, in voxel units.
    static ValueType minNarrowBandWidth() { return static_cast<ValueType>(1.0 + 1e-6); }
};
////////////////////////////////////////
/// @brief Merges per-voxel distance/primitive-index data from a set of rhs
///        leafnodes into the lhs trees, keeping the smaller distance per
///        voxel (ties resolved on the smaller primitive index).
///        Takes ownership of the rhs nodes and deletes them once merged.
template<typename TreeType>
class CombineLeafNodes
{
public:

    using Int32TreeType = typename TreeType::template ValueConverter<Int32>::Type;

    using LeafNodeType = typename TreeType::LeafNodeType;
    using Int32LeafNodeType = typename Int32TreeType::LeafNodeType;

    CombineLeafNodes(TreeType& lhsDistTree, Int32TreeType& lhsIdxTree,
        LeafNodeType ** rhsDistNodes, Int32LeafNodeType ** rhsIdxNodes)
        : mDistTree(&lhsDistTree)
        , mIdxTree(&lhsIdxTree)
        , mRhsDistNodes(rhsDistNodes)
        , mRhsIdxNodes(rhsIdxNodes)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        tree::ValueAccessor<TreeType> distAcc(*mDistTree);
        tree::ValueAccessor<Int32TreeType> idxAcc(*mIdxTree);

        using DistValueType = typename LeafNodeType::ValueType;
        using IndexValueType = typename Int32LeafNodeType::ValueType;

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            const Coord& origin = mRhsDistNodes[n]->origin();

            // NOTE(review): probeLeaf() returns nullptr if no leaf exists at
            // this origin; the caller appears to guarantee that the lhs trees
            // already contain matching leafnodes -- confirm at call sites.
            LeafNodeType* lhsDistNode = distAcc.probeLeaf(origin);
            Int32LeafNodeType* lhsIdxNode = idxAcc.probeLeaf(origin);

            DistValueType* lhsDistData = lhsDistNode->buffer().data();
            IndexValueType* lhsIdxData = lhsIdxNode->buffer().data();

            const DistValueType* rhsDistData = mRhsDistNodes[n]->buffer().data();
            const IndexValueType* rhsIdxData = mRhsIdxNodes[n]->buffer().data();

            for (Index32 offset = 0; offset < LeafNodeType::SIZE; ++offset) {

                // Only voxels that reference a primitive carry merge data.
                if (rhsIdxData[offset] != Int32(util::INVALID_IDX)) {

                    const DistValueType& lhsValue = lhsDistData[offset];
                    const DistValueType& rhsValue = rhsDistData[offset];

                    if (rhsValue < lhsValue) {
                        lhsDistNode->setValueOn(offset, rhsValue);
                        lhsIdxNode->setValueOn(offset, rhsIdxData[offset]);
                    } else if (math::isExactlyEqual(rhsValue, lhsValue)) {
                        // Deterministic tie-break: keep the smaller index.
                        lhsIdxNode->setValueOn(offset,
                            std::min(lhsIdxData[offset], rhsIdxData[offset]));
                    }
                }
            }

            // The rhs nodes are owned by this functor; release them after merging.
            delete mRhsDistNodes[n];
            delete mRhsIdxNodes[n];
        }
    }

private:

    TreeType * const mDistTree;
    Int32TreeType * const mIdxTree;

    LeafNodeType ** const mRhsDistNodes;
    Int32LeafNodeType ** const mRhsIdxNodes;
}; // class CombineLeafNodes
////////////////////////////////////////
/// TBB body that saves each leafnode's origin into @c mCoordinates and
/// temporarily encodes the node's linear offset in the origin's x component
/// (undone later by RestoreOrigin).
template<typename TreeType>
struct StashOriginAndStoreOffset
{
    using LeafNodeType = typename TreeType::LeafNodeType;

    StashOriginAndStoreOffset(std::vector<LeafNodeType*>& nodes, Coord* coordinates)
        : mNodes(nodes.empty() ? nullptr : &nodes[0]), mCoordinates(coordinates)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        for (size_t i = range.begin(), last = range.end(); i < last; ++i) {
            // origin() returns a const reference; the const_cast is a known
            // trick here to reuse origin.x as scratch storage for the index.
            Coord& nodeOrigin = const_cast<Coord&>(mNodes[i]->origin());
            mCoordinates[i] = nodeOrigin;
            nodeOrigin[0] = static_cast<int>(i);
        }
    }

    LeafNodeType ** const mNodes;
    Coord * const mCoordinates;
};
/// TBB body that restores the origin x component stashed by
/// StashOriginAndStoreOffset, undoing the linear-offset encoding.
template<typename TreeType>
struct RestoreOrigin
{
    using LeafNodeType = typename TreeType::LeafNodeType;

    RestoreOrigin(std::vector<LeafNodeType*>& nodes, const Coord* coordinates)
        : mNodes(nodes.empty() ? nullptr : &nodes[0]), mCoordinates(coordinates)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        for (size_t i = range.begin(), last = range.end(); i < last; ++i) {
            // Only the x component was repurposed; y and z were untouched.
            Coord& nodeOrigin = const_cast<Coord&>(mNodes[i]->origin());
            nodeOrigin[0] = mCoordinates[i][0];
        }
    }

    LeafNodeType ** const mNodes;
    Coord const * const mCoordinates;
};
/// @brief For each leafnode, computes the offsets (into the node list) of its
///        nearest neighbour leafnodes along the six axis directions, writing
///        them into six consecutive sub-tables of @a offsets.
/// @note  Assumes each leafnode's linear offset has been stashed in its
///        origin.x component (see StashOriginAndStoreOffset); the original
///        origins are supplied via @a coordinates.
template<typename TreeType>
class ComputeNodeConnectivity
{
public:
    using LeafNodeType = typename TreeType::LeafNodeType;

    ComputeNodeConnectivity(const TreeType& tree, const Coord* coordinates,
        size_t* offsets, size_t numNodes, const CoordBBox& bbox)
        : mTree(&tree)
        , mCoordinates(coordinates)
        , mOffsets(offsets)
        , mNumNodes(numNodes)
        , mBBox(bbox)
    {
    }

    ComputeNodeConnectivity(const ComputeNodeConnectivity&) = default;

    // Disallow assignment
    ComputeNodeConnectivity& operator=(const ComputeNodeConnectivity&) = delete;

    void operator()(const tbb::blocked_range<size_t>& range) const {

        // The offset table holds six sub-tables of mNumNodes entries each,
        // in the order: +x, -x, +y, -y, +z, -z.
        size_t* offsetsNextX = mOffsets;
        size_t* offsetsPrevX = mOffsets + mNumNodes;
        size_t* offsetsNextY = mOffsets + mNumNodes * 2;
        size_t* offsetsPrevY = mOffsets + mNumNodes * 3;
        size_t* offsetsNextZ = mOffsets + mNumNodes * 4;
        size_t* offsetsPrevZ = mOffsets + mNumNodes * 5;

        tree::ValueAccessor<const TreeType> acc(*mTree);
        const Int32 DIM = static_cast<Int32>(LeafNodeType::DIM);

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            const Coord& origin = mCoordinates[n];

            offsetsNextX[n] = findNeighbourNode(acc, origin, Coord(DIM, 0, 0));
            offsetsPrevX[n] = findNeighbourNode(acc, origin, Coord(-DIM, 0, 0));
            offsetsNextY[n] = findNeighbourNode(acc, origin, Coord(0, DIM, 0));
            offsetsPrevY[n] = findNeighbourNode(acc, origin, Coord(0, -DIM, 0));
            offsetsNextZ[n] = findNeighbourNode(acc, origin, Coord(0, 0, DIM));
            offsetsPrevZ[n] = findNeighbourNode(acc, origin, Coord(0, 0, -DIM));
        }
    }

    /// @brief  Walks from @a start in increments of @a step until a leafnode
    ///         is found or the bounding box is exited.
    /// @return the neighbour's linear offset (read from its stashed origin.x),
    ///         or std::numeric_limits<size_t>::max() if no neighbour exists.
    size_t findNeighbourNode(tree::ValueAccessor<const TreeType>& acc,
        const Coord& start, const Coord& step) const
    {
        Coord ijk = start + step;
        CoordBBox bbox(mBBox);

        while (bbox.isInside(ijk)) {
            const LeafNodeType* node = acc.probeConstLeaf(ijk);
            if (node) return static_cast<size_t>(node->origin()[0]);
            ijk += step;
        }

        return std::numeric_limits<size_t>::max();
    }

private:
    TreeType    const * const mTree;
    Coord       const * const mCoordinates;
    size_t            * const mOffsets;

    const size_t    mNumNodes;
    const CoordBBox mBBox;
}; // class ComputeNodeConnectivity
/// @brief Builds and stores, for every leafnode in a tree, the offsets of
///        its closest neighbour leafnodes in each of the six axis directions.
template<typename TreeType>
struct LeafNodeConnectivityTable
{
    // Sentinel value meaning "no neighbour in this direction".
    enum { INVALID_OFFSET = std::numeric_limits<size_t>::max() };

    using LeafNodeType = typename TreeType::LeafNodeType;

    LeafNodeConnectivityTable(TreeType& tree)
    {
        mLeafNodes.reserve(tree.leafCount());
        tree.getNodes(mLeafNodes);

        if (mLeafNodes.empty()) return;

        CoordBBox bbox;
        tree.evalLeafBoundingBox(bbox);

        const tbb::blocked_range<size_t> range(0, mLeafNodes.size());

        // stash the leafnode origin coordinate and temporarily store the
        // linear offset in the origin.x variable.  The original origins are
        // restored below once the connectivity table has been built.
        std::unique_ptr<Coord[]> coordinates{new Coord[mLeafNodes.size()]};
        tbb::parallel_for(range,
            StashOriginAndStoreOffset<TreeType>(mLeafNodes, coordinates.get()));

        // build the leafnode offset table (six sub-tables of size() entries
        // each, in the order: +x, -x, +y, -y, +z, -z)
        mOffsets.reset(new size_t[mLeafNodes.size() * 6]);
        tbb::parallel_for(range, ComputeNodeConnectivity<TreeType>(
            tree, coordinates.get(), mOffsets.get(), mLeafNodes.size(), bbox));

        // restore the leafnode origin coordinate
        tbb::parallel_for(range, RestoreOrigin<TreeType>(mLeafNodes, coordinates.get()));
    }

    size_t size() const { return mLeafNodes.size(); }

    std::vector<LeafNodeType*>& nodes() { return mLeafNodes; }
    const std::vector<LeafNodeType*>& nodes() const { return mLeafNodes; }

    //@{
    /// Per-direction neighbour offset tables; entries are node-list indices
    /// or INVALID_OFFSET when no neighbour exists.
    const size_t* offsetsNextX() const { return mOffsets.get(); }
    const size_t* offsetsPrevX() const { return mOffsets.get() + mLeafNodes.size(); }
    const size_t* offsetsNextY() const { return mOffsets.get() + mLeafNodes.size() * 2; }
    const size_t* offsetsPrevY() const { return mOffsets.get() + mLeafNodes.size() * 3; }
    const size_t* offsetsNextZ() const { return mOffsets.get() + mLeafNodes.size() * 4; }
    const size_t* offsetsPrevZ() const { return mOffsets.get() + mLeafNodes.size() * 5; }
    //@}

private:
    std::vector<LeafNodeType*> mLeafNodes;
    std::unique_ptr<size_t[]> mOffsets;
}; // struct LeafNodeConnectivityTable
/// @brief Sweeps rows of voxels along one axis, starting from the nodes
///        listed in @a startNodeIndices, flipping the sign of exterior
///        voxels until a boundary (surface-intersecting) voxel is hit.
template<typename TreeType>
class SweepExteriorSign
{
public:

    enum Axis { X_AXIS = 0, Y_AXIS = 1, Z_AXIS = 2 };

    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using ConnectivityTable = LeafNodeConnectivityTable<TreeType>;

    SweepExteriorSign(Axis axis, const std::vector<size_t>& startNodeIndices,
        ConnectivityTable& connectivity)
        : mStartNodeIndices(startNodeIndices.empty() ? nullptr : &startNodeIndices[0])
        , mConnectivity(&connectivity)
        , mAxis(axis)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        constexpr Int32 DIM = static_cast<Int32>(LeafNodeType::DIM);

        std::vector<LeafNodeType*>& nodes = mConnectivity->nodes();

        // Z Axis (default): iterate over the node face via coordinates
        // idxA/idxB; consecutive voxels along the sweep axis are 'step'
        // apart in the node's linear buffer.
        size_t idxA = 0, idxB = 1;
        Int32 step = 1;

        const size_t* nextOffsets = mConnectivity->offsetsNextZ();
        const size_t* prevOffsets = mConnectivity->offsetsPrevZ();

        if (mAxis == Y_AXIS) {
            idxA = 0;
            idxB = 2;
            step = DIM;
            nextOffsets = mConnectivity->offsetsNextY();
            prevOffsets = mConnectivity->offsetsPrevY();
        } else if (mAxis == X_AXIS) {
            idxA = 1;
            idxB = 2;
            step = DIM*DIM;
            nextOffsets = mConnectivity->offsetsNextX();
            prevOffsets = mConnectivity->offsetsPrevX();
        }

        Coord ijk(0, 0, 0);

        // a and b alias the two in-face components of ijk.
        Int32& a = ijk[idxA];
        Int32& b = ijk[idxB];

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            size_t startOffset = mStartNodeIndices[n];
            size_t lastOffset = startOffset;

            Int32 pos(0);

            for (a = 0; a < DIM; ++a) {
                for (b = 0; b < DIM; ++b) {

                    pos = static_cast<Int32>(LeafNodeType::coordToOffset(ijk));
                    size_t offset = startOffset;

                    // sweep in +axis direction until a boundary voxel is hit.
                    while ( offset != ConnectivityTable::INVALID_OFFSET &&
                            traceVoxelLine(*nodes[offset], pos, step) ) {

                        lastOffset = offset;
                        offset = nextOffsets[offset];
                    }

                    // find last leafnode in +axis direction
                    offset = lastOffset;
                    while (offset != ConnectivityTable::INVALID_OFFSET) {
                        lastOffset = offset;
                        offset = nextOffsets[offset];
                    }

                    // sweep in -axis direction until a boundary voxel is hit.
                    offset = lastOffset;
                    pos += step * (DIM - 1);
                    while ( offset != ConnectivityTable::INVALID_OFFSET &&
                            traceVoxelLine(*nodes[offset], pos, -step)) {
                        offset = prevOffsets[offset];
                    }
                }
            }
        }
    }

    /// @brief  Flips the sign of exterior voxels along one row of @a node,
    ///         starting at linear offset @a pos and advancing by @a step.
    /// @return true if the row ended while still outside the surface, i.e.
    ///         the sweep may continue into the next node along the axis.
    bool traceVoxelLine(LeafNodeType& node, Int32 pos, const Int32 step) const {

        ValueType* data = node.buffer().data();

        bool isOutside = true;

        for (Index i = 0; i < LeafNodeType::DIM; ++i) {

            assert(pos >= 0);
            ValueType& dist = data[pos];

            if (dist < ValueType(0.0)) {
                // Already flagged as exterior.
                isOutside = true;
            } else {
                // Boundary voxel check. (Voxel that intersects the surface)
                if (!(dist > ValueType(0.75))) isOutside = false;

                if (isOutside) dist = ValueType(-dist);
            }

            pos += step;
        }

        return isOutside;
    }

private:
    size_t const * const mStartNodeIndices;
    ConnectivityTable * const mConnectivity;

    const Axis mAxis;
}; // class SweepExteriorSign
/// @brief Serial flood fill of the exterior (negative) sign within a single
///        leafnode, seeded from voxels that are already negative.
/// @note  The neighbour offset arithmetic assumes the linear layout
///        pos = x*DIM^2 + y*DIM + z used by offsetToLocalCoord().
template<typename LeafNodeType>
inline void
seedFill(LeafNodeType& node)
{
    using ValueType = typename LeafNodeType::ValueType;
    using Queue = std::deque<Index>;

    ValueType* data = node.buffer().data();

    // find seed points
    Queue seedPoints;
    for (Index pos = 0; pos < LeafNodeType::SIZE; ++pos) {
        if (data[pos] < 0.0) seedPoints.push_back(pos);
    }

    if (seedPoints.empty()) return;

    // clear sign information; the fill below re-applies the negative sign
    // one voxel at a time, so the sign also acts as a 'visited' flag.
    for (Queue::iterator it = seedPoints.begin(); it != seedPoints.end(); ++it) {
        ValueType& dist = data[*it];
        dist = -dist;
    }

    // flood fill
    Coord ijk(0, 0, 0);
    Index pos(0), nextPos(0);

    while (!seedPoints.empty()) {

        pos = seedPoints.back();
        seedPoints.pop_back();

        ValueType& dist = data[pos];

        // skip voxels that were already marked negative (visited).
        if (!(dist < ValueType(0.0))) {

            dist = -dist; // flip sign

            ijk = LeafNodeType::offsetToLocalCoord(pos);

            // enqueue non-boundary (> 0.75) face neighbours inside the node.
            if (ijk[0] != 0) { // i - 1, j, k
                nextPos = pos - LeafNodeType::DIM * LeafNodeType::DIM;
                if (data[nextPos] > ValueType(0.75)) seedPoints.push_back(nextPos);
            }

            if (ijk[0] != (LeafNodeType::DIM - 1)) { // i + 1, j, k
                nextPos = pos + LeafNodeType::DIM * LeafNodeType::DIM;
                if (data[nextPos] > ValueType(0.75)) seedPoints.push_back(nextPos);
            }

            if (ijk[1] != 0) { // i, j - 1, k
                nextPos = pos - LeafNodeType::DIM;
                if (data[nextPos] > ValueType(0.75)) seedPoints.push_back(nextPos);
            }

            if (ijk[1] != (LeafNodeType::DIM - 1)) { // i, j + 1, k
                nextPos = pos + LeafNodeType::DIM;
                if (data[nextPos] > ValueType(0.75)) seedPoints.push_back(nextPos);
            }

            if (ijk[2] != 0) { // i, j, k - 1
                nextPos = pos - 1;
                if (data[nextPos] > ValueType(0.75)) seedPoints.push_back(nextPos);
            }

            if (ijk[2] != (LeafNodeType::DIM - 1)) { // i, j, k + 1
                nextPos = pos + 1;
                if (data[nextPos] > ValueType(0.75)) seedPoints.push_back(nextPos);
            }
        }
    }
} // seedFill()
/// @brief Serial scanline propagation of the exterior (negative) sign within
///        a single leafnode: repeatedly scans all voxels and flips any
///        non-boundary voxel (> 0.75) that has a negative face neighbour,
///        until a full pass makes no change.
/// @return true if any voxel sign in the node was changed.
template<typename LeafNodeType>
inline bool
scanFill(LeafNodeType& node)
{
    bool updatedNode = false;

    using ValueType = typename LeafNodeType::ValueType;
    ValueType* data = node.buffer().data();

    Coord ijk(0, 0, 0);

    bool updatedSign = true;
    while (updatedSign) {

        updatedSign = false;

        for (Index pos = 0; pos < LeafNodeType::SIZE; ++pos) {

            ValueType& dist = data[pos];

            // candidate voxels: positive and strictly outside the boundary layer
            if (!(dist < ValueType(0.0)) && dist > ValueType(0.75)) {

                ijk = LeafNodeType::offsetToLocalCoord(pos);

                // i, j, k - 1
                if (ijk[2] != 0 && data[pos - 1] < ValueType(0.0)) {
                    updatedSign = true;
                    dist = ValueType(-dist);

                // i, j, k + 1
                } else if (ijk[2] != (LeafNodeType::DIM - 1) && data[pos + 1] < ValueType(0.0)) {
                    updatedSign = true;
                    dist = ValueType(-dist);

                // i, j - 1, k
                } else if (ijk[1] != 0 && data[pos - LeafNodeType::DIM] < ValueType(0.0)) {
                    updatedSign = true;
                    dist = ValueType(-dist);

                // i, j + 1, k
                } else if (ijk[1] != (LeafNodeType::DIM - 1)
                    && data[pos + LeafNodeType::DIM] < ValueType(0.0))
                {
                    updatedSign = true;
                    dist = ValueType(-dist);

                // i - 1, j, k
                } else if (ijk[0] != 0
                    && data[pos - LeafNodeType::DIM * LeafNodeType::DIM] < ValueType(0.0))
                {
                    updatedSign = true;
                    dist = ValueType(-dist);

                // i + 1, j, k
                } else if (ijk[0] != (LeafNodeType::DIM - 1)
                    && data[pos + LeafNodeType::DIM * LeafNodeType::DIM] < ValueType(0.0))
                {
                    updatedSign = true;
                    dist = ValueType(-dist);
                }
            }
        } // end value loop

        updatedNode |= updatedSign;
    } // end update loop

    return updatedNode;
} // scanFill()
/// TBB body that runs scanFill() on every node flagged in the
/// changed-node mask.
template<typename TreeType>
class SeedFillExteriorSign
{
public:
    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;

    SeedFillExteriorSign(std::vector<LeafNodeType*>& nodes, const bool* changedNodeMask)
        : mNodes(nodes.empty() ? nullptr : &nodes[0])
        , mChangedNodeMask(changedNodeMask)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        for (size_t i = range.begin(), last = range.end(); i < last; ++i) {
            if (!mChangedNodeMask[i]) continue;
            //seedFill(*mNodes[i]);
            // Do not update the flag in mChangedNodeMask even if scanFill
            // returns false.  mChangedNodeMask is queried by neighbouring
            // accesses in ::SeedPoints, which needs to know that this node
            // had values propagated on a previous iteration.
            scanFill(*mNodes[i]);
        }
    }

    LeafNodeType ** const mNodes;
    const bool * const mChangedNodeMask;
};
/// TBB body that assigns a constant value to every element of a raw array range.
template<typename ValueType>
struct FillArray
{
    FillArray(ValueType* array, const ValueType v) : mArray(array), mValue(v) { }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        const ValueType fillValue = mValue;
        for (size_t i = range.begin(), last = range.end(); i < last; ++i) {
            mArray[i] = fillValue;
        }
    }

    ValueType * const mArray;
    const ValueType mValue;
};
template<typename ValueType>
inline void
fillArray(ValueType* array, const ValueType val, const size_t length)
{
const auto grainSize = std::max<size_t>(
length / tbb::task_scheduler_init::default_num_threads(), 1024);
const tbb::blocked_range<size_t> range(0, length, grainSize);
tbb::parallel_for(range, FillArray<ValueType>(array, val), tbb::simple_partitioner());
}
/// TBB body that flips the sign of every voxel flagged in the per-voxel
/// mask (for nodes flagged in the node mask), clearing the voxel flags
/// as it goes.
template<typename TreeType>
class SyncVoxelMask
{
public:
    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;

    SyncVoxelMask(std::vector<LeafNodeType*>& nodes,
        const bool* changedNodeMask, bool* changedVoxelMask)
        : mNodes(nodes.empty() ? nullptr : &nodes[0])
        , mChangedNodeMask(changedNodeMask)
        , mChangedVoxelMask(changedVoxelMask)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        for (size_t i = range.begin(), last = range.end(); i < last; ++i) {

            if (!mChangedNodeMask[i]) continue;

            ValueType* values = mNodes[i]->buffer().data();
            bool* flags = &mChangedVoxelMask[i * LeafNodeType::SIZE];

            for (Index pos = 0; pos < LeafNodeType::SIZE; ++pos) {
                if (flags[pos]) {
                    values[pos] = ValueType(-values[pos]);
                    flags[pos] = false; // consume the flag
                }
            }
        }
    }

    LeafNodeType ** const mNodes;
    bool const * const mChangedNodeMask;
    bool * const mChangedVoxelMask;
};
/// @brief Finds voxels on shared leafnode faces where the exterior
///        (negative) sign can propagate in from an already-updated
///        neighbour node, recording them in the per-voxel mask and
///        flagging affected nodes in @c mNodeMask.
template<typename TreeType>
class SeedPoints
{
public:
    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using ConnectivityTable = LeafNodeConnectivityTable<TreeType>;

    SeedPoints(ConnectivityTable& connectivity,
        bool* changedNodeMask, bool* nodeMask, bool* changedVoxelMask)
        : mConnectivity(&connectivity)
        , mChangedNodeMask(changedNodeMask)
        , mNodeMask(nodeMask)
        , mChangedVoxelMask(changedVoxelMask)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            // Check all six faces of node n and record whether any seed
            // voxel was found.
            bool changedValue = false;

            changedValue |= processZ(n, /*firstFace=*/true);
            changedValue |= processZ(n, /*firstFace=*/false);

            changedValue |= processY(n, /*firstFace=*/true);
            changedValue |= processY(n, /*firstFace=*/false);

            changedValue |= processX(n, /*firstFace=*/true);
            changedValue |= processX(n, /*firstFace=*/false);

            mNodeMask[n] = changedValue;
        }
    }

    /// Compares the -z (firstFace) or +z face of node @a n against the
    /// adjacent face of its z neighbour; flags boundary-exterior voxels
    /// (> 0.75) whose neighbour across the face is negative.
    bool processZ(const size_t n, bool firstFace) const
    {
        const size_t offset =
            firstFace ? mConnectivity->offsetsPrevZ()[n] : mConnectivity->offsetsNextZ()[n];
        if (offset != ConnectivityTable::INVALID_OFFSET && mChangedNodeMask[offset]) {

            bool* mask = &mChangedVoxelMask[n * LeafNodeType::SIZE];

            const ValueType* lhsData = mConnectivity->nodes()[n]->buffer().data();
            const ValueType* rhsData = mConnectivity->nodes()[offset]->buffer().data();

            // z runs fastest in the linear layout; the face voxels differ
            // only in their z offset within each (x, y) column.
            const Index lastOffset = LeafNodeType::DIM - 1;
            const Index lhsOffset =
                firstFace ? 0 : lastOffset, rhsOffset = firstFace ? lastOffset : 0;

            Index tmpPos(0), pos(0);
            bool changedValue = false;

            for (Index x = 0; x < LeafNodeType::DIM; ++x) {
                tmpPos = x << (2 * LeafNodeType::LOG2DIM);
                for (Index y = 0; y < LeafNodeType::DIM; ++y) {
                    pos = tmpPos + (y << LeafNodeType::LOG2DIM);

                    if (lhsData[pos + lhsOffset] > ValueType(0.75)) {
                        if (rhsData[pos + rhsOffset] < ValueType(0.0)) {
                            changedValue = true;
                            mask[pos + lhsOffset] = true;
                        }
                    }
                }
            }

            return changedValue;
        }

        return false;
    }

    /// Same as processZ() but for the -y/+y node faces.
    bool processY(const size_t n, bool firstFace) const
    {
        const size_t offset =
            firstFace ? mConnectivity->offsetsPrevY()[n] : mConnectivity->offsetsNextY()[n];
        if (offset != ConnectivityTable::INVALID_OFFSET && mChangedNodeMask[offset]) {

            bool* mask = &mChangedVoxelMask[n * LeafNodeType::SIZE];

            const ValueType* lhsData = mConnectivity->nodes()[n]->buffer().data();
            const ValueType* rhsData = mConnectivity->nodes()[offset]->buffer().data();

            const Index lastOffset = LeafNodeType::DIM * (LeafNodeType::DIM - 1);
            const Index lhsOffset =
                firstFace ? 0 : lastOffset, rhsOffset = firstFace ? lastOffset : 0;

            Index tmpPos(0), pos(0);
            bool changedValue = false;

            for (Index x = 0; x < LeafNodeType::DIM; ++x) {
                tmpPos = x << (2 * LeafNodeType::LOG2DIM);
                for (Index z = 0; z < LeafNodeType::DIM; ++z) {
                    pos = tmpPos + z;

                    if (lhsData[pos + lhsOffset] > ValueType(0.75)) {
                        if (rhsData[pos + rhsOffset] < ValueType(0.0)) {
                            changedValue = true;
                            mask[pos + lhsOffset] = true;
                        }
                    }
                }
            }

            return changedValue;
        }

        return false;
    }

    /// Same as processZ() but for the -x/+x node faces.
    bool processX(const size_t n, bool firstFace) const
    {
        const size_t offset =
            firstFace ? mConnectivity->offsetsPrevX()[n] : mConnectivity->offsetsNextX()[n];
        if (offset != ConnectivityTable::INVALID_OFFSET && mChangedNodeMask[offset]) {

            bool* mask = &mChangedVoxelMask[n * LeafNodeType::SIZE];

            const ValueType* lhsData = mConnectivity->nodes()[n]->buffer().data();
            const ValueType* rhsData = mConnectivity->nodes()[offset]->buffer().data();

            const Index lastOffset = LeafNodeType::DIM * LeafNodeType::DIM * (LeafNodeType::DIM-1);
            const Index lhsOffset =
                firstFace ? 0 : lastOffset, rhsOffset = firstFace ? lastOffset : 0;

            Index tmpPos(0), pos(0);
            bool changedValue = false;

            for (Index y = 0; y < LeafNodeType::DIM; ++y) {
                tmpPos = y << LeafNodeType::LOG2DIM;
                for (Index z = 0; z < LeafNodeType::DIM; ++z) {
                    pos = tmpPos + z;

                    if (lhsData[pos + lhsOffset] > ValueType(0.75)) {
                        if (rhsData[pos + rhsOffset] < ValueType(0.0)) {
                            changedValue = true;
                            mask[pos + lhsOffset] = true;
                        }
                    }
                }
            }

            return changedValue;
        }

        return false;
    }

    ConnectivityTable * const mConnectivity;
    bool * const mChangedNodeMask;
    bool * const mNodeMask;
    bool * const mChangedVoxelMask;
};
////////////////////////////////////////
/// @brief TBB body that assigns an inside/outside sign to voxels in the
///  surface-intersecting band (0 <= dist <= 0.75) of the distance tree.
///  A band voxel is flipped negative when the direction from the closest
///  surface point of a nearby interior voxel (dist < -0.75) also points
///  towards this voxel, i.e. both voxels lie on the same side of the surface.
template<typename TreeType, typename MeshDataAdapter>
struct ComputeIntersectingVoxelSign
{
    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using Int32TreeType = typename TreeType::template ValueConverter<Int32>::Type;
    using Int32LeafNodeType = typename Int32TreeType::LeafNodeType;

    // Per-thread scratch buffers, allocated lazily on first use:
    //  - PointArray holds, per voxel, a (closest point, direction) pair.
    //  - MaskArray marks voxels whose pair has already been computed.
    using PointArray = std::unique_ptr<Vec3d[]>;
    using MaskArray = std::unique_ptr<bool[]>;
    using LocalData = std::pair<PointArray, MaskArray>;
    using LocalDataTable = tbb::enumerable_thread_specific<LocalData>;

    /// @param distNodes  leaf nodes of @a distTree to process (band voxels).
    /// @param distTree   signed-band distance tree (read only).
    /// @param indexTree  per-voxel closest-primitive index tree.
    /// @param mesh       adapter exposing the mesh primitives.
    ComputeIntersectingVoxelSign(
        std::vector<LeafNodeType*>& distNodes,
        const TreeType& distTree,
        const Int32TreeType& indexTree,
        const MeshDataAdapter& mesh)
        : mDistNodes(distNodes.empty() ? nullptr : &distNodes[0])
        , mDistTree(&distTree)
        , mIndexTree(&indexTree)
        , mMesh(&mesh)
        , mLocalDataTable(new LocalDataTable())
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        tree::ValueAccessor<const TreeType> distAcc(*mDistTree);
        tree::ValueAccessor<const Int32TreeType> idxAcc(*mIndexTree);

        ValueType nval;
        CoordBBox bbox;
        Index xPos(0), yPos(0);
        Coord ijk, nijk, nodeMin, nodeMax;
        Vec3d cp, xyz, nxyz, dir1, dir2;

        // Fetch (or lazily allocate) this thread's scratch buffers.
        LocalData& localData = mLocalDataTable->local();

        PointArray& points = localData.first;
        if (!points) points.reset(new Vec3d[LeafNodeType::SIZE * 2]);

        MaskArray& mask = localData.second;
        if (!mask) mask.reset(new bool[LeafNodeType::SIZE]);

        typename LeafNodeType::ValueOnCIter it;

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            LeafNodeType& node = *mDistNodes[n];
            ValueType* data = node.buffer().data();

            // Index leaf is assumed to be topologically in sync with the
            // distance leaf.
            const Int32LeafNodeType* idxNode = idxAcc.probeConstLeaf(node.origin());
            const Int32* idxData = idxNode->buffer().data();

            nodeMin = node.origin();
            nodeMax = nodeMin.offsetBy(LeafNodeType::DIM - 1);

            // reset computed voxel mask.
            memset(mask.get(), 0, sizeof(bool) * LeafNodeType::SIZE);

            for (it = node.cbeginValueOn(); it; ++it) {
                Index pos = it.pos();

                ValueType& dist = data[pos];
                // Only voxels in the intersecting band [0, 0.75] are signed here.
                if (dist < 0.0 || dist > 0.75) continue;

                ijk = node.offsetToGlobalCoord(pos);

                xyz[0] = double(ijk[0]);
                xyz[1] = double(ijk[1]);
                xyz[2] = double(ijk[2]);

                // One-ring neighbourhood clamped to this node's bounds.
                bbox.min() = Coord::maxComponent(ijk.offsetBy(-1), nodeMin);
                bbox.max() = Coord::minComponent(ijk.offsetBy(1), nodeMax);

                bool flipSign = false;

                // Pass 1: in-node neighbours with dist < -0.75. Cache each
                // neighbour's closest surface point and outward direction,
                // then test whether this voxel lies on the same side.
                for (nijk[0] = bbox.min()[0]; nijk[0] <= bbox.max()[0] && !flipSign; ++nijk[0]) {
                    xPos = (nijk[0] & (LeafNodeType::DIM - 1u)) << (2 * LeafNodeType::LOG2DIM);
                    for (nijk[1]=bbox.min()[1]; nijk[1] <= bbox.max()[1] && !flipSign; ++nijk[1]) {
                        yPos = xPos + ((nijk[1] & (LeafNodeType::DIM-1u)) << LeafNodeType::LOG2DIM);
                        for (nijk[2] = bbox.min()[2]; nijk[2] <= bbox.max()[2]; ++nijk[2]) {
                            pos = yPos + (nijk[2] & (LeafNodeType::DIM - 1u));

                            const Int32& polyIdx = idxData[pos];

                            if (polyIdx == Int32(util::INVALID_IDX) || !(data[pos] < -0.75))
                                continue;

                            const Index pointIndex = pos * 2;

                            // Compute and cache the (point, direction) pair
                            // once per neighbour voxel.
                            if (!mask[pos]) {

                                mask[pos] = true;

                                nxyz[0] = double(nijk[0]);
                                nxyz[1] = double(nijk[1]);
                                nxyz[2] = double(nijk[2]);

                                Vec3d& point = points[pointIndex];
                                point = closestPoint(nxyz, polyIdx);

                                Vec3d& direction = points[pointIndex + 1];
                                direction = nxyz - point;
                                direction.normalize();
                            }

                            // Same-side test: flip if this voxel's direction
                            // from the cached surface point agrees with the
                            // interior neighbour's direction.
                            dir1 = xyz - points[pointIndex];
                            dir1.normalize();

                            if (points[pointIndex + 1].dot(dir1) > 0.0) {
                                flipSign = true;
                                break;
                            }
                        }
                    }
                }

                if (flipSign) {
                    dist = -dist;
                } else {
                    // Pass 2: 26-neighbourhood voxels that fall OUTSIDE the
                    // clamped bbox (i.e. in other nodes), fetched through the
                    // accessors; no caching here.
                    for (Int32 m = 0; m < 26; ++m) {
                        nijk = ijk + util::COORD_OFFSETS[m];

                        if (!bbox.isInside(nijk) && distAcc.probeValue(nijk, nval) && nval<-0.75) {
                            nxyz[0] = double(nijk[0]);
                            nxyz[1] = double(nijk[1]);
                            nxyz[2] = double(nijk[2]);

                            cp = closestPoint(nxyz, idxAcc.getValue(nijk));

                            dir1 = xyz - cp;
                            dir1.normalize();

                            dir2 = nxyz - cp;
                            dir2.normalize();

                            if (dir2.dot(dir1) > 0.0) {
                                dist = -dist;
                                break;
                            }
                        }
                    }
                }

            } // active voxel loop
        } // leaf node loop
    }

private:

    /// @brief Closest point on primitive @a polyIdx to @a center, in index
    ///  space. Quads are evaluated as two triangles and the nearer point wins.
    Vec3d closestPoint(const Vec3d& center, Int32 polyIdx) const
    {
        Vec3d a, b, c, cp, uvw;

        const size_t polygon = size_t(polyIdx);
        mMesh->getIndexSpacePoint(polygon, 0, a);
        mMesh->getIndexSpacePoint(polygon, 1, b);
        mMesh->getIndexSpacePoint(polygon, 2, c);

        cp = closestPointOnTriangleToPoint(a, c, b, center, uvw);

        // Second triangle of a quad primitive.
        if (4 == mMesh->vertexCount(polygon)) {

            mMesh->getIndexSpacePoint(polygon, 3, b);

            c = closestPointOnTriangleToPoint(a, b, c, center, uvw);

            if ((center - c).lengthSqr() < (center - cp).lengthSqr()) {
                cp = c;
            }
        }

        return cp;
    }

    LeafNodeType ** const mDistNodes;
    TreeType const * const mDistTree;
    Int32TreeType const * const mIndexTree;
    MeshDataAdapter const * const mMesh;
    SharedPtr<LocalDataTable> mLocalDataTable;
}; // ComputeIntersectingVoxelSign
////////////////////////////////////////
/// @brief Flag which of the 26 neighbours of the voxel at linear offset
///  @a pos lie inside the same leaf node.
/// @param pos   linear voxel offset within the leaf buffer.
/// @param mask  output flags, ordered to match util::COORD_OFFSETS:
///              entries 0-5 faces, 6-17 edges, 18-25 corners.
template<typename LeafNodeType>
inline void
maskNodeInternalNeighbours(const Index pos, bool (&mask)[26])
{
    using NodeT = LeafNodeType;

    const Coord ijk = NodeT::offsetToLocalCoord(pos);

    // Per-axis in-bounds tests for a single step in each direction.
    const bool iUp   = ijk[0] != (NodeT::DIM - 1); // i+1 stays inside
    const bool iDown = ijk[0] != 0;                // i-1 stays inside
    const bool jUp   = ijk[1] != (NodeT::DIM - 1); // j+1 stays inside
    const bool jDown = ijk[1] != 0;                // j-1 stays inside
    const bool kUp   = ijk[2] != (NodeT::DIM - 1); // k+1 stays inside
    const bool kDown = ijk[2] != 0;                // k-1 stays inside

    // Face adjacent neighbours
    mask[0] = iUp;   // i+1, j, k
    mask[1] = iDown; // i-1, j, k
    mask[2] = jUp;   // i, j+1, k
    mask[3] = jDown; // i, j-1, k
    mask[4] = kUp;   // i, j, k+1
    mask[5] = kDown; // i, j, k-1

    // Edge adjacent neighbours
    mask[6]  = iUp   && kDown; // i+1, j, k-1
    mask[7]  = iDown && kDown; // i-1, j, k-1
    mask[8]  = iUp   && kUp;   // i+1, j, k+1
    mask[9]  = iDown && kUp;   // i-1, j, k+1
    mask[10] = iUp   && jUp;   // i+1, j+1, k
    mask[11] = iDown && jUp;   // i-1, j+1, k
    mask[12] = iUp   && jDown; // i+1, j-1, k
    mask[13] = iDown && jDown; // i-1, j-1, k
    mask[14] = jDown && kUp;   // i, j-1, k+1
    mask[15] = jDown && kDown; // i, j-1, k-1
    mask[16] = jUp   && kUp;   // i, j+1, k+1
    mask[17] = jUp   && kDown; // i, j+1, k-1

    // Corner adjacent neighbours
    mask[18] = iDown && jDown && kDown; // i-1, j-1, k-1
    mask[19] = iDown && jDown && kUp;   // i-1, j-1, k+1
    mask[20] = iUp   && jDown && kUp;   // i+1, j-1, k+1
    mask[21] = iUp   && jDown && kDown; // i+1, j-1, k-1
    mask[22] = iDown && jUp   && kDown; // i-1, j+1, k-1
    mask[23] = iDown && jUp   && kUp;   // i-1, j+1, k+1
    mask[24] = iUp   && jUp   && kUp;   // i+1, j+1, k+1
    mask[25] = iUp   && jUp   && kDown; // i+1, j+1, k-1
}
/// @brief Apply @a Compare::check to the node-internal neighbours of the
///  voxel at linear offset @a pos; return true on the first match.
/// @param pos   linear voxel offset (x-major layout:
///              pos = x*DIM*DIM + y*DIM + z, so z steps cost +/-1,
///              y steps +/-DIM and x steps +/-DIM*DIM).
/// @param data  the leaf node's value buffer.
/// @param mask  flags from maskNodeInternalNeighbours(); only neighbours
///              marked true (inside this node) are inspected here — the
///              remainder are covered by the accessor-based overload.
template<typename Compare, typename LeafNodeType>
inline bool
checkNeighbours(const Index pos, const typename LeafNodeType::ValueType * data, bool (&mask)[26])
{
    using NodeT = LeafNodeType;

    // Face adjacent neighbours
    // i, j, k - 1
    if (mask[5] && Compare::check(data[pos - 1])) return true;
    // i, j, k + 1
    if (mask[4] && Compare::check(data[pos + 1])) return true;
    // i, j - 1, k
    if (mask[3] && Compare::check(data[pos - NodeT::DIM])) return true;
    // i, j + 1, k
    if (mask[2] && Compare::check(data[pos + NodeT::DIM])) return true;
    // i - 1, j, k
    if (mask[1] && Compare::check(data[pos - NodeT::DIM * NodeT::DIM])) return true;
    // i + 1, j, k
    if (mask[0] && Compare::check(data[pos + NodeT::DIM * NodeT::DIM])) return true;

    // Edge adjacent neighbours
    // i+1, j, k-1
    // (bug fix: the "- 1" z-step was missing, so this re-checked the
    //  i+1 face voxel instead of the (i+1, k-1) edge neighbour)
    if (mask[6] && Compare::check(data[pos + NodeT::DIM * NodeT::DIM - 1])) return true;
    // i-1, j, k-1
    if (mask[7] && Compare::check(data[pos - NodeT::DIM * NodeT::DIM - 1])) return true;
    // i+1, j, k+1
    if (mask[8] && Compare::check(data[pos + NodeT::DIM * NodeT::DIM + 1])) return true;
    // i-1, j, k+1
    if (mask[9] && Compare::check(data[pos - NodeT::DIM * NodeT::DIM + 1])) return true;
    // i+1, j+1, k
    if (mask[10] && Compare::check(data[pos + NodeT::DIM * NodeT::DIM + NodeT::DIM])) return true;
    // i-1, j+1, k
    if (mask[11] && Compare::check(data[pos - NodeT::DIM * NodeT::DIM + NodeT::DIM])) return true;
    // i+1, j-1, k
    if (mask[12] && Compare::check(data[pos + NodeT::DIM * NodeT::DIM - NodeT::DIM])) return true;
    // i-1, j-1, k
    if (mask[13] && Compare::check(data[pos - NodeT::DIM * NodeT::DIM - NodeT::DIM])) return true;
    // i, j-1, k+1
    if (mask[14] && Compare::check(data[pos - NodeT::DIM + 1])) return true;
    // i, j-1, k-1
    if (mask[15] && Compare::check(data[pos - NodeT::DIM - 1])) return true;
    // i, j+1, k+1
    if (mask[16] && Compare::check(data[pos + NodeT::DIM + 1])) return true;
    // i, j+1, k-1
    if (mask[17] && Compare::check(data[pos + NodeT::DIM - 1])) return true;

    // Corner adjacent neighbours
    // i-1, j-1, k-1
    if (mask[18] && Compare::check(data[pos - NodeT::DIM * NodeT::DIM - NodeT::DIM - 1])) return true;
    // i-1, j-1, k+1
    if (mask[19] && Compare::check(data[pos - NodeT::DIM * NodeT::DIM - NodeT::DIM + 1])) return true;
    // i+1, j-1, k+1
    if (mask[20] && Compare::check(data[pos + NodeT::DIM * NodeT::DIM - NodeT::DIM + 1])) return true;
    // i+1, j-1, k-1
    if (mask[21] && Compare::check(data[pos + NodeT::DIM * NodeT::DIM - NodeT::DIM - 1])) return true;
    // i-1, j+1, k-1
    if (mask[22] && Compare::check(data[pos - NodeT::DIM * NodeT::DIM + NodeT::DIM - 1])) return true;
    // i-1, j+1, k+1
    if (mask[23] && Compare::check(data[pos - NodeT::DIM * NodeT::DIM + NodeT::DIM + 1])) return true;
    // i+1, j+1, k+1
    if (mask[24] && Compare::check(data[pos + NodeT::DIM * NodeT::DIM + NodeT::DIM + 1])) return true;
    // i+1, j+1, k-1
    if (mask[25] && Compare::check(data[pos + NodeT::DIM * NodeT::DIM + NodeT::DIM - 1])) return true;

    return false;
}
/// @brief Apply @a Compare::check to the 26-neighbourhood voxels that fall
///  OUTSIDE the current leaf node (i.e. whose mask flag is false), fetching
///  values through the tree accessor; return true on the first match.
template<typename Compare, typename AccessorType>
inline bool
checkNeighbours(const Coord& ijk, AccessorType& acc, bool (&mask)[26])
{
    for (Int32 m = 0; m < 26; ++m) {
        // Neighbours inside the node were already handled by the
        // buffer-based overload.
        if (mask[m]) continue;
        if (Compare::check(acc.getValue(ijk + util::COORD_OFFSETS[m]))) {
            return true;
        }
    }
    return false;
}
/// @brief TBB body that rejects spurious intersecting-band voxels:
///  a voxel with distance in [0, 0.75] that has no negative (interior)
///  neighbour anywhere in its 26-neighbourhood is pushed just outside the
///  band so it is no longer treated as surface-intersecting.
template<typename TreeType>
struct ValidateIntersectingVoxels
{
    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;

    // Neighbour predicate: true for interior (negative) values.
    struct IsNegative { static bool check(const ValueType v) { return v < ValueType(0.0); } };

    /// @param tree   distance tree (read through an accessor for
    ///               out-of-node neighbours).
    /// @param nodes  leaf nodes whose voxels are validated in place.
    ValidateIntersectingVoxels(TreeType& tree, std::vector<LeafNodeType*>& nodes)
        : mTree(&tree)
        , mNodes(nodes.empty() ? nullptr : &nodes[0])
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        tree::ValueAccessor<const TreeType> acc(*mTree);
        bool neighbourMask[26];

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            LeafNodeType& node = *mNodes[n];
            ValueType* data = node.buffer().data();

            typename LeafNodeType::ValueOnCIter it;
            for (it = node.cbeginValueOn(); it; ++it) {

                const Index pos = it.pos();

                ValueType& dist = data[pos];
                // Only band voxels [0, 0.75] need validation.
                if (dist < 0.0 || dist > 0.75) continue;

                // Mask node internal neighbours
                maskNodeInternalNeighbours<LeafNodeType>(pos, neighbourMask);

                // Check in-node neighbours first (cheap buffer reads), then
                // the remaining neighbours through the accessor.
                const bool hasNegativeNeighbour =
                    checkNeighbours<IsNegative, LeafNodeType>(pos, data, neighbourMask) ||
                    checkNeighbours<IsNegative>(node.offsetToGlobalCoord(pos), acc, neighbourMask);

                if (!hasNegativeNeighbour) {
                    // push over boundary voxel distance
                    dist = ValueType(0.75) + Tolerance<ValueType>::epsilon();
                }
            }
        }
    }

    TreeType         * const mTree;
    LeafNodeType    ** const mNodes;
}; // ValidateIntersectingVoxels
/// @brief TBB body that deactivates isolated exterior voxels: an active voxel
///  with distance > 0.75 that has no neighbour with distance <= 0.75 is
///  switched off in both the distance tree and the primitive-index tree.
template<typename TreeType>
struct RemoveSelfIntersectingSurface
{
    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using Int32TreeType = typename TreeType::template ValueConverter<Int32>::Type;

    // Neighbour predicate: true for values at or below the 0.75 band limit.
    struct Comp { static bool check(const ValueType v) { return !(v > ValueType(0.75)); } };

    /// @param nodes      distance leaf nodes to process.
    /// @param distTree   distance tree (read only here).
    /// @param indexTree  matching primitive-index tree (voxels switched off
    ///                   in lock-step with the distance tree).
    RemoveSelfIntersectingSurface(std::vector<LeafNodeType*>& nodes,
        TreeType& distTree, Int32TreeType& indexTree)
        : mNodes(nodes.empty() ? nullptr : &nodes[0])
        , mDistTree(&distTree)
        , mIndexTree(&indexTree)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        tree::ValueAccessor<const TreeType> distAcc(*mDistTree);
        tree::ValueAccessor<Int32TreeType> idxAcc(*mIndexTree);
        bool neighbourMask[26];

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            LeafNodeType& distNode = *mNodes[n];
            ValueType* data = distNode.buffer().data();

            // Index leaf is assumed to exist at the same origin.
            typename Int32TreeType::LeafNodeType* idxNode =
                idxAcc.probeLeaf(distNode.origin());

            typename LeafNodeType::ValueOnCIter it;
            for (it = distNode.cbeginValueOn(); it; ++it) {

                const Index pos = it.pos();

                // Only voxels strictly outside the band are candidates.
                if (!(data[pos] > 0.75)) continue;

                // Mask node internal neighbours
                maskNodeInternalNeighbours<LeafNodeType>(pos, neighbourMask);

                const bool hasBoundaryNeighbour =
                    checkNeighbours<Comp, LeafNodeType>(pos, data, neighbourMask) ||
                    checkNeighbours<Comp>(distNode.offsetToGlobalCoord(pos),distAcc,neighbourMask);

                // No neighbour connects this voxel to the band: drop it.
                if (!hasBoundaryNeighbour) {
                    distNode.setValueOff(pos);
                    idxNode->setValueOff(pos);
                }
            }
        }
    }

    LeafNodeType   * * const mNodes;
    TreeType         * const mDistTree;
    Int32TreeType    * const mIndexTree;
}; // RemoveSelfIntersectingSurface
////////////////////////////////////////
/// @brief TBB body that clears the child mask of each internal node in the
///  range, so the nodes stop reporting their children.
/// @note NOTE(review): presumably used after the child pointers have been
///  captured elsewhere (see releaseLeafNodes) so they are not double-owned —
///  confirm against callers.
template<typename NodeType>
struct ReleaseChildNodes
{
    ReleaseChildNodes(NodeType ** nodes) : mNodes(nodes) {}

    void operator()(const tbb::blocked_range<size_t>& range) const {

        using NodeMaskType = typename NodeType::NodeMaskType;

        for (size_t i = range.begin(); i != range.end(); ++i) {
            NodeType& node = *mNodes[i];
            // getChildMask() returns a const reference; cast away constness
            // to clear every child bit in place.
            const_cast<NodeMaskType&>(node.getChildMask()).setOff();
        }
    }

    NodeType ** const mNodes;
};
/// @brief Clear, in parallel, the child masks of all level-1 internal nodes
///  of @a tree (the nodes directly above the leaf level).
/// @note NOTE(review): this makes the internal nodes forget their leaf
///  children; presumably the leaf pointers are held elsewhere at this point —
///  confirm against callers before reuse.
template<typename TreeType>
inline void
releaseLeafNodes(TreeType& tree)
{
    using RootNodeType = typename TreeType::RootNodeType;
    using NodeChainType = typename RootNodeType::NodeChainType;
    // Level-1 node type: the direct parent of the leaf nodes.
    using InternalNodeType = typename NodeChainType::template Get<1>;

    std::vector<InternalNodeType*> nodes;
    tree.getNodes(nodes);

    tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()),
        ReleaseChildNodes<InternalNodeType>(nodes.empty() ? nullptr : &nodes[0]));
}
/// @brief Task body that moves all leaf nodes out of @a rhsTree: leaves whose
///  origin is not yet occupied in @a lhsTree are inserted there; leaves that
///  overlap an existing lhs leaf are collected in @a overlappingNodes for the
///  caller to merge (ownership of those nodes passes to the caller).
template<typename TreeType>
struct StealUniqueLeafNodes
{
    using LeafNodeType = typename TreeType::LeafNodeType;

    StealUniqueLeafNodes(TreeType& lhsTree, TreeType& rhsTree,
        std::vector<LeafNodeType*>& overlappingNodes)
        : mLhsTree(&lhsTree)
        , mRhsTree(&rhsTree)
        , mNodes(&overlappingNodes)
    {
    }

    void operator()() const {

        std::vector<LeafNodeType*> rhsLeafNodes;

        rhsLeafNodes.reserve(mRhsTree->leafCount());
        //mRhsTree->getNodes(rhsLeafNodes);
        //releaseLeafNodes(*mRhsTree);
        // stealNodes() detaches the leaves from rhs in one pass.
        mRhsTree->stealNodes(rhsLeafNodes);

        tree::ValueAccessor<TreeType> acc(*mLhsTree);

        for (size_t n = 0, N = rhsLeafNodes.size(); n < N; ++n) {
            if (!acc.probeLeaf(rhsLeafNodes[n]->origin())) {
                // No lhs leaf at this origin: adopt the rhs leaf directly.
                acc.addLeaf(rhsLeafNodes[n]);
            } else {
                // Overlap: defer to the caller's merge step.
                mNodes->push_back(rhsLeafNodes[n]);
            }
        }
    }

private:
    TreeType * const mLhsTree;
    TreeType * const mRhsTree;
    std::vector<LeafNodeType*> * const mNodes;
};
/// @brief Merge the rhs distance/index tree pair into the lhs pair:
///  unique leaf nodes are stolen wholesale (in parallel for the two trees),
///  then leaf nodes that overlap existing lhs leaves are combined voxel by
///  voxel via CombineLeafNodes.
template<typename DistTreeType, typename IndexTreeType>
inline void
combineData(DistTreeType& lhsDist, IndexTreeType& lhsIdx,
    DistTreeType& rhsDist, IndexTreeType& rhsIdx)
{
    using DistLeafNodeType = typename DistTreeType::LeafNodeType;
    using IndexLeafNodeType = typename IndexTreeType::LeafNodeType;

    std::vector<DistLeafNodeType*> overlappingDistNodes;
    std::vector<IndexLeafNodeType*> overlappingIdxNodes;

    // Steal unique leafnodes
    tbb::task_group tasks;
    tasks.run(StealUniqueLeafNodes<DistTreeType>(lhsDist, rhsDist, overlappingDistNodes));
    tasks.run(StealUniqueLeafNodes<IndexTreeType>(lhsIdx, rhsIdx, overlappingIdxNodes));
    tasks.wait();

    // Combine overlapping leaf nodes
    if (!overlappingDistNodes.empty() && !overlappingIdxNodes.empty()) {
        tbb::parallel_for(tbb::blocked_range<size_t>(0, overlappingDistNodes.size()),
            CombineLeafNodes<DistTreeType>(lhsDist, lhsIdx,
                &overlappingDistNodes[0], &overlappingIdxNodes[0]));
    }
}
/// @brief TBB body object to voxelize a mesh of triangles and/or quads into a collection
/// of VDB grids, namely a squared distance grid, a closest primitive grid and an
/// intersecting voxels grid (masks the mesh intersecting voxels)
/// @note Only the leaf nodes that intersect the mesh are allocated, and only voxels in
/// a narrow band (of two to three voxels in proximity to the mesh's surface) are activated.
/// They are populated with distance values and primitive indices.
template<typename TreeType>
struct VoxelizationData {

    using Ptr = std::unique_ptr<VoxelizationData>;
    using ValueType = typename TreeType::ValueType;

    using Int32TreeType = typename TreeType::template ValueConverter<Int32>::Type;
    using UCharTreeType = typename TreeType::template ValueConverter<unsigned char>::Type;

    using FloatTreeAcc = tree::ValueAccessor<TreeType>;
    using Int32TreeAcc = tree::ValueAccessor<Int32TreeType>;
    using UCharTreeAcc = tree::ValueAccessor<UCharTreeType>;

    // Backgrounds: +max squared distance, invalid primitive index, and the
    // MaxPrimId sentinel for the scratch tree.
    VoxelizationData()
        : distTree(std::numeric_limits<ValueType>::max())
        , distAcc(distTree)
        , indexTree(Int32(util::INVALID_IDX))
        , indexAcc(indexTree)
        , primIdTree(MaxPrimId)
        , primIdAcc(primIdTree)
        , mPrimCount(0)
    {
    }

    TreeType        distTree;   // squared distances to the closest primitive
    FloatTreeAcc    distAcc;

    Int32TreeType   indexTree;  // closest-primitive indices
    Int32TreeAcc    indexAcc;

    UCharTreeType   primIdTree; // per-primitive visit marks (scratch)
    UCharTreeAcc    primIdAcc;

    /// @brief Hand out the next scratch id, recycling the scratch tree when
    ///  the id space or the tree itself grows too large.
    unsigned char getNewPrimId() {

        /// @warning Don't use parallel methods here!
        /// The primIdTree is used as a "scratch" pad to mark visits for a given polygon
        /// into voxels which it may contribute to. The tree is kept as lightweight as
        /// possible and is reset when a maximum count or size is reached. A previous
        /// bug here occurred due to the calling of tree methods with multi-threaded
        /// implementations, resulting in nested parallelization and re-use of the TLS
        /// from the initial task. This consequently resulted in non deterministic values
        /// of mPrimCount on the return of the initial task, and could potentially end up
        /// with a mPrimCount equal to that of the MaxPrimId. This is used as the background
        /// value of the scratch tree.
        /// @see jira.aswf.io/browse/OVDB-117, PR #564
        /// @todo Consider profiling this operator with tree.clear() and Investigate the
        ///   chosen value of MaxPrimId

        if (mPrimCount == MaxPrimId || primIdTree.leafCount() > 1000) {
            mPrimCount = 0;
            primIdTree.root().clear();
            primIdTree.clearAllAccessors();
            assert(mPrimCount == 0);
        }

        return mPrimCount++;
    }

private:

    enum { MaxPrimId = 100 };

    unsigned char mPrimCount;
};
/// @brief TBB body that rasterizes a range of mesh primitives (triangles and
///  quads) into per-thread VoxelizationData. Quads are rasterized as two
///  triangles by re-fetching vertex 3 into @c prim.b. Spatially large
///  primitives are recursively subdivided into parallel subtasks.
template<typename TreeType, typename MeshDataAdapter, typename Interrupter = util::NullInterrupter>
class VoxelizePolygons
{
public:

    using VoxelizationDataType = VoxelizationData<TreeType>;
    using DataTable = tbb::enumerable_thread_specific<typename VoxelizationDataType::Ptr>;

    /// @param dataTable    thread-local voxelization outputs.
    /// @param mesh         adapter exposing vertex/polygon data.
    /// @param interrupter  optional cooperative-cancellation hook.
    VoxelizePolygons(DataTable& dataTable,
        const MeshDataAdapter& mesh,
        Interrupter* interrupter = nullptr)
        : mDataTable(&dataTable)
        , mMesh(&mesh)
        , mInterrupter(interrupter)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        typename VoxelizationDataType::Ptr& dataPtr = mDataTable->local();
        if (!dataPtr) dataPtr.reset(new VoxelizationDataType());

        Triangle prim;

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            if (this->wasInterrupted()) {
                tbb::task::self().cancel_group_execution();
                break;
            }

            const size_t numVerts = mMesh->vertexCount(n);

            // rasterize triangles and quads.
            if (numVerts == 3 || numVerts == 4) {

                prim.index = Int32(n);

                mMesh->getIndexSpacePoint(n, 0, prim.a);
                mMesh->getIndexSpacePoint(n, 1, prim.b);
                mMesh->getIndexSpacePoint(n, 2, prim.c);

                evalTriangle(prim, *dataPtr);

                // Second triangle of a quad: replace b with vertex 3.
                if (numVerts == 4) {
                    mMesh->getIndexSpacePoint(n, 3, prim.b);
                    evalTriangle(prim, *dataPtr);
                }
            }
        }
    }

private:

    bool wasInterrupted() const { return mInterrupter && mInterrupter->wasInterrupted(); }

    // One triangle in index space plus the originating primitive index.
    struct Triangle { Vec3d a, b, c; Int32 index; };

    /// @brief Deferred rasterization of one (possibly further subdivided)
    ///  triangle, run inside a tbb::task_group.
    struct SubTask
    {
        enum { POLYGON_LIMIT = 1000 };

        SubTask(const Triangle& prim, DataTable& dataTable,
            int subdivisionCount, size_t polygonCount,
            Interrupter* interrupter = nullptr)
            : mLocalDataTable(&dataTable)
            , mPrim(prim)
            , mSubdivisionCount(subdivisionCount)
            , mPolygonCount(polygonCount)
            , mInterrupter(interrupter)
        {
        }

        void operator()() const
        {
            // Stop subdividing once the budget is spent or enough parallel
            // work exists; otherwise split into four subtasks.
            if (mSubdivisionCount <= 0 || mPolygonCount >= POLYGON_LIMIT) {

                typename VoxelizationDataType::Ptr& dataPtr = mLocalDataTable->local();
                if (!dataPtr) dataPtr.reset(new VoxelizationDataType());
                voxelizeTriangle(mPrim, *dataPtr, mInterrupter);

            } else if (!(mInterrupter && mInterrupter->wasInterrupted())) {
                spawnTasks(mPrim, *mLocalDataTable, mSubdivisionCount, mPolygonCount, mInterrupter);
            }
        }

        DataTable   * const mLocalDataTable;
        Triangle      const mPrim;
        int           const mSubdivisionCount;
        size_t        const mPolygonCount;
        Interrupter * const mInterrupter;
    }; // struct SubTask

    /// @brief Number of subdivision levels for a triangle, derived from its
    ///  axis-aligned extent relative to twice the leaf-node dimension.
    inline static int evalSubdivisionCount(const Triangle& prim)
    {
        const double ax = prim.a[0], bx = prim.b[0], cx = prim.c[0];
        const double dx = std::max(ax, std::max(bx, cx)) - std::min(ax, std::min(bx, cx));

        const double ay = prim.a[1], by = prim.b[1], cy = prim.c[1];
        const double dy = std::max(ay, std::max(by, cy)) - std::min(ay, std::min(by, cy));

        const double az = prim.a[2], bz = prim.b[2], cz = prim.c[2];
        const double dz = std::max(az, std::max(bz, cz)) - std::min(az, std::min(bz, cz));

        return int(std::max(dx, std::max(dy, dz)) / double(TreeType::LeafNodeType::DIM * 2));
    }

    /// @brief Rasterize @a prim directly, or subdivide it when the mesh has
    ///  too few polygons to saturate the thread pool.
    void evalTriangle(const Triangle& prim, VoxelizationDataType& data) const
    {
        const size_t polygonCount = mMesh->polygonCount();
        const int subdivisionCount =
            polygonCount < SubTask::POLYGON_LIMIT ? evalSubdivisionCount(prim) : 0;

        if (subdivisionCount <= 0) {
            voxelizeTriangle(prim, data, mInterrupter);
        } else {
            spawnTasks(prim, *mDataTable, subdivisionCount, polygonCount, mInterrupter);
        }
    }

    /// @brief Split @a mainPrim at its edge midpoints into four sub-triangles
    ///  and process them as parallel SubTasks.
    static void spawnTasks(
        const Triangle& mainPrim,
        DataTable& dataTable,
        int subdivisionCount,
        size_t polygonCount,
        Interrupter* const interrupter)
    {
        subdivisionCount -= 1;
        polygonCount *= 4;

        tbb::task_group tasks;

        const Vec3d ac = (mainPrim.a + mainPrim.c) * 0.5;
        const Vec3d bc = (mainPrim.b + mainPrim.c) * 0.5;
        const Vec3d ab = (mainPrim.a + mainPrim.b) * 0.5;

        Triangle prim;
        prim.index = mainPrim.index;

        prim.a = mainPrim.a;
        prim.b = ab;
        prim.c = ac;
        tasks.run(SubTask(prim, dataTable, subdivisionCount, polygonCount, interrupter));

        prim.a = ab;
        prim.b = bc;
        prim.c = ac;
        tasks.run(SubTask(prim, dataTable, subdivisionCount, polygonCount, interrupter));

        prim.a = ab;
        prim.b = mainPrim.b;
        prim.c = bc;
        tasks.run(SubTask(prim, dataTable, subdivisionCount, polygonCount, interrupter));

        prim.a = ac;
        prim.b = bc;
        prim.c = mainPrim.c;
        tasks.run(SubTask(prim, dataTable, subdivisionCount, polygonCount, interrupter));

        tasks.wait();
    }

    /// @brief Flood-fill voxelization: starting from the voxel containing
    ///  vertex a, expand through the 26-neighbourhood as long as
    ///  updateDistance() reports the primitive still intersects the voxel.
    ///  The primId scratch tree prevents revisiting voxels for this pass.
    static void voxelizeTriangle(const Triangle& prim, VoxelizationDataType& data, Interrupter* const interrupter)
    {
        std::deque<Coord> coordList;
        Coord ijk, nijk;

        ijk = Coord::floor(prim.a);
        coordList.push_back(ijk);

        // The first point may not be quite in bounds, and rely
        // on one of the neighbours to have the first valid seed,
        // so we cannot early-exit here.

        updateDistance(ijk, prim, data);

        unsigned char primId = data.getNewPrimId();
        data.primIdAcc.setValueOnly(ijk, primId);

        while (!coordList.empty()) {

            if (interrupter && interrupter->wasInterrupted()) {
                tbb::task::self().cancel_group_execution();
                break;
            }

            // Process in batches between interrupt checks.
            for (Int32 pass = 0; pass < 1048576 && !coordList.empty(); ++pass) {

                ijk = coordList.back();
                coordList.pop_back();

                for (Int32 i = 0; i < 26; ++i) {
                    nijk = ijk + util::COORD_OFFSETS[i];

                    // Unvisited (for this primId): mark and evaluate.
                    if (primId != data.primIdAcc.getValue(nijk)) {
                        data.primIdAcc.setValueOnly(nijk, primId);
                        if(updateDistance(nijk, prim, data)) coordList.push_back(nijk);
                    }
                }
            }
        }
    }

    /// @brief Min-combine the squared distance from the voxel centre to
    ///  @a prim into the distance/index trees.
    /// @return true if the primitive intersects the voxel (dist <= 0.75),
    ///  which keeps the flood fill expanding.
    static bool updateDistance(const Coord& ijk, const Triangle& prim, VoxelizationDataType& data)
    {
        Vec3d uvw, voxelCenter(ijk[0], ijk[1], ijk[2]);

        using ValueType = typename TreeType::ValueType;

        const ValueType dist = ValueType((voxelCenter -
            closestPointOnTriangleToPoint(prim.a, prim.c, prim.b, voxelCenter, uvw)).lengthSqr());

        // Either the points may be NAN, or they could be far enough from
        // the origin that computing distance fails.
        if (std::isnan(dist))
            return false;

        const ValueType oldDist = data.distAcc.getValue(ijk);

        if (dist < oldDist) {
            data.distAcc.setValue(ijk, dist);
            data.indexAcc.setValue(ijk, prim.index);
        } else if (math::isExactlyEqual(dist, oldDist)) {
            // makes reduction deterministic when different polygons
            // produce the same distance value.
            data.indexAcc.setValueOnly(ijk, std::min(prim.index, data.indexAcc.getValue(ijk)));
        }

        return !(dist > 0.75); // true if the primitive intersects the voxel.
    }

    DataTable                 * const mDataTable;
    MeshDataAdapter     const * const mMesh;
    Interrupter               * const mInterrupter;
}; // VoxelizePolygons
////////////////////////////////////////
/// @brief TBB body that removes, from each bool mask leaf, the active states
///  of the co-located leaf in @a rhsTree (topology difference, value false).
template<typename TreeType>
struct DiffLeafNodeMask
{
    using AccessorType = typename tree::ValueAccessor<TreeType>;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;

    /// @param rhsTree   tree whose topology is subtracted.
    /// @param lhsNodes  bool mask leaf nodes updated in place.
    DiffLeafNodeMask(const TreeType& rhsTree,
        std::vector<BoolLeafNodeType*>& lhsNodes)
        : mRhsTree(&rhsTree), mLhsNodes(lhsNodes.empty() ? nullptr : &lhsNodes[0])
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        tree::ValueAccessor<const TreeType> acc(*mRhsTree);

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            BoolLeafNodeType* lhsNode = mLhsNodes[n];
            const LeafNodeType* rhsNode = acc.probeConstLeaf(lhsNode->origin());

            // No rhs leaf at this origin means nothing to subtract.
            if (rhsNode) lhsNode->topologyDifference(*rhsNode, false);
        }
    }

private:
    TreeType         const * const mRhsTree;
    BoolLeafNodeType      ** const mLhsNodes;
};
/// @brief TBB body that merges the active-value topology of each node in
///  @a nodesB into the node at the same position in @a nodesA.
///  The two vectors are expected to be parallel (same length, matching order).
template<typename LeafNodeTypeA, typename LeafNodeTypeB>
struct UnionValueMasks
{
    UnionValueMasks(std::vector<LeafNodeTypeA*>& nodesA, std::vector<LeafNodeTypeB*>& nodesB)
        : mNodesA(nodesA.empty() ? nullptr : &nodesA[0])
        , mNodesB(nodesB.empty() ? nullptr : &nodesB[0])
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        for (size_t i = range.begin(), last = range.end(); i != last; ++i) {
            LeafNodeTypeA* target = mNodesA[i];
            target->topologyUnion(*mNodesB[i]);
        }
    }

private:
    LeafNodeTypeA ** const mNodesA;
    LeafNodeTypeB ** const mNodesB;
};
/// @brief TBB reduction body that builds a bool mask tree of the one-voxel
///  dilation front: for every active voxel in the given nodes, each of its
///  six face neighbours that is NOT active in the source tree is activated
///  in the mask tree. In-node neighbours are set directly on the co-located
///  mask leaf; out-of-node neighbours go through the mask accessor.
template<typename TreeType>
struct ConstructVoxelMask
{
    using LeafNodeType = typename TreeType::LeafNodeType;

    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;

    /// @param maskTree  output mask (the reduction target).
    /// @param tree      source tree (read only).
    /// @param nodes     source leaf nodes to scan.
    ConstructVoxelMask(BoolTreeType& maskTree, const TreeType& tree,
        std::vector<LeafNodeType*>& nodes)
        : mTree(&tree)
        , mNodes(nodes.empty() ? nullptr : &nodes[0])
        , mLocalMaskTree(false)
        , mMaskTree(&maskTree)
    {
    }

    // Splits write into a private local mask tree; merged back in join().
    ConstructVoxelMask(ConstructVoxelMask& rhs, tbb::split)
        : mTree(rhs.mTree)
        , mNodes(rhs.mNodes)
        , mLocalMaskTree(false)
        , mMaskTree(&mLocalMaskTree)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range)
    {
        using Iterator = typename LeafNodeType::ValueOnCIter;

        tree::ValueAccessor<const TreeType> acc(*mTree);
        tree::ValueAccessor<BoolTreeType> maskAcc(*mMaskTree);

        Coord ijk, nijk, localCorod;
        Index pos, npos;

        for (size_t n = range.begin(); n != range.end(); ++n) {

            LeafNodeType& node = *mNodes[n];

            CoordBBox bbox = node.getNodeBoundingBox();
            bbox.expand(-1);

            BoolLeafNodeType& maskNode = *maskAcc.touchLeaf(node.origin());

            for (Iterator it = node.cbeginValueOn(); it; ++it) {
                ijk = it.getCoord();
                pos = it.pos();

                localCorod = LeafNodeType::offsetToLocalCoord(pos);

                // z+1 neighbour: in-node fast path vs. accessor fallback.
                if (localCorod[2] < int(LeafNodeType::DIM - 1)) {
                    npos = pos + 1;
                    if (!node.isValueOn(npos)) maskNode.setValueOn(npos);
                } else {
                    nijk = ijk.offsetBy(0, 0, 1);
                    if (!acc.isValueOn(nijk)) maskAcc.setValueOn(nijk);
                }

                // z-1 neighbour.
                if (localCorod[2] > 0) {
                    npos = pos - 1;
                    if (!node.isValueOn(npos)) maskNode.setValueOn(npos);
                } else {
                    nijk = ijk.offsetBy(0, 0, -1);
                    if (!acc.isValueOn(nijk)) maskAcc.setValueOn(nijk);
                }

                // y+1 neighbour.
                if (localCorod[1] < int(LeafNodeType::DIM - 1)) {
                    npos = pos + LeafNodeType::DIM;
                    if (!node.isValueOn(npos)) maskNode.setValueOn(npos);
                } else {
                    nijk = ijk.offsetBy(0, 1, 0);
                    if (!acc.isValueOn(nijk)) maskAcc.setValueOn(nijk);
                }

                // y-1 neighbour.
                if (localCorod[1] > 0) {
                    npos = pos - LeafNodeType::DIM;
                    if (!node.isValueOn(npos)) maskNode.setValueOn(npos);
                } else {
                    nijk = ijk.offsetBy(0, -1, 0);
                    if (!acc.isValueOn(nijk)) maskAcc.setValueOn(nijk);
                }

                // x+1 neighbour.
                if (localCorod[0] < int(LeafNodeType::DIM - 1)) {
                    npos = pos + LeafNodeType::DIM * LeafNodeType::DIM;
                    if (!node.isValueOn(npos)) maskNode.setValueOn(npos);
                } else {
                    nijk = ijk.offsetBy(1, 0, 0);
                    if (!acc.isValueOn(nijk)) maskAcc.setValueOn(nijk);
                }

                // x-1 neighbour.
                if (localCorod[0] > 0) {
                    npos = pos - LeafNodeType::DIM * LeafNodeType::DIM;
                    if (!node.isValueOn(npos)) maskNode.setValueOn(npos);
                } else {
                    nijk = ijk.offsetBy(-1, 0, 0);
                    if (!acc.isValueOn(nijk)) maskAcc.setValueOn(nijk);
                }
            }
        }
    }

    // Merge a split body's local mask back into the shared mask tree.
    void join(ConstructVoxelMask& rhs) { mMaskTree->merge(*rhs.mMaskTree); }

private:
    TreeType const   * const mTree;
    LeafNodeType    ** const mNodes;

    BoolTreeType             mLocalMaskTree;
    BoolTreeType     * const mMaskTree;
};
/// @note The interior and exterior widths should be in world space units and squared.
template<typename TreeType, typename MeshDataAdapter>
struct ExpandNarrowband
{
using ValueType = typename TreeType::ValueType;
using LeafNodeType = typename TreeType::LeafNodeType;
using NodeMaskType = typename LeafNodeType::NodeMaskType;
using Int32TreeType = typename TreeType::template ValueConverter<Int32>::Type;
using Int32LeafNodeType = typename Int32TreeType::LeafNodeType;
using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;
/// @brief Snapshot of one narrow-band voxel: closest-primitive index,
///  voxel coordinates and distance value.  Ordered by primitive index so
///  gathered lists can be sorted and scanned per primitive.
struct Fragment
{
    Int32 idx, x, y, z;
    ValueType dist;

    Fragment() : idx(0), x(0), y(0), z(0), dist(0.0) {}

    Fragment(Int32 idx_, Int32 x_, Int32 y_, Int32 z_, ValueType dist_)
        : idx(idx_), x(x_), y(y_), z(z_), dist(dist_)
    {
    }

    // Sort key: primitive index only.
    bool operator<(const Fragment& rhs) const { return idx < rhs.idx; }
}; // struct Fragment
////////////////////
/// @brief Construct the reduction body.
/// @param maskNodes          leaf nodes of the dilation-front mask.
/// @param maskTree           the mask tree those nodes belong to.
/// @param distTree           distance tree to expand.
/// @param indexTree          matching closest-primitive index tree.
/// @param mesh               mesh adapter for distance evaluation.
/// @param exteriorBandWidth  exterior band limit (world space, squared —
///                           see the note above this struct).
/// @param interiorBandWidth  interior band limit (world space, squared).
/// @param voxelSize          voxel size of the target grid.
ExpandNarrowband(
    std::vector<BoolLeafNodeType*>& maskNodes,
    BoolTreeType& maskTree,
    TreeType& distTree,
    Int32TreeType& indexTree,
    const MeshDataAdapter& mesh,
    ValueType exteriorBandWidth,
    ValueType interiorBandWidth,
    ValueType voxelSize)
    : mMaskNodes(maskNodes.empty() ? nullptr : &maskNodes[0])
    , mMaskTree(&maskTree)
    , mDistTree(&distTree)
    , mIndexTree(&indexTree)
    , mMesh(&mesh)
    , mNewMaskTree(false)
    , mDistNodes()
    , mUpdatedDistNodes()
    , mIndexNodes()
    , mUpdatedIndexNodes()
    , mExteriorBandWidth(exteriorBandWidth)
    , mInteriorBandWidth(interiorBandWidth)
    , mVoxelSize(voxelSize)
{
}
/// @brief TBB split constructor: shares the read-only inputs, but starts
///  with empty output node lists and an empty local mask tree, merged back
///  via join().
ExpandNarrowband(const ExpandNarrowband& rhs, tbb::split)
    : mMaskNodes(rhs.mMaskNodes)
    , mMaskTree(rhs.mMaskTree)
    , mDistTree(rhs.mDistTree)
    , mIndexTree(rhs.mIndexTree)
    , mMesh(rhs.mMesh)
    , mNewMaskTree(false)
    , mDistNodes()
    , mUpdatedDistNodes()
    , mIndexNodes()
    , mUpdatedIndexNodes()
    , mExteriorBandWidth(rhs.mExteriorBandWidth)
    , mInteriorBandWidth(rhs.mInteriorBandWidth)
    , mVoxelSize(rhs.mVoxelSize)
{
}
/// @brief Merge a split body's outputs: concatenates the new/updated node
///  lists and merges the new-mask tree for the next dilation iteration.
void join(ExpandNarrowband& rhs)
{
    mDistNodes.insert(mDistNodes.end(), rhs.mDistNodes.begin(), rhs.mDistNodes.end());
    mIndexNodes.insert(mIndexNodes.end(), rhs.mIndexNodes.begin(), rhs.mIndexNodes.end());

    mUpdatedDistNodes.insert(mUpdatedDistNodes.end(),
        rhs.mUpdatedDistNodes.begin(), rhs.mUpdatedDistNodes.end());

    mUpdatedIndexNodes.insert(mUpdatedIndexNodes.end(),
        rhs.mUpdatedIndexNodes.begin(), rhs.mUpdatedIndexNodes.end());

    mNewMaskTree.merge(rhs.mNewMaskTree);
}
/// @brief Process a range of mask leaf nodes: evaluate distances for the
///  masked voxels (plus a second in-node layer), record which voxels should
///  seed the next dilation step in mNewMaskTree, and collect new/updated
///  dist and index leaf nodes for later insertion.
void operator()(const tbb::blocked_range<size_t>& range)
{
    tree::ValueAccessor<BoolTreeType>   newMaskAcc(mNewMaskTree);
    tree::ValueAccessor<TreeType>       distAcc(*mDistTree);
    tree::ValueAccessor<Int32TreeType>  indexAcc(*mIndexTree);

    std::vector<Fragment> fragments;
    fragments.reserve(256);

    // Reusable scratch nodes for origins not yet present in the dist/index
    // trees; released into mDistNodes/mIndexNodes when actually used.
    std::unique_ptr<LeafNodeType> newDistNodePt;
    std::unique_ptr<Int32LeafNodeType> newIndexNodePt;

    for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

        BoolLeafNodeType& maskNode = *mMaskNodes[n];
        if (maskNode.isEmpty()) continue;

        // Setup local caches

        const Coord& origin = maskNode.origin();

        LeafNodeType      * distNodePt = distAcc.probeLeaf(origin);
        Int32LeafNodeType * indexNodePt = indexAcc.probeLeaf(origin);

        // Dist and index trees are expected to stay topologically in sync.
        assert(!distNodePt == !indexNodePt);

        bool usingNewNodes = false;

        if (!distNodePt && !indexNodePt) {

            const ValueType backgroundDist = distAcc.getValue(origin);

            if (!newDistNodePt.get() && !newIndexNodePt.get()) {
                newDistNodePt.reset(new LeafNodeType(origin, backgroundDist));
                newIndexNodePt.reset(new Int32LeafNodeType(origin, indexAcc.getValue(origin)));
            } else {

                // Refill the recycled node only when the background sign
                // differs from its current fill value.
                if ((backgroundDist < ValueType(0.0)) !=
                        (newDistNodePt->getValue(0) < ValueType(0.0))) {
                    newDistNodePt->buffer().fill(backgroundDist);
                }

                newDistNodePt->setOrigin(origin);
                newIndexNodePt->setOrigin(origin);
            }

            distNodePt = newDistNodePt.get();
            indexNodePt = newIndexNodePt.get();

            usingNewNodes = true;
        }

        // Gather neighbour information

        CoordBBox bbox(Coord::max(), Coord::min());
        for (typename BoolLeafNodeType::ValueOnIter it = maskNode.beginValueOn(); it; ++it) {
            bbox.expand(it.getCoord());
        }

        bbox.expand(1);

        gatherFragments(fragments, bbox, distAcc, indexAcc);

        // Compute first voxel layer

        bbox = maskNode.getNodeBoundingBox();
        NodeMaskType mask;
        bool updatedLeafNodes = false;

        for (typename BoolLeafNodeType::ValueOnIter it = maskNode.beginValueOn(); it; ++it) {

            const Coord ijk = it.getCoord();

            if (updateVoxel(ijk, 5, fragments, *distNodePt, *indexNodePt, &updatedLeafNodes)) {

                // Face neighbours: queue in-node ones in the local mask,
                // out-of-node ones in the new-mask tree.
                for (Int32 i = 0; i < 6; ++i) {
                    const Coord nijk = ijk + util::COORD_OFFSETS[i];
                    if (bbox.isInside(nijk)) {
                        mask.setOn(BoolLeafNodeType::coordToOffset(nijk));
                    } else  {
                        newMaskAcc.setValueOn(nijk);
                    }
                }

                // Edge/corner neighbours: in-node only.
                for (Int32 i = 6; i < 26; ++i) {
                    const Coord nijk = ijk + util::COORD_OFFSETS[i];
                    if (bbox.isInside(nijk)) {
                        mask.setOn(BoolLeafNodeType::coordToOffset(nijk));
                    }
                }
            }
        }

        if (updatedLeafNodes) {

            // Compute second voxel layer
            // (skip voxels that already received a value above)
            mask -= indexNodePt->getValueMask();

            for (typename NodeMaskType::OnIterator it = mask.beginOn(); it; ++it) {

                const Index pos = it.pos();
                const Coord ijk = maskNode.origin() + LeafNodeType::offsetToLocalCoord(pos);

                if (updateVoxel(ijk, 6, fragments, *distNodePt, *indexNodePt)) {
                    for (Int32 i = 0; i < 6; ++i) {
                        newMaskAcc.setValueOn(ijk + util::COORD_OFFSETS[i]);
                    }
                }
            }

            // Export new distance values

            if (usingNewNodes) {
                newDistNodePt->topologyUnion(*newIndexNodePt);
                mDistNodes.push_back(newDistNodePt.release());
                mIndexNodes.push_back(newIndexNodePt.release());
            } else {
                mUpdatedDistNodes.push_back(distNodePt);
                mUpdatedIndexNodes.push_back(indexNodePt);
            }
        }
    } // end leafnode loop
}
//////////
/// @{
/// Accessors for the per-pass outputs collected by this functor:
/// the voxel mask to process on the next expansion iteration, and the
/// lists of newly allocated vs. in-place updated distance/index leaf nodes.
BoolTreeType& newMaskTree() { return mNewMaskTree; }

std::vector<LeafNodeType*>& newDistNodes() { return mDistNodes; }
std::vector<LeafNodeType*>& updatedDistNodes() { return mUpdatedDistNodes; }

std::vector<Int32LeafNodeType*>& newIndexNodes() { return mIndexNodes; }
std::vector<Int32LeafNodeType*>& updatedIndexNodes() { return mUpdatedIndexNodes; }
/// @}
private:
/// @note The output fragment list is ordered by the primitive index
void
gatherFragments(std::vector<Fragment>& fragments, const CoordBBox& bbox,
tree::ValueAccessor<TreeType>& distAcc, tree::ValueAccessor<Int32TreeType>& indexAcc)
{
fragments.clear();
const Coord nodeMin = bbox.min() & ~(LeafNodeType::DIM - 1);
const Coord nodeMax = bbox.max() & ~(LeafNodeType::DIM - 1);
CoordBBox region;
Coord ijk;
for (ijk[0] = nodeMin[0]; ijk[0] <= nodeMax[0]; ijk[0] += LeafNodeType::DIM) {
for (ijk[1] = nodeMin[1]; ijk[1] <= nodeMax[1]; ijk[1] += LeafNodeType::DIM) {
for (ijk[2] = nodeMin[2]; ijk[2] <= nodeMax[2]; ijk[2] += LeafNodeType::DIM) {
if (LeafNodeType* distleaf = distAcc.probeLeaf(ijk)) {
region.min() = Coord::maxComponent(bbox.min(), ijk);
region.max() = Coord::minComponent(bbox.max(),
ijk.offsetBy(LeafNodeType::DIM - 1));
gatherFragments(fragments, region, *distleaf, *indexAcc.probeLeaf(ijk));
}
}
}
}
std::sort(fragments.begin(), fragments.end());
}
void
gatherFragments(std::vector<Fragment>& fragments, const CoordBBox& bbox,
const LeafNodeType& distLeaf, const Int32LeafNodeType& idxLeaf) const
{
const typename LeafNodeType::NodeMaskType& mask = distLeaf.getValueMask();
const ValueType* distData = distLeaf.buffer().data();
const Int32* idxData = idxLeaf.buffer().data();
for (int x = bbox.min()[0]; x <= bbox.max()[0]; ++x) {
const Index xPos = (x & (LeafNodeType::DIM - 1u)) << (2 * LeafNodeType::LOG2DIM);
for (int y = bbox.min()[1]; y <= bbox.max()[1]; ++y) {
const Index yPos = xPos + ((y & (LeafNodeType::DIM - 1u)) << LeafNodeType::LOG2DIM);
for (int z = bbox.min()[2]; z <= bbox.max()[2]; ++z) {
const Index pos = yPos + (z & (LeafNodeType::DIM - 1u));
if (mask.isOn(pos)) {
fragments.push_back(Fragment(idxData[pos],x,y,z, std::abs(distData[pos])));
}
}
}
}
}
/// @note This method expects the fragment list to be ordered by the primitive index
/// to avoid redundant distance computations.
/// @brief Return the world-space distance from the center of voxel @a ijk to
/// the nearest mesh primitive referenced by @a fragments, writing that
/// primitive's index to @a closestPrimIdx.
/// @note This method expects the fragment list to be ordered by the primitive index
/// to avoid redundant distance computations.
ValueType
computeDistance(const Coord& ijk, const Int32 manhattanLimit,
    const std::vector<Fragment>& fragments, Int32& closestPrimIdx) const
{
    Vec3d a, b, c, uvw, voxelCenter(ijk[0], ijk[1], ijk[2]);
    double primDist, tmpDist, dist = std::numeric_limits<double>::max();
    Int32 lastIdx = Int32(util::INVALID_IDX);

    for (size_t n = 0, N = fragments.size(); n < N; ++n) {

        const Fragment& fragment = fragments[n];
        // Fragments are sorted by primitive index, so consecutive duplicates
        // reference a primitive that was already measured.
        if (lastIdx == fragment.idx) continue;

        // Cheap rejection: skip primitives whose fragment lies too far away
        // in voxel (Manhattan) distance from this voxel.
        const Int32 dx = std::abs(fragment.x - ijk[0]);
        const Int32 dy = std::abs(fragment.y - ijk[1]);
        const Int32 dz = std::abs(fragment.z - ijk[2]);

        const Int32 manhattan = dx + dy + dz;
        if (manhattan > manhattanLimit) continue;

        lastIdx = fragment.idx;

        const size_t polygon = size_t(lastIdx);

        mMesh->getIndexSpacePoint(polygon, 0, a);
        mMesh->getIndexSpacePoint(polygon, 1, b);
        mMesh->getIndexSpacePoint(polygon, 2, c);

        // Squared index-space distance to the polygon's first triangle.
        primDist = (voxelCenter -
            closestPointOnTriangleToPoint(a, c, b, voxelCenter, uvw)).lengthSqr();

        // Split quad into a second triangle
        if (4 == mMesh->vertexCount(polygon)) {

            mMesh->getIndexSpacePoint(polygon, 3, b);

            tmpDist = (voxelCenter - closestPointOnTriangleToPoint(
                a, b, c, voxelCenter, uvw)).lengthSqr();

            if (tmpDist < primDist) primDist = tmpDist;
        }

        if (primDist < dist) {
            dist = primDist;
            closestPrimIdx = lastIdx;
        }
    }

    // Distances are accumulated squared in index space; convert here.
    return ValueType(std::sqrt(dist)) * mVoxelSize;
}
/// @note Returns true if the current voxel was updated and neighbouring
/// voxels need to be evaluated.
bool
updateVoxel(const Coord& ijk, const Int32 manhattanLimit,
const std::vector<Fragment>& fragments,
LeafNodeType& distLeaf, Int32LeafNodeType& idxLeaf, bool* updatedLeafNodes = nullptr)
{
Int32 closestPrimIdx = 0;
const ValueType distance = computeDistance(ijk, manhattanLimit, fragments, closestPrimIdx);
const Index pos = LeafNodeType::coordToOffset(ijk);
const bool inside = distLeaf.getValue(pos) < ValueType(0.0);
bool activateNeighbourVoxels = false;
if (!inside && distance < mExteriorBandWidth) {
if (updatedLeafNodes) *updatedLeafNodes = true;
activateNeighbourVoxels = (distance + mVoxelSize) < mExteriorBandWidth;
distLeaf.setValueOnly(pos, distance);
idxLeaf.setValueOn(pos, closestPrimIdx);
} else if (inside && distance < mInteriorBandWidth) {
if (updatedLeafNodes) *updatedLeafNodes = true;
activateNeighbourVoxels = (distance + mVoxelSize) < mInteriorBandWidth;
distLeaf.setValueOnly(pos, -distance);
idxLeaf.setValueOn(pos, closestPrimIdx);
}
return activateNeighbourVoxels;
}
//////////
// Inputs shared by all worker copies of this functor:
BoolLeafNodeType ** const mMaskNodes;    // candidate mask leaf nodes to expand from
BoolTreeType * const mMaskTree;          // tree that owns the mask nodes
TreeType * const mDistTree;              // distance tree being expanded
Int32TreeType * const mIndexTree;        // closest-primitive index tree being expanded

MeshDataAdapter const * const mMesh;     // source mesh adapter

// Per-worker outputs (merged on join):
BoolTreeType mNewMaskTree;               // voxels to revisit on the next pass
std::vector<LeafNodeType*> mDistNodes, mUpdatedDistNodes;         // new vs. in-place updated distance leafs
std::vector<Int32LeafNodeType*> mIndexNodes, mUpdatedIndexNodes;  // new vs. in-place updated index leafs

const ValueType mExteriorBandWidth, mInteriorBandWidth, mVoxelSize; // world-space band widths / voxel size
}; // struct ExpandNarrowband
/// @brief Functor that inserts a list of pre-built leaf nodes into a tree.
template<typename TreeType>
struct AddNodes {
    using LeafNodeType = typename TreeType::LeafNodeType;

    AddNodes(TreeType& tree, std::vector<LeafNodeType*>& nodes)
        : mTree(&tree), mNodes(&nodes)
    {
    }

    /// Transfer every node in the list to the tree.
    void operator()() const {
        tree::ValueAccessor<TreeType> acc(*mTree);
        for (LeafNodeType* node : *mNodes) {
            acc.addLeaf(node);
        }
    }

    TreeType * const mTree;
    std::vector<LeafNodeType*> * const mNodes;
}; // AddNodes
template<typename TreeType, typename Int32TreeType, typename BoolTreeType, typename MeshDataAdapter>
inline void
expandNarrowband(
TreeType& distTree,
Int32TreeType& indexTree,
BoolTreeType& maskTree,
std::vector<typename BoolTreeType::LeafNodeType*>& maskNodes,
const MeshDataAdapter& mesh,
typename TreeType::ValueType exteriorBandWidth,
typename TreeType::ValueType interiorBandWidth,
typename TreeType::ValueType voxelSize)
{
ExpandNarrowband<TreeType, MeshDataAdapter> expandOp(maskNodes, maskTree,
distTree, indexTree, mesh, exteriorBandWidth, interiorBandWidth, voxelSize);
tbb::parallel_reduce(tbb::blocked_range<size_t>(0, maskNodes.size()), expandOp);
tbb::parallel_for(tbb::blocked_range<size_t>(0, expandOp.updatedIndexNodes().size()),
UnionValueMasks<typename TreeType::LeafNodeType, typename Int32TreeType::LeafNodeType>(
expandOp.updatedDistNodes(), expandOp.updatedIndexNodes()));
tbb::task_group tasks;
tasks.run(AddNodes<TreeType>(distTree, expandOp.newDistNodes()));
tasks.run(AddNodes<Int32TreeType>(indexTree, expandOp.newIndexNodes()));
tasks.wait();
maskTree.clear();
maskTree.merge(expandOp.newMaskTree());
}
////////////////////////////////////////
// Transform values (sqrt, world space scaling and sign flip if sdf)
// Transform values (sqrt, world space scaling and sign flip if sdf)
template<typename TreeType>
struct TransformValues
{
    using LeafNodeType = typename TreeType::LeafNodeType;
    using ValueType = typename TreeType::ValueType;

    /// @param nodes        leaf nodes whose active values are rewritten in place
    /// @param voxelSize    world-space voxel size used to rescale the distances
    /// @param unsignedDist if true, keep all values positive (unsigned distance field)
    TransformValues(std::vector<LeafNodeType*>& nodes,
        ValueType voxelSize, bool unsignedDist)
        : mNodes(nodes.empty() ? nullptr : &nodes[0]) // &nodes[0] is undefined for an empty vector
        , mVoxelSize(voxelSize)
        , mUnsigned(unsignedDist)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        typename LeafNodeType::ValueOnIter iter;

        const bool udf = mUnsigned;
        // w[0] flips the sign (interior voxels), w[1] keeps it positive
        // (exterior voxels, or all voxels in the unsigned case).
        const ValueType w[2] = { -mVoxelSize, mVoxelSize };

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            for (iter = mNodes[n]->beginValueOn(); iter; ++iter) {
                ValueType& val = const_cast<ValueType&>(iter.getValue());
                // Stored values are squared voxel-unit distances; convert to
                // world-space distances and restore the sign.
                val = w[udf || (val < ValueType(0.0))] * std::sqrt(std::abs(val));
            }
        }
    }

private:
    LeafNodeType * * const mNodes;
    const ValueType mVoxelSize;
    const bool mUnsigned;
};
// Inactivate values outside the (exBandWidth, inBandWidth) range.
// Inactivate values outside the (exBandWidth, inBandWidth) range.
template<typename TreeType>
struct InactivateValues
{
    using LeafNodeType = typename TreeType::LeafNodeType;
    using ValueType = typename TreeType::ValueType;

    InactivateValues(std::vector<LeafNodeType*>& nodes,
        ValueType exBandWidth, ValueType inBandWidth)
        : mNodes(nodes.empty() ? nullptr : &nodes[0])
        , mExBandWidth(exBandWidth)
        , mInBandWidth(inBandWidth)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        // Clamp to the band limit and deactivate any active value that
        // reaches or exceeds its side's limit.
        const ValueType exteriorLimit = mExBandWidth;
        const ValueType interiorLimit = -mInBandWidth;

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            for (typename LeafNodeType::ValueOnIter iter = mNodes[n]->beginValueOn();
                iter; ++iter)
            {
                ValueType& val = const_cast<ValueType&>(iter.getValue());
                if (val < ValueType(0.0)) {
                    // Interior voxel: clamp to the (negative) interior limit.
                    if (!(val > interiorLimit)) {
                        val = interiorLimit;
                        iter.setValueOff();
                    }
                } else if (!(val < exteriorLimit)) {
                    // Exterior voxel: clamp to the exterior limit.
                    val = exteriorLimit;
                    iter.setValueOff();
                }
            }
        }
    }

private:
    LeafNodeType * * const mNodes;
    const ValueType mExBandWidth, mInBandWidth;
};
// Uniformly offset all active values by a fixed amount.
template<typename TreeType>
struct OffsetValues
{
    using LeafNodeType = typename TreeType::LeafNodeType;
    using ValueType = typename TreeType::ValueType;

    OffsetValues(std::vector<LeafNodeType*>& nodes, ValueType offset)
        : mNodes(nodes.empty() ? nullptr : &nodes[0]), mOffset(offset)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            LeafNodeType& node = *mNodes[n];
            for (typename LeafNodeType::ValueOnIter iter = node.beginValueOn();
                iter; ++iter)
            {
                // In-place update of the active value.
                const_cast<ValueType&>(iter.getValue()) += mOffset;
            }
        }
    }

private:
    LeafNodeType * * const mNodes;
    const ValueType mOffset;
};
/// @brief One renormalization step: recompute each active value from its six
/// axis-aligned neighbours using the Godunov upwind scheme (via
/// math::GodunovsNormSqrd) and write the result to an external buffer, so the
/// source tree stays read-only during the parallel pass.
template<typename TreeType>
struct Renormalize
{
    using LeafNodeType = typename TreeType::LeafNodeType;
    using ValueType = typename TreeType::ValueType;

    /// @param buffer externally owned array with LeafNodeType::SIZE entries per node
    Renormalize(const TreeType& tree, const std::vector<LeafNodeType*>& nodes,
        ValueType* buffer, ValueType voxelSize)
        : mTree(&tree)
        , mNodes(nodes.empty() ? nullptr : &nodes[0])
        , mBuffer(buffer)
        , mVoxelSize(voxelSize)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        using Vec3Type = math::Vec3<ValueType>;

        tree::ValueAccessor<const TreeType> acc(*mTree);

        Coord ijk;
        Vec3Type up, down;

        const ValueType dx = mVoxelSize, invDx = ValueType(1.0) / mVoxelSize;

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            // Results for node n go to its own slice of the external buffer.
            ValueType* bufferData = &mBuffer[n * LeafNodeType::SIZE];

            typename LeafNodeType::ValueOnCIter iter = mNodes[n]->cbeginValueOn();
            for (; iter; ++iter) {

                const ValueType phi0 = *iter;

                ijk = iter.getCoord();

                // One-sided forward (up) and backward (down) differences
                // along each axis.
                up[0] = acc.getValue(ijk.offsetBy(1, 0, 0)) - phi0;
                up[1] = acc.getValue(ijk.offsetBy(0, 1, 0)) - phi0;
                up[2] = acc.getValue(ijk.offsetBy(0, 0, 1)) - phi0;

                down[0] = phi0 - acc.getValue(ijk.offsetBy(-1, 0, 0));
                down[1] = phi0 - acc.getValue(ijk.offsetBy(0, -1, 0));
                down[2] = phi0 - acc.getValue(ijk.offsetBy(0, 0, -1));

                // Godunov upwind estimate of |grad(phi)|^2.
                const ValueType normSqGradPhi = math::GodunovsNormSqrd(phi0 > 0.0, down, up);

                // Deviation of |grad(phi)| from 1 (the eikonal condition).
                const ValueType diff = math::Sqrt(normSqGradPhi) * invDx - ValueType(1.0);
                // Smoothed sign of phi0.
                const ValueType S = phi0 / (math::Sqrt(math::Pow2(phi0) + normSqGradPhi));

                bufferData[iter.pos()] = phi0 - dx * S * diff;
            }
        }
    }

private:
    TreeType const * const mTree;
    LeafNodeType const * const * const mNodes;
    ValueType * const mBuffer;
    const ValueType mVoxelSize;
};
// Combine each active value with the matching entry of an external buffer,
// keeping the smaller of the two.
template<typename TreeType>
struct MinCombine
{
    using LeafNodeType = typename TreeType::LeafNodeType;
    using ValueType = typename TreeType::ValueType;

    MinCombine(std::vector<LeafNodeType*>& nodes, const ValueType* buffer)
        : mNodes(nodes.empty() ? nullptr : &nodes[0]), mBuffer(buffer)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            // Each leaf owns a LeafNodeType::SIZE slice of the buffer.
            const ValueType* slice = &mBuffer[n * LeafNodeType::SIZE];

            for (typename LeafNodeType::ValueOnIter iter = mNodes[n]->beginValueOn();
                iter; ++iter)
            {
                ValueType& val = const_cast<ValueType&>(iter.getValue());
                val = std::min(val, slice[iter.pos()]);
            }
        }
    }

private:
    LeafNodeType * * const mNodes;
    ValueType const * const mBuffer;
};
} // mesh_to_volume_internal namespace
////////////////////////////////////////
// Utility method implementation
/// @brief Flip the sign of all voxels reachable from the outside of the
/// narrow band, by sweeping along each axis and then iteratively propagating
/// the result across leaf node faces until no node changes.
template <typename FloatTreeT>
inline void
traceExteriorBoundaries(FloatTreeT& tree)
{
    using ConnectivityTable = mesh_to_volume_internal::LeafNodeConnectivityTable<FloatTreeT>;

    // Build a node connectivity table where each leaf node has an offset into a
    // linearized list of nodes, and each leaf stores its six axis aligned neighbor
    // offsets
    ConnectivityTable nodeConnectivity(tree);

    std::vector<size_t> zStartNodes, yStartNodes, xStartNodes;

    // Store all nodes which do not have negative neighbors i.e. the nodes furthest
    // in -X, -Y, -Z. We sweep from lowest coordinate positions +axis and then
    // from the furthest positive coordinate positions -axis
    for (size_t n = 0; n < nodeConnectivity.size(); ++n) {
        if (ConnectivityTable::INVALID_OFFSET == nodeConnectivity.offsetsPrevX()[n]) {
            xStartNodes.push_back(n);
        }
        if (ConnectivityTable::INVALID_OFFSET == nodeConnectivity.offsetsPrevY()[n]) {
            yStartNodes.push_back(n);
        }
        if (ConnectivityTable::INVALID_OFFSET == nodeConnectivity.offsetsPrevZ()[n]) {
            zStartNodes.push_back(n);
        }
    }

    using SweepingOp = mesh_to_volume_internal::SweepExteriorSign<FloatTreeT>;

    // Sweep the exterior value signs (make them negative) up until the voxel intersection
    // with the isosurface. Do this in both lowest -> + and largest -> - directions
    tbb::parallel_for(tbb::blocked_range<size_t>(0, zStartNodes.size()),
        SweepingOp(SweepingOp::Z_AXIS, zStartNodes, nodeConnectivity));

    tbb::parallel_for(tbb::blocked_range<size_t>(0, yStartNodes.size()),
        SweepingOp(SweepingOp::Y_AXIS, yStartNodes, nodeConnectivity));

    tbb::parallel_for(tbb::blocked_range<size_t>(0, xStartNodes.size()),
        SweepingOp(SweepingOp::X_AXIS, xStartNodes, nodeConnectivity));

    const size_t numLeafNodes = nodeConnectivity.size();
    const size_t numVoxels = numLeafNodes * FloatTreeT::LeafNodeType::SIZE;

    // Double-buffered per-node change flags plus a per-voxel scratch mask.
    std::unique_ptr<bool[]> changedNodeMaskA{new bool[numLeafNodes]};
    std::unique_ptr<bool[]> changedNodeMaskB{new bool[numLeafNodes]};
    std::unique_ptr<bool[]> changedVoxelMask{new bool[numVoxels]};

    mesh_to_volume_internal::fillArray(changedNodeMaskA.get(), true, numLeafNodes);
    mesh_to_volume_internal::fillArray(changedNodeMaskB.get(), false, numLeafNodes);
    mesh_to_volume_internal::fillArray(changedVoxelMask.get(), false, numVoxels);

    const tbb::blocked_range<size_t> nodeRange(0, numLeafNodes);

    bool nodesUpdated = false;
    do {
        // Perform per leaf node localized propagation of signs by looping over
        // all voxels and checking to see if any of their neighbors (within the
        // same leaf) are negative
        tbb::parallel_for(nodeRange, mesh_to_volume_internal::SeedFillExteriorSign<FloatTreeT>(
            nodeConnectivity.nodes(), changedNodeMaskA.get()));

        // For each leaf, check its axis aligned neighbors and propagate any changes
        // which occurred previously (in SeedFillExteriorSign OR in SyncVoxelMask) to
        // the leaf faces. Note that this operation stores the propagated face results
        // in a separate buffer (changedVoxelMask) to avoid writing to nodes being read
        // from other threads. Additionally mark any leaf nodes which will absorb any
        // changes from its neighbors in changedNodeMaskB
        tbb::parallel_for(nodeRange, mesh_to_volume_internal::SeedPoints<FloatTreeT>(
            nodeConnectivity, changedNodeMaskA.get(), changedNodeMaskB.get(),
            changedVoxelMask.get()));

        // Only nodes where a value was influenced by an adjacent node need to be
        // processed on the next pass.
        changedNodeMaskA.swap(changedNodeMaskB);

        // Early-exit scan: stop as soon as any node is flagged as changed.
        nodesUpdated = false;
        for (size_t n = 0; n < numLeafNodes; ++n) {
            nodesUpdated |= changedNodeMaskA[n];
            if (nodesUpdated) break;
        }

        // Use the voxel mask updates in ::SeedPoints to actually assign the new values
        // across leaf node faces
        if (nodesUpdated) {
            tbb::parallel_for(nodeRange, mesh_to_volume_internal::SyncVoxelMask<FloatTreeT>(
                nodeConnectivity.nodes(), changedNodeMaskA.get(), changedVoxelMask.get()));
        }
    } while (nodesUpdated);

} // void traceExteriorBoundaries()
////////////////////////////////////////
/// @brief Core mesh-to-volume conversion: voxelize @a mesh and build a signed
/// or unsigned narrow-band distance grid.
/// @param interrupter       checked at several progress milestones; on interruption
///                          the partially built grid is returned
/// @param mesh              mesh adapter supplying polygons (index space)
/// @param transform         transform for the output grid
/// @param exteriorBandWidth exterior band width, in voxel units
/// @param interiorBandWidth interior band width, in voxel units (may be +inf /
///                          float max to fill the whole interior)
/// @param flags             bitwise OR of conversion flags (UNSIGNED_DISTANCE_FIELD etc.)
/// @param polygonIndexGrid  optional output grid of closest-primitive indices
/// @return the distance grid; background-only on invalid input or interruption
template <typename GridType, typename MeshDataAdapter, typename Interrupter>
inline typename GridType::Ptr
meshToVolume(
  Interrupter& interrupter,
  const MeshDataAdapter& mesh,
  const math::Transform& transform,
  float exteriorBandWidth,
  float interiorBandWidth,
  int flags,
  typename GridType::template ValueConverter<Int32>::Type * polygonIndexGrid)
{
    using GridTypePtr = typename GridType::Ptr;
    using TreeType = typename GridType::TreeType;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using ValueType = typename GridType::ValueType;

    using Int32GridType = typename GridType::template ValueConverter<Int32>::Type;
    using Int32TreeType = typename Int32GridType::TreeType;

    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;

    //////////

    // Setup

    GridTypePtr distGrid(new GridType(std::numeric_limits<ValueType>::max()));
    distGrid->setTransform(transform.copy());

    ValueType exteriorWidth = ValueType(exteriorBandWidth);
    ValueType interiorWidth = ValueType(interiorBandWidth);

    // Note: inf interior width is all right, this value makes the converter fill
    // interior regions with distance values.
    if (!std::isfinite(exteriorWidth) || std::isnan(interiorWidth)) {
        std::stringstream msg;
        msg << "Illegal narrow band width: exterior = " << exteriorWidth
            << ", interior = " << interiorWidth;
        OPENVDB_LOG_DEBUG(msg.str());
        return distGrid;
    }

    // Uniform voxels are assumed: only the x component of the voxel size is used.
    const ValueType voxelSize = ValueType(transform.voxelSize()[0]);

    if (!std::isfinite(voxelSize) || math::isZero(voxelSize)) {
        std::stringstream msg;
        msg << "Illegal transform, voxel size = " << voxelSize;
        OPENVDB_LOG_DEBUG(msg.str());
        return distGrid;
    }

    // Convert narrow band width from voxel units to world space units.
    exteriorWidth *= voxelSize;
    // Avoid the unit conversion if the interior band width is set to
    // inf or std::numeric_limits<float>::max().
    if (interiorWidth < std::numeric_limits<ValueType>::max()) {
        interiorWidth *= voxelSize;
    }

    const bool computeSignedDistanceField = (flags & UNSIGNED_DISTANCE_FIELD) == 0;
    const bool removeIntersectingVoxels = (flags & DISABLE_INTERSECTING_VOXEL_REMOVAL) == 0;
    const bool renormalizeValues = (flags & DISABLE_RENORMALIZATION) == 0;
    const bool trimNarrowBand = (flags & DISABLE_NARROW_BAND_TRIMMING) == 0;

    Int32GridType* indexGrid = nullptr;

    typename Int32GridType::Ptr temporaryIndexGrid;

    // Use the caller-supplied index grid if present, otherwise a temporary one.
    if (polygonIndexGrid) {
        indexGrid = polygonIndexGrid;
    } else {
        temporaryIndexGrid.reset(new Int32GridType(Int32(util::INVALID_IDX)));
        indexGrid = temporaryIndexGrid.get();
    }

    indexGrid->newTree();
    indexGrid->setTransform(transform.copy());

    if (computeSignedDistanceField) {
        distGrid->setGridClass(GRID_LEVEL_SET);
    } else {
        distGrid->setGridClass(GRID_UNKNOWN);
        interiorWidth = ValueType(0.0);
    }

    TreeType& distTree = distGrid->tree();
    Int32TreeType& indexTree = indexGrid->tree();

    //////////

    // Voxelize mesh

    {
        using VoxelizationDataType = mesh_to_volume_internal::VoxelizationData<TreeType>;
        using DataTable = tbb::enumerable_thread_specific<typename VoxelizationDataType::Ptr>;

        DataTable data;
        using Voxelizer =
            mesh_to_volume_internal::VoxelizePolygons<TreeType, MeshDataAdapter, Interrupter>;

        const tbb::blocked_range<size_t> polygonRange(0, mesh.polygonCount());

        tbb::parallel_for(polygonRange, Voxelizer(data, mesh, &interrupter));

        // Merge the thread-local voxelization results into the output trees.
        for (typename DataTable::iterator i = data.begin(); i != data.end(); ++i) {
            VoxelizationDataType& dataItem = **i;
            mesh_to_volume_internal::combineData(
                distTree, indexTree, dataItem.distTree, dataItem.indexTree);
        }
    }

    // The progress estimates are based on the observed average time for a few different
    // test cases and is only intended to provide some rough progression feedback to the user.
    if (interrupter.wasInterrupted(30)) return distGrid;

    //////////

    // Classify interior and exterior regions

    if (computeSignedDistanceField) {

        // Determines the inside/outside state for the narrow band of voxels.
        traceExteriorBoundaries(distTree);

        std::vector<LeafNodeType*> nodes;
        nodes.reserve(distTree.leafCount());
        distTree.getNodes(nodes);

        const tbb::blocked_range<size_t> nodeRange(0, nodes.size());

        using SignOp =
            mesh_to_volume_internal::ComputeIntersectingVoxelSign<TreeType, MeshDataAdapter>;

        tbb::parallel_for(nodeRange, SignOp(nodes, distTree, indexTree, mesh));

        if (interrupter.wasInterrupted(45)) return distGrid;

        // Remove voxels created by self intersecting portions of the mesh.
        if (removeIntersectingVoxels) {

            tbb::parallel_for(nodeRange,
                mesh_to_volume_internal::ValidateIntersectingVoxels<TreeType>(distTree, nodes));

            tbb::parallel_for(nodeRange,
                mesh_to_volume_internal::RemoveSelfIntersectingSurface<TreeType>(
                    nodes, distTree, indexTree));

            tools::pruneInactive(distTree, /*threading=*/true);
            tools::pruneInactive(indexTree, /*threading=*/true);
        }
    }

    if (interrupter.wasInterrupted(50)) return distGrid;

    // Nothing was voxelized (or everything was removed): return an empty grid
    // with the requested exterior background.
    if (distTree.activeVoxelCount() == 0) {
        distTree.clear();
        distTree.root().setBackground(exteriorWidth, /*updateChildNodes=*/false);
        return distGrid;
    }

    // Transform values (world space scaling etc.).
    {
        std::vector<LeafNodeType*> nodes;
        nodes.reserve(distTree.leafCount());
        distTree.getNodes(nodes);

        tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()),
            mesh_to_volume_internal::TransformValues<TreeType>(
                nodes, voxelSize, !computeSignedDistanceField));
    }

    // Propagate sign information into tile regions.
    if (computeSignedDistanceField) {
        distTree.root().setBackground(exteriorWidth, /*updateChildNodes=*/false);
        tools::signedFloodFillWithValues(distTree, exteriorWidth, -interiorWidth);
    } else {
        tools::changeBackground(distTree, exteriorWidth);
    }

    if (interrupter.wasInterrupted(54)) return distGrid;

    //////////

    // Expand the narrow band region

    // Only expand when the requested band exceeds the initial two-voxel band.
    const ValueType minBandWidth = voxelSize * ValueType(2.0);

    if (interiorWidth > minBandWidth || exteriorWidth > minBandWidth) {

        // Create the initial voxel mask.
        BoolTreeType maskTree(false);

        {
            std::vector<LeafNodeType*> nodes;
            nodes.reserve(distTree.leafCount());
            distTree.getNodes(nodes);

            mesh_to_volume_internal::ConstructVoxelMask<TreeType> op(maskTree, distTree, nodes);
            tbb::parallel_reduce(tbb::blocked_range<size_t>(0, nodes.size()), op);
        }

        // Progress estimation
        unsigned maxIterations = std::numeric_limits<unsigned>::max();

        float progress = 54.0f, step = 0.0f;
        double estimated =
            2.0 * std::ceil((std::max(interiorWidth, exteriorWidth) - minBandWidth) / voxelSize);

        if (estimated < double(maxIterations)) {
            maxIterations = unsigned(estimated);
            step = 40.0f / float(maxIterations);
        }

        std::vector<typename BoolTreeType::LeafNodeType*> maskNodes;

        unsigned count = 0;
        // Each iteration grows the band by one voxel layer.
        while (true) {

            if (interrupter.wasInterrupted(int(progress))) return distGrid;

            const size_t maskNodeCount = maskTree.leafCount();
            if (maskNodeCount == 0) break;

            maskNodes.clear();
            maskNodes.reserve(maskNodeCount);
            maskTree.getNodes(maskNodes);

            const tbb::blocked_range<size_t> range(0, maskNodes.size());

            tbb::parallel_for(range,
                mesh_to_volume_internal::DiffLeafNodeMask<TreeType>(distTree, maskNodes));

            mesh_to_volume_internal::expandNarrowband(distTree, indexTree, maskTree, maskNodes,
                mesh, exteriorWidth, interiorWidth, voxelSize);

            if ((++count) >= maxIterations) break;

            progress += step;
        }
    }

    if (interrupter.wasInterrupted(94)) return distGrid;

    if (!polygonIndexGrid) indexGrid->clear();

    /////////

    // Renormalize distances to smooth out bumps caused by self intersecting
    // and overlapping portions of the mesh and renormalize the level set.

    if (computeSignedDistanceField && renormalizeValues) {

        std::vector<LeafNodeType*> nodes;
        nodes.reserve(distTree.leafCount());
        distTree.getNodes(nodes);

        std::unique_ptr<ValueType[]> buffer{new ValueType[LeafNodeType::SIZE * nodes.size()]};

        const ValueType offset = ValueType(0.8 * voxelSize);

        tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()),
            mesh_to_volume_internal::OffsetValues<TreeType>(nodes, -offset));

        tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()),
            mesh_to_volume_internal::Renormalize<TreeType>(
                distTree, nodes, buffer.get(), voxelSize));

        tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()),
            mesh_to_volume_internal::MinCombine<TreeType>(nodes, buffer.get()));

        tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()),
            mesh_to_volume_internal::OffsetValues<TreeType>(
                nodes, offset - mesh_to_volume_internal::Tolerance<ValueType>::epsilon()));
    }

    if (interrupter.wasInterrupted(99)) return distGrid;

    /////////

    // Remove active voxels that exceed the narrow band limits

    if (trimNarrowBand && std::min(interiorWidth, exteriorWidth) < voxelSize * ValueType(4.0)) {

        std::vector<LeafNodeType*> nodes;
        nodes.reserve(distTree.leafCount());
        distTree.getNodes(nodes);

        tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()),
            mesh_to_volume_internal::InactivateValues<TreeType>(
                nodes, exteriorWidth, computeSignedDistanceField ? interiorWidth : exteriorWidth));

        tools::pruneLevelSet(
            distTree, exteriorWidth, computeSignedDistanceField ? -interiorWidth : -exteriorWidth);
    }

    return distGrid;
}
/// @brief Convenience overload of meshToVolume() that runs without
/// interruption support (forwards a util::NullInterrupter).
template <typename GridType, typename MeshDataAdapter>
inline typename GridType::Ptr
meshToVolume(
  const MeshDataAdapter& mesh,
  const math::Transform& transform,
  float exteriorBandWidth,
  float interiorBandWidth,
  int flags,
  typename GridType::template ValueConverter<Int32>::Type * polygonIndexGrid)
{
    util::NullInterrupter nullInterrupter;
    return meshToVolume<GridType>(nullInterrupter, mesh, transform,
        exteriorBandWidth, interiorBandWidth, flags, polygonIndexGrid);
}
////////////////////////////////////////
//{
/// @cond OPENVDB_MESH_TO_VOLUME_INTERNAL
/// @internal This overload is enabled only for grids with a scalar, floating-point ValueType.
/// Packs the triangle and quad lists into a single primitive array (when both
/// are present) and forwards to meshToVolume().
/// @return an empty grid with background @a exBandWidth if @a points is empty.
template<typename GridType, typename Interrupter>
inline typename std::enable_if<std::is_floating_point<typename GridType::ValueType>::value,
    typename GridType::Ptr>::type
doMeshConversion(
    Interrupter& interrupter,
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec3I>& triangles,
    const std::vector<Vec4I>& quads,
    float exBandWidth,
    float inBandWidth,
    bool unsignedDistanceField = false)
{
    if (points.empty()) {
        return typename GridType::Ptr(new GridType(typename GridType::ValueType(exBandWidth)));
    }

    const size_t numPoints = points.size();
    std::unique_ptr<Vec3s[]> indexSpacePoints{new Vec3s[numPoints]};

    // transform points to local grid index space
    tbb::parallel_for(tbb::blocked_range<size_t>(0, numPoints),
        mesh_to_volume_internal::TransformPoints<Vec3s>(
            points.data(), indexSpacePoints.get(), xform));

    const int conversionFlags = unsignedDistanceField ? UNSIGNED_DISTANCE_FIELD : 0;

    if (quads.empty()) {
        // Use data() rather than &triangles[0]: data() is well defined for an
        // empty vector, so a mesh with points but no primitives is handled safely.
        QuadAndTriangleDataAdapter<Vec3s, Vec3I>
            mesh(indexSpacePoints.get(), numPoints, triangles.data(), triangles.size());
        return meshToVolume<GridType>(
            interrupter, mesh, xform, exBandWidth, inBandWidth, conversionFlags);

    } else if (triangles.empty()) {
        QuadAndTriangleDataAdapter<Vec3s, Vec4I>
            mesh(indexSpacePoints.get(), numPoints, quads.data(), quads.size());
        return meshToVolume<GridType>(
            interrupter, mesh, xform, exBandWidth, inBandWidth, conversionFlags);
    }

    // pack primitives: triangles first (4th index flagged invalid), then quads
    const size_t numPrimitives = triangles.size() + quads.size();
    std::unique_ptr<Vec4I[]> prims{new Vec4I[numPrimitives]};

    for (size_t n = 0, N = triangles.size(); n < N; ++n) {
        const Vec3I& triangle = triangles[n];
        Vec4I& prim = prims[n];
        prim[0] = triangle[0];
        prim[1] = triangle[1];
        prim[2] = triangle[2];
        prim[3] = util::INVALID_IDX; // marks this primitive as a triangle
    }

    const size_t offset = triangles.size();
    for (size_t n = 0, N = quads.size(); n < N; ++n) {
        prims[offset + n] = quads[n];
    }

    QuadAndTriangleDataAdapter<Vec3s, Vec4I>
        mesh(indexSpacePoints.get(), numPoints, prims.get(), numPrimitives);
    return meshToVolume<GridType>(interrupter, mesh, xform,
        exBandWidth, inBandWidth, conversionFlags);
}
/// @internal This overload is enabled only for grids that do not have a scalar,
/// floating-point ValueType.
/// @note Unconditionally throws TypeError: mesh-to-volume output must be a
/// scalar, floating-point grid.
template<typename GridType, typename Interrupter>
inline typename std::enable_if<!std::is_floating_point<typename GridType::ValueType>::value,
    typename GridType::Ptr>::type
doMeshConversion(
    Interrupter&,
    const math::Transform& /*xform*/,
    const std::vector<Vec3s>& /*points*/,
    const std::vector<Vec3I>& /*triangles*/,
    const std::vector<Vec4I>& /*quads*/,
    float /*exBandWidth*/,
    float /*inBandWidth*/,
    bool /*unsignedDistanceField*/ = false)
{
    OPENVDB_THROW(TypeError,
        "mesh to volume conversion is supported only for scalar floating-point grids");
}
/// @endcond
//}
////////////////////////////////////////
/// @brief Convert a triangle mesh to a narrow-band level set of half-width
/// @a halfWidth (in voxel units), without interruption support.
template<typename GridType>
inline typename GridType::Ptr
meshToLevelSet(
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec3I>& triangles,
    float halfWidth)
{
    util::NullInterrupter nullInterrupter;
    // This overload has no quads.
    std::vector<Vec4I> quads(0);
    return doMeshConversion<GridType>(nullInterrupter, xform, points, triangles, quads,
        halfWidth, halfWidth);
}
/// @brief Convert a triangle mesh to a narrow-band level set of half-width
/// @a halfWidth (in voxel units), with interruption support.
template<typename GridType, typename Interrupter>
inline typename GridType::Ptr
meshToLevelSet(
    Interrupter& interrupter,
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec3I>& triangles,
    float halfWidth)
{
    // This overload has no quads.
    std::vector<Vec4I> quads(0);
    return doMeshConversion<GridType>(interrupter, xform, points, triangles, quads,
        halfWidth, halfWidth);
}
/// @brief Convert a quad mesh to a narrow-band level set of half-width
/// @a halfWidth (in voxel units), without interruption support.
template<typename GridType>
inline typename GridType::Ptr
meshToLevelSet(
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec4I>& quads,
    float halfWidth)
{
    util::NullInterrupter nullInterrupter;
    // This overload has no triangles.
    std::vector<Vec3I> triangles(0);
    return doMeshConversion<GridType>(nullInterrupter, xform, points, triangles, quads,
        halfWidth, halfWidth);
}
/// @brief Convert a quad mesh to a narrow-band level set of half-width
/// @a halfWidth (in voxel units), with interruption support.
template<typename GridType, typename Interrupter>
inline typename GridType::Ptr
meshToLevelSet(
    Interrupter& interrupter,
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec4I>& quads,
    float halfWidth)
{
    // This overload has no triangles.
    std::vector<Vec3I> triangles(0);
    return doMeshConversion<GridType>(interrupter, xform, points, triangles, quads,
        halfWidth, halfWidth);
}
/// @brief Convert a mixed triangle/quad mesh to a narrow-band level set of
/// half-width @a halfWidth (in voxel units), without interruption support.
template<typename GridType>
inline typename GridType::Ptr
meshToLevelSet(
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec3I>& triangles,
    const std::vector<Vec4I>& quads,
    float halfWidth)
{
    util::NullInterrupter nullInterrupter;
    return doMeshConversion<GridType>(nullInterrupter, xform, points, triangles, quads,
        halfWidth, halfWidth);
}
/// @brief Convert a mixed triangle/quad mesh to a narrow-band level set of
/// half-width @a halfWidth (in voxel units), with interruption support.
template<typename GridType, typename Interrupter>
inline typename GridType::Ptr
meshToLevelSet(
    Interrupter& interrupter,
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec3I>& triangles,
    const std::vector<Vec4I>& quads,
    float halfWidth)
{
    return doMeshConversion<GridType>(interrupter, xform, points, triangles, quads,
        halfWidth, halfWidth);
}
/// @brief Convert a mixed triangle/quad mesh to a signed distance field with
/// asymmetric exterior/interior band widths (in voxel units), without
/// interruption support.
template<typename GridType>
inline typename GridType::Ptr
meshToSignedDistanceField(
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec3I>& triangles,
    const std::vector<Vec4I>& quads,
    float exBandWidth,
    float inBandWidth)
{
    util::NullInterrupter nullInterrupter;
    return doMeshConversion<GridType>(nullInterrupter, xform, points, triangles,
        quads, exBandWidth, inBandWidth);
}
/// @brief Convert a mixed triangle/quad mesh to a signed distance field with
/// asymmetric exterior/interior band widths (in voxel units), with
/// interruption support.
template<typename GridType, typename Interrupter>
inline typename GridType::Ptr
meshToSignedDistanceField(
    Interrupter& interrupter,
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec3I>& triangles,
    const std::vector<Vec4I>& quads,
    float exBandWidth,
    float inBandWidth)
{
    return doMeshConversion<GridType>(interrupter, xform, points, triangles,
        quads, exBandWidth, inBandWidth);
}
/// @brief Convert a mixed triangle/quad mesh to an unsigned distance field of
/// width @a bandWidth (in voxel units), without interruption support.
template<typename GridType>
inline typename GridType::Ptr
meshToUnsignedDistanceField(
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec3I>& triangles,
    const std::vector<Vec4I>& quads,
    float bandWidth)
{
    util::NullInterrupter nullInterrupter;
    // The trailing 'true' requests an unsigned distance field.
    return doMeshConversion<GridType>(nullInterrupter, xform, points, triangles, quads,
        bandWidth, bandWidth, true);
}
/// @brief Convert a triangle-and-quad mesh to an unsigned distance field.
///
/// @param interrupter  interruption callback, forwarded to doMeshConversion()
/// @param xform        transform mapping the output grid to world space
/// @param points       mesh vertices
/// @param triangles    triangle faces (indices into @a points)
/// @param quads        quadrilateral faces (indices into @a points)
/// @param bandWidth    band width used on both sides of the surface
template<typename GridType, typename Interrupter>
inline typename GridType::Ptr
meshToUnsignedDistanceField(
    Interrupter& interrupter,
    const openvdb::math::Transform& xform,
    const std::vector<Vec3s>& points,
    const std::vector<Vec3I>& triangles,
    const std::vector<Vec4I>& quads,
    float bandWidth)
{
    // The trailing 'true' selects doMeshConversion()'s unsigned-distance path
    // (matches the function name; see doMeshConversion's signature).
    return doMeshConversion<GridType>(interrupter, xform, points, triangles, quads,
        bandWidth, bandWidth, true);
}
////////////////////////////////////////////////////////////////////////////////
// Required by several of the tree nodes
/// @brief Stream EdgeData as "{[ xPrim, xDist] [ yPrim, yDist] [ zPrim, zDist]}".
inline std::ostream&
operator<<(std::ostream& ostr, const MeshToVoxelEdgeData::EdgeData& rhs)
{
    // Emit all three axis records in one chained expression.
    return ostr
        << "{[ " << rhs.mXPrim << ", " << rhs.mXDist << "]"
        << " [ " << rhs.mYPrim << ", " << rhs.mYDist << "]"
        << " [ " << rhs.mZPrim << ", " << rhs.mZDist << "]}";
}
// Required by math::Abs
/// @brief Absolute-value overload for EdgeData: returns the input unchanged.
inline MeshToVoxelEdgeData::EdgeData
Abs(const MeshToVoxelEdgeData::EdgeData& x)
{
    return x;
}
////////////////////////////////////////
/// @brief TBB-reducible helper that voxelizes mesh primitives and records,
/// per voxel, the closest axis-aligned ray/primitive intersections (EdgeData).
class MeshToVoxelEdgeData::GenEdgeData
{
public:

    /// @param pointList    mesh vertices
    /// @param polygonList  quads, or triangles with an invalid fourth index
    GenEdgeData(
        const std::vector<Vec3s>& pointList,
        const std::vector<Vec4I>& polygonList);

    /// @brief Process all polygons, in parallel when @a threaded is true.
    void run(bool threaded = true);

    /// @brief Splitting constructor, required by tbb::parallel_reduce.
    GenEdgeData(GenEdgeData& rhs, tbb::split);
    inline void operator() (const tbb::blocked_range<size_t> &range);
    inline void join(GenEdgeData& rhs);

    /// @brief Access the tree holding the accumulated edge data.
    inline TreeType& tree() { return mTree; }

private:
    // Disallow assignment.
    void operator=(const GenEdgeData&) {}

    // One triangle (a, b, c) or quad (a, b, c, d) plus its polygon index.
    struct Primitive { Vec3d a, b, c, d; Int32 index; };

    template<bool IsQuad>
    inline void voxelize(const Primitive&);

    template<bool IsQuad>
    inline bool evalPrimitive(const Coord&, const Primitive&);

    inline bool rayTriangleIntersection( const Vec3d& origin, const Vec3d& dir,
        const Vec3d& a, const Vec3d& b, const Vec3d& c, double& t);

    TreeType mTree;      // accumulated per-voxel edge data
    Accessor mAccessor;

    const std::vector<Vec3s>& mPointList;
    const std::vector<Vec4I>& mPolygonList;

    // Used internally for acceleration: remembers the last primitive index
    // written to each voxel so a primitive's flood fill does not revisit it.
    using IntTreeT = TreeType::ValueConverter<Int32>::Type;
    IntTreeT mLastPrimTree;
    tree::ValueAccessor<IntTreeT> mLastPrimAccessor;
}; // class MeshToVoxelEdgeData::GenEdgeData
/// @brief Construct from mesh data.  The output tree starts empty with a
/// default EdgeData background; the acceleration tree's background is the
/// invalid primitive index.
inline
MeshToVoxelEdgeData::GenEdgeData::GenEdgeData(
    const std::vector<Vec3s>& pointList,
    const std::vector<Vec4I>& polygonList)
    : mTree(EdgeData())
    , mAccessor(mTree)
    , mPointList(pointList)
    , mPolygonList(polygonList)
    , mLastPrimTree(Int32(util::INVALID_IDX))
    , mLastPrimAccessor(mLastPrimTree)
{
}
/// @brief Splitting constructor (tbb::parallel_reduce): shares the input mesh
/// with @a rhs but starts with empty output trees; partial results are later
/// combined via join().
inline
MeshToVoxelEdgeData::GenEdgeData::GenEdgeData(GenEdgeData& rhs, tbb::split)
    : mTree(EdgeData())
    , mAccessor(mTree)
    , mPointList(rhs.mPointList)
    , mPolygonList(rhs.mPolygonList)
    , mLastPrimTree(Int32(util::INVALID_IDX))
    , mLastPrimAccessor(mLastPrimTree)
{
}
inline void
MeshToVoxelEdgeData::GenEdgeData::run(bool threaded)
{
if (threaded) {
tbb::parallel_reduce(tbb::blocked_range<size_t>(0, mPolygonList.size()), *this);
} else {
(*this)(tbb::blocked_range<size_t>(0, mPolygonList.size()));
}
}
/// @brief Merge the edge data accumulated by @a rhs into this object
/// (tbb::parallel_reduce combiner).  Leaf nodes unique to @a rhs are stolen
/// wholesale; shared leaf nodes are merged voxel by voxel, keeping the
/// smaller (closer) intersection distance per axis.
inline void
MeshToVoxelEdgeData::GenEdgeData::join(GenEdgeData& rhs)
{
    using RootNodeType = TreeType::RootNodeType;
    using NodeChainType = RootNodeType::NodeChainType;
    static_assert(NodeChainType::Size > 1, "expected tree height > 1");
    using InternalNodeType = typename NodeChainType::template Get<1>;

    Coord ijk;
    Index offset;

    // Drop rhs's cached accessor state before restructuring its tree.
    rhs.mTree.clearAllAccessors();

    TreeType::LeafIter leafIt = rhs.mTree.beginLeaf();
    for ( ; leafIt; ++leafIt) {
        ijk = leafIt->origin();

        TreeType::LeafNodeType* lhsLeafPt = mTree.probeLeaf(ijk);

        if (!lhsLeafPt) {
            // No corresponding leaf on this side: adopt the rhs leaf and
            // detach it from the rhs tree so it is not double-owned.
            mAccessor.addLeaf(rhs.mAccessor.probeLeaf(ijk));
            InternalNodeType* node = rhs.mAccessor.getNode<InternalNodeType>();
            node->stealNode<TreeType::LeafNodeType>(ijk, EdgeData(), false);
            rhs.mAccessor.clear();

        } else {
            // Both sides have data for this leaf: merge active voxels,
            // keeping the closer intersection (and its primitive) per axis.
            TreeType::LeafNodeType::ValueOnCIter it = leafIt->cbeginValueOn();
            for ( ; it; ++it) {
                offset = it.pos();
                const EdgeData& rhsValue = it.getValue();

                if (!lhsLeafPt->isValueOn(offset)) {
                    lhsLeafPt->setValueOn(offset, rhsValue);
                } else {
                    // In-place update of the lhs voxel value.
                    EdgeData& lhsValue = const_cast<EdgeData&>(lhsLeafPt->getValue(offset));

                    if (rhsValue.mXDist < lhsValue.mXDist) {
                        lhsValue.mXDist = rhsValue.mXDist;
                        lhsValue.mXPrim = rhsValue.mXPrim;
                    }
                    if (rhsValue.mYDist < lhsValue.mYDist) {
                        lhsValue.mYDist = rhsValue.mYDist;
                        lhsValue.mYPrim = rhsValue.mYPrim;
                    }
                    if (rhsValue.mZDist < lhsValue.mZDist) {
                        lhsValue.mZDist = rhsValue.mZDist;
                        lhsValue.mZPrim = rhsValue.mZPrim;
                    }
                }
            } // end value iteration
        }
    } // end leaf iteration
}
/// @brief Voxelize every polygon in @a range; quads are detected by a valid
/// fourth vertex index.
inline void
MeshToVoxelEdgeData::GenEdgeData::operator()(const tbb::blocked_range<size_t> &range)
{
    Primitive prim;

    for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

        const Vec4I& face = mPolygonList[n];

        prim.index = Int32(n);
        prim.a = Vec3d(mPointList[face[0]]);
        prim.b = Vec3d(mPointList[face[1]]);
        prim.c = Vec3d(mPointList[face[2]]);

        // A valid fourth index marks the face as a quad; otherwise it is
        // treated as a triangle.
        const bool isQuad = (face[3] != util::INVALID_IDX);
        if (isQuad) {
            prim.d = Vec3d(mPointList[face[3]]);
            voxelize<true>(prim);
        } else {
            voxelize<false>(prim);
        }
    }
}
/// @brief Flood-fill the voxels surrounding @a prim, evaluating the primitive
/// at each visited voxel.
///
/// The deque is used as a stack (back/pop_back); mLastPrimAccessor records the
/// last primitive index written to each voxel so this primitive never
/// evaluates the same voxel twice.  The fill keeps expanding through voxels
/// for which evalPrimitive() returns true (i.e. voxels close to the surface).
template<bool IsQuad>
inline void
MeshToVoxelEdgeData::GenEdgeData::voxelize(const Primitive& prim)
{
    std::deque<Coord> coordList;
    Coord ijk, nijk;

    // Seed the fill at the voxel containing the primitive's first vertex.
    ijk = Coord::floor(prim.a);
    coordList.push_back(ijk);

    evalPrimitive<IsQuad>(ijk, prim);

    while (!coordList.empty()) {
        ijk = coordList.back();
        coordList.pop_back();

        // Visit all 26 face/edge/corner neighbors.
        for (Int32 i = 0; i < 26; ++i) {
            nijk = ijk + util::COORD_OFFSETS[i];
            if (prim.index != mLastPrimAccessor.getValue(nijk)) {
                mLastPrimAccessor.setValue(nijk, prim.index);
                if(evalPrimitive<IsQuad>(nijk, prim)) coordList.push_back(nijk);
            }
        }
    }
}
template<bool IsQuad>
inline bool
MeshToVoxelEdgeData::GenEdgeData::evalPrimitive(const Coord& ijk, const Primitive& prim)
{
Vec3d uvw, org(ijk[0], ijk[1], ijk[2]);
bool intersecting = false;
double t;
EdgeData edgeData;
mAccessor.probeValue(ijk, edgeData);
// Evaluate first triangle
double dist = (org -
closestPointOnTriangleToPoint(prim.a, prim.c, prim.b, org, uvw)).lengthSqr();
if (rayTriangleIntersection(org, Vec3d(1.0, 0.0, 0.0), prim.a, prim.c, prim.b, t)) {
if (t < edgeData.mXDist) {
edgeData.mXDist = float(t);
edgeData.mXPrim = prim.index;
intersecting = true;
}
}
if (rayTriangleIntersection(org, Vec3d(0.0, 1.0, 0.0), prim.a, prim.c, prim.b, t)) {
if (t < edgeData.mYDist) {
edgeData.mYDist = float(t);
edgeData.mYPrim = prim.index;
intersecting = true;
}
}
if (rayTriangleIntersection(org, Vec3d(0.0, 0.0, 1.0), prim.a, prim.c, prim.b, t)) {
if (t < edgeData.mZDist) {
edgeData.mZDist = float(t);
edgeData.mZPrim = prim.index;
intersecting = true;
}
}
if (IsQuad) {
// Split quad into a second triangle and calculate distance.
double secondDist = (org -
closestPointOnTriangleToPoint(prim.a, prim.d, prim.c, org, uvw)).lengthSqr();
if (secondDist < dist) dist = secondDist;
if (rayTriangleIntersection(org, Vec3d(1.0, 0.0, 0.0), prim.a, prim.d, prim.c, t)) {
if (t < edgeData.mXDist) {
edgeData.mXDist = float(t);
edgeData.mXPrim = prim.index;
intersecting = true;
}
}
if (rayTriangleIntersection(org, Vec3d(0.0, 1.0, 0.0), prim.a, prim.d, prim.c, t)) {
if (t < edgeData.mYDist) {
edgeData.mYDist = float(t);
edgeData.mYPrim = prim.index;
intersecting = true;
}
}
if (rayTriangleIntersection(org, Vec3d(0.0, 0.0, 1.0), prim.a, prim.d, prim.c, t)) {
if (t < edgeData.mZDist) {
edgeData.mZDist = float(t);
edgeData.mZPrim = prim.index;
intersecting = true;
}
}
}
if (intersecting) mAccessor.setValue(ijk, edgeData);
return (dist < 0.86602540378443861);
}
inline bool
MeshToVoxelEdgeData::GenEdgeData::rayTriangleIntersection(
const Vec3d& origin, const Vec3d& dir,
const Vec3d& a, const Vec3d& b, const Vec3d& c,
double& t)
{
// Check if ray is parallel with triangle
Vec3d e1 = b - a;
Vec3d e2 = c - a;
Vec3d s1 = dir.cross(e2);
double divisor = s1.dot(e1);
if (!(std::abs(divisor) > 0.0)) return false;
// Compute barycentric coordinates
double inv_divisor = 1.0 / divisor;
Vec3d d = origin - a;
double b1 = d.dot(s1) * inv_divisor;
if (b1 < 0.0 || b1 > 1.0) return false;
Vec3d s2 = d.cross(e1);
double b2 = dir.dot(s2) * inv_divisor;
if (b2 < 0.0 || (b1 + b2) > 1.0) return false;
// Compute distance to intersection point
t = e2.dot(s2) * inv_divisor;
return (t < 0.0) ? false : true;
}
////////////////////////////////////////
/// @brief Default constructor: initializes the internal tree with a
/// default-constructed EdgeData as its background value.
inline
MeshToVoxelEdgeData::MeshToVoxelEdgeData()
    : mTree(EdgeData())
{
}
inline void
MeshToVoxelEdgeData::convert(
const std::vector<Vec3s>& pointList,
const std::vector<Vec4I>& polygonList)
{
GenEdgeData converter(pointList, polygonList);
converter.run();
mTree.clear();
mTree.merge(converter.tree());
}
/// @brief Collect the mesh/edge intersection points, and the index of the
/// primitive that produced each, for the twelve edges of voxel @a ijk.
///
/// Each voxel stores only its own +x/+y/+z edge intersections, so the voxel
/// itself plus six of its neighbors must be probed; for each neighbor only
/// the axes whose edges border voxel (i, j, k) are collected.
///
/// @param acc         accessor for the edge-data tree
/// @param ijk         voxel coordinate
/// @param points      output: intersection points (index space)
/// @param primitives  output: primitive index per intersection point
inline void
MeshToVoxelEdgeData::getEdgeData(
    Accessor& acc,
    const Coord& ijk,
    std::vector<Vec3d>& points,
    std::vector<Index32>& primitives)
{
    EdgeData data;
    Coord coord = ijk;

    // Each helper appends the intersection point on one axis-aligned edge of
    // the voxel currently addressed by 'coord', if that axis holds a valid hit.
    auto addX = [&]() {
        if (data.mXPrim != util::INVALID_IDX) {
            points.push_back(Vec3d(
                double(coord[0]) + data.mXDist, double(coord[1]), double(coord[2])));
            primitives.push_back(data.mXPrim);
        }
    };
    auto addY = [&]() {
        if (data.mYPrim != util::INVALID_IDX) {
            points.push_back(Vec3d(
                double(coord[0]), double(coord[1]) + data.mYDist, double(coord[2])));
            primitives.push_back(data.mYPrim);
        }
    };
    auto addZ = [&]() {
        if (data.mZPrim != util::INVALID_IDX) {
            points.push_back(Vec3d(
                double(coord[0]), double(coord[1]), double(coord[2]) + data.mZDist));
            primitives.push_back(data.mZPrim);
        }
    };

    // Walk the voxel and the six neighbors whose stored edges border it.
    if (acc.probeValue(coord, data)) { addX(); addY(); addZ(); } // (i,   j,   k  )

    coord[0] += 1;                                               // (i+1, j,   k  )
    if (acc.probeValue(coord, data)) { addY(); addZ(); }

    coord[2] += 1;                                               // (i+1, j,   k+1)
    if (acc.probeValue(coord, data)) { addY(); }

    coord[0] -= 1;                                               // (i,   j,   k+1)
    if (acc.probeValue(coord, data)) { addX(); addY(); }

    coord[1] += 1;                                               // (i,   j+1, k+1)
    if (acc.probeValue(coord, data)) { addX(); }

    coord[2] -= 1;                                               // (i,   j+1, k  )
    if (acc.probeValue(coord, data)) { addX(); addZ(); }

    coord[0] += 1;                                               // (i+1, j+1, k  )
    if (acc.probeValue(coord, data)) { addZ(); }
}
/// @brief Build a level set grid for an axis-aligned box by meshing the box
/// and converting the mesh with meshToVolume().
///
/// @param bbox       world-space box
/// @param xform      transform mapping the output grid to world space
/// @param halfWidth  band width used for both interior and exterior bands
template<typename GridType, typename VecType>
inline typename GridType::Ptr
createLevelSetBox(const math::BBox<VecType>& bbox,
    const openvdb::math::Transform& xform,
    typename VecType::ValueType halfWidth)
{
    // Convert the world-space bounds to index space.
    const Vec3s pmin = Vec3s(xform.worldToIndex(bbox.min()));
    const Vec3s pmax = Vec3s(xform.worldToIndex(bbox.max()));

    // Eight corners of the index-space box.
    Vec3s points[8] = {
        Vec3s(pmin[0], pmin[1], pmin[2]),
        Vec3s(pmin[0], pmin[1], pmax[2]),
        Vec3s(pmax[0], pmin[1], pmax[2]),
        Vec3s(pmax[0], pmin[1], pmin[2]),
        Vec3s(pmin[0], pmax[1], pmin[2]),
        Vec3s(pmin[0], pmax[1], pmax[2]),
        Vec3s(pmax[0], pmax[1], pmax[2]),
        Vec3s(pmax[0], pmax[1], pmin[2])
    };

    // Six quadrilateral faces (indices into the corner array).
    Vec4I faces[6] = {
        Vec4I(0, 1, 2, 3), // bottom
        Vec4I(7, 6, 5, 4), // top
        Vec4I(4, 5, 1, 0), // front
        Vec4I(6, 7, 3, 2), // back
        Vec4I(0, 3, 7, 4), // left
        Vec4I(1, 5, 6, 2)  // right
    };

    QuadAndTriangleDataAdapter<Vec3s, Vec4I> mesh(points, 8, faces, 6);
    return meshToVolume<GridType>(mesh, xform, halfWidth, halfWidth);
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_MESH_TO_VOLUME_HAS_BEEN_INCLUDED
| 141,537 | C | 32.389479 | 114 | 0.594085 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/ParticleAtlas.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file ParticleAtlas.h
///
/// @brief Space-partitioning acceleration structure for particles, points with
/// radius. Partitions particle indices into voxels to accelerate range
/// and nearest neighbor searches.
///
/// @note This acceleration structure only stores integer offsets into an external particle
/// data structure that conforms to the ParticleArray interface.
///
/// @details Constructs and maintains a sequence of @c PointIndexGrids each of progressively
/// lower resolution. Particles are uniquely assigned to a particular resolution
/// level based on their radius. This strategy has proven efficient for accelerating
/// spatial queries on particle data sets with varying radii.
///
/// @details The data structure automatically detects and adapts to particle data sets with
/// uniform radii. The construction is simplified and spatial queries pre-cache the
/// uniform particle radius to avoid redundant access calls to the
/// ParticleArray::getRadius method.
///
/// @author Mihai Alden
#ifndef OPENVDB_TOOLS_PARTICLE_ATLAS_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_PARTICLE_ATLAS_HAS_BEEN_INCLUDED
#include "PointIndexGrid.h"
#include <openvdb/Grid.h>
#include <openvdb/Types.h>
#include <openvdb/math/Transform.h>
#include <openvdb/tree/Tree.h>
#include <openvdb/tree/LeafNode.h>
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>
#include <tbb/parallel_reduce.h>
#include <algorithm> // for std::min(), std::max()
#include <cmath> // for std::sqrt()
#include <deque>
#include <limits>
#include <memory>
#include <utility> // for std::pair
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
////////////////////////////////////////
/// @brief Partition particles and performs range and nearest-neighbor searches.
///
/// @interface ParticleArray
/// Expected interface for the ParticleArray container:
/// @code
/// template<typename VectorType>
/// struct ParticleArray
/// {
/// // The type used to represent world-space positions
/// using PosType = VectorType;
/// using ScalarType = typename PosType::value_type;
///
/// // Return the number of particles in the array
/// size_t size() const;
///
/// // Return the world-space position for the nth particle.
/// void getPos(size_t n, PosType& xyz) const;
///
/// // Return the world-space radius for the nth particle.
/// void getRadius(size_t n, ScalarType& radius) const;
/// };
/// @endcode
///
/// @details Constructs a collection of @c PointIndexGrids of different resolutions
/// to accelerate spatial searches for particles with varying radius.
template<typename PointIndexGridType = PointIndexGrid>
struct ParticleAtlas
{
    using Ptr = SharedPtr<ParticleAtlas>;
    using ConstPtr = SharedPtr<const ParticleAtlas>;

    using PointIndexGridPtr = typename PointIndexGridType::Ptr;
    using IndexType = typename PointIndexGridType::ValueType;

    struct Iterator;

    //////////

    ParticleAtlas() : mIndexGridArray(), mMinRadiusArray(), mMaxRadiusArray() {}

    /// @brief Partitions particle indices
    ///
    /// @param particles    container conforming to the ParticleArray interface
    /// @param minVoxelSize minimum voxel size limit
    /// @param maxLevels    maximum number of resolution levels
    template<typename ParticleArrayType>
    void construct(const ParticleArrayType& particles, double minVoxelSize, size_t maxLevels = 50);

    /// @brief Create a new @c ParticleAtlas from the given @a particles.
    ///
    /// @param particles    container conforming to the ParticleArray interface
    /// @param minVoxelSize minimum voxel size limit
    /// @param maxLevels    maximum number of resolution levels
    template<typename ParticleArrayType>
    static Ptr create(const ParticleArrayType& particles,
        double minVoxelSize, size_t maxLevels = 50);

    /// @brief Returns the number of resolution levels.
    size_t levels() const { return mIndexGridArray.size(); }
    /// @brief true if the container size is 0, false otherwise.
    bool empty() const { return mIndexGridArray.empty(); }

    /// @brief Returns minimum particle radius for level @a n.
    double minRadius(size_t n) const { return mMinRadiusArray[n]; }
    /// @brief Returns maximum particle radius for level @a n.
    double maxRadius(size_t n) const { return mMaxRadiusArray[n]; }

    /// @brief Returns the @c PointIndexGrid that represents the given level @a n.
    PointIndexGridType& pointIndexGrid(size_t n) { return *mIndexGridArray[n]; }
    /// @brief Returns the @c PointIndexGrid that represents the given level @a n.
    const PointIndexGridType& pointIndexGrid(size_t n) const { return *mIndexGridArray[n]; }

private:
    // Disallow copying
    ParticleAtlas(const ParticleAtlas&);
    ParticleAtlas& operator=(const ParticleAtlas&);

    // One PointIndexGrid per resolution level; the per-level min/max particle
    // radii are stored in parallel arrays indexed by level number.
    std::vector<PointIndexGridPtr> mIndexGridArray;
    std::vector<double> mMinRadiusArray, mMaxRadiusArray;
}; // struct ParticleAtlas

// Convenience alias for the common instantiation.
using ParticleIndexAtlas = ParticleAtlas<PointIndexGrid>;
////////////////////////////////////////
/// @brief Provides accelerated range and nearest-neighbor searches for
/// particles that are partitioned using the ParticleAtlas.
///
/// @note Prefer to construct the iterator object once and reuse it
/// for subsequent queries.
template<typename PointIndexGridType>
struct ParticleAtlas<PointIndexGridType>::Iterator
{
    using TreeType = typename PointIndexGridType::TreeType;
    using ConstAccessor = tree::ValueAccessor<const TreeType>;
    using ConstAccessorPtr = std::unique_ptr<ConstAccessor>;

    /////

    /// @brief Construct an iterator from the given @a atlas.
    explicit Iterator(const ParticleAtlas& atlas);

    /// @brief Clear the iterator and update it with the result of the given
    ///        world-space radial query.
    /// @param center    world-space center
    /// @param radius    world-space search radius
    /// @param particles container conforming to the ParticleArray interface
    template<typename ParticleArrayType>
    void worldSpaceSearchAndUpdate(const Vec3d& center, double radius,
        const ParticleArrayType& particles);

    /// @brief Clear the iterator and update it with the result of the given
    ///        world-space radial query.
    /// @param bbox      world-space bounding box
    /// @param particles container conforming to the ParticleArray interface
    template<typename ParticleArrayType>
    void worldSpaceSearchAndUpdate(const BBoxd& bbox, const ParticleArrayType& particles);

    /// @brief Returns the total number of resolution levels.
    size_t levels() const { return mAtlas->levels(); }

    /// @brief Clear the iterator and update it with all particles that reside
    ///        at the given resolution @a level.
    void updateFromLevel(size_t level);

    /// Reset the iterator to point to the first item.
    void reset();

    /// Return a const reference to the item to which this iterator is pointing.
    const IndexType& operator*() const { return *mRange.first; }

    /// @{
    /// @brief  Return @c true if this iterator is not yet exhausted.
    bool test() const { return mRange.first < mRange.second || mIter != mRangeList.end(); }
    operator bool() const { return this->test(); }
    /// @}

    /// Advance iterator to next item.
    void increment();

    /// Advance iterator to next item.
    void operator++() { this->increment(); }

    /// @brief Advance iterator to next item.
    /// @return @c true if this iterator is not yet exhausted.
    bool next();

    /// Return the number of point indices in the iterator range.
    size_t size() const;

    /// Return @c true if both iterators point to the same element.
    bool operator==(const Iterator& p) const { return mRange.first == p.mRange.first; }
    bool operator!=(const Iterator& p) const { return !this->operator==(p); }

private:
    // Disallow copying.
    Iterator(const Iterator& rhs);
    Iterator& operator=(const Iterator& rhs);

    void clear();

    using Range = std::pair<const IndexType*, const IndexType*>;
    using RangeDeque = std::deque<Range>;
    using RangeDequeCIter = typename RangeDeque::const_iterator;
    using IndexArray = std::unique_ptr<IndexType[]>;

    ParticleAtlas const * const mAtlas;
    // Accessors into the atlas grids (presumably one per resolution level --
    // the constructor is defined elsewhere; confirm there).
    std::unique_ptr<ConstAccessorPtr[]> mAccessorList;

    // Primary index collection: ranges of indices stored contiguously in
    // the index grids' leaf nodes.
    Range mRange;
    RangeDeque mRangeList;
    RangeDequeCIter mIter;
    // Secondary index collection: indices copied out individually by filters.
    IndexArray mIndexArray;
    size_t mIndexArraySize, mAccessorListSize;
}; // struct ParticleAtlas::Iterator
////////////////////////////////////////
// Internal operators and implementation details
namespace particle_atlas_internal {
/// @brief TBB reduction body that computes the smallest and largest particle
/// radius in a ParticleArray.
template<typename ParticleArrayT>
struct ComputeExtremas
{
    using PosType = typename ParticleArrayT::PosType;
    using ScalarType = typename PosType::value_type;

    // Sentinels: max() / lowest() so the first real radius always replaces
    // them.  (lowest() is the correct "most negative" value for any scalar
    // type, unlike -max().)
    ComputeExtremas(const ParticleArrayT& particles)
        : particleArray(&particles)
        , minRadius(std::numeric_limits<ScalarType>::max())
        , maxRadius(std::numeric_limits<ScalarType>::lowest())
    {
    }

    /// @brief Splitting constructor, required by tbb::parallel_reduce.
    ComputeExtremas(ComputeExtremas& rhs, tbb::split)
        : particleArray(rhs.particleArray)
        , minRadius(std::numeric_limits<ScalarType>::max())
        , maxRadius(std::numeric_limits<ScalarType>::lowest())
    {
    }

    /// @brief Fold the radii in @a range into the running extremas.
    void operator()(const tbb::blocked_range<size_t>& range) {
        ScalarType radius, tmpMin = minRadius, tmpMax = maxRadius;

        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {
            particleArray->getRadius(n, radius);
            tmpMin = std::min(radius, tmpMin);
            tmpMax = std::max(radius, tmpMax);
        }

        minRadius = std::min(minRadius, tmpMin);
        maxRadius = std::max(maxRadius, tmpMax);
    }

    /// @brief Combine the extremas computed by another body.
    void join(const ComputeExtremas& rhs) {
        minRadius = std::min(minRadius, rhs.minRadius);
        maxRadius = std::max(maxRadius, rhs.maxRadius);
    }

    ParticleArrayT const * const particleArray;
    ScalarType minRadius, maxRadius;
}; // struct ComputeExtremas
/// @brief ParticleArray adapter that exposes a subset of a wrapped particle
/// array and can repeatedly split off its largest-radius particles.
///
/// The subset is represented by an optional index map: when the map is null,
/// the adapter is an identity view of the whole wrapped array.
template<typename ParticleArrayT, typename PointIndex>
struct SplittableParticleArray
{
    using Ptr = SharedPtr<SplittableParticleArray>;
    using ConstPtr = SharedPtr<const SplittableParticleArray>;
    using ParticleArray = ParticleArrayT;

    using PosType = typename ParticleArray::PosType;
    using ScalarType = typename PosType::value_type;

    /// @brief Wrap @a particles and compute the radii extremas.
    SplittableParticleArray(const ParticleArrayT& particles)
        : mIndexMap(), mParticleArray(&particles), mSize(particles.size())
    {
        updateExtremas();
    }

    /// @brief Wrap @a particles with already-known radii extremas,
    /// skipping the reduction.
    SplittableParticleArray(const ParticleArrayT& particles, double minR, double maxR)
        : mIndexMap(), mParticleArray(&particles), mSize(particles.size())
    {
        mMinRadius = ScalarType(minR);
        mMaxRadius = ScalarType(maxR);
    }

    const ParticleArrayT& particleArray() const { return *mParticleArray; }

    size_t size() const { return mSize; }

    // ParticleArray interface: local indices are translated to global ones.
    void getPos(size_t n, PosType& xyz) const
        { return mParticleArray->getPos(getGlobalIndex(n), xyz); }
    void getRadius(size_t n, ScalarType& radius) const
        { return mParticleArray->getRadius(getGlobalIndex(n), radius); }

    ScalarType minRadius() const { return mMinRadius; }
    ScalarType maxRadius() const { return mMaxRadius; }

    // Maps a local (subset) index to an index into the wrapped array.
    size_t getGlobalIndex(size_t n) const { return mIndexMap ? size_t(mIndexMap[n]) : n; }

    /// Move all particle indices that have a radius larger or equal to @a maxRadiusLimit
    /// into a separate container.
    Ptr split(ScalarType maxRadiusLimit) {
        if (mMaxRadius < maxRadiusLimit) return Ptr();

        // Flag, in parallel, the particles to split off.
        std::unique_ptr<bool[]> mask{new bool[mSize]};

        tbb::parallel_for(tbb::blocked_range<size_t>(0, mSize),
            MaskParticles(*this, mask, maxRadiusLimit));

        Ptr output(new SplittableParticleArray(*this, mask));
        if (output->size() == 0) return Ptr();

        // Count the particles that remain in this array.
        size_t newSize = 0;
        for (size_t n = 0, N = mSize; n < N; ++n) {
            newSize += size_t(!mask[n]);
        }

        // Compact this array's index map to the remaining particles
        // and refresh the extremas.
        std::unique_ptr<PointIndex[]> newIndexMap{new PointIndex[newSize]};

        setIndexMap(newIndexMap, mask, false);

        mSize = newSize;
        mIndexMap.swap(newIndexMap);
        updateExtremas();

        return output;
    }

private:
    // Disallow copying
    SplittableParticleArray(const SplittableParticleArray&);
    SplittableParticleArray& operator=(const SplittableParticleArray&);

    // Masked copy constructor: builds the subset of @a other selected by @a mask.
    SplittableParticleArray(const SplittableParticleArray& other,
        const std::unique_ptr<bool[]>& mask)
        : mIndexMap(), mParticleArray(&other.particleArray()), mSize(0)
    {
        for (size_t n = 0, N = other.size(); n < N; ++n) {
            mSize += size_t(mask[n]);
        }

        if (mSize != 0) {
            mIndexMap.reset(new PointIndex[mSize]);
            other.setIndexMap(mIndexMap, mask, true);
        }

        updateExtremas();
    }

    // Parallel body that marks every particle whose radius is >= radiusLimit.
    struct MaskParticles {
        MaskParticles(const SplittableParticleArray& particles,
            const std::unique_ptr<bool[]>& mask, ScalarType radius)
            : particleArray(&particles)
            , particleMask(mask.get())
            , radiusLimit(radius)
        {
        }

        void operator()(const tbb::blocked_range<size_t>& range) const {
            const ScalarType maxRadius = radiusLimit;
            ScalarType radius;
            for (size_t n = range.begin(), N = range.end(); n != N; ++n) {
                particleArray->getRadius(n, radius);
                particleMask[n] = !(radius < maxRadius);
            }
        }

        SplittableParticleArray const * const particleArray;
        bool * const particleMask;
        ScalarType const radiusLimit;
    }; // struct MaskParticles

    // Recompute mMinRadius / mMaxRadius over the current subset.
    inline void updateExtremas() {
        ComputeExtremas<SplittableParticleArray> op(*this);
        tbb::parallel_reduce(tbb::blocked_range<size_t>(0, mSize), op);
        mMinRadius = op.minRadius;
        mMaxRadius = op.maxRadius;
    }

    // Copy the global index of every particle whose mask value equals
    // @a maskValue into @a newIndexMap, preserving order.
    void setIndexMap(std::unique_ptr<PointIndex[]>& newIndexMap,
        const std::unique_ptr<bool[]>& mask, bool maskValue) const
    {
        if (mIndexMap.get() != nullptr) {
            // Already a subset: forward the stored global indices.
            const PointIndex* indices = mIndexMap.get();
            for (size_t idx = 0, n = 0, N = mSize; n < N; ++n) {
                if (mask[n] == maskValue) newIndexMap[idx++] = indices[n];
            }
        } else {
            // Identity view: local index == global index.
            for (size_t idx = 0, n = 0, N = mSize; n < N; ++n) {
                if (mask[n] == maskValue) {
                    newIndexMap[idx++] = PointIndex(static_cast<typename PointIndex::IntType>(n));
                }
            }
        }
    }

    //////////

    std::unique_ptr<PointIndex[]> mIndexMap; // null == identity view
    ParticleArrayT const * const mParticleArray;
    size_t mSize;
    ScalarType mMinRadius, mMaxRadius;
}; // struct SplittableParticleArray
/// @brief Parallel body that rewrites the local particle indices stored in
/// each leaf node into global indices, via the particle array's index map.
template<typename ParticleArrayType, typename PointIndexLeafNodeType>
struct RemapIndices {

    RemapIndices(const ParticleArrayType& particles, std::vector<PointIndexLeafNodeType*>& nodes)
        : mParticles(&particles)
        , mNodes(nodes.empty() ? nullptr : &nodes.front())
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        using PointIndexType = typename PointIndexLeafNodeType::ValueType;
        using IntType = typename PointIndexType::IntType;

        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {
            // Remap every index stored in this leaf, in place.
            for (PointIndexType& index : mNodes[n]->indices()) {
                index = PointIndexType(static_cast<IntType>(
                    mParticles->getGlobalIndex(index)));
            }
        }
    }

    ParticleArrayType const * const mParticles;
    PointIndexLeafNodeType * const * const mNodes;
}; // struct RemapIndices
/// @brief Filter that collects the indices of all particles whose sphere
/// overlaps the search sphere of radius @a radius centered at @a xyz.
///
/// When @a hasUniformRadius is true, the particle radius is read once (from
/// particle 0) and folded into a precomputed squared search radius, avoiding
/// a getRadius() call per particle.
template<typename ParticleArrayType, typename IndexT>
struct RadialRangeFilter
{
    using PosType = typename ParticleArrayType::PosType;
    using ScalarType = typename PosType::value_type;

    using Range = std::pair<const IndexT*, const IndexT*>;
    using RangeDeque = std::deque<Range>;
    using IndexDeque = std::deque<IndexT>;

    RadialRangeFilter(RangeDeque& ranges, IndexDeque& indices, const PosType& xyz,
        ScalarType radius, const ParticleArrayType& particles, bool hasUniformRadius = false)
        : mRanges(ranges)
        , mIndices(indices)
        , mCenter(xyz)
        , mRadius(radius)
        , mParticles(&particles)
        , mHasUniformRadius(hasUniformRadius)
    {
        if (mHasUniformRadius) {
            // Precompute (search radius + particle radius)^2, which is
            // compared directly against squared center distances.
            ScalarType uniformRadius;
            mParticles->getRadius(0, uniformRadius);
            mRadius = mRadius + uniformRadius;
            mRadius *= mRadius;
        }
    }

    template <typename LeafNodeType>
    void filterLeafNode(const LeafNodeType& leaf)
    {
        const size_t numIndices = leaf.indices().size();
        if (numIndices > 0) {
            const IndexT* begin = &leaf.indices().front();
            filterVoxel(leaf.origin(), begin, begin + numIndices);
        }
    }

    void filterVoxel(const Coord&, const IndexT* begin, const IndexT* end)
    {
        PosType pos;

        if (mHasUniformRadius) {
            // mRadius already holds the squared combined radius.
            const ScalarType searchRadiusSqr = mRadius;

            while (begin < end) {
                mParticles->getPos(size_t(*begin), pos);
                const ScalarType distSqr = (mCenter - pos).lengthSqr();
                if (distSqr < searchRadiusSqr) {
                    mIndices.push_back(*begin);
                }
                ++begin;
            }
        } else {
            // Per-particle radius: combine with the query radius each time.
            while (begin < end) {
                const size_t idx = size_t(*begin);
                mParticles->getPos(idx, pos);

                ScalarType radius;
                mParticles->getRadius(idx, radius);

                ScalarType searchRadiusSqr = mRadius + radius;
                searchRadiusSqr *= searchRadiusSqr;

                const ScalarType distSqr = (mCenter - pos).lengthSqr();
                if (distSqr < searchRadiusSqr) {
                    mIndices.push_back(*begin);
                }
                ++begin;
            }
        }
    }

private:
    RadialRangeFilter(const RadialRangeFilter&);
    RadialRangeFilter& operator=(const RadialRangeFilter&);

    RangeDeque& mRanges; // NOTE(review): stored but unused by the methods visible here.
    IndexDeque& mIndices;
    PosType const mCenter;
    ScalarType mRadius; // squared combined radius when mHasUniformRadius is true
    ParticleArrayType const * const mParticles;
    bool const mHasUniformRadius;
}; // struct RadialRangeFilter
/// @brief Filter that collects the indices of all particles whose sphere
/// overlaps the given bounding box.
///
/// When @a hasUniformRadius is true, the (squared) particle radius is read
/// once from particle 0 and cached, avoiding per-particle getRadius() calls.
template<typename ParticleArrayType, typename IndexT>
struct BBoxFilter
{
    using PosType = typename ParticleArrayType::PosType;
    using ScalarType = typename PosType::value_type;
    using Range = std::pair<const IndexT*, const IndexT*>;
    using RangeDeque = std::deque<Range>;
    using IndexDeque = std::deque<IndexT>;

    BBoxFilter(RangeDeque& ranges, IndexDeque& indices,
        const BBoxd& bbox, const ParticleArrayType& particles, bool hasUniformRadius = false)
        : mRanges(ranges)
        , mIndices(indices)
        , mBBox(PosType(bbox.min()), PosType(bbox.max()))
        , mCenter(mBBox.getCenter())
        , mParticles(&particles)
        , mHasUniformRadius(hasUniformRadius)
        , mUniformRadiusSqr(ScalarType(0.0))
    {
        if (mHasUniformRadius) {
            mParticles->getRadius(0, mUniformRadiusSqr);
            mUniformRadiusSqr *= mUniformRadiusSqr;
        }
    }

    template <typename LeafNodeType>
    void filterLeafNode(const LeafNodeType& leaf)
    {
        const size_t numIndices = leaf.indices().size();
        if (numIndices > 0) {
            const IndexT* begin = &leaf.indices().front();
            filterVoxel(leaf.origin(), begin, begin + numIndices);
        }
    }

    void filterVoxel(const Coord&, const IndexT* begin, const IndexT* end)
    {
        PosType pos;

        if (mHasUniformRadius) {
            const ScalarType radiusSqr = mUniformRadiusSqr;

            while (begin < end) {
                mParticles->getPos(size_t(*begin), pos);
                // Fast accept: particle center inside the box.
                if (mBBox.isInside(pos)) {
                    mIndices.push_back(*begin++);
                    continue;
                }
                // Otherwise accept when the sphere still reaches the box.
                const ScalarType distSqr = pointToBBoxDistSqr(pos);
                if (!(distSqr > radiusSqr)) {
                    mIndices.push_back(*begin);
                }
                ++begin;
            }
        } else {
            while (begin < end) {
                const size_t idx = size_t(*begin);
                mParticles->getPos(idx, pos);
                // Fast accept: particle center inside the box.
                if (mBBox.isInside(pos)) {
                    mIndices.push_back(*begin++);
                    continue;
                }
                ScalarType radius;
                mParticles->getRadius(idx, radius);
                const ScalarType distSqr = pointToBBoxDistSqr(pos);
                if (!(distSqr > (radius * radius))) {
                    mIndices.push_back(*begin);
                }
                ++begin;
            }
        }
    }

private:
    BBoxFilter(const BBoxFilter&);
    BBoxFilter& operator=(const BBoxFilter&);

    // Squared distance from @a pos to the closest point of mBBox
    // (zero when @a pos lies inside the box).
    ScalarType pointToBBoxDistSqr(const PosType& pos) const
    {
        ScalarType distSqr = ScalarType(0.0);

        for (int i = 0; i < 3; ++i) {

            const ScalarType a = pos[i];

            ScalarType b = mBBox.min()[i];
            if (a < b) {
                ScalarType delta = b - a;
                distSqr += delta * delta;
            }

            b = mBBox.max()[i];
            if (a > b) {
                ScalarType delta = a - b;
                distSqr += delta * delta;
            }
        }

        return distSqr;
    }

    RangeDeque& mRanges; // NOTE(review): stored but unused by the methods visible here.
    IndexDeque& mIndices;
    math::BBox<PosType> const mBBox;
    PosType const mCenter; // NOTE(review): initialized but unused in the visible methods.
    ParticleArrayType const * const mParticles;
    bool const mHasUniformRadius;
    ScalarType mUniformRadiusSqr;
}; // struct BBoxFilter
} // namespace particle_atlas_internal
////////////////////////////////////////
template<typename PointIndexGridType>
template<typename ParticleArrayType>
inline void
ParticleAtlas<PointIndexGridType>::construct(
    const ParticleArrayType& particles, double minVoxelSize, size_t maxLevels)
{
    // Build the atlas: partition the particles into resolution levels by
    // radius and create one PointIndexGrid per level.
    using SplittableParticleArray =
        typename particle_atlas_internal::SplittableParticleArray<ParticleArrayType, IndexType>;
    using SplittableParticleArrayPtr = typename SplittableParticleArray::Ptr;
    using ScalarType = typename ParticleArrayType::ScalarType;
    /////
    // Compute the global min/max particle radius in parallel.
    particle_atlas_internal::ComputeExtremas<ParticleArrayType> extremas(particles);
    tbb::parallel_reduce(tbb::blocked_range<size_t>(0, particles.size()), extremas);
    const double firstMin = extremas.minRadius;
    const double firstMax = extremas.maxRadius;
    // Voxel size of the finest level: at least the smallest radius.
    const double firstVoxelSize = std::max(minVoxelSize, firstMin);
    if (!(firstMax < (firstVoxelSize * double(2.0))) && maxLevels > 1) {
        // Radii span more than 2x the finest voxel size: split the
        // particles into multiple levels of increasing radius/voxel size.
        std::vector<SplittableParticleArrayPtr> levels;
        levels.push_back(SplittableParticleArrayPtr(
            new SplittableParticleArray(particles, firstMin, firstMax)));
        std::vector<double> voxelSizeArray;
        voxelSizeArray.push_back(firstVoxelSize);
        for (size_t n = 0; n < maxLevels; ++n) {
            const double maxParticleRadius = double(levels.back()->maxRadius());
            const double particleRadiusLimit = voxelSizeArray.back() * double(2.0);
            // Stop once the remaining radii fit the current level.
            if (maxParticleRadius < particleRadiusLimit) break;
            // Move all particles with radius >= limit into a new, coarser level.
            SplittableParticleArrayPtr newLevel =
                levels.back()->split(ScalarType(particleRadiusLimit));
            if (!newLevel) break;
            levels.push_back(newLevel);
            voxelSizeArray.push_back(double(newLevel->minRadius()));
        }
        size_t numPoints = 0; // NOTE(review): accumulated below but never used
        using PointIndexTreeType = typename PointIndexGridType::TreeType;
        using PointIndexLeafNodeType = typename PointIndexTreeType::LeafNodeType;
        std::vector<PointIndexLeafNodeType*> nodes;
        for (size_t n = 0, N = levels.size(); n < N; ++n) {
            const SplittableParticleArray& particleArray = *levels[n];
            numPoints += particleArray.size();
            mMinRadiusArray.push_back(double(particleArray.minRadius()));
            mMaxRadiusArray.push_back(double(particleArray.maxRadius()));
            // Index this level's particles into its own grid.
            PointIndexGridPtr grid =
                createPointIndexGrid<PointIndexGridType>(particleArray, voxelSizeArray[n]);
            nodes.clear();
            grid->tree().getNodes(nodes);
            // Remap the per-level (local) indices back to global particle indices.
            tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()),
                particle_atlas_internal::RemapIndices<SplittableParticleArray,
                    PointIndexLeafNodeType>(particleArray, nodes));
            mIndexGridArray.push_back(grid);
        }
    } else {
        // All radii are similar: a single level suffices.
        mMinRadiusArray.push_back(firstMin);
        mMaxRadiusArray.push_back(firstMax);
        mIndexGridArray.push_back(
            createPointIndexGrid<PointIndexGridType>(particles, firstVoxelSize));
    }
}
template<typename PointIndexGridType>
template<typename ParticleArrayType>
inline typename ParticleAtlas<PointIndexGridType>::Ptr
ParticleAtlas<PointIndexGridType>::create(
    const ParticleArrayType& particles, double minVoxelSize, size_t maxLevels)
{
    // Factory method: allocate an empty atlas and populate it from the
    // given particle array.
    Ptr atlas(new ParticleAtlas());
    atlas->construct(particles, minVoxelSize, maxLevels);
    return atlas;
}
////////////////////////////////////////
// ParticleAtlas::Iterator implementation
// Build an iterator bound to the given atlas.  The iterator starts out
// empty; call updateFromLevel() or worldSpaceSearchAndUpdate() to
// populate it.
template<typename PointIndexGridType>
inline
ParticleAtlas<PointIndexGridType>::Iterator::Iterator(const ParticleAtlas& atlas)
    : mAtlas(&atlas)
    , mAccessorList()
    , mRange(static_cast<IndexType*>(nullptr), static_cast<IndexType*>(nullptr))
    , mRangeList()
    , mIter(mRangeList.begin())
    , mIndexArray()
    , mIndexArraySize(0)
    , mAccessorListSize(atlas.levels())
{
    // Cache one value accessor per atlas level so that repeated queries
    // avoid traversing each tree from the root every time.
    if (mAccessorListSize > 0) {
        mAccessorList.reset(new ConstAccessorPtr[mAccessorListSize]);
        for (size_t n = 0, N = mAccessorListSize; n < N; ++n) {
            mAccessorList[n].reset(new ConstAccessor(atlas.pointIndexGrid(n).tree()));
        }
    }
}
template<typename PointIndexGridType>
inline void
ParticleAtlas<PointIndexGridType>::Iterator::reset()
{
    // Rewind to the first stored range; fall back to the filtered-index
    // array, and finally to an empty range if nothing is stored.
    mIter = mRangeList.begin();
    if (mRangeList.empty()) {
        if (mIndexArray) {
            const IndexType* data = mIndexArray.get();
            mRange = Range(data, data + mIndexArraySize);
        } else {
            mRange = Range(static_cast<IndexType*>(nullptr),
                           static_cast<IndexType*>(nullptr));
        }
    } else {
        mRange = mRangeList.front();
    }
}
template<typename PointIndexGridType>
inline void
ParticleAtlas<PointIndexGridType>::Iterator::increment()
{
    // Step to the next index; when the current range is exhausted,
    // advance to the next stored range, then to the filtered-index array.
    ++mRange.first;
    if (mRange.first < mRange.second || mIter == mRangeList.end()) {
        return; // still inside the current range, or already past the deque
    }
    ++mIter;
    if (mIter != mRangeList.end()) {
        mRange = *mIter;
    } else if (mIndexArray) {
        const IndexType* data = mIndexArray.get();
        mRange = Range(data, data + mIndexArraySize);
    }
}
template<typename PointIndexGridType>
inline bool
ParticleAtlas<PointIndexGridType>::Iterator::next()
{
    // Advance one element; report whether the iterator still points at a
    // valid index afterwards.
    const bool active = this->test();
    if (active) {
        this->increment();
        return this->test();
    }
    return false;
}
template<typename PointIndexGridType>
inline size_t
ParticleAtlas<PointIndexGridType>::Iterator::size() const
{
    // Total element count: the filtered-index array plus the sum of the
    // lengths of all stored ranges.
    size_t total = mIndexArraySize;
    for (const Range& range : mRangeList) {
        total += range.second - range.first;
    }
    return total;
}
template<typename PointIndexGridType>
inline void
ParticleAtlas<PointIndexGridType>::Iterator::clear()
{
    // Drop all stored state: the active range, the range deque and the
    // filtered-index array.
    mRange = Range(static_cast<IndexType*>(nullptr),
                   static_cast<IndexType*>(nullptr));
    mRangeList.clear();
    mIter = mRangeList.end();
    mIndexArray.reset();
    mIndexArraySize = 0;
}
template<typename PointIndexGridType>
inline void
ParticleAtlas<PointIndexGridType>::Iterator::updateFromLevel(size_t level)
{
    // Populate the iterator with every particle index stored on the given
    // atlas level (clamped to the number of available levels).
    using TreeT = typename PointIndexGridType::TreeType;
    // Bug fix: this alias previously read 'typename TreeType::LeafNodeType',
    // but the alias declared above is named TreeT and no 'TreeType' exists
    // in this scope, so the method failed to compile.
    using LeafNodeType = typename TreeT::LeafNodeType;
    this->clear();
    if (mAccessorListSize > 0) {
        const size_t levelIdx = std::min(mAccessorListSize - 1, level);
        const TreeT& tree = mAtlas->pointIndexGrid(levelIdx).tree();
        std::vector<const LeafNodeType*> nodes;
        tree.getNodes(nodes);
        // Record one contiguous index range per non-empty leaf node.
        for (size_t n = 0, N = nodes.size(); n < N; ++n) {
            const LeafNodeType& node = *nodes[n];
            const size_t numIndices = node.indices().size();
            if (numIndices > 0) {
                const IndexType* begin = &node.indices().front();
                mRangeList.push_back(Range(begin, (begin + numIndices)));
            }
        }
    }
    this->reset();
}
// Populate the iterator with the indices of all particles whose spheres
// overlap the world-space query sphere (center, radius).
template<typename PointIndexGridType>
template<typename ParticleArrayType>
inline void
ParticleAtlas<PointIndexGridType>::Iterator::worldSpaceSearchAndUpdate(
    const Vec3d& center, double radius, const ParticleArrayType& particles)
{
    using PosType = typename ParticleArrayType::PosType;
    using ScalarType = typename ParticleArrayType::ScalarType;
    /////
    this->clear();
    std::deque<IndexType> filteredIndices;
    std::vector<CoordBBox> searchRegions;
    // Half-edge of the largest axis-aligned box inscribed in the query
    // sphere: radius / sqrt(3).
    const double iRadius = radius * double(1.0 / std::sqrt(3.0));
    const Vec3d ibMin(center[0] - iRadius, center[1] - iRadius, center[2] - iRadius);
    const Vec3d ibMax(center[0] + iRadius, center[1] + iRadius, center[2] + iRadius);
    // Axis-aligned bounding box of the query sphere.
    const Vec3d bMin(center[0] - radius, center[1] - radius, center[2] - radius);
    const Vec3d bMax(center[0] + radius, center[1] + radius, center[2] + radius);
    const PosType pos = PosType(center);
    const ScalarType dist = ScalarType(radius);
    // Process every atlas level with its own transform and accessor.
    for (size_t n = 0, N = mAccessorListSize; n < N; ++n) {
        const double maxRadius = mAtlas->maxRadius(n);
        const bool uniformRadius = math::isApproxEqual(mAtlas->minRadius(n), maxRadius);
        const openvdb::math::Transform& xform = mAtlas->pointIndexGrid(n).transform();
        ConstAccessor& acc = *mAccessorList[n];
        openvdb::CoordBBox inscribedRegion(
            xform.worldToIndexCellCentered(ibMin),
            xform.worldToIndexCellCentered(ibMax));
        inscribedRegion.expand(-1); // erode by one voxel
        // collect indices that don't need to be tested
        point_index_grid_internal::pointIndexSearch(mRangeList, acc, inscribedRegion);
        searchRegions.clear();
        // Outer search region, padded by this level's maximum particle
        // radius so that large particles outside the sphere's bbox can
        // still be found.
        const openvdb::CoordBBox region(
            xform.worldToIndexCellCentered(bMin - maxRadius),
            xform.worldToIndexCellCentered(bMax + maxRadius));
        inscribedRegion.expand(1); // undo the erosion before carving out the exclusive shell
        point_index_grid_internal::constructExclusiveRegions(
            searchRegions, region, inscribedRegion);
        // Particles in the shell regions must be distance-tested individually.
        using FilterType = particle_atlas_internal::RadialRangeFilter<ParticleArrayType, IndexType>;
        FilterType filter(mRangeList, filteredIndices, pos, dist, particles, uniformRadius);
        for (size_t i = 0, I = searchRegions.size(); i < I; ++i) {
            point_index_grid_internal::filteredPointIndexSearch(filter, acc, searchRegions[i]);
        }
    }
    point_index_grid_internal::dequeToArray(filteredIndices, mIndexArray, mIndexArraySize);
    this->reset();
}
// Populate the iterator with the indices of all particles whose spheres
// overlap the world-space query box @a bbox.
template<typename PointIndexGridType>
template<typename ParticleArrayType>
inline void
ParticleAtlas<PointIndexGridType>::Iterator::worldSpaceSearchAndUpdate(
    const BBoxd& bbox, const ParticleArrayType& particles)
{
    this->clear();
    std::deque<IndexType> filteredIndices;
    std::vector<CoordBBox> searchRegions;
    // Process every atlas level with its own transform and accessor.
    for (size_t n = 0, N = mAccessorListSize; n < N; ++n) {
        const double maxRadius = mAtlas->maxRadius(n);
        const bool uniformRadius = math::isApproxEqual(mAtlas->minRadius(n), maxRadius);
        const openvdb::math::Transform& xform = mAtlas->pointIndexGrid(n).transform();
        ConstAccessor& acc = *mAccessorList[n];
        openvdb::CoordBBox inscribedRegion(
            xform.worldToIndexCellCentered(bbox.min()),
            xform.worldToIndexCellCentered(bbox.max()));
        inscribedRegion.expand(-1); // erode by one voxel
        // collect indices that don't need to be tested
        point_index_grid_internal::pointIndexSearch(mRangeList, acc, inscribedRegion);
        searchRegions.clear();
        // Outer search region, padded by this level's maximum particle
        // radius so large particles near the box are not missed.
        const openvdb::CoordBBox region(
            xform.worldToIndexCellCentered(bbox.min() - maxRadius),
            xform.worldToIndexCellCentered(bbox.max() + maxRadius));
        inscribedRegion.expand(1); // undo the erosion before carving out the exclusive shell
        point_index_grid_internal::constructExclusiveRegions(
            searchRegions, region, inscribedRegion);
        // Particles in the shell regions must be tested individually.
        using FilterType = particle_atlas_internal::BBoxFilter<ParticleArrayType, IndexType>;
        FilterType filter(mRangeList, filteredIndices, bbox, particles, uniformRadius);
        for (size_t i = 0, I = searchRegions.size(); i < I; ++i) {
            point_index_grid_internal::filteredPointIndexSearch(filter, acc, searchRegions[i]);
        }
    }
    point_index_grid_internal::dequeToArray(filteredIndices, mIndexArray, mIndexArraySize);
    this->reset();
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_PARTICLE_ATLAS_HAS_BEEN_INCLUDED
| 34,282 | C | 32.155706 | 100 | 0.634065 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/RayIntersector.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
///
/// @file RayIntersector.h
///
/// @author Ken Museth
///
/// @brief Accelerated intersection of a ray with a narrow-band level
/// set or a generic (e.g. density) volume. This will of course be
/// useful for respectively surface and volume rendering.
///
/// @details This file defines two main classes,
/// LevelSetRayIntersector and VolumeRayIntersector, as well as the
/// three support classes LevelSetHDDA, VolumeHDDA and LinearSearchImpl.
/// The LevelSetRayIntersector is templated on the LinearSearchImpl class
/// and calls instances of the LevelSetHDDA class. The reason to split
/// level set ray intersection into three classes is twofold. First
/// LevelSetRayIntersector defines the public API for client code and
/// LinearSearchImpl defines the actual algorithm used for the
/// ray level-set intersection. In other words this design will allow
/// for the public API to be fixed while the intersection algorithm
/// can change without resolving to (slow) virtual methods. Second,
/// LevelSetHDDA, which implements a hierarchical Differential Digital
/// Analyzer, relies on partial template specialization, so it has to
/// be a standalone class (as opposed to a member class of
/// LevelSetRayIntersector). The VolumeRayIntersector is conceptually
/// much simpler than the LevelSetRayIntersector, and hence it only
/// depends on VolumeHDDA that implements the hierarchical
/// Differential Digital Analyzer.
#ifndef OPENVDB_TOOLS_RAYINTERSECTOR_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_RAYINTERSECTOR_HAS_BEEN_INCLUDED
#include <openvdb/math/DDA.h>
#include <openvdb/math/Math.h>
#include <openvdb/math/Ray.h>
#include <openvdb/math/Stencils.h>
#include <openvdb/Grid.h>
#include <openvdb/Types.h>
#include "Morphology.h"
#include <iostream>
#include <type_traits>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
// Helper class that implements the actual search of the zero-crossing
// of the level set along the direction of a ray. This particular
// implementation uses iterative linear search.
template<typename GridT, int Iterations = 0, typename RealT = double>
class LinearSearchImpl;
///////////////////////////////////// LevelSetRayIntersector /////////////////////////////////////
/// @brief This class provides the public API for intersecting a ray
/// with a narrow-band level set.
///
/// @details It wraps a SearchImplT with a simple public API and
/// performs the actual hierarchical tree node and voxel traversal.
///
/// @warning Use the (default) copy-constructor to make sure each
/// computational thread has their own instance of this class. This is
/// important since the SearchImplT contains a ValueAccessor that is
/// not thread-safe. However copying is very efficient.
///
/// @see tools/RayTracer.h for examples of intended usage.
///
/// @todo Add TrilinearSearchImpl, as an alternative to LinearSearchImpl,
/// that performs analytical 3D trilinear intersection tests, i.e., solves
/// cubic equations. This is slower but also more accurate than the 1D
/// linear interpolation in LinearSearchImpl.
template<typename GridT,
         typename SearchImplT = LinearSearchImpl<GridT>,
         int NodeLevel = GridT::TreeType::RootNodeType::ChildNodeType::LEVEL,
         typename RayT = math::Ray<Real> >
class LevelSetRayIntersector
{
public:
    using GridType = GridT;
    using RayType = RayT;
    using RealType = typename RayT::RealType;
    using Vec3Type = typename RayT::Vec3T;
    using ValueT = typename GridT::ValueType;
    using TreeT = typename GridT::TreeType;

    static_assert(NodeLevel >= -1 && NodeLevel < int(TreeT::DEPTH)-1, "NodeLevel out of range");
    static_assert(std::is_floating_point<ValueT>::value,
        "level set grids must have scalar, floating-point value types");

    /// @brief Constructor
    /// @param grid level set grid to intersect rays against.
    /// @param isoValue optional iso-value for the ray-intersection.
    /// @throw RuntimeError if the grid has non-uniform voxels or is not
    /// classified as a level set.
    LevelSetRayIntersector(const GridT& grid, const ValueT& isoValue = zeroVal<ValueT>())
        : mTester(grid, isoValue)
    {
        if (!grid.hasUniformVoxels() ) {
            OPENVDB_THROW(RuntimeError,
                          "LevelSetRayIntersector only supports uniform voxels!");
        }
        if (grid.getGridClass() != GRID_LEVEL_SET) {
            OPENVDB_THROW(RuntimeError,
                          "LevelSetRayIntersector only supports level sets!"
                          "\nUse Grid::setGridClass(openvdb::GRID_LEVEL_SET)");
        }
    }

    /// @brief Return the iso-value used for ray-intersections
    const ValueT& getIsoValue() const { return mTester.getIsoValue(); }

    /// @brief Return @c true if the index-space ray intersects the level set.
    /// @param iRay ray represented in index space.
    bool intersectsIS(const RayType& iRay) const
    {
        if (!mTester.setIndexRay(iRay)) return false;//missed bbox
        return math::LevelSetHDDA<TreeT, NodeLevel>::test(mTester);
    }

    /// @brief Return @c true if the index-space ray intersects the level set
    /// @param iRay ray represented in index space.
    /// @param iTime if an intersection was found it is assigned the time of the
    /// intersection along the index ray.
    bool intersectsIS(const RayType& iRay, RealType &iTime) const
    {
        if (!mTester.setIndexRay(iRay)) return false;//missed bbox
        iTime = mTester.getIndexTime();
        return math::LevelSetHDDA<TreeT, NodeLevel>::test(mTester);
    }

    /// @brief Return @c true if the index-space ray intersects the level set.
    /// @param iRay ray represented in index space.
    /// @param xyz if an intersection was found it is assigned the
    /// intersection point in index space, otherwise it is unchanged.
    bool intersectsIS(const RayType& iRay, Vec3Type& xyz) const
    {
        if (!mTester.setIndexRay(iRay)) return false;//missed bbox
        if (!math::LevelSetHDDA<TreeT, NodeLevel>::test(mTester)) return false;//missed level set
        mTester.getIndexPos(xyz);
        return true;
    }

    /// @brief Return @c true if the index-space ray intersects the level set.
    /// @param iRay ray represented in index space.
    /// @param xyz if an intersection was found it is assigned the
    /// intersection point in index space, otherwise it is unchanged.
    /// @param iTime if an intersection was found it is assigned the time of the
    /// intersection along the index ray.
    bool intersectsIS(const RayType& iRay, Vec3Type& xyz, RealType &iTime) const
    {
        if (!mTester.setIndexRay(iRay)) return false;//missed bbox
        if (!math::LevelSetHDDA<TreeT, NodeLevel>::test(mTester)) return false;//missed level set
        mTester.getIndexPos(xyz);
        iTime = mTester.getIndexTime();
        return true;
    }

    /// @brief Return @c true if the world-space ray intersects the level set.
    /// @param wRay ray represented in world space.
    bool intersectsWS(const RayType& wRay) const
    {
        if (!mTester.setWorldRay(wRay)) return false;//missed bbox
        return math::LevelSetHDDA<TreeT, NodeLevel>::test(mTester);
    }

    /// @brief Return @c true if the world-space ray intersects the level set.
    /// @param wRay ray represented in world space.
    /// @param wTime if an intersection was found it is assigned the time of the
    /// intersection along the world ray.
    bool intersectsWS(const RayType& wRay, RealType &wTime) const
    {
        if (!mTester.setWorldRay(wRay)) return false;//missed bbox
        wTime = mTester.getWorldTime();
        return math::LevelSetHDDA<TreeT, NodeLevel>::test(mTester);
    }

    /// @brief Return @c true if the world-space ray intersects the level set.
    /// @param wRay ray represented in world space.
    /// @param world if an intersection was found it is assigned the
    /// intersection point in world space, otherwise it is unchanged
    bool intersectsWS(const RayType& wRay, Vec3Type& world) const
    {
        if (!mTester.setWorldRay(wRay)) return false;//missed bbox
        if (!math::LevelSetHDDA<TreeT, NodeLevel>::test(mTester)) return false;//missed level set
        mTester.getWorldPos(world);
        return true;
    }

    /// @brief Return @c true if the world-space ray intersects the level set.
    /// @param wRay ray represented in world space.
    /// @param world if an intersection was found it is assigned the
    /// intersection point in world space, otherwise it is unchanged.
    /// @param wTime if an intersection was found it is assigned the time of the
    /// intersection along the world ray.
    bool intersectsWS(const RayType& wRay, Vec3Type& world, RealType &wTime) const
    {
        if (!mTester.setWorldRay(wRay)) return false;//missed bbox
        if (!math::LevelSetHDDA<TreeT, NodeLevel>::test(mTester)) return false;//missed level set
        mTester.getWorldPos(world);
        wTime = mTester.getWorldTime();
        return true;
    }

    /// @brief Return @c true if the world-space ray intersects the level set.
    /// @param wRay ray represented in world space.
    /// @param world if an intersection was found it is assigned the
    /// intersection point in world space, otherwise it is unchanged.
    /// @param normal if an intersection was found it is assigned the normal
    /// of the level set surface in world space, otherwise it is unchanged.
    bool intersectsWS(const RayType& wRay, Vec3Type& world, Vec3Type& normal) const
    {
        if (!mTester.setWorldRay(wRay)) return false;//missed bbox
        if (!math::LevelSetHDDA<TreeT, NodeLevel>::test(mTester)) return false;//missed level set
        mTester.getWorldPosAndNml(world, normal);
        return true;
    }

    /// @brief Return @c true if the world-space ray intersects the level set.
    /// @param wRay ray represented in world space.
    /// @param world if an intersection was found it is assigned the
    /// intersection point in world space, otherwise it is unchanged.
    /// @param normal if an intersection was found it is assigned the normal
    /// of the level set surface in world space, otherwise it is unchanged.
    /// @param wTime if an intersection was found it is assigned the time of the
    /// intersection along the world ray.
    bool intersectsWS(const RayType& wRay, Vec3Type& world, Vec3Type& normal, RealType &wTime) const
    {
        if (!mTester.setWorldRay(wRay)) return false;//missed bbox
        if (!math::LevelSetHDDA<TreeT, NodeLevel>::test(mTester)) return false;//missed level set
        mTester.getWorldPosAndNml(world, normal);
        wTime = mTester.getWorldTime();
        return true;
    }

private:
    // mutable so the const intersects* methods can update the tester's
    // internal search state (it caches a non-thread-safe ValueAccessor;
    // see the class-level warning about per-thread copies).
    mutable SearchImplT mTester;

};// LevelSetRayIntersector
////////////////////////////////////// VolumeRayIntersector //////////////////////////////////////
/// @brief This class provides the public API for intersecting a ray
/// with a generic (e.g. density) volume.
/// @details Internally it performs the actual hierarchical tree node traversal.
/// @warning Use the (default) copy-constructor to make sure each
/// computational thread has their own instance of this class. This is
/// important since it contains a ValueAccessor that is
/// not thread-safe and a CoordBBox of the active voxels that should
/// not be re-computed for each thread. However copying is very efficient.
/// @par Example:
/// @code
/// // Create an instance for the master thread
/// VolumeRayIntersector inter(grid);
/// // For each additional thread use the copy constructor. This
/// // amortizes the overhead of computing the bbox of the active voxels!
/// VolumeRayIntersector inter2(inter);
/// // Before each ray-traversal set the index ray.
/// iter.setIndexRay(ray);
/// // or world ray
/// iter.setWorldRay(ray);
/// // Now you can begin the ray-marching using consecutive calls to VolumeRayIntersector::march
/// double t0=0, t1=0;// note the entry and exit times are with respect to the INDEX ray
/// while ( inter.march(t0, t1) ) {
/// // perform line-integration between t0 and t1
/// }}
/// @endcode
template<typename GridT,
         int NodeLevel = GridT::TreeType::RootNodeType::ChildNodeType::LEVEL,
         typename RayT = math::Ray<Real> >
class VolumeRayIntersector
{
public:
    using GridType = GridT;
    using RayType = RayT;
    using RealType = typename RayT::RealType;
    using RootType = typename GridT::TreeType::RootNodeType;
    // Boolean topology copy of the input tree used to accelerate marching.
    using TreeT = tree::Tree<typename RootType::template ValueConverter<bool>::Type>;

    static_assert(NodeLevel >= 0 && NodeLevel < int(TreeT::DEPTH)-1, "NodeLevel out of range");

    /// @brief Grid constructor
    /// @param grid Generic grid to intersect rays against.
    /// @param dilationCount The number of voxel dilations performed
    /// on (a boolean copy of) the input grid. This allows the
    /// intersector to account for the size of interpolation kernels
    /// in client code.
    /// @throw RuntimeError if the voxels of the grid are not uniform
    /// or the grid is empty.
    VolumeRayIntersector(const GridT& grid, int dilationCount = 0)
        : mIsMaster(true)
        , mTree(new TreeT(grid.tree(), false, TopologyCopy()))
        , mGrid(&grid)
        , mAccessor(*mTree)
    {
        if (!grid.hasUniformVoxels() ) {
            OPENVDB_THROW(RuntimeError,
                          "VolumeRayIntersector only supports uniform voxels!");
        }
        if ( grid.empty() ) {
            OPENVDB_THROW(RuntimeError, "LinearSearchImpl does not supports empty grids");
        }

        // Dilate active voxels to better account for the size of interpolation kernels
        tools::dilateVoxels(*mTree, dilationCount);

        mTree->root().evalActiveBoundingBox(mBBox, /*visit individual voxels*/false);

        mBBox.max().offset(1);//padding so the bbox of a node becomes (origin,origin + node_dim)
    }

    /// @brief Grid and BBox constructor
    /// @param grid Generic grid to intersect rays against.
    /// @param bbox The axis-aligned bounding-box in the index space of the grid.
    /// @warning It is assumed that bbox = (min, min + dim) where min denotes
    /// to the smallest grid coordinates and dim are the integer length of the bbox.
    /// @throw RuntimeError if the voxels of the grid are not uniform
    /// or the grid is empty.
    VolumeRayIntersector(const GridT& grid, const math::CoordBBox& bbox)
        : mIsMaster(true)
        , mTree(new TreeT(grid.tree(), false, TopologyCopy()))
        , mGrid(&grid)
        , mAccessor(*mTree)
        , mBBox(bbox)
    {
        if (!grid.hasUniformVoxels() ) {
            OPENVDB_THROW(RuntimeError,
                          "VolumeRayIntersector only supports uniform voxels!");
        }
        if ( grid.empty() ) {
            OPENVDB_THROW(RuntimeError, "LinearSearchImpl does not supports empty grids");
        }
    }

    /// @brief Shallow copy constructor
    /// @warning This copy constructor creates shallow copies of data
    /// members of the instance passed as the argument. For
    /// performance reasons we are not using shared pointers (their
    /// mutex-lock impairs multi-threading).
    VolumeRayIntersector(const VolumeRayIntersector& other)
        : mIsMaster(false)
        , mTree(other.mTree)//shallow copy
        , mGrid(other.mGrid)//shallow copy
        , mAccessor(*mTree)//initialize new (vs deep copy)
        , mRay(other.mRay)//deep copy
        , mTmax(other.mTmax)//deep copy
        , mBBox(other.mBBox)//deep copy
    {
    }

    /// @brief Destructor
    /// @note Only the master instance (the one built from a grid) owns
    /// and deletes the boolean acceleration tree.
    ~VolumeRayIntersector() { if (mIsMaster) delete mTree; }

    /// @brief Return @c false if the index ray misses the bbox of the grid.
    /// @param iRay Ray represented in index space.
    /// @warning Call this method (or setWorldRay) before the ray
    /// traversal starts and use the return value to decide if further
    /// marching is required.
    inline bool setIndexRay(const RayT& iRay)
    {
        mRay = iRay;
        const bool hit = mRay.clip(mBBox);
        if (hit) mTmax = mRay.t1(); // remember the exit time for subsequent march() calls
        return hit;
    }

    /// @brief Return @c false if the world ray misses the bbox of the grid.
    /// @param wRay Ray represented in world space.
    /// @warning Call this method (or setIndexRay) before the ray
    /// traversal starts and use the return value to decide if further
    /// marching is required.
    /// @details Since hit times are computed with respect to the ray
    /// represented in index space of the current grid, it is
    /// recommended that either the client code uses getIndexPos to
    /// compute index position from hit times or alternatively keeps
    /// an instance of the index ray and instead uses setIndexRay to
    /// initialize the ray.
    inline bool setWorldRay(const RayT& wRay)
    {
        return this->setIndexRay(wRay.worldToIndex(*mGrid));
    }

    /// @brief March to the next active span and prepare the ray for the
    /// following call; returns the (possibly invalid) time span hit.
    inline typename RayT::TimeSpan march()
    {
        const typename RayT::TimeSpan t = mHDDA.march(mRay, mAccessor);
        // Nudge past the exit point so the next march starts beyond this span.
        if (t.t1>0) mRay.setTimes(t.t1 + math::Delta<RealType>::value(), mTmax);
        return t;
    }

    /// @brief Return @c true if the ray intersects active values,
    /// i.e. either active voxels or tiles. Only when a hit is
    /// detected are t0 and t1 updated with the corresponding entry
    /// and exit times along the INDEX ray!
    /// @note Note that t0 and t1 are only resolved at the node level
    /// (e.g. a LeafNode with active voxels) as opposed to the individual
    /// active voxels.
    /// @param t0 If the return value > 0 this is the time of the
    /// first hit of an active tile or leaf.
    /// @param t1 If the return value > t0 this is the time of the
    /// first hit (> t0) of an inactive tile or exit point of the
    /// BBOX for the leaf nodes.
    /// @warning t0 and t1 are computed with respect to the ray represented in
    /// index space of the current grid, not world space!
    inline bool march(RealType& t0, RealType& t1)
    {
        const typename RayT::TimeSpan t = this->march();
        t.get(t0, t1);
        return t.valid();
    }

    /// @brief Generates a list of hits along the ray.
    ///
    /// @param list List of hits represented as time spans.
    ///
    /// @note ListType is a list of RayType::TimeSpan and is required to
    /// have the two methods: clear() and push_back(). Thus, it could
    /// be std::vector<typename RayType::TimeSpan> or
    /// std::deque<typename RayType::TimeSpan>.
    template <typename ListType>
    inline void hits(ListType& list)
    {
        mHDDA.hits(mRay, mAccessor, list);
    }

    /// @brief Return the floating-point index position along the
    /// current index ray at the specified time.
    inline Vec3R getIndexPos(RealType time) const { return mRay(time); }

    /// @brief Return the floating-point world position along the
    /// current index ray at the specified time.
    inline Vec3R getWorldPos(RealType time) const { return mGrid->indexToWorld(mRay(time)); }

    /// @brief Convert a time along the index ray to the corresponding
    /// time along the world ray (scaled by the transform's Jacobian).
    inline RealType getWorldTime(RealType time) const
    {
        return time*mGrid->transform().baseMap()->applyJacobian(mRay.dir()).length();
    }

    /// @brief Return a const reference to the input grid.
    const GridT& grid() const { return *mGrid; }

    /// @brief Return a const reference to the (potentially dilated)
    /// bool tree used to accelerate the ray marching.
    const TreeT& tree() const { return *mTree; }

    /// @brief Return a const reference to the BBOX of the grid
    const math::CoordBBox& bbox() const { return mBBox; }

    /// @brief Print bbox, statistics, memory usage and other information.
    /// @param os a stream to which to write textual information
    /// @param verboseLevel 1: print bbox only; 2: include boolean tree
    /// statistics; 3: include memory usage
    void print(std::ostream& os = std::cout, int verboseLevel = 1)
    {
        if (verboseLevel>0) {
            os << "BBox: " << mBBox << std::endl;
            if (verboseLevel==2) {
                mTree->print(os, 1);
            } else if (verboseLevel>2) {
                mTree->print(os, 2);
            }
        }
    }

private:
    using AccessorT = typename tree::ValueAccessor<const TreeT,/*IsSafe=*/false>;

    const bool      mIsMaster;  // true only for the instance that owns mTree
    TreeT*          mTree;      // boolean topology tree (shared across shallow copies)
    const GridT*    mGrid;      // input grid (not owned)
    AccessorT       mAccessor;  // per-instance accessor (not thread-safe, hence per-copy)
    RayT            mRay;       // current index-space ray, advanced by march()
    RealType        mTmax;      // exit time of the clipped index ray
    math::CoordBBox mBBox;      // index-space bbox of the active values (padded)
    math::VolumeHDDA<TreeT, RayType, NodeLevel> mHDDA;

};// VolumeRayIntersector
//////////////////////////////////////// LinearSearchImpl ////////////////////////////////////////
/// @brief Implements linear iterative search for an iso-value of
/// the level set along the direction of the ray.
///
/// @note Since this class is used internally in
/// LevelSetRayIntersector (define above) and LevelSetHDDA (defined below)
/// client code should never interact directly with its API. This also
/// explains why we are not concerned with the fact that several of
/// its methods are unsafe to call unless roots were already detected.
///
/// @details It is approximate due to the limited number of iterations
/// which can can be defined with a template parameter. However the default value
/// has proven surprisingly accurate and fast. In fact more iterations
/// are not guaranteed to give significantly better results.
///
/// @warning Since the root-searching algorithm is approximate
/// (first-order) it is possible to miss intersections if the
/// iso-value is too close to the inside or outside of the narrow
/// band (typically a distance less than a voxel unit).
///
/// @warning Since this class internally stores a ValueAccessor it is NOT thread-safe,
/// so make sure to give each thread its own instance. This of course also means that
/// the cost of allocating an instance should (if possible) be amortized over
/// as many ray intersections as possible.
template<typename GridT, int Iterations, typename RealT>
class LinearSearchImpl
{
public:
using RayT = math::Ray<RealT>;
using VecT = math::Vec3<RealT>;
using ValueT = typename GridT::ValueType;
using AccessorT = typename GridT::ConstAccessor;
using StencilT = math::BoxStencil<GridT>;
    /// @brief Constructor from a grid.
    /// @param grid narrow-band level set grid to search.
    /// @param isoValue iso-contour to intersect against (default zero).
    /// @throw RuntimeError if the grid is empty.
    /// @throw ValueError if the isoValue is not inside the narrow-band.
    LinearSearchImpl(const GridT& grid, const ValueT& isoValue = zeroVal<ValueT>())
        : mStencil(grid),
          mIsoValue(isoValue),
          // Acceptance window of +/- two voxel units around the iso-value
          // (voxels are uniform, so the first component of voxelSize suffices).
          mMinValue(isoValue - ValueT(2 * grid.voxelSize()[0])),
          mMaxValue(isoValue + ValueT(2 * grid.voxelSize()[0]))
      {
          if ( grid.empty() ) {
              OPENVDB_THROW(RuntimeError, "LinearSearchImpl does not supports empty grids");
          }
          if (mIsoValue<= -grid.background() ||
              mIsoValue>= grid.background() ){
              OPENVDB_THROW(ValueError, "The iso-value must be inside the narrow-band!");
          }
          // Cache the active bbox once; rays are clipped against it.
          grid.tree().root().evalActiveBoundingBox(mBBox, /*visit individual voxels*/false);
      }
/// @brief Return the iso-value used for ray-intersections
const ValueT& getIsoValue() const { return mIsoValue; }
/// @brief Return @c false if the ray misses the bbox of the grid.
/// @param iRay Ray represented in index space.
/// @warning Call this method before the ray traversal starts.
inline bool setIndexRay(const RayT& iRay)
{
mRay = iRay;
return mRay.clip(mBBox);//did it hit the bbox
}
/// @brief Return @c false if the ray misses the bbox of the grid.
/// @param wRay Ray represented in world space.
/// @warning Call this method before the ray traversal starts.
inline bool setWorldRay(const RayT& wRay)
{
mRay = wRay.worldToIndex(mStencil.grid());
return mRay.clip(mBBox);//did it hit the bbox
}
/// @brief Get the intersection point in index space.
/// @param xyz The position in index space of the intersection.
inline void getIndexPos(VecT& xyz) const { xyz = mRay(mTime); }
/// @brief Get the intersection point in world space.
/// @param xyz The position in world space of the intersection.
inline void getWorldPos(VecT& xyz) const { xyz = mStencil.grid().indexToWorld(mRay(mTime)); }
/// @brief Get the intersection point and normal in world space
/// @param xyz The position in world space of the intersection.
/// @param nml The surface normal in world space of the intersection.
inline void getWorldPosAndNml(VecT& xyz, VecT& nml)
{
this->getIndexPos(xyz);
mStencil.moveTo(xyz);
nml = mStencil.gradient(xyz);
nml.normalize();
xyz = mStencil.grid().indexToWorld(xyz);
}
    /// @brief Return the time of intersection along the index ray.
    /// @warning Only valid after operator() has reported an intersection.
    inline RealT getIndexTime() const { return mTime; }
/// @brief Return the time of intersection along the world ray.
inline RealT getWorldTime() const
{
return mTime*mStencil.grid().transform().baseMap()->applyJacobian(mRay.dir()).length();
}
private:
    /// @brief Initiate the local voxel intersection test.
    /// @param t0 Ray time at which the bracketing interval starts.
    /// @warning Make sure to call this method before the local voxel intersection test.
    inline void init(RealT t0)
    {
        mT[0] = t0;
        // Cache the interpolated, iso-shifted value at t0 as the lower bracket sample.
        mV[0] = static_cast<ValueT>(this->interpValue(t0));
    }
    /// @brief Restrict the ray to the time interval [t0, t1].
    inline void setRange(RealT t0, RealT t1) { mRay.setTimes(t0, t1); }
    /// @brief Return a const reference to the (index-space) ray.
    inline const RayT& ray() const { return mRay; }
/// @brief Return true if a node of the specified type exists at ijk.
template <typename NodeT>
inline bool hasNode(const Coord& ijk)
{
return mStencil.accessor().template probeConstNode<NodeT>(ijk) != nullptr;
}
    /// @brief Return @c true if an intersection is detected.
    /// @param ijk Grid coordinate of the node origin or voxel being tested.
    /// @param time Time along the index ray being tested.
    /// @warning Only if an intersection is detected is it safe to
    /// call getIndexPos, getWorldPos and getWorldPosAndNml!
    inline bool operator()(const Coord& ijk, RealT time)
    {
        ValueT V;
        if (mStencil.accessor().probeValue(ijk, V) &&//within narrow band
            V>mMinValue && V<mMaxValue) {// and close to iso-value?
            // Take a new sample at 'time'; (mT[0],mV[0]) holds the previous sample.
            mT[1] = time;
            mV[1] = static_cast<ValueT>(this->interpValue(time));
            if (math::ZeroCrossing(mV[0], mV[1])) {
                // A sign change is bracketed: estimate the crossing time by
                // linear interpolation, then optionally refine it below.
                mTime = this->interpTime();
                OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
                // Each refinement pass keeps the half of the interval that
                // still brackets the zero crossing and re-interpolates.
                for (int n=0; Iterations>0 && n<Iterations; ++n) {//resolved at compile-time
                    V = static_cast<ValueT>(this->interpValue(mTime));
                    const int m = math::ZeroCrossing(mV[0], V) ? 1 : 0;
                    mV[m] = V;
                    mT[m] = mTime;
                    mTime = this->interpTime();
                }
                OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
                return true;
            }
            // No crossing yet: slide the bracket forward to the new sample.
            mT[0] = mT[1];
            mV[0] = mV[1];
        }
        return false;
    }
inline RealT interpTime()
{
assert( math::isApproxLarger(mT[1], mT[0], RealT(1e-6) ) );
return mT[0]+(mT[1]-mT[0])*mV[0]/(mV[0]-mV[1]);
}
inline RealT interpValue(RealT time)
{
const VecT pos = mRay(time);
mStencil.moveTo(pos);
return mStencil.interpolation(pos) - mIsoValue;
}
    template<typename, int> friend struct math::LevelSetHDDA;
    RayT mRay;// ray in index space, clipped against mBBox
    StencilT mStencil;// box stencil used for interpolation and gradients
    RealT mTime;//time of intersection
    ValueT mV[2];// iso-shifted sampled values bracketing the zero crossing
    RealT mT[2];// ray times bracketing the zero crossing
    const ValueT mIsoValue, mMinValue, mMaxValue;// iso level and voxel acceptance band
    math::CoordBBox mBBox;// bounding box of the grid's active values
};// LinearSearchImpl
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_RAYINTERSECTOR_HAS_BEEN_INCLUDED
| 28,299 | C | 41.113095 | 100 | 0.655536 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/ValueTransformer.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file ValueTransformer.h
///
/// @author Peter Cucka
///
/// tools::foreach() and tools::transformValues() transform the values in a grid
/// by iterating over the grid with a user-supplied iterator and applying a
/// user-supplied functor at each step of the iteration. With tools::foreach(),
/// the transformation is done in-place on the input grid, whereas with
/// tools::transformValues(), transformed values are written to an output grid
/// (which can, for example, have a different value type than the input grid).
/// Both functions can optionally transform multiple values of the grid in parallel.
///
/// tools::accumulate() can be used to accumulate the results of applying a functor
/// at each step of a grid iteration. (The functor is responsible for storing and
/// updating intermediate results.) When the iteration is done serially the behavior is
/// the same as with tools::foreach(), but when multiple values are processed in parallel,
/// an additional step is performed: when any two threads finish processing,
/// @c op.join(otherOp) is called on one thread's functor to allow it to coalesce
/// its intermediate result with the other thread's.
///
/// Finally, tools::setValueOnMin(), tools::setValueOnMax(), tools::setValueOnSum()
/// and tools::setValueOnMult() are wrappers around Tree::modifyValue() (or
/// ValueAccessor::modifyValue()) for some commmon in-place operations.
/// These are typically significantly faster than calling getValue() followed by setValue().
#ifndef OPENVDB_TOOLS_VALUETRANSFORMER_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_VALUETRANSFORMER_HAS_BEEN_INCLUDED
#include <algorithm> // for std::min(), std::max()
#include <tbb/parallel_for.h>
#include <tbb/parallel_reduce.h>
#include <openvdb/Types.h>
#include <openvdb/Grid.h>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// Iterate over a grid and at each step call @c op(iter).
/// @param iter an iterator over a grid or its tree (@c Grid::ValueOnCIter,
/// @c Tree::NodeIter, etc.)
/// @param op a functor of the form <tt>void op(const IterT&)</tt>, where @c IterT is
/// the type of @a iter
/// @param threaded if true, transform multiple values of the grid in parallel
/// @param shareOp if true and @a threaded is true, all threads use the same functor;
/// otherwise, each thread gets its own copy of the @e original functor
///
/// @par Example:
/// Multiply all values (both set and unset) of a scalar, floating-point grid by two.
/// @code
/// struct Local {
/// static inline void op(const FloatGrid::ValueAllIter& iter) {
/// iter.setValue(*iter * 2);
/// }
/// };
/// FloatGrid grid = ...;
/// tools::foreach(grid.beginValueAll(), Local::op);
/// @endcode
///
/// @par Example:
/// Rotate all active vectors of a vector grid by 45 degrees about the y axis.
/// @code
/// namespace {
/// struct MatMul {
/// math::Mat3s M;
/// MatMul(const math::Mat3s& mat): M(mat) {}
/// inline void operator()(const VectorGrid::ValueOnIter& iter) const {
/// iter.setValue(M.transform(*iter));
/// }
/// };
/// }
/// {
/// VectorGrid grid = ...;
/// tools::foreach(grid.beginValueOn(),
/// MatMul(math::rotation<math::Mat3s>(math::Y, M_PI_4)));
/// }
/// @endcode
///
/// @note For more complex operations that require finer control over threading,
/// consider using @c tbb::parallel_for() or @c tbb::parallel_reduce() in conjunction
/// with a tree::IteratorRange that wraps a grid or tree iterator.
template<typename IterT, typename XformOp>
inline void foreach(const IterT& iter, XformOp& op,
bool threaded = true, bool shareOp = true);
template<typename IterT, typename XformOp>
inline void foreach(const IterT& iter, const XformOp& op,
bool threaded = true, bool shareOp = true);
/// Iterate over a grid and at each step call <tt>op(iter, accessor)</tt> to
/// populate (via the accessor) the given output grid, whose @c ValueType
/// need not be the same as the input grid's.
/// @param inIter a non-<tt>const</tt> or (preferably) @c const iterator over an
/// input grid or its tree (@c Grid::ValueOnCIter, @c Tree::NodeIter, etc.)
/// @param outGrid an empty grid to be populated
/// @param op a functor of the form
/// <tt>void op(const InIterT&, OutGridT::ValueAccessor&)</tt>,
/// where @c InIterT is the type of @a inIter
/// @param threaded if true, transform multiple values of the input grid in parallel
/// @param shareOp if true and @a threaded is true, all threads use the same functor;
/// otherwise, each thread gets its own copy of the @e original functor
/// @param merge how to merge intermediate results from multiple threads (see Types.h)
///
/// @par Example:
/// Populate a scalar floating-point grid with the lengths of the vectors from all
/// active voxels of a vector-valued input grid.
/// @code
/// struct Local {
/// static void op(
/// const Vec3fGrid::ValueOnCIter& iter,
/// FloatGrid::ValueAccessor& accessor)
/// {
/// if (iter.isVoxelValue()) { // set a single voxel
/// accessor.setValue(iter.getCoord(), iter->length());
/// } else { // fill an entire tile
/// CoordBBox bbox;
/// iter.getBoundingBox(bbox);
/// accessor.getTree()->fill(bbox, iter->length());
/// }
/// }
/// };
/// Vec3fGrid inGrid = ...;
/// FloatGrid outGrid;
/// tools::transformValues(inGrid.cbeginValueOn(), outGrid, Local::op);
/// @endcode
///
/// @note For more complex operations that require finer control over threading,
/// consider using @c tbb::parallel_for() or @c tbb::parallel_reduce() in conjunction
/// with a tree::IteratorRange that wraps a grid or tree iterator.
template<typename InIterT, typename OutGridT, typename XformOp>
inline void transformValues(const InIterT& inIter, OutGridT& outGrid,
XformOp& op, bool threaded = true, bool shareOp = true,
MergePolicy merge = MERGE_ACTIVE_STATES);
#ifndef _MSC_VER
template<typename InIterT, typename OutGridT, typename XformOp>
inline void transformValues(const InIterT& inIter, OutGridT& outGrid,
const XformOp& op, bool threaded = true, bool shareOp = true,
MergePolicy merge = MERGE_ACTIVE_STATES);
#endif
/// Iterate over a grid and at each step call @c op(iter). If threading is enabled,
/// call @c op.join(otherOp) to accumulate intermediate results from pairs of threads.
/// @param iter an iterator over a grid or its tree (@c Grid::ValueOnCIter,
/// @c Tree::NodeIter, etc.)
/// @param op a functor with a join method of the form <tt>void join(XformOp&)</tt>
/// and a call method of the form <tt>void op(const IterT&)</tt>,
/// where @c IterT is the type of @a iter
/// @param threaded if true, transform multiple values of the grid in parallel
/// @note If @a threaded is true, each thread gets its own copy of the @e original functor.
/// The order in which threads are joined is unspecified.
/// @note If @a threaded is false, the join method is never called.
///
/// @par Example:
/// Compute the average of the active values of a scalar, floating-point grid
/// using the math::Stats class.
/// @code
/// namespace {
/// struct Average {
/// math::Stats stats;
///
/// // Accumulate voxel and tile values into this functor's Stats object.
/// inline void operator()(const FloatGrid::ValueOnCIter& iter) {
/// if (iter.isVoxelValue()) stats.add(*iter);
/// else stats.add(*iter, iter.getVoxelCount());
/// }
///
/// // Accumulate another functor's Stats object into this functor's.
/// inline void join(Average& other) { stats.add(other.stats); }
///
/// // Return the cumulative result.
/// inline double average() const { return stats.mean(); }
/// };
/// }
/// {
/// FloatGrid grid = ...;
/// Average op;
/// tools::accumulate(grid.cbeginValueOn(), op);
/// double average = op.average();
/// }
/// @endcode
///
/// @note For more complex operations that require finer control over threading,
/// consider using @c tbb::parallel_for() or @c tbb::parallel_reduce() in conjunction
/// with a tree::IteratorRange that wraps a grid or tree iterator.
template<typename IterT, typename XformOp>
inline void accumulate(const IterT& iter, XformOp& op, bool threaded = true);
/// @brief Set the value of the voxel at the given coordinates in @a tree to
/// the minimum of its current value and @a value, and mark the voxel as active.
/// @details This is typically significantly faster than calling getValue()
/// followed by setValueOn().
/// @note @a TreeT can be either a Tree or a ValueAccessor.
template<typename TreeT>
inline void setValueOnMin(TreeT& tree, const Coord& xyz, const typename TreeT::ValueType& value);
/// @brief Set the value of the voxel at the given coordinates in @a tree to
/// the maximum of its current value and @a value, and mark the voxel as active.
/// @details This is typically significantly faster than calling getValue()
/// followed by setValueOn().
/// @note @a TreeT can be either a Tree or a ValueAccessor.
template<typename TreeT>
inline void setValueOnMax(TreeT& tree, const Coord& xyz, const typename TreeT::ValueType& value);
/// @brief Set the value of the voxel at the given coordinates in @a tree to
/// the sum of its current value and @a value, and mark the voxel as active.
/// @details This is typically significantly faster than calling getValue()
/// followed by setValueOn().
/// @note @a TreeT can be either a Tree or a ValueAccessor.
template<typename TreeT>
inline void setValueOnSum(TreeT& tree, const Coord& xyz, const typename TreeT::ValueType& value);
/// @brief Set the value of the voxel at the given coordinates in @a tree to
/// the product of its current value and @a value, and mark the voxel as active.
/// @details This is typically significantly faster than calling getValue()
/// followed by setValueOn().
/// @note @a TreeT can be either a Tree or a ValueAccessor.
template<typename TreeT>
inline void setValueOnMult(TreeT& tree, const Coord& xyz, const typename TreeT::ValueType& value);
////////////////////////////////////////
namespace valxform {
template<typename ValueType>
struct MinOp {
    const ValueType val;
    MinOp(const ValueType& v): val(v) {}
    /// In-place minimum: replace @a v with @c val whenever @c val compares smaller.
    inline void operator()(ValueType& v) const {
        if (val < v) v = val;
    }
};
template<typename ValueType>
struct MaxOp {
    const ValueType val;
    MaxOp(const ValueType& v): val(v) {}
    /// In-place maximum: replace @a v with @c val whenever @a v compares smaller.
    inline void operator()(ValueType& v) const {
        if (v < val) v = val;
    }
};
/// @brief In-place sum: add a fixed addend onto the visited value.
template<typename ValueType>
struct SumOp {
    const ValueType val;// addend, fixed at construction
    SumOp(const ValueType& v): val(v) {}
    inline void operator()(ValueType& v) const { v += val; }
};
/// @brief Specialization for bool: "summing" is a logical OR.
template<>
struct SumOp<bool> {
    using ValueType = bool;
    const ValueType val;
    SumOp(const ValueType& v): val(v) {}
    inline void operator()(ValueType& v) const {
        if (val) v = true;
    }
};
/// @brief In-place product: multiply the visited value by a fixed factor.
template<typename ValueType>
struct MultOp {
    const ValueType val;// factor, fixed at construction
    MultOp(const ValueType& v): val(v) {}
    inline void operator()(ValueType& v) const { v *= val; }
};
}
template<typename TreeT>
inline void
setValueOnMin(TreeT& tree, const Coord& xyz, const typename TreeT::ValueType& value)
{
tree.modifyValue(xyz, valxform::MinOp<typename TreeT::ValueType>(value));
}
template<typename TreeT>
inline void
setValueOnMax(TreeT& tree, const Coord& xyz, const typename TreeT::ValueType& value)
{
tree.modifyValue(xyz, valxform::MaxOp<typename TreeT::ValueType>(value));
}
template<typename TreeT>
inline void
setValueOnSum(TreeT& tree, const Coord& xyz, const typename TreeT::ValueType& value)
{
tree.modifyValue(xyz, valxform::SumOp<typename TreeT::ValueType>(value));
}
template<typename TreeT>
inline void
setValueOnMult(TreeT& tree, const Coord& xyz, const typename TreeT::ValueType& value)
{
tree.modifyValue(xyz, valxform::MultOp<typename TreeT::ValueType>(value));
}
////////////////////////////////////////
namespace valxform {
/// @brief Applies a single, shared functor to every element of an iterator range.
template<typename IterT, typename OpT>
class SharedOpApplier
{
public:
    using IterRange = typename tree::IteratorRange<IterT>;

    /// @brief Store the iterator and a reference to the (shared) functor.
    SharedOpApplier(const IterT& iter, OpT& op): mIter(iter), mOp(op) {}

    /// @brief Apply the functor over the whole range, in parallel if requested.
    void process(bool threaded = true)
    {
        IterRange range(mIter);
        if (!threaded) {
            (*this)(range);
        } else {
            tbb::parallel_for(range, *this);
        }
    }

    /// @brief Invoke the shared functor on each iterator position in @a r.
    void operator()(IterRange& r) const
    {
        while (r) {
            mOp(r.iterator());
            ++r;
        }
    }

private:
    IterT mIter;
    OpT& mOp;// reference: every thread shares this one functor
};
/// @brief Applies a per-thread copy of a functor to every element of an iterator range.
/// @details Each copy of this task is constructed from the *original* functor
/// (via mOrigOp), not from another task's possibly-modified copy.
template<typename IterT, typename OpT>
class CopyableOpApplier
{
public:
    using IterRange = typename tree::IteratorRange<IterT>;
    CopyableOpApplier(const IterT& iter, const OpT& op): mIter(iter), mOp(op), mOrigOp(&op) {}
    // When splitting this task, give the subtask a copy of the original functor,
    // not of this task's functor, which might have been modified arbitrarily.
    CopyableOpApplier(const CopyableOpApplier& other):
        mIter(other.mIter), mOp(*other.mOrigOp), mOrigOp(other.mOrigOp) {}
    /// @brief Apply the functor over the whole range, in parallel if requested.
    void process(bool threaded = true)
    {
        IterRange range(mIter);
        if (threaded) {
            tbb::parallel_for(range, *this);
        } else {
            (*this)(range);
        }
    }
    /// @brief Invoke this task's copy of the functor on each iterator position in @a r.
    void operator()(IterRange& r) const { for ( ; r; ++r) mOp(r.iterator()); }
private:
    IterT mIter;
    OpT mOp; // copy of original functor
    OpT const * const mOrigOp; // pointer to original functor
};
} // namespace valxform
template<typename IterT, typename XformOp>
inline void
foreach(const IterT& iter, XformOp& op, bool threaded, bool shared)
{
    if (!shared) {
        // Unshared: each thread applies its own copy of the original functor.
        valxform::CopyableOpApplier<IterT, XformOp> proc(iter, op);
        proc.process(threaded);
    } else {
        // Shared: every thread invokes the same functor instance.
        valxform::SharedOpApplier<IterT, XformOp> proc(iter, op);
        proc.process(threaded);
    }
}
template<typename IterT, typename XformOp>
inline void
foreach(const IterT& iter, const XformOp& op, bool threaded, bool /*shared*/)
{
    // A const functor cannot be mutated, so it is always shared across threads.
    valxform::SharedOpApplier<IterT, const XformOp> proc(iter, op);
    proc.process(threaded);
}
////////////////////////////////////////
namespace valxform {
/// @brief Helper for transformValues() when all threads share one functor.
/// @details The root task writes directly into the caller-supplied output tree;
/// each split-off subtask allocates its own output tree, and trees are merged
/// pairwise in join() according to the merge policy.
template<typename InIterT, typename OutTreeT, typename OpT>
class SharedOpTransformer
{
public:
    using InTreeT = typename InIterT::TreeT;
    using IterRange = typename tree::IteratorRange<InIterT>;
    using OutValueT = typename OutTreeT::ValueType;
    /// @brief Root constructor: the output tree is owned by the caller.
    SharedOpTransformer(const InIterT& inIter, OutTreeT& outTree, OpT& op, MergePolicy merge):
        mIsRoot(true),
        mInputIter(inIter),
        mInputTree(inIter.getTree()),
        mOutputTree(&outTree),
        mOp(op),
        mMergePolicy(merge)
    {
        // In-place transformation is unsupported; warn if input and output are the same tree.
        if (static_cast<const void*>(mInputTree) == static_cast<void*>(mOutputTree)) {
            OPENVDB_LOG_INFO("use tools::foreach(), not transformValues(),"
                " to transform a grid in place");
        }
    }
    /// Splitting constructor: the subtask gets its own, locally-owned output tree.
    SharedOpTransformer(SharedOpTransformer& other, tbb::split):
        mIsRoot(false),
        mInputIter(other.mInputIter),
        mInputTree(other.mInputTree),
        mOutputTree(new OutTreeT(zeroVal<OutValueT>())),
        mOp(other.mOp),
        mMergePolicy(other.mMergePolicy)
    {}
    ~SharedOpTransformer()
    {
        // Delete the output tree only if it was allocated locally
        // (the top-level output tree was supplied by the caller).
        if (!mIsRoot) {
            delete mOutputTree;
            mOutputTree = nullptr;
        }
    }
    /// @brief Transform the whole iteration range, in parallel if requested.
    void process(bool threaded = true)
    {
        if (!mInputTree || !mOutputTree) return;
        IterRange range(mInputIter);
        // Independently transform elements in the iterator range,
        // either in parallel or serially.
        if (threaded) {
            tbb::parallel_reduce(range, *this);
        } else {
            (*this)(range);
        }
    }
    /// Transform each element in the given range.
    void operator()(IterRange& range) const
    {
        if (!mOutputTree) return;
        typename tree::ValueAccessor<OutTreeT> outAccessor(*mOutputTree);
        for ( ; range; ++range) {
            mOp(range.iterator(), outAccessor);
        }
    }
    /// @brief Merge another task's output tree into this task's, per the merge policy.
    void join(const SharedOpTransformer& other)
    {
        if (mOutputTree && other.mOutputTree) {
            mOutputTree->merge(*other.mOutputTree, mMergePolicy);
        }
    }
private:
    bool mIsRoot;// true if mOutputTree is owned by the caller
    InIterT mInputIter;
    const InTreeT* mInputTree;
    OutTreeT* mOutputTree;
    OpT& mOp;// shared by all tasks
    MergePolicy mMergePolicy;
}; // class SharedOpTransformer
/// @brief Helper for transformValues() when each thread uses its own functor copy.
/// @details Like SharedOpTransformer, but each subtask holds a copy of the
/// *original* functor (via mOrigOp) rather than a reference to a shared one.
template<typename InIterT, typename OutTreeT, typename OpT>
class CopyableOpTransformer
{
public:
    using InTreeT = typename InIterT::TreeT;
    using IterRange = typename tree::IteratorRange<InIterT>;
    using OutValueT = typename OutTreeT::ValueType;
    /// @brief Root constructor: the output tree is owned by the caller.
    CopyableOpTransformer(const InIterT& inIter, OutTreeT& outTree,
        const OpT& op, MergePolicy merge):
        mIsRoot(true),
        mInputIter(inIter),
        mInputTree(inIter.getTree()),
        mOutputTree(&outTree),
        mOp(op),
        mOrigOp(&op),
        mMergePolicy(merge)
    {
        // In-place transformation is unsupported; warn if input and output are the same tree.
        if (static_cast<const void*>(mInputTree) == static_cast<void*>(mOutputTree)) {
            OPENVDB_LOG_INFO("use tools::foreach(), not transformValues(),"
                " to transform a grid in place");
        }
    }
    // When splitting this task, give the subtask a copy of the original functor,
    // not of this task's functor, which might have been modified arbitrarily.
    CopyableOpTransformer(CopyableOpTransformer& other, tbb::split):
        mIsRoot(false),
        mInputIter(other.mInputIter),
        mInputTree(other.mInputTree),
        mOutputTree(new OutTreeT(zeroVal<OutValueT>())),
        mOp(*other.mOrigOp),
        mOrigOp(other.mOrigOp),
        mMergePolicy(other.mMergePolicy)
    {}
    ~CopyableOpTransformer()
    {
        // Delete the output tree only if it was allocated locally
        // (the top-level output tree was supplied by the caller).
        if (!mIsRoot) {
            delete mOutputTree;
            mOutputTree = nullptr;
        }
    }
    /// @brief Transform the whole iteration range, in parallel if requested.
    void process(bool threaded = true)
    {
        if (!mInputTree || !mOutputTree) return;
        IterRange range(mInputIter);
        // Independently transform elements in the iterator range,
        // either in parallel or serially.
        if (threaded) {
            tbb::parallel_reduce(range, *this);
        } else {
            (*this)(range);
        }
    }
    /// Transform each element in the given range.
    void operator()(IterRange& range)
    {
        if (!mOutputTree) return;
        typename tree::ValueAccessor<OutTreeT> outAccessor(*mOutputTree);
        for ( ; range; ++range) {
            mOp(range.iterator(), outAccessor);
        }
    }
    /// @brief Merge another task's output tree into this task's, per the merge policy.
    void join(const CopyableOpTransformer& other)
    {
        if (mOutputTree && other.mOutputTree) {
            mOutputTree->merge(*other.mOutputTree, mMergePolicy);
        }
    }
private:
    bool mIsRoot;// true if mOutputTree is owned by the caller
    InIterT mInputIter;
    const InTreeT* mInputTree;
    OutTreeT* mOutputTree;
    OpT mOp; // copy of original functor
    OpT const * const mOrigOp; // pointer to original functor
    MergePolicy mMergePolicy;
}; // class CopyableOpTransformer
} // namespace valxform
////////////////////////////////////////
template<typename InIterT, typename OutGridT, typename XformOp>
inline void
transformValues(const InIterT& inIter, OutGridT& outGrid, XformOp& op,
bool threaded, bool shared, MergePolicy merge)
{
using Adapter = TreeAdapter<OutGridT>;
using OutTreeT = typename Adapter::TreeType;
if (shared) {
using Processor = typename valxform::SharedOpTransformer<InIterT, OutTreeT, XformOp>;
Processor proc(inIter, Adapter::tree(outGrid), op, merge);
proc.process(threaded);
} else {
using Processor = typename valxform::CopyableOpTransformer<InIterT, OutTreeT, XformOp>;
Processor proc(inIter, Adapter::tree(outGrid), op, merge);
proc.process(threaded);
}
}
#ifndef _MSC_VER
template<typename InIterT, typename OutGridT, typename XformOp>
inline void
transformValues(const InIterT& inIter, OutGridT& outGrid, const XformOp& op,
bool threaded, bool /*share*/, MergePolicy merge)
{
using Adapter = TreeAdapter<OutGridT>;
using OutTreeT = typename Adapter::TreeType;
// Const ops are shared across threads, not copied.
using Processor = typename valxform::SharedOpTransformer<InIterT, OutTreeT, const XformOp>;
Processor proc(inIter, Adapter::tree(outGrid), op, merge);
proc.process(threaded);
}
#endif
////////////////////////////////////////
namespace valxform {
/// @brief Reduction helper for tools::accumulate().
/// @details Each subtask runs a private copy of the original functor over part
/// of the range; pairs of results are combined by calling op.join().
template<typename IterT, typename OpT>
class OpAccumulator
{
public:
    using IterRange = typename tree::IteratorRange<IterT>;
    // The root task makes a const copy of the original functor (mOrigOp)
    // and keeps a pointer to the original functor (mOp), which it then modifies.
    // Each subtask keeps a const pointer to the root task's mOrigOp
    // and makes and then modifies a non-const copy (mOp) of it.
    OpAccumulator(const IterT& iter, OpT& op):
        mIsRoot(true),
        mIter(iter),
        mOp(&op),
        mOrigOp(new OpT(op))
    {}
    // When splitting this task, give the subtask a copy of the original functor,
    // not of this task's functor, which might have been modified arbitrarily.
    OpAccumulator(OpAccumulator& other, tbb::split):
        mIsRoot(false),
        mIter(other.mIter),
        mOp(new OpT(*other.mOrigOp)),
        mOrigOp(other.mOrigOp)
    {}
    // The root owns (and frees) the pristine copy; subtasks own their working copies.
    ~OpAccumulator() { if (mIsRoot) delete mOrigOp; else delete mOp; }
    /// @brief Run the accumulation over the whole range, in parallel if requested.
    void process(bool threaded = true)
    {
        IterRange range(mIter);
        if (threaded) {
            tbb::parallel_reduce(range, *this);
        } else {
            (*this)(range);
        }
    }
    /// @brief Apply this task's functor to each iterator position in @a r.
    void operator()(IterRange& r) { for ( ; r; ++r) (*mOp)(r.iterator()); }
    /// @brief Coalesce another task's intermediate result into this one's.
    void join(OpAccumulator& other) { mOp->join(*other.mOp); }
private:
    const bool mIsRoot;
    const IterT mIter;
    OpT* mOp; // pointer to original functor, which might get modified
    OpT const * const mOrigOp; // const copy of original functor
}; // class OpAccumulator
} // namespace valxform
////////////////////////////////////////
template<typename IterT, typename XformOp>
inline void
accumulate(const IterT& iter, XformOp& op, bool threaded)
{
    // OpAccumulator copies @a op per subtask and calls op.join() to combine results.
    typename valxform::OpAccumulator<IterT, XformOp> proc(iter, op);
    proc.process(threaded);
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_VALUETRANSFORMER_HAS_BEEN_INCLUDED
| 23,178 | C | 32.78863 | 98 | 0.654759 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Diagnostics.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
///
/// @file Diagnostics.h
///
/// @author Ken Museth
///
/// @brief Various diagnostic tools to identify potential issues with
/// for example narrow-band level sets or fog volumes
///
#ifndef OPENVDB_TOOLS_DIAGNOSTICS_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_DIAGNOSTICS_HAS_BEEN_INCLUDED
#include <openvdb/Grid.h>
#include <openvdb/math/Math.h>
#include <openvdb/math/Vec3.h>
#include <openvdb/math/Stencils.h>
#include <openvdb/math/Operators.h>
#include <openvdb/tree/LeafManager.h>
#include <tbb/blocked_range.h>
#include <tbb/parallel_reduce.h>
#include <cmath> // for std::isnan(), std::isfinite()
#include <set>
#include <sstream>
#include <string>
#include <type_traits>
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
////////////////////////////////////////////////////////////////////////////////
/// @brief Perform checks on a grid to see if it is a valid symmetric,
/// narrow-band level set.
///
/// @param grid Grid to be checked
/// @param number Number of the checks to be performed (see below)
/// @return string with a message indicating the nature of the
/// issue. If no issue is detected the return string is empty.
///
/// @details @a number refers to the following ordered list of
/// checks - always starting from the top.
/// Fast checks
/// 1: value type is floating point
/// 2: has level set class type
/// 3: has uniform scale
/// 4: background value is positive and n*dx
///
/// Slower checks
/// 5: no active tiles
/// 6: all the values are finite, i.e not NaN or infinite
/// 7: active values in range between +-background
/// 8: abs of inactive values = background, i.e. assuming a symmetric
/// narrow band!
///
/// Relatively slow check (however multithreaded)
/// 9: norm gradient is close to one, i.e. satisfied the Eikonal equation.
template<class GridType>
std::string
checkLevelSet(const GridType& grid, size_t number=9);
////////////////////////////////////////////////////////////////////////////////
/// @brief Perform checks on a grid to see if it is a valid fog volume.
///
/// @param grid Grid to be checked
/// @param number Number of the checks to be performed (see below)
/// @return string with a message indicating the nature of the
/// issue. If no issue is detected the return string is empty.
///
/// @details @a number refers to the following ordered list of
/// checks - always starting from the top.
/// Fast checks
/// 1: value type is floating point
/// 2: has FOG volume class type
/// 3: background value is zero
///
/// Slower checks
/// 4: all the values are finite, i.e not NaN or infinite
/// 5: inactive values are zero
/// 6: active values are in the range [0,1]
template<class GridType>
std::string
checkFogVolume(const GridType& grid, size_t number=6);
////////////////////////////////////////////////////////////////////////////////
/// @brief Threaded method to find unique inactive values.
///
/// @param grid A VDB volume.
/// @param values List of unique inactive values, returned by this method.
/// @param numValues Number of values to look for.
/// @return @c false if the @a grid has more than @a numValues inactive values.
template<class GridType>
bool
uniqueInactiveValues(const GridType& grid,
std::vector<typename GridType::ValueType>& values, size_t numValues);
////////////////////////////////////////////////////////////////////////////////
/// @brief Checks NaN values
template<typename GridT, typename TreeIterT = typename GridT::ValueOnCIter>
struct CheckNan
{
    // Scalar component type (equal to ValueType for scalar grids).
    using ElementType = typename VecTraits<typename GridT::ValueType>::ElementType;
    using TileIterT = TreeIterT;
    // Leaf-node (voxel) iterator of the same on/off flavor as TreeIterT.
    using VoxelIterT = typename tree::IterTraits<
        typename TreeIterT::NodeT, typename TreeIterT::ValueIterT>::template
        NodeConverter<typename GridT::TreeType::LeafNodeType>::Type;
    /// @brief Default constructor
    CheckNan() {}
    /// Return true if the scalar value is NaN
    inline bool operator()(const ElementType& v) const { return std::isnan(v); }
    /// @brief This allows for vector values to be checked component-wise;
    /// returns true if any component is NaN.
    template<typename T>
    inline typename std::enable_if<VecTraits<T>::IsVec, bool>::type
    operator()(const T& v) const
    {
        for (int i=0; i<VecTraits<T>::Size; ++i) if ((*this)(v[i])) return true;//should unroll
        return false;
    }
    /// @brief Return true if the tile at the iterator location is NaN
    bool operator()(const TreeIterT &iter) const { return (*this)(*iter); }
    /// @brief Return true if the voxel at the iterator location is NaN
    bool operator()(const VoxelIterT &iter) const { return (*this)(*iter); }
    /// @brief Return a string describing a failed check.
    std::string str() const { return "NaN"; }
};// CheckNan
////////////////////////////////////////////////////////////////////////////////
/// @brief Checks for infinite values, e.g. 1/0 or -1/0
template <typename GridT,
          typename TreeIterT = typename GridT::ValueOnCIter>
struct CheckInf
{
    // Scalar component type (equal to ValueType for scalar grids).
    using ElementType = typename VecTraits<typename GridT::ValueType>::ElementType;
    using TileIterT = TreeIterT;
    // Leaf-node (voxel) iterator of the same on/off flavor as TreeIterT.
    using VoxelIterT = typename tree::IterTraits<typename TreeIterT::NodeT,
        typename TreeIterT::ValueIterT> ::template NodeConverter<
        typename GridT::TreeType::LeafNodeType>::Type;
    /// @brief Default constructor
    CheckInf() {}
    /// Return true if the value is infinite
    inline bool operator()(const ElementType& v) const { return std::isinf(v); }
    /// Return true if any of the vector components are infinite.
    template<typename T>
    inline typename std::enable_if<VecTraits<T>::IsVec, bool>::type
    operator()(const T& v) const
    {
        for (int i=0; i<VecTraits<T>::Size; ++i) if ((*this)(v[i])) return true;
        return false;
    }
    /// @brief Return true if the tile at the iterator location is infinite
    bool operator()(const TreeIterT &iter) const { return (*this)(*iter); }
    /// @brief Return true if the voxel at the iterator location is infinite
    bool operator()(const VoxelIterT &iter) const { return (*this)(*iter); }
    /// @brief Return a string describing a failed check.
    std::string str() const { return "infinite"; }
};// CheckInf
////////////////////////////////////////////////////////////////////////////////
/// @brief Checks for both NaN and inf values, i.e. any value that is not finite.
template <typename GridT,
          typename TreeIterT = typename GridT::ValueOnCIter>
struct CheckFinite
{
    // Scalar component type (equal to ValueType for scalar grids).
    using ElementType = typename VecTraits<typename GridT::ValueType>::ElementType;
    using TileIterT = TreeIterT;
    // Leaf-node (voxel) iterator of the same on/off flavor as TreeIterT.
    using VoxelIterT = typename tree::IterTraits<typename TreeIterT::NodeT,
        typename TreeIterT::ValueIterT> ::template NodeConverter<
        typename GridT::TreeType::LeafNodeType>::Type;
    /// @brief Default constructor
    CheckFinite() {}
    /// Return true if the value is NOT finite, i.e. it's NaN or infinite
    inline bool operator()(const ElementType& v) const { return !std::isfinite(v); }
    /// Return true if any of the vector components are NaN or infinite.
    template<typename T>
    inline typename std::enable_if<VecTraits<T>::IsVec, bool>::type
    operator()(const T& v) const {
        for (int i=0; i<VecTraits<T>::Size; ++i) if ((*this)(v[i])) return true;
        return false;
    }
    /// @brief Return true if the tile at the iterator location is NaN or infinite.
    bool operator()(const TreeIterT &iter) const { return (*this)(*iter); }
    /// @brief Return true if the voxel at the iterator location is NaN or infinite.
    bool operator()(const VoxelIterT &iter) const { return (*this)(*iter); }
    /// @brief Return a string describing a failed check.
    std::string str() const { return "not finite"; }
};// CheckFinite
////////////////////////////////////////////////////////////////////////////////
/// @brief Check that the magnitude of a value, a, is close to a fixed
/// magnitude, b, given a fixed tolerance c. That is | |a| - |b| | <= c
template <typename GridT,
          typename TreeIterT = typename GridT::ValueOffCIter>
struct CheckMagnitude
{
    // Scalar component type (equal to ValueType for scalar grids).
    using ElementType = typename VecTraits<typename GridT::ValueType>::ElementType;
    using TileIterT = TreeIterT;
    // Leaf-node (voxel) iterator of the same on/off flavor as TreeIterT.
    using VoxelIterT = typename tree::IterTraits<typename TreeIterT::NodeT,
        typename TreeIterT::ValueIterT> ::template NodeConverter<
        typename GridT::TreeType::LeafNodeType>::Type;
    /// @brief Constructor taking the expected magnitude @a a and an optional tolerance @a t.
    CheckMagnitude(const ElementType& a,
                   const ElementType& t = math::Tolerance<ElementType>::value())
        : absVal(math::Abs(a)), tolVal(math::Abs(t))
    {
    }
    /// Return true if the magnitude of the value is not approximately
    /// equal to absVal (within the tolerance tolVal).
    inline bool operator()(const ElementType& v) const
    {
        return math::Abs(math::Abs(v) - absVal) > tolVal;
    }
    /// Return true if any of the vector components fail the magnitude test.
    template<typename T>
    inline typename std::enable_if<VecTraits<T>::IsVec, bool>::type
    operator()(const T& v) const
    {
        for (int i=0; i<VecTraits<T>::Size; ++i) if ((*this)(v[i])) return true;
        return false;
    }
    /// @brief Return true if the tile at the iterator location fails the magnitude test.
    bool operator()(const TreeIterT &iter) const { return (*this)(*iter); }
    /// @brief Return true if the voxel at the iterator location fails the magnitude test.
    bool operator()(const VoxelIterT &iter) const { return (*this)(*iter); }
    /// @brief Return a string describing a failed check.
    std::string str() const
    {
        std::ostringstream ss;
        ss << "not equal to +/-"<<absVal<<" with a tolerance of "<<tolVal;
        return ss.str();
    }
    const ElementType absVal, tolVal;// expected magnitude and comparison tolerance
};// CheckMagnitude
////////////////////////////////////////////////////////////////////////////////
/// @brief Checks a value against a range
template <typename GridT,
          bool MinInclusive = true,//is min part of the range?
          bool MaxInclusive = true,//is max part of the range?
          typename TreeIterT = typename GridT::ValueOnCIter>
struct CheckRange
{
    using ElementType = typename VecTraits<typename GridT::ValueType>::ElementType;
    using TileIterT = TreeIterT;
    using VoxelIterT = typename tree::IterTraits<typename TreeIterT::NodeT,
        typename TreeIterT::ValueIterT>::template NodeConverter<
        typename GridT::TreeType::LeafNodeType>::Type;

    /// @brief Constructor taking the lower and upper bounds of the range.
    /// @throw ValueError if @a _min is larger than @a _max.
    CheckRange(const ElementType& _min, const ElementType& _max)
        : minVal(_min), maxVal(_max)
    {
        if (minVal > maxVal) {
            OPENVDB_THROW(ValueError, "CheckRange: Invalid range (min > max)");
        }
    }

    /// @brief Return true if the scalar value falls outside the range.
    inline bool operator()(const ElementType& v) const
    {
        const bool tooSmall = MinInclusive ? v < minVal : v <= minVal;
        const bool tooLarge = MaxInclusive ? v > maxVal : v >= maxVal;
        return tooSmall || tooLarge;
    }

    /// @brief Return true if at least one vector component is out of range.
    template<typename T>
    inline typename std::enable_if<VecTraits<T>::IsVec, bool>::type
    operator()(const T& v) const
    {
        bool outside = false;
        for (int i = 0; i < VecTraits<T>::Size && !outside; ++i) {
            outside = (*this)(v[i]);
        }
        return outside;
    }

    /// @brief Return true if the tile value at the iterator location is out of range.
    bool operator()(const TreeIterT& iter) const { return (*this)(*iter); }

    /// @brief Return true if the voxel value at the iterator location is out of range.
    bool operator()(const VoxelIterT& iter) const { return (*this)(*iter); }

    /// @brief Return a string describing a failed check.
    /// @note Exclusive bounds are rendered with reversed brackets, e.g. ]min,max].
    std::string str() const
    {
        std::ostringstream ss;
        ss << "outside the value range " << (MinInclusive ? "[" : "]")
           << minVal << "," << maxVal << (MaxInclusive ? "]" : "[");
        return ss.str();
    }

    const ElementType minVal, maxVal;
};// CheckRange
////////////////////////////////////////////////////////////////////////////////
/// @brief Checks a value against a minimum
template <typename GridT,
          typename TreeIterT = typename GridT::ValueOnCIter>
struct CheckMin
{
    using ElementType = typename VecTraits<typename GridT::ValueType>::ElementType;
    using TileIterT = TreeIterT;
    using VoxelIterT = typename tree::IterTraits<typename TreeIterT::NodeT,
        typename TreeIterT::ValueIterT>::template NodeConverter<
        typename GridT::TreeType::LeafNodeType>::Type;

    /// @brief Constructor taking the lower bound to be tested against.
    CheckMin(const ElementType& _min) : minVal(_min) {}

    /// @brief Return true if the scalar value is below the minimum.
    inline bool operator()(const ElementType& v) const { return v < minVal; }

    /// @brief Return true if at least one vector component is below the minimum.
    template<typename T>
    inline typename std::enable_if<VecTraits<T>::IsVec, bool>::type
    operator()(const T& v) const
    {
        bool below = false;
        for (int i = 0; i < VecTraits<T>::Size && !below; ++i) {
            below = (*this)(v[i]);
        }
        return below;
    }

    /// @brief Return true if the tile value at the iterator location is below the minimum.
    bool operator()(const TreeIterT& iter) const { return (*this)(*iter); }

    /// @brief Return true if the voxel value at the iterator location is below the minimum.
    bool operator()(const VoxelIterT& iter) const { return (*this)(*iter); }

    /// @brief Return a string describing a failed check.
    std::string str() const
    {
        std::ostringstream ss;
        ss << "smaller than "<<minVal;
        return ss.str();
    }

    const ElementType minVal;
};// CheckMin
////////////////////////////////////////////////////////////////////////////////
/// @brief Checks a value against a maximum
template <typename GridT,
          typename TreeIterT = typename GridT::ValueOnCIter>
struct CheckMax
{
    using ElementType = typename VecTraits<typename GridT::ValueType>::ElementType;
    using TileIterT = TreeIterT;
    using VoxelIterT = typename tree::IterTraits<typename TreeIterT::NodeT,
        typename TreeIterT::ValueIterT>::template NodeConverter<
        typename GridT::TreeType::LeafNodeType>::Type;

    /// @brief Constructor taking the upper bound to be tested against.
    CheckMax(const ElementType& _max) : maxVal(_max) {}

    /// @brief Return true if the scalar value exceeds the maximum.
    inline bool operator()(const ElementType& v) const { return v > maxVal; }

    /// @brief Return true if at least one vector component exceeds the maximum.
    template<typename T>
    inline typename std::enable_if<VecTraits<T>::IsVec, bool>::type
    operator()(const T& v) const
    {
        bool above = false;
        for (int i = 0; i < VecTraits<T>::Size && !above; ++i) {
            above = (*this)(v[i]);
        }
        return above;
    }

    /// @brief Return true if the tile value at the iterator location exceeds the maximum.
    bool operator()(const TreeIterT& iter) const { return (*this)(*iter); }

    /// @brief Return true if the voxel value at the iterator location exceeds the maximum.
    bool operator()(const VoxelIterT& iter) const { return (*this)(*iter); }

    /// @brief Return a string describing a failed check.
    std::string str() const
    {
        std::ostringstream ss;
        ss << "larger than "<<maxVal;
        return ss.str();
    }

    const ElementType maxVal;
};// CheckMax
////////////////////////////////////////////////////////////////////////////////
/// @brief Checks the norm of the gradient against a range, i.e.,
/// |∇Φ| ∈ [min, max]
///
/// @note Internally the test is performed as
/// |∇Φ|² ∈ [min², max²] for optimization reasons.
template<typename GridT,
         typename TreeIterT = typename GridT::ValueOnCIter,
         math::BiasedGradientScheme GradScheme = math::FIRST_BIAS>//math::WENO5_BIAS>
struct CheckNormGrad
{
    using ValueType = typename GridT::ValueType;
    static_assert(std::is_floating_point<ValueType>::value,
        "openvdb::tools::CheckNormGrad requires a scalar, floating-point grid");
    using TileIterT = TreeIterT;
    using VoxelIterT = typename tree::IterTraits<typename TreeIterT::NodeT,
        typename TreeIterT::ValueIterT> ::template NodeConverter<
        typename GridT::TreeType::LeafNodeType>::Type;
    using AccT = typename GridT::ConstAccessor;

    /// @brief Constructor taking a grid and a range to be tested against.
    /// @throw ValueError if the grid's transform is not uniformly scaled
    /// or if @a _min is larger than @a _max.
    CheckNormGrad(const GridT& grid, const ValueType& _min, const ValueType& _max)
        : acc(grid.getConstAccessor())
        , invdx2(ValueType(1.0/math::Pow2(grid.voxelSize()[0]))) // 1/dx² scales index-space |∇Φ|² to world space
        , minVal2(_min*_min) // bounds are stored squared ...
        , maxVal2(_max*_max) // ... so no square root is needed per voxel
    {
        if ( !grid.hasUniformVoxels() ) {
            OPENVDB_THROW(ValueError, "CheckNormGrad: The transform must have uniform scale");
        }
        if (_min > _max) {
            OPENVDB_THROW(ValueError, "CheckNormGrad: Invalid range (min > max)");
        }
    }

    /// @brief Copy constructor: creates a fresh accessor on the same tree
    /// (presumably because accessors should not be shared between TBB
    /// worker threads — confirm).
    CheckNormGrad(const CheckNormGrad& other)
        : acc(other.acc.tree())
        , invdx2(other.invdx2)
        , minVal2(other.minVal2)
        , maxVal2(other.maxVal2)
    {
    }

    /// Return true if the squared norm @a v is smaller than min² or larger than max².
    inline bool operator()(const ValueType& v) const { return v<minVal2 || v>maxVal2; }

    /// @brief Return true if zero is outside the range.
    /// @note We assume that the norm of the gradient of a tile is always zero.
    inline bool operator()(const TreeIterT&) const { return (*this)(ValueType(0)); }

    /// @brief Return true if the norm of the gradient at a voxel
    /// location of the iterator is out of range.
    inline bool operator()(const VoxelIterT &iter) const
    {
        const Coord ijk = iter.getCoord();
        return (*this)(invdx2 * math::ISGradientNormSqrd<GradScheme>::result(acc, ijk));
    }

    /// @brief Return a string describing a failed check.
    std::string str() const
    {
        std::ostringstream ss;
        ss << "outside the range of NormGrad ["<<math::Sqrt(minVal2)<<","<<math::Sqrt(maxVal2)<<"]";
        return ss.str();
    }

    AccT acc;                                   // accessor into the grid being checked
    const ValueType invdx2, minVal2, maxVal2;   // 1/dx², min², max²
};// CheckNormGrad
////////////////////////////////////////////////////////////////////////////////
/// @brief Checks the norm of the gradient at zero-crossing voxels against a range
/// @details CheckEikonal differs from CheckNormGrad in that it only
/// checks the norm of the gradient at voxel locations where the
/// FD-stencil crosses the zero isosurface!
template<typename GridT,
         typename TreeIterT = typename GridT::ValueOnCIter,
         typename StencilT = math::WenoStencil<GridT> >//math::GradStencil<GridT>
struct CheckEikonal
{
    using ValueType = typename GridT::ValueType;
    static_assert(std::is_floating_point<ValueType>::value,
        "openvdb::tools::CheckEikonal requires a scalar, floating-point grid");
    using TileIterT = TreeIterT;
    using VoxelIterT = typename tree::IterTraits<typename TreeIterT::NodeT,
        typename TreeIterT::ValueIterT> ::template NodeConverter<
        typename GridT::TreeType::LeafNodeType>::Type;

    /// @brief Constructor taking a grid and a range to be tested against.
    /// @throw ValueError if the grid's transform is not uniformly scaled
    /// or if @a _min is larger than @a _max.
    CheckEikonal(const GridT& grid, const ValueType& _min, const ValueType& _max)
        : stencil(grid), minVal(_min), maxVal(_max)
    {
        if ( !grid.hasUniformVoxels() ) {
            OPENVDB_THROW(ValueError, "CheckEikonal: The transform must have uniform scale");
        }
        if (minVal > maxVal) {
            OPENVDB_THROW(ValueError, "CheckEikonal: Invalid range (min > max)");
        }
    }

    /// @brief Copy constructor: builds its own stencil on the same grid
    /// (presumably because stencils should not be shared between TBB
    /// worker threads — confirm).
    CheckEikonal(const CheckEikonal& other)
        : stencil(other.stencil.grid()), minVal(other.minVal), maxVal(other.maxVal)
    {
    }

    /// Return true if the value is smaller than min or larger than max.
    /// @note NOTE(review): operator()(VoxelIterT) feeds stencil.normSqGrad()
    /// (apparently a squared norm, by analogy with CheckNormGrad) into this
    /// test while minVal/maxVal are not squared — confirm intended.
    inline bool operator()(const ValueType& v) const { return v<minVal || v>maxVal; }

    /// @brief Return true if zero is outside the range.
    /// @note We assume that the norm of the gradient of a tile is always zero.
    inline bool operator()(const TreeIterT&) const { return (*this)(ValueType(0)); }

    /// @brief Return true if the norm of the gradient at a
    /// zero-crossing voxel location of the iterator is out of range.
    inline bool operator()(const VoxelIterT &iter) const
    {
        stencil.moveTo(iter);
        // Voxels whose stencil does not straddle the zero isosurface always pass.
        if (!stencil.zeroCrossing()) return false;
        return (*this)(stencil.normSqGrad());
    }

    /// @brief Return a string describing a failed check.
    std::string str() const
    {
        std::ostringstream ss;
        ss << "outside the range of NormGrad ["<<minVal<<","<<maxVal<<"]";
        return ss.str();
    }

    mutable StencilT stencil;   // mutable: moveTo() is invoked from const operator()
    const ValueType minVal, maxVal;
};// CheckEikonal
////////////////////////////////////////////////////////////////////////////////
/// @brief Checks the divergence against a range
template<typename GridT,
         typename TreeIterT = typename GridT::ValueOnCIter,
         math::DScheme DiffScheme = math::CD_2ND>
struct CheckDivergence
{
    using ValueType = typename GridT::ValueType;
    using ElementType = typename VecTraits<ValueType>::ElementType;
    static_assert(std::is_floating_point<ElementType>::value,
        "openvdb::tools::CheckDivergence requires a floating-point vector grid");
    using TileIterT = TreeIterT;
    using VoxelIterT = typename tree::IterTraits<typename TreeIterT::NodeT,
        typename TreeIterT::ValueIterT>::template NodeConverter<
        typename GridT::TreeType::LeafNodeType>::Type;
    using AccT = typename GridT::ConstAccessor;

    /// @brief Constructor taking a vector grid and a range to be tested against.
    /// @throw ValueError if the grid's transform is not uniformly scaled
    /// or if @a _min is larger than @a _max.
    /// @note NOTE(review): invdx, minVal and maxVal are declared with the
    /// (vector) ValueType even though operator() combines and compares them
    /// with the scalar ElementType — confirm whether ElementType was intended.
    CheckDivergence(const GridT&  grid,
                    const ValueType& _min,
                    const ValueType& _max)
        : acc(grid.getConstAccessor())
        , invdx(ValueType(1.0/grid.voxelSize()[0])) // 1/dx scales index-space divergence to world space
        , minVal(_min)
        , maxVal(_max)
    {
        if ( !grid.hasUniformVoxels() ) {
            OPENVDB_THROW(ValueError, "CheckDivergence: The transform must have uniform scale");
        }
        if (minVal > maxVal) {
            OPENVDB_THROW(ValueError, "CheckDivergence: Invalid range (min > max)");
        }
    }

    /// Return true if the value is smaller than min or larger than max.
    inline bool operator()(const ElementType& v) const { return v<minVal || v>maxVal; }

    /// @brief Return true if zero is outside the range.
    /// @note We assume that the divergence of a tile is always zero.
    inline bool operator()(const TreeIterT&) const { return (*this)(ElementType(0)); }

    /// @brief Return true if the divergence at a voxel location of
    /// the iterator is out of range.
    inline bool operator()(const VoxelIterT &iter) const
    {
        const Coord ijk = iter.getCoord();
        return (*this)(invdx * math::ISDivergence<DiffScheme>::result(acc, ijk));
    }

    /// @brief Return a string describing a failed check.
    std::string str() const
    {
        std::ostringstream ss;
        ss << "outside the range of divergence ["<<minVal<<","<<maxVal<<"]";
        return ss.str();
    }

    AccT acc;                               // accessor into the vector grid being checked
    const ValueType invdx, minVal, maxVal;  // 1/dx and the acceptable divergence range
};// CheckDivergence
////////////////////////////////////////////////////////////////////////////////
/// @brief Performs multithreaded diagnostics of a grid
/// @note More documentation will be added soon!
template <typename GridT>
class Diagnose
{
public:
    using MaskType = typename GridT::template ValueConverter<bool>::Type;

    /// @brief Construct a diagnostic object for @a grid.
    /// @details The internal failure mask is given a copy of the grid's transform.
    Diagnose(const GridT& grid) : mGrid(&grid), mMask(new MaskType()), mCount(0)
    {
        mMask->setTransform(grid.transformPtr()->copy());
    }

    /// @brief Run a single check over the grid and return a message
    /// describing the failures (empty if everything passed).
    /// @param check            functor that flags offending values
    /// @param updateMask       if true, failing values are activated in the mask
    /// @param checkVoxels      if true, test the active voxel values (multithreaded)
    /// @param checkTiles       if true, test the active tile values
    /// @param checkBackground  if true, test the background value
    template <typename CheckT>
    std::string check(const CheckT& check,
                      bool updateMask = false,
                      bool checkVoxels = true,
                      bool checkTiles = true,
                      bool checkBackground = true)
    {
        typename MaskType::TreeType* mask = updateMask ? &(mMask->tree()) : nullptr;
        CheckValues<CheckT> cc(mask, mGrid, check);
        std::ostringstream ss;
        if (checkBackground) ss << cc.checkBackground();
        if (checkTiles)      ss << cc.checkTiles();
        if (checkVoxels)     ss << cc.checkVoxels();
        mCount += cc.mCount;
        return ss.str();
    }

    //@{
    /// @brief Return a boolean mask of all the values
    /// (i.e. tiles and/or voxels) that have failed one or
    /// more checks.
    typename MaskType::ConstPtr mask() const { return mMask; }
    typename MaskType::Ptr mask() { return mMask; }
    //@}

    /// @brief Return the number of values (i.e. background, tiles or
    /// voxels) that have failed one or more checks.
    Index64 valueCount() const { return mMask->activeVoxelCount(); }

    /// @brief Return total number of failed checks
    /// @note If only one check was performed and the mask was updated
    /// failureCount equals valueCount.
    Index64 failureCount() const { return mCount; }

    /// @brief Return a const reference to the grid
    const GridT& grid() const { return *mGrid; }

    /// @brief Clear the mask and error counter
    /// @note Fixed: the previous implementation assigned a raw pointer to the
    /// smart-pointer member ("mMask = new MaskType()"), which does not compile
    /// once this method is instantiated, and it also dropped the grid
    /// transform that the constructor had installed on the mask.
    void clear()
    {
        mMask.reset(new MaskType());
        mMask->setTransform(mGrid->transformPtr()->copy());
        mCount = 0;
    }

private:
    // disallow copy construction and copy by assignment!
    Diagnose(const Diagnose&);// not implemented
    Diagnose& operator=(const Diagnose&);// not implemented

    const GridT*           mGrid;  // observed, not owned
    typename MaskType::Ptr mMask;  // mask of all values that failed a check
    Index64                mCount; // total number of failed checks

    /// @brief Private class that performs the multithreaded checks
    template <typename CheckT>
    struct CheckValues
    {
        using MaskT = typename MaskType::TreeType;
        using LeafT = typename GridT::TreeType::LeafNodeType;
        using LeafManagerT = typename tree::LeafManager<const typename GridT::TreeType>;
        const bool   mOwnsMask; // true only for TBB split copies
        MaskT*       mMask;     // failure mask tree (may be nullptr)
        const GridT* mGrid;
        const CheckT mCheck;
        Index64      mCount;    // failures found by this body

        CheckValues(MaskT* mask, const GridT* grid, const CheckT& check)
            : mOwnsMask(false)
            , mMask(mask)
            , mGrid(grid)
            , mCheck(check)
            , mCount(0)
        {
        }
        /// @brief TBB splitting constructor: the split copy owns a private
        /// mask tree that is merged back in join().
        CheckValues(CheckValues& other, tbb::split)
            : mOwnsMask(true)
            , mMask(other.mMask ? new MaskT() : nullptr)
            , mGrid(other.mGrid)
            , mCheck(other.mCheck)
            , mCount(0)
        {
        }
        ~CheckValues() { if (mOwnsMask) delete mMask; }

        /// @brief Test the background value; return a message on failure.
        std::string checkBackground()
        {
            std::ostringstream ss;
            if (mCheck(mGrid->background())) {
                ++mCount;
                ss << "Background is " + mCheck.str() << std::endl;
            }
            return ss.str();
        }

        /// @brief Test all active tiles (serially); return a message on failure.
        std::string checkTiles()
        {
            std::ostringstream ss;
            const Index64 n = mCount;
            typename CheckT::TileIterT i(mGrid->tree());
            // Restrict the iterator to tiles, i.e. exclude the leaf level.
            for (i.setMaxDepth(GridT::TreeType::RootNodeType::LEVEL - 1); i; ++i) {
                if (mCheck(i)) {
                    ++mCount;
                    if (mMask) mMask->fill(i.getBoundingBox(), true, true);
                }
            }
            if (const Index64 m = mCount - n) {
                ss << m << " tile" << (m==1 ? " is " : "s are ") + mCheck.str() << std::endl;
            }
            return ss.str();
        }

        /// @brief Test all active voxels (multithreaded); return a message on failure.
        std::string checkVoxels()
        {
            std::ostringstream ss;
            LeafManagerT leafs(mGrid->tree());
            const Index64 n = mCount;
            tbb::parallel_reduce(leafs.leafRange(), *this);
            if (const Index64 m = mCount - n) {
                ss << m << " voxel" << (m==1 ? " is " : "s are ") + mCheck.str() << std::endl;
            }
            return ss.str();
        }

        /// @brief TBB body: test the voxels of every leaf node in the range.
        void operator()(const typename LeafManagerT::LeafRange& r)
        {
            using VoxelIterT = typename CheckT::VoxelIterT;
            if (mMask) {
                for (typename LeafManagerT::LeafRange::Iterator i=r.begin(); i; ++i) {
                    typename MaskT::LeafNodeType* maskLeaf = nullptr;
                    for (VoxelIterT j = tree::IterTraits<LeafT, VoxelIterT>::begin(*i); j; ++j) {
                        if (mCheck(j)) {
                            ++mCount;
                            // Lazily create the mask leaf for this node.
                            if (maskLeaf == nullptr) maskLeaf = mMask->touchLeaf(j.getCoord());
                            maskLeaf->setValueOn(j.pos(), true);
                        }
                    }
                }
            } else {
                for (typename LeafManagerT::LeafRange::Iterator i=r.begin(); i; ++i) {
                    for (VoxelIterT j = tree::IterTraits<LeafT, VoxelIterT>::begin(*i); j; ++j) {
                        if (mCheck(j)) ++mCount;
                    }
                }
            }
        }

        /// @brief TBB reduction: merge the mask and failure count of a split copy.
        void join(const CheckValues& other)
        {
            if (mMask) mMask->merge(*(other.mMask), openvdb::MERGE_ACTIVE_STATES_AND_NODES);
            mCount += other.mCount;
        }
    };//End of private class CheckValues
};// End of public class Diagnose
////////////////////////////////////////////////////////////////////////////////
/// @brief Class that performs various types of checks on narrow-band level sets.
///
/// @note The most common usage is to simply call CheckLevelSet::check()
template<class GridType>
class CheckLevelSet
{
public:
    using ValueType = typename GridType::ValueType;
    using MaskType = typename GridType::template ValueConverter<bool>::Type;

    /// @brief Construct a checker for the given (externally owned) grid.
    CheckLevelSet(const GridType& grid) : mDiagnose(grid) {}

    //@{
    /// @brief Return a boolean mask of all the values
    /// (i.e. tiles and/or voxels) that have failed one or
    /// more checks.
    typename MaskType::ConstPtr mask() const { return mDiagnose.mask(); }
    typename MaskType::Ptr mask() { return mDiagnose.mask(); }
    //@}

    /// @brief Return the number of values (i.e. background, tiles or
    /// voxels) that have failed one or more checks.
    Index64 valueCount() const { return mDiagnose.valueCount(); }

    /// @brief Return total number of failed checks
    /// @note If only one check was performed and the mask was updated
    /// failureCount equals valueCount.
    Index64 failureCount() const { return mDiagnose.failureCount(); }

    /// @brief Return a const reference to the grid
    const GridType& grid() const { return mDiagnose.grid(); }

    /// @brief Clear the mask and error counter
    void clear() { mDiagnose.clear(); }

    /// @brief Return a nonempty message if the grid's value type is NOT floating point.
    ///
    /// @note No run-time overhead
    static std::string checkValueType()
    {
        static const bool test = std::is_floating_point<ValueType>::value;
        return test ? "" : "Value type is not floating point\n";
    }

    /// @brief Return a nonempty message if the grid's class is NOT a level set.
    ///
    /// @note Small run-time overhead
    std::string checkClassType() const
    {
        const bool test = mDiagnose.grid().getGridClass() == GRID_LEVEL_SET;
        return test ? "" : "Class type is not \"GRID_LEVEL_SET\"\n";
    }

    /// @brief Return a nonempty message if the grid's transform does not have uniform scaling.
    ///
    /// @note Small run-time overhead
    std::string checkTransform() const
    {
        return mDiagnose.grid().hasUniformVoxels() ? "" : "Does not have uniform voxels\n";
    }

    /// @brief Return a nonempty message if the background value is smaller
    /// than halfWidth voxel units.
    ///
    /// @note Small run-time overhead
    std::string checkBackground(Real halfWidth = LEVEL_SET_HALF_WIDTH) const
    {
        // Narrow-band width expressed in voxel units.
        const Real w = mDiagnose.grid().background() / mDiagnose.grid().voxelSize()[0];
        if (w < halfWidth) {
            std::ostringstream ss;
            ss << "The background value ("<< mDiagnose.grid().background()<<") is less than "
               << halfWidth << " voxel units\n";
            return ss.str();
        }
        return "";
    }

    /// @brief Return a nonempty message if the grid has any active tile values
    /// (a proper narrow-band level set should have none).
    ///
    /// @note Medium run-time overhead
    std::string checkTiles() const
    {
        const bool test = mDiagnose.grid().tree().hasActiveTiles();
        return test ? "Has active tile values\n" : "";
    }

    /// @brief Return a nonempty message if any of the values are not finite. i.e. NaN or inf.
    ///
    /// @note Medium run-time overhead
    std::string checkFinite(bool updateMask = false)
    {
        CheckFinite<GridType,typename GridType::ValueAllCIter> c;
        return mDiagnose.check(c, updateMask, /*voxel*/true, /*tiles*/true, /*background*/true);
    }

    /// @brief Return a nonempty message if the active voxel values are out-of-range,
    /// i.e. not in [-background, +background].
    ///
    /// @note Medium run-time overhead
    std::string checkRange(bool updateMask = false)
    {
        const ValueType& background = mDiagnose.grid().background();
        CheckRange<GridType> c(-background, background);
        return mDiagnose.check(c, updateMask, /*voxel*/true, /*tiles*/false, /*background*/false);
    }

    /// @brief Return a nonempty message if the inactive values do not have a
    /// magnitude equal to the background value (assumes a symmetric narrow band).
    ///
    /// @note Medium run-time overhead
    std::string checkInactiveValues(bool updateMask = false)
    {
        const ValueType& background = mDiagnose.grid().background();
        CheckMagnitude<GridType, typename GridType::ValueOffCIter> c(background);
        return mDiagnose.check(c, updateMask, /*voxel*/true, /*tiles*/true, /*background*/false);
    }

    /// @brief Return a nonempty message if the norm of the gradient of the
    /// active voxels is out of the range minV to maxV.
    ///
    /// @note Significant run-time overhead
    std::string checkEikonal(bool updateMask = false, ValueType minV = 0.5, ValueType maxV = 1.5)
    {
        CheckEikonal<GridType> c(mDiagnose.grid(), minV, maxV);
        return mDiagnose.check(c, updateMask, /*voxel*/true, /*tiles*/false, /*background*/false);
    }

    /// @brief Return a nonempty message if an error or issue is detected. Only
    /// runs tests with a number lower than or equal to n, where:
    ///
    /// Fast checks
    /// 1: value type is floating point
    /// 2: has level set class type
    /// 3: has uniform scale
    /// 4: background value is positive and n*dx
    ///
    /// Slower checks
    /// 5: no active tiles
    /// 6: all the values are finite, i.e not NaN or infinite
    /// 7: active values in range between +-background
    /// 8: abs of inactive values = background, i.e. assuming a symmetric narrow band!
    ///
    /// Relatively slow check (however multi-threaded)
    /// 9: norm of gradient at zero-crossings is one, i.e. satisfied the Eikonal equation.
    std::string check(size_t n=9, bool updateMask = false)
    {
        // Checks are chained: each subsequent test runs only while no
        // earlier test has produced a failure message.
        std::string str = this->checkValueType();
        if (str.empty() && n>1) str = this->checkClassType();
        if (str.empty() && n>2) str = this->checkTransform();
        if (str.empty() && n>3) str = this->checkBackground();
        if (str.empty() && n>4) str = this->checkTiles();
        if (str.empty() && n>5) str = this->checkFinite(updateMask);
        if (str.empty() && n>6) str = this->checkRange(updateMask);
        if (str.empty() && n>7) str = this->checkInactiveValues(updateMask);
        if (str.empty() && n>8) str = this->checkEikonal(updateMask);
        return str;
    }

private:
    // disallow copy construction and copy by assignment!
    CheckLevelSet(const CheckLevelSet&);// not implemented
    CheckLevelSet& operator=(const CheckLevelSet&);// not implemented

    // Member data
    Diagnose<GridType> mDiagnose;
};// CheckLevelSet
/// @brief Perform the first @a n consistency checks on a narrow-band level
/// set grid and return a (possibly empty) message describing the first failure.
template<class GridType>
std::string
checkLevelSet(const GridType& grid, size_t n)
{
    return CheckLevelSet<GridType>(grid).check(n, /*updateMask=*/false);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief Class that performs various types of checks on fog volumes.
///
/// @note The most common usage is to simply call CheckFogVolume::check()
template<class GridType>
class CheckFogVolume
{
public:
    using ValueType = typename GridType::ValueType;
    using MaskType = typename GridType::template ValueConverter<bool>::Type;

    /// @brief Construct a checker for the given (externally owned) grid.
    CheckFogVolume(const GridType& grid) : mDiagnose(grid) {}

    //@{
    /// @brief Return a boolean mask of all the values
    /// (i.e. tiles and/or voxels) that have failed one or
    /// more checks.
    typename MaskType::ConstPtr mask() const { return mDiagnose.mask(); }
    typename MaskType::Ptr mask() { return mDiagnose.mask(); }
    //@}

    /// @brief Return the number of values (i.e. background, tiles or
    /// voxels) that have failed one or more checks.
    Index64 valueCount() const { return mDiagnose.valueCount(); }

    /// @brief Return total number of failed checks
    /// @note If only one check was performed and the mask was updated
    /// failureCount equals valueCount.
    Index64 failureCount() const { return mDiagnose.failureCount(); }

    /// @brief Return a const reference to the grid
    const GridType& grid() const { return mDiagnose.grid(); }

    /// @brief Clear the mask and error counter
    void clear() { mDiagnose.clear(); }

    /// @brief Return a nonempty message if the grid's value type is NOT floating point.
    ///
    /// @note No run-time overhead
    static std::string checkValueType()
    {
        static const bool test = std::is_floating_point<ValueType>::value;
        return test ? "" : "Value type is not floating point";
    }

    /// @brief Return a nonempty message if the grid's class is NOT a fog volume.
    ///
    /// @note Small run-time overhead
    std::string checkClassType() const
    {
        const bool test = mDiagnose.grid().getGridClass() == GRID_FOG_VOLUME;
        // Fixed: the failure message previously named GRID_LEVEL_SET even
        // though the test is for GRID_FOG_VOLUME.
        return test ? "" : "Class type is not \"GRID_FOG_VOLUME\"";
    }

    /// @brief Return a nonempty message if the background value is not zero.
    ///
    /// @note Small run-time overhead
    std::string checkBackground() const
    {
        if (!math::isApproxZero(mDiagnose.grid().background())) {
            std::ostringstream ss;
            ss << "The background value ("<< mDiagnose.grid().background()<<") is not zero";
            return ss.str();
        }
        return "";
    }

    /// @brief Return a nonempty message if any of the values are not finite. i.e. NaN or inf.
    ///
    /// @note Medium run-time overhead
    std::string checkFinite(bool updateMask = false)
    {
        CheckFinite<GridType,typename GridType::ValueAllCIter> c;
        return mDiagnose.check(c, updateMask, /*voxel*/true, /*tiles*/true, /*background*/true);
    }

    /// @brief Return a nonempty message if any of the inactive values are not zero.
    ///
    /// @note Medium run-time overhead
    std::string checkInactiveValues(bool updateMask = false)
    {
        CheckMagnitude<GridType, typename GridType::ValueOffCIter> c(0);
        return mDiagnose.check(c, updateMask, /*voxel*/true, /*tiles*/true, /*background*/true);
    }

    /// @brief Return a nonempty message if the active voxel values
    /// are out-of-range, i.e. not in the range [0,1].
    ///
    /// @note Medium run-time overhead
    std::string checkRange(bool updateMask = false)
    {
        CheckRange<GridType> c(0, 1);
        return mDiagnose.check(c, updateMask, /*voxel*/true, /*tiles*/true, /*background*/false);
    }

    /// @brief Return a nonempty message if an error or issue is detected. Only
    /// runs tests with a number lower than or equal to n, where:
    ///
    /// Fast checks
    /// 1: value type is floating point
    /// 2: has FOG volume class type
    /// 3: background value is zero
    ///
    /// Slower checks
    /// 4: all the values are finite, i.e not NaN or infinite
    /// 5: inactive values are zero
    /// 6: active values are in the range [0,1]
    std::string check(size_t n=6, bool updateMask = false)
    {
        // Checks are chained: each subsequent test runs only while no
        // earlier test has produced a failure message.
        std::string str = this->checkValueType();
        if (str.empty() && n>1) str = this->checkClassType();
        if (str.empty() && n>2) str = this->checkBackground();
        if (str.empty() && n>3) str = this->checkFinite(updateMask);
        if (str.empty() && n>4) str = this->checkInactiveValues(updateMask);
        if (str.empty() && n>5) str = this->checkRange(updateMask);
        return str;
    }

private:
    // disallow copy construction and copy by assignment!
    CheckFogVolume(const CheckFogVolume&);// not implemented
    CheckFogVolume& operator=(const CheckFogVolume&);// not implemented

    // Member data
    Diagnose<GridType> mDiagnose;
};// CheckFogVolume
/// @brief Perform the first @a n consistency checks on a fog-volume grid and
/// return a (possibly empty) message describing the first failure.
template<class GridType>
std::string
checkFogVolume(const GridType& grid, size_t n)
{
    return CheckFogVolume<GridType>(grid).check(n, /*updateMask=*/false);
}
////////////////////////////////////////////////////////////////////////////////
// Internal utility objects and implementation details
namespace diagnostics_internal {
/// @brief TBB reduction body that collects the set of distinct values stored
/// in the inactive voxels of a tree's leaf nodes.
/// @details Collection is cancelled as soon as more than @c numValues
/// distinct values have been found.
template<typename TreeType>
class InactiveVoxelValues
{
public:
    using LeafArray = tree::LeafManager<TreeType>;
    using ValueType = typename TreeType::ValueType;
    using SetType = std::set<ValueType>;

    InactiveVoxelValues(LeafArray&, size_t numValues);

    void runParallel();
    void runSerial();

    void getInactiveValues(SetType&) const;

    inline InactiveVoxelValues(const InactiveVoxelValues<TreeType>&, tbb::split);
    inline void operator()(const tbb::blocked_range<size_t>&);
    inline void join(const InactiveVoxelValues<TreeType>&);

private:
    LeafArray& mLeafArray;      // leaf nodes to scan
    SetType mInactiveValues;    // distinct inactive values found so far
    size_t mNumValues;          // cancellation threshold
};// InactiveVoxelValues
/// @brief Construct from a leaf-node manager; scanning stops early once more
/// than @a numValues distinct inactive values have been collected.
template<typename TreeType>
InactiveVoxelValues<TreeType>::InactiveVoxelValues(LeafArray& leafs, size_t numValues)
    : mLeafArray(leafs)
    , mInactiveValues()
    , mNumValues(numValues)
{
}
/// @brief TBB splitting constructor; the split copy starts with an empty
/// value set and shares the leaf array.
template <typename TreeType>
inline
InactiveVoxelValues<TreeType>::InactiveVoxelValues(
    const InactiveVoxelValues<TreeType>& rhs, tbb::split)
    : mLeafArray(rhs.mLeafArray)
    , mInactiveValues()
    , mNumValues(rhs.mNumValues)
{
}
/// @brief Collect inactive voxel values over all leaf nodes with tbb::parallel_reduce.
template<typename TreeType>
void
InactiveVoxelValues<TreeType>::runParallel()
{
    tbb::parallel_reduce(mLeafArray.getRange(), *this);
}
/// @brief Serial alternative: process the full leaf range on the calling thread.
template<typename TreeType>
void
InactiveVoxelValues<TreeType>::runSerial()
{
    (*this)(mLeafArray.getRange());
}
/// @brief TBB body: insert the inactive voxel values of each leaf in the
/// range into the local set, cancelling the task group once the number of
/// distinct values exceeds the threshold.
template<typename TreeType>
inline void
InactiveVoxelValues<TreeType>::operator()(const tbb::blocked_range<size_t>& range)
{
    for (size_t n = range.begin(); n < range.end(); ++n) {
        if (tbb::task::self().is_cancelled()) break;
        for (typename TreeType::LeafNodeType::ValueOffCIter iter =
            mLeafArray.leaf(n).cbeginValueOff(); iter; ++iter)
        {
            mInactiveValues.insert(iter.getValue());
        }
        // Over budget: tell all other workers to stop early.
        if (mInactiveValues.size() > mNumValues) {
            tbb::task::self().cancel_group_execution();
        }
    }
}
/// @brief TBB reduction: take the union of the value sets of two bodies.
template<typename TreeType>
inline void
InactiveVoxelValues<TreeType>::join(const InactiveVoxelValues<TreeType>& rhs)
{
    mInactiveValues.insert(rhs.mInactiveValues.begin(), rhs.mInactiveValues.end());
}
/// @brief Insert the collected distinct inactive values into @a values.
template<typename TreeType>
inline void
InactiveVoxelValues<TreeType>::getInactiveValues(SetType& values) const
{
    values.insert(mInactiveValues.begin(), mInactiveValues.end());
}
////////////////////////////////////////
/// @brief TBB reduction body that collects the set of distinct values stored
/// in the inactive tiles of a tree.
/// @details Collection is cancelled as soon as more than @c numValues
/// distinct values have been found.
template<typename TreeType>
class InactiveTileValues
{
public:
    using IterRange = tree::IteratorRange<typename TreeType::ValueOffCIter>;
    using ValueType = typename TreeType::ValueType;
    using SetType = std::set<ValueType>;

    InactiveTileValues(size_t numValues);

    void runParallel(IterRange&);
    void runSerial(IterRange&);

    void getInactiveValues(SetType&) const;

    inline InactiveTileValues(const InactiveTileValues<TreeType>&, tbb::split);
    inline void operator()(IterRange&);
    inline void join(const InactiveTileValues<TreeType>&);

private:
    SetType mInactiveValues;    // distinct inactive values found so far
    size_t mNumValues;          // cancellation threshold
};
/// @brief Constructor; scanning stops early once more than @a numValues
/// distinct inactive values have been collected.
template<typename TreeType>
InactiveTileValues<TreeType>::InactiveTileValues(size_t numValues)
    : mInactiveValues()
    , mNumValues(numValues)
{
}
/// @brief TBB splitting constructor; the split copy starts with an empty value set.
template <typename TreeType>
inline
InactiveTileValues<TreeType>::InactiveTileValues(
    const InactiveTileValues<TreeType>& rhs, tbb::split)
    : mInactiveValues()
    , mNumValues(rhs.mNumValues)
{
}
/// @brief Collect inactive tile values over the iterator range with tbb::parallel_reduce.
template<typename TreeType>
void
InactiveTileValues<TreeType>::runParallel(IterRange& range)
{
    tbb::parallel_reduce(range, *this);
}
/// @brief Serial alternative: process the full iterator range on the calling thread.
template<typename TreeType>
void
InactiveTileValues<TreeType>::runSerial(IterRange& range)
{
    (*this)(range);
}
/// @brief TBB body: insert the inactive tile values visited by each iterator
/// in the range into the local set, cancelling the task group once the number
/// of distinct values exceeds the threshold.
template<typename TreeType>
inline void
InactiveTileValues<TreeType>::operator()(IterRange& range)
{
    while (range) {
        if (tbb::task::self().is_cancelled()) break;
        for (typename TreeType::ValueOffCIter iter = range.iterator(); iter; ++iter) {
            mInactiveValues.insert(iter.getValue());
        }
        // Over budget: tell all other workers to stop early.
        if (mInactiveValues.size() > mNumValues) {
            tbb::task::self().cancel_group_execution();
        }
        ++range;
    }
}
// Merge the tile values collected by another TBB worker into this one.
template<typename TreeType>
inline void
InactiveTileValues<TreeType>::join(const InactiveTileValues<TreeType>& rhs)
{
    const SetType& other = rhs.mInactiveValues;
    mInactiveValues.insert(other.begin(), other.end());
}
// Copy every collected tile value into the caller-supplied set.
template<typename TreeType>
inline void
InactiveTileValues<TreeType>::getInactiveValues(SetType& values) const
{
    values.insert(mInactiveValues.cbegin(), mInactiveValues.cend());
}
} // namespace diagnostics_internal
////////////////////////////////////////
/// @brief Collect the unique values of all inactive voxels and tiles of a grid.
///
/// @param grid       grid to inspect (its tree is not modified)
/// @param values     output vector of unique inactive values, in sorted order
/// @param numValues  search budget; collection stops early once exceeded
/// @return true if the number of unique inactive values is at most @a numValues
template<class GridType>
bool
uniqueInactiveValues(const GridType& grid,
    std::vector<typename GridType::ValueType>& values, size_t numValues)
{
    using TreeType = typename GridType::TreeType;
    using ValueType = typename GridType::ValueType;

    std::set<ValueType> uniqueValues;

    { // Pass 1: unique values of inactive voxels stored in leaf nodes.
        TreeType& tree = const_cast<TreeType&>(grid.tree());
        tree::LeafManager<TreeType> leafs(tree);
        diagnostics_internal::InactiveVoxelValues<TreeType> voxelOp(leafs, numValues);
        voxelOp.runParallel();
        voxelOp.getInactiveValues(uniqueValues);
    }

    // Pass 2: inactive tiles — skipped when pass 1 already blew the budget.
    if (uniqueValues.size() <= numValues) {
        typename TreeType::ValueOffCIter iter(grid.tree());
        iter.setMaxDepth(TreeType::ValueAllIter::LEAF_DEPTH - 1);

        diagnostics_internal::InactiveTileValues<TreeType> tileOp(numValues);

        tree::IteratorRange<typename TreeType::ValueOffCIter> range(iter);
        tileOp.runParallel(range);
        tileOp.getInactiveValues(uniqueValues);
    }

    // Export the (sorted) unique values.
    values.assign(uniqueValues.begin(), uniqueValues.end());

    return values.size() <= numValues;
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_DIAGNOSTICS_HAS_BEEN_INCLUDED
| 47,311 | C | 34.519519 | 100 | 0.624506 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/LevelSetFracture.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file tools/LevelSetFracture.h
///
/// @brief Divide volumes represented by level set grids into multiple,
/// disjoint pieces by intersecting them with one or more "cutter" volumes,
/// also represented by level sets.
#ifndef OPENVDB_TOOLS_LEVELSETFRACTURE_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_LEVELSETFRACTURE_HAS_BEEN_INCLUDED
#include <openvdb/Grid.h>
#include <openvdb/math/Quat.h>
#include <openvdb/util/NullInterrupter.h>
#include "Composite.h" // for csgIntersectionCopy() and csgDifferenceCopy()
#include "GridTransformer.h" // for resampleToMatch()
#include "LevelSetUtil.h" // for sdfSegmentation()
#include <algorithm> // for std::max(), std::min()
#include <limits>
#include <list>
#include <vector>
#include <tbb/blocked_range.h>
#include <tbb/parallel_reduce.h>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Level set fracturing
template<class GridType, class InterruptType = util::NullInterrupter>
class LevelSetFracture
{
public:
    using Vec3sList = std::vector<Vec3s>;
    using QuatsList = std::vector<math::Quats>;
    using GridPtrList = std::list<typename GridType::Ptr>;
    using GridPtrListIter = typename GridPtrList::iterator;

    /// @brief Default constructor
    ///
    /// @param interrupter optional interrupter object
    explicit LevelSetFracture(InterruptType* interrupter = nullptr);

    /// @brief Divide volumes represented by level set grids into multiple,
    /// disjoint pieces by intersecting them with one or more "cutter" volumes,
    /// also represented by level sets.
    /// @details If desired, the process can be applied iteratively, so that
    /// fragments created with one cutter are subdivided by other cutters.
    ///
    /// @note The incoming @a grids and the @a cutter are required to have matching
    /// transforms and narrow band widths!
    ///
    /// @param grids list of grids to fracture. The residuals of the
    /// fractured grids will remain in this list
    /// @param cutter a level set grid to use as the cutter object
    /// @param segment toggle to split disjoint fragments into their own grids
    /// @param points optional list of world space points at which to instance the
    /// cutter object (if null, use the cutter's current position only)
    /// @param rotations optional list of custom rotations for each cutter instance
    /// @param cutterOverlap toggle to allow consecutive cutter instances to fracture
    /// previously generated fragments
    void fracture(GridPtrList& grids, const GridType& cutter, bool segment = false,
        const Vec3sList* points = nullptr, const QuatsList* rotations = nullptr,
        bool cutterOverlap = true);

    /// Return a list of new fragments, not including the residuals from the input grids.
    GridPtrList& fragments() { return mFragments; }

    /// Remove all elements from the fragment list.
    void clear() { mFragments.clear(); }

private:
    // disallow copy by assignment
    void operator=(const LevelSetFracture&) {}

    // Forward interruption checks to the optional interrupter, if one was supplied.
    bool wasInterrupted(int percent = -1) const {
        return mInterrupter && mInterrupter->wasInterrupted(percent);
    }

    // Returns false for small pieces (fewer than 9 leaf nodes) that either
    // contain fewer than 27 active voxels or whose values have a single sign
    // (i.e. no zero crossing).
    bool isValidFragment(GridType&) const;
    // Split each grid in the list into its disjoint SDF components.
    void segmentFragments(GridPtrList&) const;
    // CSG each grid against the cutter; new fragments are appended to
    // mFragments and the residuals replace the originals in the list.
    void process(GridPtrList&, const GridType& cutter);

    InterruptType* mInterrupter; // optional, not owned
    GridPtrList mFragments;      // fragments produced by fracture()
};
////////////////////////////////////////
// Internal utility objects and implementation details
namespace level_set_fracture_internal {
/// Parallel-reduce functor that scans a list of leaf nodes and records the
/// smallest and largest voxel value encountered.
template<typename LeafNodeType>
struct FindMinMaxVoxelValue {

    using ValueType = typename LeafNodeType::ValueType;

    FindMinMaxVoxelValue(const std::vector<const LeafNodeType*>& nodes)
        : minValue(std::numeric_limits<ValueType>::max())
        , maxValue(-minValue)
        , mNodes(nodes.empty() ? nullptr : &nodes.front())
    {
    }

    /// TBB splitting constructor: fresh extrema, shared node array.
    FindMinMaxVoxelValue(FindMinMaxVoxelValue& rhs, tbb::split)
        : minValue(std::numeric_limits<ValueType>::max())
        , maxValue(-minValue)
        , mNodes(rhs.mNodes)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) {
        for (size_t idx = range.begin(); idx < range.end(); ++idx) {
            const ValueType* voxels = mNodes[idx]->buffer().data();
            for (Index offset = 0; offset < LeafNodeType::SIZE; ++offset) {
                const ValueType val = voxels[offset];
                if (val < minValue) minValue = val;
                if (val > maxValue) maxValue = val;
            }
        }
    }

    /// Merge the extrema found by another worker.
    void join(FindMinMaxVoxelValue& rhs) {
        if (rhs.minValue < minValue) minValue = rhs.minValue;
        if (rhs.maxValue > maxValue) maxValue = rhs.maxValue;
    }

    ValueType minValue, maxValue;

    LeafNodeType const * const * const mNodes;
}; // struct FindMinMaxVoxelValue
} // namespace level_set_fracture_internal
////////////////////////////////////////
// Construct with an optional interrupter (may be null) and no fragments.
template<class GridType, class InterruptType>
LevelSetFracture<GridType, InterruptType>::LevelSetFracture(InterruptType* interrupter)
    : mInterrupter(interrupter)
    , mFragments()
{
}
template<class GridType, class InterruptType>
void
LevelSetFracture<GridType, InterruptType>::fracture(GridPtrList& grids, const GridType& cutter,
    bool segmentation, const Vec3sList* points, const QuatsList* rotations, bool cutterOverlap)
{
    // We can process all incoming grids with the same cutter instance,
    // this optimization is enabled by the requirement of having matching
    // transforms between all incoming grids and the cutter object.
    if (points && points->size() != 0) {

        // Shallow-copy the cutter so per-instance transforms can be applied
        // without mutating the caller's grid.
        math::Transform::Ptr originalCutterTransform = cutter.transform().copy();
        GridType cutterGrid(*const_cast<GridType*>(&cutter), ShallowCopy());

        const bool hasInstanceRotations =
            points && rotations && points->size() == rotations->size();

        // for each instance point..
        for (size_t p = 0, P = points->size(); p < P; ++p) {
            int percent = int((float(p) / float(P)) * 100.0);
            if (wasInterrupted(percent)) break;

            GridType instCutterGrid;
            instCutterGrid.setTransform(originalCutterTransform->copy());

            // Compose the instance transform: optional rotation, then a
            // translation to the instance point.
            math::Transform::Ptr xform = originalCutterTransform->copy();

            if (hasInstanceRotations) {
                const Vec3s& rot = (*rotations)[p].eulerAngles(math::XYZ_ROTATION);
                xform->preRotate(rot[0], math::X_AXIS);
                xform->preRotate(rot[1], math::Y_AXIS);
                xform->preRotate(rot[2], math::Z_AXIS);
                xform->postTranslate((*points)[p]);
            } else {
                xform->postTranslate((*points)[p]);
            }

            cutterGrid.setTransform(xform);

            // Since there is no scaling, use the generic resampler instead of
            // the more expensive level set rebuild tool.
            if (mInterrupter != nullptr) {

                if (hasInstanceRotations) {
                    doResampleToMatch<BoxSampler>(cutterGrid, instCutterGrid, *mInterrupter);
                } else {
                    doResampleToMatch<PointSampler>(cutterGrid, instCutterGrid, *mInterrupter);
                }
            } else {
                util::NullInterrupter interrupter;

                if (hasInstanceRotations) {
                    doResampleToMatch<BoxSampler>(cutterGrid, instCutterGrid, interrupter);
                } else {
                    doResampleToMatch<PointSampler>(cutterGrid, instCutterGrid, interrupter);
                }
            }

            if (wasInterrupted(percent)) break;

            // Optionally let this cutter instance also subdivide fragments
            // produced by earlier instances.
            if (cutterOverlap && !mFragments.empty()) process(mFragments, instCutterGrid);
            process(grids, instCutterGrid);
        }

    } else {
        // use cutter in place
        if (cutterOverlap && !mFragments.empty()) process(mFragments, cutter);
        process(grids, cutter);
    }

    // Optionally split disjoint pieces into separate grids.
    if (segmentation) {
        segmentFragments(mFragments);
        segmentFragments(grids);
    }
}
// Reject fragments that are too small to be meaningful. Only small pieces
// (fewer than 9 leaf nodes) are inspected in detail; larger ones are
// accepted outright.
template<class GridType, class InterruptType>
bool
LevelSetFracture<GridType, InterruptType>::isValidFragment(GridType& grid) const
{
    using LeafNodeType = typename GridType::TreeType::LeafNodeType;

    if (grid.tree().leafCount() > 8) return true;

    std::vector<const LeafNodeType*> nodes;
    grid.tree().getNodes(nodes);

    Index64 activeVoxelCount = 0;
    for (const LeafNodeType* node : nodes) {
        activeVoxelCount += node->onVoxelCount();
    }

    // Too few active voxels to form a fragment.
    if (activeVoxelCount < 27) return false;

    level_set_fracture_internal::FindMinMaxVoxelValue<LeafNodeType> op(nodes);
    tbb::parallel_reduce(tbb::blocked_range<size_t>(0, nodes.size()), op);

    // A valid SDF piece must contain values of both signs (a zero crossing).
    return (op.minValue < 0) != (op.maxValue < 0);
}
// Split every grid in the list into its disjoint SDF components and rebuild
// the list from the resulting pieces.
template<class GridType, class InterruptType>
void
LevelSetFracture<GridType, InterruptType>::segmentFragments(GridPtrList& grids) const
{
    GridPtrList segmented;

    for (auto& grid : grids) {
        std::vector<typename GridType::Ptr> pieces;
        segmentSDF(*grid, pieces);
        for (const auto& piece : pieces) {
            segmented.push_back(piece);
        }
    }

    grids.swap(segmented);
}
// CSG each grid against the cutter. The piece inside the cutter becomes a new
// fragment; the piece outside replaces the original grid in place. Grids for
// which either piece is invalid are left untouched.
template<class GridType, class InterruptType>
void
LevelSetFracture<GridType, InterruptType>::process(
    GridPtrList& grids, const GridType& cutter)
{
    using GridPtr = typename GridType::Ptr;
    GridPtrList newFragments;

    for (GridPtr& grid : grids) {
        if (wasInterrupted()) break;

        // Piece that lies inside the cutter.
        GridPtr inside = csgIntersectionCopy(*grid, cutter);
        if (!isValidFragment(*inside)) continue;

        // Piece that lies outside the cutter.
        GridPtr outside = csgDifferenceCopy(*grid, cutter);
        if (!isValidFragment(*outside)) continue;

        newFragments.push_back(inside);

        // The residual replaces the original grid's tree in place.
        grid->tree().clear();
        grid->tree().merge(outside->tree());
    }

    if (!newFragments.empty()) {
        mFragments.splice(mFragments.end(), newFragments);
    }
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_LEVELSETFRACTURE_HAS_BEEN_INCLUDED
| 10,433 | C | 31.811321 | 95 | 0.641522 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/FindActiveValues.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
///////////////////////////////////////////////////////////////////////////
//
/// @file FindActiveValues.h
///
/// @author Ken Museth
///
/// @brief Finds the active values and tiles in a tree that intersects a bounding box.
/// Methods are provided that count the number of active values and tiles,
/// test for the existence of active values and tiles, and return a list of
/// the active tiles that intersect a bbox.
///
/// @warning For repeated calls to the free-standing functions defined below
/// consider instead creating an instance of FindActiveValues
/// and then repeatedly call its member methods. This assumes the tree
/// to be constant between calls but is slightly faster.
///
///////////////////////////////////////////////////////////////////////////
#ifndef OPENVDB_TOOLS_FINDACTIVEVALUES_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_FINDACTIVEVALUES_HAS_BEEN_INCLUDED
#include <vector>
#include <openvdb/version.h> // for OPENVDB_VERSION_NAME
#include <openvdb/Types.h>
#include <openvdb/tree/ValueAccessor.h>
#include <tbb/blocked_range.h>
#include <tbb/parallel_reduce.h>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Struct that encodes a bounding box, value and level of a tile
///
/// @details The bbox of a tile is trimmed to the bounding box that probed it.
/// The level is typically defined as: 1 is 8^3, 2 is 128^3, and 3 is 4096^3.
template<typename ValueType>
struct TileData;
/// @brief Returns true if the bounding box intersects any of the active
/// values in a tree, i.e. either active voxels or active tiles.
///
/// @warning For repeated calls to this method consider instead creating an instance of
/// FindActiveValues and then repeatedly call anyActiveValues(). This assumes the tree
/// to be constant between calls but is slightly faster.
///
/// @param tree const tree to be tested for active values.
/// @param bbox index bounding box which is intersected against the active values.
template<typename TreeT>
inline bool
anyActiveValues(const TreeT& tree, const CoordBBox &bbox);
/// @brief Returns true if the bounding box intersects any of the active
/// voxels in a tree, i.e. ignores active tile values.
///
/// @note In VDB voxels by definition reside in the leaf nodes ONLY. So this method
/// ignores active tile values that reside higher up in the VDB tree structure.
///
/// @warning For repeated calls to this method consider instead creating an instance of
/// FindActiveValues and then repeatedly call anyActiveVoxels(). This assumes the tree
/// to be constant between calls but is slightly faster.
///
/// @param tree const tree to be tested for active voxels.
/// @param bbox index bounding box which is intersected against the active voxels.
template<typename TreeT>
inline bool
anyActiveVoxels(const TreeT& tree, const CoordBBox &bbox);
/// @brief Returns true if the bounding box intersects any of the active
/// tiles in a tree, i.e. ignores active leaf values.
///
/// @warning For repeated calls to this method consider instead creating an instance of
/// FindActiveValues and then repeatedly call anyActiveTiles(). This assumes the tree
/// to be constant between calls but is slightly faster.
///
/// @param tree const tree to be tested for active tiles.
/// @param bbox index bounding box which is intersected against the active tiles.
template<typename TreeT>
inline bool
anyActiveTiles(const TreeT& tree, const CoordBBox &bbox);
/// @brief Returns true if the bounding box intersects none of the active
/// values in a tree, i.e. neither active voxels or active tiles.
///
/// @warning For repeated calls to this method consider instead creating an instance of
/// FindActiveValues and then repeatedly call noActiveValues(). This assumes the tree
/// to be constant between calls but is slightly faster.
///
/// @param tree const tree to be tested for active values.
/// @param bbox index bounding box which is intersected against the active values.
template<typename TreeT>
inline bool
noActiveValues(const TreeT& tree, const CoordBBox &bbox);
/// @brief Returns the number of active values that intersect a bounding box,
/// i.e. the count includes both active voxels and virtual voxels in active tiles.
///
/// @warning For repeated calls to this method consider instead creating an instance of
/// FindActiveValues and then repeatedly call count(). This assumes the tree
/// to be constant between calls but is slightly faster.
///
/// @param tree const tree to be tested for active values.
/// @param bbox index bounding box which is intersected against the active values.
template<typename TreeT>
inline Index64
countActiveValues(const TreeT& tree, const CoordBBox &bbox);
/// @brief Return a vector with bounding boxes that represents all the intersections
/// between active tiles in the tree and the specified bounding box.
///
/// @warning For repeated calls to this method consider instead creating an instance of
/// FindActiveValues and then repeatedly call activeTiles(). This assumes the tree
/// to be constant between calls but is slightly faster.
///
/// @param tree const tree to be tested for active tiles.
/// @param bbox index bounding box which is intersected against the active tiles.
template<typename TreeT>
inline std::vector<TileData<typename TreeT::ValueType>>
activeTiles(const TreeT& tree, const CoordBBox &bbox);
//////////////////////////////////////////////////////////////////////////////////////////
/// @brief Finds the active values in a tree which intersects a bounding box.
///
/// @details Two methods are provided, one that count the number of active values
/// and one that simply tests if any active values intersect the bbox.
///
/// @warning Tree nodes are cached by this class so it's important that the tree is not
/// modified after this class is instantiated and before its methods are called.
template<typename TreeT>
class FindActiveValues
{
public:
    using TileDataT = TileData<typename TreeT::ValueType>;
    /// @brief Constructor from a const tree, which is assumed not to be modified after construction.
    FindActiveValues(const TreeT& tree);
    /// @brief Default destructor
    ~FindActiveValues();
    /// @brief Initiate this class with a new (or modified) tree.
    void update(const TreeT& tree);
    /// @brief Returns true if the specified bounding box intersects any active values.
    ///
    /// @warning Using a ValueAccessor (i.e. useAccessor = true) can improve performance for especially
    ///          small bounding boxes, but at the cost of no thread-safety. So if multiple threads are
    ///          calling this method concurrently use the default setting, useAccessor = false.
    bool anyActiveValues(const CoordBBox &bbox, bool useAccessor = false) const;
    /// @brief Returns true if the specified bounding box intersects any active voxels,
    ///        i.e. active values stored in leaf nodes (active tiles are ignored).
    bool anyActiveVoxels(const CoordBBox &bbox) const;
    /// @brief Returns true if the specified bounding box intersects any active tiles only.
    bool anyActiveTiles(const CoordBBox &bbox) const;
    /// @brief Returns true if the specified bounding box does not intersect any active values.
    ///
    /// @warning Using a ValueAccessor (i.e. useAccessor = true) can improve performance for especially
    ///          small bounding boxes, but at the cost of no thread-safety. So if multiple threads are
    ///          calling this method concurrently use the default setting, useAccessor = false.
    bool noActiveValues(const CoordBBox &bbox, bool useAccessor = false) const { return !this->anyActiveValues(bbox, useAccessor); }
    /// @brief Returns the number of active voxels intersected by the specified bounding box.
    Index64 count(const CoordBBox &bbox) const;
    /// @brief Return a vector with bounding boxes that represents all the intersections
    ///        between active tiles in the tree and the specified bounding box.
    std::vector<TileDataT> activeTiles(const CoordBBox &bbox) const;
    [[deprecated("Use anyActiveValues() instead")]] inline bool any(const CoordBBox &bbox, bool useAccessor = false) const
    {
        return this->anyActiveValues(bbox, useAccessor);
    }
    [[deprecated("Use noActiveValues() instead")]] inline bool none(const CoordBBox &bbox, bool useAccessor = false) const
    {
        return this->noActiveValues(bbox, useAccessor);
    }
private:
    // Cleans up internal data structures
    void clear();
    // builds internal data structures
    void init(const TreeT &tree);
    template<typename NodeT>
    typename NodeT::NodeMaskType getBBoxMask(const CoordBBox &bbox, const NodeT* node) const;
    // process leaf nodes
    inline bool anyActiveValues(const typename TreeT::LeafNodeType* leaf, const CoordBBox &bbox ) const { return this->anyActiveVoxels(leaf, bbox); }
    inline bool anyActiveVoxels(const typename TreeT::LeafNodeType* leaf, const CoordBBox &bbox ) const;
    static bool anyActiveTiles( const typename TreeT::LeafNodeType*, const CoordBBox& ) {return false;}// leaf nodes have no tiles
    void activeTiles(const typename TreeT::LeafNodeType*, const CoordBBox&, std::vector<TileDataT>&) const {;}// no-op
    inline Index64 count(const typename TreeT::LeafNodeType* leaf, const CoordBBox &bbox ) const;
    // process internal nodes
    template<typename NodeT>
    bool anyActiveValues(const NodeT* node, const CoordBBox &bbox) const;
    template<typename NodeT>
    bool anyActiveVoxels(const NodeT* node, const CoordBBox &bbox) const;
    template<typename NodeT>
    bool anyActiveTiles(const NodeT* node, const CoordBBox &bbox) const;
    template<typename NodeT>
    void activeTiles(const NodeT* node, const CoordBBox &bbox, std::vector<TileDataT> &tiles) const;
    template<typename NodeT>
    Index64 count(const NodeT* node, const CoordBBox &bbox) const;
    using AccT = tree::ValueAccessor<const TreeT, false/* IsSafe */>;
    using RootChildType = typename TreeT::RootNodeType::ChildNodeType;
    struct RootChild;
    AccT mAcc;
    std::vector<TileDataT> mRootTiles;// cache bbox of active tiles (faster to cache than access RootNode)
    std::vector<RootChild> mRootNodes;// cache bbox of child nodes (faster to cache than access RootNode)
};// FindActiveValues class
//////////////////////////////////////////////////////////////////////////////////////////
// Construct from a tree and immediately cache its root-level child nodes and
// active tiles. The tree must not be modified afterwards (see update()).
template<typename TreeT>
FindActiveValues<TreeT>::FindActiveValues(const TreeT& tree) : mAcc(tree), mRootTiles(), mRootNodes()
{
    this->init(tree);
}
// Destructor releases the cached root-level data.
template<typename TreeT>
FindActiveValues<TreeT>::~FindActiveValues()
{
    this->clear();
}
// Re-initialize the cached accessor, root tiles and root child nodes from a
// new (or modified) tree.
template<typename TreeT>
void FindActiveValues<TreeT>::update(const TreeT& tree)
{
    this->clear();
    mAcc = AccT(tree);
    this->init(tree);
}
// Drop the cached root-level active tiles and child nodes.
template<typename TreeT>
void FindActiveValues<TreeT>::clear()
{
    mRootTiles.clear();
    mRootNodes.clear();
}
// Cache every child node and every active tile of the tree's root node.
template<typename TreeT>
void FindActiveValues<TreeT>::init(const TreeT& tree)
{
    const auto& root = tree.root();
    for (auto childIter = root.cbeginChildOn(); childIter; ++childIter) {
        mRootNodes.emplace_back(childIter.getCoord(), &*childIter);
    }
    for (auto tileIter = root.cbeginValueOn(); tileIter; ++tileIter) {
        mRootTiles.emplace_back(root, tileIter.getCoord(), *tileIter);
    }
}
// Test the bbox against root-level active tiles and then recurse into the
// cached child nodes of the root.
template<typename TreeT>
bool FindActiveValues<TreeT>::anyActiveValues(const CoordBBox &bbox, bool useAccessor) const
{
    // Cheap early-out: probe the voxel at the center of the bbox.
    const Coord center = (bbox.min() + bbox.max()) >> 1;
    const bool centerOn = useAccessor ? mAcc.isValueOn(center)
                                      : mAcc.tree().isValueOn(center);
    if (centerOn) return true;

    // Any overlapping root-level active tile is a hit.
    for (const auto& tile : mRootTiles) {
        if (tile.bbox.hasOverlap(bbox)) return true;
    }

    for (const auto& node : mRootNodes) {
        if (!node.bbox.hasOverlap(bbox)) continue;// no overlap
        if (node.bbox.isInside(bbox)) {
            // bbox lies entirely inside this child, so no other node matters
            return this->anyActiveValues(node.child, bbox);
        }
        if (this->anyActiveValues(node.child, bbox)) return true;// partial overlap
    }
    return false;
}
// Like anyActiveValues(), but root-level active tiles are deliberately
// skipped: only active voxels stored in leaf nodes count.
template<typename TreeT>
bool FindActiveValues<TreeT>::anyActiveVoxels(const CoordBBox &bbox) const
{
    for (const auto& node : mRootNodes) {
        if (!node.bbox.hasOverlap(bbox)) continue;// no overlap
        if (node.bbox.isInside(bbox)) {
            // bbox lies entirely inside this child, so no other node matters
            return this->anyActiveVoxels(node.child, bbox);
        }
        if (this->anyActiveVoxels(node.child, bbox)) return true;// partial overlap
    }
    return false;
}
// Test the bbox against active tiles only: root-level tiles first, then
// tiles stored in the internal nodes below the root.
template<typename TreeT>
bool FindActiveValues<TreeT>::anyActiveTiles(const CoordBBox &bbox) const
{
    for (const auto& tile : mRootTiles) {
        if (tile.bbox.hasOverlap(bbox)) return true;
    }
    for (const auto& node : mRootNodes) {
        if (!node.bbox.hasOverlap(bbox)) continue;// no overlap
        if (node.bbox.isInside(bbox)) {
            // bbox lies entirely inside this child, so no other node matters
            return this->anyActiveTiles(node.child, bbox);
        }
        if (this->anyActiveTiles(node.child, bbox)) return true;// partial overlap
    }
    return false;
}
// Count all active values (voxels plus "virtual voxels" of active tiles)
// intersected by the bbox, starting from the cached root-level data.
template<typename TreeT>
Index64 FindActiveValues<TreeT>::count(const CoordBBox &bbox) const
{
    Index64 count = 0;
    for (auto& tile : mRootTiles) {//loop over active tiles only
        if (!tile.bbox.hasOverlap(bbox)) {
            continue;//ignore non-overlapping tiles
        } else if (tile.bbox.isInside(bbox)) {
            return bbox.volume();// bbox is completely inside the active tile
        } else if (bbox.isInside(tile.bbox)) {
            count += RootChildType::NUM_VOXELS;// active tile is completely inside the bbox
        } else {// partial overlap between tile and bbox
            auto tmp = tile.bbox;
            tmp.intersect(bbox);
            count += tmp.volume();
        }
    }
    for (auto &node : mRootNodes) {//loop over child nodes of the root node only
        if ( !node.bbox.hasOverlap(bbox) ) {
            continue;//ignore non-overlapping child nodes
        } else if ( node.bbox.isInside(bbox) ) {
            return this->count(node.child, bbox);// bbox is completely inside the child node
        } else {
            count += this->count(node.child, bbox);
        }
    }
    return count;
}
// Collect the intersections between active tiles and the bbox: root-level
// tiles first, then recurse into the cached child nodes of the root. Each
// returned TileData bbox is trimmed against the query bbox.
template<typename TreeT>
std::vector<TileData<typename TreeT::ValueType> >
FindActiveValues<TreeT>::activeTiles(const CoordBBox &bbox) const
{
    std::vector<TileDataT> tiles;
    for (auto& tile : mRootTiles) {//loop over active tiles only
        if (!tile.bbox.hasOverlap(bbox)) {
            continue;//ignore non-overlapping tiles
        } else if (tile.bbox.isInside(bbox)) {// bbox is completely inside the active tile
            tiles.emplace_back(bbox, tile.value, tile.level);
            return tiles;
        } else if (bbox.isInside(tile.bbox)) {// active tile is completely inside the bbox
            tiles.push_back(tile);
        } else {// partial overlap between tile and bbox
            auto tmp = tile.bbox;
            tmp.intersect(bbox);
            tiles.emplace_back(tmp, tile.value, tile.level);
        }
    }
    for (auto &node : mRootNodes) {//loop over child nodes of the root node only
        if ( !node.bbox.hasOverlap(bbox) ) {
            continue;//ignore non-overlapping child nodes
        } else if ( node.bbox.isInside(bbox) ) {// bbox is completely inside the child node
            this->activeTiles(node.child, bbox, tiles);
            return tiles;
        } else {// partial overlap between tile and child node
            this->activeTiles(node.child, bbox, tiles);
        }
    }
    return tiles;
}
// Compute the bit mask of this node's children/tiles whose index-space
// regions intersect the given bbox. Precondition: bbox overlaps the node.
template<typename TreeT>
template<typename NodeT>
typename NodeT::NodeMaskType FindActiveValues<TreeT>::getBBoxMask(const CoordBBox &bbox, const NodeT* node) const
{
    typename NodeT::NodeMaskType mask;// typically 32^3 or 16^3 bit mask
    auto b = node->getNodeBoundingBox();
    assert( bbox.hasOverlap(b) );
    if ( bbox.isInside(b) ) {
        mask.setOn();//node is completely inside the bbox so early out
    } else {
        b.intersect(bbox);// trim bounding box
        // transform bounding box from global to local coordinates
        b.min() &= NodeT::DIM-1u;
        b.min() >>= NodeT::ChildNodeType::TOTAL;
        b.max() &= NodeT::DIM-1u;
        b.max() >>= NodeT::ChildNodeType::TOTAL;
        assert( b.hasVolume() );
        auto it = b.begin();// iterates over all the child nodes or tiles that intersects bbox
        // NOTE(review): ijk binds once and is assumed to track the iterator's
        // internal coordinate as ++it advances — confirm that CoordBBox's
        // iterator operator* returns a reference.
        for (const Coord& ijk = *it; it; ++it) {
            // convert the local (i,j,k) offset to the node's linear bit index
            mask.setOn(ijk[2] + (ijk[1] << NodeT::LOG2DIM) + (ijk[0] << 2*NodeT::LOG2DIM));
        }
    }
    return mask;
}
// Test an internal node: any active tile covered by the bbox is a hit;
// otherwise recurse into the covered child nodes.
template<typename TreeT>
template<typename NodeT>
bool FindActiveValues<TreeT>::anyActiveValues(const NodeT* node, const CoordBBox &bbox) const
{
    // Bit mask of the tiles/children of this node covered by the bbox
    auto mask = this->getBBoxMask(bbox, node);

    // Any active tile inside the bbox means we are done
    if (!(mask & node->getValueMask()).isOff()) return true;

    // Otherwise descend into the covered child nodes
    mask &= node->getChildMask();
    const auto* table = node->getTable();
    for (auto iter = mask.beginOn(); iter; ++iter) {
        if (this->anyActiveValues(table[iter.pos()].getChild(), bbox)) return true;
    }
    return false;
}
// Test an internal node for active voxels only: active tiles are ignored and
// the search descends into the child nodes covered by the bbox.
template<typename TreeT>
template<typename NodeT>
bool FindActiveValues<TreeT>::anyActiveVoxels(const NodeT* node, const CoordBBox &bbox) const
{
    // Bit mask of the children of this node covered by the bbox
    auto mask = this->getBBoxMask(bbox, node);
    mask &= node->getChildMask();
    const auto* table = node->getTable();
    for (auto iter = mask.beginOn(); iter; ++iter) {
        if (this->anyActiveVoxels(table[iter.pos()].getChild(), bbox)) return true;
    }
    return false;
}
// Test a leaf node for active voxels inside the bbox.
template<typename TreeT>
inline bool FindActiveValues<TreeT>::anyActiveVoxels(const typename TreeT::LeafNodeType* leaf, const CoordBBox &bbox ) const
{
    const auto &mask = leaf->getValueMask();
    // Early-out 1: the leaf lies entirely inside the bbox, so any active
    // voxel at all is a hit.
    if (bbox.isInside(leaf->getNodeBoundingBox())) return !mask.isOff();
    // Early-out 2: every voxel is active, so the (guaranteed) overlap hits.
    if (mask.isOn()) return true;
    // Otherwise test each active voxel for containment.
    for (auto iter = leaf->cbeginValueOn(); iter; ++iter) {
        if (bbox.isInside(iter.getCoord())) return true;
    }
    return false;
}
// Test an internal node for active tiles intersecting the bbox; recursion
// stops above the leaf level since leaf nodes store voxels, not tiles.
template<typename TreeT>
template<typename NodeT>
bool FindActiveValues<TreeT>::anyActiveTiles(const NodeT* node, const CoordBBox &bbox) const
{
    // Bit mask of the tiles/children of this node covered by the bbox
    auto mask = this->getBBoxMask(bbox, node);

    // Any active tile covered by the bbox is a hit
    if (!(mask & node->getValueMask()).isOff()) return true;

    if (NodeT::LEVEL > 1) {// Only check child nodes if they are NOT leaf nodes
        mask &= node->getChildMask();
        const auto* table = node->getTable();
        for (auto iter = mask.beginOn(); iter; ++iter) {
            if (this->anyActiveTiles(table[iter.pos()].getChild(), bbox)) return true;
        }
    }
    return false;
}
// Count the active voxels of a leaf node that fall inside the bbox.
template<typename TreeT>
inline Index64 FindActiveValues<TreeT>::count(const typename TreeT::LeafNodeType* leaf, const CoordBBox &bbox ) const
{
    if (leaf->getValueMask().isOn()) {
        // Every voxel is active: the count is the volume of the clipped leaf bbox.
        auto clipped = leaf->getNodeBoundingBox();
        clipped.intersect(bbox);
        return clipped.volume();
    }
    Index64 total = 0;
    for (auto iter = leaf->cbeginValueOn(); iter; ++iter) {
        if (bbox.isInside(iter.getCoord())) ++total;
    }
    return total;
}
// Count all active values (voxels plus "virtual voxels" of active tiles) of
// an internal node that intersect the bbox. The child nodes and the active
// tiles covered by the bbox are each processed in parallel.
template<typename TreeT>
template<typename NodeT>
Index64 FindActiveValues<TreeT>::count(const NodeT* node, const CoordBBox &bbox) const
{
    Index64 count = 0;
    // Generate a bit masks
    auto mask = this->getBBoxMask(bbox, node);
    const auto childMask = mask & node->getChildMask();// prune the child mask with the bbox mask
    mask &= node->getValueMask();// prune active tile mask with the bbox mask
    const auto* table = node->getTable();
    {// Check child nodes
        using ChildT = typename NodeT::ChildNodeType;
        using RangeT = tbb::blocked_range<typename std::vector<const ChildT*>::iterator>;
        std::vector<const ChildT*> childNodes(childMask.countOn());
        int j=0;
        for (auto i = childMask.beginOn(); i; ++i, ++j) childNodes[j] = table[i.pos()].getChild();
        // NOTE: the identity must be an Index64 (not the int literal 0):
        // tbb::parallel_reduce deduces the reduction value type from the
        // identity argument, so an int identity would silently truncate
        // counts exceeding the range of int.
        count += tbb::parallel_reduce( RangeT(childNodes.begin(), childNodes.end()), Index64(0),
            [&](const RangeT& r, Index64 sum)->Index64 {
                for ( auto i = r.begin(); i != r.end(); ++i ) sum += this->count(*i, bbox);
                return sum;
            }, []( Index64 a, Index64 b )->Index64 { return a+b; }
        );
    }
    {// Check active tiles
        std::vector<Coord> coords(mask.countOn());
        using RangeT = tbb::blocked_range<typename std::vector<Coord>::iterator>;
        int j=0;
        for (auto i = mask.beginOn(); i; ++i, ++j) coords[j] = node->offsetToGlobalCoord(i.pos());
        // Each active tile contributes the volume of its bbox clipped against
        // the query bbox. Index64 identity for the same reason as above (a
        // single level-2 tile holds 4096^3 voxels, far more than INT_MAX).
        count += tbb::parallel_reduce( RangeT(coords.begin(), coords.end()), Index64(0),
            [&bbox](const RangeT& r, Index64 sum)->Index64 {
                for ( auto i = r.begin(); i != r.end(); ++i ) {
                    auto b = CoordBBox::createCube(*i, NodeT::ChildNodeType::DIM);
                    b.intersect(bbox);
                    sum += b.volume();
                }
                return sum;
            }, []( Index64 a, Index64 b )->Index64 { return a+b; }
        );
    }
    return count;
}
// process internal node
// Collect all active tiles of an internal node that intersect the bbox,
// recursing into child nodes above the leaf level. Eight or more tiles are
// assembled in parallel.
template<typename TreeT>
template<typename NodeT>
void FindActiveValues<TreeT>::activeTiles(const NodeT* node, const CoordBBox &bbox, std::vector<TileDataT> &tiles) const
{
    // Generate a bit masks
    auto mask = this->getBBoxMask(bbox, node);
    const auto childMask = mask & node->getChildMask();// prune the child mask with the bbox mask
    mask &= node->getValueMask();// prune active tile mask with the bbox mask
    if (NodeT::LEVEL > 1) {// Only check child nodes if they are NOT leaf nodes
        const auto* table = node->getTable();
        for (auto i = childMask.beginOn(); i; ++i) this->activeTiles(table[i.pos()].getChild(), bbox, tiles);
    }
    const size_t tileCount = mask.countOn();
    if (tileCount < 8) {// Serial processing of active tiles
        for (auto iter = mask.beginOn(); iter; ++iter) {
            tiles.emplace_back(*node, iter.pos());
            tiles.back().bbox.intersect(bbox);
        }
    } else {// Parallel processing of active tiles
        std::vector<TileDataT> tmp( tileCount );// for temporary thread-safe processing
        int n = 0;
        // Stash each tile's linear offset in the (otherwise unused) level
        // member so the parallel loop below can build the real TileData.
        for (auto iter = mask.beginOn(); iter; ++iter) tmp[n++].level = iter.pos();// placeholder to support multi-threading
        tbb::parallel_for(tbb::blocked_range<size_t>(0, tileCount, 8), [&](const tbb::blocked_range<size_t>& r) {
            for ( size_t i = r.begin(); i != r.end(); ++i ) {
                tmp[i] = TileDataT(*node, tmp[i].level);
                tmp[i].bbox.intersect(bbox);
            }
        });
        tiles.insert(tiles.end(), tmp.begin(), tmp.end());
    }
}
// Helper struct that pairs a child node of the root with the index-space
// bounding box that the child covers.
template<typename TreeT>
struct FindActiveValues<TreeT>::RootChild
{
    const CoordBBox bbox;// index-space bounding box covered by the child node
    const RootChildType* child;// non-owning pointer to the child node (may be null)
    RootChild(const Coord& ijk = Coord(), const RootChildType* ptr = nullptr)
        : bbox(CoordBBox::createCube(ijk, RootChildType::DIM)), child(ptr)
    {
    }
};// RootChild struct
//////////////////////////////////////////////////////////////////////////////////////////
/// @brief Struct that encodes the bounding box, value, level and active state of a tile.
template<typename ValueType>
struct TileData
{
    CoordBBox bbox;// index-space bounding box of the tile
    ValueType value;// value of the tile
    Index level;// level of the parent node from which the tile originates
    bool state;// active state of the tile
    /// @brief Default constructor
    TileData() = default;
    /// @brief Member data constructor
    TileData(const CoordBBox &b, const ValueType &v, Index l, bool active = true)
        : bbox(b), value(v), level(l), state(active) {}
    /// @brief Constructor from a parent node and the linear offset to one of its tiles
    ///
    /// @warning This is an expert-only method since it assumes the linear offset to be valid,
    ///          i.e. within the range of the dimension of the parent node and NOT corresponding
    ///          to a child node.
    template <typename ParentNodeT>
    TileData(const ParentNodeT &parent, Index childIdx)
        : bbox(CoordBBox::createCube(parent.offsetToGlobalCoord(childIdx), parent.getChildDim()))
        , level(parent.getLevel())
        , state(true)
    {
        assert(childIdx < ParentNodeT::NUM_VALUES);
        assert(parent.isChildMaskOff(childIdx));
        assert(parent.isValueMaskOn(childIdx));
        value = parent.getTable()[childIdx].getValue();
    }
    /// @brief Constructor from a parent node, the coordinate of the origin of one of its tiles,
    ///        and said tile's value.
    template <typename ParentNodeT>
    TileData(const ParentNodeT &parent, const Coord &ijk, const ValueType &v)
        : bbox(CoordBBox::createCube(ijk, parent.getChildDim()))
        , value(v)
        , level(parent.getLevel())
        , state(true)
    {
    }
};// TileData struct
//////////////////////////////////////////////////////////////////////////////////////////
// Implementation of stand-alone function: returns true if the bbox
// intersects any active values (voxels or tiles) in the tree.
template<typename TreeT>
inline bool
anyActiveValues(const TreeT& tree, const CoordBBox &bbox)
{
    // Construct a temporary functor and forward the query to it.
    return FindActiveValues<TreeT>(tree).anyActiveValues(bbox);
}
// Implementation of stand-alone function: returns true if the bbox
// intersects any active voxels in the tree.
template<typename TreeT>
inline bool
anyActiveVoxels(const TreeT& tree, const CoordBBox &bbox)
{
    // Construct a temporary functor and forward the query to it.
    return FindActiveValues<TreeT>(tree).anyActiveVoxels(bbox);
}
// Implementation of stand-alone function: returns true if the bbox
// intersects any active tiles in the tree.
template<typename TreeT>
inline bool
anyActiveTiles(const TreeT& tree, const CoordBBox &bbox)
{
    // Construct a temporary functor and forward the query to it.
    return FindActiveValues<TreeT>(tree).anyActiveTiles(bbox);
}
// Implementation of stand-alone function: returns true if the bbox
// intersects no active values (voxels or tiles) in the tree.
template<typename TreeT>
inline bool
noActiveValues(const TreeT& tree, const CoordBBox &bbox)
{
    // Construct a temporary functor and forward the query to it.
    return FindActiveValues<TreeT>(tree).noActiveValues(bbox);
}
// Implementation of stand-alone function: returns the number of active
// values (voxels and virtual voxels in active tiles) that intersect the bbox.
template<typename TreeT>
inline Index64
countActiveValues(const TreeT& tree, const CoordBBox &bbox)
{
    // Construct a temporary functor and forward the query to it.
    return FindActiveValues<TreeT>(tree).count(bbox);
}
// Implementation of stand-alone function: returns a vector describing all
// active tiles of the tree that intersect the bbox.
template<typename TreeT>
inline std::vector<TileData<typename TreeT::ValueType>>
activeTiles(const TreeT& tree, const CoordBBox &bbox)
{
    // Construct a temporary functor and forward the query to it.
    return FindActiveValues<TreeT>(tree).activeTiles(bbox);
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_FINDACTIVEVALUES_HAS_BEEN_INCLUDED
| 27,283 | C | 37.755682 | 149 | 0.654877 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Prune.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file Prune.h
///
/// @brief Defines various multi-threaded utility functions for trees
///
/// @author Ken Museth
#ifndef OPENVDB_TOOLS_PRUNE_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_PRUNE_HAS_BEEN_INCLUDED
#include <openvdb/math/Math.h> // for isNegative and negative
#include <openvdb/Types.h>
#include <openvdb/tree/NodeManager.h>
#include <algorithm> // for std::nth_element()
#include <type_traits>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Reduce the memory footprint of a @a tree by replacing with tiles
/// any nodes whose values are all the same (optionally to within a tolerance)
/// and have the same active state.
///
/// @note For trees with non-boolean values, child nodes with (approximately)
/// constant values are replaced with a tile whose value corresponds to the median
/// of the values in said child node.
///
/// @param tree the tree to be pruned
/// @param tolerance tolerance within which values are considered to be equal
/// @param threaded enable or disable threading (threading is enabled by default)
/// @param grainSize used to control the threading granularity (default is 1)
template<typename TreeT>
inline void
prune(TreeT& tree,
typename TreeT::ValueType tolerance = zeroVal<typename TreeT::ValueType>(),
bool threaded = true,
size_t grainSize = 1);
/// @brief Reduce the memory footprint of a @a tree by replacing with tiles
/// any non-leaf nodes whose values are all the same (optionally to within a tolerance)
/// and have the same active state.
///
/// @param tree the tree to be pruned
/// @param tolerance tolerance within which values are considered to be equal
/// @param threaded enable or disable threading (threading is enabled by default)
/// @param grainSize used to control the threading granularity (default is 1)
template<typename TreeT>
inline void
pruneTiles(TreeT& tree,
typename TreeT::ValueType tolerance = zeroVal<typename TreeT::ValueType>(),
bool threaded = true,
size_t grainSize = 1);
/// @brief Reduce the memory footprint of a @a tree by replacing with
/// background tiles any nodes whose values are all inactive.
///
/// @param tree the tree to be pruned
/// @param threaded enable or disable threading (threading is enabled by default)
/// @param grainSize used to control the threading granularity (default is 1)
template<typename TreeT>
inline void
pruneInactive(TreeT& tree, bool threaded = true, size_t grainSize = 1);
/// @brief Reduce the memory footprint of a @a tree by replacing any nodes
/// whose values are all inactive with tiles of the given @a value.
///
/// @param tree the tree to be pruned
/// @param value value assigned to inactive tiles created during pruning
/// @param threaded enable or disable threading (threading is enabled by default)
/// @param grainSize used to control the threading granularity (default is 1)
template<typename TreeT>
inline void
pruneInactiveWithValue(
TreeT& tree,
const typename TreeT::ValueType& value,
bool threaded = true,
size_t grainSize = 1);
/// @brief Reduce the memory footprint of a @a tree by replacing nodes
/// whose values are all inactive with inactive tiles having a value equal to
/// the first value encountered in the (inactive) child.
/// @details This method is faster than tolerance-based prune and
/// useful for narrow-band level set applications where inactive
/// values are limited to either an inside or an outside value.
///
/// @param tree the tree to be pruned
/// @param threaded enable or disable threading (threading is enabled by default)
/// @param grainSize used to control the threading granularity (default is 1)
///
/// @throw ValueError if the background of the @a tree is negative (as defined by math::isNegative)
template<typename TreeT>
inline void
pruneLevelSet(TreeT& tree,
bool threaded = true,
size_t grainSize = 1);
/// @brief Reduce the memory footprint of a @a tree by replacing nodes whose voxel values
/// are all inactive with inactive tiles having the value -| @a insideWidth |
/// if the voxel values are negative and | @a outsideWidth | otherwise.
///
/// @details This method is faster than tolerance-based prune and
/// useful for narrow-band level set applications where inactive
/// values are limited to either an inside or an outside value.
///
/// @param tree the tree to be pruned
/// @param outsideWidth the width of the outside of the narrow band
/// @param insideWidth the width of the inside of the narrow band
/// @param threaded enable or disable threading (threading is enabled by default)
/// @param grainSize used to control the threading granularity (default is 1)
///
/// @throw ValueError if @a outsideWidth is negative or @a insideWidth is
/// not negative (as defined by math::isNegative).
template<typename TreeT>
inline void
pruneLevelSet(TreeT& tree,
const typename TreeT::ValueType& outsideWidth,
const typename TreeT::ValueType& insideWidth,
bool threaded = true,
size_t grainSize = 1);
////////////////////////////////////////////////
/// @brief Functor for use with tree::NodeManager::foreachBottomUp() that replaces
/// with tiles any nodes whose values are all inactive.
template<typename TreeT, Index TerminationLevel = 0>
class InactivePruneOp
{
public:
    using ValueT = typename TreeT::ValueType;
    using RootT = typename TreeT::RootNodeType;
    using LeafT = typename TreeT::LeafNodeType;
    static_assert(RootT::LEVEL > TerminationLevel, "TerminationLevel out of range");
    /// @brief Constructor; tiles created during pruning get the tree's background value.
    InactivePruneOp(TreeT& tree) : mValue(tree.background())
    {
        tree.clearAllAccessors();//clear cache of nodes that could be pruned
    }
    /// @brief Constructor; tiles created during pruning get the value @a v.
    InactivePruneOp(TreeT& tree, const ValueT& v) : mValue(v)
    {
        tree.clearAllAccessors();//clear cache of nodes that could be pruned
    }
    // Nothing to do at the leaf node level
    void operator()(LeafT&) const {}
    // Prune the child nodes of the internal nodes
    template<typename NodeT>
    void operator()(NodeT& node) const
    {
        if (NodeT::LEVEL > TerminationLevel) {
            for (typename NodeT::ChildOnIter it=node.beginChildOn(); it; ++it) {
                // Replace a fully inactive child node with an inactive tile of mValue
                if (it->isInactive()) node.addTile(it.pos(), mValue, false);
            }
        }
    }
    // Prune the child nodes of the root node
    void operator()(RootT& root) const
    {
        for (typename RootT::ChildOnIter it = root.beginChildOn(); it; ++it) {
            if (it->isInactive()) root.addTile(it.getCoord(), mValue, false);
        }
        root.eraseBackgroundTiles();
    }
private:
    const ValueT mValue;// value assigned to tiles created during pruning
};// InactivePruneOp
/// @brief Functor for use with tree::NodeManager::foreachBottomUp() that replaces
/// with tiles any nodes whose values are all the same (to within a tolerance)
/// and share the same active state.
template<typename TreeT, Index TerminationLevel = 0>
class TolerancePruneOp
{
public:
    using ValueT = typename TreeT::ValueType;
    using RootT = typename TreeT::RootNodeType;
    using LeafT = typename TreeT::LeafNodeType;
    static_assert(RootT::LEVEL > TerminationLevel, "TerminationLevel out of range");
    TolerancePruneOp(TreeT& tree, const ValueT& tol) : mTolerance(tol)
    {
        tree.clearAllAccessors();//clear cache of nodes that could be pruned
    }
    // Prune the child nodes of the root node
    inline void operator()(RootT& root) const
    {
        ValueT value;
        bool state;
        for (typename RootT::ChildOnIter it = root.beginChildOn(); it; ++it) {
            if (this->isConstant(*it, value, state)) root.addTile(it.getCoord(), value, state);
        }
        root.eraseBackgroundTiles();
    }
    // Prune the child nodes of the internal nodes
    template<typename NodeT>
    inline void operator()(NodeT& node) const
    {
        if (NodeT::LEVEL > TerminationLevel) {
            ValueT value;
            bool state;
            for (typename NodeT::ChildOnIter it=node.beginChildOn(); it; ++it) {
                if (this->isConstant(*it, value, state)) node.addTile(it.pos(), value, state);
            }
        }
    }
    // Nothing to do at the leaf node level
    inline void operator()(LeafT&) const {}
private:
    // Private method specialized for leaf nodes
    inline ValueT median(LeafT& leaf) const {return leaf.medianAll(leaf.buffer().data());}
    // Private method for internal nodes
    // @note std::nth_element reorders the node's value table in place.
    template<typename NodeT>
    inline typename NodeT::ValueType median(NodeT& node) const
    {
        using UnionT = typename NodeT::UnionType;
        UnionT* data = const_cast<UnionT*>(node.getTable());//never do this at home kids :)
        static const size_t midpoint = (NodeT::NUM_VALUES - 1) >> 1;
        auto op = [](const UnionT& a, const UnionT& b){return a.getValue() < b.getValue();};
        std::nth_element(data, data + midpoint, data + NodeT::NUM_VALUES, op);
        return data[midpoint].getValue();
    }
    // Specialization to nodes templated on booleans values
    template<typename NodeT>
    inline
    typename std::enable_if<std::is_same<bool, typename NodeT::ValueType>::value, bool>::type
    isConstant(NodeT& node, bool& value, bool& state) const
    {
        return node.isConstant(value, state, mTolerance);
    }
    // Nodes templated on non-boolean values
    template<typename NodeT>
    inline
    typename std::enable_if<!std::is_same<bool, typename NodeT::ValueType>::value, bool>::type
    isConstant(NodeT& node, ValueT& value, bool& state) const
    {
        ValueT tmp;
        const bool test = node.isConstant(value, tmp, state, mTolerance);
        // Use the median of the node's values as the tile value
        if (test) value = this->median(node);
        return test;
    }
    const ValueT mTolerance;// values within this tolerance are considered equal
};// TolerancePruneOp
/// @brief Functor for use with tree::NodeManager::foreachBottomUp() that replaces
/// inactive nodes with inactive tiles set to either the inside or outside
/// narrow-band value of a level set.
template<typename TreeT, Index TerminationLevel = 0>
class LevelSetPruneOp
{
public:
    using ValueT = typename TreeT::ValueType;
    using RootT = typename TreeT::RootNodeType;
    using LeafT = typename TreeT::LeafNodeType;
    static_assert(RootT::LEVEL > TerminationLevel, "TerminationLevel out of range");
    /// @brief Constructor; uses the tree's background as the outside value
    /// and its negation as the inside value.
    /// @throw ValueError if the background of the tree is negative.
    LevelSetPruneOp(TreeT& tree)
        : mOutside(tree.background())
        , mInside(math::negative(mOutside))
    {
        if (math::isNegative(mOutside)) {
            OPENVDB_THROW(ValueError,
                          "LevelSetPruneOp: the background value cannot be negative!");
        }
        tree.clearAllAccessors();//clear cache of nodes that could be pruned
    }
    /// @brief Constructor with explicit outside and inside narrow-band values.
    /// @throw ValueError if @a outside is negative or @a inside is not negative.
    LevelSetPruneOp(TreeT& tree, const ValueT& outside, const ValueT& inside)
        : mOutside(outside)
        , mInside(inside)
    {
        if (math::isNegative(mOutside)) {
            OPENVDB_THROW(ValueError,
                          "LevelSetPruneOp: the outside value cannot be negative!");
        }
        if (!math::isNegative(mInside)) {
            OPENVDB_THROW(ValueError,
                          "LevelSetPruneOp: the inside value must be negative!");
        }
        tree.clearAllAccessors();//clear cache of nodes that could be pruned
    }
    // Nothing to do at the leaf node level
    void operator()(LeafT&) const {}
    // Prune the child nodes of the internal nodes
    template<typename NodeT>
    void operator()(NodeT& node) const
    {
        if (NodeT::LEVEL > TerminationLevel) {
            for (typename NodeT::ChildOnIter it=node.beginChildOn(); it; ++it) {
                if (it->isInactive()) node.addTile(it.pos(), this->getTileValue(it), false);
            }
        }
    }
    // Prune the child nodes of the root node
    void operator()(RootT& root) const
    {
        for (typename RootT::ChildOnIter it = root.beginChildOn(); it; ++it) {
            if (it->isInactive()) root.addTile(it.getCoord(), this->getTileValue(it), false);
        }
        root.eraseBackgroundTiles();
    }
private:
    // Pick the inside or outside value based on the sign of the child's first value
    template <typename IterT>
    inline ValueT getTileValue(const IterT& iter) const
    {
        return math::isNegative(iter->getFirstValue()) ? mInside : mOutside;
    }
    const ValueT mOutside, mInside;// narrow-band outside and inside values
};// LevelSetPruneOp
template<typename TreeT>
inline void
prune(TreeT& tree, typename TreeT::ValueType tol, bool threaded, size_t grainSize)
{
tree::NodeManager<TreeT, TreeT::DEPTH-2> nodes(tree);
TolerancePruneOp<TreeT> op(tree, tol);
nodes.foreachBottomUp(op, threaded, grainSize);
}
template<typename TreeT>
inline void
pruneTiles(TreeT& tree, typename TreeT::ValueType tol, bool threaded, size_t grainSize)
{
tree::NodeManager<TreeT, TreeT::DEPTH-3> nodes(tree);
TolerancePruneOp<TreeT> op(tree, tol);
nodes.foreachBottomUp(op, threaded, grainSize);
}
template<typename TreeT>
inline void
pruneInactive(TreeT& tree, bool threaded, size_t grainSize)
{
tree::NodeManager<TreeT, TreeT::DEPTH-2> nodes(tree);
InactivePruneOp<TreeT> op(tree);
nodes.foreachBottomUp(op, threaded, grainSize);
}
template<typename TreeT>
inline void
pruneInactiveWithValue(TreeT& tree, const typename TreeT::ValueType& v,
bool threaded, size_t grainSize)
{
tree::NodeManager<TreeT, TreeT::DEPTH-2> nodes(tree);
InactivePruneOp<TreeT> op(tree, v);
nodes.foreachBottomUp(op, threaded, grainSize);
}
template<typename TreeT>
inline void
pruneLevelSet(TreeT& tree,
const typename TreeT::ValueType& outside,
const typename TreeT::ValueType& inside,
bool threaded,
size_t grainSize)
{
tree::NodeManager<TreeT, TreeT::DEPTH-2> nodes(tree);
LevelSetPruneOp<TreeT> op(tree, outside, inside);
nodes.foreachBottomUp(op, threaded, grainSize);
}
template<typename TreeT>
inline void
pruneLevelSet(TreeT& tree, bool threaded, size_t grainSize)
{
tree::NodeManager<TreeT, TreeT::DEPTH-2> nodes(tree);
LevelSetPruneOp<TreeT> op(tree);
nodes.foreachBottomUp(op, threaded, grainSize);
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_PRUNE_HAS_BEEN_INCLUDED
| 13,831 | C | 33.493766 | 99 | 0.673849 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Morphology.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file Morphology.h
///
/// @brief Implementation of morphological dilation and erosion.
///
/// @note By design the morphological operations only change the
/// state of voxels, not their values. If one desires to
/// change the values of voxels that change state an efficient
/// technique is to construct a boolean mask by performing a
/// topology difference between the original and final grids.
///
/// @todo Extend erosion with 18 and 26 neighbors (coming soon!)
///
/// @author Ken Museth
///
#ifndef OPENVDB_TOOLS_MORPHOLOGY_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_MORPHOLOGY_HAS_BEEN_INCLUDED
#include <openvdb/Types.h>
#include <openvdb/Grid.h>
#include <openvdb/math/Math.h> // for isApproxEqual()
#include <openvdb/tree/TreeIterator.h>
#include <openvdb/tree/ValueAccessor.h>
#include <openvdb/tree/LeafManager.h>
#include "Prune.h"// for pruneLevelSet
#include "ValueTransformer.h" // for foreach()
#include <tbb/tbb_thread.h>
#include <tbb/task_scheduler_init.h>
#include <tbb/enumerable_thread_specific.h>
#include <tbb/parallel_for.h>
#include <functional>
#include <type_traits>
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Voxel topology of nearest neighbors
/// @details
/// <dl>
/// <dt><b>NN_FACE</b>
/// <dd>face adjacency (6 nearest neighbors, defined as all neighbor
/// voxels connected along one of the primary axes)
///
/// <dt><b>NN_FACE_EDGE</b>
/// <dd>face and edge adjacency (18 nearest neighbors, defined as all
/// neighbor voxels connected along either one or two of the primary axes)
///
/// <dt><b>NN_FACE_EDGE_VERTEX</b>
/// <dd>face, edge and vertex adjacency (26 nearest neighbors, defined
/// as all neighbor voxels connected along either one, two or all
/// three of the primary axes)
/// </dl>
enum NearestNeighbors { NN_FACE = 6, NN_FACE_EDGE = 18, NN_FACE_EDGE_VERTEX = 26 };
/// @brief Different policies when dilating trees with active tiles
/// @details
/// <dl>
/// <dt><b>IGNORE_TILES</b>
/// <dd>Active tiles are ignored, i.e. only active voxels are dilated.
///
/// <dt><b>EXPAND_TILES</b>
/// <dd>Active tiles are expanded into active voxels and then dilated.
///
/// <dt><b>PRESERVE_TILES</b>
/// <dd>Active tiles remain unchanged but they still contribute to the
/// dilation as if they were active voxels.
/// </dl>
enum TilePolicy { IGNORE_TILES, EXPAND_TILES, PRESERVE_TILES };
/// @brief Topologically dilate all active values (i.e. both voxels
/// and tiles) in a tree using one of three nearest neighbor
/// connectivity patterns.
/// @note This method is fully multi-threaded and support active tiles!
///
/// @param tree tree to be dilated
/// @param iterations number of iterations to apply the dilation
/// @param nn connectivity pattern of the dilation: either
/// face-adjacent (6 nearest neighbors), face- and edge-adjacent
/// (18 nearest neighbors) or face-, edge- and vertex-adjacent (26
/// nearest neighbors).
/// @param mode Defines the policy for handling active tiles
/// (see above for details)
///
/// @note The values of any voxels are unchanged.
template<typename TreeType>
inline void dilateActiveValues(TreeType& tree,
int iterations = 1,
NearestNeighbors nn = NN_FACE,
TilePolicy mode = PRESERVE_TILES);
/// @brief Topologically dilate all active values (i.e. both voxels
/// and tiles) in a tree using one of three nearest neighbor
/// connectivity patterns.
///
/// @warning Unlike the method above this one takes a LeafManager,
/// however (unlike the dilateVoxels method below) it offers no performance
/// advantage over the one that takes a tree. It's merely included for
/// API compatibility. The leaf nodes in the manager are updated
/// after the dilation, which incurs a (very small) overhead.
///
/// @note This method is fully multi-threaded and support active tiles!
///
/// @param manager Leaf node manager for the tree to be dilated.
/// On exit it is updated to include all the leaf
/// nodes of the dilated tree.
/// @param iterations number of iterations to apply the dilation
/// @param nn connectivity pattern of the dilation: either
/// face-adjacent (6 nearest neighbors), face- and edge-adjacent
/// (18 nearest neighbors) or face-, edge- and vertex-adjacent (26
/// nearest neighbors).
/// @param mode Defines the policy for handling active tiles
/// (see above for details)
///
/// @note The values of any voxels are unchanged.
template<typename TreeType>
inline void dilateActiveValues(tree::LeafManager<TreeType>& manager,
int iterations = 1,
NearestNeighbors nn = NN_FACE,
TilePolicy mode = PRESERVE_TILES);
/// @brief Topologically dilate all leaf-level active voxels in a tree
/// using one of three nearest neighbor connectivity patterns.
/// @warning This method is NOT multi-threaded and ignores active tiles!
///
/// @param tree tree to be dilated
/// @param iterations number of iterations to apply the dilation
/// @param nn connectivity pattern of the dilation: either
/// face-adjacent (6 nearest neighbors), face- and edge-adjacent
/// (18 nearest neighbors) or face-, edge- and vertex-adjacent (26
/// nearest neighbors).
///
/// @note The values of any voxels are unchanged.
template<typename TreeType>
inline void dilateVoxels(TreeType& tree,
int iterations = 1,
NearestNeighbors nn = NN_FACE);
/// @brief Topologically dilate all leaf-level active voxels in a tree
/// using one of three nearest neighbor connectivity patterns.
/// @warning This method is NOT multi-threaded and ignores active tiles!
///
/// @param manager LeafManager containing the tree to be dilated.
/// On exit it is updated to include all the leaf
/// nodes of the dilated tree.
/// @param iterations number of iterations to apply the dilation
/// @param nn connectivity pattern of the dilation: either
/// face-adjacent (6 nearest neighbors), face- and edge-adjacent
/// (18 nearest neighbors) or face-, edge- and vertex-adjacent (26
/// nearest neighbors).
///
/// @note The values of any voxels are unchanged.
template<typename TreeType>
inline void dilateVoxels(tree::LeafManager<TreeType>& manager,
int iterations = 1,
NearestNeighbors nn = NN_FACE);
//@{
/// @brief Topologically erode all leaf-level active voxels in the given tree.
/// @details That is, shrink the set of active voxels by @a iterations voxels
/// in the +x, -x, +y, -y, +z and -z directions, but don't change the values
/// of any voxels, only their active states.
/// @todo Currently operates only on leaf voxels; need to extend to tiles.
template<typename TreeType>
inline void erodeVoxels(TreeType& tree,
int iterations=1,
NearestNeighbors nn = NN_FACE);
template<typename TreeType>
inline void erodeVoxels(tree::LeafManager<TreeType>& manager,
int iterations = 1,
NearestNeighbors nn = NN_FACE);
//@}
/// @brief Mark as active any inactive tiles or voxels in the given grid or tree
/// whose values are equal to @a value (optionally to within the given @a tolerance).
template<typename GridOrTree>
inline void activate(
GridOrTree&,
const typename GridOrTree::ValueType& value,
const typename GridOrTree::ValueType& tolerance = zeroVal<typename GridOrTree::ValueType>()
);
/// @brief Mark as inactive any active tiles or voxels in the given grid or tree
/// whose values are equal to @a value (optionally to within the given @a tolerance).
template<typename GridOrTree>
inline void deactivate(
GridOrTree&,
const typename GridOrTree::ValueType& value,
const typename GridOrTree::ValueType& tolerance = zeroVal<typename GridOrTree::ValueType>()
);
////////////////////////////////////////
/// Mapping from a Log2Dim to a data type of size 2^Log2Dim bits
template<Index Log2Dim> struct DimToWord {};
template<> struct DimToWord<3> { using Type = uint8_t; };// 2^3 = 8 bits
template<> struct DimToWord<4> { using Type = uint16_t; };// 2^4 = 16 bits
template<> struct DimToWord<5> { using Type = uint32_t; };// 2^5 = 32 bits
template<> struct DimToWord<6> { using Type = uint64_t; };// 2^6 = 64 bits
////////////////////////////////////////
/// @brief Implementation class for morphological dilation and erosion of the
/// active voxels of a tree.
/// @note The internal LeafManager is either owned (when constructed from a
/// tree) or borrowed (when constructed from an external manager).
template<typename TreeType>
class Morphology
{
public:
    using ManagerType = tree::LeafManager<TreeType>;
    /// @brief Construct from a tree; the allocated LeafManager is owned.
    Morphology(TreeType& tree):
        mOwnsManager(true), mManager(new ManagerType(tree)), mAcc(tree), mSteps(1) {}
    /// @brief Construct from an external LeafManager, which is NOT deleted on destruction.
    Morphology(ManagerType* mgr):
        mOwnsManager(false), mManager(mgr), mAcc(mgr->tree()), mSteps(1) {}
    virtual ~Morphology() { if (mOwnsManager) delete mManager; }
    /// @brief Face-adjacent dilation pattern
    void dilateVoxels6();
    /// @brief Face- and edge-adjacent dilation pattern.
    void dilateVoxels18();
    /// @brief Face-, edge- and vertex-adjacent dilation pattern.
    void dilateVoxels26();
    /// @brief Apply @a iterations dilation steps with the given connectivity pattern.
    void dilateVoxels(int iterations = 1, NearestNeighbors nn = NN_FACE);
    /// @brief Face-adjacent erosion pattern.
    void erodeVoxels6() { mSteps = 1; this->doErosion(NN_FACE); }
    /// @brief Face- and edge-adjacent erosion pattern.
    void erodeVoxels18() { mSteps = 1; this->doErosion(NN_FACE_EDGE); }
    /// @brief Face-, edge- and vertex-adjacent erosion pattern.
    void erodeVoxels26() { mSteps = 1; this->doErosion(NN_FACE_EDGE_VERTEX); }
    /// @brief Apply @a iterations erosion steps with the given connectivity pattern.
    void erodeVoxels(int iterations = 1, NearestNeighbors nn = NN_FACE)
    {
        mSteps = iterations;
        this->doErosion(nn);
    }
protected:
    void doErosion(NearestNeighbors nn);
    using LeafType = typename TreeType::LeafNodeType;
    using MaskType = typename LeafType::NodeMaskType;
    using AccessorType = tree::ValueAccessor<TreeType>;
    const bool mOwnsManager;// true if mManager was allocated by this class
    ManagerType* mManager;
    AccessorType mAcc;
    int mSteps;// number of erosion steps applied by doErosion()
    static const int LEAF_DIM = LeafType::DIM;
    static const int LEAF_LOG2DIM = LeafType::LOG2DIM;
    using Word = typename DimToWord<LEAF_LOG2DIM>::Type;
    /// Helper that lazily resolves (and caches) one neighbor leaf node for
    /// scattering (dilation) or gathering (erosion) of bit-mask words.
    struct Neighbor {
        LeafType* leaf;//null if a tile
        bool init;//true if initialization is required
        bool isOn;//true if an active tile
        Neighbor() : leaf(nullptr), init(true) {}
        inline void clear() { leaf = nullptr; init = true; }
        template<int DX, int DY, int DZ>
        void scatter(AccessorType& acc, const Coord &xyz, int indx, Word mask)
        {
            if (init) {
                init = false;
                Coord orig = xyz.offsetBy(DX*LEAF_DIM, DY*LEAF_DIM, DZ*LEAF_DIM);
                leaf = acc.probeLeaf(orig);
                // Only allocate a new leaf if the region is not already an active tile
                if ((leaf == nullptr) && !acc.isValueOn(orig)) leaf = acc.touchLeaf(orig);
            }
            static const int N = (LEAF_DIM - 1)*(DY + DX*LEAF_DIM);
            if (leaf) leaf->getValueMask().template getWord<Word>(indx-N) |= mask;
        }
        template<int DX, int DY, int DZ>
        Word gather(AccessorType& acc, const Coord &xyz, int indx)
        {
            if (init) {
                init = false;
                Coord orig = xyz.offsetBy(DX*LEAF_DIM, DY*LEAF_DIM, DZ*LEAF_DIM);
                leaf = acc.probeLeaf(orig);
                isOn = leaf ? false : acc.isValueOn(orig);
            }
            static const int N = (LEAF_DIM -1 )*(DY + DX*LEAF_DIM);
            // Active tiles gather as all-on words; inactive regions as all-off words
            return leaf ? leaf->getValueMask().template getWord<Word>(indx-N)
                : isOn ? ~Word(0) : Word(0);
        }
    };// Neighbor
    /// Cache of neighbor leaf nodes around the leaf currently being processed;
    /// onTile/offTile act as stand-ins for constant (tile) regions.
    struct LeafCache
    {
        LeafCache(size_t n, TreeType& tree) : size(n), leafs(new LeafType*[n]), acc(tree)
        {
            onTile.setValuesOn();
            this->clear();
        }
        ~LeafCache() { delete [] leafs; }
        LeafType*& operator[](int offset) { return leafs[offset]; }
        inline void clear() { for (size_t i = 0; i < size; ++i) leafs[i] = nullptr; }
        inline void setOrigin(const Coord& xyz) { origin = &xyz; }
        inline void scatter(int n, int indx)
        {
            assert(leafs[n]);
            leafs[n]->getValueMask().template getWord<Word>(indx) |= mask;
        }
        template<int DX, int DY, int DZ>
        inline void scatter(int n, int indx)
        {
            if (!leafs[n]) {
                const Coord xyz = origin->offsetBy(DX*LEAF_DIM, DY*LEAF_DIM, DZ*LEAF_DIM);
                leafs[n] = acc.probeLeaf(xyz);
                if (!leafs[n]) leafs[n] = acc.isValueOn(xyz) ? &onTile : acc.touchLeaf(xyz);
            }
            this->scatter(n, indx - (LEAF_DIM - 1)*(DY + DX*LEAF_DIM));
        }
        inline Word gather(int n, int indx)
        {
            assert(leafs[n]);
            return leafs[n]->getValueMask().template getWord<Word>(indx);
        }
        template<int DX, int DY, int DZ>
        inline Word gather(int n, int indx)
        {
            if (!leafs[n]) {
                const Coord xyz = origin->offsetBy(DX*LEAF_DIM, DY*LEAF_DIM, DZ*LEAF_DIM);
                leafs[n] = acc.probeLeaf(xyz);
                if (!leafs[n]) leafs[n] = acc.isValueOn(xyz) ? &onTile : &offTile;
            }
            return this->gather(n, indx - (LEAF_DIM -1 )*(DY + DX*LEAF_DIM));
        }
        // Scatters in the xy face-directions relative to leaf i1
        void scatterFacesXY(int x, int y, int i1, int n, int i2);
        // Scatters in the xy edge-directions relative to leaf i1
        void scatterEdgesXY(int x, int y, int i1, int n, int i2);
        Word gatherFacesXY(int x, int y, int i1, int n, int i2);
        Word gatherEdgesXY(int x, int y, int i1, int n, int i2);
        const Coord* origin;// origin of the center leaf node (not owned)
        size_t size;// number of slots in the leafs array
        LeafType** leafs;
        LeafType onTile, offTile;// all-on and all-off stand-ins for tile regions
        AccessorType acc;
        Word mask;// bit-mask word consumed by scatter(int, int)
    };// LeafCache
    /// Multi-threaded functor that erodes the cached leaf masks.
    struct ErodeVoxelsOp {
        using RangeT = tbb::blocked_range<size_t>;
        ErodeVoxelsOp(std::vector<MaskType>& masks, ManagerType& manager)
            : mTask(nullptr), mSavedMasks(masks) , mManager(manager) {}
        void runParallel(NearestNeighbors nn);
        void operator()(const RangeT& r) const {mTask(const_cast<ErodeVoxelsOp*>(this), r);}
        void erode6( const RangeT&) const;
        void erode18(const RangeT&) const;
        void erode26(const RangeT&) const;
    private:
        using FuncT = typename std::function<void (ErodeVoxelsOp*, const RangeT&)>;
        FuncT mTask;// task invoked by operator() — presumably bound by runParallel()
        std::vector<MaskType>& mSavedMasks;
        ManagerType& mManager;
    };// ErodeVoxelsOp
    /// Saves the leaf value masks to a vector, or restores them from it (multi-threaded).
    struct MaskManager {
        MaskManager(std::vector<MaskType>& masks, ManagerType& manager)
            : mMasks(masks) , mManager(manager), mSaveMasks(true) {}
        void save() { mSaveMasks = true; tbb::parallel_for(mManager.getRange(), *this); }
        void update() { mSaveMasks = false; tbb::parallel_for(mManager.getRange(), *this); }
        void operator()(const tbb::blocked_range<size_t>& range) const
        {
            if (mSaveMasks) {
                for (size_t i = range.begin(); i < range.end(); ++i) {
                    mMasks[i] = mManager.leaf(i).getValueMask();
                }
            } else {
                for (size_t i = range.begin(); i < range.end(); ++i) {
                    mManager.leaf(i).setValueMask(mMasks[i]);
                }
            }
        }
    private:
        std::vector<MaskType>& mMasks;
        ManagerType& mManager;
        bool mSaveMasks;// true = copy masks out of the leafs, false = write them back
    };// MaskManager
    /// Writes the cached masks back into the leaf nodes (multi-threaded).
    struct UpdateMasks {
        UpdateMasks(const std::vector<MaskType>& masks, ManagerType& manager)
            : mMasks(masks), mManager(manager) {}
        void update() { tbb::parallel_for(mManager.getRange(), *this); }
        void operator()(const tbb::blocked_range<size_t>& r) const {
            for (size_t i=r.begin(); i<r.end(); ++i) mManager.leaf(i).setValueMask(mMasks[i]);
        }
        const std::vector<MaskType>& mMasks;
        ManagerType& mManager;
    };
    /// Copies the leaf value masks into a vector (multi-threaded).
    struct CopyMasks {
        CopyMasks(std::vector<MaskType>& masks, const ManagerType& manager)
            : mMasks(masks), mManager(manager) {}
        void copy() { tbb::parallel_for(mManager.getRange(), *this); }
        void operator()(const tbb::blocked_range<size_t>& r) const {
            for (size_t i=r.begin(); i<r.end(); ++i) mMasks[i]=mManager.leaf(i).getValueMask();
        }
        std::vector<MaskType>& mMasks;
        const ManagerType& mManager;
    };
    // Convenience wrapper around CopyMasks
    void copyMasks(std::vector<MaskType>& a, const ManagerType& b) {CopyMasks c(a, b); c.copy();}
};// Morphology
// Apply the requested neighbor-connectivity dilation @a iterations times.
// Any value of nn other than NN_FACE_EDGE or NN_FACE_EDGE_VERTEX falls back
// to the face-adjacent (6-neighbor) pattern.
template<typename TreeType>
inline void
Morphology<TreeType>::dilateVoxels(int iterations, NearestNeighbors nn)
{
    for (int step = 0; step < iterations; ++step) {
        if (nn == NN_FACE_EDGE) {
            this->dilateVoxels18();
        } else if (nn == NN_FACE_EDGE_VERTEX) {
            this->dilateVoxels26();
        } else {// NN_FACE and any other value
            this->dilateVoxels6();
        }
    }
}
// Face-adjacent (6-neighbor) dilation: each active voxel activates its six
// face neighbors, processed one z-row (bit-mask word) at a time.
template<typename TreeType>
inline void
Morphology<TreeType>::dilateVoxels6()
{
    /// @todo Currently operates only on leaf voxels; need to extend to tiles.
    const int leafCount = static_cast<int>(mManager->leafCount());
    // Save the value masks of all leaf nodes.
    std::vector<MaskType> savedMasks(leafCount);
    this->copyMasks(savedMasks, *mManager);
    // 7 cache slots: the center leaf (slot 0) and up to six neighbor leafs
    LeafCache cache(7, mManager->tree());
    for (int leafIdx = 0; leafIdx < leafCount; ++leafIdx) {
        const MaskType& oldMask = savedMasks[leafIdx];//original bit-mask of current leaf node
        cache[0] = &mManager->leaf(leafIdx);
        cache.setOrigin(cache[0]->origin());
        for (int x = 0; x < LEAF_DIM; ++x ) {
            for (int y = 0, n = (x << LEAF_LOG2DIM); y < LEAF_DIM; ++y, ++n) {
                // Extract the portion of the original mask that corresponds to a row in z.
                if (const Word w = oldMask.template getWord<Word>(n)) {
                    // Dilate the current leaf in the +z and -z direction
                    cache.mask = Word(w | (w>>1) | (w<<1)); cache.scatter(0, n);
                    // Dilate into neighbor leaf in the -z direction
                    if ( (cache.mask = Word(w<<(LEAF_DIM-1))) ) {
                        cache.template scatter< 0, 0,-1>(1, n);
                    }
                    // Dilate into neighbor leaf in the +z direction
                    if ( (cache.mask = Word(w>>(LEAF_DIM-1))) ) {
                        cache.template scatter< 0, 0, 1>(2, n);
                    }
                    // Dilate in the xy-face directions relative to the center leaf
                    cache.mask = w; cache.scatterFacesXY(x, y, 0, n, 3);
                }
            }// loop over y
        }//loop over x
        cache.clear();
    }//loop over leafs
    // Dilation may have allocated new leaf nodes, so refresh the manager's leaf array.
    mManager->rebuildLeafArray();
}//dilateVoxels6
// Single 18-connected (face + edge neighbor) dilation pass over all leaf
// voxels. Same snapshot-then-scatter strategy as dilateVoxels6, extended
// with the xy edge directions and the xy faces of the +/-z neighbor leaves.
template<typename TreeType>
inline void
Morphology<TreeType>::dilateVoxels18()
{
    /// @todo Currently operates only on leaf voxels; need to extend to tiles.
    const int leafCount = static_cast<int>(mManager->leafCount());
    // Save the value masks of all leaf nodes.
    std::vector<MaskType> savedMasks(leafCount);
    this->copyMasks(savedMasks, *mManager);
    // Slot 0 is the current leaf; slots 1-18 hold its face/edge neighbors.
    LeafCache cache(19, mManager->tree());
    Coord orig_mz, orig_pz;//origins of neighbor leaf nodes in the -z and +z directions
    for (int leafIdx = 0; leafIdx < leafCount; ++leafIdx) {
        const MaskType& oldMask = savedMasks[leafIdx];//original bit-mask of current leaf node
        cache[0] = &mManager->leaf(leafIdx);
        orig_mz = cache[0]->origin().offsetBy(0, 0, -LEAF_DIM);
        orig_pz = cache[0]->origin().offsetBy(0, 0, LEAF_DIM);
        for (int x = 0; x < LEAF_DIM; ++x ) {
            for (int y = 0, n = (x << LEAF_LOG2DIM); y < LEAF_DIM; ++y, ++n) {
                if (const Word w = oldMask.template getWord<Word>(n)) {
                    {
                        // In-leaf +/-z dilation, plus the xy face and edge
                        // neighbors relative to the center leaf.
                        cache.mask = Word(w | (w>>1) | (w<<1));
                        cache.setOrigin(cache[0]->origin());
                        cache.scatter(0, n);
                        cache.scatterFacesXY(x, y, 0, n, 3);
                        cache.mask = w;
                        cache.scatterEdgesXY(x, y, 0, n, 3);
                    }
                    // Bits crossing into the -z neighbor leaf (and its xy faces)
                    if ( (cache.mask = Word(w<<(LEAF_DIM-1))) ) {
                        cache.setOrigin(cache[0]->origin());
                        cache.template scatter< 0, 0,-1>(1, n);
                        cache.setOrigin(orig_mz);
                        cache.scatterFacesXY(x, y, 1, n, 11);
                    }
                    // Bits crossing into the +z neighbor leaf (and its xy faces)
                    if ( (cache.mask = Word(w>>(LEAF_DIM-1))) ) {
                        cache.setOrigin(cache[0]->origin());
                        cache.template scatter< 0, 0, 1>(2, n);
                        cache.setOrigin(orig_pz);
                        cache.scatterFacesXY(x, y, 2, n, 15);
                    }
                }
            }// loop over y
        }//loop over x
        cache.clear();
    }//loop over leafs
    // Dilation may have created new leaf nodes, so refresh the manager's array.
    mManager->rebuildLeafArray();
}// dilateVoxels18
// Single 26-connected (face + edge + vertex neighbor) dilation pass over all
// leaf voxels. Extends dilateVoxels18 by also scattering the xy edges of the
// +/-z neighbor leaves (which covers the eight vertex directions).
template<typename TreeType>
inline void
Morphology<TreeType>::dilateVoxels26()
{
    const int leafCount = static_cast<int>(mManager->leafCount());
    // Save the value masks of all leaf nodes.
    std::vector<MaskType> savedMasks(leafCount);
    this->copyMasks(savedMasks, *mManager);
    // Slot 0 is the current leaf; slots 1-26 hold its neighbors.
    LeafCache cache(27, mManager->tree());
    Coord orig_mz, orig_pz;//origins of neighbor leaf nodes in the -z and +z directions
    for (int leafIdx = 0; leafIdx < leafCount; ++leafIdx) {
        const MaskType& oldMask = savedMasks[leafIdx];//original bit-mask of current leaf node
        cache[0] = &mManager->leaf(leafIdx);
        orig_mz = cache[0]->origin().offsetBy(0, 0, -LEAF_DIM);
        orig_pz = cache[0]->origin().offsetBy(0, 0, LEAF_DIM);
        for (int x = 0; x < LEAF_DIM; ++x ) {
            for (int y = 0, n = (x << LEAF_LOG2DIM); y < LEAF_DIM; ++y, ++n) {
                if (const Word w = oldMask.template getWord<Word>(n)) {
                    {
                        // In-leaf +/-z dilation, plus xy face and edge
                        // neighbors relative to the center leaf.
                        cache.mask = Word(w | (w>>1) | (w<<1));
                        cache.setOrigin(cache[0]->origin());
                        cache.scatter(0, n);
                        cache.scatterFacesXY(x, y, 0, n, 3);
                        cache.scatterEdgesXY(x, y, 0, n, 3);
                    }
                    // Bits crossing into the -z neighbor leaf (faces and edges)
                    if ( (cache.mask = Word(w<<(LEAF_DIM-1))) ) {
                        cache.setOrigin(cache[0]->origin());
                        cache.template scatter< 0, 0,-1>(1, n);
                        cache.setOrigin(orig_mz);
                        cache.scatterFacesXY(x, y, 1, n, 11);
                        cache.scatterEdgesXY(x, y, 1, n, 11);
                    }
                    // Bits crossing into the +z neighbor leaf (faces and edges)
                    if ( (cache.mask = Word(w>>(LEAF_DIM-1))) ) {
                        cache.setOrigin(cache[0]->origin());
                        cache.template scatter< 0, 0, 1>(2, n);
                        cache.setOrigin(orig_pz);
                        cache.scatterFacesXY(x, y, 2, n, 19);
                        cache.scatterEdgesXY(x, y, 2, n, 19);
                    }
                }
            }// loop over y
        }//loop over x
        cache.clear();
    }//loop over leafs
    // Dilation may have created new leaf nodes, so refresh the manager's array.
    mManager->rebuildLeafArray();
}// dilateVoxels26
// Scatter cache.mask into the four xy-face neighbors of voxel row (x,y):
// rows in the interior are written into the current leaf (cache slot i1);
// rows on a leaf boundary spill into the adjacent leaf (slots i2..i2+3).
template<typename TreeType>
inline void
Morphology<TreeType>::LeafCache::scatterFacesXY(int x, int y, int i1, int n, int i2)
{
    // dilate current leaf or neighbor in the -x direction
    if (x > 0) {
        this->scatter(i1, n-LEAF_DIM);
    } else {
        this->template scatter<-1, 0, 0>(i2, n);
    }
    // dilate current leaf or neighbor in the +x direction
    if (x < LEAF_DIM-1) {
        this->scatter(i1, n+LEAF_DIM);
    } else {
        this->template scatter< 1, 0, 0>(i2+1, n);
    }
    // dilate current leaf or neighbor in the -y direction
    if (y > 0) {
        this->scatter(i1, n-1);
    } else {
        this->template scatter< 0,-1, 0>(i2+2, n);
    }
    // dilate current leaf or neighbor in the +y direction
    if (y < LEAF_DIM-1) {
        this->scatter(i1, n+1);
    } else {
        this->template scatter< 0, 1, 0>(i2+3, n);
    }
}
// Scatter cache.mask into the four xy-edge (diagonal) neighbors of voxel row
// (x,y). Depending on whether the row lies on a leaf boundary, each diagonal
// lands in the current leaf (slot i1), a face-neighbor leaf (slots i2..i2+3),
// or an edge-neighbor leaf (slots i2+4..i2+7).
template<typename TreeType>
inline void
Morphology<TreeType>::LeafCache::scatterEdgesXY(int x, int y, int i1, int n, int i2)
{
    // (-x,-y) and (-x,+y) diagonals
    if (x > 0) {
        if (y > 0) {
            this->scatter(i1, n-LEAF_DIM-1);
        } else {
            this->template scatter< 0,-1, 0>(i2+2, n-LEAF_DIM);
        }
        if (y < LEAF_DIM-1) {
            this->scatter(i1, n-LEAF_DIM+1);
        } else {
            this->template scatter< 0, 1, 0>(i2+3, n-LEAF_DIM);
        }
    } else {
        // row sits on the -x boundary: spill into -x face or diagonal leaves
        if (y < LEAF_DIM-1) {
            this->template scatter<-1, 0, 0>(i2 , n+1);
        } else {
            this->template scatter<-1, 1, 0>(i2+7, n );
        }
        if (y > 0) {
            this->template scatter<-1, 0, 0>(i2 , n-1);
        } else {
            this->template scatter<-1,-1, 0>(i2+4, n );
        }
    }
    // (+x,-y) and (+x,+y) diagonals
    if (x < LEAF_DIM-1) {
        if (y > 0) {
            this->scatter(i1, n+LEAF_DIM-1);
        } else {
            this->template scatter< 0,-1, 0>(i2+2, n+LEAF_DIM);
        }
        if (y < LEAF_DIM-1) {
            this->scatter(i1, n+LEAF_DIM+1);
        } else {
            this->template scatter< 0, 1, 0>(i2+3, n+LEAF_DIM);
        }
    } else {
        // row sits on the +x boundary: spill into +x face or diagonal leaves
        if (y > 0) {
            this->template scatter< 1, 0, 0>(i2+1, n-1);
        } else {
            this->template scatter< 1,-1, 0>(i2+6, n );
        }
        if (y < LEAF_DIM-1) {
            this->template scatter< 1, 0, 0>(i2+1, n+1);
        } else {
            this->template scatter< 1, 1, 0>(i2+5, n );
        }
    }
}
// Bind the per-range erosion kernel matching the requested connectivity to
// mTask, then execute it over all leaf nodes with tbb::parallel_for.
// NN_FACE (and any unrecognized value) selects 6-connected erosion.
template<typename TreeType>
inline void
Morphology<TreeType>::ErodeVoxelsOp::runParallel(NearestNeighbors nn)
{
    namespace ph = std::placeholders;
    switch (nn) {
    case NN_FACE_EDGE:
        mTask = std::bind(&ErodeVoxelsOp::erode18, ph::_1, ph::_2);
        break;
    case NN_FACE_EDGE_VERTEX:
        mTask = std::bind(&ErodeVoxelsOp::erode26, ph::_1, ph::_2);
        break;
    case NN_FACE:
    default:
        mTask = std::bind(&ErodeVoxelsOp::erode6, ph::_1, ph::_2);
    }
    tbb::parallel_for(mManager.getRange(), *this);
}
// Intersect the z-row words of the four xy-face neighbors of row (x,y).
// Interior rows are read from the current leaf (cache slot i1); rows on a
// leaf boundary are read from the adjacent leaf (slots i2..i2+3). The
// returned word has a bit set only where all four neighbors were active.
template<typename TreeType>
inline typename Morphology<TreeType>::Word
Morphology<TreeType>::LeafCache::gatherFacesXY(int x, int y, int i1, int n, int i2)
{
    Word result;
    // -x direction
    if (x > 0) {
        result = this->gather(i1, n-LEAF_DIM);
    } else {
        result = this->template gather<-1,0,0>(i2, n);
    }
    // +x direction
    if (x < LEAF_DIM-1) {
        result = Word(result & this->gather(i1, n+LEAF_DIM));
    } else {
        result = Word(result & this->template gather<1,0,0>(i2+1, n));
    }
    // -y direction
    if (y > 0) {
        result = Word(result & this->gather(i1, n-1));
    } else {
        result = Word(result & this->template gather<0,-1,0>(i2+2, n));
    }
    // +y direction
    if (y < LEAF_DIM-1) {
        result = Word(result & this->gather(i1, n+1));
    } else {
        result = Word(result & this->template gather<0,1,0>(i2+3, n));
    }
    return result;
}
// Intersect the z-row words of the four xy-edge (diagonal) neighbors of row
// (x,y). The counterpart of scatterEdgesXY: depending on boundary position,
// each diagonal is read from the current leaf (slot i1), a face-neighbor
// leaf (slots i2..i2+3), or an edge-neighbor leaf (slots i2+4..i2+7).
template<typename TreeType>
inline typename Morphology<TreeType>::Word
Morphology<TreeType>::LeafCache::gatherEdgesXY(int x, int y, int i1, int n, int i2)
{
    Word w = ~Word(0);
    // (-x,-y) and (-x,+y) diagonals
    if (x > 0) {
        w &= y > 0 ? this->gather(i1, n-LEAF_DIM-1) :
             this->template gather< 0,-1, 0>(i2+2, n-LEAF_DIM);
        w &= y < LEAF_DIM-1 ? this->gather(i1, n-LEAF_DIM+1) :
             this->template gather< 0, 1, 0>(i2+3, n-LEAF_DIM);
    } else {
        w &= y < LEAF_DIM-1 ? this->template gather<-1, 0, 0>(i2 , n+1):
             this->template gather<-1, 1, 0>(i2+7, n );
        w &= y > 0 ? this->template gather<-1, 0, 0>(i2 , n-1):
             this->template gather<-1,-1, 0>(i2+4, n );
    }
    // (+x,-y) and (+x,+y) diagonals
    if (x < LEAF_DIM-1) {
        w &= y > 0 ? this->gather(i1, n+LEAF_DIM-1) :
             this->template gather< 0,-1, 0>(i2+2, n+LEAF_DIM);
        w &= y < LEAF_DIM-1 ? this->gather(i1, n+LEAF_DIM+1) :
             this->template gather< 0, 1, 0>(i2+3, n+LEAF_DIM);
    } else {
        w &= y > 0 ? this->template gather< 1, 0, 0>(i2+1, n-1):
             this->template gather< 1,-1, 0>(i2+6, n );
        w &= y < LEAF_DIM-1 ? this->template gather< 1, 0, 0>(i2+1, n+1):
             this->template gather< 1, 1, 0>(i2+5, n );
    }
    return w;
}
// Per-range 6-connected erosion kernel: a voxel's bit survives only if all
// six face neighbors were set in the saved masks. Operates in place on
// mSavedMasks; the results are committed to the tree later by UpdateMasks.
template <typename TreeType>
inline void
Morphology<TreeType>::ErodeVoxelsOp::erode6(const RangeT& range) const
{
    LeafCache cache(7, mManager.tree());
    for (size_t leafIdx = range.begin(); leafIdx < range.end(); ++leafIdx) {
        cache[0] = &mManager.leaf(leafIdx);
        if (cache[0]->isEmpty()) continue;
        cache.setOrigin(cache[0]->origin());
        MaskType& newMask = mSavedMasks[leafIdx];//saved bit-mask of current leaf node, eroded in place
        for (int x = 0; x < LEAF_DIM; ++x ) {
            for (int y = 0, n = (x << LEAF_LOG2DIM); y < LEAF_DIM; ++y, ++n) {
                // Extract the portion of the original mask that corresponds to a row in z.
                if (Word& w = newMask.template getWord<Word>(n)) {
                    // erode in two z directions (this is first since it uses the original w)
                    w = Word(w &
                        (Word(w<<1 | (cache.template gather<0,0,-1>(1, n)>>(LEAF_DIM-1))) &
                         Word(w>>1 | (cache.template gather<0,0, 1>(2, n)<<(LEAF_DIM-1)))));
                    // then intersect with the four xy-face neighbor rows
                    w = Word(w & cache.gatherFacesXY(x, y, 0, n, 3));
                }
            }// loop over y
        }//loop over x
        cache.clear();
    }//loop over leafs
}
// 18-connected erosion kernel: intentionally unimplemented; always throws.
template <typename TreeType>
inline void
Morphology<TreeType>::ErodeVoxelsOp::erode18(const RangeT&) const
{
    OPENVDB_THROW(NotImplementedError, "tools::erode18 is not implemented yet!");
}
// 26-connected erosion kernel: intentionally unimplemented; always throws.
template <typename TreeType>
inline void
Morphology<TreeType>::ErodeVoxelsOp::erode26(const RangeT&) const
{
    OPENVDB_THROW(NotImplementedError, "tools::erode26 is not implemented yet!");
}
// Run mSteps erosion iterations: each iteration erodes the saved masks in
// parallel and then writes them back into the tree, and the narrow band is
// pruned afterwards.
template<typename TreeType>
inline void
Morphology<TreeType>::doErosion(NearestNeighbors nn)
{
    /// @todo Currently operates only on leaf voxels; need to extend to tiles.
    const size_t leafCount = mManager->leafCount();
    // Save the value masks of all leaf nodes.
    std::vector<MaskType> savedMasks(leafCount);
    this->copyMasks(savedMasks, *mManager);
    UpdateMasks a(savedMasks, *mManager);
    ErodeVoxelsOp erode(savedMasks, *mManager);
    for (int i = 0; i < mSteps; ++i) {
        erode.runParallel(nn);// erode savedMasks in parallel
        a.update();// commit the eroded masks back into the leaf nodes
    }
    tools::pruneLevelSet(mManager->tree());
}
////////////////////////////////////////
// Dilate the active voxels of the tree managed by @a manager, @a iterations
// times, using the @a nn neighborhood scheme. Non-positive counts are a no-op.
template<typename TreeType>
inline void
dilateVoxels(tree::LeafManager<TreeType>& manager, int iterations, NearestNeighbors nn)
{
    if (iterations <= 0) return;
    Morphology<TreeType> morph(&manager);
    morph.dilateVoxels(iterations, nn);
}
template<typename TreeType>
inline void
dilateVoxels(TreeType& tree, int iterations, NearestNeighbors nn)
{
if (iterations > 0 ) {
Morphology<TreeType> m(tree);
m.dilateVoxels(iterations, nn);
}
}
// Erode the active voxels of the tree managed by @a manager, @a iterations
// times, using the @a nn neighborhood scheme. Non-positive counts are a no-op.
template<typename TreeType>
inline void
erodeVoxels(tree::LeafManager<TreeType>& manager, int iterations, NearestNeighbors nn)
{
    if (iterations <= 0) return;
    Morphology<TreeType> morph(&manager);
    morph.erodeVoxels(iterations, nn);
}
template<typename TreeType>
inline void
erodeVoxels(TreeType& tree, int iterations, NearestNeighbors nn)
{
if (iterations > 0 ) {
Morphology<TreeType> m(tree);
m.erodeVoxels(iterations, nn);
}
}
////////////////////////////////////////
namespace activation {
// Functor that flips the active state of values approximately equal to a
// reference value. The ValueOnIter overload is only ever applied by
// deactivate() (hence it always switches values off) and the ValueOffIter
// overload only by activate(); the LeafIter overload serves both callers
// and dispatches on mActivate.
template<typename TreeType>
class ActivationOp
{
public:
    using ValueT = typename TreeType::ValueType;
    // @param state true to activate matching values, false to deactivate
    // @param val   reference value to compare against
    // @param tol   tolerance for the approximate comparison
    ActivationOp(bool state, const ValueT& val, const ValueT& tol)
        : mActivate(state)
        , mValue(val)
        , mTolerance(tol)
    {}
    // Deactivate a matching active (tile) value.
    void operator()(const typename TreeType::ValueOnIter& it) const
    {
        if (math::isApproxEqual(*it, mValue, mTolerance)) {
            it.setValueOff();
        }
    }
    // Activate a matching inactive (tile) value.
    void operator()(const typename TreeType::ValueOffIter& it) const
    {
        if (math::isApproxEqual(*it, mValue, mTolerance)) {
            it.setActiveState(/*on=*/true);
        }
    }
    // Process all voxels of a leaf node, activating or deactivating
    // matching values depending on mActivate.
    void operator()(const typename TreeType::LeafIter& lit) const
    {
        using LeafT = typename TreeType::LeafNodeType;
        LeafT& leaf = *lit;
        if (mActivate) {
            for (typename LeafT::ValueOffIter it = leaf.beginValueOff(); it; ++it) {
                if (math::isApproxEqual(*it, mValue, mTolerance)) {
                    leaf.setValueOn(it.pos());
                }
            }
        } else {
            for (typename LeafT::ValueOnIter it = leaf.beginValueOn(); it; ++it) {
                if (math::isApproxEqual(*it, mValue, mTolerance)) {
                    leaf.setValueOff(it.pos());
                }
            }
        }
    }
private:
    bool mActivate;// activate (true) or deactivate (false) matching values
    const ValueT mValue, mTolerance;// reference value and comparison tolerance
}; // class ActivationOp
} // namespace activation
// Activate all values in @a gridOrTree that are approximately equal to
// @a value (within @a tolerance).
template<typename GridOrTree>
inline void
activate(GridOrTree& gridOrTree, const typename GridOrTree::ValueType& value,
    const typename GridOrTree::ValueType& tolerance)
{
    using Adapter = TreeAdapter<GridOrTree>;
    using TreeType = typename Adapter::TreeType;
    TreeType& tree = Adapter::tree(gridOrTree);
    activation::ActivationOp<TreeType> op(/*activate=*/true, value, tolerance);
    // Process all leaf nodes in parallel.
    foreach(tree.beginLeaf(), op);
    // Process all other inactive values serially (because changing active states
    // is not thread-safe unless no two threads modify the same node).
    typename TreeType::ValueOffIter it = tree.beginValueOff();
    it.setMaxDepth(tree.treeDepth() - 2);// skip the leaf level, handled above
    foreach(it, op, /*threaded=*/false);
}
// Deactivate all values in @a gridOrTree that are approximately equal to
// @a value (within @a tolerance).
template<typename GridOrTree>
inline void
deactivate(GridOrTree& gridOrTree, const typename GridOrTree::ValueType& value,
    const typename GridOrTree::ValueType& tolerance)
{
    using Adapter = TreeAdapter<GridOrTree>;
    using TreeType = typename Adapter::TreeType;
    TreeType& tree = Adapter::tree(gridOrTree);
    activation::ActivationOp<TreeType> op(/*activate=*/false, value, tolerance);
    // Process all leaf nodes in parallel.
    foreach(tree.beginLeaf(), op);
    // Process all other active values serially (because changing active states
    // is not thread-safe unless no two threads modify the same node).
    typename TreeType::ValueOnIter it = tree.beginValueOn();
    it.setMaxDepth(tree.treeDepth() - 2);// skip the leaf level, handled above
    foreach(it, op, /*threaded=*/false);
}
/// @brief Class that performs multi-threaded dilation with support for active tiles.
/// @warning Dont use this class directly, instead call the function dilateActiveValues!
template<typename TreeT>
class DilationOp
{
    using MaskT = typename TreeT::template ValueConverter<ValueMask>::Type;
    using PoolT = tbb::enumerable_thread_specific<MaskT>;
    using LeafT = typename MaskT::LeafNodeType;
    // Very light-weight member data
    const int mIter;// number of iterations
    const tools::NearestNeighbors mNN;//enum to specify the dilation scheme
    PoolT *mPool;// pointer to the thread-local pool of mask trees
    LeafT **mLeafs;// raw array of pointers to leaf nodes
public:
    // The constructor performs the entire dilation: it builds a mask-tree
    // copy of the topology, dilates its leaf nodes in parallel (merging
    // per-thread results), optionally re-prunes tiles, and finally unions
    // the dilated mask back into @a tree.
    DilationOp(TreeT &tree, int iterations, NearestNeighbors nn, TilePolicy mode)
        : mIter(iterations), mNN(nn), mPool(nullptr), mLeafs(nullptr)
    {
        const size_t numLeafs = this->init( tree, mode );
        const size_t numThreads = size_t(tbb::task_scheduler_init::default_num_threads());
        const size_t grainSize = math::Max(size_t(1), numLeafs/(2*numThreads));
        MaskT mask;
        PoolT pool(mask);// Scoped thread-local storage of mask trees
        mPool = &pool;
        tbb::parallel_for(tbb::blocked_range<LeafT**>(mLeafs, mLeafs+numLeafs, grainSize), *this);
        delete [] mLeafs;// no more need for the array of leaf node pointers
        using IterT = typename PoolT::iterator;
        for (IterT it=pool.begin(); it!=pool.end(); ++it) mask.merge(*it);// fast stealing
        if (mode == PRESERVE_TILES) tools::prune(mask);//multithreaded
        tree.topologyUnion(mask);//multithreaded
    }
    // This is required by tbb and should never be called directly
    void operator()(const tbb::blocked_range<LeafT**> &r) const
    {
        MaskT mask;// thread-local temporary mask tree
        for (LeafT** it=r.begin(); it!=r.end(); ++it) mask.addLeaf( *it );
        tree::LeafManager<MaskT> manager(mask, r.begin(), r.end());
        tools::dilateVoxels(manager, mIter, mNN);// serial dilation of active voxels
        mPool->local().merge(mask, MERGE_ACTIVE_STATES);
    }
private:
    // Simple wrapper of a raw double-pointer to mimic a std container
    struct MyArray {
        using value_type = LeafT*;//required by Tree::stealNodes
        value_type* ptr;
        MyArray(value_type* array) : ptr(array) {}
        void push_back(value_type leaf) { *ptr++ = leaf; }//required by Tree::stealNodes
    };
    // Convert active tiles to leafs and de-construct the tree into a linear array of leafs.
    size_t linearize(MaskT& mask, TilePolicy mode)
    {
        if (mode != IGNORE_TILES) mask.voxelizeActiveTiles();//lightweight since this is a mask tree
        const size_t numLeafs = mask.leafCount();
        mLeafs = new LeafT*[numLeafs];// fast pre-allocation
        MyArray tmp(mLeafs);
        mask.stealNodes(tmp);// serializes the mask tree and leaves it empty
        return numLeafs;
    }
    // init() overload selected when the input tree is already a mask tree:
    // linearize it directly (no topology copy needed).
    template<typename T>
    typename std::enable_if<std::is_same<T, MaskT>::value, size_t>::type
    init(T& tree, TilePolicy mode)
    {
        return this->linearize(tree, mode);
    }
    // init() overload for all other tree types: first build a mask tree
    // sharing the input's topology, then linearize that copy.
    template<typename T>
    typename std::enable_if<!std::is_same<T, MaskT>::value, size_t>::type
    init(const T& tree, TilePolicy mode)
    {
        MaskT mask(tree, false, true, TopologyCopy());
        return this->linearize(mask, mode);
    }
};// DilationOp
// Dilate the active values (voxels and, per @a mode, tiles) of @a tree.
// The DilationOp constructor performs the entire multi-threaded dilation.
template<typename TreeType>
inline void
dilateActiveValues(TreeType& tree, int iterations, NearestNeighbors nn, TilePolicy mode)
{
    if (iterations <= 0) return;
    DilationOp<TreeType> op(tree, iterations, nn, mode);
}
// Dilate the active values of the tree behind @a manager. Since dilation
// adds leaf nodes, the manager's leaf array is rebuilt afterwards.
template<typename TreeType>
inline void
dilateActiveValues(tree::LeafManager<TreeType>& manager,
                   int iterations,
                   NearestNeighbors nn,
                   TilePolicy mode)
{
    if (iterations <= 0) return;
    DilationOp<TreeType> op(manager.tree(), iterations, nn, mode);
    manager.rebuildLeafArray();
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_MORPHOLOGY_HAS_BEEN_INCLUDED
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @author Ken Museth
///
/// @file LevelSetMeasure.h
#ifndef OPENVDB_TOOLS_LEVELSETMEASURE_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_LEVELSETMEASURE_HAS_BEEN_INCLUDED
#include <openvdb/math/Math.h>
#include <openvdb/Types.h>
#include <openvdb/Grid.h>
#include <openvdb/tree/LeafManager.h>
#include <openvdb/tree/ValueAccessor.h>
#include <openvdb/math/FiniteDifference.h>
#include <openvdb/math/Operators.h>
#include <openvdb/math/Stencils.h>
#include <openvdb/util/NullInterrupter.h>
#include <tbb/parallel_for.h>
#include <tbb/parallel_sort.h>
#include <tbb/parallel_invoke.h>
#include <type_traits>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Return the surface area of a narrow-band level set.
///
/// @param grid a scalar, floating-point grid with one or more disjoint,
/// closed level set surfaces
/// @param useWorldSpace if true the area is computed in
/// world space units, else in voxel units.
///
/// @throw TypeError if @a grid is not scalar or not floating-point or not a level set or empty.
template<class GridType>
inline Real
levelSetArea(const GridType& grid, bool useWorldSpace = true);
/// @brief Return the volume of a narrow-band level set surface.
///
/// @param grid a scalar, floating-point grid with one or more disjoint,
/// closed level set surfaces
/// @param useWorldSpace if true the volume is computed in
/// world space units, else in voxel units.
///
/// @throw TypeError if @a grid is not scalar or not floating-point or not a level set or empty.
template<class GridType>
inline Real
levelSetVolume(const GridType& grid, bool useWorldSpace = true);
/// @brief Return the Euler Characteristics of a narrow-band level set surface (possibly disconnected).
///
/// @param grid a scalar, floating-point grid with one or more disjoint,
/// closed level set surfaces
///
/// @throw TypeError if @a grid is not scalar or not floating-point or not a level set or empty.
template<class GridType>
inline int
levelSetEulerCharacteristic(const GridType& grid);
/// @brief Return the genus of a narrow-band level set surface.
///
/// @param grid a scalar, floating-point grid with one or more disjoint,
/// closed level set surfaces
/// @warning The genus is only well defined for a single connected surface
///
/// @throw TypeError if @a grid is not scalar or not floating-point or not a level set or empty.
template<class GridType>
inline int
levelSetGenus(const GridType& grid);
////////////////////////////////////////////////////////////////////////////////////////
/// @brief Smeared-out and continuous Dirac Delta function.
template<typename RealT>
class DiracDelta
{
public:
    // eps is the half-width of the dirac delta function in units of phi
    DiracDelta(RealT eps) : mC(0.5/eps), mD(2*math::pi<RealT>()*mC), mE(eps) {}
    // Evaluate the smeared-out delta at phi. Zero outside [-eps, eps];
    // the result is in units of one over the units of phi.
    inline RealT operator()(RealT phi) const
    {
        if (math::Abs(phi) > mE) return 0;
        return mC*(1 + cos(mD*phi));
    }
private:
    const RealT mC, mD, mE;// 1/(2*eps), pi/eps, and the half-width eps
};// DiracDelta functor
/// @brief Multi-threaded computation of surface area, volume and
/// average mean-curvature for narrow band level sets.
///
/// @details To reduce the risk of round-off errors (primarily due to
/// catastrophic cancellation) and guarantee determinism during
/// multi-threading this class is implemented using parallel_for, and
/// delayed reduction of a sorted list.
template<typename GridT, typename InterruptT = util::NullInterrupter>
class LevelSetMeasure
{
public:
    using GridType = GridT;
    using TreeType = typename GridType::TreeType;
    using ValueType = typename TreeType::ValueType;
    using ManagerType = typename tree::LeafManager<const TreeType>;
    static_assert(std::is_floating_point<ValueType>::value,
        "level set measure is supported only for scalar, floating-point grids");
    /// @brief Main constructor from a grid
    /// @param grid The level set to be measured.
    /// @param interrupt Optional interrupter.
    /// @throw RuntimeError if the grid is not a level set or if it's empty.
    LevelSetMeasure(const GridType& grid, InterruptT* interrupt = nullptr);
    /// @brief Re-initialize using the specified grid.
    /// @param grid The level set to be measured.
    /// @throw RuntimeError if the grid is not a level set or if it's empty.
    void init(const GridType& grid);
    /// @brief Destructor
    virtual ~LevelSetMeasure() {}
    /// @return the grain-size used for multi-threading
    int getGrainSize() const { return mGrainSize; }
    /// @brief Set the grain-size used for multi-threading.
    /// @note A grain size of 0 or less disables multi-threading!
    void setGrainSize(int grainsize) { mGrainSize = grainsize; }
    /// @brief Compute the surface area of the level set.
    /// @param useWorldUnits Specifies if the result is in world or voxel units.
    /// @note Performs internal caching so only the initial call incurs actual computation.
    Real area(bool useWorldUnits = true);
    /// @brief Compute the volume of the level set surface.
    /// @param useWorldUnits Specifies if the result is in world or voxel units.
    /// @note Performs internal caching so only the initial call incurs actual computation.
    Real volume(bool useWorldUnits = true);
    /// @brief Compute the total mean curvature of the level set surface.
    /// @param useWorldUnits Specifies if the result is in world or voxel units.
    /// @note Performs internal caching so only the initial call incurs actual computation.
    Real totMeanCurvature(bool useWorldUnits = true);
    /// @brief Compute the total gaussian curvature of the level set surface.
    /// @param useWorldUnits Specifies if the result is in world or voxel units.
    /// @note Performs internal caching so only the initial call incurs actual computation.
    Real totGaussianCurvature(bool useWorldUnits = true);
    /// @brief Compute the average mean curvature of the level set surface.
    /// @param useWorldUnits Specifies if the result is in world or voxel units.
    /// @note Performs internal caching so only the initial call incurs actual computation.
    Real avgMeanCurvature(bool useWorldUnits = true) {return this->totMeanCurvature(useWorldUnits) / this->area(useWorldUnits);}
    /// @brief Compute the average gaussian curvature of the level set surface.
    /// @param useWorldUnits Specifies if the result is in world or voxel units.
    /// @note Performs internal caching so only the initial call incurs actual computation.
    Real avgGaussianCurvature(bool useWorldUnits = true) {return this->totGaussianCurvature(useWorldUnits) / this->area(useWorldUnits); }
    /// @brief Compute the Euler characteristic of the level set surface.
    /// @note Performs internal caching so only the initial call incurs actual computation.
    int eulerCharacteristic();
    /// @brief Compute the genus of the level set surface.
    /// @warning The genus is only well defined for a single connected surface.
    /// @note Performs internal caching so only the initial call incurs actual computation.
    int genus() { return 1 - this->eulerCharacteristic()/2;}
private:
    using LeafT = typename TreeType::LeafNodeType;
    using VoxelCIterT = typename LeafT::ValueOnCIter;
    using LeafRange = typename ManagerType::LeafRange;
    using LeafIterT = typename LeafRange::Iterator;
    using ManagerPtr = std::unique_ptr<ManagerType>;
    using BufferPtr = std::unique_ptr<double[]>;
    // disallow copy construction and copy by assignment!
    LevelSetMeasure(const LevelSetMeasure&);// not implemented
    LevelSetMeasure& operator=(const LevelSetMeasure&);// not implemented
    const GridType *mGrid;// level set being measured (not owned)
    ManagerPtr mLeafs;// leaf manager over mGrid's tree
    BufferPtr mBuffer;// 2*leafCount scratch doubles for per-leaf partial sums
    InterruptT *mInterrupter;// optional interrupter (not owned)
    double mDx, mArea, mVolume, mTotMeanCurvature, mTotGausCurvature;// voxel size and cached results
    int mGrainSize;// tbb grain size; <= 0 disables multi-threading
    bool mUpdateArea, mUpdateCurvature;// dirty flags for the cached results
    // @brief Return false if the process was interrupted
    bool checkInterrupter();
    // Functor whose constructor computes (and caches in the parent) the area
    // and volume; per-leaf partial sums go into mBuffer and are combined by
    // reduce() afterwards.
    struct MeasureArea
    {
        MeasureArea(LevelSetMeasure* parent) : mParent(parent), mStencil(*mParent->mGrid)
        {
            if (parent->mInterrupter) parent->mInterrupter->start("Measuring area and volume of level set");
            if (parent->mGrainSize>0) {
                tbb::parallel_for(parent->mLeafs->leafRange(parent->mGrainSize), *this);
            } else {
                (*this)(parent->mLeafs->leafRange());
            }
            tbb::parallel_invoke([&](){parent->mArea = parent->reduce(0);},
                                 [&](){parent->mVolume = parent->reduce(1)/3.0;});
            parent->mUpdateArea = false;
            if (parent->mInterrupter) parent->mInterrupter->end();
        }
        MeasureArea(const MeasureArea& other) : mParent(other.mParent), mStencil(*mParent->mGrid) {}
        void operator()(const LeafRange& range) const;
        LevelSetMeasure* mParent;
        mutable math::GradStencil<GridT, false> mStencil;
    };// MeasureArea
    // Functor whose constructor computes (and caches in the parent) the total
    // mean and Gaussian curvatures, using the same buffer/reduce scheme.
    struct MeasureCurvatures
    {
        MeasureCurvatures(LevelSetMeasure* parent) : mParent(parent), mStencil(*mParent->mGrid)
        {
            if (parent->mInterrupter) parent->mInterrupter->start("Measuring curvatures of level set");
            if (parent->mGrainSize>0) {
                tbb::parallel_for(parent->mLeafs->leafRange(parent->mGrainSize), *this);
            } else {
                (*this)(parent->mLeafs->leafRange());
            }
            tbb::parallel_invoke([&](){parent->mTotMeanCurvature = parent->reduce(0);},
                                 [&](){parent->mTotGausCurvature = parent->reduce(1);});
            parent->mUpdateCurvature = false;
            if (parent->mInterrupter) parent->mInterrupter->end();
        }
        MeasureCurvatures(const MeasureCurvatures& other) : mParent(other.mParent), mStencil(*mParent->mGrid) {}
        void operator()(const LeafRange& range) const;
        LevelSetMeasure* mParent;
        mutable math::CurvatureStencil<GridT, false> mStencil;
    };// MeasureCurvatures
    // Deterministically sum one half of mBuffer (offset 0 or 1): the values
    // are sorted first to mitigate catastrophic cancellation.
    double reduce(int offset)
    {
        double *first = mBuffer.get() + offset*mLeafs->leafCount(), *last = first + mLeafs->leafCount();
        tbb::parallel_sort(first, last);// mitigates catastrophic cancellation
        Real sum = 0.0;
        while(first != last) sum += *first++;
        return sum;
    }
}; // end of LevelSetMeasure class
// Construct a measure object for @a grid; all validation happens in init().
template<typename GridT, typename InterruptT>
inline
LevelSetMeasure<GridT, InterruptT>::LevelSetMeasure(const GridType& grid, InterruptT* interrupt)
    : mInterrupter(interrupt)
    , mGrainSize(1)
{
    this->init(grid);
}
// Validate @a grid (uniform voxels, level-set class, non-empty), then set up
// the leaf manager and the scratch buffer, and mark all cached results stale.
template<typename GridT, typename InterruptT>
inline void
LevelSetMeasure<GridT, InterruptT>::init(const GridType& grid)
{
    if (!grid.hasUniformVoxels()) {
        OPENVDB_THROW(RuntimeError,
            "The transform must have uniform scale for the LevelSetMeasure to function");
    }
    if (grid.getGridClass() != GRID_LEVEL_SET) {
        OPENVDB_THROW(RuntimeError,
            "LevelSetMeasure only supports level sets;"
            " try setting the grid class to \"level set\"");
    }
    if (grid.empty()) {
        OPENVDB_THROW(RuntimeError,
            "LevelSetMeasure does not support empty grids;");
    }
    mGrid = &grid;
    mDx = grid.voxelSize()[0];// uniform voxels, so a single scale suffices
    mLeafs = std::make_unique<ManagerType>(mGrid->tree());
    mBuffer = std::make_unique<double[]>(2*mLeafs->leafCount());// two partial sums per leaf
    mUpdateArea = mUpdateCurvature = true;
}
// Return the (cached) surface area, recomputing lazily when stale.
// World-space results are scaled by dx^2.
template<typename GridT, typename InterruptT>
inline Real
LevelSetMeasure<GridT, InterruptT>::area(bool useWorldUnits)
{
    if (mUpdateArea) {
        MeasureArea recompute(this);// ctor computes and caches mArea and mVolume
    }
    return useWorldUnits ? mArea * math::Pow2(mDx) : mArea;
}
// Return the (cached) enclosed volume, recomputing lazily when stale.
// World-space results are scaled by dx^3.
template<typename GridT, typename InterruptT>
inline Real
LevelSetMeasure<GridT, InterruptT>::volume(bool useWorldUnits)
{
    if (mUpdateArea) {
        MeasureArea recompute(this);// ctor computes and caches mArea and mVolume
    }
    return useWorldUnits ? mVolume * math::Pow3(mDx) : mVolume;
}
// Return the (cached) total mean curvature, recomputing lazily when stale.
// World-space results are scaled by dx.
template<typename GridT, typename InterruptT>
inline Real
LevelSetMeasure<GridT, InterruptT>::totMeanCurvature(bool useWorldUnits)
{
    if (mUpdateCurvature) {
        MeasureCurvatures recompute(this);// ctor computes and caches both curvature sums
    }
    Real result = mTotMeanCurvature;
    if (useWorldUnits) result *= mDx;
    return result;
}
// Return the (cached) total Gaussian curvature, recomputing lazily when
// stale. The unit flag is ignored: the cached total is returned unchanged.
template<typename GridT, typename InterruptT>
inline Real
LevelSetMeasure<GridT, InterruptT>::totGaussianCurvature(bool)
{
    if (mUpdateCurvature) {
        MeasureCurvatures recompute(this);// ctor computes and caches both curvature sums
    }
    return mTotGausCurvature;
}
// chi = round(total Gaussian curvature / (2*pi)) — Gauss-Bonnet theorem.
template<typename GridT, typename InterruptT>
inline int
LevelSetMeasure<GridT, InterruptT>::eulerCharacteristic()
{
    return static_cast<int>(math::Round(this->totGaussianCurvature(true) / (2.0*math::pi<Real>())));
}
///////////////////////// PRIVATE METHODS //////////////////////
// Return false (after cancelling the enclosing tbb task group) if the user
// requested an interrupt; true otherwise.
template<typename GridT, typename InterruptT>
inline bool
LevelSetMeasure<GridT, InterruptT>::checkInterrupter()
{
    if (util::wasInterrupted(mInterrupter)) {
        tbb::task::self().cancel_group_execution();
        return false;
    }
    return true;
}
// Per-leaf-range kernel: accumulate the smeared-delta area and (3x) volume
// integrands over the active voxels of each leaf, writing one partial sum
// per leaf into the parent's buffer (area first half, volume second half).
template<typename GridT, typename InterruptT>
inline void
LevelSetMeasure<GridT, InterruptT>::
MeasureArea::operator()(const LeafRange& range) const
{
    using Vec3T = math::Vec3<ValueType>;
    // computations are performed in index space where dV = 1
    mParent->checkInterrupter();
    const Real invDx = 1.0/mParent->mDx;
    const DiracDelta<Real> DD(1.5);// dirac delta function is 3 voxel units wide
    const size_t leafCount = mParent->mLeafs->leafCount();
    for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) {
        Real sumA = 0, sumV = 0;//reduce risk of catastrophic cancellation
        for (VoxelCIterT voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter) {
            const Real dd = DD(invDx * (*voxelIter));
            if (dd > 0.0) {// voxel lies inside the delta's support band
                mStencil.moveTo(voxelIter);
                const Coord& p = mStencil.getCenterCoord();// in voxel units
                const Vec3T g = mStencil.gradient();// in world units
                sumA += dd*g.length();// \delta(\phi)*|\nabla\phi|
                sumV += dd*(g[0]*Real(p[0]) + g[1]*Real(p[1]) + g[2]*Real(p[2]));// \delta(\phi)\vec{x}\cdot\nabla\phi
            }
        }
        // Write this leaf's partial sums; combined later by reduce().
        double* ptr = mParent->mBuffer.get() + leafIter.pos();
        *ptr = sumA;
        ptr += leafCount;
        *ptr = sumV;
    }
}
// Per-leaf-range kernel: accumulate the smeared-delta mean and Gaussian
// curvature integrands over the active voxels of each leaf, writing one
// partial sum per leaf into the parent's buffer (mean first half, Gaussian
// second half).
template<typename GridT, typename InterruptT>
inline void
LevelSetMeasure<GridT, InterruptT>::
MeasureCurvatures::operator()(const LeafRange& range) const
{
    using Vec3T = math::Vec3<ValueType>;
    // computations are performed in index space where dV = 1
    mParent->checkInterrupter();
    const Real dx = mParent->mDx, dx2=dx*dx, invDx = 1.0/dx;
    const DiracDelta<Real> DD(1.5);// dirac delta function is 3 voxel units wide
    ValueType mean, gauss;
    const size_t leafCount = mParent->mLeafs->leafCount();
    for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) {
        Real sumM = 0, sumG = 0;//reduce risk of catastrophic cancellation
        for (VoxelCIterT voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter) {
            const Real dd = DD(invDx * (*voxelIter));
            if (dd > 0.0) {// voxel lies inside the delta's support band
                mStencil.moveTo(voxelIter);
                const Vec3T g = mStencil.gradient();
                const Real dA = dd*g.length();// \delta(\phi)*|\nabla\phi| = area element
                mStencil.curvatures(mean, gauss);
                sumM += dA*mean*dx;// dA*MeanCurvature (dx converts to index space)
                sumG += dA*gauss*dx2;// dA*GaussCurvature (dx2 converts to index space)
            }
        }
        // Write this leaf's partial sums; combined later by reduce().
        double* ptr = mParent->mBuffer.get() + leafIter.pos();
        *ptr = sumM;
        ptr += leafCount;
        *ptr = sumG;
    }
}
////////////////////////////////////////
//{
/// @cond OPENVDB_LEVEL_SET_MEASURE_INTERNAL
// Overload selected for floating-point grids: delegate to LevelSetMeasure.
template<class GridT>
inline
typename std::enable_if<std::is_floating_point<typename GridT::ValueType>::value, Real>::type
doLevelSetArea(const GridT& grid, bool useWorldUnits)
{
    return LevelSetMeasure<GridT>(grid).area(useWorldUnits);
}
/// @brief SFINAE overload selected for non-floating-point grids.
/// @throw TypeError unconditionally, since the measure is undefined here.
template<class GridT>
inline
typename std::enable_if<!std::is_floating_point<typename GridT::ValueType>::value, Real>::type
doLevelSetArea(const GridT&, bool)
{
    OPENVDB_THROW(TypeError, "level set area is supported only for scalar, floating-point grids");
}
/// @endcond
//}
/// @brief Return the surface area of the level set in @a grid.
/// @param grid          a scalar, floating-point level set grid
/// @param useWorldUnits if true the result is reported in world units
template<class GridT>
inline Real
levelSetArea(const GridT& grid, bool useWorldUnits)
{
    // Template-argument deduction picks the overload matching the value type.
    return doLevelSetArea(grid, useWorldUnits);
}
////////////////////////////////////////
//{
/// @cond OPENVDB_LEVEL_SET_MEASURE_INTERNAL
/// @brief SFINAE overload selected for scalar, floating-point grids.
/// @return the enclosed volume of the level set, computed by LevelSetMeasure.
template<class GridT>
inline
typename std::enable_if<std::is_floating_point<typename GridT::ValueType>::value, Real>::type
doLevelSetVolume(const GridT& grid, bool useWorldUnits)
{
    return LevelSetMeasure<GridT>(grid).volume(useWorldUnits);
}
/// @brief SFINAE overload selected for non-floating-point grids.
/// @throw TypeError unconditionally, since the measure is undefined here.
template<class GridT>
inline
typename std::enable_if<!std::is_floating_point<typename GridT::ValueType>::value, Real>::type
doLevelSetVolume(const GridT&, bool)
{
    OPENVDB_THROW(TypeError, "level set volume is supported only for scalar, floating-point grids");
}
/// @endcond
//}
/// @brief Return the volume enclosed by the level set in @a grid.
/// @param grid          a scalar, floating-point level set grid
/// @param useWorldUnits if true the result is reported in world units
template<class GridT>
inline Real
levelSetVolume(const GridT& grid, bool useWorldUnits)
{
    // Template-argument deduction picks the overload matching the value type.
    return doLevelSetVolume(grid, useWorldUnits);
}
////////////////////////////////////////
//{
/// @cond OPENVDB_LEVEL_SET_MEASURE_INTERNAL
/// @brief SFINAE overload selected for scalar, floating-point grids.
/// @return the Euler characteristic of the level-set surface.
template<class GridT>
inline
typename std::enable_if<std::is_floating_point<typename GridT::ValueType>::value, int>::type
doLevelSetEulerCharacteristic(const GridT& grid)
{
    return LevelSetMeasure<GridT>(grid).eulerCharacteristic();
}
/// @brief SFINAE overload selected for non-floating-point grids.
/// @throw TypeError unconditionally, since the measure is undefined here.
template<class GridT>
inline
typename std::enable_if<!std::is_floating_point<typename GridT::ValueType>::value, int>::type
doLevelSetEulerCharacteristic(const GridT&)
{
    OPENVDB_THROW(TypeError, "level set euler characteristic is supported only for scalar, floating-point grids");
}
/// @endcond
//}
/// @brief Return the Euler characteristic of the level-set surface in @a grid.
/// @param grid a scalar, floating-point level set grid
template<class GridT>
inline int
levelSetEulerCharacteristic(const GridT& grid)
{
    // Overload resolution (via enable_if) selects the floating-point path
    // or the throwing path depending on the grid's value type.
    return doLevelSetEulerCharacteristic(grid);
}
////////////////////////////////////////
//{
/// @cond OPENVDB_LEVEL_SET_MEASURE_INTERNAL
/// @brief SFINAE overload selected for scalar, floating-point grids.
/// @return the Euler characteristic of the level-set surface.
/// NOTE(review): this calls eulerCharacteristics() (plural) whereas the
/// sibling doLevelSetEulerCharacteristic() above calls eulerCharacteristic()
/// (singular) -- confirm against the LevelSetMeasure class declaration that
/// both spellings exist; also note no levelSetEuler() wrapper is visible in
/// this file, so this overload may be a leftover.
template<class GridT>
inline
typename std::enable_if<std::is_floating_point<typename GridT::ValueType>::value, int>::type
doLevelSetEuler(const GridT& grid)
{
    LevelSetMeasure<GridT> m(grid);
    return m.eulerCharacteristics();
}
/// @brief SFINAE overload selected for scalar, floating-point grids.
/// @return the genus of the level-set surface.
template<class GridT>
inline
typename std::enable_if<std::is_floating_point<typename GridT::ValueType>::value, int>::type
doLevelSetGenus(const GridT& grid)
{
    return LevelSetMeasure<GridT>(grid).genus();
}
/// @brief SFINAE overload selected for non-floating-point grids.
/// @throw TypeError unconditionally, since the measure is undefined here.
template<class GridT>
inline
typename std::enable_if<!std::is_floating_point<typename GridT::ValueType>::value, int>::type
doLevelSetGenus(const GridT&)
{
    OPENVDB_THROW(TypeError, "level set genus is supported only for scalar, floating-point grids");
}
/// @endcond
//}
/// @brief Return the genus of the level-set surface in @a grid.
/// @param grid a scalar, floating-point level set grid
template<class GridT>
inline int
levelSetGenus(const GridT& grid)
{
    // Overload resolution (via enable_if) selects the floating-point path
    // or the throwing path depending on the grid's value type.
    return doLevelSetGenus(grid);
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_LEVELSETMEASURE_HAS_BEEN_INCLUDED
| 19,524 | C | 34.629562 | 137 | 0.673171 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/LevelSetTracker.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @author Ken Museth
///
/// @file tools/LevelSetTracker.h
///
/// @brief Performs multi-threaded interface tracking of narrow band
/// level sets. This is the building-block for most level set
/// computations that involve dynamic topology, e.g. advection.
#ifndef OPENVDB_TOOLS_LEVEL_SET_TRACKER_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_LEVEL_SET_TRACKER_HAS_BEEN_INCLUDED
#include <tbb/parallel_for.h>
#include <openvdb/Types.h>
#include <openvdb/math/Math.h>
#include <openvdb/math/FiniteDifference.h>
#include <openvdb/math/Operators.h>
#include <openvdb/math/Stencils.h>
#include <openvdb/math/Transform.h>
#include <openvdb/Grid.h>
#include <openvdb/util/NullInterrupter.h>
#include <openvdb/tree/ValueAccessor.h>
#include <openvdb/tree/LeafManager.h>
#include "ChangeBackground.h"// for changeLevelSetBackground
#include "Morphology.h"//for dilateActiveValues
#include "Prune.h"// for pruneLevelSet
#include <functional>
#include <type_traits>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
namespace lstrack {
/// @brief How to handle voxels that fall outside the narrow band
/// @details Consulted by LevelSetTracker::prune(), which applies the selected
/// trimming before removing inactive nodes.
/// @sa @link LevelSetTracker::trimming() trimming@endlink,
///     @link LevelSetTracker::setTrimming() setTrimming@endlink
enum class TrimMode {
    kNone,     ///< Leave out-of-band voxels intact
    kInterior, ///< Set out-of-band interior voxels to the background value
    kExterior, ///< Set out-of-band exterior voxels to the background value
    kAll       ///< Set all out-of-band voxels to the background value
};
} // namespace lstrack
/// @brief Performs multi-threaded interface tracking of narrow band level sets
template<typename GridT, typename InterruptT = util::NullInterrupter>
class LevelSetTracker
{
public:
    using TrimMode = lstrack::TrimMode;

    using GridType = GridT;
    using TreeType = typename GridT::TreeType;
    using LeafType = typename TreeType::LeafNodeType;
    using ValueType = typename TreeType::ValueType;
    using LeafManagerType = typename tree::LeafManager<TreeType>; // leafs + buffers
    using LeafRange = typename LeafManagerType::LeafRange;
    using BufferType = typename LeafManagerType::BufferType;
    using MaskTreeType = typename TreeType::template ValueConverter<ValueMask>::Type;

    static_assert(std::is_floating_point<ValueType>::value,
        "LevelSetTracker requires a level set grid with floating-point values");

    /// Lightweight struct that stores the state of the LevelSetTracker
    struct State {
        State(math::BiasedGradientScheme s = math::HJWENO5_BIAS,
              math::TemporalIntegrationScheme t = math::TVD_RK1,
              int n = static_cast<int>(LEVEL_SET_HALF_WIDTH), int g = 1)
            : spatialScheme(s), temporalScheme(t), normCount(n), grainSize(g) {}
        math::BiasedGradientScheme spatialScheme;
        math::TemporalIntegrationScheme temporalScheme;
        int normCount;// Number of iterations of normalization
        int grainSize;// TBB grain size; <= 0 disables multi-threading
    };

    /// @brief Main constructor
    /// @throw RuntimeError if the grid is not a level set
    LevelSetTracker(GridT& grid, InterruptT* interrupt = nullptr);

    virtual ~LevelSetTracker() { delete mLeafs; }

    /// @brief Iterative normalization, i.e. solving the Eikonal equation
    /// @note The mask it optional and by default it is ignored.
    template <typename MaskType>
    void normalize(const MaskType* mask);

    /// @brief Iterative normalization, i.e. solving the Eikonal equation
    void normalize() { this->normalize<MaskTreeType>(nullptr); }

    /// @brief Track the level set interface, i.e. rebuild and normalize the
    /// narrow band of the level set.
    void track();

    /// @brief Set voxels that are outside the narrow band to the background value
    /// (if trimming is enabled) and prune the grid.
    /// @details Pruning is done automatically as a step in tracking.
    /// @sa @link setTrimming() setTrimming@endlink, @link trimming() trimming@endlink
    void prune();

    /// @brief Fast but approximate dilation of the narrow band - one
    /// layer at a time. Normally we recommend using the resize method below
    /// which internally calls dilate (or erode) with the correct
    /// number of @a iterations to achieve the desired half voxel width
    /// of the narrow band (3 is recomended for most level set applications).
    ///
    /// @note Since many level set applications perform
    /// interface-tracking, which in turn rebuilds the narrow-band
    /// accurately, this dilate method can often be used with a
    /// single iterations of low-order re-normalization. This
    /// effectively allows very narrow bands to be created from points
    /// or polygons (e.g. with a half voxel width of 1), followed by a
    /// fast but approximate dilation (typically with a half voxel
    /// width of 3). This can be significantly faster than generating
    /// the final width of the narrow band from points or polygons.
    void dilate(int iterations = 1);

    /// @brief Erodes the width of the narrow-band and update the background values
    /// @throw ValueError if @a iterations is larger than the current half-width.
    void erode(int iterations = 1);

    /// @brief Resize the width of the narrow band, i.e. perform
    /// dilation and renormalization or erosion as required.
    bool resize(Index halfWidth = static_cast<Index>(LEVEL_SET_HALF_WIDTH));

    /// @brief Return the half width of the narrow band in floating-point voxel units.
    ValueType getHalfWidth() const { return mGrid->background()/mDx; }

    /// @brief Return the state of the tracker (see struct defined above)
    State getState() const { return mState; }

    /// @brief Set the state of the tracker (see struct defined above)
    void setState(const State& s) { mState = s; }

    /// @return the spatial finite difference scheme
    math::BiasedGradientScheme getSpatialScheme() const { return mState.spatialScheme; }

    /// @brief Set the spatial finite difference scheme
    void setSpatialScheme(math::BiasedGradientScheme s) { mState.spatialScheme = s; }

    /// @return the temporal integration scheme
    math::TemporalIntegrationScheme getTemporalScheme() const { return mState.temporalScheme; }

    /// @brief Set the temporal integration scheme
    void setTemporalScheme(math::TemporalIntegrationScheme s) { mState.temporalScheme = s;}

    /// @return The number of normalizations performed per track or
    /// normalize call.
    int getNormCount() const { return mState.normCount; }

    /// @brief Set the number of normalizations performed per track or
    /// normalize call.
    void setNormCount(int n) { mState.normCount = n; }

    /// @return the grain-size used for multi-threading
    int getGrainSize() const { return mState.grainSize; }

    /// @brief Set the grain-size used for multi-threading.
    /// @note A grainsize of 0 or less disables multi-threading!
    void setGrainSize(int grainsize) { mState.grainSize = grainsize; }

    /// @brief Return the trimming mode for voxels outside the narrow band.
    /// @details Trimming is enabled by default and is applied automatically prior to pruning.
    /// @sa @link setTrimming() setTrimming@endlink, @link prune() prune@endlink
    TrimMode trimming() const { return mTrimMode; }

    /// @brief Specify whether to trim voxels outside the narrow band prior to pruning.
    /// @sa @link trimming() trimming@endlink, @link prune() prune@endlink
    void setTrimming(TrimMode mode) { mTrimMode = mode; }

    /// @brief Return the (uniform) voxel size cached at construction.
    ValueType voxelSize() const { return mDx; }

    void startInterrupter(const char* msg);

    void endInterrupter();

    /// @return false if the process was interrupted
    bool checkInterrupter();

    const GridType& grid() const { return *mGrid; }

    LeafManagerType& leafs() { return *mLeafs; }

    const LeafManagerType& leafs() const { return *mLeafs; }

private:
    // disallow copy construction and copy by assignment!
    LevelSetTracker(const LevelSetTracker&);// not implemented
    LevelSetTracker& operator=(const LevelSetTracker&);// not implemented

    // Private class to perform multi-threaded trimming of
    // voxels that are too far away from the zero-crossing.
    template<TrimMode Trimming>
    struct Trim
    {
        Trim(LevelSetTracker& tracker) : mTracker(tracker) {}
        void trim();
        void operator()(const LeafRange& r) const;
        LevelSetTracker& mTracker;
    };// Trim

    // Private struct to perform multi-threaded normalization
    template<math::BiasedGradientScheme SpatialScheme,
             math::TemporalIntegrationScheme TemporalScheme,
             typename MaskT>
    struct Normalizer
    {
        using SchemeT = math::BIAS_SCHEME<SpatialScheme>;
        using StencilT = typename SchemeT::template ISStencil<GridType>::StencilType;
        using MaskLeafT = typename MaskT::LeafNodeType;
        using MaskIterT = typename MaskLeafT::ValueOnCIter;
        using VoxelIterT = typename LeafType::ValueOnCIter;

        Normalizer(LevelSetTracker& tracker, const MaskT* mask);
        void normalize();
        // Dispatch to whatever Euler sub-step mTask was last bound to.
        void operator()(const LeafRange& r) const {mTask(const_cast<Normalizer*>(this), r);}
        void cook(const char* msg, int swapBuffer=0);
        template <int Nominator, int Denominator>
        void euler(const LeafRange& range, Index phiBuffer, Index resultBuffer);
        inline void euler01(const LeafRange& r) {this->euler<0,1>(r, 0, 1);}
        inline void euler12(const LeafRange& r) {this->euler<1,2>(r, 1, 1);}
        inline void euler34(const LeafRange& r) {this->euler<3,4>(r, 1, 2);}
        inline void euler13(const LeafRange& r) {this->euler<1,3>(r, 1, 2);}
        template <int Nominator, int Denominator>
        void eval(StencilT& stencil, const ValueType* phi, ValueType* result, Index n) const;
        LevelSetTracker& mTracker;
        const MaskT* mMask;
        const ValueType mDt, mInvDx;// pseudo time step and 1/voxel-size
        // Current Euler sub-step, rebound between the stages of an RK scheme.
        typename std::function<void (Normalizer*, const LeafRange&)> mTask;
    }; // Normalizer struct

    // First dispatch stage: spatial scheme becomes a template parameter.
    template<math::BiasedGradientScheme SpatialScheme, typename MaskT>
    void normalize1(const MaskT* mask);

    // Second dispatch stage: temporal scheme becomes a template parameter too.
    template<math::BiasedGradientScheme SpatialScheme,
             math::TemporalIntegrationScheme TemporalScheme, typename MaskT>
    void normalize2(const MaskT* mask);

    // Throughout the methods below mLeafs is always assumed to contain
    // a list of the current LeafNodes! The auxiliary buffers on the
    // other hand always have to be allocated locally, since some
    // methods need them and others don't!
    GridType* mGrid;
    LeafManagerType* mLeafs;
    InterruptT* mInterrupter;
    const ValueType mDx;// uniform voxel size cached at construction
    State mState;
    TrimMode mTrimMode = TrimMode::kAll;
}; // end of LevelSetTracker class
/// @brief Main constructor: caches the grid, builds the leaf manager, and
/// records the (uniform) voxel size.
/// @throw RuntimeError if the transform has non-uniform scale, or if the
/// grid is not tagged as a level set.
template<typename GridT, typename InterruptT>
LevelSetTracker<GridT, InterruptT>::
LevelSetTracker(GridT& grid, InterruptT* interrupt):
    mGrid(&grid),
    mLeafs(new LeafManagerType(grid.tree())),
    mInterrupter(interrupt),
    mDx(static_cast<ValueType>(grid.voxelSize()[0])),
    mState()
{
    if ( !grid.hasUniformVoxels() ) {
        OPENVDB_THROW(RuntimeError,
            "The transform must have uniform scale for the LevelSetTracker to function");
    }
    if ( grid.getGridClass() != GRID_LEVEL_SET) {
        OPENVDB_THROW(RuntimeError,
            "LevelSetTracker expected a level set, got a grid of class \""
            + grid.gridClassToString(grid.getGridClass())
            + "\" [hint: Grid::setGridClass(openvdb::GRID_LEVEL_SET)]");
    }
}
/// @brief Deactivate voxels outside the narrow band (subject to the current
/// trimming mode), prune inactive nodes, and refresh the cached leaf list.
template<typename GridT, typename InterruptT>
inline void
LevelSetTracker<GridT, InterruptT>::
prune()
{
    this->startInterrupter("Pruning Level Set");

    // Set voxels that are too far away from the zero crossing to the background value.
    switch (mTrimMode) {
        case TrimMode::kNone:     break;
        case TrimMode::kInterior: Trim<TrimMode::kInterior>(*this).trim(); break;
        case TrimMode::kExterior: Trim<TrimMode::kExterior>(*this).trim(); break;
        case TrimMode::kAll:      Trim<TrimMode::kAll>(*this).trim(); break;
    }

    // Remove inactive nodes from tree
    tools::pruneLevelSet(mGrid->tree());

    // The tree topology has changed so rebuild the list of leafs
    mLeafs->rebuildLeafArray();
    this->endInterrupter();
}
/// @brief Track the level set interface: dilate the narrow band by one voxel
/// layer, re-solve the Eikonal equation there, and trim/prune what now falls
/// outside the band. The three steps below are order-dependent.
template<typename GridT, typename InterruptT>
inline void
LevelSetTracker<GridT, InterruptT>::
track()
{
    // Dilate narrow-band (this also rebuilds the leaf array!)
    tools::dilateActiveValues( *mLeafs, 1, tools::NN_FACE, tools::IGNORE_TILES);

    // Compute signed distances in dilated narrow-band
    this->normalize();

    // Remove voxels that are outside the narrow band
    this->prune();
}
/// @brief Approximate dilation of the narrow band, one voxel layer per
/// iteration, growing the background value by one voxel size each time.
/// @details With renormalization disabled (normCount == 0) the band is
/// simply grown. Otherwise each iteration captures the active topology
/// before and after the dilation and renormalizes only the newly added
/// voxels (the topology difference).
template<typename GridT, typename InterruptT>
inline void
LevelSetTracker<GridT, InterruptT>::
dilate(int iterations)
{
    if (this->getNormCount() == 0) {
        for (int i=0; i < iterations; ++i) {
            tools::dilateActiveValues( *mLeafs, 1, tools::NN_FACE, tools::IGNORE_TILES);
            tools::changeLevelSetBackground(this->leafs(), mDx + mGrid->background());
        }
    } else {
        for (int i=0; i < iterations; ++i) {
            // Snapshot of the active topology before this dilation step ...
            MaskTreeType mask0(mGrid->tree(), false, TopologyCopy());
            tools::dilateActiveValues( *mLeafs, 1, tools::NN_FACE, tools::IGNORE_TILES);
            tools::changeLevelSetBackground(this->leafs(), mDx + mGrid->background());
            // ... and after: the difference marks only the fresh voxels.
            MaskTreeType mask(mGrid->tree(), false, TopologyCopy());
            mask.topologyDifference(mask0);
            this->normalize(&mask);
        }
    }
}
/// @brief Erodes the width of the narrow-band by @a iterations voxel layers
/// and shrinks the background value to match the smaller half-width.
template<typename GridT, typename InterruptT>
inline void
LevelSetTracker<GridT, InterruptT>::
erode(int iterations)
{
    tools::erodeVoxels(*mLeafs, iterations);

    // Topology changed, so refresh the cached leaf list.
    mLeafs->rebuildLeafArray();

    // One voxel size less of background per eroded layer.
    const ValueType background = mGrid->background() - ValueType(iterations) * mDx;
    tools::changeLevelSetBackground(this->leafs(), background);
}
/// @brief Resize the narrow band to @a halfWidth voxels by dilating (and
/// renormalizing) or eroding as required.
/// @return true if the width actually changed.
template<typename GridT, typename InterruptT>
inline bool
LevelSetTracker<GridT, InterruptT>::
resize(Index halfWidth)
{
    // Current width rounded down to whole voxels vs the requested width.
    const int widthNow    = static_cast<int>(math::RoundDown(this->getHalfWidth()));
    const int widthWanted = static_cast<int>(halfWidth);
    const int delta = widthWanted - widthNow;
    if (delta > 0) {
        this->dilate(delta);
    } else if (delta < 0) {
        this->erode(-delta);
    }
    return delta != 0;
}
/// @brief Forward @a msg to the interrupter, if one was supplied.
template<typename GridT, typename InterruptT>
inline void
LevelSetTracker<GridT, InterruptT>::
startInterrupter(const char* msg)
{
    if (mInterrupter != nullptr) {
        mInterrupter->start(msg);
    }
}
/// @brief Notify the interrupter, if one was supplied, that work finished.
template<typename GridT, typename InterruptT>
inline void
LevelSetTracker<GridT, InterruptT>::
endInterrupter()
{
    if (mInterrupter != nullptr) {
        mInterrupter->end();
    }
}
/// @brief Poll the interrupter and, on interruption, cancel the enclosing
/// TBB task group.
/// @return false if the process was interrupted
template<typename GridT, typename InterruptT>
inline bool
LevelSetTracker<GridT, InterruptT>::
checkInterrupter()
{
    if (!util::wasInterrupted(mInterrupter)) return true;
    // Abort all tasks in the current parallel group as soon as possible.
    tbb::task::self().cancel_group_execution();
    return false;
}
/// @brief Iterative normalization, i.e. solving the Eikonal equation.
/// @details First of a two-stage compile-time dispatch: the run-time spatial
/// scheme is switched on and forwarded to normalize1 as a template
/// parameter. The optional @a mask restricts which voxels are updated.
/// @throw ValueError for an unrecognized spatial difference scheme.
template<typename GridT, typename InterruptT>
template<typename MaskT>
inline void
LevelSetTracker<GridT, InterruptT>::
normalize(const MaskT* mask)
{
    switch (this->getSpatialScheme()) {
    case math::FIRST_BIAS:
        this->normalize1<math::FIRST_BIAS  , MaskT>(mask); break;
    case math::SECOND_BIAS:
        this->normalize1<math::SECOND_BIAS , MaskT>(mask); break;
    case math::THIRD_BIAS:
        this->normalize1<math::THIRD_BIAS  , MaskT>(mask); break;
    case math::WENO5_BIAS:
        this->normalize1<math::WENO5_BIAS  , MaskT>(mask); break;
    case math::HJWENO5_BIAS:
        this->normalize1<math::HJWENO5_BIAS, MaskT>(mask); break;
    case math::UNKNOWN_BIAS:
    default:
        OPENVDB_THROW(ValueError, "Spatial difference scheme not supported!");
    }
}
/// @brief Second dispatch stage: switch on the run-time temporal integration
/// scheme and forward to normalize2 with both schemes as template parameters.
/// @throw ValueError for an unrecognized temporal integration scheme.
template<typename GridT, typename InterruptT>
template<math::BiasedGradientScheme SpatialScheme, typename MaskT>
inline void
LevelSetTracker<GridT, InterruptT>::
normalize1(const MaskT* mask)
{
    switch (this->getTemporalScheme()) {
    case math::TVD_RK1:
        this->normalize2<SpatialScheme, math::TVD_RK1, MaskT>(mask); break;
    case math::TVD_RK2:
        this->normalize2<SpatialScheme, math::TVD_RK2, MaskT>(mask); break;
    case math::TVD_RK3:
        this->normalize2<SpatialScheme, math::TVD_RK3, MaskT>(mask); break;
    case math::UNKNOWN_TIS:
    default:
        OPENVDB_THROW(ValueError, "Temporal integration scheme not supported!");
    }
}
/// @brief Final dispatch stage: both schemes are now template parameters, so
/// build the fully specialized Normalizer and run it.
template<typename GridT, typename InterruptT>
template<math::BiasedGradientScheme SpatialScheme,
         math::TemporalIntegrationScheme TemporalScheme,
         typename MaskT>
inline void
LevelSetTracker<GridT, InterruptT>::
normalize2(const MaskT* mask)
{
    Normalizer<SpatialScheme, TemporalScheme, MaskT>(*this, mask).normalize();
}
////////////////////////////////////////////////////////////////////////////
/// @brief Launch the trimming pass over all leaf nodes, threaded when the
/// tracker's grain size is positive. A no-op when Trimming == kNone.
template<typename GridT, typename InterruptT>
template<lstrack::TrimMode Trimming>
inline void
LevelSetTracker<GridT, InterruptT>::Trim<Trimming>::trim()
{
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    if (Trimming != TrimMode::kNone) {
        const int grainSize = mTracker.getGrainSize();
        const LeafRange range = mTracker.leafs().leafRange(grainSize);
        grainSize > 0 ? tbb::parallel_for(range, *this) : (*this)(range);
    }
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}
/// Trim away voxels that have moved outside the narrow band
/// Trim away voxels that have moved outside the narrow band
/// @details Compares every active value against the band half-width gamma
/// (the grid's background value). Depending on the compile-time Trimming
/// mode, interior values <= -gamma and/or exterior values >= gamma are
/// deactivated and clamped to -gamma / +gamma respectively.
template<typename GridT, typename InterruptT>
template<lstrack::TrimMode Trimming>
inline void
LevelSetTracker<GridT, InterruptT>::Trim<Trimming>::operator()(const LeafRange& range) const
{
    mTracker.checkInterrupter();
    const ValueType gamma = mTracker.mGrid->background();

    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    for (auto leafIter = range.begin(); leafIter; ++leafIter) {
        auto& leaf = *leafIter;
        for (auto iter = leaf.beginValueOn(); iter; ++iter) {
            const auto val = *iter;
            switch (Trimming) { // resolved at compile time
            case TrimMode::kNone:
                break;
            case TrimMode::kInterior:
                if (val <= -gamma) { leaf.setValueOff(iter.pos(), -gamma); }
                break;
            case TrimMode::kExterior:
                if (val >= gamma) { leaf.setValueOff(iter.pos(), gamma); }
                break;
            case TrimMode::kAll:
                if (val <= -gamma) {
                    leaf.setValueOff(iter.pos(), -gamma);
                } else if (val >= gamma) {
                    leaf.setValueOff(iter.pos(), gamma);
                }
                break;
            }
        }
    }
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}
////////////////////////////////////////////////////////////////////////////
/// @brief Cache the tracker and optional mask, and precompute the pseudo
/// time step and inverse voxel size used by the Euler updates.
/// @details The time-step factor depends on the temporal scheme:
/// 0.3 for TVD_RK1, 0.9 for TVD_RK2 and 1.0 for TVD_RK3.
template<typename GridT, typename InterruptT>
template<math::BiasedGradientScheme SpatialScheme,
         math::TemporalIntegrationScheme TemporalScheme,
         typename MaskT>
inline
LevelSetTracker<GridT, InterruptT>::
Normalizer<SpatialScheme, TemporalScheme, MaskT>::
Normalizer(LevelSetTracker& tracker, const MaskT* mask)
    : mTracker(tracker)
    , mMask(mask)
    , mDt(tracker.voxelSize()*(TemporalScheme == math::TVD_RK1 ? 0.3f :
                               TemporalScheme == math::TVD_RK2 ? 0.9f : 1.0f))
    , mInvDx(1.0f/tracker.voxelSize())
    , mTask(nullptr)
{
}
/// @brief Run normCount iterations of the renormalization, using the
/// temporal integration scheme selected at compile time.
/// @details Allocates the auxiliary leaf buffers the scheme requires (two
/// for TVD_RK3, otherwise one), performs the Euler (sub-)steps via cook(),
/// and releases the buffers afterwards.
/// @throw ValueError for an unrecognized temporal integration scheme.
template<typename GridT, typename InterruptT>
template<math::BiasedGradientScheme SpatialScheme,
         math::TemporalIntegrationScheme TemporalScheme,
         typename MaskT>
inline void
LevelSetTracker<GridT, InterruptT>::
Normalizer<SpatialScheme, TemporalScheme, MaskT>::
normalize()
{
    namespace ph = std::placeholders;

    /// Make sure we have enough temporal auxiliary buffers
    mTracker.mLeafs->rebuildAuxBuffers(TemporalScheme == math::TVD_RK3 ? 2 : 1);

    for (int n=0, e=mTracker.getNormCount(); n < e; ++n) {

        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        switch(TemporalScheme) {//switch is resolved at compile-time
        case math::TVD_RK1:
            // Perform one explicit Euler step: t1 = t0 + dt
            // Phi_t1(0) = Phi_t0(0) - dt * VdotG_t0(1)
            mTask = std::bind(&Normalizer::euler01, ph::_1, ph::_2);

            // Cook and swap buffer 0 and 1 such that Phi_t1(0) and Phi_t0(1)
            this->cook("Normalizing level set using TVD_RK1", 1);
            break;
        case math::TVD_RK2:
            // Perform one explicit Euler step: t1 = t0 + dt
            // Phi_t1(1) = Phi_t0(0) - dt * VdotG_t0(1)
            mTask = std::bind(&Normalizer::euler01, ph::_1, ph::_2);

            // Cook and swap buffer 0 and 1 such that Phi_t1(0) and Phi_t0(1)
            // BUGFIX: these two progress messages previously said "TVD_RK1".
            this->cook("Normalizing level set using TVD_RK2 (step 1 of 2)", 1);

            // Convex combine explicit Euler step: t2 = t0 + dt
            // Phi_t2(1) = 1/2 * Phi_t0(1) + 1/2 * (Phi_t1(0) - dt * V.Grad_t1(0))
            mTask = std::bind(&Normalizer::euler12, ph::_1, ph::_2);

            // Cook and swap buffer 0 and 1 such that Phi_t2(0) and Phi_t1(1)
            this->cook("Normalizing level set using TVD_RK2 (step 2 of 2)", 1);
            break;
        case math::TVD_RK3:
            // Perform one explicit Euler step: t1 = t0 + dt
            // Phi_t1(1) = Phi_t0(0) - dt * VdotG_t0(1)
            mTask = std::bind(&Normalizer::euler01, ph::_1, ph::_2);

            // Cook and swap buffer 0 and 1 such that Phi_t1(0) and Phi_t0(1)
            this->cook("Normalizing level set using TVD_RK3 (step 1 of 3)", 1);

            // Convex combine explicit Euler step: t2 = t0 + dt/2
            // Phi_t2(2) = 3/4 * Phi_t0(1) + 1/4 * (Phi_t1(0) - dt * V.Grad_t1(0))
            mTask = std::bind(&Normalizer::euler34, ph::_1, ph::_2);

            // Cook and swap buffer 0 and 2 such that Phi_t2(0) and Phi_t1(2)
            this->cook("Normalizing level set using TVD_RK3 (step 2 of 3)", 2);

            // Convex combine explicit Euler step: t3 = t0 + dt
            // Phi_t3(2) = 1/3 * Phi_t0(1) + 2/3 * (Phi_t2(0) - dt * V.Grad_t2(0)
            mTask = std::bind(&Normalizer::euler13, ph::_1, ph::_2);

            // Cook and swap buffer 0 and 2 such that Phi_t3(0) and Phi_t2(2)
            this->cook("Normalizing level set using TVD_RK3 (step 3 of 3)", 2);
            break;
        case math::UNKNOWN_TIS:
        default:
            OPENVDB_THROW(ValueError, "Temporal integration scheme not supported!");
        }
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
    }
    mTracker.mLeafs->removeAuxBuffers();
}
/// Private method to perform the task (serial or threaded) and
/// subsequently swap the leaf buffers.
template<typename GridT, typename InterruptT>
template<math::BiasedGradientScheme SpatialScheme,
math::TemporalIntegrationScheme TemporalScheme,
typename MaskT>
inline void
LevelSetTracker<GridT, InterruptT>::
Normalizer<SpatialScheme, TemporalScheme, MaskT>::
cook(const char* msg, int swapBuffer)
{
mTracker.startInterrupter( msg );
const int grainSize = mTracker.getGrainSize();
const LeafRange range = mTracker.leafs().leafRange(grainSize);
grainSize>0 ? tbb::parallel_for(range, *this) : (*this)(range);
mTracker.leafs().swapLeafBuffer(swapBuffer, grainSize==0);
mTracker.endInterrupter();
}
/// @brief Compute one (convex-combined) explicit Euler update of the
/// re-normalization at voxel @a n and store it in @a result.
/// @details The update is phi0 - dt * S(phi0) * (|grad phi| - 1), where the
/// smeared sign function S(phi0) = phi0 / sqrt(phi0^2 + |grad phi|^2).
/// For Nominator != 0 the result is blended with the old buffer @a phi
/// using alpha = Nominator/Denominator (TVD Runge-Kutta combination).
template<typename GridT, typename InterruptT>
template<math::BiasedGradientScheme SpatialScheme,
         math::TemporalIntegrationScheme TemporalScheme,
         typename MaskT>
template <int Nominator, int Denominator>
inline void
LevelSetTracker<GridT, InterruptT>::
Normalizer<SpatialScheme, TemporalScheme, MaskT>::
eval(StencilT& stencil, const ValueType* phi, ValueType* result, Index n) const
{
    using GradientT = typename math::ISGradientNormSqrd<SpatialScheme>;
    // Convex-combination weights of the TVD Runge-Kutta sub-step.
    static const ValueType alpha = ValueType(Nominator)/ValueType(Denominator);
    static const ValueType beta = ValueType(1) - alpha;

    const ValueType normSqGradPhi = GradientT::result(stencil);
    const ValueType phi0 = stencil.getValue();
    // Smeared sign function; the tolerance guards against division by zero.
    ValueType v = phi0 / ( math::Sqrt(math::Pow2(phi0) + normSqGradPhi) +
                           math::Tolerance<ValueType>::value() );
    v = phi0 - mDt * v * (math::Sqrt(normSqGradPhi) * mInvDx - 1.0f);
    // Blend with the previous buffer only for higher-order sub-steps.
    result[n] = Nominator ? alpha * phi[n] + beta * v : v;
}
/// @brief Apply one Euler sub-step to every voxel of every leaf in @a range,
/// reading from buffer @a phiBuffer and writing into @a resultBuffer.
/// @details Without a mask all active voxels of the level set are updated;
/// with a mask only voxels active in the corresponding mask leaf are.
template<typename GridT, typename InterruptT>
template<math::BiasedGradientScheme SpatialScheme,
         math::TemporalIntegrationScheme TemporalScheme,
         typename MaskT>
template <int Nominator, int Denominator>
inline void
LevelSetTracker<GridT,InterruptT>::
Normalizer<SpatialScheme, TemporalScheme, MaskT>::
euler(const LeafRange& range, Index phiBuffer, Index resultBuffer)
{
    mTracker.checkInterrupter();

    StencilT stencil(mTracker.grid());

    for (typename LeafRange::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
        const ValueType* phi = leafIter.buffer(phiBuffer).data();
        ValueType* result = leafIter.buffer(resultBuffer).data();
        if (mMask == nullptr) {
            for (auto iter = leafIter->cbeginValueOn(); iter; ++iter) {
                stencil.moveTo(iter);
                this->eval<Nominator, Denominator>(stencil, phi, result, iter.pos());
            }//loop over active voxels in the leaf of the level set
        } else if (const MaskLeafT* mask = mMask->probeLeaf(leafIter->origin())) {
            // Current (buffer 0) values seed the stencil's center value.
            const ValueType* phi0 = leafIter->buffer().data();
            for (MaskIterT iter = mask->cbeginValueOn(); iter; ++iter) {
                const Index i = iter.pos();
                stencil.moveTo(iter.getCoord(), phi0[i]);
                this->eval<Nominator, Denominator>(stencil, phi, result, i);
            }//loop over active voxels in the leaf of the mask
        }
    }//loop over leafs of the level set
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_LEVEL_SET_TRACKER_HAS_BEEN_INCLUDED
| 26,222 | C | 37.563235 | 95 | 0.661658 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/PointScatter.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @author Ken Museth
///
/// @file tools/PointScatter.h
///
/// @brief We offer three different algorithms (each in its own class)
/// for scattering of points in active voxels:
///
/// 1) UniformPointScatter. Has two modes: Either randomly distributes
/// a fixed number of points into the active voxels, or the user can
/// specify a fixed probability of having a points per unit of volume.
///
/// 2) DenseUniformPointScatter. Randomly distributes points into active
/// voxels using a fixed number of points per voxel.
///
/// 3) NonUniformPointScatter. Define the local probability of having
/// a point in a voxel as the product of a global density and the
/// value of the voxel itself.
#ifndef OPENVDB_TOOLS_POINT_SCATTER_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_POINT_SCATTER_HAS_BEEN_INCLUDED
#include <openvdb/Types.h>
#include <openvdb/Grid.h>
#include <openvdb/math/Math.h>
#include <openvdb/util/NullInterrupter.h>
#include <tbb/parallel_sort.h>
#include <tbb/parallel_for.h>
#include <iostream>
#include <memory>
#include <string>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// Forward declaration of base class
template<typename PointAccessorType,
typename RandomGenerator,
typename InterruptType = util::NullInterrupter>
class BasePointScatter;
/// @brief The two point scatters UniformPointScatter and
/// NonUniformPointScatter depend on the following two classes:
///
/// The @c PointAccessorType template argument below refers to any class
/// with the following interface:
/// @code
/// class PointAccessor {
/// ...
/// public:
/// void add(const openvdb::Vec3R &pos);// appends point with world positions pos
/// };
/// @endcode
///
///
/// The @c InterruptType template argument below refers to any class
/// with the following interface:
/// @code
/// class Interrupter {
/// ...
/// public:
/// void start(const char* name = nullptr) // called when computations begin
/// void end() // called when computations end
/// bool wasInterrupted(int percent=-1) // return true to break computation
///};
/// @endcode
///
/// @note If no template argument is provided for this InterruptType
/// the util::NullInterrupter is used which implies that all
/// interrupter calls are no-ops (i.e. incurs no computational overhead).
/// @brief Uniformly scatters points in the active voxels.
/// The point count is either explicitly defined or implicitly
/// through the specification of a global density (=points-per-volume)
///
/// @note This uniform scattering technique assumes that the number of
/// points is generally smaller than the number of active voxels
/// (including virtual active voxels in active tiles).
template<typename PointAccessorType,
         typename RandomGenerator,
         typename InterruptType = util::NullInterrupter>
class UniformPointScatter : public BasePointScatter<PointAccessorType,
                                                    RandomGenerator,
                                                    InterruptType>
{
public:
    using BaseT = BasePointScatter<PointAccessorType, RandomGenerator, InterruptType>;

    /// @brief Construct with an explicit total point count.
    UniformPointScatter(PointAccessorType& points,
                        Index64 pointCount,
                        RandomGenerator& randGen,
                        double spread = 1.0,
                        InterruptType* interrupt = nullptr)
        : BaseT(points, randGen, spread, interrupt)
        , mTargetPointCount(pointCount)
        , mPointsPerVolume(0.0f)
    {
    }
    /// @brief Construct with a global density (points per unit volume); the
    /// total point count is derived from the grid's active voxel count.
    UniformPointScatter(PointAccessorType& points,
                        float pointsPerVolume,
                        RandomGenerator& randGen,
                        double spread = 1.0,
                        InterruptType* interrupt = nullptr)
        : BaseT(points, randGen, spread, interrupt)
        , mTargetPointCount(0)
        , mPointsPerVolume(pointsPerVolume)
    {
    }

    /// This is the main functor method implementing the actual scattering of points.
    /// @return false if the grid has no active voxels, neither a density nor
    /// a point count was specified, or the interrupter fired.
    template<typename GridT>
    bool operator()(const GridT& grid)
    {
        mVoxelCount = grid.activeVoxelCount();
        if (mVoxelCount == 0) return false;

        // Derive whichever of {total count, density} was not specified.
        const auto voxelVolume = grid.transform().voxelVolume();
        if (mPointsPerVolume > 0) {
            BaseT::start("Uniform scattering with fixed point density");
            mTargetPointCount = Index64(mPointsPerVolume * voxelVolume * double(mVoxelCount));
        } else if (mTargetPointCount > 0) {
            BaseT::start("Uniform scattering with fixed point count");
            mPointsPerVolume = float(mTargetPointCount) / float(voxelVolume * double(mVoxelCount));
        } else {
            return false;
        }

        // Draw one random (virtual) voxel id per point, then sort so the
        // active-value iterator below only has to advance forward.
        std::unique_ptr<Index64[]> idList{new Index64[mTargetPointCount]};
        math::RandInt<Index64, RandomGenerator> rand(BaseT::mRand01.engine(), 0, mVoxelCount-1);
        for (Index64 i=0; i<mTargetPointCount; ++i) idList[i] = rand();
        tbb::parallel_sort(idList.get(), idList.get() + mTargetPointCount);

        CoordBBox bbox;
        const Vec3R offset(0.5, 0.5, 0.5);
        typename GridT::ValueOnCIter valueIter = grid.cbeginValueOn();

        // Walk the sorted ids and the active-value iterator in lockstep;
        // n is the running count of (virtual) voxels visited so far.
        for (Index64 i=0, n=valueIter.getVoxelCount() ; i != mTargetPointCount; ++i) {
            if (BaseT::interrupt()) return false;
            const Index64 voxelId = idList[i];
            while ( n <= voxelId ) {
                ++valueIter;
                n += valueIter.getVoxelCount();
            }
            if (valueIter.isVoxelValue()) {// a majority is expected to be voxels
                BaseT::addPoint(grid, valueIter.getCoord() - offset);
            } else {// tiles contain multiple (virtual) voxels
                valueIter.getBoundingBox(bbox);
                BaseT::addPoint(grid, bbox.min() - offset, bbox.extents());
            }
        }//loop over all the active voxels and tiles

        BaseT::end();
        return true;
    }

    // The following methods should only be called after
    // the operator() method was called
    void print(const std::string &name, std::ostream& os = std::cout) const
    {
        os << "Uniformly scattered " << mPointCount << " points into " << mVoxelCount
           << " active voxels in \"" << name << "\" corresponding to "
           << mPointsPerVolume << " points per volume." << std::endl;
    }

    float getPointsPerVolume() const { return mPointsPerVolume; }
    Index64 getTargetPointCount() const { return mTargetPointCount; }

private:
    using BaseT::mPointCount;
    using BaseT::mVoxelCount;
    Index64 mTargetPointCount;// total number of points to scatter
    float mPointsPerVolume;// global density; 0 when a fixed count was given

}; // class UniformPointScatter
/// @brief Scatters a fixed (and integer) number of points in all
/// active voxels and tiles.
template<typename PointAccessorType,
         typename RandomGenerator,
         typename InterruptType = util::NullInterrupter>
class DenseUniformPointScatter : public BasePointScatter<PointAccessorType,
                                                         RandomGenerator,
                                                         InterruptType>
{
public:
    using BaseT = BasePointScatter<PointAccessorType, RandomGenerator, InterruptType>;
    /// @param points         accessor that receives the scattered world-space points
    /// @param pointsPerVoxel number of points per active voxel; a fractional part
    ///                       is honored stochastically
    /// @param randGen        random number generator
    /// @param spread         fraction of a voxel within which points may deviate from its center
    /// @param interrupt      optional interrupter (may be null)
    DenseUniformPointScatter(PointAccessorType& points,
                             float pointsPerVoxel,
                             RandomGenerator& randGen,
                             double spread = 1.0,
                             InterruptType* interrupt = nullptr)
        : BaseT(points, randGen, spread, interrupt)
        , mPointsPerVoxel(pointsPerVoxel)
    {
    }
    /// This is the main functor method implementing the actual scattering of points.
    /// @return false if the point density is (near) zero, the grid has no active
    ///         voxels, or the operation was interrupted.
    template<typename GridT>
    bool operator()(const GridT& grid)
    {
        using ValueIter = typename GridT::ValueOnCIter;
        if (mPointsPerVoxel < 1.0e-6) return false;
        mVoxelCount = grid.activeVoxelCount();
        if (mVoxelCount == 0) return false;
        BaseT::start("Dense uniform scattering with fixed point count");
        CoordBBox bbox;
        const Vec3R offset(0.5, 0.5, 0.5);
        // Split the density into an integer part (ppv points in every voxel) and a
        // fractional remainder realized as one extra point with probability delta.
        const int ppv = math::Floor(mPointsPerVoxel);
        const double delta = mPointsPerVoxel - float(ppv);
        const bool fractional = !math::isApproxZero(delta, 1.0e-6);
        for (ValueIter iter = grid.cbeginValueOn(); iter; ++iter) {
            if (BaseT::interrupt()) return false;
            if (iter.isVoxelValue()) {// a majority is expected to be voxels
                const Vec3R dmin = iter.getCoord() - offset;
                for (int n = 0; n != ppv; ++n) BaseT::addPoint(grid, dmin);
                if (fractional && BaseT::getRand01() < delta) BaseT::addPoint(grid, dmin);
            } else {// tiles contain multiple (virtual) voxels
                iter.getBoundingBox(bbox);
                const Coord size(bbox.extents());
                const Vec3R dmin = bbox.min() - offset;
                // Scale the expected count by the number of voxels the tile spans.
                const double d = mPointsPerVoxel * float(iter.getVoxelCount());
                const int m = math::Floor(d);
                for (int n = 0; n != m; ++n) BaseT::addPoint(grid, dmin, size);
                if (BaseT::getRand01() < d - m) BaseT::addPoint(grid, dmin, size);
            }
        }//loop over all the active voxels and tiles
        BaseT::end();
        return true;
    }
    // The following methods should only be called after
    // the operator() method was called
    /// Print a human-readable summary of the scatter to @a os.
    void print(const std::string &name, std::ostream& os = std::cout) const
    {
        os << "Dense uniformly scattered " << mPointCount << " points into " << mVoxelCount
           << " active voxels in \"" << name << "\" corresponding to "
           << mPointsPerVoxel << " points per voxel." << std::endl;
    }
    /// Return the number of points per active voxel used by the scatter.
    float getPointsPerVoxel() const { return mPointsPerVoxel; }
private:
    using BaseT::mPointCount;
    using BaseT::mVoxelCount;
    float mPointsPerVoxel; // target number of points per active voxel (may be fractional)
}; // class DenseUniformPointScatter
/// @brief Non-uniform scattering of points in the active voxels.
/// The local point count is implicitly defined as the product of
/// a global density (called pointsPerVolume) and the local voxel
/// (or tile) value.
///
/// @note This scattering technique can be significantly slower
/// than a uniform scattering since its computational complexity
/// is proportional to the active voxel (and tile) count.
template<typename PointAccessorType,
         typename RandomGenerator,
         typename InterruptType = util::NullInterrupter>
class NonUniformPointScatter : public BasePointScatter<PointAccessorType,
                                                       RandomGenerator,
                                                       InterruptType>
{
public:
    using BaseT = BasePointScatter<PointAccessorType, RandomGenerator, InterruptType>;
    /// @param points          accessor that receives the scattered world-space points
    /// @param pointsPerVolume global density multiplier applied to each local voxel value
    /// @param randGen         random number generator
    /// @param spread          fraction of a voxel within which points may deviate from its center
    /// @param interrupt       optional interrupter (may be null)
    NonUniformPointScatter(PointAccessorType& points,
                           float pointsPerVolume,
                           RandomGenerator& randGen,
                           double spread = 1.0,
                           InterruptType* interrupt = nullptr)
        : BaseT(points, randGen, spread, interrupt)
        , mPointsPerVolume(pointsPerVolume)//note this is merely a
                                           //multiplier for the local point density
    {
    }
    /// This is the main functor method implementing the actual scattering of points.
    /// @return false if the density multiplier is non-positive, the grid has no
    ///         active voxels, or the operation was interrupted.
    template<typename GridT>
    bool operator()(const GridT& grid)
    {
        if (mPointsPerVolume <= 0.0f) return false;
        mVoxelCount = grid.activeVoxelCount();
        if (mVoxelCount == 0) return false;
        BaseT::start("Non-uniform scattering with local point density");
        const Vec3d dim = grid.voxelSize();
        // Convert the world-space density into an expected point count per voxel.
        const double volumePerVoxel = dim[0]*dim[1]*dim[2],
                     pointsPerVoxel = mPointsPerVolume * volumePerVoxel;
        CoordBBox bbox;
        const Vec3R offset(0.5, 0.5, 0.5);
        for (typename GridT::ValueOnCIter iter = grid.cbeginValueOn(); iter; ++iter) {
            if (BaseT::interrupt()) return false;
            // Expected point count for this voxel/tile: local value times global
            // density, scaled by the number of (virtual) voxels the tile spans.
            const double d = double(*iter) * pointsPerVoxel * double(iter.getVoxelCount());
            const int n = int(d);
            if (iter.isVoxelValue()) { // a majority is expected to be voxels
                const Vec3R dmin =iter.getCoord() - offset;
                for (int i = 0; i < n; ++i) BaseT::addPoint(grid, dmin);
                // Realize the fractional remainder stochastically.
                if (BaseT::getRand01() < (d - n)) BaseT::addPoint(grid, dmin);
            } else { // tiles contain multiple (virtual) voxels
                iter.getBoundingBox(bbox);
                const Coord size(bbox.extents());
                const Vec3R dmin = bbox.min() - offset;
                for (int i = 0; i < n; ++i) BaseT::addPoint(grid, dmin, size);
                if (BaseT::getRand01() < (d - n)) BaseT::addPoint(grid, dmin, size);
            }
        }//loop over all the active voxels and tiles
        BaseT::end();
        return true;
    }
    // The following methods should only be called after
    // the operator() method was called
    /// Print a human-readable summary of the scatter to @a os.
    void print(const std::string &name, std::ostream& os = std::cout) const
    {
        os << "Non-uniformly scattered " << mPointCount << " points into " << mVoxelCount
           << " active voxels in \"" << name << "\"." << std::endl;
    }
    /// Return the global density multiplier.
    float getPointPerVolume() const { return mPointsPerVolume; }
private:
    using BaseT::mPointCount;
    using BaseT::mVoxelCount;
    float mPointsPerVolume; // global density multiplier (not an absolute density)
}; // class NonUniformPointScatter
/// Base class of all the point scattering classes defined above
template<typename PointAccessorType,
typename RandomGenerator,
typename InterruptType>
class BasePointScatter
{
public:
Index64 getPointCount() const { return mPointCount; }
Index64 getVoxelCount() const { return mVoxelCount; }
protected:
PointAccessorType& mPoints;
InterruptType* mInterrupter;
Index64 mPointCount;
Index64 mVoxelCount;
Index64 mInterruptCount;
const double mSpread;
math::Rand01<double, RandomGenerator> mRand01;
/// This is a base class so the constructor is protected
BasePointScatter(PointAccessorType& points,
RandomGenerator& randGen,
double spread,
InterruptType* interrupt = nullptr)
: mPoints(points)
, mInterrupter(interrupt)
, mPointCount(0)
, mVoxelCount(0)
, mInterruptCount(0)
, mSpread(math::Clamp01(spread))
, mRand01(randGen)
{
}
inline void start(const char* name)
{
if (mInterrupter) mInterrupter->start(name);
}
inline void end()
{
if (mInterrupter) mInterrupter->end();
}
inline bool interrupt()
{
//only check interrupter for every 32'th call
return !(mInterruptCount++ & ((1<<5)-1)) && util::wasInterrupted(mInterrupter);
}
/// @brief Return a random floating point number between zero and one
inline double getRand01() { return mRand01(); }
/// @brief Return a random floating point number between 0.5 -+ mSpread/2
inline double getRand() { return 0.5 + mSpread * (mRand01() - 0.5); }
template <typename GridT>
inline void addPoint(const GridT &grid, const Vec3R &dmin)
{
const Vec3R pos(dmin[0] + this->getRand(),
dmin[1] + this->getRand(),
dmin[2] + this->getRand());
mPoints.add(grid.indexToWorld(pos));
++mPointCount;
}
template <typename GridT>
inline void addPoint(const GridT &grid, const Vec3R &dmin, const Coord &size)
{
const Vec3R pos(dmin[0] + size[0]*this->getRand(),
dmin[1] + size[1]*this->getRand(),
dmin[2] + size[2]*this->getRand());
mPoints.add(grid.indexToWorld(pos));
++mPointCount;
}
};// class BasePointScatter
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_POINT_SCATTER_HAS_BEEN_INCLUDED
| 16,293 | C | 37.611374 | 99 | 0.61241 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/ChangeBackground.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file ChangeBackground.h
///
/// @brief Efficient multi-threaded replacement of the background
/// values in tree.
///
/// @author Ken Museth
#ifndef OPENVDB_TOOLS_ChangeBACKGROUND_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_ChangeBACKGROUND_HAS_BEEN_INCLUDED
#include <openvdb/math/Math.h> // for isNegative and negative
#include <openvdb/Types.h> // for Index typedef
#include <openvdb/tree/NodeManager.h>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Replace the background value in all the nodes of a tree.
/// @details The sign of the background value is preserved, and only
/// inactive values equal to the old background value are replaced.
///
/// @note If a LeafManager is used the cached leaf nodes are reused,
/// resulting in slightly better overall performance.
///
/// @param tree Tree (or LeafManager) that will have its background value changed
/// @param background the new background value
/// @param threaded enable or disable threading (threading is enabled by default)
/// @param grainSize used to control the threading granularity (default is 32)
template<typename TreeOrLeafManagerT>
inline void
changeBackground(
TreeOrLeafManagerT& tree,
const typename TreeOrLeafManagerT::ValueType& background,
bool threaded = true,
size_t grainSize = 32);
/// @brief Replace the background value in all the nodes of a floating-point tree
/// containing a symmetric narrow-band level set.
/// @details All inactive values will be set to +| @a halfWidth | if outside
/// and -| @a halfWidth | if inside, where @a halfWidth is half the width
/// of the symmetric narrow band.
///
/// @note This method is faster than changeBackground since it does not
/// perform tests to see if inactive values are equal to the old background value.
/// @note If a LeafManager is used the cached leaf nodes are reused,
/// resulting in slightly better overall performance.
///
/// @param tree Tree (or LeafManager) that will have its background value changed
/// @param halfWidth half of the width of the symmetric narrow band
/// @param threaded enable or disable threading (threading is enabled by default)
/// @param grainSize used to control the threading granularity (default is 32)
///
/// @throw ValueError if @a halfWidth is negative (as defined by math::isNegative)
template<typename TreeOrLeafManagerT>
inline void
changeLevelSetBackground(
TreeOrLeafManagerT& tree,
const typename TreeOrLeafManagerT::ValueType& halfWidth,
bool threaded = true,
size_t grainSize = 32);
/// @brief Replace the background values in all the nodes of a floating-point tree
/// containing a possibly asymmetric narrow-band level set.
/// @details All inactive values will be set to +| @a outsideWidth | if outside
/// and -| @a insideWidth | if inside, where @a outsideWidth is the outside
/// width of the narrow band and @a insideWidth is its inside width.
///
/// @note This method is faster than changeBackground since it does not
/// perform tests to see if inactive values are equal to the old background value.
/// @note If a LeafManager is used the cached leaf nodes are reused,
/// resulting in slightly better overall performance.
///
/// @param tree Tree (or LeafManager) that will have its background value changed
/// @param outsideWidth The width of the outside of the narrow band
/// @param insideWidth The width of the inside of the narrow band
/// @param threaded enable or disable threading (threading is enabled by default)
/// @param grainSize used to control the threading granularity (default is 32)
///
/// @throw ValueError if @a outsideWidth is negative or @a insideWidth is
/// not negative (as defined by math::isNegative)
template<typename TreeOrLeafManagerT>
inline void
changeAsymmetricLevelSetBackground(
TreeOrLeafManagerT& tree,
const typename TreeOrLeafManagerT::ValueType& outsideWidth,
const typename TreeOrLeafManagerT::ValueType& insideWidth,
bool threaded = true,
size_t grainSize = 32);
//////////////////////////////////////////////////////
// Replaces the background value in a Tree of any type.
// Only inactive values equal to +/- the old background are remapped to
// +/- the new background; all other inactive values are left untouched.
template<typename TreeOrLeafManagerT>
class ChangeBackgroundOp
{
public:
    typedef typename TreeOrLeafManagerT::ValueType ValueT;
    typedef typename TreeOrLeafManagerT::RootNodeType RootT;
    typedef typename TreeOrLeafManagerT::LeafNodeType LeafT;
    /// @param tree     tree whose current background defines the value to replace
    /// @param newValue replacement background value
    ChangeBackgroundOp(const TreeOrLeafManagerT& tree, const ValueT& newValue)
        : mOldValue(tree.root().background())
        , mNewValue(newValue)
    {
    }
    // Root node: remap its inactive values, then register the new background.
    void operator()(RootT& root) const
    {
        for (typename RootT::ValueOffIter it = root.beginValueOff(); it; ++it) this->set(it);
        root.setBackground(mNewValue, false);
    }
    // Leaf node: remap all of its inactive voxel values.
    void operator()(LeafT& node) const
    {
        for (typename LeafT::ValueOffIter it = node.beginValueOff(); it; ++it) this->set(it);
    }
    // Internal node: remap its inactive tile values.
    template<typename NodeT>
    void operator()(NodeT& node) const
    {
        // Copy the value-off mask and walk its on-bits (i.e. the node's inactive
        // values); a ValueOnIter is used here only because it iterates over the
        // on-bits of whatever mask it is given.
        typename NodeT::NodeMaskType mask = node.getValueOffMask();
        for (typename NodeT::ValueOnIter it(mask.beginOn(), &node); it; ++it) this->set(it);
    }
private:
    // Remap one inactive value, preserving its sign relative to the old background.
    template<typename IterT>
    inline void set(IterT& iter) const
    {
        if (math::isApproxEqual(*iter, mOldValue)) {
            iter.setValue(mNewValue);
        } else if (math::isApproxEqual(*iter, math::negative(mOldValue))) {
            iter.setValue(math::negative(mNewValue));
        }
    }
    const ValueT mOldValue, mNewValue;
};// ChangeBackgroundOp
// Replaces the background value in a Tree assumed to represent a
// level set. It is generally faster than ChangeBackgroundOp.
// Note that it follows the sign-convention that outside is positive
// and inside is negative!
template<typename TreeOrLeafManagerT>
class ChangeLevelSetBackgroundOp
{
public:
    typedef typename TreeOrLeafManagerT::ValueType ValueT;
    typedef typename TreeOrLeafManagerT::RootNodeType RootT;
    typedef typename TreeOrLeafManagerT::LeafNodeType LeafT;
    /// @brief Constructor for asymmetric narrow-bands
    /// @param outside new exterior background value (must not be negative)
    /// @param inside  new interior background value (must be negative)
    /// @throw ValueError if either sign requirement is violated
    ChangeLevelSetBackgroundOp(const ValueT& outside, const ValueT& inside)
        : mOutside(outside)
        , mInside(inside)
    {
        if (math::isNegative(mOutside)) {
            OPENVDB_THROW(ValueError,
                "ChangeLevelSetBackgroundOp: the outside value cannot be negative!");
        }
        if (!math::isNegative(mInside)) {
            OPENVDB_THROW(ValueError,
                "ChangeLevelSetBackgroundOp: the inside value must be negative!");
        }
    }
    // Root node: rewrite its inactive values, then register the new background.
    void operator()(RootT& root) const
    {
        for (typename RootT::ValueOffIter it = root.beginValueOff(); it; ++it) this->set(it);
        root.setBackground(mOutside, false);
    }
    // Leaf node: rewrite all of its inactive voxel values.
    void operator()(LeafT& node) const
    {
        for(typename LeafT::ValueOffIter it = node.beginValueOff(); it; ++it) this->set(it);
    }
    // Internal node: rewrite its inactive (child-off) tile values.
    template<typename NodeT>
    void operator()(NodeT& node) const
    {
        typedef typename NodeT::ValueOffIter IterT;
        for (IterT it(node.getChildMask().beginOff(), &node); it; ++it) this->set(it);
    }
private:
    // Overwrite a single inactive value based only on its current sign.
    template<typename IterT>
    inline void set(IterT& iter) const
    {
        //this is safe since we know ValueType is_floating_point
        ValueT& v = const_cast<ValueT&>(*iter);
        v = v < 0 ? mInside : mOutside;
    }
    const ValueT mOutside, mInside;
};// ChangeLevelSetBackgroundOp
template<typename TreeOrLeafManagerT>
inline void
changeBackground(
TreeOrLeafManagerT& tree,
const typename TreeOrLeafManagerT::ValueType& background,
bool threaded,
size_t grainSize)
{
tree::NodeManager<TreeOrLeafManagerT> linearTree(tree);
ChangeBackgroundOp<TreeOrLeafManagerT> op(tree, background);
linearTree.foreachTopDown(op, threaded, grainSize);
}
template<typename TreeOrLeafManagerT>
inline void
changeAsymmetricLevelSetBackground(
TreeOrLeafManagerT& tree,
const typename TreeOrLeafManagerT::ValueType& outsideValue,
const typename TreeOrLeafManagerT::ValueType& insideValue,
bool threaded,
size_t grainSize)
{
tree::NodeManager<TreeOrLeafManagerT> linearTree(tree);
ChangeLevelSetBackgroundOp<TreeOrLeafManagerT> op(outsideValue, insideValue);
linearTree.foreachTopDown(op, threaded, grainSize);
}
// If the narrow-band is symmetric only one background value is required
template<typename TreeOrLeafManagerT>
inline void
changeLevelSetBackground(
    TreeOrLeafManagerT& tree,
    const typename TreeOrLeafManagerT::ValueType& background,
    bool threaded,
    size_t grainSize)
{
    // Delegate to the asymmetric version, using -background for the inside.
    changeAsymmetricLevelSetBackground(
        tree, background, math::negative(background), threaded, grainSize);
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_ChangeBACKGROUND_HAS_BEEN_INCLUDED
| 9,036 | C | 35.439516 | 95 | 0.711598 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Statistics.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file Statistics.h
///
/// @brief Functions to efficiently compute histograms, extrema
/// (min/max) and statistics (mean, variance, etc.) of grid values
#ifndef OPENVDB_TOOLS_STATISTICS_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_STATISTICS_HAS_BEEN_INCLUDED
#include <openvdb/Types.h>
#include <openvdb/Exceptions.h>
#include <openvdb/math/Stats.h>
#include "ValueTransformer.h"
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Iterate over a scalar grid and compute a histogram of the values
/// of the voxels that are visited, or iterate over a vector-valued grid
/// and compute a histogram of the magnitudes of the vectors.
/// @param iter an iterator over the values of a grid or its tree
/// (@c Grid::ValueOnCIter, @c Tree::ValueOffIter, etc.)
/// @param minVal the smallest value that can be added to the histogram
/// @param maxVal the largest value that can be added to the histogram
/// @param numBins the number of histogram bins
/// @param threaded if true, iterate over the grid in parallel
template<typename IterT>
inline math::Histogram
histogram(const IterT& iter, double minVal, double maxVal,
size_t numBins = 10, bool threaded = true);
/// @brief Iterate over a scalar grid and compute extrema (min/max) of the
/// values of the voxels that are visited, or iterate over a vector-valued grid
/// and compute extrema of the magnitudes of the vectors.
/// @param iter an iterator over the values of a grid or its tree
/// (@c Grid::ValueOnCIter, @c Tree::ValueOffIter, etc.)
/// @param threaded if true, iterate over the grid in parallel
template<typename IterT>
inline math::Extrema
extrema(const IterT& iter, bool threaded = true);
/// @brief Iterate over a scalar grid and compute statistics (mean, variance, etc.)
/// of the values of the voxels that are visited, or iterate over a vector-valued grid
/// and compute statistics of the magnitudes of the vectors.
/// @param iter an iterator over the values of a grid or its tree
/// (@c Grid::ValueOnCIter, @c Tree::ValueOffIter, etc.)
/// @param threaded if true, iterate over the grid in parallel
template<typename IterT>
inline math::Stats
statistics(const IterT& iter, bool threaded = true);
/// @brief Iterate over a grid and compute extrema (min/max) of
/// the values produced by applying the given functor at each voxel that is visited.
/// @param iter an iterator over the values of a grid or its tree
/// (@c Grid::ValueOnCIter, @c Tree::ValueOffIter, etc.)
/// @param op a functor of the form <tt>void op(const IterT&, math::Stats&)</tt>,
/// where @c IterT is the type of @a iter, that inserts zero or more
/// floating-point values into the provided @c math::Stats object
/// @param threaded if true, iterate over the grid in parallel
/// @note When @a threaded is true, each thread gets its own copy of the functor.
///
/// @par Example:
/// Compute statistics of just the active and positive-valued voxels of a scalar,
/// floating-point grid.
/// @code
/// struct Local {
/// static inline
/// void addIfPositive(const FloatGrid::ValueOnCIter& iter, math::Extrema& ex)
/// {
/// const float f = *iter;
/// if (f > 0.0) {
/// if (iter.isVoxelValue()) ex.add(f);
/// else ex.add(f, iter.getVoxelCount());
/// }
/// }
/// };
/// FloatGrid grid = ...;
/// math::Extrema stats =
/// tools::extrema(grid.cbeginValueOn(), Local::addIfPositive, /*threaded=*/true);
/// @endcode
template<typename IterT, typename ValueOp>
inline math::Extrema
extrema(const IterT& iter, const ValueOp& op, bool threaded);
/// @brief Iterate over a grid and compute statistics (mean, variance, etc.) of
/// the values produced by applying the given functor at each voxel that is visited.
/// @param iter an iterator over the values of a grid or its tree
/// (@c Grid::ValueOnCIter, @c Tree::ValueOffIter, etc.)
/// @param op a functor of the form <tt>void op(const IterT&, math::Stats&)</tt>,
/// where @c IterT is the type of @a iter, that inserts zero or more
/// floating-point values into the provided @c math::Stats object
/// @param threaded if true, iterate over the grid in parallel
/// @note When @a threaded is true, each thread gets its own copy of the functor.
///
/// @par Example:
/// Compute statistics of just the active and positive-valued voxels of a scalar,
/// floating-point grid.
/// @code
/// struct Local {
/// static inline
/// void addIfPositive(const FloatGrid::ValueOnCIter& iter, math::Stats& stats)
/// {
/// const float f = *iter;
/// if (f > 0.0) {
/// if (iter.isVoxelValue()) stats.add(f);
/// else stats.add(f, iter.getVoxelCount());
/// }
/// }
/// };
/// FloatGrid grid = ...;
/// math::Stats stats =
/// tools::statistics(grid.cbeginValueOn(), Local::addIfPositive, /*threaded=*/true);
/// @endcode
template<typename IterT, typename ValueOp>
inline math::Stats
statistics(const IterT& iter, const ValueOp& op, bool threaded);
/// @brief Iterate over a grid and compute statistics (mean, variance, etc.)
/// of the values produced by applying a given operator (see math/Operators.h)
/// at each voxel that is visited.
/// @param iter an iterator over the values of a grid or its tree
/// (@c Grid::ValueOnCIter, @c Tree::ValueOffIter, etc.)
/// @param op an operator object with a method of the form
/// <tt>double result(Accessor&, const Coord&)</tt>
/// @param threaded if true, iterate over the grid in parallel
/// @note World-space operators, whose @c result() methods are of the form
/// <tt>double result(const Map&, Accessor&, const Coord&)</tt>, must be wrapped
/// in a math::MapAdapter.
/// @note Vector-valued operators like math::Gradient must be wrapped in an adapter
/// such as math::OpMagnitude.
///
/// @par Example:
/// Compute statistics of the magnitude of the gradient at the active voxels of
/// a scalar, floating-point grid. (Note the use of the math::MapAdapter and
/// math::OpMagnitude adapters.)
/// @code
/// FloatGrid grid = ...;
///
/// // Assume that we know that the grid has a uniform scale map.
/// using MapType = math::UniformScaleMap;
/// // Specify a world-space gradient operator that uses first-order differencing.
/// using GradientOp = math::Gradient<MapType, math::FD_1ST>;
/// // Wrap the operator with an adapter that computes the magnitude of the gradient.
/// using MagnitudeOp = math::OpMagnitude<GradientOp, MapType>;
/// // Wrap the operator with an adapter that associates a map with it.
/// using CompoundOp = math::MapAdapter<MapType, GradientOp, double>;
///
/// if (MapType::Ptr map = grid.constTransform().constMap<MapType>()) {
/// math::Stats stats = tools::opStatistics(grid.cbeginValueOn(), CompoundOp(*map));
/// }
/// @endcode
///
/// @par Example:
/// Compute statistics of the divergence at the active voxels of a vector-valued grid.
/// @code
/// Vec3SGrid grid = ...;
///
/// // Assume that we know that the grid has a uniform scale map.
/// using MapType = math::UniformScaleMap;
/// // Specify a world-space divergence operator that uses first-order differencing.
/// using DivergenceOp = math::Divergence<MapType, math::FD_1ST>;
/// // Wrap the operator with an adapter that associates a map with it.
/// using CompoundOp = math::MapAdapter<MapType, DivergenceOp, double>;
///
/// if (MapType::Ptr map = grid.constTransform().constMap<MapType>()) {
/// math::Stats stats = tools::opStatistics(grid.cbeginValueOn(), CompoundOp(*map));
/// }
/// @endcode
///
/// @par Example:
/// As above, but computing the divergence in index space.
/// @code
/// Vec3SGrid grid = ...;
///
/// // Specify an index-space divergence operator that uses first-order differencing.
/// using DivergenceOp = math::ISDivergence<math::FD_1ST>;
///
/// math::Stats stats = tools::opStatistics(grid.cbeginValueOn(), DivergenceOp());
/// @endcode
template<typename OperatorT, typename IterT>
inline math::Stats
opStatistics(const IterT& iter, const OperatorT& op = OperatorT(), bool threaded = true);
/// @brief Same as opStatistics except it returns a math::Extrema vs a math::Stats
template<typename OperatorT, typename IterT>
inline math::Extrema
opExtrema(const IterT& iter, const OperatorT& op = OperatorT(), bool threaded = true);
////////////////////////////////////////
namespace stats_internal {
/// @todo This traits class is needed because tree::TreeValueIteratorBase uses
/// the name ValueT for the type of the value to which the iterator points,
/// whereas node-level iterators use the name ValueType.
template<typename IterT, typename AuxT = void>
struct IterTraits {
    using ValueType = typename IterT::ValueType;
};
// Partial specialization for tree-level iterators, which expose the
// pointee type under the name ValueT instead of ValueType.
template<typename TreeT, typename ValueIterT>
struct IterTraits<tree::TreeValueIteratorBase<TreeT, ValueIterT> > {
    using ValueType = typename tree::TreeValueIteratorBase<TreeT, ValueIterT>::ValueT;
};
// Helper class to compute a scalar value from either a scalar or a vector value
// (the latter by computing the vector's magnitude)
template<typename T, bool IsVector> struct GetValImpl;
template<typename T>
struct GetValImpl<T, /*IsVector=*/false> {
static inline double get(const T& val) { return double(val); }
};
template<typename T>
struct GetValImpl<T, /*IsVector=*/true> {
static inline double get(const T& val) { return val.length(); }
};
// Helper class to compute a scalar value from a tree or node iterator
// that points to a value in either a scalar or a vector grid, and to
// add that value to a math::Stats object.
template<typename IterT, typename StatsT>
struct GetVal
{
    using ValueT = typename IterTraits<IterT>::ValueType;
    using ImplT = GetValImpl<ValueT, VecTraits<ValueT>::IsVec>;
    inline void operator()(const IterT& it, StatsT& stats) const {
        // A tile stands for getVoxelCount() identical values, so weight it accordingly.
        const double scalar = ImplT::get(*it);
        if (it.isVoxelValue()) {
            stats.add(scalar);
        } else {
            stats.add(scalar, it.getVoxelCount());
        }
    }
};
// Helper class to accumulate scalar voxel values or vector voxel magnitudes
// into a math::Stats object
template<typename IterT, typename ValueOp, typename StatsT>
struct StatsOp
{
    StatsOp(const ValueOp& op): getValue(op) {}
    // Fold the value(s) referenced by the iterator into the running statistics.
    inline void operator()(const IterT& it) { getValue(it, stats); }
    // Merge the partial result computed by another thread (reduction join).
    inline void join(StatsOp& other) { stats.add(other.stats); }
    StatsT stats;
    ValueOp getValue;
};
// Helper class to accumulate scalar voxel values or vector voxel magnitudes
// into a math::Histogram object
template<typename IterT, typename ValueOp>
struct HistOp
{
    HistOp(const ValueOp& op, double vmin, double vmax, size_t bins):
        hist(vmin, vmax, bins), getValue(op)
    {}
    // Deposit the value(s) referenced by the iterator into the histogram.
    inline void operator()(const IterT& it) { getValue(it, hist); }
    // Merge the partial histogram computed by another thread (reduction join).
    inline void join(HistOp& other) { hist.add(other.hist); }
    math::Histogram hist;
    ValueOp getValue;
};
// Helper class to apply an operator such as math::Gradient or math::Laplacian
// to voxels and accumulate the scalar results or the magnitudes of vector results
// into a math::Stats object
template<typename IterT, typename OpT, typename StatsT>
struct MathOp
{
    using TreeT = typename IterT::TreeT;
    using ValueT = typename TreeT::ValueType;
    using ConstAccessor = typename tree::ValueAccessor<const TreeT>;
    // Each thread gets its own accessor and its own copy of the operator.
    ConstAccessor mAcc;
    OpT mOp;
    StatsT mStats;
    // Throw if the iterator references no tree. (The template parameter is
    // named PtrTreeT so it does not shadow the class-level TreeT alias.)
    template<typename PtrTreeT>
    static inline PtrTreeT* THROW_IF_NULL(PtrTreeT* ptr) {
        if (ptr == nullptr) OPENVDB_THROW(ValueError, "iterator references a null tree");
        return ptr;
    }
    MathOp(const IterT& iter, const OpT& op):
        mAcc(*THROW_IF_NULL(iter.getTree())), mOp(op)
    {}
    // Accumulate voxel and tile values into this functor's Stats object.
    void operator()(const IterT& it)
    {
        if (it.isVoxelValue()) {
            // Add the result of applying the operator at a single voxel.
            mStats.add(mOp.result(mAcc, it.getCoord()));
        } else {
            // Iterate over the voxels enclosed by a tile and add the results
            // of applying the operator at each voxel.
            /// @todo This could be specialized to be done more efficiently for some operators.
            /// For example, all voxels in the interior of a tile (i.e., not on the borders)
            /// have gradient zero, so there's no need to apply the operator to every voxel.
            CoordBBox bbox = it.getBoundingBox();
            Coord xyz;
            int &x = xyz.x(), &y = xyz.y(), &z = xyz.z();
            for (x = bbox.min().x(); x <= bbox.max().x(); ++x) {
                for (y = bbox.min().y(); y <= bbox.max().y(); ++y) {
                    for (z = bbox.min().z(); z <= bbox.max().z(); ++z) {
                        // Bug fix: evaluate the operator at the current voxel (xyz)
                        // rather than at it.getCoord(), which previously added the
                        // same tile-origin result once per voxel and left xyz unused.
                        mStats.add(mOp.result(mAcc, xyz));
                    }
                }
            }
        }
    }
    // Accumulate another functor's Stats object into this functor's.
    inline void join(MathOp& other) { mStats.add(other.mStats); }
}; // struct MathOp
} // namespace stats_internal
template<typename IterT>
inline math::Histogram
histogram(const IterT& iter, double vmin, double vmax, size_t numBins, bool threaded)
{
    // Bin voxel values (or vector magnitudes) into a histogram over [vmin, vmax].
    using ValueOp = stats_internal::GetVal<IterT, math::Histogram>;
    stats_internal::HistOp<IterT, ValueOp> visitor(ValueOp(), vmin, vmax, numBins);
    tools::accumulate(iter, visitor, threaded);
    return visitor.hist;
}
template<typename IterT>
inline math::Extrema
extrema(const IterT& iter, bool threaded)
{
    // Delegate to the functor-based overload using the default value extractor.
    using ValueOp = stats_internal::GetVal<IterT, math::Extrema>;
    return extrema(iter, ValueOp(), threaded);
}
template<typename IterT>
inline math::Stats
statistics(const IterT& iter, bool threaded)
{
    // Delegate to the functor-based overload using the default value extractor.
    using ValueOp = stats_internal::GetVal<IterT, math::Stats>;
    return statistics(iter, ValueOp(), threaded);
}
template<typename IterT, typename ValueOp>
inline math::Extrema
extrema(const IterT& iter, const ValueOp& valOp, bool threaded)
{
    // Wrap the value extractor in a reduction functor and sweep the iterator.
    stats_internal::StatsOp<IterT, const ValueOp, math::Extrema> reducer(valOp);
    tools::accumulate(iter, reducer, threaded);
    return reducer.stats;
}
template<typename IterT, typename ValueOp>
inline math::Stats
statistics(const IterT& iter, const ValueOp& valOp, bool threaded)
{
    // Wrap the value extractor in a reduction functor and sweep the iterator.
    stats_internal::StatsOp<IterT, const ValueOp, math::Stats> reducer(valOp);
    tools::accumulate(iter, reducer, threaded);
    return reducer.stats;
}
template<typename OperatorT, typename IterT>
inline math::Extrema
opExtrema(const IterT& iter, const OperatorT& op, bool threaded)
{
    // Apply the operator at every visited voxel/tile and track min/max of the results.
    stats_internal::MathOp<IterT, OperatorT, math::Extrema> reducer(iter, op);
    tools::accumulate(iter, reducer, threaded);
    return reducer.mStats;
}
template<typename OperatorT, typename IterT>
inline math::Stats
opStatistics(const IterT& iter, const OperatorT& op, bool threaded)
{
    // Apply the operator at every visited voxel/tile and gather full statistics.
    stats_internal::MathOp<IterT, OperatorT, math::Stats> reducer(iter, op);
    tools::accumulate(iter, reducer, threaded);
    return reducer.mStats;
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_STATISTICS_HAS_BEEN_INCLUDED
| 15,710 | C | 37.507353 | 95 | 0.67823 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/SignedFloodFill.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file SignedFloodFill.h
///
/// @brief Propagate the signs of distance values from the active voxels
/// in the narrow band to the inactive values outside the narrow band.
///
/// @author Ken Museth
#ifndef OPENVDB_TOOLS_SIGNEDFLOODFILL_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_SIGNEDFLOODFILL_HAS_BEEN_INCLUDED
#include <openvdb/version.h>
#include <openvdb/Types.h> // for Index typedef
#include <openvdb/math/Math.h> // for math::negative
#include <openvdb/tree/NodeManager.h>
#include <map>
#include <type_traits>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Set the values of all inactive voxels and tiles of a narrow-band
/// level set from the signs of the active voxels, setting outside values to
/// +background and inside values to -background.
///
/// @warning This method should only be used on closed, symmetric narrow-band level sets.
///
/// @note If a LeafManager is used the cached leaf nodes are reused,
/// resulting in slightly better overall performance.
///
/// @param tree Tree or LeafManager that will be flood filled.
/// @param threaded enable or disable threading (threading is enabled by default)
/// @param grainSize used to control the threading granularity (default is 1)
/// @param minLevel Specify the lowest tree level to process (leafnode level = 0)
///
/// @throw TypeError if the ValueType of @a tree is not floating-point.
template<typename TreeOrLeafManagerT>
inline void
signedFloodFill(TreeOrLeafManagerT& tree, bool threaded = true,
size_t grainSize = 1, Index minLevel = 0);
/// @brief Set the values of all inactive voxels and tiles of a narrow-band
/// level set from the signs of the active voxels, setting exterior values to
/// @a outsideWidth and interior values to @a insideWidth. Set the background value
/// of this tree to @a outsideWidth.
///
/// @warning This method should only be used on closed, narrow-band level sets.
///
/// @note If a LeafManager is used the cached leaf nodes are reused
/// resulting in slightly better overall performance.
///
/// @param tree Tree or LeafManager that will be flood filled
/// @param outsideWidth the width of the outside of the narrow band
/// @param insideWidth the width of the inside of the narrow band
/// @param threaded enable or disable threading (threading is enabled by default)
/// @param grainSize used to control the threading granularity (default is 1)
/// @param minLevel Specify the lowest tree level to process (leafnode level = 0)
///
/// @throw TypeError if the ValueType of @a tree is not floating-point.
template<typename TreeOrLeafManagerT>
inline void
signedFloodFillWithValues(
TreeOrLeafManagerT& tree,
const typename TreeOrLeafManagerT::ValueType& outsideWidth,
const typename TreeOrLeafManagerT::ValueType& insideWidth,
bool threaded = true,
size_t grainSize = 1,
Index minLevel = 0);
////////////////////////// Implementation of SignedFloodFill ////////////////////////////
/// @brief Functor applied bottom-up (leaf -> internal -> root) by a
/// tree::NodeManager to propagate the signs of the active narrow-band
/// values to all inactive voxels and tiles.
template<typename TreeOrLeafManagerT>
class SignedFloodFillOp
{
public:
    using ValueT = typename TreeOrLeafManagerT::ValueType;
    using RootT = typename TreeOrLeafManagerT::RootNodeType;
    using LeafT = typename TreeOrLeafManagerT::LeafNodeType;
    static_assert(std::is_signed<ValueT>::value,
        "signed flood fill is supported only for signed value grids");

    /// Derive the fill values from the tree's background:
    /// +|background| outside, -|background| inside.
    SignedFloodFillOp(const TreeOrLeafManagerT& tree, Index minLevel = 0)
        : mOutside(ValueT(math::Abs(tree.background())))
        , mInside(ValueT(math::negative(mOutside)))
        , mMinLevel(minLevel)
    {
    }

    /// Use explicit fill values; signs are normalized so that mOutside is
    /// always non-negative and mInside always non-positive.
    SignedFloodFillOp(ValueT outsideValue, ValueT insideValue, Index minLevel = 0)
        : mOutside(ValueT(math::Abs(outsideValue)))
        , mInside(ValueT(math::negative(math::Abs(insideValue))))
        , mMinLevel(minLevel)
    {
    }

    /// Fill the inactive voxels of a leaf node with mInside/mOutside
    /// according to the signs of the surrounding active voxels.
    void operator()(LeafT& leaf) const
    {
        if (LeafT::LEVEL < mMinLevel) return;
        if (!leaf.allocate()) return; // this assures that the buffer is allocated and in-memory
        const typename LeafT::NodeMaskType& valueMask = leaf.getValueMask();
        // WARNING: "Never do what you're about to see at home, we're what you call experts!"
        // Cast away const to write directly into the leaf's value buffer.
        typename LeafT::ValueType* buffer =
            const_cast<typename LeafT::ValueType*>(&(leaf.getFirstValue()));
        const Index first = valueMask.findFirstOn();
        if (first < LeafT::SIZE) {
            // Scan the buffer in x-major order, carrying the sign of the most
            // recently seen active voxel along each axis; inactive voxels are
            // overwritten with mInside or mOutside according to that sign.
            bool xInside = buffer[first]<0, yInside = xInside, zInside = xInside;
            for (Index x = 0; x != (1 << LeafT::LOG2DIM); ++x) {
                const Index x00 = x << (2 * LeafT::LOG2DIM);
                if (valueMask.isOn(x00)) xInside = buffer[x00] < 0; // element(x, 0, 0)
                yInside = xInside;
                for (Index y = 0; y != (1 << LeafT::LOG2DIM); ++y) {
                    const Index xy0 = x00 + (y << LeafT::LOG2DIM);
                    if (valueMask.isOn(xy0)) yInside = buffer[xy0] < 0; // element(x, y, 0)
                    zInside = yInside;
                    for (Index z = 0; z != (1 << LeafT::LOG2DIM); ++z) {
                        const Index xyz = xy0 + z; // element(x, y, z)
                        if (valueMask.isOn(xyz)) {
                            zInside = buffer[xyz] < 0;
                        } else {
                            buffer[xyz] = zInside ? mInside : mOutside;
                        }
                    }
                }
            }
        } else {// if no active voxels exist simply use the sign of the first value
            leaf.fill(buffer[0] < 0 ? mInside : mOutside);
        }
    }

    /// Fill the inactive tiles of an internal node with mInside/mOutside
    /// according to the signs of the neighboring child nodes.
    template<typename NodeT>
    void operator()(NodeT& node) const
    {
        if (NodeT::LEVEL < mMinLevel) return;
        // We assume the child nodes have already been flood filled!
        const typename NodeT::NodeMaskType& childMask = node.getChildMask();
        // WARNING: "Never do what you're about to see at home, we're what you call experts!"
        // Cast away const to write tile values directly into the node's table.
        typename NodeT::UnionType* table = const_cast<typename NodeT::UnionType*>(node.getTable());
        const Index first = childMask.findFirstOn();
        if (first < NodeT::NUM_VALUES) {
            // Same scanline sweep as the leaf case, but at tile granularity:
            // the carried sign comes from each child node's last value.
            bool xInside = table[first].getChild()->getFirstValue()<0;
            bool yInside = xInside, zInside = xInside;
            for (Index x = 0; x != (1 << NodeT::LOG2DIM); ++x) {
                const int x00 = x << (2 * NodeT::LOG2DIM); // offset for block(x, 0, 0)
                if (childMask.isOn(x00)) xInside = table[x00].getChild()->getLastValue()<0;
                yInside = xInside;
                for (Index y = 0; y != (1 << NodeT::LOG2DIM); ++y) {
                    const Index xy0 = x00 + (y << NodeT::LOG2DIM); // offset for block(x, y, 0)
                    if (childMask.isOn(xy0)) yInside = table[xy0].getChild()->getLastValue()<0;
                    zInside = yInside;
                    for (Index z = 0; z != (1 << NodeT::LOG2DIM); ++z) {
                        const Index xyz = xy0 + z; // offset for block(x, y, z)
                        if (childMask.isOn(xyz)) {
                            zInside = table[xyz].getChild()->getLastValue()<0;
                        } else {
                            table[xyz].setValue(zInside ? mInside : mOutside);
                        }
                    }
                }
            }
        } else {//no child nodes exist simply use the sign of the first tile value.
            const ValueT v = table[0].getValue()<0 ? mInside : mOutside;
            for (Index i = 0; i < NodeT::NUM_VALUES; ++i) table[i].setValue(v);
        }
    }

    /// Insert inside tiles between child nodes of the root node and set the
    /// root's background to the outside value.
    void operator()(RootT& root) const
    {
        if (RootT::LEVEL < mMinLevel) return;
        using ChildT = typename RootT::ChildNodeType;
        // Insert the child nodes into a map sorted according to their origin
        std::map<Coord, ChildT*> nodeKeys;
        typename RootT::ChildOnIter it = root.beginChildOn();
        for (; it; ++it) nodeKeys.insert(std::pair<Coord, ChildT*>(it.getCoord(), &(*it)));
        static const Index DIM = RootT::ChildNodeType::DIM;
        // We employ a simple z-scanline algorithm that inserts inactive tiles with
        // the inside value if they are sandwiched between inside child nodes only!
        typename std::map<Coord, ChildT*>::const_iterator b = nodeKeys.begin(), e = nodeKeys.end();
        if ( b == e ) return;
        for (typename std::map<Coord, ChildT*>::const_iterator a = b++; b != e; ++a, ++b) {
            Coord d = b->first - a->first; // delta of neighboring coordinates
            if (d[0]!=0 || d[1]!=0 || d[2]==Int32(DIM)) continue;// not same z-scanline or neighbors
            const ValueT fill[] = { a->second->getLastValue(), b->second->getFirstValue() };
            if (!(fill[0] < 0) || !(fill[1] < 0)) continue; // scanline isn't inside
            // Fill the gap between the two child nodes with inside tiles.
            Coord c = a->first + Coord(0u, 0u, DIM);
            for (; c[2] != b->first[2]; c[2] += DIM) root.addTile(c, mInside, false);
        }
        root.setBackground(mOutside, /*updateChildNodes=*/false);
    }

private:
    const ValueT mOutside, mInside;  // fill values: +outside, -inside
    const Index mMinLevel;           // lowest tree level to process (leaf = 0)
};// SignedFloodFillOp
//{
/// @cond OPENVDB_SIGNED_FLOOD_FILL_INTERNAL
template<typename TreeOrLeafManagerT>
inline
typename std::enable_if<std::is_signed<typename TreeOrLeafManagerT::ValueType>::value, void>::type
doSignedFloodFill(TreeOrLeafManagerT& tree,
typename TreeOrLeafManagerT::ValueType outsideValue,
typename TreeOrLeafManagerT::ValueType insideValue,
bool threaded,
size_t grainSize,
Index minLevel)
{
tree::NodeManager<TreeOrLeafManagerT> nodes(tree);
SignedFloodFillOp<TreeOrLeafManagerT> op(outsideValue, insideValue, minLevel);
nodes.foreachBottomUp(op, threaded, grainSize);
}
// Dummy (no-op) implementation for unsigned types
// Dummy (no-op) implementation for unsigned types: a signed flood fill is
// meaningless without a sign bit, so this overload (selected via SFINAE)
// unconditionally raises a TypeError.
template <typename TreeOrLeafManagerT>
inline
typename std::enable_if<!std::is_signed<typename TreeOrLeafManagerT::ValueType>::value, void>::type
doSignedFloodFill(TreeOrLeafManagerT&,
    const typename TreeOrLeafManagerT::ValueType&,
    const typename TreeOrLeafManagerT::ValueType&,
    bool,
    size_t,
    Index)
{
    OPENVDB_THROW(TypeError,
        "signedFloodFill is supported only for signed value grids");
}
/// @endcond
//}
// If the narrow-band is symmetric and unchanged
// If the narrow-band is symmetric and unchanged
// Public entry point with explicit fill values; simply forwards to the
// SFINAE-dispatched implementation above (which throws for unsigned types).
template <typename TreeOrLeafManagerT>
inline void
signedFloodFillWithValues(
    TreeOrLeafManagerT& tree,
    const typename TreeOrLeafManagerT::ValueType& outsideValue,
    const typename TreeOrLeafManagerT::ValueType& insideValue,
    bool threaded,
    size_t grainSize,
    Index minLevel)
{
    doSignedFloodFill(tree, outsideValue, insideValue, threaded, grainSize, minLevel);
}
/// Public entry point that assumes a symmetric narrow band implied by the
/// tree's background value: +background outside and -background inside.
template <typename TreeOrLeafManagerT>
inline void
signedFloodFill(TreeOrLeafManagerT& tree,
    bool threaded,
    size_t grainSize,
    Index minLevel)
{
    const typename TreeOrLeafManagerT::ValueType outside = tree.root().background();
    doSignedFloodFill(tree, outside, math::negative(outside), threaded, grainSize, minLevel);
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_SIGNEDFLOODFILL_HAS_BEEN_INCLUDED
| 11,608 | C | 40.460714 | 100 | 0.625603 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/MultiResGrid.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file MultiResGrid.h
///
/// @author Ken Museth
///
/// @warning This class is fairly new and as such has not seen a lot of
/// use in production. Please report any issues or request for new
/// features directly to [email protected].
///
/// @brief Multi-resolution grid that contains LoD sequences of trees
/// with powers of two refinements.
///
/// @note While this class can arguably be used to implement a sparse
/// Multi-Grid solver it is currently intended as a means to
/// efficiently compute LoD levels for applications like rendering
///
/// @note Prolongation means interpolation from coarse -> fine
/// @note Restriction means interpolation (or remapping) from fine -> coarse
///
/// @todo Add option to define the level of the input grid (currenlty
/// 0) so as to allow for super-sampling.
#ifndef OPENVDB_TOOLS_MULTIRESGRID_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_MULTIRESGRID_HAS_BEEN_INCLUDED
#include <openvdb/Grid.h>
#include <openvdb/math/FiniteDifference.h>
#include <openvdb/math/Math.h>
#include <openvdb/math/Operators.h>
#include <openvdb/math/Stencils.h>
#include <openvdb/Metadata.h>
#include <openvdb/tree/LeafManager.h>
#include <openvdb/tree/NodeManager.h>
#include "Interpolation.h"
#include "Morphology.h"
#include "Prune.h"
#include "SignedFloodFill.h"
#include "ValueTransformer.h"
#include <tbb/blocked_range.h>
#include <tbb/enumerable_thread_specific.h>
#include <tbb/parallel_for.h>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
template<typename TreeType>
class MultiResGrid: public MetaMap
{
public:
    using Ptr = SharedPtr<MultiResGrid>;
    using ConstPtr = SharedPtr<const MultiResGrid>;

    using ValueType = typename TreeType::ValueType;
    using ValueOnCIter = typename TreeType::ValueOnCIter;
    using ValueOnIter = typename TreeType::ValueOnIter;
    using TreePtr = typename TreeType::Ptr;
    using ConstTreePtr = typename TreeType::ConstPtr;
    using GridPtr = typename Grid<TreeType>::Ptr;
    using ConstGridPtr = typename Grid<TreeType>::ConstPtr;

    //////////////////////////////////////////////////////////////////////

    /// @brief Constructor of empty grids
    /// @param levels The number of trees in this MultiResGrid
    /// @param background Background value
    /// @param voxelSize Size of a (uniform voxel). Defaults to one.
    /// @note The multiple grids are all empty.
    MultiResGrid(size_t levels, ValueType background, double voxelSize = 1.0);

    /// @brief Given an initial high-resolution grid this constructor
    /// generates all the coarser grids by means of restriction.
    /// @param levels The number of trees in this MultiResGrid
    /// @param grid High-resolution input grid
    /// @param useInjection Use restriction by injection, vs
    /// full-weighting. It defaults to false and should rarely be used.
    /// @note This constructor will perform a deep copy of the input
    /// grid and use it as the highest level grid.
    MultiResGrid(size_t levels, const Grid<TreeType> &grid, bool useInjection = false);

    /// @brief Given an initial high-resolution grid this constructor
    /// generates all the coarser grids by means of restriction.
    /// @param levels The number of trees in this MultiResGrid
    /// @param grid High-resolution input grid
    /// @param useInjection Use restriction by injection, vs
    /// full-weighting. It defaults to false and should rarely be used.
    /// @note This constructor will steal the input grid and use it
    /// as the highest level grid. On output the grid is empty.
    MultiResGrid(size_t levels, GridPtr grid, bool useInjection = false);

    //////////////////////////////////////////////////////////////////////

    /// @brief Return the number of levels, i.e. trees, in this MultiResGrid
    /// @note level 0 is the finest level and numLevels()-1 is the coarsest
    /// level.
    size_t numLevels() const { return mTrees.size(); }
    /// @brief Return the level of the finest grid (always 0)
    static size_t finestLevel() { return 0; }
    /// @brief Return the level of the coarsest grid, i.e. numLevels()-1
    size_t coarsestLevel() const { return mTrees.size()-1; }

    //////////////////////////////////////////////////////////////////////

    /// @brief Return a reference to the tree at the specified level
    /// @param level The level of the tree to be returned
    /// @note Level 0 is by definition the finest tree.
    TreeType& tree(size_t level);
    /// @brief Return a const reference to the tree at the specified level
    /// @param level The level of the tree to be returned
    /// @note Level 0 is by definition the finest tree.
    const TreeType& constTree(size_t level) const;
    /// @brief Return a shared pointer to the tree at the specified level
    /// @param level The level of the tree to be returned
    /// @note Level 0 is by definition the finest tree.
    TreePtr treePtr(size_t level);
    /// @brief Return a const shared pointer to the tree at the specified level
    /// @param level The level of the tree to be returned
    /// @note Level 0 is by definition the finest tree.
    ConstTreePtr constTreePtr(size_t level) const;
    /// @brief Return a reference to the tree at the finest level
    TreeType& finestTree() { return *mTrees.front(); }
    /// @brief Return a const reference to the tree at the finest level
    const TreeType& finestConstTree() const { return *mTrees.front(); }
    /// @brief Return a shared pointer to the tree at the finest level
    TreePtr finestTreePtr() { return mTrees.front(); }
    /// @brief Return a const shared pointer to the tree at the finest level
    ConstTreePtr finestConstTreePtr() const { return mTrees.front(); }
    /// @brief Return a reference to the tree at the coarsest level
    TreeType& coarsestTree() { return *mTrees.back(); }
    /// @brief Return a const reference to the tree at the coarsest level
    const TreeType& coarsestConstTree() const { return *mTrees.back(); }
    /// @brief Return a shared pointer to the tree at the coarsest level
    TreePtr coarsestTreePtr() { return mTrees.back(); }
    /// @brief Return a const shared pointer to the tree at the coarsest level
    ConstTreePtr coarsestConstTreePtr() const { return mTrees.back(); }

    //////////////////////////////////////////////////////////////////////

    /// @brief Return a shared pointer to the grid at the specified integer level
    /// @param level Integer level of the grid to be returned
    /// @note Level 0 is by definition the finest grid.
    GridPtr grid(size_t level);
    /// @brief Return a const shared pointer to the grid at the specified level
    /// @param level The level of the grid to be returned
    /// @note Level 0 is by definition the finest grid.
    ConstGridPtr grid(size_t level) const;
    /// @brief Return a shared pointer to a new grid at the specified
    /// floating-point level.
    /// @param level Floating-point level of the grid to be returned
    /// @param grainSize Grain size for the multi-threading
    /// @details Interpolation of the specified order is performed
    /// between the bracketing integer levels.
    /// @note Level 0 is by definition the finest grid.
    template<Index Order>
    GridPtr createGrid(float level, size_t grainSize = 1) const;
    /// @brief Return a shared pointer to a vector of all the base
    /// grids in this instance of the MultiResGrid.
    /// @brief This method is useful for I/O
    GridPtrVecPtr grids();
    /// @brief Return a const shared pointer to a vector of all the base
    /// grids in this instance of the MultiResGrid.
    /// @brief This method is useful for I/O
    GridCPtrVecPtr grids() const;

    //////////////////////////////////////////////////////////////////////

    //@{
    /// @brief Return a reference to the finest grid's transform, which might be
    /// shared with other grids.
    /// @note Calling setTransform() on this grid invalidates all references
    /// previously returned by this method.
    /// @warning The transform is relative to the finest level (=0) grid!
    math::Transform& transform() { return *mTransform; }
    const math::Transform& transform() const { return *mTransform; }
    const math::Transform& constTransform() const { return *mTransform; }
    //@}

    //////////////////////////////////////////////////////////////////////

    //@{
    /// @brief Return the floating-point index coordinate at out_level given
    /// the index coordinate in_xyz at in_level.
    static Vec3R xyz(const Coord& in_ijk, size_t in_level, size_t out_level);
    static Vec3R xyz(const Vec3R& in_xyz, size_t in_level, size_t out_level);
    static Vec3R xyz(const Vec3R& in_xyz, double in_level, double out_level);
    //@}

    //////////////////////////////////////////////////////////////////////

    //@{
    /// @brief Return the value at the specified coordinate position using
    /// interpolation of the specified order into the tree at the out_level.
    ///
    /// @details First in_ijk is mapped from index space at in_level to
    /// out_level, and then a value is interpolated from the tree at out_level.
    ///
    /// @param in_ijk Index coordinate position relative to tree at in_level
    /// @param in_level Integer level of the input coordinate in_ijk
    /// @param out_level Integer level of the interpolated value
    template<Index Order>
    ValueType sampleValue(const Coord& in_ijk, size_t in_level, size_t out_level) const;
    template<Index Order>
    ValueType sampleValue(const Vec3R& in_ijk, size_t in_level, size_t out_level) const;
    //@}

    /// @brief Return the value at the specified integer coordinate position
    /// and level using interpolation of the specified order.
    /// @param ijk Integer coordinate position relative to the highest level (=0) grid
    /// @param level Floating-point level from which to interpolate the value.
    /// @brief Non-integer values of the level will use linear-interpolation
    /// between the neighboring integer levels.
    template<Index Order>
    ValueType sampleValue(const Coord& ijk, double level) const;
    /// @brief Return the value at the specified floating-point coordinate position
    /// and level using interpolation of the specified order.
    /// @param xyz Floating-point coordinate position relative to the highest level grid
    /// @param level Floating-point level from which to interpolate
    /// the value.
    /// @brief Non-integer values of the level will use linear-interpolation
    /// between the neighboring integer levels.
    template<Index Order>
    ValueType sampleValue(const Vec3R& xyz, double level) const;

    //////////////////////////////////////////////////////////////////////

    /// @brief Return the value at coordinate location in @a level tree
    /// from the coarser tree at @a level+1 using trilinear interpolation
    /// @param coords input coords relative to the fine tree at level
    /// @param level The fine level to receive values from the coarser
    /// level-1
    /// @note Prolongation means to interpolation from coarse -> fine
    ValueType prolongateVoxel(const Coord& coords, const size_t level) const;
    /// (coarse->fine) Populates all the active voxel values in a fine (@a level) tree
    /// from the coarse (@a level+1) tree using linear interpolation
    /// This transforms multiple values of the tree in parallel
    void prolongateActiveVoxels(size_t destlevel, size_t grainSize = 1);

    //////////////////////////////////////////////////////////////////////

    /// Populate a coordinate location in @a level (coarse) tree
    /// from the @a level-1 (fine) tree using trilinear interpolation
    /// input coords are relative to the mTree[level] (coarse)
    /// @note Restriction means remapping from fine -> coarse
    ValueType restrictVoxel(Coord ijk, const size_t level, bool useInjection = false) const;
    /// (fine->coarse) Populates all the active voxel values in the coarse (@a level) tree
    /// from the fine (@a level-1) tree using trilinear interpolation.
    /// For cell-centered data, this is equivalent to an average
    /// For vertex-centered data this is equivalent to transferring the data
    /// from the fine vertex directly above the coarse vertex.
    /// This transforms multiple values of the tree in parallel
    void restrictActiveVoxels(size_t destlevel, size_t grainSize = 1);

    /// Output a human-readable description of this MultiResGrid
    void print(std::ostream& = std::cout, int verboseLevel = 1) const;

    /// @brief Return a string with the name of this MultiResGrid
    std::string getName() const
    {
        if (Metadata::ConstPtr meta = (*this)[GridBase::META_GRID_NAME]) return meta->str();
        return "";
    }

    /// @brief Set the name of this MultiResGrid
    void setName(const std::string& name)
    {
        this->removeMeta(GridBase::META_GRID_NAME);
        this->insertMeta(GridBase::META_GRID_NAME, StringMetadata(name));
    }

    /// Return the class of volumetric data (level set, fog volume, etc.) stored in this grid.
    GridClass getGridClass() const
    {
        typename StringMetadata::ConstPtr s =
            this->getMetadata<StringMetadata>(GridBase::META_GRID_CLASS);
        return s ? GridBase::stringToGridClass(s->value()) : GRID_UNKNOWN;
    }

    /// Specify the class of volumetric data (level set, fog volume, etc.) stored in this grid.
    void setGridClass(GridClass cls)
    {
        this->insertMeta(GridBase::META_GRID_CLASS, StringMetadata(GridBase::gridClassToString(cls)));
    }

    /// Remove the setting specifying the class of this grid's volumetric data.
    void clearGridClass() { this->removeMeta(GridBase::META_GRID_CLASS); }

private:
    MultiResGrid(const MultiResGrid& other);//disallow copy construction
    MultiResGrid& operator=(const MultiResGrid& other);//disallow copy assignment

    // For optimal performance we disable registration of the ValueAccessor
    using Accessor = tree::ValueAccessor<TreeType, false>;
    using ConstAccessor = tree::ValueAccessor<const TreeType, false>;

    // Build trees 1..levels-1 by successive restriction of tree 0.
    void topDownRestrict(bool useInjection);

    inline void initMeta();

    // Private struct that concurrently creates a mask of active voxel
    // in a coarse tree from the active voxels in a fine tree
    struct MaskOp;
    /// Private struct that performs multi-threaded restriction
    struct RestrictOp;
    /// Private struct that performs multi-threaded prolongation
    struct ProlongateOp;
    // Private struct that performs multi-threaded computation of grids at fractional levels
    template<Index Order>
    struct FractionOp;
    /// Private template struct that performs the actual multi-threading
    template<typename OpType> struct CookOp;

    // Array of shared pointer to trees, level 0 has the highest resolution.
    std::vector<TreePtr> mTrees;
    // Shared pointer to a transform associated with the finest level grid
    typename math::Transform::Ptr mTransform;
};// MultiResGrid
template<typename TreeType>
MultiResGrid<TreeType>::
MultiResGrid(size_t levels, ValueType background, double voxelSize)
    : mTrees(levels)
    , mTransform(math::Transform::createLinearTransform( voxelSize ))
{
    // Validate the level count and record it as metadata, then create one
    // empty tree per level, all sharing the same background value.
    this->initMeta();
    for (auto& treePtr : mTrees) treePtr = TreePtr(new TreeType(background));
}
template<typename TreeType>
MultiResGrid<TreeType>::
MultiResGrid(size_t levels, const Grid<TreeType> &grid, bool useInjection)
    : MetaMap(grid)  // copy the input grid's metadata
    , mTrees(levels)
    , mTransform( grid.transform().copy() )
{
    this->initMeta();
    mTrees[0].reset( new TreeType( grid.tree() ) );// deep copy input tree
    // Tiles must be voxelized before restriction can sample individual voxels.
    mTrees[0]->voxelizeActiveTiles();
    // Derive all coarser levels (1..levels-1) from the copied finest tree.
    this->topDownRestrict(useInjection);
}
template<typename TreeType>
MultiResGrid<TreeType>::
MultiResGrid(size_t levels, GridPtr grid, bool useInjection)
    : MetaMap(*grid)  // copy the input grid's metadata
    , mTrees(levels)
    , mTransform( grid->transform().copy() )
{
    this->initMeta();
    mTrees[0] = grid->treePtr();// steal tree from input grid
    // Tiles must be voxelized before restriction can sample individual voxels.
    mTrees[0]->voxelizeActiveTiles();
    grid->newTree();  // leave the input grid with a fresh empty tree
    // Derive all coarser levels (1..levels-1) from the stolen finest tree.
    this->topDownRestrict(useInjection);
}
template<typename TreeType>
inline TreeType& MultiResGrid<TreeType>::
tree(size_t level)
{
    // Level 0 is the finest tree; the index must be in range.
    assert( level < mTrees.size() );
    return *mTrees[level];
}
template<typename TreeType>
inline const TreeType& MultiResGrid<TreeType>::
constTree(size_t level) const
{
    // Level 0 is the finest tree; the index must be in range.
    assert( level < mTrees.size() );
    return *mTrees[level];
}
template<typename TreeType>
inline typename TreeType::Ptr MultiResGrid<TreeType>::
treePtr(size_t level)
{
    // Returns a shared pointer, so ownership is shared with this MultiResGrid.
    assert( level < mTrees.size() );
    return mTrees[level];
}
template<typename TreeType>
inline typename TreeType::ConstPtr MultiResGrid<TreeType>::
constTreePtr(size_t level) const
{
    // Returns a shared pointer, so ownership is shared with this MultiResGrid.
    assert( level < mTrees.size() );
    return mTrees[level];
}
template<typename TreeType>
typename Grid<TreeType>::Ptr MultiResGrid<TreeType>::
grid(size_t level)
{
    // Wrap the tree at this level in a Grid that shares ownership of it.
    typename Grid<TreeType>::Ptr result = Grid<TreeType>::create(this->treePtr(level));
    // The stored transform is relative to level 0; coarser levels have
    // voxels that are larger by a factor of 2^level.
    math::Transform::Ptr xformPtr = mTransform->copy();
    if (level > 0) xformPtr->preScale( Real(1 << level) );
    result->setTransform( xformPtr );
    // Propagate this MultiResGrid's metadata and tag the grid with its level.
    result->insertMeta( *this->copyMeta() );
    result->insertMeta( "MultiResGrid_Level", Int64Metadata(level));
    std::stringstream ss;
    ss << this->getName() << "_level_" << level;
    result->setName( ss.str() );
    return result;
}
template<typename TreeType>
inline typename Grid<TreeType>::ConstPtr MultiResGrid<TreeType>::
grid(size_t level) const
{
    // Delegate to the non-const overload; the const_cast is safe because the
    // result is returned as a ConstPtr and the grid wrapper shares the tree.
    return const_cast<MultiResGrid*>(this)->grid(level);
}
template<typename TreeType>
template<Index Order>
typename Grid<TreeType>::Ptr MultiResGrid<TreeType>::
createGrid(float level, size_t grainSize) const
{
    assert( level >= 0.0f && level <= float(mTrees.size()-1) );
    // Start from an empty grid with the finest tree's background value.
    typename Grid<TreeType>::Ptr grid(new Grid<TreeType>(this->constTree(0).background()));
    // Scale the level-0 transform by 2^level (fractional levels allowed).
    math::Transform::Ptr xform = mTransform->copy();
    xform->preScale( math::Pow(2.0f, level) );
    grid->setTransform( xform );
    grid->insertMeta( *(this->copyMeta()) );
    grid->insertMeta( "MultiResGrid_Level", FloatMetadata(level) );
    std::stringstream ss;
    ss << this->getName() << "_level_" << level;
    grid->setName( ss.str() );
    if ( size_t(floorf(level)) == size_t(ceilf(level)) ) {
        // Integer level: simply deep-copy the stored tree at that level.
        grid->setTree( this->constTree( size_t(floorf(level))).copy() );
    } else {
        // Fractional level: FractionOp's constructor performs the
        // multi-threaded interpolation between the bracketing integer
        // levels, writing directly into grid->tree().
        FractionOp<Order> tmp(*this, grid->tree(), level, grainSize);
        if ( grid->getGridClass() == GRID_LEVEL_SET ) {
            // Restore level-set invariants after interpolation.
            signedFloodFill( grid->tree() );
            pruneLevelSet( grid->tree() );//only creates inactive tiles
        }
    }
    return grid;
}
template<typename TreeType>
GridPtrVecPtr MultiResGrid<TreeType>::
grids()
{
    // Collect one Grid wrapper per resolution level, finest (level 0) first.
    GridPtrVecPtr result( new GridPtrVec );
    for (size_t lvl = 0; lvl < mTrees.size(); ++lvl) {
        result->push_back( this->grid(lvl) );
    }
    return result;
}
template<typename TreeType>
GridCPtrVecPtr MultiResGrid<TreeType>::
grids() const
{
    // Collect one const Grid wrapper per resolution level, finest first.
    GridCPtrVecPtr result( new GridCPtrVec );
    for (size_t lvl = 0; lvl < mTrees.size(); ++lvl) {
        result->push_back( this->grid(lvl) );
    }
    return result;
}
template<typename TreeType>
Vec3R MultiResGrid<TreeType>::
xyz(const Coord& in_ijk, size_t in_level, size_t out_level)
{
    // Rescale by the exact power-of-two ratio of voxel sizes between levels.
    const Real scale = Real(1 << in_level) / Real(1 << out_level);
    return Vec3R( in_ijk.data() ) * scale;
}
template<typename TreeType>
Vec3R MultiResGrid<TreeType>::
xyz(const Vec3R& in_xyz, size_t in_level, size_t out_level)
{
    // Rescale by the exact power-of-two ratio of voxel sizes between levels.
    const Real scale = Real(1 << in_level) / Real(1 << out_level);
    return in_xyz * scale;
}
template<typename TreeType>
Vec3R MultiResGrid<TreeType>::
xyz(const Vec3R& in_xyz, double in_level, double out_level)
{
    // Fractional levels are supported here: scale by 2^(in_level - out_level).
    const double scale = math::Pow(2.0, in_level - out_level);
    return in_xyz * scale;
}
template<typename TreeType>
template<Index Order>
typename TreeType::ValueType MultiResGrid<TreeType>::
sampleValue(const Coord& in_ijk, size_t in_level, size_t out_level) const
{
    // The levels are unsigned, so only the upper bound needs checking; the
    // previous "level >= 0" tests were vacuously true for size_t and only
    // triggered tautological-compare warnings.
    assert( in_level < mTrees.size() );
    assert( out_level < mTrees.size() );
    // Map the coordinate from in_level's index space to out_level's, then
    // interpolate (order Order) from the tree stored at out_level.
    const ConstAccessor acc(*mTrees[out_level]);// has disabled registration!
    return tools::Sampler<Order>::sample( acc, this->xyz(in_ijk, in_level, out_level) );
}
template<typename TreeType>
template<Index Order>
typename TreeType::ValueType MultiResGrid<TreeType>::
sampleValue(const Vec3R& in_xyz, size_t in_level, size_t out_level) const
{
    // The levels are unsigned, so only the upper bound needs checking; the
    // previous "level >= 0" tests were vacuously true for size_t and only
    // triggered tautological-compare warnings.
    assert( in_level < mTrees.size() );
    assert( out_level < mTrees.size() );
    // Map the position from in_level's index space to out_level's, then
    // interpolate (order Order) from the tree stored at out_level.
    const ConstAccessor acc(*mTrees[out_level]);// has disabled registration!
    return tools::Sampler<Order>::sample( acc, this->xyz(in_xyz, in_level, out_level) );
}
template<typename TreeType>
template<Index Order>
typename TreeType::ValueType MultiResGrid<TreeType>::
sampleValue(const Coord& ijk, double level) const
{
    assert( level >= 0.0 && level <= double(mTrees.size()-1) );
    // Sample the two integer levels that bracket the requested level.
    const size_t level0 = size_t(floor(level)), level1 = size_t(ceil(level));
    const ValueType v0 = this->template sampleValue<Order>( ijk, 0, level0 );
    if ( level0 == level1 ) return v0;  // integer level: no blending needed
    assert( level1 - level0 == 1 );
    const ValueType v1 = this->template sampleValue<Order>( ijk, 0, level1 );
    OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
    const ValueType a = ValueType(level1 - level);
    OPENVDB_NO_TYPE_CONVERSION_WARNING_END
    // Linear blend between the two bracketing levels.
    return a * v0 + (ValueType(1) - a) * v1;
}
template<typename TreeType>
template<Index Order>
typename TreeType::ValueType MultiResGrid<TreeType>::
sampleValue(const Vec3R& xyz, double level) const
{
    assert( level >= 0.0 && level <= double(mTrees.size()-1) );
    // Sample the two integer levels that bracket the requested level.
    const size_t level0 = size_t(floor(level)), level1 = size_t(ceil(level));
    const ValueType v0 = this->template sampleValue<Order>( xyz, 0, level0 );
    if ( level0 == level1 ) return v0;  // integer level: no blending needed
    assert( level1 - level0 == 1 );
    const ValueType v1 = this->template sampleValue<Order>( xyz, 0, level1 );
    OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
    const ValueType a = ValueType(level1 - level);
    OPENVDB_NO_TYPE_CONVERSION_WARNING_END
    // Linear blend between the two bracketing levels.
    return a * v0 + (ValueType(1) - a) * v1;
}
template<typename TreeType>
typename TreeType::ValueType MultiResGrid<TreeType>::
prolongateVoxel(const Coord& ijk, const size_t level) const
{
    // Interpolate a single fine-level value from the next coarser tree.
    assert( level+1 < mTrees.size() );
    const ConstAccessor acc(*mTrees[level + 1]);// has disabled registration!
    return ProlongateOp::run(ijk, acc);
}
template<typename TreeType>
void MultiResGrid<TreeType>::
prolongateActiveVoxels(size_t destlevel, size_t grainSize)
{
    assert( destlevel < mTrees.size()-1 );
    TreeType &fineTree = *mTrees[ destlevel ];
    const TreeType &coarseTree = *mTrees[ destlevel+1 ];
    // CookOp's constructor performs the multi-threaded transfer from the
    // coarse tree into the fine tree; the temporary exists only for that
    // side effect.
    CookOp<ProlongateOp> tmp( coarseTree, fineTree, grainSize );
}
template<typename TreeType>
typename TreeType::ValueType MultiResGrid<TreeType>::
restrictVoxel(Coord ijk, const size_t destlevel, bool useInjection) const
{
    assert( destlevel > 0 && destlevel < mTrees.size() );
    const TreeType &fineTree = *mTrees[ destlevel-1 ];
    // Injection simply reads the co-located fine voxel (coarse ijk maps to
    // fine 2*ijk); otherwise use the full-weighting restriction operator.
    if ( useInjection ) return fineTree.getValue(ijk<<1);
    const ConstAccessor acc( fineTree );// has disabled registration!
    return RestrictOp::run( ijk, acc);
}
template<typename TreeType>
void MultiResGrid<TreeType>::
restrictActiveVoxels(size_t destlevel, size_t grainSize)
{
    assert( destlevel > 0 && destlevel < mTrees.size() );
    const TreeType &fineTree = *mTrees[ destlevel-1 ];
    TreeType &coarseTree = *mTrees[ destlevel ];
    // CookOp's constructor performs the multi-threaded transfer from the
    // fine tree into the coarse tree; the temporary exists only for that
    // side effect.
    CookOp<RestrictOp> tmp( fineTree, coarseTree, grainSize );
}
template<typename TreeType>
void MultiResGrid<TreeType>::
print(std::ostream& os, int verboseLevel) const
{
    // Per-level tree summaries (detail controlled by verboseLevel).
    os << "MultiResGrid with " << mTrees.size() << " levels\n";
    for (size_t i=0; i<mTrees.size(); ++i) {
        os << "Level " << i << ": ";
        mTrees[i]->print(os, verboseLevel);
    }
    // Metadata entries stored on this MultiResGrid (inherited from MetaMap).
    if ( MetaMap::metaCount() > 0) {
        os << "Additional metadata:" << std::endl;
        for (ConstMetaIterator it = beginMeta(), end = endMeta(); it != end; ++it) {
            os << " " << it->first;
            if (it->second) {
                const std::string value = it->second->str();
                if (!value.empty()) os << ": " << value;
            }
            os << "\n";
        }
    }
    // The transform is relative to the finest (level 0) grid.
    os << "Transform:" << std::endl;
    transform().print(os, /*indent=*/" ");
    os << std::endl;
}
template<typename TreeType>
void MultiResGrid<TreeType>::
initMeta()
{
    // A multi-resolution grid only makes sense with a fine and at least
    // one coarser level, so reject degenerate configurations up front.
    if (this->numLevels() < 2) {
        OPENVDB_THROW(ValueError, "MultiResGrid: at least two levels are required");
    }
    // Record the level count as metadata so it travels with the grid.
    this->insertMeta("MultiResGrid_Levels", Int64Metadata(this->numLevels()));
}
template<typename TreeType>
void MultiResGrid<TreeType>::
topDownRestrict(bool useInjection)
{
    // Build every coarser level (1, 2, ...) from the existing finest
    // level (0) by successive restriction, one level at a time.
    const bool isLevelSet = this->getGridClass() == GRID_LEVEL_SET;
    for (size_t n=1; n<mTrees.size(); ++n) {
        const TreeType &fineTree = *mTrees[n-1];
        mTrees[n] = TreePtr(new TreeType( fineTree.background() ) );// empty tree
        TreeType &coarseTree = *mTrees[n];
        if (useInjection) {// Restriction by injection
            // Only fine voxels with all-even coordinates are collocated
            // with coarse grid points; copy just those values.
            for (ValueOnCIter it = fineTree.cbeginValueOn(); it; ++it) {
                const Coord ijk = it.getCoord();
                if ( (ijk[0] & 1) || (ijk[1] & 1) || (ijk[2] & 1) ) continue;
                coarseTree.setValue( ijk >> 1, *it );
            }
        } else {// Restriction by full-weighting
            // MaskOp's constructor populates the topology of coarseTree;
            // restrictActiveVoxels then computes the full-weighted values.
            MaskOp tmp(fineTree, coarseTree, 128);
            this->restrictActiveVoxels(n, 64);
        }
        if ( isLevelSet ) {
            // Re-establish level-set invariants on the new coarser tree.
            tools::signedFloodFill( coarseTree );
            tools::pruneLevelSet( coarseTree );//only creates inactive tiles
        }
    }// loop over grid levels
}
template<typename TreeType>
struct MultiResGrid<TreeType>::MaskOp
{
    using MaskT = typename TreeType::template ValueConverter<ValueMask>::Type;
    using PoolType = tbb::enumerable_thread_specific<TreeType>;
    using ManagerT = tree::LeafManager<const MaskT>;
    using RangeT = typename ManagerT::LeafRange;
    using VoxelIterT = typename ManagerT::LeafNodeType::ValueOnCIter;
    /// @brief Populate the topology of the (initially empty) @a coarseTree
    /// with the dilated restriction of the active voxels of @a fineTree.
    /// All the work is performed by this constructor.
    MaskOp(const TreeType& fineTree, TreeType& coarseTree, size_t grainSize = 1)
        : mPool(new PoolType( coarseTree ) )// empty coarse tree acts as exemplar
    {
        assert( coarseTree.empty() );
        // Create a mask of the restriction performed on fineTree
        MaskT mask(fineTree, false, true, TopologyCopy() );
        // Multi-threaded dilation which also linearizes the tree to leaf nodes
        tools::dilateActiveValues(mask, 1, NN_FACE_EDGE_VERTEX, EXPAND_TILES);
        // Restriction by injection using thread-local storage of coarse tree masks
        ManagerT leafs( mask );
        tbb::parallel_for(leafs.leafRange( grainSize ), *this);
        // Serial union of the thread-local coarse trees into the coarse tree
        using IterT = typename PoolType::const_iterator;
        for (IterT it=mPool->begin(); it!=mPool->end(); ++it) coarseTree.topologyUnion( *it );
        delete mPool;
    }
    /// @brief TBB kernel: inject the active voxels of a range of fine-tree
    /// leaf nodes into this thread's local coarse tree.
    void operator()(const RangeT& range) const
    {
        Accessor coarseAcc( mPool->local() );// disabled registration
        for (typename RangeT::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
            for (VoxelIterT voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter) {
                Coord ijk = voxelIter.getCoord();
                if ( (ijk[2] & 1) || (ijk[1] & 1) || (ijk[0] & 1) ) continue;//no overlap
                coarseAcc.setValueOn( ijk >> 1 );//injection from fine to coarse level
            }//loop over active voxels in the fine tree
        }// loop over leaf nodes in the fine tree
    }
    PoolType* mPool;// owned; deleted at the end of the constructor
};// MaskOp
template<typename TreeType>
template<Index Order>
struct MultiResGrid<TreeType>::FractionOp
{
    using MaskT = typename TreeType::template ValueConverter<ValueMask>::Type;
    using PoolType = tbb::enumerable_thread_specific<MaskT>;
    using PoolIterT = typename PoolType::iterator;
    using Manager1 = tree::LeafManager<const TreeType>;
    using Manager2 = tree::LeafManager<TreeType>;
    using Range1 = typename Manager1::LeafRange;
    using Range2 = typename Manager2::LeafRange;
    /// @brief Build the topology of the (initially empty) @a midTree, whose
    /// resolution lies between two adjacent levels of @a parent, and fill its
    /// active voxels with a linear blend of values sampled (with the given
    /// interpolation Order) from those two levels. All work happens here.
    FractionOp(const MultiResGrid& parent,
               TreeType& midTree,
               float level,
               size_t grainSize = 1)
        : mLevel( level )
        , mPool(nullptr)
        , mTree0( &*(parent.mTrees[size_t(floorf(level))]) )//high-resolution
        , mTree1( &*(parent.mTrees[size_t(ceilf(level))]) ) //low-resolution
    {
        assert( midTree.empty() );
        assert( mTree0 != mTree1 );
        // Create a pool of thread-local masks
        MaskT examplar( false );
        mPool = new PoolType( examplar );
        {// create mask from re-mapping coarse tree to mid-level tree
            tree::LeafManager<const TreeType> manager( *mTree1 );
            tbb::parallel_for( manager.leafRange(grainSize), *this );
        }
        // Multi-threaded dilation of mask
        tbb::parallel_for(tbb::blocked_range<PoolIterT>(mPool->begin(),mPool->end(),1), *this);
        // Union thread-local coarse tree masks into the coarse tree
        for (PoolIterT it=mPool->begin(); it!=mPool->end(); ++it) midTree.topologyUnion( *it );
        delete mPool;
        {// Interpolate values into the static mid level tree
            Manager2 manager( midTree );
            tbb::parallel_for(manager.leafRange(grainSize), *this);
        }
    }
    /// @brief TBB kernel: re-map the active voxels of a range of
    /// low-resolution leaf nodes into this thread's local mid-level mask.
    void operator()(const Range1& range) const
    {
        using VoxelIter = typename Manager1::LeafNodeType::ValueOnCIter;
        // Let mLevel = level + frac, where
        // level is integer part of mLevel and frac is the fractional part
        // low-res voxel size in world units = dx1 = 2^(level + 1)
        // mid-res voxel size in world units = dx = 2^(mLevel) = 2^(level + frac)
        // low-res index -> world: ijk * dx1
        // world -> mid-res index: world / dx
        // low-res index -> mid-res index: (ijk * dx1) / dx = ijk * scale where
        // scale = dx1/dx = 2^(level+1)/2^(level+frac) = 2^(1-frac)
        const float scale = math::Pow(2.0f, 1.0f - math::FractionalPart(mLevel));
        tree::ValueAccessor<MaskT, false> acc( mPool->local() );// disabled registration
        for (typename Range1::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
            for (VoxelIter voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter) {
                Coord ijk = voxelIter.getCoord();
                OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
                const auto value0 = ijk[0] * scale;
                const auto value1 = ijk[1] * scale;
                const auto value2 = ijk[2] * scale;
                OPENVDB_NO_TYPE_CONVERSION_WARNING_END
                ijk[0] = int(math::Round(value0));
                ijk[1] = int(math::Round(value1));
                ijk[2] = int(math::Round(value2));
                acc.setValueOn( ijk );
            }//loop over active voxels in the fine tree
        }// loop over leaf nodes in the fine tree
    }
    /// @brief TBB kernel: dilate each thread-local mask by one voxel.
    void operator()(const tbb::blocked_range<PoolIterT>& range) const
    {
        for (PoolIterT it=range.begin(); it!=range.end(); ++it) {
            tools::dilateVoxels( *it, 1, NN_FACE_EDGE_VERTEX);
        }
    }
    /// @brief TBB kernel: fill the active voxels of a range of mid-level leaf
    /// nodes with the blend a*v0 + b*v1 of values sampled from the two
    /// adjacent resolution levels, where b is the fractional part of mLevel.
    void operator()(const Range2 &r) const
    {
        using VoxelIter = typename TreeType::LeafNodeType::ValueOnIter;
        // Let mLevel = level + frac, where
        // level is integer part of mLevel and frac is the fractional part
        // high-res voxel size in world units = dx0 = 2^(level)
        // low-res voxel size in world units = dx1 = 2^(level+1)
        // mid-res voxel size in world units = dx = 2^(mLevel) = 2^(level + frac)
        // mid-res index -> world: ijk * dx
        // world -> high-res index: world / dx0
        // world -> low-res index: world / dx1
        // mid-res index -> high-res index: (ijk * dx) / dx0 = ijk * scale0 where
        // scale0 = dx/dx0 = 2^(level+frac)/2^(level) = 2^(frac)
        // mid-res index -> low-res index: (ijk * dx) / dx1 = ijk * scale1 where
        // scale1 = dx/dx1 = 2^(level+frac)/2^(level+1) = 2^(frac-1)
        const float b = math::FractionalPart(mLevel), a = 1.0f - b;
        const float scale0 = math::Pow( 2.0f, b );
        const float scale1 = math::Pow( 2.0f,-a );
        ConstAccessor acc0( *mTree0 ), acc1( *mTree1 );
        for (typename Range2::Iterator leafIter = r.begin(); leafIter; ++leafIter) {
            for (VoxelIter voxelIter = leafIter->beginValueOn(); voxelIter; ++voxelIter) {
                const Vec3R xyz =  Vec3R( voxelIter.getCoord().data() );// mid level coord
                const ValueType v0 = tools::Sampler<Order>::sample( acc0, xyz * scale0 );
                const ValueType v1 = tools::Sampler<Order>::sample( acc1, xyz * scale1 );
                OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
                const auto value0 = a*v0;
                const auto value1 = b*v1;
                OPENVDB_NO_TYPE_CONVERSION_WARNING_END
                voxelIter.setValue( ValueType(value0 + value1) );
            }
        }
    }
    const float mLevel;
    PoolType* mPool;// owned; deleted at the end of the constructor
    const TreeType *mTree0, *mTree1;// non-owning pointers into parent.mTrees
};// FractionOp
template<typename TreeType>
template<typename OperatorType>
struct MultiResGrid<TreeType>::CookOp
{
    using ManagerT = tree::LeafManager<TreeType>;
    using RangeT = typename ManagerT::LeafRange;
    /// @brief Compute, in parallel over the leaf nodes of @a dstTree, the
    /// value of every active voxel by applying OperatorType::run at the
    /// corresponding location in @a srcTree. All work happens here.
    CookOp(const TreeType& srcTree, TreeType& dstTree, size_t grainSize): acc(srcTree)
    {
        ManagerT leafs(dstTree);
        tbb::parallel_for(leafs.leafRange(grainSize), *this);
    }
    /// @brief Copy constructor; each copy made by tbb::parallel_for gets
    /// its own accessor into the same source tree.
    CookOp(const CookOp &other): acc(other.acc.tree()) {}
    /// @brief TBB kernel: write OperatorType::run results into leaf buffers.
    void operator()(const RangeT& range) const
    {
        for (auto leafIt = range.begin(); leafIt; ++leafIt) {
            auto& phi = leafIt.buffer(0);
            for (auto voxelIt = leafIt->beginValueOn(); voxelIt; ++voxelIt) {
                phi.setValue(voxelIt.pos(), OperatorType::run(voxelIt.getCoord(), acc));
            }
        }
    }
    const ConstAccessor acc;
};// CookOp
template<typename TreeType>
struct MultiResGrid<TreeType>::RestrictOp
{
    /// @brief Static method that performs restriction by full weighting
    /// @param ijk Coordinate location on the coarse tree
    /// @param acc ValueAccessor to the fine tree
    static ValueType run(Coord ijk, const ConstAccessor &acc)
    {
        ijk <<= 1;
        // Overlapping grid point
        ValueType v = 8*acc.getValue(ijk);
        // neighbors in one axial direction
        v += 4*(acc.getValue(ijk.offsetBy(-1, 0, 0)) + acc.getValue(ijk.offsetBy( 1, 0, 0)) +// x
                acc.getValue(ijk.offsetBy( 0,-1, 0)) + acc.getValue(ijk.offsetBy( 0, 1, 0)) +// y
                acc.getValue(ijk.offsetBy( 0, 0,-1)) + acc.getValue(ijk.offsetBy( 0, 0, 1)));// z
        // neighbors in two axial directions
        v += 2*(acc.getValue(ijk.offsetBy(-1,-1, 0)) + acc.getValue(ijk.offsetBy(-1, 1, 0)) +// xy
                acc.getValue(ijk.offsetBy( 1,-1, 0)) + acc.getValue(ijk.offsetBy( 1, 1, 0)) +// xy
                acc.getValue(ijk.offsetBy(-1, 0,-1)) + acc.getValue(ijk.offsetBy(-1, 0, 1)) +// xz
                acc.getValue(ijk.offsetBy( 1, 0,-1)) + acc.getValue(ijk.offsetBy( 1, 0, 1)) +// xz
                acc.getValue(ijk.offsetBy( 0,-1,-1)) + acc.getValue(ijk.offsetBy( 0,-1, 1)) +// yz
                acc.getValue(ijk.offsetBy( 0, 1,-1)) + acc.getValue(ijk.offsetBy( 0, 1, 1)));// yz
        // neighbors in three axial directions
        for (int i=-1; i<=1; i+=2) {
            for (int j=-1; j<=1; j+=2) {
                for (int k=-1; k<=1; k+=2) v += acc.getValue(ijk.offsetBy(i,j,k));// xyz
            }
        }
        // Total stencil weight = 1*8 + 6*4 + 12*2 + 8*1 = 64, hence the
        // normalization below.
        v *= ValueType(1.0f/64.0f);
        return v;
    }
};// RestrictOp
template<typename TreeType>
struct MultiResGrid<TreeType>::ProlongateOp
{
    /// @brief Interpolate values from a coarse grid (acc) into the index space (ijk) of a fine grid
    /// @param ijk Coordinate location on the fine tree
    /// @param acc ValueAccessor to the coarse tree
    static ValueType run(const Coord& ijk, const ConstAccessor &acc)
    {
        // The switch key packs the parity of x, y and z into bits 0, 1 and 2.
        // Even coordinates coincide with coarse grid points; each odd axis
        // adds one dimension of averaging between neighboring coarse values.
        switch ( (ijk[0] & 1) | ((ijk[1] & 1) << 1) | ((ijk[2] & 1) << 2) ) {
        case 0:// all even
            return acc.getValue(ijk>>1);
        case 1:// x is odd
            return ValueType(0.5)*(acc.getValue(ijk.offsetBy(-1,0,0)>>1) +
                                   acc.getValue(ijk.offsetBy( 1,0,0)>>1));
        case 2:// y is odd
            return ValueType(0.5)*(acc.getValue(ijk.offsetBy(0,-1,0)>>1) +
                                   acc.getValue(ijk.offsetBy(0, 1,0)>>1));
        case 3:// x&y are odd
            return ValueType(0.25)*(acc.getValue(ijk.offsetBy(-1,-1,0)>>1) +
                                    acc.getValue(ijk.offsetBy(-1, 1,0)>>1) +
                                    acc.getValue(ijk.offsetBy( 1,-1,0)>>1) +
                                    acc.getValue(ijk.offsetBy( 1, 1,0)>>1));
        case 4:// z is odd
            return ValueType(0.5)*(acc.getValue(ijk.offsetBy(0,0,-1)>>1) +
                                   acc.getValue(ijk.offsetBy(0,0, 1)>>1));
        case 5:// x&z are odd
            return ValueType(0.25)*(acc.getValue(ijk.offsetBy(-1,0,-1)>>1) +
                                    acc.getValue(ijk.offsetBy(-1,0, 1)>>1) +
                                    acc.getValue(ijk.offsetBy( 1,0,-1)>>1) +
                                    acc.getValue(ijk.offsetBy( 1,0, 1)>>1));
        case 6:// y&z are odd
            return ValueType(0.25)*(acc.getValue(ijk.offsetBy(0,-1,-1)>>1) +
                                    acc.getValue(ijk.offsetBy(0,-1, 1)>>1) +
                                    acc.getValue(ijk.offsetBy(0, 1,-1)>>1) +
                                    acc.getValue(ijk.offsetBy(0, 1, 1)>>1));
        }
        // all are odd: average the eight diagonal coarse neighbors (case 7)
        ValueType v = zeroVal<ValueType>();
        for (int i=-1; i<=1; i+=2) {
            for (int j=-1; j<=1; j+=2) {
                for (int k=-1; k<=1; k+=2) v += acc.getValue(ijk.offsetBy(i,j,k)>>1);// xyz
            }
        }
        return ValueType(0.125) * v;
    }
};// ProlongateOp
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_MULTIRESGRID_HAS_BEEN_INCLUDED
| 38,573 | C | 39.518908 | 102 | 0.636637 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Interpolation.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file Interpolation.h
///
/// Sampler classes such as PointSampler and BoxSampler that are intended for use
/// with tools::GridTransformer should operate in voxel space and must adhere to
/// the interface described in the example below:
/// @code
/// struct MySampler
/// {
/// // Return a short name that can be used to identify this sampler
/// // in error messages and elsewhere.
/// const char* name() { return "mysampler"; }
///
/// // Return the radius of the sampling kernel in voxels, not including
/// // the center voxel. This is the number of voxels of padding that
/// // are added to all sides of a volume as a result of resampling.
/// int radius() { return 2; }
///
/// // Return true if scaling by a factor smaller than 0.5 (along any axis)
/// // should be handled via a mipmapping-like scheme of successive halvings
/// // of a grid's resolution, until the remaining scale factor is
/// // greater than or equal to 1/2. Set this to false only when high-quality
/// // scaling is not required.
/// bool mipmap() { return true; }
///
/// // Specify if sampling at a location that is collocated with a grid point
/// // is guaranteed to return the exact value at that grid point.
/// // For most sampling kernels, this should be false.
/// bool consistent() { return false; }
///
/// // Sample the tree at the given coordinates and return the result in val.
/// // Return true if the sampled value is active.
/// template<class TreeT>
/// bool sample(const TreeT& tree, const Vec3R& coord, typename TreeT::ValueType& val);
/// };
/// @endcode
#ifndef OPENVDB_TOOLS_INTERPOLATION_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_INTERPOLATION_HAS_BEEN_INCLUDED
#include <openvdb/version.h> // for OPENVDB_VERSION_NAME
#include <openvdb/Platform.h> // for round()
#include <openvdb/math/Math.h>// for SmoothUnitStep
#include <openvdb/math/Transform.h> // for Transform
#include <openvdb/Grid.h>
#include <openvdb/tree/ValueAccessor.h>
#include <cmath>
#include <type_traits>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Provides a unified interface for sampling, i.e. interpolation.
/// @details Order = 0: closest point
/// Order = 1: tri-linear
/// Order = 2: tri-quadratic
/// Staggered: Set to true for MAC grids
template <size_t Order, bool Staggered = false>
struct Sampler
{
    static_assert(Order < 3, "Samplers of order higher than 2 are not supported");
    /// Short identifier of this sampler ("point", "box" or "quadratic")
    static const char* name();
    /// Radius of the sampling kernel in voxels, excluding the center voxel
    static int radius();
    /// True if scaling by factors smaller than 1/2 should use successive halvings
    static bool mipmap();
    /// True if sampling at a location collocated with a grid point is
    /// guaranteed to return exactly that grid point's value
    static bool consistent();
    /// True if this sampler is intended for staggered (MAC) grids
    static bool staggered();
    /// Interpolation order (0 = closest point, 1 = tri-linear, 2 = tri-quadratic)
    static size_t order();
    /// @brief Sample @a inTree at the floating-point index coordinate @a inCoord
    /// and store the result in @a result.
    ///
    /// @return @c true if the sampled value is active.
    template<class TreeT>
    static bool sample(const TreeT& inTree, const Vec3R& inCoord,
                       typename TreeT::ValueType& result);
    /// @brief Sample @a inTree at the floating-point index coordinate @a inCoord.
    ///
    /// @return the reconstructed value
    template<class TreeT>
    static typename TreeT::ValueType sample(const TreeT& inTree, const Vec3R& inCoord);
};
//////////////////////////////////////// Non-Staggered Samplers
// The following samplers operate in voxel space.
// When the samplers are applied to grids holding vector or other non-scalar data,
// the data is assumed to be collocated. For example, using the BoxSampler on a grid
// with ValueType Vec3f assumes that all three elements in a vector can be assigned
// the same physical location. Consider using the GridSampler below instead.
/// @brief Zero-order (nearest-neighbor) sampler for collocated data.
struct PointSampler
{
    static const char* name() { return "point"; }
    static int radius() { return 0; }
    static bool mipmap() { return false; }
    static bool consistent() { return true; }
    static bool staggered() { return false; }
    static size_t order() { return 0; }
    /// @brief Sample @a inTree at the nearest neighbor to @a inCoord
    /// and store the result in @a result.
    /// @return @c true if the sampled value is active.
    template<class TreeT>
    static bool sample(const TreeT& inTree, const Vec3R& inCoord,
                       typename TreeT::ValueType& result);
    /// @brief Sample @a inTree at the nearest neighbor to @a inCoord
    /// @return the reconstructed value
    template<class TreeT>
    static typename TreeT::ValueType sample(const TreeT& inTree, const Vec3R& inCoord);
};
/// @brief First-order (tri-linear) sampler for collocated data.
struct BoxSampler
{
    static const char* name() { return "box"; }
    static int radius() { return 1; }
    static bool mipmap() { return true; }
    static bool consistent() { return true; }
    static bool staggered() { return false; }
    static size_t order() { return 1; }
    /// @brief Trilinearly reconstruct @a inTree at @a inCoord
    /// and store the result in @a result.
    /// @return @c true if any one of the sampled values is active.
    template<class TreeT>
    static bool sample(const TreeT& inTree, const Vec3R& inCoord,
                       typename TreeT::ValueType& result);
    /// @brief Trilinearly reconstruct @a inTree at @a inCoord.
    /// @return the reconstructed value
    template<class TreeT>
    static typename TreeT::ValueType sample(const TreeT& inTree, const Vec3R& inCoord);
    /// @brief Import all eight values from @a inTree to support
    /// tri-linear interpolation.
    template<class ValueT, class TreeT, size_t N>
    static inline void getValues(ValueT (&data)[N][N][N], const TreeT& inTree, Coord ijk);
    /// @brief Import all eight values from @a inTree to support
    /// tri-linear interpolation.
    /// @return @c true if any of the eight values are active
    template<class ValueT, class TreeT, size_t N>
    static inline bool probeValues(ValueT (&data)[N][N][N], const TreeT& inTree, Coord ijk);
    /// @brief Find the minimum and maximum values of the eight cell
    /// values in @a data.
    template<class ValueT, size_t N>
    static inline void extrema(ValueT (&data)[N][N][N], ValueT& vMin, ValueT& vMax);
    /// @return the tri-linear interpolation with the unit cell coordinates @a uvw
    template<class ValueT, size_t N>
    static inline ValueT trilinearInterpolation(ValueT (&data)[N][N][N], const Vec3R& uvw);
};
/// @brief Second-order (tri-quadratic) sampler for collocated data.
struct QuadraticSampler
{
    static const char* name() { return "quadratic"; }
    static int radius() { return 1; }
    static bool mipmap() { return true; }
    static bool consistent() { return false; }
    static bool staggered() { return false; }
    static size_t order() { return 2; }
    /// @brief Triquadratically reconstruct @a inTree at @a inCoord
    /// and store the result in @a result.
    /// @return @c true if any one of the sampled values is active.
    template<class TreeT>
    static bool sample(const TreeT& inTree, const Vec3R& inCoord,
                       typename TreeT::ValueType& result);
    /// @brief Triquadratically reconstruct @a inTree at @a inCoord.
    /// @return the reconstructed value
    template<class TreeT>
    static typename TreeT::ValueType sample(const TreeT& inTree, const Vec3R& inCoord);
    /// @return the tri-quadratic interpolation with the unit cell coordinates @a uvw
    template<class ValueT, size_t N>
    static inline ValueT triquadraticInterpolation(ValueT (&data)[N][N][N], const Vec3R& uvw);
};
//////////////////////////////////////// Staggered Samplers
// The following samplers operate in voxel space and are designed for Vec3
// staggered grid data (e.g., fluid simulations using the Marker-and-Cell approach
// associate elements of the velocity vector with different physical locations:
// the faces of a cube).
/// @brief Zero-order (nearest-neighbor) sampler for staggered (MAC) vector data.
struct StaggeredPointSampler
{
    static const char* name() { return "point"; }
    static int radius() { return 0; }
    static bool mipmap() { return false; }
    static bool consistent() { return false; }
    static bool staggered() { return true; }
    static size_t order() { return 0; }
    /// @brief Sample @a inTree at the nearest neighbor to @a inCoord
    /// and store the result in @a result.
    /// @return true if the sampled value is active.
    template<class TreeT>
    static bool sample(const TreeT& inTree, const Vec3R& inCoord,
                       typename TreeT::ValueType& result);
    /// @brief Sample @a inTree at the nearest neighbor to @a inCoord
    /// @return the reconstructed value
    template<class TreeT>
    static typename TreeT::ValueType sample(const TreeT& inTree, const Vec3R& inCoord);
};
/// @brief First-order (tri-linear) sampler for staggered (MAC) vector data.
struct StaggeredBoxSampler
{
    static const char* name() { return "box"; }
    static int radius() { return 1; }
    static bool mipmap() { return true; }
    static bool consistent() { return false; }
    static bool staggered() { return true; }
    static size_t order() { return 1; }
    /// @brief Trilinearly reconstruct @a inTree at @a inCoord
    /// and store the result in @a result.
    /// @return true if any one of the sampled value is active.
    template<class TreeT>
    static bool sample(const TreeT& inTree, const Vec3R& inCoord,
                       typename TreeT::ValueType& result);
    /// @brief Trilinearly reconstruct @a inTree at @a inCoord.
    /// @return the reconstructed value
    template<class TreeT>
    static typename TreeT::ValueType sample(const TreeT& inTree, const Vec3R& inCoord);
};
/// @brief Second-order (tri-quadratic) sampler for staggered (MAC) vector data.
struct StaggeredQuadraticSampler
{
    static const char* name() { return "quadratic"; }
    static int radius() { return 1; }
    static bool mipmap() { return true; }
    static bool consistent() { return false; }
    static bool staggered() { return true; }
    static size_t order() { return 2; }
    /// @brief Triquadratically reconstruct @a inTree at @a inCoord
    /// and store the result in @a result.
    /// @return true if any one of the sampled values is active.
    template<class TreeT>
    static bool sample(const TreeT& inTree, const Vec3R& inCoord,
                       typename TreeT::ValueType& result);
    /// @brief Triquadratically reconstruct @a inTree at @a inCoord.
    /// @return the reconstructed value
    template<class TreeT>
    static typename TreeT::ValueType sample(const TreeT& inTree, const Vec3R& inCoord);
};
//////////////////////////////////////// GridSampler
/// @brief Class that provides the interface for continuous sampling
/// of values in a tree.
///
/// @details Since trees support only discrete voxel sampling, TreeSampler
/// must be used to sample arbitrary continuous points in (world or
/// index) space.
///
/// @warning This implementation of the GridSampler stores a pointer
/// to a Tree for value access. While this is thread-safe it is
/// uncached and hence slow compared to using a
/// ValueAccessor. Consequently it is normally advisable to use the
/// template specialization below that employs a
/// ValueAccessor. However, care must be taken when dealing with
/// multi-threading (see warning below).
template<typename GridOrTreeType, typename SamplerType>
class GridSampler
{
public:
    using Ptr = SharedPtr<GridSampler>;
    using ValueType = typename GridOrTreeType::ValueType;
    using GridType = typename TreeAdapter<GridOrTreeType>::GridType;
    using TreeType = typename TreeAdapter<GridOrTreeType>::TreeType;
    using AccessorType = typename TreeAdapter<GridOrTreeType>::AccessorType;
    /// @param grid a grid to be sampled
    explicit GridSampler(const GridType& grid)
        : mTree(&(grid.tree())), mTransform(&(grid.transform())) {}
    /// @param tree a tree to be sampled, or a ValueAccessor for the tree
    /// @param transform is used when sampling world space locations.
    GridSampler(const TreeType& tree, const math::Transform& transform)
        : mTree(&tree), mTransform(&transform) {}
    /// @brief Return the transform used for world-space sampling.
    const math::Transform& transform() const { return *mTransform; }
    /// @brief Sample a point in index space in the grid.
    /// @param x Fractional x-coordinate of point in index-coordinates of grid
    /// @param y Fractional y-coordinate of point in index-coordinates of grid
    /// @param z Fractional z-coordinate of point in index-coordinates of grid
    template<typename RealType>
    ValueType sampleVoxel(const RealType& x, const RealType& y, const RealType& z) const
    {
        return this->isSample(Vec3d(x,y,z));
    }
    /// @brief Sample value in integer index space
    /// @param i Integer x-coordinate in index space
    /// @param j Integer y-coordinate in index space
    /// @param k Integer z-coordinate in index space
    ValueType sampleVoxel(typename Coord::ValueType i,
                          typename Coord::ValueType j,
                          typename Coord::ValueType k) const
    {
        return this->isSample(Coord(i,j,k));
    }
    /// @brief Sample value in integer index space
    /// @param ijk the location in index space
    ValueType isSample(const Coord& ijk) const { return mTree->getValue(ijk); }
    /// @brief Sample in fractional index space
    /// @param ispoint the location in index space
    ValueType isSample(const Vec3d& ispoint) const
    {
        ValueType result = zeroVal<ValueType>();
        SamplerType::sample(*mTree, ispoint, result);
        return result;
    }
    /// @brief Sample in world space
    /// @param wspoint the location in world space
    ValueType wsSample(const Vec3d& wspoint) const
    {
        ValueType result = zeroVal<ValueType>();
        SamplerType::sample(*mTree, mTransform->worldToIndex(wspoint), result);
        return result;
    }
private:
    // Non-owning pointers; the sampled grid/tree and the transform must
    // outlive this sampler.
    const TreeType* mTree;
    const math::Transform* mTransform;
}; // class GridSampler
/// @brief Specialization of GridSampler for construction from a ValueAccessor type
///
/// @note This version should normally be favored over the one above
/// that takes a Grid or Tree. The reason is this version uses a
/// ValueAccessor that performs fast (cached) access where the
/// tree-based flavor performs slower (uncached) access.
///
/// @warning Since this version stores a pointer to an (externally
/// allocated) value accessor it is not threadsafe. Hence each thread
/// should have its own instance of a GridSampler constructed from a
/// local ValueAccessor. Alternatively the Grid/Tree-based GridSampler
/// is threadsafe, but also slower.
template<typename TreeT, typename SamplerType>
class GridSampler<tree::ValueAccessor<TreeT>, SamplerType>
{
public:
    using Ptr = SharedPtr<GridSampler>;
    using ValueType = typename TreeT::ValueType;
    using TreeType = TreeT;
    using GridType = Grid<TreeType>;
    using AccessorType = typename tree::ValueAccessor<TreeT>;
    /// @param acc a ValueAccessor to be sampled
    /// @param transform is used when sampling world space locations.
    GridSampler(const AccessorType& acc,
                const math::Transform& transform)
        : mAccessor(&acc), mTransform(&transform) {}
    /// @brief Return the transform used for world-space sampling.
    const math::Transform& transform() const { return *mTransform; }
    /// @brief Sample a point in index space in the grid.
    /// @param x Fractional x-coordinate of point in index-coordinates of grid
    /// @param y Fractional y-coordinate of point in index-coordinates of grid
    /// @param z Fractional z-coordinate of point in index-coordinates of grid
    template<typename RealType>
    ValueType sampleVoxel(const RealType& x, const RealType& y, const RealType& z) const
    {
        return this->isSample(Vec3d(x,y,z));
    }
    /// @brief Sample value in integer index space
    /// @param i Integer x-coordinate in index space
    /// @param j Integer y-coordinate in index space
    /// @param k Integer z-coordinate in index space
    ValueType sampleVoxel(typename Coord::ValueType i,
                          typename Coord::ValueType j,
                          typename Coord::ValueType k) const
    {
        return this->isSample(Coord(i,j,k));
    }
    /// @brief Sample value in integer index space
    /// @param ijk the location in index space
    ValueType isSample(const Coord& ijk) const { return mAccessor->getValue(ijk); }
    /// @brief Sample in fractional index space
    /// @param ispoint the location in index space
    ValueType isSample(const Vec3d& ispoint) const
    {
        ValueType result = zeroVal<ValueType>();
        SamplerType::sample(*mAccessor, ispoint, result);
        return result;
    }
    /// @brief Sample in world space
    /// @param wspoint the location in world space
    ValueType wsSample(const Vec3d& wspoint) const
    {
        ValueType result = zeroVal<ValueType>();
        SamplerType::sample(*mAccessor, mTransform->worldToIndex(wspoint), result);
        return result;
    }
private:
    const AccessorType* mAccessor;//not thread-safe!
    const math::Transform* mTransform;// non-owning
};//Specialization of GridSampler
//////////////////////////////////////// DualGridSampler
/// @brief This is a simple convenience class that allows for sampling
/// from a source grid into the index space of a target grid. At
/// construction the source and target grids are checked for alignment
/// which potentially renders interpolation unnecessary. Else
/// interpolation is performed according to the templated Sampler
/// type.
///
/// @warning For performance reasons the check for alignment of the
/// two grids is only performed at construction time!
template<typename GridOrTreeT,
         typename SamplerT>
class DualGridSampler
{
public:
    using ValueType = typename GridOrTreeT::ValueType;
    using GridType = typename TreeAdapter<GridOrTreeT>::GridType;
    using TreeType = typename TreeAdapter<GridOrTreeT>::TreeType;
    using AccessorType = typename TreeAdapter<GridType>::AccessorType;
    /// @brief Grid and transform constructor.
    /// @param sourceGrid Source grid.
    /// @param targetXform Transform of the target grid.
    DualGridSampler(const GridType& sourceGrid,
                    const math::Transform& targetXform)
        : mSourceTree(&(sourceGrid.tree()))
        , mSourceXform(&(sourceGrid.transform()))
        , mTargetXform(&targetXform)
        , mAligned(targetXform == *mSourceXform)
    {
    }
    /// @brief Tree and transform constructor.
    /// @param sourceTree Source tree.
    /// @param sourceXform Transform of the source grid.
    /// @param targetXform Transform of the target grid.
    DualGridSampler(const TreeType& sourceTree,
                    const math::Transform& sourceXform,
                    const math::Transform& targetXform)
        : mSourceTree(&sourceTree)
        , mSourceXform(&sourceXform)
        , mTargetXform(&targetXform)
        , mAligned(targetXform == sourceXform)
    {
    }
    /// @brief Return the value of the source grid at the index
    /// coordinates, ijk, relative to the target grid (or its transform).
    inline ValueType operator()(const Coord& ijk) const
    {
        // When the transforms matched at construction time no resampling
        // is needed; otherwise map through world space and interpolate.
        if (mAligned) return mSourceTree->getValue(ijk);
        const Vec3R world = mTargetXform->indexToWorld(ijk);
        return SamplerT::sample(*mSourceTree, mSourceXform->worldToIndex(world));
    }
    /// @brief Return true if the two grids are aligned.
    inline bool isAligned() const { return mAligned; }
private:
    // Non-owning pointers; the source tree and both transforms must
    // outlive this sampler.
    const TreeType* mSourceTree;
    const math::Transform* mSourceXform;
    const math::Transform* mTargetXform;
    const bool mAligned;
};// DualGridSampler
/// @brief Specialization of DualGridSampler for construction from a ValueAccessor type.
template<typename TreeT,
         typename SamplerT>
class DualGridSampler<tree::ValueAccessor<TreeT>, SamplerT>
{
public:
    using ValueType = typename TreeT::ValueType;
    using TreeType = TreeT;
    using GridType = Grid<TreeType>;
    using AccessorType = typename tree::ValueAccessor<TreeT>;
    /// @brief ValueAccessor and transform constructor.
    /// @param sourceAccessor ValueAccessor into the source grid.
    /// @param sourceXform Transform for the source grid.
    /// @param targetXform Transform for the target grid.
    DualGridSampler(const AccessorType& sourceAccessor,
                    const math::Transform& sourceXform,
                    const math::Transform& targetXform)
        : mSourceAcc(&sourceAccessor)
        , mSourceXform(&sourceXform)
        , mTargetXform(&targetXform)
        , mAligned(targetXform == sourceXform)
    {
    }
    /// @brief Return the value of the source grid at the index
    /// coordinates, ijk, relative to the target grid.
    inline ValueType operator()(const Coord& ijk) const
    {
        // When the transforms matched at construction time no resampling
        // is needed; otherwise map through world space and interpolate.
        if (mAligned) return mSourceAcc->getValue(ijk);
        const Vec3R world = mTargetXform->indexToWorld(ijk);
        return SamplerT::sample(*mSourceAcc, mSourceXform->worldToIndex(world));
    }
    /// @brief Return true if the two grids are aligned.
    inline bool isAligned() const { return mAligned; }
private:
    // Non-owning pointers; the accessor and both transforms must outlive
    // this sampler (the accessor is also not thread-safe to share).
    const AccessorType* mSourceAcc;
    const math::Transform* mSourceXform;
    const math::Transform* mTargetXform;
    const bool mAligned;
};//Specialization of DualGridSampler
//////////////////////////////////////// AlphaMask
// Class to derive the normalized alpha mask
/// @brief Computes a smooth, normalized alpha value (and its complement)
/// from a scalar mask grid sampled in the index space of another grid.
template <typename GridT,
          typename MaskT,
          typename SamplerT = tools::BoxSampler,
          typename FloatT = float>
class AlphaMask
{
public:
    static_assert(std::is_floating_point<FloatT>::value,
        "AlphaMask requires a floating-point value type");

    using GridType = GridT;
    using MaskType = MaskT;
    using SamplerType = SamplerT;
    /// @deprecated Misspelled alias retained for backward compatibility;
    /// use SamplerType instead.
    using SamlerType = SamplerT;
    using FloatType = FloatT;

    /// @param grid   grid whose transform defines the index space in which
    ///               the mask is sampled
    /// @param mask   scalar mask grid to be sampled
    /// @param min    mask value that maps to alpha 0
    /// @param max    mask value that maps to alpha 1
    /// @param invert if true, swap the two alpha values returned by operator()
    AlphaMask(const GridT& grid, const MaskT& mask, FloatT min, FloatT max, bool invert)
        : mAcc(mask.tree())
        , mSampler(mAcc, mask.transform() , grid.transform())
        , mMin(min)
        , mInvNorm(1/(max-min))
        , mInvert(invert)
    {
        assert(min < max);
    }

    /// @brief Compute the alpha values at the index coordinate @a xyz.
    /// @param xyz location in the grid's index space
    /// @param a   smooth alpha in [0,1] (0 where the mask <= min, 1 where >= max)
    /// @param b   complement 1 - a (a and b are swapped when invert was set)
    /// @return true if a > 0
    inline bool operator()(const Coord& xyz, FloatT& a, FloatT& b) const
    {
        a = math::SmoothUnitStep( (mSampler(xyz) - mMin) * mInvNorm );//smooth mapping to 0->1
        b = 1 - a;
        if (mInvert) std::swap(a,b);
        return a>0;
    }

protected:
    using AccT = typename MaskType::ConstAccessor;
    AccT mAcc;
    tools::DualGridSampler<AccT, SamplerT> mSampler;
    const FloatT mMin, mInvNorm;
    const bool mInvert;
};// AlphaMask
////////////////////////////////////////


namespace local_util {

/// @brief Componentwise floor of @a v, cast to integer coordinates.
inline Vec3i
floorVec3(const Vec3R& v)
{
    return Vec3i(static_cast<int>(std::floor(v(0))),
                 static_cast<int>(std::floor(v(1))),
                 static_cast<int>(std::floor(v(2))));
}

/// @brief Componentwise ceiling of @a v, cast to integer coordinates.
inline Vec3i
ceilVec3(const Vec3R& v)
{
    return Vec3i(static_cast<int>(std::ceil(v(0))),
                 static_cast<int>(std::ceil(v(1))),
                 static_cast<int>(std::ceil(v(2))));
}

/// @brief Componentwise round-to-nearest of @a v, cast to integer coordinates.
inline Vec3i
roundVec3(const Vec3R& v)
{
    return Vec3i(static_cast<int>(::round(v(0))),
                 static_cast<int>(::round(v(1))),
                 static_cast<int>(::round(v(2))));
}

} // namespace local_util
//////////////////////////////////////// PointSampler
template<class TreeT>
inline bool
PointSampler::sample(const TreeT& inTree, const Vec3R& inCoord,
typename TreeT::ValueType& result)
{
return inTree.probeValue(Coord(local_util::roundVec3(inCoord)), result);
}
template<class TreeT>
inline typename TreeT::ValueType
PointSampler::sample(const TreeT& inTree, const Vec3R& inCoord)
{
return inTree.getValue(Coord(local_util::roundVec3(inCoord)));
}
//////////////////////////////////////// BoxSampler


template<class ValueT, class TreeT, size_t N>
inline void
BoxSampler::getValues(ValueT (&data)[N][N][N], const TreeT& inTree, Coord ijk)
{
    // Gather the 2x2x2 block of voxel values whose lower corner is ijk.
    const Coord org(ijk);
    for (int di = 0; di < 2; ++di) {
        for (int dj = 0; dj < 2; ++dj) {
            for (int dk = 0; dk < 2; ++dk) {
                data[di][dj][dk] =
                    inTree.getValue(Coord(org[0] + di, org[1] + dj, org[2] + dk));
            }
        }
    }
}
template<class ValueT, class TreeT, size_t N>
inline bool
BoxSampler::probeValues(ValueT (&data)[N][N][N], const TreeT& inTree, Coord ijk)
{
    // Gather the 2x2x2 block of voxel values whose lower corner is ijk,
    // reporting whether any of the eight voxels is active.
    const Coord org(ijk);
    bool hasActiveValues = false;
    for (int di = 0; di < 2; ++di) {
        for (int dj = 0; dj < 2; ++dj) {
            for (int dk = 0; dk < 2; ++dk) {
                hasActiveValues |= inTree.probeValue(
                    Coord(org[0] + di, org[1] + dj, org[2] + dk), data[di][dj][dk]);
            }
        }
    }
    return hasActiveValues;
}
template<class ValueT, size_t N>
inline void
BoxSampler::extrema(ValueT (&data)[N][N][N], ValueT& vMin, ValueT &vMax)
{
    // Scan all eight corner values for the minimum and maximum.
    vMin = vMax = data[0][0][0];
    for (int i = 0; i < 2; ++i) {
        for (int j = 0; j < 2; ++j) {
            for (int k = 0; k < 2; ++k) {
                vMin = math::Min(vMin, data[i][j][k]);
                vMax = math::Max(vMax, data[i][j][k]);
            }
        }
    }
}
template<class ValueT, size_t N>
inline ValueT
BoxSampler::trilinearInterpolation(ValueT (&data)[N][N][N], const Vec3R& uvw)
{
    // Linear blend between a and b; the macro pair suppresses type-conversion
    // warnings so value types with promoting arithmetic still compile cleanly.
    auto _interpolate = [](const ValueT& a, const ValueT& b, double weight)
    {
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        const auto temp = (b - a) * weight;
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END
        return static_cast<ValueT>(a + ValueT(temp));
    };
    // Trilinear interpolation:
    // The eight surrounding lattice values are used to construct the result. \n
    // result(x,y,z) =
    //     v000 (1-x)(1-y)(1-z) + v001 (1-x)(1-y)z + v010 (1-x)y(1-z) + v011 (1-x)yz
    //   + v100 x(1-y)(1-z) + v101 x(1-y)z + v110 xy(1-z) + v111 xyz
    // Evaluated as nested 1D lerps: along z first, then y, then x.
    return _interpolate(
        _interpolate(
            _interpolate(data[0][0][0], data[0][0][1], uvw[2]),
            _interpolate(data[0][1][0], data[0][1][1], uvw[2]),
            uvw[1]),
        _interpolate(
            _interpolate(data[1][0][0], data[1][0][1], uvw[2]),
            _interpolate(data[1][1][0], data[1][1][1], uvw[2]),
            uvw[1]),
        uvw[0]);
}
template<class TreeT>
inline bool
BoxSampler::sample(const TreeT& inTree, const Vec3R& inCoord,
typename TreeT::ValueType& result)
{
using ValueT = typename TreeT::ValueType;
const Vec3i inIdx = local_util::floorVec3(inCoord);
const Vec3R uvw = inCoord - inIdx;
// Retrieve the values of the eight voxels surrounding the
// fractional source coordinates.
ValueT data[2][2][2];
const bool hasActiveValues = BoxSampler::probeValues(data, inTree, Coord(inIdx));
result = BoxSampler::trilinearInterpolation(data, uvw);
return hasActiveValues;
}
template<class TreeT>
inline typename TreeT::ValueType
BoxSampler::sample(const TreeT& inTree, const Vec3R& inCoord)
{
using ValueT = typename TreeT::ValueType;
const Vec3i inIdx = local_util::floorVec3(inCoord);
const Vec3R uvw = inCoord - inIdx;
// Retrieve the values of the eight voxels surrounding the
// fractional source coordinates.
ValueT data[2][2][2];
BoxSampler::getValues(data, inTree, Coord(inIdx));
return BoxSampler::trilinearInterpolation(data, uvw);
}
//////////////////////////////////////// QuadraticSampler


template<class ValueT, size_t N>
inline ValueT
QuadraticSampler::triquadraticInterpolation(ValueT (&data)[N][N][N], const Vec3R& uvw)
{
    // Evaluate at 'weight' the parabola through value[0], value[1], value[2],
    // which are treated as samples at -1, 0 and 1.  The macro pair suppresses
    // type-conversion warnings for value types with promoting arithmetic.
    auto _interpolate = [](const ValueT* value, double weight)
    {
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        const ValueT
            a = static_cast<ValueT>(0.5 * (value[0] + value[2]) - value[1]),
            b = static_cast<ValueT>(0.5 * (value[2] - value[0])),
            c = static_cast<ValueT>(value[1]);
        const auto temp = weight * (weight * a + b) + c;
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END
        return static_cast<ValueT>(temp);
    };
    /// @todo For vector types, interpolate over each component independently.
    ValueT vx[3];
    for (int dx = 0; dx < 3; ++dx) {
        ValueT vy[3];
        for (int dy = 0; dy < 3; ++dy) {
            // Fit a parabola to three contiguous samples in z
            // (at z=-1, z=0 and z=1), then evaluate the parabola at z',
            // where z' is the fractional part of inCoord.z, i.e.,
            // inCoord.z - inIdx.z.  The coefficients come from solving
            //
            // | (-1)^2 -1 1 || a |   | v0 |
            // |    0    0 1 || b | = | v1 |
            // |   1^2   1 1 || c |   | v2 |
            //
            // for a, b and c.
            const ValueT* vz = &data[dx][dy][0];
            vy[dy] = _interpolate(vz, uvw.z());
        }//loop over y
        // Fit a parabola to three interpolated samples in y, then
        // evaluate the parabola at y', where y' is the fractional
        // part of inCoord.y.
        vx[dx] = _interpolate(vy, uvw.y());
    }//loop over x
    // Fit a parabola to three interpolated samples in x, then
    // evaluate the parabola at the fractional part of inCoord.x.
    return _interpolate(vx, uvw.x());
}
template<class TreeT>
inline bool
QuadraticSampler::sample(const TreeT& inTree, const Vec3R& inCoord,
typename TreeT::ValueType& result)
{
using ValueT = typename TreeT::ValueType;
const Vec3i inIdx = local_util::floorVec3(inCoord), inLoIdx = inIdx - Vec3i(1, 1, 1);
const Vec3R uvw = inCoord - inIdx;
// Retrieve the values of the 27 voxels surrounding the
// fractional source coordinates.
bool active = false;
ValueT data[3][3][3];
for (int dx = 0, ix = inLoIdx.x(); dx < 3; ++dx, ++ix) {
for (int dy = 0, iy = inLoIdx.y(); dy < 3; ++dy, ++iy) {
for (int dz = 0, iz = inLoIdx.z(); dz < 3; ++dz, ++iz) {
if (inTree.probeValue(Coord(ix, iy, iz), data[dx][dy][dz])) active = true;
}
}
}
result = QuadraticSampler::triquadraticInterpolation(data, uvw);
return active;
}
template<class TreeT>
inline typename TreeT::ValueType
QuadraticSampler::sample(const TreeT& inTree, const Vec3R& inCoord)
{
using ValueT = typename TreeT::ValueType;
const Vec3i inIdx = local_util::floorVec3(inCoord), inLoIdx = inIdx - Vec3i(1, 1, 1);
const Vec3R uvw = inCoord - inIdx;
// Retrieve the values of the 27 voxels surrounding the
// fractional source coordinates.
ValueT data[3][3][3];
for (int dx = 0, ix = inLoIdx.x(); dx < 3; ++dx, ++ix) {
for (int dy = 0, iy = inLoIdx.y(); dy < 3; ++dy, ++iy) {
for (int dz = 0, iz = inLoIdx.z(); dz < 3; ++dz, ++iz) {
data[dx][dy][dz] = inTree.getValue(Coord(ix, iy, iz));
}
}
}
return QuadraticSampler::triquadraticInterpolation(data, uvw);
}
//////////////////////////////////////// StaggeredPointSampler


template<class TreeT>
inline bool
StaggeredPointSampler::sample(const TreeT& inTree, const Vec3R& inCoord,
                              typename TreeT::ValueType& result)
{
    using ValueType = typename TreeT::ValueType;
    // Each vector component is stored at a half-voxel offset along its own
    // axis, so sample the three components separately.
    ValueType vx, vy, vz;
    const bool ax = PointSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.5, 0, 0), vx);
    const bool ay = PointSampler::sample<TreeT>(inTree, inCoord + Vec3R(0, 0.5, 0), vy);
    const bool az = PointSampler::sample<TreeT>(inTree, inCoord + Vec3R(0, 0, 0.5), vz);
    // Assemble the collocated result from the three per-axis samples.
    result.x() = vx.x();
    result.y() = vy.y();
    result.z() = vz.z();
    return ax || ay || az;
}
template<class TreeT>
inline typename TreeT::ValueType
StaggeredPointSampler::sample(const TreeT& inTree, const Vec3R& inCoord)
{
    using ValueT = typename TreeT::ValueType;
    // Sample each vector component at its own half-voxel offset.
    const ValueT vx = PointSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.5, 0.0, 0.0));
    const ValueT vy = PointSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.0, 0.5, 0.0));
    const ValueT vz = PointSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.0, 0.0, 0.5));
    return ValueT(vx.x(), vy.y(), vz.z());
}
//////////////////////////////////////// StaggeredBoxSampler


template<class TreeT>
inline bool
StaggeredBoxSampler::sample(const TreeT& inTree, const Vec3R& inCoord,
                            typename TreeT::ValueType& result)
{
    using ValueType = typename TreeT::ValueType;
    // Sample each vector component at its own half-voxel offset.
    ValueType vx = zeroVal<ValueType>();
    ValueType vy = zeroVal<ValueType>();
    ValueType vz = zeroVal<ValueType>();
    const bool ax = BoxSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.5, 0, 0), vx);
    const bool ay = BoxSampler::sample<TreeT>(inTree, inCoord + Vec3R(0, 0.5, 0), vy);
    const bool az = BoxSampler::sample<TreeT>(inTree, inCoord + Vec3R(0, 0, 0.5), vz);
    // Assemble the collocated result from the three per-axis samples.
    result.x() = vx.x();
    result.y() = vy.y();
    result.z() = vz.z();
    return ax || ay || az;
}
template<class TreeT>
inline typename TreeT::ValueType
StaggeredBoxSampler::sample(const TreeT& inTree, const Vec3R& inCoord)
{
    using ValueT = typename TreeT::ValueType;
    // Sample each vector component at its own half-voxel offset.
    const ValueT vx = BoxSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.5, 0.0, 0.0));
    const ValueT vy = BoxSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.0, 0.5, 0.0));
    const ValueT vz = BoxSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.0, 0.0, 0.5));
    return ValueT(vx.x(), vy.y(), vz.z());
}
//////////////////////////////////////// StaggeredQuadraticSampler


template<class TreeT>
inline bool
StaggeredQuadraticSampler::sample(const TreeT& inTree, const Vec3R& inCoord,
    typename TreeT::ValueType& result)
{
    using ValueType = typename TreeT::ValueType;
    // Sample each vector component at its own half-voxel offset.
    ValueType vx, vy, vz;
    const bool ax = QuadraticSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.5, 0, 0), vx);
    const bool ay = QuadraticSampler::sample<TreeT>(inTree, inCoord + Vec3R(0, 0.5, 0), vy);
    const bool az = QuadraticSampler::sample<TreeT>(inTree, inCoord + Vec3R(0, 0, 0.5), vz);
    // Assemble the collocated result from the three per-axis samples.
    result.x() = vx.x();
    result.y() = vy.y();
    result.z() = vz.z();
    return ax || ay || az;
}
template<class TreeT>
inline typename TreeT::ValueType
StaggeredQuadraticSampler::sample(const TreeT& inTree, const Vec3R& inCoord)
{
    using ValueT = typename TreeT::ValueType;
    // Sample each vector component at its own half-voxel offset.
    const ValueT vx = QuadraticSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.5, 0.0, 0.0));
    const ValueT vy = QuadraticSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.0, 0.5, 0.0));
    const ValueT vz = QuadraticSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.0, 0.0, 0.5));
    return ValueT(vx.x(), vy.y(), vz.z());
}
//////////////////////////////////////// Sampler


/// Nearest-neighbor sampling of collocated data
template <>
struct Sampler<0, false> : public PointSampler {};

/// Tri-linear sampling of collocated data
template <>
struct Sampler<1, false> : public BoxSampler {};

/// Tri-quadratic sampling of collocated data
template <>
struct Sampler<2, false> : public QuadraticSampler {};

/// Nearest-neighbor sampling of staggered data (per-component half-voxel offsets)
template <>
struct Sampler<0, true> : public StaggeredPointSampler {};

/// Tri-linear sampling of staggered data (per-component half-voxel offsets)
template <>
struct Sampler<1, true> : public StaggeredBoxSampler {};

/// Tri-quadratic sampling of staggered data (per-component half-voxel offsets)
template <>
struct Sampler<2, true> : public StaggeredQuadraticSampler {};
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_INTERPOLATION_HAS_BEEN_INCLUDED
| 36,124 | C | 34.486248 | 98 | 0.642398 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/VolumeAdvect.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
///////////////////////////////////////////////////////////////////////////
//
/// @author Ken Museth
///
/// @file tools/VolumeAdvect.h
///
/// @brief Sparse hyperbolic advection of volumes, e.g. a density or
/// velocity (vs a level set interface).
#ifndef OPENVDB_TOOLS_VOLUME_ADVECT_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_VOLUME_ADVECT_HAS_BEEN_INCLUDED
#include <tbb/parallel_for.h>
#include <openvdb/Types.h>
#include <openvdb/math/Math.h>
#include <openvdb/util/NullInterrupter.h>
#include "Interpolation.h"// for Sampler
#include "VelocityFields.h" // for VelocityIntegrator
#include "Morphology.h"//for dilateActiveValues and dilateVoxels
#include "Prune.h"// for prune
#include "Statistics.h" // for extrema
#include <functional>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
namespace Scheme {
/// @brief Numerical advection schemes.
enum SemiLagrangian { SEMI, MID, RK3, RK4, MAC, BFECC };
/// @brief Flux-limiters employed to stabilize the second-order
/// advection schemes MacCormack and BFECC.
enum Limiter { NO_LIMITER, CLAMP, REVERT };
}
/// @brief Performs advections of an arbitrary type of volume in a
/// static velocity field. The advections are performed by means
/// of various derivatives of Semi-Lagrangian integration, i.e.
/// backwards tracking along the hyperbolic characteristics
/// followed by interpolation.
///
/// @note Optionally a limiter can be combined with the higher-order
///       integration schemes MacCormack and BFECC. There are two
///       types of limiters (CLAMP and REVERT) that suppress
///       non-physical oscillations by means of either clamping or
///       reverting to a first-order scheme when the function is not
///       bounded by the cell values used for tri-linear interpolation.
///
/// @verbatim The supported integrations schemes:
///
/// ================================================================
/// |  Label |  Accuracy |  Integration Scheme   |  Interpolations |
/// | |Time/Space| | velocity/volume |
/// ================================================================
/// | SEMI | 1/1 | Semi-Lagrangian | 1/1 |
/// | MID | 2/1 | Mid-Point | 2/1 |
/// | RK3 | 3/1 | 3rd Order Runge-Kutta | 3/1 |
/// | RK4 | 4/1 | 4th Order Runge-Kutta | 4/1 |
/// | MAC | 2/2 | MacCormack | 2/2 |
/// | BFECC | 2/2 | BFECC | 3/2 |
/// ================================================================
/// @endverbatim
template<typename VelocityGridT = Vec3fGrid,
         bool StaggeredVelocity = false,
         typename InterrupterType = util::NullInterrupter>
class VolumeAdvection
{
public:
    /// @brief Constructor
    ///
    /// @param velGrid     Velocity grid responsible for the (passive) advection.
    /// @param interrupter Optional interrupter used to prematurely end computations.
    ///
    /// @note The velocity field is assumed to be constant for the duration of the
    ///       advection.
    VolumeAdvection(const VelocityGridT& velGrid, InterrupterType* interrupter = nullptr)
        : mVelGrid(velGrid)
        , mInterrupter(interrupter)
        , mIntegrator( Scheme::SEMI )
        , mLimiter( Scheme::CLAMP )
        , mGrainSize( 128 )
        , mSubSteps( 1 )
    {
        // Cache the maximum velocity magnitude (including the background value),
        // which bounds how far material can travel per time step; see getMaxDistance().
        math::Extrema e = extrema(velGrid.cbeginValueAll(), /*threading*/true);
        e.add(velGrid.background().length());
        mMaxVelocity = e.max();
    }

    virtual ~VolumeAdvection()
    {
    }

    /// @brief Return the spatial order of accuracy of the advection scheme
    ///
    /// @note This is the optimal order in smooth regions. In
    /// non-smooth regions the flux-limiter will drop the order of
    /// accuracy to add numerical dissipation.
    int spatialOrder() const { return (mIntegrator == Scheme::MAC ||
                                       mIntegrator == Scheme::BFECC) ? 2 : 1; }

    /// @brief Return the temporal order of accuracy of the advection scheme
    ///
    /// @note This is the optimal order in smooth regions. In
    /// non-smooth regions the flux-limiter will drop the order of
    /// accuracy to add numerical dissipation.
    int temporalOrder() const {
        switch (mIntegrator) {
        case Scheme::SEMI: return 1;
        case Scheme::MID: return 2;
        case Scheme::RK3: return 3;
        case Scheme::RK4: return 4;
        case Scheme::BFECC:return 2;
        case Scheme::MAC: return 2;
        }
        return 0;//should never reach this point
    }

    /// @brief Set the integrator (see details in the table above)
    void setIntegrator(Scheme::SemiLagrangian integrator) { mIntegrator = integrator; }

    /// @brief Return the integrator (see details in the table above)
    Scheme::SemiLagrangian getIntegrator() const { return mIntegrator; }

    /// @brief Set the limiter (see details above)
    void setLimiter(Scheme::Limiter limiter) { mLimiter = limiter; }

    /// @brief Return the limiter (see details above)
    Scheme::Limiter getLimiter() const { return mLimiter; }

    /// @brief Return @c true if a limiter will be applied based on
    /// the current settings, i.e. a second-order scheme combined with
    /// a limiter other than NO_LIMITER.
    bool isLimiterOn() const { return this->spatialOrder()>1 &&
                               mLimiter != Scheme::NO_LIMITER; }

    /// @return the grain-size used for multi-threading
    /// @note A grainsize of 0 implies serial execution
    size_t getGrainSize() const { return mGrainSize; }

    /// @brief Set the grain-size used for multi-threading
    /// @note A grainsize of 0 disables multi-threading
    /// @warning A small grainsize can degrade performance,
    /// both in terms of time and memory footprint!
    void setGrainSize(size_t grainsize) { mGrainSize = grainsize; }

    /// @return the number of sub-steps per integration (always larger
    /// than or equal to 1).
    int getSubSteps() const { return mSubSteps; }

    /// @brief Set the number of sub-steps per integration.
    /// @note The only reason to increase the sub-step above its
    /// default value of one is to reduce the memory footprint
    /// due to significant dilation. Values smaller than 1 will
    /// be clamped to 1!
    void setSubSteps(int substeps) { mSubSteps = math::Max(1, substeps); }

    /// @brief Return the maximum magnitude of the velocity in the
    /// advection velocity field defined during construction.
    double getMaxVelocity() const { return mMaxVelocity; }

    /// @return Returns the maximum distance in voxel units of @a inGrid
    /// that a particle can travel in the time-step @a dt when advected
    /// in the velocity field defined during construction.
    ///
    /// @details This method is useful when dilating sparse volume
    /// grids to pad boundary regions. Excessive dilation can be
    /// computationally expensive so use this method to prevent
    /// or warn against run-away computation.
    ///
    /// @throw RuntimeError if @a inGrid does not have uniform voxels.
    template<typename VolumeGridT>
    int getMaxDistance(const VolumeGridT& inGrid, double dt) const
    {
        if (!inGrid.hasUniformVoxels()) {
            OPENVDB_THROW(RuntimeError, "Volume grid does not have uniform voxels!");
        }
        // Worst-case travel distance in voxel units, rounded up.
        const double d = mMaxVelocity*math::Abs(dt)/inGrid.voxelSize()[0];
        return static_cast<int>( math::RoundUp(d) );
    }

    /// @return Returns a new grid that is the result of passive advection
    ///         of all the active values the input grid by @a timeStep.
    ///
    /// @param inGrid   The input grid to be advected (unmodified)
    /// @param timeStep Time-step of the Runge-Kutta integrator.
    ///
    /// @details This method will advect all of the active values in
    ///          the input @a inGrid. To achieve this a
    ///          deep-copy is dilated to account for the material
    ///          transport. This dilation step can be slow for large
    ///          time steps @a dt or a velocity field with large magnitudes.
    ///
    /// @warning If the VolumeSamplerT is of higher order than one
    ///          (i.e. tri-linear interpolation) instabilities are
    ///          known to occur. To suppress those, monotonicity
    ///          constraints or flux-limiters need to be applied.
    ///
    /// @throw RuntimeError if @a inGrid does not have uniform voxels.
    template<typename VolumeGridT,
             typename VolumeSamplerT>//only C++11 allows for a default argument
    typename VolumeGridT::Ptr advect(const VolumeGridT& inGrid, double timeStep)
    {
        typename VolumeGridT::Ptr outGrid = inGrid.deepCopy();
        const double dt = timeStep/mSubSteps;
        const int n = this->getMaxDistance(inGrid, dt);
        dilateActiveValues( outGrid->tree(), n, NN_FACE, EXPAND_TILES);
        this->template cook<VolumeGridT, VolumeSamplerT>(*outGrid, inGrid, dt);
        // Each sub-step advects the previous sub-step's output.
        for (int step = 1; step < mSubSteps; ++step) {
            typename VolumeGridT::Ptr tmpGrid = outGrid->deepCopy();
            dilateActiveValues( tmpGrid->tree(), n, NN_FACE, EXPAND_TILES);
            this->template cook<VolumeGridT, VolumeSamplerT>(*tmpGrid, *outGrid, dt);
            outGrid.swap( tmpGrid );
        }

        return outGrid;
    }

    /// @return Returns a new grid that is the result of
    ///         passive advection of the active values in @a inGrid
    ///         that intersect the active values in @c mask. The time
    ///         of the output grid is incremented by @a timeStep.
    ///
    /// @param inGrid   The input grid to be advected (unmodified).
    /// @param mask     The mask of active values defining the active voxels
    ///                 in @c inGrid on which to perform advection. Only
    ///                 if a value is active in both grids will it be modified.
    /// @param timeStep Time-step for a single Runge-Kutta integration step.
    ///
    ///
    /// @details This method will advect all of the active values in
    ///          the input @a inGrid that intersects with the
    ///          active values in @a mask. To achieve this a
    ///          deep-copy is dilated to account for the material
    ///          transport and finally cropped to the intersection
    ///          with @a mask. The dilation step can be slow for large
    ///          time steps @a dt or fast moving velocity fields.
    ///
    /// @warning If the VolumeSamplerT is of higher order than one
    ///          (i.e. tri-linear interpolation) instabilities are
    ///          known to occur. To suppress those, monotonicity
    ///          constraints or flux-limiters need to be applied.
    ///
    /// @throw RuntimeError if @a inGrid is not aligned with @a mask
    ///        or if its voxels are not uniform.
    template<typename VolumeGridT,
             typename MaskGridT,
             typename VolumeSamplerT>//only C++11 allows for a default argument
    typename VolumeGridT::Ptr advect(const VolumeGridT& inGrid, const MaskGridT& mask, double timeStep)
    {
        if (inGrid.transform() != mask.transform()) {
            OPENVDB_THROW(RuntimeError, "Volume grid and mask grid are misaligned! Consider "
                "resampling either of the two grids into the index space of the other.");
        }
        typename VolumeGridT::Ptr outGrid = inGrid.deepCopy();
        const double dt = timeStep/mSubSteps;
        const int n = this->getMaxDistance(inGrid, dt);
        // Dilate for transport, then crop to the mask before cooking.
        dilateActiveValues( outGrid->tree(), n, NN_FACE, EXPAND_TILES);
        outGrid->topologyIntersection( mask );
        pruneInactive( outGrid->tree(), mGrainSize>0, mGrainSize );
        this->template cook<VolumeGridT, VolumeSamplerT>(*outGrid, inGrid, dt);
        outGrid->topologyUnion( inGrid );

        // Each sub-step advects the previous sub-step's output.
        for (int step = 1; step < mSubSteps; ++step) {
            typename VolumeGridT::Ptr tmpGrid = outGrid->deepCopy();
            dilateActiveValues( tmpGrid->tree(), n, NN_FACE, EXPAND_TILES);
            tmpGrid->topologyIntersection( mask );
            pruneInactive( tmpGrid->tree(), mGrainSize>0, mGrainSize );
            this->template cook<VolumeGridT, VolumeSamplerT>(*tmpGrid, *outGrid, dt);
            tmpGrid->topologyUnion( inGrid );
            outGrid.swap( tmpGrid );
        }
        return outGrid;
    }

private:
    // disallow copy construction and copy by assignment!
    VolumeAdvection(const VolumeAdvection&);// not implemented
    VolumeAdvection& operator=(const VolumeAdvection&);// not implemented

    void start(const char* str) const
    {
        if (mInterrupter) mInterrupter->start(str);
    }
    void stop() const
    {
        if (mInterrupter) mInterrupter->end();
    }
    bool interrupt() const
    {
        if (mInterrupter && util::wasInterrupted(mInterrupter)) {
            // NOTE(review): tbb::task::self() was removed in oneTBB -
            // revisit this cancellation path when upgrading TBB.
            tbb::task::self().cancel_group_execution();
            return true;
        }
        return false;
    }

    // Dispatch one advection step to the Advect functor that matches the
    // configured integration scheme, then prune inactive values.
    template<typename VolumeGridT, typename VolumeSamplerT>
    void cook(VolumeGridT& outGrid, const VolumeGridT& inGrid, double dt)
    {
        switch (mIntegrator) {
        case Scheme::SEMI: {
            Advect<VolumeGridT, 1, VolumeSamplerT> adv(inGrid, *this);
            adv.cook(outGrid, dt);
            break;
        }
        case Scheme::MID: {
            Advect<VolumeGridT, 2, VolumeSamplerT> adv(inGrid, *this);
            adv.cook(outGrid, dt);
            break;
        }
        case Scheme::RK3: {
            Advect<VolumeGridT, 3, VolumeSamplerT> adv(inGrid, *this);
            adv.cook(outGrid, dt);
            break;
        }
        case Scheme::RK4: {
            Advect<VolumeGridT, 4, VolumeSamplerT> adv(inGrid, *this);
            adv.cook(outGrid, dt);
            break;
        }
        case Scheme::BFECC: {
            Advect<VolumeGridT, 1, VolumeSamplerT> adv(inGrid, *this);
            adv.cook(outGrid, dt);
            break;
        }
        case Scheme::MAC: {
            Advect<VolumeGridT, 1, VolumeSamplerT> adv(inGrid, *this);
            adv.cook(outGrid, dt);
            break;
        }
        default:
            OPENVDB_THROW(ValueError, "Spatial difference scheme not supported!");
        }
        pruneInactive(outGrid.tree(), mGrainSize>0, mGrainSize);
    }

    // Private class that implements the multi-threaded advection
    template<typename VolumeGridT, size_t OrderRK, typename SamplerT> struct Advect;

    // Private member data of VolumeAdvection
    const VelocityGridT& mVelGrid; // non-owning; must outlive this object
    double mMaxVelocity; // cached maximum velocity magnitude (set in the constructor)
    InterrupterType* mInterrupter; // optional, non-owning; may be nullptr
    Scheme::SemiLagrangian mIntegrator;
    Scheme::Limiter mLimiter;
    size_t mGrainSize;
    int mSubSteps;
};//end of VolumeAdvection class
// Private class that implements the multi-threaded advection
template<typename VelocityGridT, bool StaggeredVelocity, typename InterrupterType>
template<typename VolumeGridT, size_t OrderRK, typename SamplerT>
struct VolumeAdvection<VelocityGridT, StaggeredVelocity, InterrupterType>::Advect
{
    using TreeT = typename VolumeGridT::TreeType;
    using AccT = typename VolumeGridT::ConstAccessor;
    using ValueT = typename TreeT::ValueType;
    using LeafManagerT = typename tree::LeafManager<TreeT>;
    using LeafNodeT = typename LeafManagerT::LeafNodeType;
    using LeafRangeT = typename LeafManagerT::LeafRange;
    using VelocityIntegratorT = VelocityIntegrator<VelocityGridT, StaggeredVelocity>;
    using RealT = typename VelocityIntegratorT::ElementType;
    using VoxelIterT = typename TreeT::LeafNodeType::ValueOnIter;

    Advect(const VolumeGridT& inGrid, const VolumeAdvection& parent)
        : mTask(nullptr)
        , mInGrid(&inGrid)
        , mVelocityInt(parent.mVelGrid)
        , mParent(&parent)
    {
    }
    // Execute the currently bound task over the leaf range, in parallel or
    // serially depending on the parent's grain size.
    inline void cook(const LeafRangeT& range)
    {
        if (mParent->mGrainSize > 0) {
            tbb::parallel_for(range, *this);
        } else {
            (*this)(range);
        }
    }
    // TBB body operator: forwards to whichever member task is bound to mTask.
    void operator()(const LeafRangeT& range) const
    {
        assert(mTask);
        mTask(const_cast<Advect*>(this), range);
    }
    // Advect the volume into outGrid over one (sub-)time step, sequencing the
    // member tasks that implement the parent's integration scheme.
    void cook(VolumeGridT& outGrid, double time_step)
    {
        namespace ph = std::placeholders;
        mParent->start("Advecting volume");
        // Second-order schemes (MAC/BFECC) need one auxiliary leaf buffer.
        LeafManagerT manager(outGrid.tree(), mParent->spatialOrder()==2 ? 1 : 0);
        const LeafRangeT range = manager.leafRange(mParent->mGrainSize);
        const RealT dt = static_cast<RealT>(-time_step);//method of characteristics backtracks
        if (mParent->mIntegrator == Scheme::MAC) {
            mTask = std::bind(&Advect::rk, ph::_1, ph::_2, dt, 0, mInGrid);//out[0]=forward
            this->cook(range);
            mTask = std::bind(&Advect::rk, ph::_1, ph::_2,-dt, 1, &outGrid);//out[1]=backward
            this->cook(range);
            mTask = std::bind(&Advect::mac, ph::_1, ph::_2);//out[0] = out[0] + (in[0] - out[1])/2
            this->cook(range);
        } else if (mParent->mIntegrator == Scheme::BFECC) {
            mTask = std::bind(&Advect::rk, ph::_1, ph::_2, dt, 0, mInGrid);//out[0]=forward
            this->cook(range);
            mTask = std::bind(&Advect::rk, ph::_1, ph::_2,-dt, 1, &outGrid);//out[1]=backward
            this->cook(range);
            mTask = std::bind(&Advect::bfecc, ph::_1, ph::_2);//out[0] = (3*in[0] - out[1])/2
            this->cook(range);
            mTask = std::bind(&Advect::rk, ph::_1, ph::_2, dt, 1, &outGrid);//out[1]=forward
            this->cook(range);
            manager.swapLeafBuffer(1);// out[0] = out[1]
        } else {// SEMI, MID, RK3 and RK4
            mTask = std::bind(&Advect::rk, ph::_1, ph::_2, dt, 0, mInGrid);//forward
            this->cook(range);
        }
        if (mParent->spatialOrder()==2) manager.removeAuxBuffers();
        mTask = std::bind(&Advect::limiter, ph::_1, ph::_2, dt);// out[0] = limiter( out[0] )
        this->cook(range);
        mParent->stop();
    }
    // Last step of the MacCormack scheme: out[0] = out[0] + (in[0] - out[1])/2
    void mac(const LeafRangeT& range) const
    {
        if (mParent->interrupt()) return;
        assert( mParent->mIntegrator == Scheme::MAC );
        AccT acc = mInGrid->getAccessor();
        for (typename LeafRangeT::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
            ValueT* out0 = leafIter.buffer( 0 ).data();// forward
            const ValueT* out1 = leafIter.buffer( 1 ).data();// backward
            const LeafNodeT* leaf = acc.probeConstLeaf( leafIter->origin() );
            if (leaf != nullptr) {
                // Fast path: read the input leaf's buffer directly.
                const ValueT* in0 = leaf->buffer().data();
                for (VoxelIterT voxelIter = leafIter->beginValueOn(); voxelIter; ++voxelIter) {
                    const Index i = voxelIter.pos();
                    out0[i] += RealT(0.5) * ( in0[i] - out1[i] );
                }
            } else {
                // Slow path: no matching input leaf; fall back to accessor lookups.
                for (VoxelIterT voxelIter = leafIter->beginValueOn(); voxelIter; ++voxelIter) {
                    const Index i = voxelIter.pos();
                    out0[i] += RealT(0.5) * ( acc.getValue(voxelIter.getCoord()) - out1[i] );
                }//loop over active voxels
            }
        }//loop over leaf nodes
    }
    // Intermediate step in the BFECC scheme: out[0] = (3*in[0] - out[1])/2
    void bfecc(const LeafRangeT& range) const
    {
        if (mParent->interrupt()) return;
        assert( mParent->mIntegrator == Scheme::BFECC );
        AccT acc = mInGrid->getAccessor();
        for (typename LeafRangeT::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
            ValueT* out0 = leafIter.buffer( 0 ).data();// forward
            const ValueT* out1 = leafIter.buffer( 1 ).data();// backward
            const LeafNodeT* leaf = acc.probeConstLeaf(leafIter->origin());
            if (leaf != nullptr) {
                // Fast path: read the input leaf's buffer directly.
                const ValueT* in0 = leaf->buffer().data();
                for (VoxelIterT voxelIter = leafIter->beginValueOn(); voxelIter; ++voxelIter) {
                    const Index i = voxelIter.pos();
                    out0[i] = RealT(0.5)*( RealT(3)*in0[i] - out1[i] );
                }//loop over active voxels
            } else {
                // Slow path: no matching input leaf; fall back to accessor lookups.
                for (VoxelIterT voxelIter = leafIter->beginValueOn(); voxelIter; ++voxelIter) {
                    const Index i = voxelIter.pos();
                    out0[i] = RealT(0.5)*( RealT(3)*acc.getValue(voxelIter.getCoord()) - out1[i] );
                }//loop over active voxels
            }
        }//loop over leaf nodes
    }
    // Semi-Lagrangian integration with Runge-Kutta of various orders (1->4):
    // back-track each active voxel along the velocity field and sample the
    // source grid at the departure point, writing into leaf buffer n.
    void rk(const LeafRangeT& range, RealT dt, size_t n, const VolumeGridT* grid) const
    {
        if (mParent->interrupt()) return;
        const math::Transform& xform = mInGrid->transform();
        AccT acc = grid->getAccessor();
        for (typename LeafRangeT::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
            ValueT* phi = leafIter.buffer( n ).data();
            for (VoxelIterT voxelIter = leafIter->beginValueOn(); voxelIter; ++voxelIter) {
                ValueT& value = phi[voxelIter.pos()];
                Vec3d wPos = xform.indexToWorld(voxelIter.getCoord());
                mVelocityInt.template rungeKutta<OrderRK, Vec3d>(dt, wPos);
                value = SamplerT::sample(acc, xform.worldToIndex(wPos));
            }//loop over active voxels
        }//loop over leaf nodes
    }
    // Optionally apply the flux-limiter (CLAMP or REVERT) and deactivate
    // voxels whose value is approximately equal to the background.
    void limiter(const LeafRangeT& range, RealT dt) const
    {
        if (mParent->interrupt()) return;
        const bool doLimiter = mParent->isLimiterOn();
        const bool doClamp = mParent->mLimiter == Scheme::CLAMP;
        ValueT data[2][2][2], vMin, vMax;
        const math::Transform& xform = mInGrid->transform();
        AccT acc = mInGrid->getAccessor();
        const ValueT backg = mInGrid->background();
        for (typename LeafRangeT::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
            ValueT* phi = leafIter.buffer( 0 ).data();
            for (VoxelIterT voxelIter = leafIter->beginValueOn(); voxelIter; ++voxelIter) {
                ValueT& value = phi[voxelIter.pos()];

                if ( doLimiter ) {
                    assert(OrderRK == 1);
                    Vec3d wPos = xform.indexToWorld(voxelIter.getCoord());
                    mVelocityInt.template rungeKutta<1, Vec3d>(dt, wPos);// Explicit Euler
                    Vec3d iPos = xform.worldToIndex(wPos);
                    Coord ijk  = Coord::floor( iPos );
                    // Bound the value by the eight cell values used for
                    // tri-linear interpolation at the departure point.
                    BoxSampler::getValues(data, acc, ijk);
                    BoxSampler::extrema(data, vMin, vMax);
                    if ( doClamp ) {
                        value = math::Clamp( value, vMin, vMax);
                    } else if (value < vMin || value > vMax ) {
                        // REVERT: fall back to first-order interpolation.
                        iPos -= Vec3R(ijk[0], ijk[1], ijk[2]);//unit coordinates
                        value = BoxSampler::trilinearInterpolation( data, iPos );
                    }
                }

                if (math::isApproxEqual(value, backg, math::Delta<ValueT>::value())) {
                    value = backg;
                    leafIter->setValueOff( voxelIter.pos() );
                }
            }//loop over active voxels
        }//loop over leaf nodes
    }
    // Public member data of the private Advect class
    typename std::function<void (Advect*, const LeafRangeT&)> mTask;
    const VolumeGridT* mInGrid; // non-owning; source volume being advected
    const VelocityIntegratorT mVelocityInt;// lightweight!
    const VolumeAdvection* mParent; // non-owning; configuration and interrupter
};// end of private member class Advect
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_VOLUME_ADVECT_HAS_BEEN_INCLUDED
| 24,026 | C | 43.330258 | 103 | 0.595688 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/VolumeToSpheres.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file tools/VolumeToSpheres.h
///
/// @brief Fill a closed level set or fog volume with adaptively-sized spheres.
#ifndef OPENVDB_TOOLS_VOLUME_TO_SPHERES_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_VOLUME_TO_SPHERES_HAS_BEEN_INCLUDED
#include <openvdb/tree/LeafManager.h>
#include <openvdb/math/Math.h>
#include "Morphology.h" // for erodeVoxels()
#include "PointScatter.h"
#include "LevelSetRebuild.h"
#include "LevelSetUtil.h"
#include "VolumeToMesh.h"
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>
#include <tbb/parallel_reduce.h>
#include <algorithm> // for std::min(), std::max()
#include <cmath> // for std::sqrt()
#include <limits> // for std::numeric_limits
#include <memory>
#include <random>
#include <utility> // for std::pair
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Fill a closed level set or fog volume with adaptively-sized spheres.
///
/// @param grid a scalar grid that defines the surface to be filled with spheres
/// @param spheres an output array of 4-tuples representing the fitted spheres<BR>
/// The first three components of each tuple specify the sphere center,
/// and the fourth specifies the radius.
/// The spheres are ordered by radius, from largest to smallest.
/// @param sphereCount lower and upper bounds on the number of spheres to be generated<BR>
/// The actual number will be somewhere within the bounds.
/// @param overlapping toggle to allow spheres to overlap/intersect
/// @param minRadius the smallest allowable sphere size, in voxel units<BR>
/// @param maxRadius the largest allowable sphere size, in voxel units
/// @param isovalue the voxel value that determines the surface of the volume<BR>
/// The default value of zero works for signed distance fields,
/// while fog volumes require a larger positive value
/// (0.5 is a good initial guess).
/// @param instanceCount the number of interior points to consider for the sphere placement<BR>
/// Increasing this count increases the chances of finding optimal
/// sphere sizes.
/// @param interrupter pointer to an object adhering to the util::NullInterrupter interface
///
/// @note The minimum sphere count takes precedence over the minimum radius.
template<typename GridT, typename InterrupterT = util::NullInterrupter>
inline void
fillWithSpheres(
const GridT& grid,
std::vector<openvdb::Vec4s>& spheres,
const Vec2i& sphereCount = Vec2i(1, 50),
bool overlapping = false,
float minRadius = 1.0,
float maxRadius = std::numeric_limits<float>::max(),
float isovalue = 0.0,
int instanceCount = 10000,
InterrupterT* interrupter = nullptr);
////////////////////////////////////////
/// @brief Accelerated closest surface point queries for narrow band level sets
/// @details Supports queries that originate at arbitrary world-space locations
/// and is not confined to the narrow band region of the input volume geometry.
template<typename GridT>
class ClosestSurfacePoint
{
public:
    using Ptr = std::unique_ptr<ClosestSurfacePoint>;
    using TreeT = typename GridT::TreeType;
    using BoolTreeT = typename TreeT::template ValueConverter<bool>::Type;
    using Index32TreeT = typename TreeT::template ValueConverter<Index32>::Type;
    using Int16TreeT = typename TreeT::template ValueConverter<Int16>::Type;
    /// @brief Extract surface points and construct a spatial acceleration structure.
    ///
    /// @return a null pointer if the initialization fails for any reason,
    /// otherwise a unique pointer to a newly-allocated ClosestSurfacePoint object.
    ///
    /// @param grid a scalar level set or fog volume
    /// @param isovalue the voxel value that determines the surface of the volume
    /// The default value of zero works for signed distance fields,
    /// while fog volumes require a larger positive value
    /// (0.5 is a good initial guess).
    /// @param interrupter pointer to an object adhering to the util::NullInterrupter interface.
    template<typename InterrupterT = util::NullInterrupter>
    static inline Ptr create(const GridT& grid, float isovalue = 0.0,
        InterrupterT* interrupter = nullptr);
    /// @brief Compute the distance from each input point to its closest surface point.
    /// @param points input list of points in world space
    /// @param distances output list of closest surface point distances
    inline bool search(const std::vector<Vec3R>& points, std::vector<float>& distances);
    /// @brief Overwrite each input point with its closest surface point.
    /// @param points input/output list of points in world space
    /// @param distances output list of closest surface point distances
    inline bool searchAndReplace(std::vector<Vec3R>& points, std::vector<float>& distances);
    /// @brief Tree accessor
    const Index32TreeT& indexTree() const { return *mIdxTreePt; }
    /// @brief Tree accessor
    const Int16TreeT& signTree() const { return *mSignTreePt; }
private:
    using Index32LeafT = typename Index32TreeT::LeafNodeType;
    using IndexRange = std::pair<size_t, size_t>;
    // Bounding spheres (center x, y, z + padded squared radius) for each leaf
    // node and for each internal (leaf-parent) node; used to prune searches.
    std::vector<Vec4R> mLeafBoundingSpheres, mNodeBoundingSpheres;
    // For internal node n, mLeafRanges[n] is the [begin, end) range into mLeafNodes.
    std::vector<IndexRange> mLeafRanges;
    std::vector<const Index32LeafT*> mLeafNodes;
    // Flat array of surface points; active voxel values in mIdxTreePt index into it.
    PointList mSurfacePointList;
    size_t mPointListSize = 0, mMaxNodeLeafs = 0;
    typename Index32TreeT::Ptr mIdxTreePt;// maps voxels to surface point indices
    typename Int16TreeT::Ptr mSignTreePt;// per-voxel sign/edge flags
    ClosestSurfacePoint() = default;
    template<typename InterrupterT = util::NullInterrupter>
    inline bool initialize(const GridT&, float isovalue, InterrupterT*);
    inline bool search(std::vector<Vec3R>&, std::vector<float>&, bool transformPoints);
};
////////////////////////////////////////
// Internal utility methods
namespace v2s_internal {
struct PointAccessor
{
PointAccessor(std::vector<Vec3R>& points)
: mPoints(points)
{
}
void add(const Vec3R &pos)
{
mPoints.push_back(pos);
}
private:
std::vector<Vec3R>& mPoints;
};
/// @brief TBB body that computes, for each index leaf node, a bounding sphere
/// of the surface points referenced by that leaf.
/// @details Each sphere is stored as (center x, y, z, padded squared radius).
template<typename Index32LeafT>
class LeafOp
{
public:
    LeafOp(std::vector<Vec4R>& leafBoundingSpheres,
        const std::vector<const Index32LeafT*>& leafNodes,
        const math::Transform& transform,
        const PointList& surfacePointList);
    void run(bool threaded = true);
    void operator()(const tbb::blocked_range<size_t>&) const;
private:
    std::vector<Vec4R>& mLeafBoundingSpheres;// output: one sphere per leaf node
    const std::vector<const Index32LeafT*>& mLeafNodes;
    const math::Transform& mTransform;
    const PointList& mSurfacePointList;
};
template<typename Index32LeafT>
LeafOp<Index32LeafT>::LeafOp(
    std::vector<Vec4R>& leafBoundingSpheres,
    const std::vector<const Index32LeafT*>& leafNodes,
    const math::Transform& transform,
    const PointList& surfacePointList)
    : mLeafBoundingSpheres(leafBoundingSpheres)
    , mLeafNodes(leafNodes)
    , mTransform(transform)
    , mSurfacePointList(surfacePointList)
{
}
template<typename Index32LeafT>
void
LeafOp<Index32LeafT>::run(bool threaded)
{
    if (threaded) {
        tbb::parallel_for(tbb::blocked_range<size_t>(0, mLeafNodes.size()), *this);
    } else {
        (*this)(tbb::blocked_range<size_t>(0, mLeafNodes.size()));
    }
}
template<typename Index32LeafT>
void
LeafOp<Index32LeafT>::operator()(const tbb::blocked_range<size_t>& range) const
{
    typename Index32LeafT::ValueOnCIter iter;
    Vec3s avg;
    for (size_t n = range.begin(); n != range.end(); ++n) {
        // First pass: centroid of the surface points indexed by this leaf.
        avg[0] = 0.0;
        avg[1] = 0.0;
        avg[2] = 0.0;
        int count = 0;
        for (iter = mLeafNodes[n]->cbeginValueOn(); iter; ++iter) {
            avg += mSurfacePointList[iter.getValue()];
            ++count;
        }
        if (count > 1) avg *= float(1.0 / double(count));
        // Second pass: largest squared distance from the centroid.
        float maxDist = 0.0;
        for (iter = mLeafNodes[n]->cbeginValueOn(); iter; ++iter) {
            float tmpDist = (mSurfacePointList[iter.getValue()] - avg).lengthSqr();
            if (tmpDist > maxDist) maxDist = tmpDist;
        }
        Vec4R& sphere = mLeafBoundingSpheres[n];
        sphere[0] = avg[0];
        sphere[1] = avg[1];
        sphere[2] = avg[2];
        sphere[3] = maxDist * 2.0; // padded radius
    }
}
/// @brief TBB body that computes, for each internal (leaf-parent) node, a
/// bounding sphere enclosing the bounding spheres of its child leaf nodes.
/// @details Each sphere is stored as (center x, y, z, padded squared radius).
class NodeOp
{
public:
    using IndexRange = std::pair<size_t, size_t>;
    NodeOp(std::vector<Vec4R>& nodeBoundingSpheres,
        const std::vector<IndexRange>& leafRanges,
        const std::vector<Vec4R>& leafBoundingSpheres);
    inline void run(bool threaded = true);
    inline void operator()(const tbb::blocked_range<size_t>&) const;
private:
    std::vector<Vec4R>& mNodeBoundingSpheres;// output: one sphere per internal node
    const std::vector<IndexRange>& mLeafRanges;// per node: [begin, end) into the leaf list
    const std::vector<Vec4R>& mLeafBoundingSpheres;
};
inline
NodeOp::NodeOp(std::vector<Vec4R>& nodeBoundingSpheres,
    const std::vector<IndexRange>& leafRanges,
    const std::vector<Vec4R>& leafBoundingSpheres)
    : mNodeBoundingSpheres(nodeBoundingSpheres)
    , mLeafRanges(leafRanges)
    , mLeafBoundingSpheres(leafBoundingSpheres)
{
}
inline void
NodeOp::run(bool threaded)
{
    if (threaded) {
        tbb::parallel_for(tbb::blocked_range<size_t>(0, mLeafRanges.size()), *this);
    } else {
        (*this)(tbb::blocked_range<size_t>(0, mLeafRanges.size()));
    }
}
inline void
NodeOp::operator()(const tbb::blocked_range<size_t>& range) const
{
    Vec3d avg, pos;
    for (size_t n = range.begin(); n != range.end(); ++n) {
        // Average the centers of this node's child leaf bounding spheres.
        avg[0] = 0.0;
        avg[1] = 0.0;
        avg[2] = 0.0;
        int count = int(mLeafRanges[n].second) - int(mLeafRanges[n].first);
        for (size_t i = mLeafRanges[n].first; i < mLeafRanges[n].second; ++i) {
            avg[0] += mLeafBoundingSpheres[i][0];
            avg[1] += mLeafBoundingSpheres[i][1];
            avg[2] += mLeafBoundingSpheres[i][2];
        }
        if (count > 1) avg *= float(1.0 / double(count));
        // Largest squared distance from the centroid, padded by the child
        // sphere's own squared radius so the result bounds the children.
        double maxDist = 0.0;
        for (size_t i = mLeafRanges[n].first; i < mLeafRanges[n].second; ++i) {
            pos[0] = mLeafBoundingSpheres[i][0];
            pos[1] = mLeafBoundingSpheres[i][1];
            pos[2] = mLeafBoundingSpheres[i][2];
            const auto radiusSqr = mLeafBoundingSpheres[i][3];
            double tmpDist = (pos - avg).lengthSqr() + radiusSqr;
            if (tmpDist > maxDist) maxDist = tmpDist;
        }
        Vec4R& sphere = mNodeBoundingSpheres[n];
        sphere[0] = avg[0];
        sphere[1] = avg[1];
        sphere[2] = avg[2];
        sphere[3] = maxDist * 2.0; // padded radius
    }
}
////////////////////////////////////////
/// @brief TBB body that, for each instance point, finds the squared distance to
/// its closest surface point, using the leaf/node bounding sphere hierarchy to
/// prune the search. Distances are converted to true distances at the end.
template<typename Index32LeafT>
class ClosestPointDist
{
public:
    using IndexRange = std::pair<size_t, size_t>;
    ClosestPointDist(
        std::vector<Vec3R>& instancePoints,
        std::vector<float>& instanceDistances,
        const PointList& surfacePointList,
        const std::vector<const Index32LeafT*>& leafNodes,
        const std::vector<IndexRange>& leafRanges,
        const std::vector<Vec4R>& leafBoundingSpheres,
        const std::vector<Vec4R>& nodeBoundingSpheres,
        size_t maxNodeLeafs,
        bool transformPoints = false);
    void run(bool threaded = true);
    void operator()(const tbb::blocked_range<size_t>&) const;
private:
    void evalLeaf(size_t index, const Index32LeafT& leaf) const;
    void evalNode(size_t pointIndex, size_t nodeIndex) const;
    std::vector<Vec3R>& mInstancePoints;// in/out: replaced in place if mTransformPoints
    std::vector<float>& mInstanceDistances;// tracked squared, rooted on completion
    const PointList& mSurfacePointList;
    const std::vector<const Index32LeafT*>& mLeafNodes;
    const std::vector<IndexRange>& mLeafRanges;
    const std::vector<Vec4R>& mLeafBoundingSpheres;
    const std::vector<Vec4R>& mNodeBoundingSpheres;
    std::vector<float> mLeafDistances, mNodeDistances;// scratch distance caches
    const bool mTransformPoints;
    size_t mClosestPointIndex;// index of the most recently found closest point
};// ClosestPointDist
template<typename Index32LeafT>
ClosestPointDist<Index32LeafT>::ClosestPointDist(
    std::vector<Vec3R>& instancePoints,
    std::vector<float>& instanceDistances,
    const PointList& surfacePointList,
    const std::vector<const Index32LeafT*>& leafNodes,
    const std::vector<IndexRange>& leafRanges,
    const std::vector<Vec4R>& leafBoundingSpheres,
    const std::vector<Vec4R>& nodeBoundingSpheres,
    size_t maxNodeLeafs,
    bool transformPoints)
    : mInstancePoints(instancePoints)
    , mInstanceDistances(instanceDistances)
    , mSurfacePointList(surfacePointList)
    , mLeafNodes(leafNodes)
    , mLeafRanges(leafRanges)
    , mLeafBoundingSpheres(leafBoundingSpheres)
    , mNodeBoundingSpheres(nodeBoundingSpheres)
    , mLeafDistances(maxNodeLeafs, 0.0)
    , mNodeDistances(leafRanges.size(), 0.0)
    , mTransformPoints(transformPoints)
    , mClosestPointIndex(0)
{
}
template<typename Index32LeafT>
void
ClosestPointDist<Index32LeafT>::run(bool threaded)
{
    if (threaded) {
        tbb::parallel_for(tbb::blocked_range<size_t>(0, mInstancePoints.size()), *this);
    } else {
        (*this)(tbb::blocked_range<size_t>(0, mInstancePoints.size()));
    }
}
// Brute-force scan of one leaf: update the closest (squared) distance record of
// the given instance point against every surface point referenced by the leaf.
template<typename Index32LeafT>
void
ClosestPointDist<Index32LeafT>::evalLeaf(size_t index, const Index32LeafT& leaf) const
{
    typename Index32LeafT::ValueOnCIter iter;
    const Vec3s center = mInstancePoints[index];
    // NOTE(review): member mutated through const_cast inside a const TBB body;
    // presumably safe because TBB copies the body per task — verify.
    size_t& closestPointIndex = const_cast<size_t&>(mClosestPointIndex);
    for (iter = leaf.cbeginValueOn(); iter; ++iter) {
        const Vec3s& point = mSurfacePointList[iter.getValue()];
        float tmpDist = (point - center).lengthSqr();
        if (tmpDist < mInstanceDistances[index]) {
            mInstanceDistances[index] = tmpDist;
            closestPointIndex = iter.getValue();
        }
    }
}
// Search the leaves under one internal node: compute a conservative distance to
// each child leaf's bounding sphere, scan the closest leaf first, then scan any
// other leaf whose bound could still beat the current best distance.
template<typename Index32LeafT>
void
ClosestPointDist<Index32LeafT>::evalNode(size_t pointIndex, size_t nodeIndex) const
{
    if (nodeIndex >= mLeafRanges.size()) return;
    const Vec3R& pos = mInstancePoints[pointIndex];
    float minDist = mInstanceDistances[pointIndex];
    size_t minDistIdx = 0;
    Vec3R center;
    bool updatedDist = false;
    for (size_t i = mLeafRanges[nodeIndex].first, n = 0;
        i < mLeafRanges[nodeIndex].second; ++i, ++n)
    {
        // NOTE(review): scratch cache mutated through const_cast in a const TBB
        // body; presumably safe because TBB copies the body per task — verify.
        float& distToLeaf = const_cast<float&>(mLeafDistances[n]);
        center[0] = mLeafBoundingSpheres[i][0];
        center[1] = mLeafBoundingSpheres[i][1];
        center[2] = mLeafBoundingSpheres[i][2];
        const auto radiusSqr = mLeafBoundingSpheres[i][3];
        distToLeaf = float(std::max(0.0, (pos - center).lengthSqr() - radiusSqr));
        if (distToLeaf < minDist) {
            minDist = distToLeaf;
            minDistIdx = i;
            updatedDist = true;
        }
    }
    // No leaf in this node can improve on the current best distance.
    if (!updatedDist) return;
    evalLeaf(pointIndex, *mLeafNodes[minDistIdx]);
    // Re-check the remaining leaves against the (possibly improved) best distance.
    for (size_t i = mLeafRanges[nodeIndex].first, n = 0;
        i < mLeafRanges[nodeIndex].second; ++i, ++n)
    {
        if (mLeafDistances[n] < mInstanceDistances[pointIndex] && i != minDistIdx) {
            evalLeaf(pointIndex, *mLeafNodes[i]);
        }
    }
}
// Per-instance-point driver: rank internal nodes by a conservative distance to
// their bounding spheres, search the most promising node first, then any other
// node that could still contain a closer surface point.
template<typename Index32LeafT>
void
ClosestPointDist<Index32LeafT>::operator()(const tbb::blocked_range<size_t>& range) const
{
    Vec3R center;
    for (size_t n = range.begin(); n != range.end(); ++n) {
        const Vec3R& pos = mInstancePoints[n];
        float minDist = mInstanceDistances[n];
        size_t minDistIdx = 0;
        for (size_t i = 0, I = mNodeDistances.size(); i < I; ++i) {
            // NOTE(review): scratch cache mutated through const_cast in a const
            // TBB body; presumably safe because TBB copies the body per task — verify.
            float& distToNode = const_cast<float&>(mNodeDistances[i]);
            center[0] = mNodeBoundingSpheres[i][0];
            center[1] = mNodeBoundingSpheres[i][1];
            center[2] = mNodeBoundingSpheres[i][2];
            const auto radiusSqr = mNodeBoundingSpheres[i][3];
            distToNode = float(std::max(0.0, (pos - center).lengthSqr() - radiusSqr));
            if (distToNode < minDist) {
                minDist = distToNode;
                minDistIdx = i;
            }
        }
        evalNode(n, minDistIdx);
        for (size_t i = 0, I = mNodeDistances.size(); i < I; ++i) {
            if (mNodeDistances[i] < mInstanceDistances[n] && i != minDistIdx) {
                evalNode(n, i);
            }
        }
        // Distances were tracked squared; convert to the true distance.
        mInstanceDistances[n] = std::sqrt(mInstanceDistances[n]);
        if (mTransformPoints) mInstancePoints[n] = mSurfacePointList[mClosestPointIndex];
    }
}
/// @brief TBB reduction body that, given a newly accepted sphere, masks out
/// candidate points covered by the sphere, shrinks the remaining candidates'
/// radii (when overlapping spheres are disallowed) and tracks the largest
/// remaining candidate.
class UpdatePoints
{
public:
    UpdatePoints(
        const Vec4s& sphere,
        const std::vector<Vec3R>& points,
        std::vector<float>& distances,
        std::vector<unsigned char>& mask,
        bool overlapping);
    float radius() const { return mRadius; }
    int index() const { return mIndex; }
    inline void run(bool threaded = true);
    UpdatePoints(UpdatePoints&, tbb::split);
    inline void operator()(const tbb::blocked_range<size_t>& range);
    void join(const UpdatePoints& rhs)
    {
        // Keep the larger of the two candidates found by the joined tasks.
        if (rhs.mRadius > mRadius) {
            mRadius = rhs.mRadius;
            mIndex = rhs.mIndex;
        }
    }
private:
    const Vec4s& mSphere;// most recently accepted sphere (x, y, z, radius)
    const std::vector<Vec3R>& mPoints;// candidate sphere centers
    std::vector<float>& mDistances;// candidate radius per point
    std::vector<unsigned char>& mMask;// nonzero = point already consumed
    bool mOverlapping;
    float mRadius;// reduction result: largest remaining candidate radius
    int mIndex;// reduction result: index of that candidate
};
// Main constructor: wraps the shared candidate state for the reduction.
inline
UpdatePoints::UpdatePoints(
    const Vec4s& sphere,
    const std::vector<Vec3R>& points,
    std::vector<float>& distances,
    std::vector<unsigned char>& mask,
    bool overlapping)
    : mSphere(sphere)
    , mPoints(points)
    , mDistances(distances)
    , mMask(mask)
    , mOverlapping(overlapping)
    , mRadius(0.0)
    , mIndex(0)
{
}
// Split constructor required by tbb::parallel_reduce; shares the input state
// and carries over the current best candidate.
inline
UpdatePoints::UpdatePoints(UpdatePoints& rhs, tbb::split)
    : mSphere(rhs.mSphere)
    , mPoints(rhs.mPoints)
    , mDistances(rhs.mDistances)
    , mMask(rhs.mMask)
    , mOverlapping(rhs.mOverlapping)
    , mRadius(rhs.mRadius)
    , mIndex(rhs.mIndex)
{
}
inline void
UpdatePoints::run(bool threaded)
{
if (threaded) {
tbb::parallel_reduce(tbb::blocked_range<size_t>(0, mPoints.size()), *this);
} else {
(*this)(tbb::blocked_range<size_t>(0, mPoints.size()));
}
}
inline void
UpdatePoints::operator()(const tbb::blocked_range<size_t>& range)
{
    Vec3s pos;
    for (size_t n = range.begin(); n != range.end(); ++n) {
        if (mMask[n]) continue;// point already consumed by an earlier sphere
        // Distance from this candidate point to the new sphere's center.
        pos.x() = float(mPoints[n].x()) - mSphere[0];
        pos.y() = float(mPoints[n].y()) - mSphere[1];
        pos.z() = float(mPoints[n].z()) - mSphere[2];
        float dist = pos.length();
        if (dist < mSphere[3]) {
            // Point lies inside the new sphere: discard it as a candidate.
            mMask[n] = 1;
            continue;
        }
        if (!mOverlapping) {
            // Shrink the candidate radius so a sphere centered here would not
            // intersect the newly accepted sphere.
            mDistances[n] = std::min(mDistances[n], (dist - mSphere[3]));
        }
        // Track the largest remaining candidate for the next iteration.
        if (mDistances[n] > mRadius) {
            mRadius = mDistances[n];
            mIndex = int(n);
        }
    }
}
} // namespace v2s_internal
////////////////////////////////////////
/// @brief Fill a closed level set or fog volume with adaptively-sized spheres.
/// @details Scatters candidate centers inside the volume, assigns each the
/// distance to its closest surface point as a radius, then greedily accepts the
/// largest remaining candidate until the count/radius bounds are exhausted.
/// Spheres are appended to @a spheres ordered by radius, largest first.
template<typename GridT, typename InterrupterT>
inline void
fillWithSpheres(
    const GridT& grid,
    std::vector<openvdb::Vec4s>& spheres,
    const Vec2i& sphereCount,
    bool overlapping,
    float minRadius,
    float maxRadius,
    float isovalue,
    int instanceCount,
    InterrupterT* interrupter)
{
    spheres.clear();
    if (grid.empty()) return;
    const int
        minSphereCount = sphereCount[0],
        maxSphereCount = sphereCount[1];
    if ((minSphereCount > maxSphereCount) || (maxSphereCount < 1)) {
        // Report the full invalid range: the guard also fires when the maximum
        // count is less than one, in which case the old message ("minimum sphere
        // count exceeds maximum count") could be factually wrong (e.g. min=0, max=0).
        OPENVDB_LOG_WARN("fillWithSpheres: invalid sphere count range ["
            << minSphereCount << ", " << maxSphereCount
            << "]; requires min <= max and max >= 1");
        return;
    }
    spheres.reserve(maxSphereCount);
    auto gridPtr = grid.copy(); // shallow copy
    if (gridPtr->getGridClass() == GRID_LEVEL_SET) {
        // Clamp the isovalue to the level set's background value minus epsilon.
        // (In a valid narrow-band level set, all voxels, including background voxels,
        // have values less than or equal to the background value, so an isovalue
        // greater than or equal to the background value would produce a mask with
        // effectively infinite extent.)
        isovalue = std::min(isovalue,
            static_cast<float>(gridPtr->background() - math::Tolerance<float>::value()));
    } else if (gridPtr->getGridClass() == GRID_FOG_VOLUME) {
        // Clamp the isovalue of a fog volume between epsilon and one,
        // again to avoid a mask with infinite extent. (Recall that
        // fog volume voxel values vary from zero outside to one inside.)
        isovalue = math::Clamp(isovalue, math::Tolerance<float>::value(), 1.f);
    }
    // ClosestSurfacePoint is inaccurate for small grids.
    // Resample the input grid if it is too small.
    auto numVoxels = gridPtr->activeVoxelCount();
    if (numVoxels < 10000) {
        const auto scale = 1.0 / math::Cbrt(2.0 * 10000.0 / double(numVoxels));
        auto scaledXform = gridPtr->transform().copy();
        scaledXform->preScale(scale);
        auto newGridPtr = levelSetRebuild(*gridPtr, isovalue,
            LEVEL_SET_HALF_WIDTH, LEVEL_SET_HALF_WIDTH, scaledXform.get(), interrupter);
        const auto newNumVoxels = newGridPtr->activeVoxelCount();
        if (newNumVoxels > numVoxels) {
            OPENVDB_LOG_DEBUG_RUNTIME("fillWithSpheres: resampled input grid from "
                << numVoxels << " voxel" << (numVoxels == 1 ? "" : "s")
                << " to " << newNumVoxels << " voxel" << (newNumVoxels == 1 ? "" : "s"));
            gridPtr = newGridPtr;
            numVoxels = newNumVoxels;
        }
    }
    const bool addNarrowBandPoints = (numVoxels < 10000);
    int instances = std::max(instanceCount, maxSphereCount);
    using TreeT = typename GridT::TreeType;
    using BoolTreeT = typename TreeT::template ValueConverter<bool>::Type;
    using Int16TreeT = typename TreeT::template ValueConverter<Int16>::Type;
    using RandGen = std::mersenne_twister_engine<uint32_t, 32, 351, 175, 19,
        0xccab8ee7, 11, 0xffffffff, 7, 0x31b6ab00, 15, 0xffe50000, 17, 1812433253>; // mt11213b
    RandGen mtRand(/*seed=*/0);
    const TreeT& tree = gridPtr->tree();
    math::Transform transform = gridPtr->transform();
    std::vector<Vec3R> instancePoints;
    {
        // Compute a mask of the voxels enclosed by the isosurface.
        typename Grid<BoolTreeT>::Ptr interiorMaskPtr;
        if (gridPtr->getGridClass() == GRID_LEVEL_SET) {
            interiorMaskPtr = sdfInteriorMask(*gridPtr, isovalue);
        } else {
            // For non-level-set grids, the interior mask comprises the active voxels.
            interiorMaskPtr = typename Grid<BoolTreeT>::Ptr(Grid<BoolTreeT>::create(false));
            interiorMaskPtr->setTransform(transform.copy());
            interiorMaskPtr->tree().topologyUnion(tree);
        }
        if (interrupter && interrupter->wasInterrupted()) return;
        // If the interior mask is small and eroding it results in an empty grid,
        // use the uneroded mask instead. (But if the minimum sphere count is zero,
        // then eroding away the mask is acceptable.)
        if (!addNarrowBandPoints || (minSphereCount <= 0)) {
            erodeVoxels(interiorMaskPtr->tree(), 1);
        } else {
            auto& maskTree = interiorMaskPtr->tree();
            auto copyOfTree = StaticPtrCast<BoolTreeT>(maskTree.copy());
            erodeVoxels(maskTree, 1);
            if (maskTree.empty()) { interiorMaskPtr->setTree(copyOfTree); }
        }
        // Scatter candidate sphere centroids (instancePoints)
        instancePoints.reserve(instances);
        v2s_internal::PointAccessor ptnAcc(instancePoints);
        const auto scatterCount = Index64(addNarrowBandPoints ? (instances / 2) : instances);
        UniformPointScatter<v2s_internal::PointAccessor, RandGen, InterrupterT> scatter(
            ptnAcc, scatterCount, mtRand, 1.0, interrupter);
        scatter(*interiorMaskPtr);
    }
    if (interrupter && interrupter->wasInterrupted()) return;
    auto csp = ClosestSurfacePoint<GridT>::create(*gridPtr, isovalue, interrupter);
    if (!csp) return;
    // Add extra instance points in the interior narrow band.
    if (instancePoints.size() < size_t(instances)) {
        const Int16TreeT& signTree = csp->signTree();
        for (auto leafIt = signTree.cbeginLeaf(); leafIt; ++leafIt) {
            for (auto it = leafIt->cbeginValueOn(); it; ++it) {
                const int flags = int(it.getValue());
                if (!(volume_to_mesh_internal::EDGES & flags)
                    && (volume_to_mesh_internal::INSIDE & flags))
                {
                    instancePoints.push_back(transform.indexToWorld(it.getCoord()));
                }
                if (instancePoints.size() == size_t(instances)) break;
            }
            if (instancePoints.size() == size_t(instances)) break;
        }
    }
    if (interrupter && interrupter->wasInterrupted()) return;
    // Assign a radius to each candidate sphere. The radius is the world-space
    // distance from the sphere's center to the closest surface point.
    std::vector<float> instanceRadius;
    if (!csp->search(instancePoints, instanceRadius)) return;
    float largestRadius = 0.0;
    int largestRadiusIdx = 0;
    for (size_t n = 0, N = instancePoints.size(); n < N; ++n) {
        if (instanceRadius[n] > largestRadius) {
            largestRadius = instanceRadius[n];
            largestRadiusIdx = int(n);
        }
    }
    std::vector<unsigned char> instanceMask(instancePoints.size(), 0);
    // Convert the user-supplied radius bounds from voxel units to world units.
    minRadius = float(minRadius * transform.voxelSize()[0]);
    maxRadius = float(maxRadius * transform.voxelSize()[0]);
    // Greedily accept the largest remaining candidate, then update the other
    // candidates (mask covered points, shrink radii in non-overlapping mode).
    for (size_t s = 0, S = std::min(size_t(maxSphereCount), instancePoints.size()); s < S; ++s) {
        if (interrupter && interrupter->wasInterrupted()) return;
        largestRadius = std::min(maxRadius, largestRadius);
        if ((int(s) >= minSphereCount) && (largestRadius < minRadius)) break;
        const Vec4s sphere(
            float(instancePoints[largestRadiusIdx].x()),
            float(instancePoints[largestRadiusIdx].y()),
            float(instancePoints[largestRadiusIdx].z()),
            largestRadius);
        spheres.push_back(sphere);
        instanceMask[largestRadiusIdx] = 1;
        v2s_internal::UpdatePoints op(
            sphere, instancePoints, instanceRadius, instanceMask, overlapping);
        op.run();
        largestRadius = op.radius();
        largestRadiusIdx = op.index();
    }
} // fillWithSpheres
////////////////////////////////////////
// Factory method: returns a null pointer when initialization fails (for
// example, when the interrupter fires).
template<typename GridT>
template<typename InterrupterT>
inline typename ClosestSurfacePoint<GridT>::Ptr
ClosestSurfacePoint<GridT>::create(const GridT& grid, float isovalue, InterrupterT* interrupter)
{
    Ptr csp(new ClosestSurfacePoint);
    const bool initialized = csp->initialize(grid, isovalue, interrupter);
    return initialized ? std::move(csp) : Ptr();
}
// Build the acceleration structure: extract the surface point cloud, group the
// index leaf nodes under their parent internal nodes, and compute bounding
// spheres for both levels. Returns false if interrupted.
template<typename GridT>
template<typename InterrupterT>
inline bool
ClosestSurfacePoint<GridT>::initialize(
    const GridT& grid, float isovalue, InterrupterT* interrupter)
{
    using Index32LeafManagerT = tree::LeafManager<Index32TreeT>;
    using ValueT = typename GridT::ValueType;
    const TreeT& tree = grid.tree();
    const math::Transform& transform = grid.transform();
    { // Extract surface point cloud
        BoolTreeT mask(false);
        volume_to_mesh_internal::identifySurfaceIntersectingVoxels(mask, tree, ValueT(isovalue));
        mSignTreePt.reset(new Int16TreeT(0));
        mIdxTreePt.reset(new Index32TreeT(std::numeric_limits<Index32>::max()));
        volume_to_mesh_internal::computeAuxiliaryData(
            *mSignTreePt, *mIdxTreePt, mask, tree, ValueT(isovalue));
        if (interrupter && interrupter->wasInterrupted()) return false;
        // count unique points
        using Int16LeafNodeType = typename Int16TreeT::LeafNodeType;
        using Index32LeafNodeType = typename Index32TreeT::LeafNodeType;
        std::vector<Int16LeafNodeType*> signFlagsLeafNodes;
        mSignTreePt->getNodes(signFlagsLeafNodes);
        const tbb::blocked_range<size_t> auxiliaryLeafNodeRange(0, signFlagsLeafNodes.size());
        std::unique_ptr<Index32[]> leafNodeOffsets(new Index32[signFlagsLeafNodes.size()]);
        tbb::parallel_for(auxiliaryLeafNodeRange,
            volume_to_mesh_internal::LeafNodePointCount<Int16LeafNodeType::LOG2DIM>
            (signFlagsLeafNodes, leafNodeOffsets));
        {
            // Exclusive prefix sum: convert per-leaf point counts into offsets
            // into the flat surface point list.
            Index32 pointCount = 0;
            for (size_t n = 0, N = signFlagsLeafNodes.size(); n < N; ++n) {
                const Index32 tmp = leafNodeOffsets[n];
                leafNodeOffsets[n] = pointCount;
                pointCount += tmp;
            }
            mPointListSize = size_t(pointCount);
            mSurfacePointList.reset(new Vec3s[mPointListSize]);
        }
        std::vector<Index32LeafNodeType*> pointIndexLeafNodes;
        mIdxTreePt->getNodes(pointIndexLeafNodes);
        tbb::parallel_for(auxiliaryLeafNodeRange, volume_to_mesh_internal::ComputePoints<TreeT>(
            mSurfacePointList.get(), tree, pointIndexLeafNodes,
            signFlagsLeafNodes, leafNodeOffsets, transform, ValueT(isovalue)));
    }
    if (interrupter && interrupter->wasInterrupted()) return false;
    Index32LeafManagerT idxLeafs(*mIdxTreePt);
    using Index32RootNodeT = typename Index32TreeT::RootNodeType;
    using Index32NodeChainT = typename Index32RootNodeT::NodeChainType;
    static_assert(Index32NodeChainT::Size > 1,
        "expected tree depth greater than one");
    using Index32InternalNodeT = typename Index32NodeChainT::template Get<1>;
    // Collect the internal nodes one level above the leaves.
    typename Index32TreeT::NodeCIter nIt = mIdxTreePt->cbeginNode();
    nIt.setMinDepth(Index32TreeT::NodeCIter::LEAF_DEPTH - 1);
    nIt.setMaxDepth(Index32TreeT::NodeCIter::LEAF_DEPTH - 1);
    std::vector<const Index32InternalNodeT*> internalNodes;
    const Index32InternalNodeT* node = nullptr;
    for (; nIt; ++nIt) {
        nIt.getNode(node);
        if (node) internalNodes.push_back(node);
    }
    // Flatten the leaves grouped by parent node; mLeafRanges[n] records the
    // [begin, end) slice of mLeafNodes belonging to internal node n.
    std::vector<IndexRange>().swap(mLeafRanges);
    mLeafRanges.resize(internalNodes.size());
    std::vector<const Index32LeafT*>().swap(mLeafNodes);
    mLeafNodes.reserve(idxLeafs.leafCount());
    typename Index32InternalNodeT::ChildOnCIter leafIt;
    mMaxNodeLeafs = 0;
    for (size_t n = 0, N = internalNodes.size(); n < N; ++n) {
        mLeafRanges[n].first = mLeafNodes.size();
        size_t leafCount = 0;
        for (leafIt = internalNodes[n]->cbeginChildOn(); leafIt; ++leafIt) {
            mLeafNodes.push_back(&(*leafIt));
            ++leafCount;
        }
        mMaxNodeLeafs = std::max(leafCount, mMaxNodeLeafs);
        mLeafRanges[n].second = mLeafNodes.size();
    }
    // Compute per-leaf bounding spheres, then per-node spheres enclosing them.
    std::vector<Vec4R>().swap(mLeafBoundingSpheres);
    mLeafBoundingSpheres.resize(mLeafNodes.size());
    v2s_internal::LeafOp<Index32LeafT> leafBS(
        mLeafBoundingSpheres, mLeafNodes, transform, mSurfacePointList);
    leafBS.run();
    std::vector<Vec4R>().swap(mNodeBoundingSpheres);
    mNodeBoundingSpheres.resize(internalNodes.size());
    v2s_internal::NodeOp nodeBS(mNodeBoundingSpheres, mLeafRanges, mLeafBoundingSpheres);
    nodeBS.run();
    return true;
} // ClosestSurfacePoint::initialize
// Shared implementation: compute the closest surface point distance for every
// input point and, when transformPoints is true, overwrite each input point
// with its closest surface point.
template<typename GridT>
inline bool
ClosestSurfacePoint<GridT>::search(std::vector<Vec3R>& points,
    std::vector<float>& distances, bool transformPoints)
{
    // Start every search from "infinitely far away".
    distances.clear();
    distances.resize(points.size(), std::numeric_limits<float>::infinity());
    v2s_internal::ClosestPointDist<Index32LeafT> cpd(points, distances, mSurfacePointList,
        mLeafNodes, mLeafRanges, mLeafBoundingSpheres, mNodeBoundingSpheres,
        mMaxNodeLeafs, transformPoints);
    cpd.run();
    return true;
}
// Read-only overload: the non-const implementation never mutates the points
// when transformPoints is false, so casting away constness here is safe.
template<typename GridT>
inline bool
ClosestSurfacePoint<GridT>::search(const std::vector<Vec3R>& points, std::vector<float>& distances)
{
    auto& mutablePoints = const_cast<std::vector<Vec3R>&>(points);
    return this->search(mutablePoints, distances, /*transformPoints=*/false);
}
// Like search(), but each input point is replaced in place by its closest
// surface point.
template<typename GridT>
inline bool
ClosestSurfacePoint<GridT>::searchAndReplace(std::vector<Vec3R>& points,
    std::vector<float>& distances)
{
    const bool transformPoints = true;
    return this->search(points, distances, transformPoints);
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_VOLUME_TO_MESH_HAS_BEEN_INCLUDED
| 32,716 | C | 31.914487 | 99 | 0.645861 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/LevelSetUtil.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file tools/LevelSetUtil.h
///
/// @brief Miscellaneous utility methods that operate primarily
/// or exclusively on level set grids.
///
/// @author Mihai Alden
#ifndef OPENVDB_TOOLS_LEVEL_SET_UTIL_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_LEVEL_SET_UTIL_HAS_BEEN_INCLUDED
#include "MeshToVolume.h" // for traceExteriorBoundaries
#include "SignedFloodFill.h" // for signedFloodFillWithValues
#include <openvdb/Types.h>
#include <openvdb/Grid.h>
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>
#include <tbb/parallel_reduce.h>
#include <tbb/parallel_sort.h>
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <deque>
#include <limits>
#include <memory>
#include <set>
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
// MS Visual C++ requires this extra level of indirection in order to compile
// THIS MUST EXIST IN AN UNNAMED NAMESPACE IN ORDER TO COMPILE ON WINDOWS
namespace {
/// Return the largest representable value of the grid's value type.
/// (Indirection helper required for MSVC; see note above.)
template<typename GridType>
inline typename GridType::ValueType lsutilGridMax()
{
    using ValueT = typename GridType::ValueType;
    return std::numeric_limits<ValueT>::max();
}
template<typename GridType>
inline typename GridType::ValueType lsutilGridZero()
{
return zeroVal<typename GridType::ValueType>();
}
} // unnamed namespace
////////////////////////////////////////
/// @brief Threaded method to convert a sparse level set/SDF into a sparse fog volume
///
/// @details For a level set, the active and negative-valued interior half of the
/// narrow band becomes a linear ramp from 0 to 1; the inactive interior becomes
/// active with a constant value of 1; and the exterior, including the background
/// and the active exterior half of the narrow band, becomes inactive with a constant
/// value of 0. The interior, though active, remains sparse.
/// @details For a generic SDF, a specified cutoff distance determines the width
/// of the ramp, but otherwise the result is the same as for a level set.
///
/// @note The transformation is performed in place: @a grid is modified.
///
/// @param grid            level set/SDF grid to transform
/// @param cutoffDistance  optional world space cutoff distance for the ramp
///                        (automatically clamped if greater than the interior
///                        narrow band width)
template<class GridType>
inline void
sdfToFogVolume(
    GridType& grid,
    typename GridType::ValueType cutoffDistance = lsutilGridMax<GridType>());
/// @brief Threaded method to construct a boolean mask that represents interior regions
///        in a signed distance field.
///
/// @return A shared pointer to either a boolean grid or tree with the same tree
///         configuration and potentially transform as the input @c volume and whose active
///         and @c true values correspond to the interior of the input signed distance field.
///
/// @param volume    Signed distance field / level set volume.
/// @param isovalue  Threshold below which values are considered part of the
///                  interior region.
template<class GridOrTreeType>
inline typename GridOrTreeType::template ValueConverter<bool>::Type::Ptr
sdfInteriorMask(
    const GridOrTreeType& volume,
    typename GridOrTreeType::ValueType isovalue = lsutilGridZero<GridOrTreeType>());
/// @brief Extracts the interior regions of a signed distance field and topologically enclosed
///        (watertight) regions of value greater than the @a isovalue (cavities) that can arise
///        as the result of CSG union operations between different shapes where at least one of
///        the shapes has a concavity that is capped.
///
///        For example the enclosed region of a capped bottle would include the walls and
///        the interior cavity.
///
/// @return A shared pointer to either a boolean grid or tree with the same tree configuration
///         and potentially transform as the input @c volume and whose active and @c true values
///         correspond to the interior and enclosed regions in the input signed distance field.
///
/// @param volume    Signed distance field / level set volume.
/// @param isovalue  Threshold below which values are considered part of the interior region.
/// @param fillMask  Optional boolean tree, when provided enclosed cavity regions that are not
///                  completely filled by this mask are ignored.
///
///                  For instance if the fill mask does not completely fill the bottle in the
///                  previous example only the walls and cap are returned and the interior
///                  cavity will be ignored.
template<typename GridOrTreeType>
inline typename GridOrTreeType::template ValueConverter<bool>::Type::Ptr
extractEnclosedRegion(
    const GridOrTreeType& volume,
    typename GridOrTreeType::ValueType isovalue = lsutilGridZero<GridOrTreeType>(),
    const typename TreeAdapter<GridOrTreeType>::TreeType::template ValueConverter<bool>::Type*
        fillMask = nullptr);
/// @brief Return a mask of the voxels that intersect the implicit surface with
///        the given @a isovalue.
///
/// @param volume    Signed distance field / level set volume.
/// @param isovalue  The crossing point that is considered the surface.
template<typename GridOrTreeType>
inline typename GridOrTreeType::template ValueConverter<bool>::Type::Ptr
extractIsosurfaceMask(const GridOrTreeType& volume, typename GridOrTreeType::ValueType isovalue);
/// @brief Return a mask for each connected component of the given grid's active voxels.
///
/// @param volume  Input grid or tree
/// @param masks   Output set of disjoint active topology masks sorted in descending order
///                based on the active voxel count.
template<typename GridOrTreeType>
inline void
extractActiveVoxelSegmentMasks(const GridOrTreeType& volume,
    std::vector<typename GridOrTreeType::template ValueConverter<bool>::Type::Ptr>& masks);
/// @brief Separates disjoint active topology components into distinct grids or trees.
///
/// @details Supports volumes with active tiles.
///
/// @param volume    Input grid or tree
/// @param segments  Output set of disjoint active topology components sorted in
///                  descending order based on the active voxel count.
template<typename GridOrTreeType>
inline void
segmentActiveVoxels(const GridOrTreeType& volume,
    std::vector<typename GridOrTreeType::Ptr>& segments);
/// @brief Separates disjoint SDF surfaces into distinct grids or trees.
///
/// @details Supports asymmetric interior / exterior narrowband widths and
///          SDF volumes with dense interior regions.
///
/// @param volume    Input signed distance field / level set volume
/// @param segments  Output set of disjoint SDF surfaces found in @a volume sorted in
///                  descending order based on the surface intersecting voxel count.
template<typename GridOrTreeType>
inline void
segmentSDF(const GridOrTreeType& volume, std::vector<typename GridOrTreeType::Ptr>& segments);
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Internal utility objects and implementation details
namespace level_set_util_internal {
template<typename LeafNodeType>
struct MaskInteriorVoxels {
    using ValueType = typename LeafNodeType::ValueType;
    using BoolLeafNodeType = tree::LeafNode<bool, LeafNodeType::LOG2DIM>;

    /// @brief TBB body that builds boolean leaf nodes flagging voxels whose
    ///     value is below @a isovalue (interior voxels of an SDF).
    /// @details For each input leaf, the matching slot in @a maskNodes receives
    ///     either a new boolean leaf containing the interior voxels or nullptr
    ///     when the leaf has no interior voxels.
    MaskInteriorVoxels(
        ValueType isovalue, const LeafNodeType ** nodes, BoolLeafNodeType ** maskNodes)
        : mNodes(nodes), mMaskNodes(maskNodes), mIsovalue(isovalue)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        BoolLeafNodeType* scratch = nullptr;
        for (size_t idx = range.begin(); idx < range.end(); ++idx) {
            mMaskNodes[idx] = nullptr;
            const LeafNodeType& leaf = *mNodes[idx];
            // Recycle the scratch node left over from the previous iteration
            // (it is guaranteed empty); otherwise allocate a fresh one.
            if (scratch) {
                scratch->setOrigin(leaf.origin());
            } else {
                scratch = new BoolLeafNodeType(leaf.origin(), false);
            }
            const ValueType* data = &leaf.getValue(0);
            for (Index offset = 0; offset < LeafNodeType::SIZE; ++offset) {
                if (data[offset] < mIsovalue) scratch->setValueOn(offset, true);
            }
            // Hand the node off only if it actually contains interior voxels.
            if (scratch->onVoxelCount() > 0) {
                mMaskNodes[idx] = scratch;
                scratch = nullptr;
            }
        }
        delete scratch; // deleting nullptr is a no-op
    }

    LeafNodeType const * const * const mNodes;
    BoolLeafNodeType ** const mMaskNodes;
    ValueType const mIsovalue;
}; // MaskInteriorVoxels
template<typename TreeType, typename InternalNodeType>
struct MaskInteriorTiles {
    using ValueType = typename TreeType::ValueType;

    /// @brief TBB body that activates (and sets to @c true) every tile value of
    ///     the given mask internal nodes whose corresponding value in the
    ///     reference tree lies below the isovalue (i.e. interior regions).
    MaskInteriorTiles(ValueType isovalue, const TreeType& tree, InternalNodeType ** maskNodes)
        : mTree(&tree), mMaskNodes(maskNodes), mIsovalue(isovalue) { }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        tree::ValueAccessor<const TreeType> distAcc(*mTree);
        for (size_t idx = range.begin(); idx < range.end(); ++idx) {
            for (typename InternalNodeType::ValueAllIter it = mMaskNodes[idx]->beginValueAll();
                it; ++it)
            {
                if (distAcc.getValue(it.getCoord()) < mIsovalue) {
                    it.setValue(true);
                    it.setValueOn(true);
                }
            }
        }
    }

    TreeType const * const mTree;
    InternalNodeType ** const mMaskNodes;
    ValueType const mIsovalue;
}; // MaskInteriorTiles
template<typename TreeType>
struct PopulateTree {
    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;

    /// @brief Parallel-reduce body that transfers ownership of the given leaf
    ///     nodes into @a tree.
    /// @details When @a nodexIndexMap is non-null, the reduction range indexes
    ///     a prefix-sum table and entries in @a leafnodes may be nullptr
    ///     (skipped); otherwise the range indexes @a leafnodes directly.
    PopulateTree(TreeType& tree, LeafNodeType** leafnodes,
        const size_t * nodexIndexMap, ValueType background)
        : mNewTree(background)
        , mTreePt(&tree)
        , mNodes(leafnodes)
        , mNodeIndexMap(nodexIndexMap)
    {
    }

    /// Split constructor: each task accumulates into a private tree that is
    /// merged back in join().
    PopulateTree(PopulateTree& rhs, tbb::split)
        : mNewTree(rhs.mNewTree.background())
        , mTreePt(&mNewTree)
        , mNodes(rhs.mNodes)
        , mNodeIndexMap(rhs.mNodeIndexMap)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) {
        tree::ValueAccessor<TreeType> acc(*mTreePt);
        if (!mNodeIndexMap) {
            // Flat mode: one leaf node per range element.
            for (size_t i = range.begin(); i < range.end(); ++i) {
                acc.addLeaf(mNodes[i]);
            }
        } else {
            // Indexed mode: each range element covers a contiguous run of
            // (possibly null) leaf-node slots.
            for (size_t i = range.begin(); i < range.end(); ++i) {
                for (size_t j = mNodeIndexMap[i], last = mNodeIndexMap[i + 1]; j < last; ++j) {
                    if (mNodes[j] != nullptr) acc.addLeaf(mNodes[j]);
                }
            }
        }
    }

    void join(PopulateTree& rhs) { mTreePt->merge(*rhs.mTreePt); }

private:
    TreeType mNewTree;
    TreeType * const mTreePt;
    LeafNodeType ** const mNodes;
    size_t const * const mNodeIndexMap;
}; // PopulateTree
/// @brief Negative active values are set @c 0, everything else is set to @c 1.
template<typename LeafNodeType>
struct LabelBoundaryVoxels {
    using ValueType = typename LeafNodeType::ValueType;
    using CharLeafNodeType = tree::LeafNode<char, LeafNodeType::LOG2DIM>;

    /// @details For each input leaf with active voxels, the matching slot in
    ///     @a maskNodes receives a new char leaf (background 1) whose active
    ///     voxels are labeled 0 (below @a isovalue) or 1 (at or above it);
    ///     slots for leaves without active voxels are set to nullptr.
    LabelBoundaryVoxels(
        ValueType isovalue, const LeafNodeType ** nodes, CharLeafNodeType ** maskNodes)
        : mNodes(nodes), mMaskNodes(maskNodes), mIsovalue(isovalue)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        CharLeafNodeType* scratch = nullptr;
        for (size_t idx = range.begin(); idx < range.end(); ++idx) {
            mMaskNodes[idx] = nullptr;
            const LeafNodeType& leaf = *mNodes[idx];
            // Reuse the scratch node when the previous leaf produced no output.
            if (scratch) {
                scratch->setOrigin(leaf.origin());
            } else {
                scratch = new CharLeafNodeType(leaf.origin(), 1);
            }
            for (typename LeafNodeType::ValueOnCIter it = leaf.cbeginValueOn(); it; ++it) {
                scratch->setValueOn(it.pos(), ((*it - mIsovalue) < 0.0) ? 0 : 1);
            }
            if (scratch->onVoxelCount() > 0) {
                mMaskNodes[idx] = scratch;
                scratch = nullptr;
            }
        }
        delete scratch; // nullptr-safe
    }

    LeafNodeType const * const * const mNodes;
    CharLeafNodeType ** const mMaskNodes;
    ValueType const mIsovalue;
}; // LabelBoundaryVoxels
template<typename LeafNodeType>
struct FlipRegionSign {
    using ValueType = typename LeafNodeType::ValueType;

    /// @brief TBB body that inverts the sign convention of label buffers in
    ///     place: values below zero become +1 and all others become -1.
    FlipRegionSign(LeafNodeType ** nodes) : mNodes(nodes) { }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        for (size_t idx = range.begin(); idx < range.end(); ++idx) {
            // Mutate the leaf buffer directly; const_cast mirrors the raw
            // buffer access pattern used elsewhere in this file.
            ValueType* data = const_cast<ValueType*>(&mNodes[idx]->getValue(0));
            for (Index offset = 0; offset < LeafNodeType::SIZE; ++offset) {
                data[offset] = (data[offset] < 0) ? 1 : -1;
            }
        }
    }

    LeafNodeType ** const mNodes;
}; // FlipRegionSign
template<typename LeafNodeType>
struct FindMinVoxelValue {
    using ValueType = typename LeafNodeType::ValueType;

    /// @brief Parallel-reduce body that computes the minimum voxel value over
    ///     a list of leaf nodes, considering every voxel (active or not).
    FindMinVoxelValue(LeafNodeType const * const * const leafnodes)
        : minValue(std::numeric_limits<ValueType>::max())
        , mNodes(leafnodes)
    {
    }

    FindMinVoxelValue(FindMinVoxelValue& rhs, tbb::split)
        : minValue(std::numeric_limits<ValueType>::max())
        , mNodes(rhs.mNodes)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) {
        for (size_t idx = range.begin(); idx < range.end(); ++idx) {
            const ValueType* buffer = mNodes[idx]->buffer().data();
            for (Index offset = 0; offset < LeafNodeType::SIZE; ++offset) {
                if (buffer[offset] < minValue) minValue = buffer[offset];
            }
        }
    }

    void join(FindMinVoxelValue& rhs) { minValue = std::min(minValue, rhs.minValue); }

    ValueType minValue;
    LeafNodeType const * const * const mNodes;
}; // FindMinVoxelValue
template<typename InternalNodeType>
struct FindMinTileValue {
    using ValueType = typename InternalNodeType::ValueType;

    /// @brief Parallel-reduce body that computes the minimum value over all
    ///     tiles (active and inactive) of a list of internal nodes.
    FindMinTileValue(InternalNodeType const * const * const nodes)
        : minValue(std::numeric_limits<ValueType>::max())
        , mNodes(nodes)
    {
    }

    FindMinTileValue(FindMinTileValue& rhs, tbb::split)
        : minValue(std::numeric_limits<ValueType>::max())
        , mNodes(rhs.mNodes)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) {
        for (size_t idx = range.begin(); idx < range.end(); ++idx) {
            for (typename InternalNodeType::ValueAllCIter it = mNodes[idx]->beginValueAll();
                it; ++it)
            {
                if (*it < minValue) minValue = *it;
            }
        }
    }

    void join(FindMinTileValue& rhs) { minValue = std::min(minValue, rhs.minValue); }

    ValueType minValue;
    InternalNodeType const * const * const mNodes;
}; // FindMinTileValue
/// @brief TBB body that converts SDF leaf-node voxels to fog-volume densities
///     in place: exterior (positive) voxels become inactive 0, interior voxels
///     become active values in (0, 1]. Leaves left with no active voxels are
///     deleted and their slot set to nullptr.
/// @note The math only produces positive densities if mWeight is negative,
///     i.e. this assumes @a cutoffDistance is passed in as a negative
///     distance (presumably the caller negates it) — TODO confirm against
///     sdfToFogVolume().
template<typename LeafNodeType>
struct SDFVoxelsToFogVolume {
    using ValueType = typename LeafNodeType::ValueType;
    SDFVoxelsToFogVolume(LeafNodeType ** nodes, ValueType cutoffDistance)
        : mNodes(nodes), mWeight(ValueType(1.0) / cutoffDistance)
    {
    }
    void operator()(const tbb::blocked_range<size_t>& range) const {
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            LeafNodeType& node = *mNodes[n];
            node.setValuesOff();
            ValueType* values = node.buffer().data();
            for (Index i = 0; i < LeafNodeType::SIZE; ++i) {
                // Clamp exterior voxels to 0; scale interior distances into a
                // density ramp. Only voxels that end up positive are activated.
                values[i] = values[i] > ValueType(0.0) ? ValueType(0.0) : values[i] * mWeight;
                if (values[i] > ValueType(0.0)) node.setValueOn(i);
            }
            // Discard leaves that contain no fog density at all.
            if (node.onVoxelCount() == 0) {
                delete mNodes[n];
                mNodes[n] = nullptr;
            }
        }
    }
    LeafNodeType ** const mNodes;
    ValueType const mWeight;  // reciprocal of the (signed) cutoff distance
}; // SDFVoxelsToFogVolume
template<typename TreeType, typename InternalNodeType>
struct SDFTilesToFogVolume {
    /// @brief TBB body that converts tile values of the given internal nodes:
    ///     tiles whose corresponding distance value in the reference tree is
    ///     negative (interior) become active with value one.
    SDFTilesToFogVolume(const TreeType& tree, InternalNodeType ** nodes)
        : mTree(&tree), mNodes(nodes) { }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        using ValueType = typename TreeType::ValueType;
        tree::ValueAccessor<const TreeType> distAcc(*mTree);
        for (size_t idx = range.begin(); idx < range.end(); ++idx) {
            for (typename InternalNodeType::ValueAllIter it = mNodes[idx]->beginValueAll();
                it; ++it)
            {
                // Skip exterior tiles; only interior tiles become fog.
                if (!(distAcc.getValue(it.getCoord()) < ValueType(0.0))) continue;
                it.setValue(ValueType(1.0));
                it.setValueOn(true);
            }
        }
    }

    TreeType const * const mTree;
    InternalNodeType ** const mNodes;
}; // SDFTilesToFogVolume
/// @brief TBB body that, for each fill-mask leaf node, detects the active mask
///     voxels that border the exterior of the distance field: voxels with at
///     least one face neighbor that is outside the fill mask and has a distance
///     value greater than the isovalue.
/// @details For each input node, a new boolean leaf containing only the detected
///     boundary voxels is written to the corresponding slot of @a newNodes, or
///     nullptr when no boundary voxels are found.
template<typename TreeType>
struct FillMaskBoundary {
    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;
    FillMaskBoundary(const TreeType& tree, ValueType isovalue, const BoolTreeType& fillMask,
        const BoolLeafNodeType ** fillNodes, BoolLeafNodeType ** newNodes)
        : mTree(&tree)
        , mFillMask(&fillMask)
        , mFillNodes(fillNodes)
        , mNewNodes(newNodes)
        , mIsovalue(isovalue)
    {
    }
    void operator()(const tbb::blocked_range<size_t>& range) const {
        tree::ValueAccessor<const BoolTreeType> maskAcc(*mFillMask);
        tree::ValueAccessor<const TreeType> distAcc(*mTree);
        // Scratch per-voxel marker buffer, reused for every node in the range.
        std::unique_ptr<char[]> valueMask(new char[BoolLeafNodeType::SIZE]);
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            mNewNodes[n] = nullptr;
            const BoolLeafNodeType& node = *mFillNodes[n];
            const Coord& origin = node.origin();
            const bool denseNode = node.isDense();
            // possible early out if the fill mask is dense:
            // a dense node surrounded by six dense face neighbors cannot
            // contain any boundary voxels.
            if (denseNode) {
                int denseNeighbors = 0;
                const BoolLeafNodeType* neighborNode =
                    maskAcc.probeConstLeaf(origin.offsetBy(-1, 0, 0));
                if (neighborNode && neighborNode->isDense()) ++denseNeighbors;
                neighborNode = maskAcc.probeConstLeaf(origin.offsetBy(BoolLeafNodeType::DIM, 0, 0));
                if (neighborNode && neighborNode->isDense()) ++denseNeighbors;
                neighborNode = maskAcc.probeConstLeaf(origin.offsetBy(0, -1, 0));
                if (neighborNode && neighborNode->isDense()) ++denseNeighbors;
                neighborNode = maskAcc.probeConstLeaf(origin.offsetBy(0, BoolLeafNodeType::DIM, 0));
                if (neighborNode && neighborNode->isDense()) ++denseNeighbors;
                neighborNode = maskAcc.probeConstLeaf(origin.offsetBy(0, 0, -1));
                if (neighborNode && neighborNode->isDense()) ++denseNeighbors;
                neighborNode = maskAcc.probeConstLeaf(origin.offsetBy(0, 0, BoolLeafNodeType::DIM));
                if (neighborNode && neighborNode->isDense()) ++denseNeighbors;
                if (denseNeighbors == 6) continue;
            }
            // reset the scratch value mask
            memset(valueMask.get(), 0, sizeof(char) * BoolLeafNodeType::SIZE);
            const typename TreeType::LeafNodeType* distNode = distAcc.probeConstLeaf(origin);
            // check internal voxel neighbors
            bool earlyTermination = false;
            if (!denseNode) {
                if (distNode) {
                    evalInternalNeighborsP(valueMask.get(), node, *distNode);
                    evalInternalNeighborsN(valueMask.get(), node, *distNode);
                } else if (distAcc.getValue(origin) > mIsovalue) {
                    // No distance leaf here and the tile value is exterior:
                    // the first internal on/off transition marks a boundary
                    // and the whole node can be flagged by a single voxel.
                    earlyTermination = evalInternalNeighborsP(valueMask.get(), node);
                    if (!earlyTermination) {
                        earlyTermination = evalInternalNeighborsN(valueMask.get(), node);
                    }
                }
            }
            // check external voxel neighbors (the six node faces)
            if (!earlyTermination) {
                evalExternalNeighborsX<true>(valueMask.get(), node, maskAcc, distAcc);
                evalExternalNeighborsX<false>(valueMask.get(), node, maskAcc, distAcc);
                evalExternalNeighborsY<true>(valueMask.get(), node, maskAcc, distAcc);
                evalExternalNeighborsY<false>(valueMask.get(), node, maskAcc, distAcc);
                evalExternalNeighborsZ<true>(valueMask.get(), node, maskAcc, distAcc);
                evalExternalNeighborsZ<false>(valueMask.get(), node, maskAcc, distAcc);
            }
            // Export marked boundary voxels.
            int numBoundaryValues = 0;
            for (Index i = 0, I = BoolLeafNodeType::SIZE; i < I; ++i) {
                numBoundaryValues += valueMask[i] == 1;
            }
            if (numBoundaryValues > 0) {
                mNewNodes[n] = new BoolLeafNodeType(origin, false);
                for (Index i = 0, I = BoolLeafNodeType::SIZE; i < I; ++i) {
                    if (valueMask[i] == 1) mNewNodes[n]->setValueOn(i);
                }
            }
        }
    }
private:
    // Check internal voxel neighbors in positive {x, y, z} directions.
    // Marks active voxels whose +axis neighbor is inactive in the mask but
    // exterior (above the isovalue) in the distance leaf.
    void evalInternalNeighborsP(char* valueMask, const BoolLeafNodeType& node,
        const LeafNodeType& distNode) const
    {
        for (Index x = 0; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM - 1; ++z) {
                    const Index pos = yPos + z;
                    if (valueMask[pos] != 0 || !node.isValueOn(pos)) continue;
                    if (!node.isValueOn(pos + 1) && distNode.getValue(pos + 1) > mIsovalue) {
                        valueMask[pos] = 1;
                    }
                }
            }
        }
        for (Index x = 0; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM - 1; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (valueMask[pos] != 0 || !node.isValueOn(pos)) continue;
                    if (!node.isValueOn(pos + BoolLeafNodeType::DIM) &&
                        distNode.getValue(pos + BoolLeafNodeType::DIM) > mIsovalue) {
                        valueMask[pos] = 1;
                    }
                }
            }
        }
        for (Index x = 0; x < BoolLeafNodeType::DIM - 1; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (valueMask[pos] != 0 || !node.isValueOn(pos)) continue;
                    if (!node.isValueOn(pos + BoolLeafNodeType::DIM * BoolLeafNodeType::DIM) &&
                        (distNode.getValue(pos + BoolLeafNodeType::DIM * BoolLeafNodeType::DIM)
                             > mIsovalue))
                    {
                        valueMask[pos] = 1;
                    }
                }
            }
        }
    }
    // Overload used when no distance leaf exists and the region is known to be
    // exterior: any internal on/off transition is a boundary, so mark the
    // first one found and return true for early termination.
    bool evalInternalNeighborsP(char* valueMask, const BoolLeafNodeType& node) const {
        for (Index x = 0; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM - 1; ++z) {
                    const Index pos = yPos + z;
                    if (node.isValueOn(pos) && !node.isValueOn(pos + 1)) {
                        valueMask[pos] = 1;
                        return true;
                    }
                }
            }
        }
        for (Index x = 0; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM - 1; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (node.isValueOn(pos) && !node.isValueOn(pos + BoolLeafNodeType::DIM)) {
                        valueMask[pos] = 1;
                        return true;
                    }
                }
            }
        }
        for (Index x = 0; x < BoolLeafNodeType::DIM - 1; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (node.isValueOn(pos) &&
                        !node.isValueOn(pos + BoolLeafNodeType::DIM * BoolLeafNodeType::DIM)) {
                        valueMask[pos] = 1;
                        return true;
                    }
                }
            }
        }
        return false;
    }
    // Check internal voxel neighbors in negative {x, y, z} directions.
    void evalInternalNeighborsN(char* valueMask, const BoolLeafNodeType& node,
        const LeafNodeType& distNode) const
    {
        for (Index x = 0; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 1; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (valueMask[pos] != 0 || !node.isValueOn(pos)) continue;
                    if (!node.isValueOn(pos - 1) && distNode.getValue(pos - 1) > mIsovalue) {
                        valueMask[pos] = 1;
                    }
                }
            }
        }
        for (Index x = 0; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 1; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (valueMask[pos] != 0 || !node.isValueOn(pos)) continue;
                    if (!node.isValueOn(pos - BoolLeafNodeType::DIM) &&
                        distNode.getValue(pos - BoolLeafNodeType::DIM) > mIsovalue) {
                        valueMask[pos] = 1;
                    }
                }
            }
        }
        for (Index x = 1; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (valueMask[pos] != 0 || !node.isValueOn(pos)) continue;
                    if (!node.isValueOn(pos - BoolLeafNodeType::DIM * BoolLeafNodeType::DIM) &&
                        (distNode.getValue(pos - BoolLeafNodeType::DIM * BoolLeafNodeType::DIM)
                             > mIsovalue))
                    {
                        valueMask[pos] = 1;
                    }
                }
            }
        }
    }
    // Early-terminating overload for the negative directions (see the
    // corresponding evalInternalNeighborsP overload above).
    bool evalInternalNeighborsN(char* valueMask, const BoolLeafNodeType& node) const {
        for (Index x = 0; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 1; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (node.isValueOn(pos) && !node.isValueOn(pos - 1)) {
                        valueMask[pos] = 1;
                        return true;
                    }
                }
            }
        }
        for (Index x = 0; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 1; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (node.isValueOn(pos) && !node.isValueOn(pos - BoolLeafNodeType::DIM)) {
                        valueMask[pos] = 1;
                        return true;
                    }
                }
            }
        }
        for (Index x = 1; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (node.isValueOn(pos) &&
                        !node.isValueOn(pos - BoolLeafNodeType::DIM * BoolLeafNodeType::DIM)) {
                        valueMask[pos] = 1;
                        return true;
                    }
                }
            }
        }
        return false;
    }
    // Check external voxel neighbors
    // If UpWind is true check the X+ oriented node face, else the X- oriented face.
    template<bool UpWind>
    void evalExternalNeighborsX(char* valueMask, const BoolLeafNodeType& node,
        const tree::ValueAccessor<const BoolTreeType>& maskAcc,
        const tree::ValueAccessor<const TreeType>& distAcc) const {
        const Coord& origin = node.origin();
        Coord ijk(0, 0, 0), nijk;
        int step = -1;
        if (UpWind) {
            step = 1;
            ijk[0] = int(BoolLeafNodeType::DIM) - 1;
        }
        const Index xPos = ijk[0] << (2 * int(BoolLeafNodeType::LOG2DIM));
        for (ijk[1] = 0; ijk[1] < int(BoolLeafNodeType::DIM); ++ijk[1]) {
            const Index yPos = xPos + (ijk[1] << int(BoolLeafNodeType::LOG2DIM));
            for (ijk[2] = 0; ijk[2] < int(BoolLeafNodeType::DIM); ++ijk[2]) {
                const Index pos = yPos + ijk[2];
                if (valueMask[pos] == 0 && node.isValueOn(pos)) {
                    nijk = origin + ijk.offsetBy(step, 0, 0);
                    if (!maskAcc.isValueOn(nijk) && distAcc.getValue(nijk) > mIsovalue) {
                        valueMask[pos] = 1;
                    }
                }
            }
        }
    }
    // If UpWind is true check the Y+ oriented node face, else the Y- oriented face.
    template<bool UpWind>
    void evalExternalNeighborsY(char* valueMask, const BoolLeafNodeType& node,
        const tree::ValueAccessor<const BoolTreeType>& maskAcc,
        const tree::ValueAccessor<const TreeType>& distAcc) const {
        const Coord& origin = node.origin();
        Coord ijk(0, 0, 0), nijk;
        int step = -1;
        if (UpWind) {
            step = 1;
            ijk[1] = int(BoolLeafNodeType::DIM) - 1;
        }
        const Index yPos = ijk[1] << int(BoolLeafNodeType::LOG2DIM);
        for (ijk[0] = 0; ijk[0] < int(BoolLeafNodeType::DIM); ++ijk[0]) {
            const Index xPos = yPos + (ijk[0] << (2 * int(BoolLeafNodeType::LOG2DIM)));
            for (ijk[2] = 0; ijk[2] < int(BoolLeafNodeType::DIM); ++ijk[2]) {
                const Index pos = xPos + ijk[2];
                if (valueMask[pos] == 0 && node.isValueOn(pos)) {
                    nijk = origin + ijk.offsetBy(0, step, 0);
                    if (!maskAcc.isValueOn(nijk) && distAcc.getValue(nijk) > mIsovalue) {
                        valueMask[pos] = 1;
                    }
                }
            }
        }
    }
    // If UpWind is true check the Z+ oriented node face, else the Z- oriented face.
    template<bool UpWind>
    void evalExternalNeighborsZ(char* valueMask, const BoolLeafNodeType& node,
        const tree::ValueAccessor<const BoolTreeType>& maskAcc,
        const tree::ValueAccessor<const TreeType>& distAcc) const {
        const Coord& origin = node.origin();
        Coord ijk(0, 0, 0), nijk;
        int step = -1;
        if (UpWind) {
            step = 1;
            ijk[2] = int(BoolLeafNodeType::DIM) - 1;
        }
        for (ijk[0] = 0; ijk[0] < int(BoolLeafNodeType::DIM); ++ijk[0]) {
            const Index xPos = ijk[0] << (2 * int(BoolLeafNodeType::LOG2DIM));
            for (ijk[1] = 0; ijk[1] < int(BoolLeafNodeType::DIM); ++ijk[1]) {
                const Index pos = ijk[2] + xPos + (ijk[1] << int(BoolLeafNodeType::LOG2DIM));
                if (valueMask[pos] == 0 && node.isValueOn(pos)) {
                    nijk = origin + ijk.offsetBy(0, 0, step);
                    if (!maskAcc.isValueOn(nijk) && distAcc.getValue(nijk) > mIsovalue) {
                        valueMask[pos] = 1;
                    }
                }
            }
        }
    }
    //////////
    TreeType const * const mTree;
    BoolTreeType const * const mFillMask;
    BoolLeafNodeType const * const * const mFillNodes;
    BoolLeafNodeType ** const mNewNodes;
    ValueType const mIsovalue;
}; // FillMaskBoundary
/// @brief Constructs a memory light char tree that represents the exterior region with @c +1
///        and the interior regions with @c -1.
/// @details Pipeline: (1) label boundary voxels per leaf, (2) assemble the char
///          tree, (3) optionally splice in fill-mask boundary voxels,
///          (4) trace exterior boundaries to eliminate enclosed regions,
///          (5) flip the sign convention, (6) signed-flood-fill the tiles.
/// @param tree      input distance field / level set tree
/// @param isovalue  threshold separating interior from exterior values
/// @param fillMask  optional boolean tree restricting which enclosed cavities count
template <class TreeType>
inline typename TreeType::template ValueConverter<char>::Type::Ptr
computeEnclosedRegionMask(const TreeType& tree, typename TreeType::ValueType isovalue,
    const typename TreeType::template ValueConverter<bool>::Type* fillMask)
{
    using LeafNodeType = typename TreeType::LeafNodeType;
    using RootNodeType = typename TreeType::RootNodeType;
    using NodeChainType = typename RootNodeType::NodeChainType;
    using InternalNodeType = typename NodeChainType::template Get<1>;
    using CharTreeType = typename TreeType::template ValueConverter<char>::Type;
    using CharLeafNodeType = typename CharTreeType::LeafNodeType;
    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;
    const TreeType* treePt = &tree;
    size_t numLeafNodes = 0, numInternalNodes = 0;
    std::vector<const LeafNodeType*> nodes;
    std::vector<size_t> leafnodeCount;
    {
        // compute the prefix sum of the leafnode count in each internal node.
        std::vector<const InternalNodeType*> internalNodes;
        treePt->getNodes(internalNodes);
        numInternalNodes = internalNodes.size();
        leafnodeCount.push_back(0);
        for (size_t n = 0; n < numInternalNodes; ++n) {
            leafnodeCount.push_back(leafnodeCount.back() + internalNodes[n]->leafCount());
        }
        numLeafNodes = leafnodeCount.back();
        // extract all leafnodes
        nodes.reserve(numLeafNodes);
        for (size_t n = 0; n < numInternalNodes; ++n) {
            internalNodes[n]->getNodes(nodes);
        }
    }
    // create mask leafnodes (0 = below isovalue, 1 = at/above; background 1)
    std::unique_ptr<CharLeafNodeType*[]> maskNodes(new CharLeafNodeType*[numLeafNodes]);
    tbb::parallel_for(tbb::blocked_range<size_t>(0, numLeafNodes),
        LabelBoundaryVoxels<LeafNodeType>(isovalue, nodes.data(), maskNodes.get()));
    // create mask grid
    typename CharTreeType::Ptr maskTree(new CharTreeType(1));
    PopulateTree<CharTreeType> populate(*maskTree, maskNodes.get(), leafnodeCount.data(), 1);
    tbb::parallel_reduce(tbb::blocked_range<size_t>(0, numInternalNodes), populate);
    // optionally evaluate the fill mask
    std::vector<CharLeafNodeType*> extraMaskNodes;
    if (fillMask) {
        std::vector<const BoolLeafNodeType*> fillMaskNodes;
        fillMask->getNodes(fillMaskNodes);
        std::unique_ptr<BoolLeafNodeType*[]> boundaryMaskNodes(
            new BoolLeafNodeType*[fillMaskNodes.size()]);
        tbb::parallel_for(tbb::blocked_range<size_t>(0, fillMaskNodes.size()),
            FillMaskBoundary<TreeType>(tree, isovalue, *fillMask, fillMaskNodes.data(),
                boundaryMaskNodes.get()));
        tree::ValueAccessor<CharTreeType> maskAcc(*maskTree);
        for (size_t n = 0, N = fillMaskNodes.size(); n < N; ++n) {
            if (boundaryMaskNodes[n] == nullptr) continue;
            const BoolLeafNodeType& boundaryNode = *boundaryMaskNodes[n];
            const Coord& origin = boundaryNode.origin();
            CharLeafNodeType* maskNodePt = maskAcc.probeLeaf(origin);
            if (!maskNodePt) {
                // Leaf did not exist in the label tree; track it separately so
                // its sign can be flipped later along with the others.
                maskNodePt = maskAcc.touchLeaf(origin);
                extraMaskNodes.push_back(maskNodePt);
            }
            char* data = maskNodePt->buffer().data();
            typename BoolLeafNodeType::ValueOnCIter it = boundaryNode.cbeginValueOn();
            for (; it; ++it) {
                // Mark fill-mask boundary voxels with a sentinel (-1) so the
                // exterior trace treats them as blocked.
                if (data[it.pos()] != 0) data[it.pos()] = -1;
            }
            delete boundaryMaskNodes[n];
        }
    }
    // eliminate enclosed regions
    tools::traceExteriorBoundaries(*maskTree);
    // flip voxel sign to negative inside and positive outside.
    tbb::parallel_for(tbb::blocked_range<size_t>(0, numLeafNodes),
        FlipRegionSign<CharLeafNodeType>(maskNodes.get()));
    if (!extraMaskNodes.empty()) {
        tbb::parallel_for(tbb::blocked_range<size_t>(0, extraMaskNodes.size()),
            FlipRegionSign<CharLeafNodeType>(extraMaskNodes.data()));
    }
    // propagate sign information into tile region
    tools::signedFloodFill(*maskTree);
    return maskTree;
} // computeEnclosedRegionMask()
/// @brief Builds a boolean tree whose active, @c true values mark the voxels
///        and tiles of @a tree with values below @a iso (the interior of an SDF).
/// @param tree  input distance field / level set tree
/// @param iso   interior threshold; clamped below the tree's background value
template <class TreeType>
inline typename TreeType::template ValueConverter<bool>::Type::Ptr
computeInteriorMask(const TreeType& tree, typename TreeType::ValueType iso)
{
    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using RootNodeType = typename TreeType::RootNodeType;
    using NodeChainType = typename RootNodeType::NodeChainType;
    using InternalNodeType = typename NodeChainType::template Get<1>;
    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;
    using BoolRootNodeType = typename BoolTreeType::RootNodeType;
    using BoolNodeChainType = typename BoolRootNodeType::NodeChainType;
    using BoolInternalNodeType = typename BoolNodeChainType::template Get<1>;
    /////
    // Clamp the isovalue to the level set's background value minus epsilon.
    // (In a valid narrow-band level set, all voxels, including background voxels,
    // have values less than or equal to the background value, so an isovalue
    // greater than or equal to the background value would produce a mask with
    // effectively infinite extent.)
    iso = std::min(iso,
        static_cast<ValueType>(tree.background() - math::Tolerance<ValueType>::value()));
    size_t numLeafNodes = 0, numInternalNodes = 0;
    std::vector<const LeafNodeType*> nodes;
    std::vector<size_t> leafnodeCount;
    {
        // compute the prefix sum of the leafnode count in each internal node.
        std::vector<const InternalNodeType*> internalNodes;
        tree.getNodes(internalNodes);
        numInternalNodes = internalNodes.size();
        leafnodeCount.push_back(0);
        for (size_t n = 0; n < numInternalNodes; ++n) {
            leafnodeCount.push_back(leafnodeCount.back() + internalNodes[n]->leafCount());
        }
        numLeafNodes = leafnodeCount.back();
        // extract all leafnodes
        nodes.reserve(numLeafNodes);
        for (size_t n = 0; n < numInternalNodes; ++n) {
            internalNodes[n]->getNodes(nodes);
        }
    }
    // create mask leafnodes
    std::unique_ptr<BoolLeafNodeType*[]> maskNodes(new BoolLeafNodeType*[numLeafNodes]);
    tbb::parallel_for(tbb::blocked_range<size_t>(0, numLeafNodes),
        MaskInteriorVoxels<LeafNodeType>(iso, nodes.data(), maskNodes.get()));
    // create mask grid
    typename BoolTreeType::Ptr maskTree(new BoolTreeType(false));
    PopulateTree<BoolTreeType> populate(*maskTree, maskNodes.get(), leafnodeCount.data(), false);
    tbb::parallel_reduce(tbb::blocked_range<size_t>(0, numInternalNodes), populate);
    // evaluate tile values at the internal-node level
    std::vector<BoolInternalNodeType*> internalMaskNodes;
    maskTree->getNodes(internalMaskNodes);
    tbb::parallel_for(tbb::blocked_range<size_t>(0, internalMaskNodes.size()),
        MaskInteriorTiles<TreeType, BoolInternalNodeType>(iso, tree, internalMaskNodes.data()));
    // evaluate the remaining, higher-level tiles (above the lowest internal
    // nodes) serially with a depth-bounded iterator
    tree::ValueAccessor<const TreeType> acc(tree);
    typename BoolTreeType::ValueAllIter it(*maskTree);
    it.setMaxDepth(BoolTreeType::ValueAllIter::LEAF_DEPTH - 2);
    for ( ; it; ++it) {
        if (acc.getValue(it.getCoord()) < iso) {
            it.setValue(true);
            it.setActiveState(true);
        }
    }
    return maskTree;
} // computeInteriorMask()
/// @brief Parallel-reduce body that builds a boolean mask of the active voxels
///     whose value crosses the isovalue along at least one of the six face
///     directions (i.e. voxels intersecting the isosurface).
template<typename InputTreeType>
struct MaskIsovalueCrossingVoxels
{
    using InputValueType = typename InputTreeType::ValueType;
    using InputLeafNodeType = typename InputTreeType::LeafNodeType;
    using BoolTreeType = typename InputTreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;
    MaskIsovalueCrossingVoxels(
        const InputTreeType& inputTree,
        const std::vector<const InputLeafNodeType*>& inputLeafNodes,
        BoolTreeType& maskTree,
        InputValueType iso)
        : mInputAccessor(inputTree)
        , mInputNodes(!inputLeafNodes.empty() ? &inputLeafNodes.front() : nullptr)
        , mMaskTree(false)
        , mMaskAccessor(maskTree)
        , mIsovalue(iso)
    {
    }
    // Split constructor: each task writes into its own private mask tree,
    // merged back in join().
    MaskIsovalueCrossingVoxels(MaskIsovalueCrossingVoxels& rhs, tbb::split)
        : mInputAccessor(rhs.mInputAccessor.tree())
        , mInputNodes(rhs.mInputNodes)
        , mMaskTree(false)
        , mMaskAccessor(mMaskTree)
        , mIsovalue(rhs.mIsovalue)
    {
    }
    void operator()(const tbb::blocked_range<size_t>& range) {
        const InputValueType iso = mIsovalue;
        Coord ijk(0, 0, 0);
        BoolLeafNodeType* maskNodePt = nullptr;
        for (size_t n = range.begin(); mInputNodes && (n != range.end()); ++n) {
            const InputLeafNodeType& node = *mInputNodes[n];
            // Reuse the scratch mask node if the previous leaf contributed nothing.
            if (!maskNodePt) maskNodePt = new BoolLeafNodeType(node.origin(), false);
            else maskNodePt->setOrigin(node.origin());
            bool collectedData = false;
            for (typename InputLeafNodeType::ValueOnCIter it = node.cbeginValueOn(); it; ++it) {
                bool isUnder = *it < iso;
                ijk = it.getCoord();
                // Probe the six face neighbors, restoring ijk after each probe,
                // and stop at the first sign change.
                ++ijk[2];
                bool signChange = isUnder != (mInputAccessor.getValue(ijk) < iso); // +z edge
                --ijk[2];
                if (!signChange) {
                    --ijk[2];
                    signChange = isUnder != (mInputAccessor.getValue(ijk) < iso); // -z edge
                    ++ijk[2];
                }
                if (!signChange) {
                    ++ijk[1];
                    signChange = isUnder != (mInputAccessor.getValue(ijk) < iso); // +y edge
                    --ijk[1];
                }
                if (!signChange) {
                    --ijk[1];
                    signChange = isUnder != (mInputAccessor.getValue(ijk) < iso); // -y edge
                    ++ijk[1];
                }
                if (!signChange) {
                    ++ijk[0];
                    signChange = isUnder != (mInputAccessor.getValue(ijk) < iso); // +x edge
                    --ijk[0];
                }
                if (!signChange) {
                    --ijk[0];
                    signChange = isUnder != (mInputAccessor.getValue(ijk) < iso); // -x edge
                    ++ijk[0];
                }
                if (signChange) {
                    collectedData = true;
                    maskNodePt->setValueOn(it.pos(), true);
                }
            }
            if (collectedData) {
                mMaskAccessor.addLeaf(maskNodePt);
                maskNodePt = nullptr;
            }
        }
        if (maskNodePt) delete maskNodePt;
    }
    void join(MaskIsovalueCrossingVoxels& rhs) {
        mMaskAccessor.tree().merge(rhs.mMaskAccessor.tree());
    }
private:
    tree::ValueAccessor<const InputTreeType> mInputAccessor;
    InputLeafNodeType const * const * const mInputNodes;
    BoolTreeType mMaskTree;
    tree::ValueAccessor<BoolTreeType> mMaskAccessor;
    InputValueType mIsovalue;
}; // MaskIsovalueCrossingVoxels
////////////////////////////////////////
/// @brief One face-connected component of a leafnode's active voxel mask,
///   together with links to the components it touches in neighboring nodes.
template<typename NodeType>
struct NodeMaskSegment
{
    using Ptr = SharedPtr<NodeMaskSegment>;
    using NodeMaskType = typename NodeType::NodeMaskType;

    NodeMaskSegment() = default;

    std::vector<NodeMaskSegment*> connections;  // touching segments in face-adjacent nodes
    NodeMaskType mask{false};                   // voxels that belong to this segment
    Coord origin{0, 0, 0};                      // origin of the owning leafnode
    bool visited = false;                       // traversal flag used while grouping segments
}; // struct NodeMaskSegment
/// @brief Decomposes the active voxel mask of @a node into disjoint
///   face-connected components and appends one NodeMaskSegment per
///   component to @a segments.
/// @details Iterative flood fill over the node's value mask: a seed bit is
///   popped from @a nodeMask and its six in-node face neighbors are expanded
///   repeatedly; bits are cleared from @a nodeMask as they are assigned to
///   the current segment.
template<typename NodeType>
inline void
nodeMaskSegmentation(const NodeType& node,
    std::vector<typename NodeMaskSegment<NodeType>::Ptr>& segments)
{
    using NodeMaskType = typename NodeType::NodeMaskType;
    using NodeMaskSegmentType = NodeMaskSegment<NodeType>;
    using NodeMaskSegmentTypePtr = typename NodeMaskSegmentType::Ptr;

    NodeMaskType nodeMask(node.getValueMask());
    std::deque<Index> indexList;

    while (!nodeMask.isOff()) { // while unassigned on-bits remain

        NodeMaskSegmentTypePtr segment(new NodeMaskSegmentType());
        segment->origin = node.origin();

        NodeMaskType& mask = segment->mask;

        indexList.push_back(nodeMask.findFirstOn()); // seed voxel
        nodeMask.setOff(indexList.back()); // mark as visited

        Coord ijk(0, 0, 0);

        while (!indexList.empty()) {

            const Index pos = indexList.back();
            indexList.pop_back();

            if (mask.isOn(pos)) continue;
            mask.setOn(pos);

            ijk = NodeType::offsetToLocalCoord(pos);

            // Neighbor offsets in the linear mask layout: +/-1 along z,
            // +/-DIM along y, +/-DIM^2 along x; boundary checks keep the
            // fill inside this node.
            Index npos = pos - 1;
            if (ijk[2] != 0 && nodeMask.isOn(npos)) { // -z neighbor
                nodeMask.setOff(npos);
                indexList.push_back(npos);
            }

            npos = pos + 1;
            if (ijk[2] != (NodeType::DIM - 1) && nodeMask.isOn(npos)) { // +z neighbor
                nodeMask.setOff(npos);
                indexList.push_back(npos);
            }

            npos = pos - NodeType::DIM;
            if (ijk[1] != 0 && nodeMask.isOn(npos)) { // -y neighbor
                nodeMask.setOff(npos);
                indexList.push_back(npos);
            }

            npos = pos + NodeType::DIM;
            if (ijk[1] != (NodeType::DIM - 1) && nodeMask.isOn(npos)) { // +y neighbor
                nodeMask.setOff(npos);
                indexList.push_back(npos);
            }

            npos = pos - NodeType::DIM * NodeType::DIM;
            if (ijk[0] != 0 && nodeMask.isOn(npos)) { // -x neighbor
                nodeMask.setOff(npos);
                indexList.push_back(npos);
            }

            npos = pos + NodeType::DIM * NodeType::DIM;
            if (ijk[0] != (NodeType::DIM - 1) && nodeMask.isOn(npos)) { // +x neighbor
                nodeMask.setOff(npos);
                indexList.push_back(npos);
            }
        }

        segments.push_back(segment);
    }
}
/// @brief TBB body object that segments the value mask of each leafnode in
///   @a nodes into face-connected components (see nodeMaskSegmentation()),
///   storing the per-node segment lists in @a nodeMaskArray.
/// @warning As a deliberate side effect, each node's origin x-component is
///   overwritten with the node's array index so that later stages (e.g.
///   ConnectNodeMaskSegments::getNodeOffset()) can map a node back to its
///   segment list.  The nodes' origins are not valid coordinates afterwards.
template<typename NodeType>
struct SegmentNodeMask
{
    using NodeMaskSegmentType = NodeMaskSegment<NodeType>;
    using NodeMaskSegmentTypePtr = typename NodeMaskSegmentType::Ptr;
    using NodeMaskSegmentVector = typename std::vector<NodeMaskSegmentTypePtr>;

    SegmentNodeMask(std::vector<NodeType*>& nodes, NodeMaskSegmentVector* nodeMaskArray)
        : mNodes(!nodes.empty() ? &nodes.front() : nullptr)
        , mNodeMaskArray(nodeMaskArray)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            NodeType& node = *mNodes[n];
            nodeMaskSegmentation(node, mNodeMaskArray[n]);

            // hack origin data to store array offset
            Coord& origin = const_cast<Coord&>(node.origin());
            origin[0] = static_cast<int>(n);
        }
    }

    NodeType * const * const mNodes;
    NodeMaskSegmentVector * const mNodeMaskArray;
}; // struct SegmentNodeMask
/// @brief TBB body object that links every node mask segment to the segments
///   it touches in the six face-adjacent leafnodes, building the cross-node
///   connectivity graph later used to group segments into disjoint regions.
/// @note Relies on SegmentNodeMask having overwritten each leafnode's origin
///   x-component with the node's array index (see getNodeOffset()).
template<typename TreeType, typename NodeType>
struct ConnectNodeMaskSegments
{
    using NodeMaskType = typename NodeType::NodeMaskType;
    using NodeMaskSegmentType = NodeMaskSegment<NodeType>;
    using NodeMaskSegmentTypePtr = typename NodeMaskSegmentType::Ptr;
    using NodeMaskSegmentVector = typename std::vector<NodeMaskSegmentTypePtr>;

    ConnectNodeMaskSegments(const TreeType& tree, NodeMaskSegmentVector* nodeMaskArray)
        : mTree(&tree)
        , mNodeMaskArray(nodeMaskArray)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        tree::ValueAccessor<const TreeType> acc(*mTree);

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            NodeMaskSegmentVector& segments = mNodeMaskArray[n];
            if (segments.empty()) continue;

            // Per-segment sets of touching neighbor segments; the sets
            // de-duplicate repeated contacts along a shared node face.
            std::vector<std::set<NodeMaskSegmentType*> > connections(segments.size());

            Coord ijk = segments[0]->origin;

            const NodeType* node = acc.template probeConstNode<NodeType>(ijk);
            if (!node) continue;

            // get neighbour nodes

            ijk[2] += NodeType::DIM;
            const NodeType* nodeZUp = acc.template probeConstNode<NodeType>(ijk);
            ijk[2] -= (NodeType::DIM + NodeType::DIM);
            const NodeType* nodeZDown = acc.template probeConstNode<NodeType>(ijk);
            ijk[2] += NodeType::DIM;

            ijk[1] += NodeType::DIM;
            const NodeType* nodeYUp = acc.template probeConstNode<NodeType>(ijk);
            ijk[1] -= (NodeType::DIM + NodeType::DIM);
            const NodeType* nodeYDown = acc.template probeConstNode<NodeType>(ijk);
            ijk[1] += NodeType::DIM;

            ijk[0] += NodeType::DIM;
            const NodeType* nodeXUp = acc.template probeConstNode<NodeType>(ijk);
            ijk[0] -= (NodeType::DIM + NodeType::DIM);
            const NodeType* nodeXDown = acc.template probeConstNode<NodeType>(ijk);
            ijk[0] += NodeType::DIM;

            const Index startPos = node->getValueMask().findFirstOn();
            for (Index pos = startPos; pos < NodeMaskType::SIZE; ++pos) {

                if (!node->isValueOn(pos)) continue;

                ijk = NodeType::offsetToLocalCoord(pos);

#ifdef _MSC_FULL_VER
#if _MSC_FULL_VER >= 190000000 && _MSC_FULL_VER < 190024210
                // Visual Studio 2015 had a codegen bug that wasn't fixed until Update 3
                volatile Index npos = 0;
#else
                Index npos = 0;
#endif
#else
                Index npos = 0;
#endif

                // For voxels on a node face, compute the mirrored offset in
                // the adjacent node; if the neighbor voxel is active there,
                // link this voxel's segment to the neighbor's segment.
                if (ijk[2] == 0) { // -z face
                    npos = pos + (NodeType::DIM - 1);
                    if (nodeZDown && nodeZDown->isValueOn(npos)) {
                        NodeMaskSegmentType* nsegment =
                            findNodeMaskSegment(mNodeMaskArray[getNodeOffset(*nodeZDown)], npos);
                        const Index idx = findNodeMaskSegmentIndex(segments, pos);
                        connections[idx].insert(nsegment);
                    }
                } else if (ijk[2] == (NodeType::DIM - 1)) { // +z face
                    npos = pos - (NodeType::DIM - 1);
                    if (nodeZUp && nodeZUp->isValueOn(npos)) {
                        NodeMaskSegmentType* nsegment =
                            findNodeMaskSegment(mNodeMaskArray[getNodeOffset(*nodeZUp)], npos);
                        const Index idx = findNodeMaskSegmentIndex(segments, pos);
                        connections[idx].insert(nsegment);
                    }
                }

                if (ijk[1] == 0) { // -y face
                    npos = pos + (NodeType::DIM - 1) * NodeType::DIM;
                    if (nodeYDown && nodeYDown->isValueOn(npos)) {
                        NodeMaskSegmentType* nsegment =
                            findNodeMaskSegment(mNodeMaskArray[getNodeOffset(*nodeYDown)], npos);
                        const Index idx = findNodeMaskSegmentIndex(segments, pos);
                        connections[idx].insert(nsegment);
                    }
                } else if (ijk[1] == (NodeType::DIM - 1)) { // +y face
                    npos = pos - (NodeType::DIM - 1) * NodeType::DIM;
                    if (nodeYUp && nodeYUp->isValueOn(npos)) {
                        NodeMaskSegmentType* nsegment =
                            findNodeMaskSegment(mNodeMaskArray[getNodeOffset(*nodeYUp)], npos);
                        const Index idx = findNodeMaskSegmentIndex(segments, pos);
                        connections[idx].insert(nsegment);
                    }
                }

                if (ijk[0] == 0) { // -x face
                    npos = pos + (NodeType::DIM - 1) * NodeType::DIM * NodeType::DIM;
                    if (nodeXDown && nodeXDown->isValueOn(npos)) {
                        NodeMaskSegmentType* nsegment =
                            findNodeMaskSegment(mNodeMaskArray[getNodeOffset(*nodeXDown)], npos);
                        const Index idx = findNodeMaskSegmentIndex(segments, pos);
                        connections[idx].insert(nsegment);
                    }
                } else if (ijk[0] == (NodeType::DIM - 1)) { // +x face
                    npos = pos - (NodeType::DIM - 1) * NodeType::DIM * NodeType::DIM;
                    if (nodeXUp && nodeXUp->isValueOn(npos)) {
                        NodeMaskSegmentType* nsegment =
                            findNodeMaskSegment(mNodeMaskArray[getNodeOffset(*nodeXUp)], npos);
                        const Index idx = findNodeMaskSegmentIndex(segments, pos);
                        connections[idx].insert(nsegment);
                    }
                }
            }

            // Flatten the de-duplicated connection sets into each
            // segment's connection list.
            for (size_t i = 0, I = connections.size(); i < I; ++i) {

                typename std::set<NodeMaskSegmentType*>::iterator
                    it = connections[i].begin(), end = connections[i].end();

                std::vector<NodeMaskSegmentType*>& segmentConnections = segments[i]->connections;
                segmentConnections.reserve(connections.size());
                for (; it != end; ++it) {
                    segmentConnections.push_back(*it);
                }
            }
        } // end range loop
    }

private:
    /// Return the array index that SegmentNodeMask stashed in the node origin.
    static inline size_t getNodeOffset(const NodeType& node) {
        return static_cast<size_t>(node.origin()[0]);
    }

    /// Return the segment of @a segments that contains voxel offset @a pos,
    /// or nullptr if no segment contains it.
    static inline NodeMaskSegmentType*
    findNodeMaskSegment(NodeMaskSegmentVector& segments, Index pos)
    {
        NodeMaskSegmentType* segment = nullptr;

        for (size_t n = 0, N = segments.size(); n < N; ++n) {
            if (segments[n]->mask.isOn(pos)) {
                segment = segments[n].get();
                break;
            }
        }

        return segment;
    }

    /// Return the index of the segment that contains voxel offset @a pos,
    /// or Index(-1) if no segment contains it.
    static inline Index
    findNodeMaskSegmentIndex(NodeMaskSegmentVector& segments, Index pos)
    {
        for (Index n = 0, N = Index(segments.size()); n < N; ++n) {
            if (segments[n]->mask.isOn(pos)) return n;
        }
        return Index(-1);
    }

    TreeType const * const mTree;
    NodeMaskSegmentVector * const mNodeMaskArray;
}; // struct ConnectNodeMaskSegments
/// @brief TBB body object that rasterizes a group of connected node mask
///   segments into a single boolean topology tree.
/// @note parallel_reduce body; split bodies build private trees that are
///   combined with merge() in join(), and the final result is obtained
///   through mask().
template<typename TreeType>
struct MaskSegmentGroup
{
    using LeafNodeType = typename TreeType::LeafNodeType;
    using TreeTypePtr = typename TreeType::Ptr;
    using NodeMaskSegmentType = NodeMaskSegment<LeafNodeType>;

    MaskSegmentGroup(const std::vector<NodeMaskSegmentType*>& segments)
        : mSegments(!segments.empty() ? &segments.front() : nullptr)
        , mTree(new TreeType(false))
    {
    }

    /// Splitting constructor: each split body accumulates into its own tree.
    MaskSegmentGroup(const MaskSegmentGroup& rhs, tbb::split)
        : mSegments(rhs.mSegments)
        , mTree(new TreeType(false))
    {
    }

    /// Tree holding the rasterized result of this body (and joined bodies).
    TreeTypePtr& mask() { return mTree; }

    void join(MaskSegmentGroup& rhs) { mTree->merge(*rhs.mTree); }

    void operator()(const tbb::blocked_range<size_t>& range) {

        tree::ValueAccessor<TreeType> acc(*mTree);

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            NodeMaskSegmentType& segment = *mSegments[n];
            // OR the segment's voxel mask into the leafnode at its origin.
            LeafNodeType* node = acc.touchLeaf(segment.origin);
            node->getValueMask() |= segment.mask;
        }
    }

private:
    NodeMaskSegmentType * const * const mSegments;
    TreeTypePtr mTree;
}; // struct MaskSegmentGroup
////////////////////////////////////////
/// @brief TBB body object that expands the narrow-band mask by one voxel
///   across leafnode faces: a voxel in an adjacent leafnode is added to the
///   new mask if it is active in the distance tree, not already masked, and
///   its absolute distance exceeds that of the current face voxel (i.e. the
///   region can still grow outward there).
/// @note parallel_reduce body; newly masked voxels are collected in a
///   separate tree (newMaskTree()) so that the input mask is not modified
///   while it is being read.
template<typename TreeType>
struct ExpandLeafNodeRegion
{
    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using NodeMaskType = typename LeafNodeType::NodeMaskType;

    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;

    /////

    /// @param distTree   distance (SDF) tree providing voxel values
    /// @param maskTree   current narrow-band mask (read only)
    /// @param maskNodes  mask leafnodes to process, indexed by the range
    ExpandLeafNodeRegion(const TreeType& distTree, BoolTreeType& maskTree,
        std::vector<BoolLeafNodeType*>& maskNodes)
        : mDistTree(&distTree)
        , mMaskTree(&maskTree)
        , mMaskNodes(!maskNodes.empty() ? &maskNodes.front() : nullptr)
        , mNewMaskTree(false)
    {
    }

    /// Splitting constructor: each split body collects into its own tree.
    ExpandLeafNodeRegion(const ExpandLeafNodeRegion& rhs, tbb::split)
        : mDistTree(rhs.mDistTree)
        , mMaskTree(rhs.mMaskTree)
        , mMaskNodes(rhs.mMaskNodes)
        , mNewMaskTree(false)
    {
    }

    /// Tree containing only the voxels added by this expansion step.
    BoolTreeType& newMaskTree() { return mNewMaskTree; }

    void join(ExpandLeafNodeRegion& rhs) { mNewMaskTree.merge(rhs.mNewMaskTree); }

    void operator()(const tbb::blocked_range<size_t>& range) {

        using NodeType = LeafNodeType;

        tree::ValueAccessor<const TreeType> distAcc(*mDistTree);
        tree::ValueAccessor<const BoolTreeType> maskAcc(*mMaskTree);
        tree::ValueAccessor<BoolTreeType> newMaskAcc(mNewMaskTree);

        NodeMaskType maskZUp, maskZDown, maskYUp, maskYDown, maskXUp, maskXDown;

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            BoolLeafNodeType& maskNode = *mMaskNodes[n];
            if (maskNode.isEmpty()) continue;

            Coord ijk = maskNode.origin(), nijk;

            const LeafNodeType* distNode = distAcc.probeConstLeaf(ijk);
            if (!distNode) continue;

            // Buffers and (unmasked-active) value masks of the six
            // face-adjacent distance leafnodes; null if a node is absent.
            const ValueType *dataZUp = nullptr, *dataZDown = nullptr,
                *dataYUp = nullptr, *dataYDown = nullptr,
                *dataXUp = nullptr, *dataXDown = nullptr;

            ijk[2] += NodeType::DIM;
            getData(ijk, distAcc, maskAcc, maskZUp, dataZUp);
            ijk[2] -= (NodeType::DIM + NodeType::DIM);
            getData(ijk, distAcc, maskAcc, maskZDown, dataZDown);
            ijk[2] += NodeType::DIM;

            ijk[1] += NodeType::DIM;
            getData(ijk, distAcc, maskAcc, maskYUp, dataYUp);
            ijk[1] -= (NodeType::DIM + NodeType::DIM);
            getData(ijk, distAcc, maskAcc, maskYDown, dataYDown);
            ijk[1] += NodeType::DIM;

            ijk[0] += NodeType::DIM;
            getData(ijk, distAcc, maskAcc, maskXUp, dataXUp);
            ijk[0] -= (NodeType::DIM + NodeType::DIM);
            getData(ijk, distAcc, maskAcc, maskXDown, dataXDown);
            ijk[0] += NodeType::DIM;

            for (typename BoolLeafNodeType::ValueOnIter it = maskNode.beginValueOn(); it; ++it) {

                const Index pos = it.pos();
                const ValueType val = std::abs(distNode->getValue(pos));

                ijk = BoolLeafNodeType::offsetToLocalCoord(pos);
                nijk = ijk + maskNode.origin();

                // Only voxels on a node face can spill into a neighbor node;
                // the mirrored offset addresses the voxel on the other side.
                if (dataZUp && ijk[2] == (BoolLeafNodeType::DIM - 1)) {
                    const Index npos = pos - (NodeType::DIM - 1);
                    if (maskZUp.isOn(npos) && std::abs(dataZUp[npos]) > val) {
                        newMaskAcc.setValueOn(nijk.offsetBy(0, 0, 1));
                    }
                } else if (dataZDown && ijk[2] == 0) {
                    const Index npos = pos + (NodeType::DIM - 1);
                    if (maskZDown.isOn(npos) && std::abs(dataZDown[npos]) > val) {
                        newMaskAcc.setValueOn(nijk.offsetBy(0, 0, -1));
                    }
                }

                if (dataYUp && ijk[1] == (BoolLeafNodeType::DIM - 1)) {
                    const Index npos = pos - (NodeType::DIM - 1) * NodeType::DIM;
                    if (maskYUp.isOn(npos) && std::abs(dataYUp[npos]) > val) {
                        newMaskAcc.setValueOn(nijk.offsetBy(0, 1, 0));
                    }
                } else if (dataYDown && ijk[1] == 0) {
                    const Index npos = pos + (NodeType::DIM - 1) * NodeType::DIM;
                    if (maskYDown.isOn(npos) && std::abs(dataYDown[npos]) > val) {
                        newMaskAcc.setValueOn(nijk.offsetBy(0, -1, 0));
                    }
                }

                if (dataXUp && ijk[0] == (BoolLeafNodeType::DIM - 1)) {
                    const Index npos = pos - (NodeType::DIM - 1) * NodeType::DIM * NodeType::DIM;
                    if (maskXUp.isOn(npos) && std::abs(dataXUp[npos]) > val) {
                        newMaskAcc.setValueOn(nijk.offsetBy(1, 0, 0));
                    }
                } else if (dataXDown && ijk[0] == 0) {
                    const Index npos = pos + (NodeType::DIM - 1) * NodeType::DIM * NodeType::DIM;
                    if (maskXDown.isOn(npos) && std::abs(dataXDown[npos]) > val) {
                        newMaskAcc.setValueOn(nijk.offsetBy(-1, 0, 0));
                    }
                }
            } // end value on loop
        } // end range loop
    }

private:
    /// @brief Fetch the buffer and value mask of the distance-tree leafnode
    ///   at @a ijk (if any), removing already-masked voxels from @a mask so
    ///   only unvisited active voxels remain as expansion candidates.
    static inline void
    getData(const Coord& ijk, tree::ValueAccessor<const TreeType>& distAcc,
        tree::ValueAccessor<const BoolTreeType>& maskAcc, NodeMaskType& mask,
        const ValueType*& data)
    {
        const LeafNodeType* node = distAcc.probeConstLeaf(ijk);
        if (node) {
            data = node->buffer().data();
            mask = node->getValueMask();
            const BoolLeafNodeType* maskNodePt = maskAcc.probeConstLeaf(ijk);
            if (maskNodePt) mask -= maskNodePt->getValueMask();
        }
    }

    TreeType const * const mDistTree;
    BoolTreeType * const mMaskTree;
    BoolLeafNodeType ** const mMaskNodes;

    BoolTreeType mNewMaskTree;
}; // struct ExpandLeafNodeRegion
/// @brief TBB body object that flood-fills each narrow-band mask node within
///   its own leafnode: starting from the already-masked voxels, face-adjacent
///   active voxels are claimed as long as their absolute distance increases,
///   so the fill grows monotonically away from the surface.
template<typename TreeType>
struct FillLeafNodeVoxels
{
    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using NodeMaskType = typename LeafNodeType::NodeMaskType;
    using BoolLeafNodeType = tree::LeafNode<bool, LeafNodeType::LOG2DIM>;

    FillLeafNodeVoxels(const TreeType& tree, std::vector<BoolLeafNodeType*>& maskNodes)
        : mTree(&tree), mMaskNodes(!maskNodes.empty() ? &maskNodes.front() : nullptr)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        tree::ValueAccessor<const TreeType> distAcc(*mTree);

        std::vector<Index> indexList;
        indexList.reserve(NodeMaskType::SIZE);

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            BoolLeafNodeType& maskNode = *mMaskNodes[n];

            const LeafNodeType * distNode = distAcc.probeConstLeaf(maskNode.origin());
            if (!distNode) continue;

            NodeMaskType mask(distNode->getValueMask());
            NodeMaskType& narrowbandMask = maskNode.getValueMask();

            // Seed the fill with the voxels already in the narrow-band mask.
            for (Index pos = narrowbandMask.findFirstOn(); pos < NodeMaskType::SIZE; ++pos) {
                if (narrowbandMask.isOn(pos)) indexList.push_back(pos);
            }

            mask -= narrowbandMask; // bitwise difference
            narrowbandMask.setOff(); // mask is rebuilt from scratch below

            const ValueType* data = distNode->buffer().data();
            Coord ijk(0, 0, 0);

            while (!indexList.empty()) {

                const Index pos = indexList.back();
                indexList.pop_back();

                if (narrowbandMask.isOn(pos)) continue;
                narrowbandMask.setOn(pos);

                const ValueType dist = std::abs(data[pos]);

                ijk = LeafNodeType::offsetToLocalCoord(pos);

                // Expand only toward voxels with a larger absolute distance,
                // i.e. strictly away from the surface.
                Index npos = pos - 1;
                if (ijk[2] != 0 && mask.isOn(npos) && std::abs(data[npos]) > dist) { // -z
                    mask.setOff(npos);
                    indexList.push_back(npos);
                }

                npos = pos + 1;
                if ((ijk[2] != (LeafNodeType::DIM - 1)) && mask.isOn(npos)
                    && std::abs(data[npos]) > dist) // +z
                {
                    mask.setOff(npos);
                    indexList.push_back(npos);
                }

                npos = pos - LeafNodeType::DIM;
                if (ijk[1] != 0 && mask.isOn(npos) && std::abs(data[npos]) > dist) { // -y
                    mask.setOff(npos);
                    indexList.push_back(npos);
                }

                npos = pos + LeafNodeType::DIM;
                if ((ijk[1] != (LeafNodeType::DIM - 1)) && mask.isOn(npos)
                    && std::abs(data[npos]) > dist) // +y
                {
                    mask.setOff(npos);
                    indexList.push_back(npos);
                }

                npos = pos - LeafNodeType::DIM * LeafNodeType::DIM;
                if (ijk[0] != 0 && mask.isOn(npos) && std::abs(data[npos]) > dist) { // -x
                    mask.setOff(npos);
                    indexList.push_back(npos);
                }

                npos = pos + LeafNodeType::DIM * LeafNodeType::DIM;
                if ((ijk[0] != (LeafNodeType::DIM - 1)) && mask.isOn(npos)
                    && std::abs(data[npos]) > dist) // +x
                {
                    mask.setOff(npos);
                    indexList.push_back(npos);
                }
            } // end flood fill loop
        } // end range loop
    }

    TreeType const * const mTree;
    BoolLeafNodeType ** const mMaskNodes;
}; // FillLeafNodeVoxels
/// @brief TBB body object that expands each narrow-band mask segment to its
///   full region of active voxels in the distance tree, by alternating an
///   intra-node flood fill (FillLeafNodeVoxels) with a cross-node expansion
///   step (ExpandLeafNodeRegion) until no new voxels are added.
template<typename TreeType>
struct ExpandNarrowbandMask
{
    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;
    using BoolTreeTypePtr = typename BoolTreeType::Ptr;

    ExpandNarrowbandMask(const TreeType& tree, std::vector<BoolTreeTypePtr>& segments)
        : mTree(&tree), mSegments(!segments.empty() ? &segments.front() : nullptr)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        const TreeType& distTree = *mTree;
        std::vector<BoolLeafNodeType*> nodes;

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            BoolTreeType& narrowBandMask = *mSegments[n];

            // Voxels still to be processed; initially the whole segment mask.
            BoolTreeType candidateMask(narrowBandMask, false, TopologyCopy());

            while (true) {

                nodes.clear();
                candidateMask.getNodes(nodes);
                if (nodes.empty()) break; // converged: nothing left to expand

                const tbb::blocked_range<size_t> nodeRange(0, nodes.size());

                // Fill within each candidate leafnode, then merge into the mask.
                tbb::parallel_for(nodeRange, FillLeafNodeVoxels<TreeType>(distTree, nodes));

                narrowBandMask.topologyUnion(candidateMask);

                // Spill across leafnode faces; the newly added voxels become
                // the candidates for the next iteration.
                ExpandLeafNodeRegion<TreeType> op(distTree, narrowBandMask, nodes);
                tbb::parallel_reduce(nodeRange, op);

                if (op.newMaskTree().empty()) break;

                candidateMask.clear();
                candidateMask.merge(op.newMaskTree());
            } // end expand loop
        } // end range loop
    }

    TreeType const * const mTree;
    BoolTreeTypePtr * const mSegments;
}; // ExpandNarrowbandMask
/// @brief TBB body object that applies a signed flood fill to each tree
///   segment, using the input tree's background magnitude as the exterior
///   value and the smallest value found in the input tree as the (negative)
///   interior value.
template<typename TreeType>
struct FloodFillSign
{
    using TreeTypePtr = typename TreeType::Ptr;
    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using RootNodeType = typename TreeType::RootNodeType;
    using NodeChainType = typename RootNodeType::NodeChainType;
    using InternalNodeType = typename NodeChainType::template Get<1>;

    FloodFillSign(const TreeType& tree, std::vector<TreeTypePtr>& segments)
        : mTree(&tree)
        , mSegments(!segments.empty() ? &segments.front() : nullptr)
        , mMinValue(ValueType(0.0))
    {
        // Determine the smallest value in the input tree: check internal-node
        // tiles first and scan leafnode voxels only if no tile is negative.
        ValueType minSDFValue = std::numeric_limits<ValueType>::max();

        {
            std::vector<const InternalNodeType*> nodes;
            tree.getNodes(nodes);

            if (!nodes.empty()) {
                FindMinTileValue<InternalNodeType> minOp(nodes.data());
                tbb::parallel_reduce(tbb::blocked_range<size_t>(0, nodes.size()), minOp);
                minSDFValue = std::min(minSDFValue, minOp.minValue);
            }
        }

        if (minSDFValue > ValueType(0.0)) {
            std::vector<const LeafNodeType*> nodes;
            tree.getNodes(nodes);
            if (!nodes.empty()) {
                FindMinVoxelValue<LeafNodeType> minOp(nodes.data());
                tbb::parallel_reduce(tbb::blocked_range<size_t>(0, nodes.size()), minOp);
                minSDFValue = std::min(minSDFValue, minOp.minValue);
            }
        }

        mMinValue = minSDFValue;
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        // Interior tiles get the most negative value found above; exterior
        // tiles get the (positive) background magnitude.
        const ValueType interiorValue = -std::abs(mMinValue);
        const ValueType exteriorValue = std::abs(mTree->background());
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            tools::signedFloodFillWithValues(*mSegments[n], exteriorValue, interiorValue);
        }
    }

private:
    TreeType const * const mTree;
    TreeTypePtr * const mSegments;
    ValueType mMinValue;
}; // FloodFillSign
/// @brief TBB body object that, for each boolean mask in @a masks, copies
///   the corresponding values (voxels and tiles) from the input tree into a
///   new tree stored in the matching slot of @a segments.
template<typename TreeType>
struct MaskedCopy
{
    using TreeTypePtr = typename TreeType::Ptr;
    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;

    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolTreeTypePtr = typename BoolTreeType::Ptr;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;

    MaskedCopy(const TreeType& tree, std::vector<TreeTypePtr>& segments,
        std::vector<BoolTreeTypePtr>& masks)
        : mTree(&tree)
        , mSegments(!segments.empty() ? &segments.front() : nullptr)
        , mMasks(!masks.empty() ? &masks.front() : nullptr)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        std::vector<const BoolLeafNodeType*> nodes;

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            const BoolTreeType& mask = *mMasks[n];

            nodes.clear();
            mask.getNodes(nodes);

            // Nested reduction: copy the masked values node by node.
            Copy op(*mTree, nodes);
            tbb::parallel_reduce(tbb::blocked_range<size_t>(0, nodes.size()), op);
            mSegments[n] = op.outputTree();
        }
    }

private:
    /// parallel_reduce body that copies mask-selected voxels (or, when the
    /// input has no leafnode at a mask node's origin, the enclosing tile)
    /// into a private output tree; partial trees are combined in join().
    struct Copy {
        Copy(const TreeType& inputTree, std::vector<const BoolLeafNodeType*>& maskNodes)
            : mInputTree(&inputTree)
            , mMaskNodes(!maskNodes.empty() ? &maskNodes.front() : nullptr)
            , mOutputTreePtr(new TreeType(inputTree.background()))
        {
        }

        Copy(const Copy& rhs, tbb::split)
            : mInputTree(rhs.mInputTree)
            , mMaskNodes(rhs.mMaskNodes)
            , mOutputTreePtr(new TreeType(mInputTree->background()))
        {
        }

        TreeTypePtr& outputTree() { return mOutputTreePtr; }

        void join(Copy& rhs) { mOutputTreePtr->merge(*rhs.mOutputTreePtr); }

        void operator()(const tbb::blocked_range<size_t>& range) {

            tree::ValueAccessor<const TreeType> inputAcc(*mInputTree);
            tree::ValueAccessor<TreeType> outputAcc(*mOutputTreePtr);

            for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

                const BoolLeafNodeType& maskNode = *mMaskNodes[n];
                if (maskNode.isEmpty()) continue;

                const Coord& ijk = maskNode.origin();

                const LeafNodeType* inputNode = inputAcc.probeConstLeaf(ijk);
                if (inputNode) {

                    // Copy only the voxels selected by the mask.
                    LeafNodeType* outputNode = outputAcc.touchLeaf(ijk);

                    for (typename BoolLeafNodeType::ValueOnCIter it = maskNode.cbeginValueOn();
                        it; ++it)
                    {
                        const Index idx = it.pos();
                        outputNode->setValueOn(idx, inputNode->getValue(idx));
                    }
                } else {
                    // No input leafnode: replicate the enclosing active tile.
                    const int valueDepth = inputAcc.getValueDepth(ijk);
                    if (valueDepth >= 0) {
                        outputAcc.addTile(TreeType::RootNodeType::LEVEL - valueDepth,
                            ijk, inputAcc.getValue(ijk), true);
                    }
                }
            }
        }

    private:
        TreeType const * const mInputTree;
        BoolLeafNodeType const * const * const mMaskNodes;
        TreeTypePtr mOutputTreePtr;
    }; // struct Copy

    TreeType const * const mTree;
    TreeTypePtr * const mSegments;
    BoolTreeTypePtr * const mMasks;
}; // MaskedCopy
////////////////////////////////////////
/// @brief TBB body object that records, for each volume in @a segments,
///   its active voxel count in the corresponding slot of @a countArray.
template<typename VolumePtrType>
struct ComputeActiveVoxelCount
{
    ComputeActiveVoxelCount(std::vector<VolumePtrType>& segments, size_t *countArray)
        : mSegments(segments.empty() ? nullptr : segments.data())
        , mCountArray(countArray)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        for (size_t idx = range.begin(); idx != range.end(); ++idx) {
            mCountArray[idx] = mSegments[idx]->activeVoxelCount();
        }
    }

    VolumePtrType * const mSegments;
    size_t * const mCountArray;
};
/// @brief Comparator that orders indices by descending count, as recorded
///   in the external @a countArray (used to sort segments largest-first).
struct GreaterCount
{
    GreaterCount(const size_t *countArray) : mCountArray(countArray) {}

    inline bool operator() (const size_t& lhs, const size_t& rhs) const
    {
        // lhs precedes rhs exactly when its count is strictly larger.
        return mCountArray[rhs] < mCountArray[lhs];
    }

    size_t const * const mCountArray;
};
////////////////////////////////////////
/// @brief Adapter that returns result trees unchanged; specialized for Grid
///   types elsewhere so that grid inputs produce grid outputs.
template<typename TreeType>
struct GridOrTreeConstructor
{
    using TreeTypePtr = typename TreeType::Ptr;
    using BoolTreePtrType = typename TreeType::template ValueConverter<bool>::Type::Ptr;

    /// Tree-level overload: the mask tree is already the final result.
    static BoolTreePtrType constructMask(const TreeType&, BoolTreePtrType& maskTree)
    {
        return maskTree;
    }

    /// Tree-level overload: the output tree is already the final result.
    static TreeTypePtr construct(const TreeType&, TreeTypePtr& tree)
    {
        return tree;
    }
};
/// @brief Grid specialization: wraps result trees in grids that take on the
///   input grid's transform (and, for full outputs, its metadata).
template<typename TreeType>
struct GridOrTreeConstructor<Grid<TreeType> >
{
    using GridType = Grid<TreeType>;
    using GridTypePtr = typename Grid<TreeType>::Ptr;
    using TreeTypePtr = typename TreeType::Ptr;

    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolTreePtrType = typename BoolTreeType::Ptr;
    using BoolGridType = Grid<BoolTreeType>;
    using BoolGridPtrType = typename BoolGridType::Ptr;

    /// Wrap @a maskTree in a boolean grid that shares @a grid's transform.
    static BoolGridPtrType constructMask(const GridType& grid, BoolTreePtrType& maskTree) {
        BoolGridPtrType outputGrid = BoolGridType::create(maskTree);
        outputGrid->setTransform(grid.transform().copy());
        return outputGrid;
    }

    /// Wrap @a tree in a grid that shares @a grid's transform and metadata.
    static GridTypePtr construct(const GridType& grid, TreeTypePtr& tree) {
        GridTypePtr outputGrid = GridType::create(tree);
        outputGrid->setTransform(grid.transform().copy());
        outputGrid->insertMeta(grid);
        return outputGrid;
    }
};
} // namespace level_set_util_internal
////////////////////////////////////////
/// @brief Threaded, in-place conversion of a signed distance field / level
///   set into a fog volume: negative (interior) values are remapped to the
///   (0, 1] range and activated, everything else becomes an inactive zero
///   background, and the grid class is set to GRID_FOG_VOLUME.
/// @param grid            level set grid to transform (modified in place)
/// @param cutoffDistance  interior distance at which the fog density reaches
///                        one; clamped so it never exceeds the magnitude of
///                        the smallest value in the input.
template <class GridType>
inline void
sdfToFogVolume(GridType& grid, typename GridType::ValueType cutoffDistance)
{
    using ValueType = typename GridType::ValueType;
    using TreeType = typename GridType::TreeType;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using RootNodeType = typename TreeType::RootNodeType;
    using NodeChainType = typename RootNodeType::NodeChainType;
    using InternalNodeType = typename NodeChainType::template Get<1>;

    //////////

    TreeType& tree = grid.tree();

    size_t numLeafNodes = 0, numInternalNodes = 0;

    std::vector<LeafNodeType*> nodes;
    std::vector<size_t> leafnodeCount;

    {
        // Compute the prefix sum of the leafnode count in each internal node.
        std::vector<InternalNodeType*> internalNodes;
        tree.getNodes(internalNodes);

        numInternalNodes = internalNodes.size();

        leafnodeCount.push_back(0);
        for (size_t n = 0; n < numInternalNodes; ++n) {
            leafnodeCount.push_back(leafnodeCount.back() + internalNodes[n]->leafCount());
        }

        numLeafNodes = leafnodeCount.back();

        // Steal all leafnodes (Removes them from the tree and transfers ownership.)
        nodes.reserve(numLeafNodes);
        for (size_t n = 0; n < numInternalNodes; ++n) {
            internalNodes[n]->stealNodes(nodes, tree.background(), false);
        }

        // Clamp cutoffDistance to min sdf value
        ValueType minSDFValue = std::numeric_limits<ValueType>::max();

        {
            level_set_util_internal::FindMinTileValue<InternalNodeType> minOp(internalNodes.data());
            tbb::parallel_reduce(tbb::blocked_range<size_t>(0, internalNodes.size()), minOp);
            minSDFValue = std::min(minSDFValue, minOp.minValue);
        }

        if (minSDFValue > ValueType(0.0)) {
            // No negative tile found; the minimum must be a voxel value.
            level_set_util_internal::FindMinVoxelValue<LeafNodeType> minOp(nodes.data());
            tbb::parallel_reduce(tbb::blocked_range<size_t>(0, nodes.size()), minOp);
            minSDFValue = std::min(minSDFValue, minOp.minValue);
        }

        cutoffDistance = -std::abs(cutoffDistance);
        cutoffDistance = minSDFValue > cutoffDistance ? minSDFValue : cutoffDistance;
    }

    // Transform voxel values and delete leafnodes that are uniformly zero after the transformation.
    // (Positive values are set to zero with inactive state and negative values are remapped
    // from zero to one with active state.)
    tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()),
        level_set_util_internal::SDFVoxelsToFogVolume<LeafNodeType>(nodes.data(), cutoffDistance));

    // Populate a new tree with the remaining leafnodes
    typename TreeType::Ptr newTree(new TreeType(ValueType(0.0)));

    level_set_util_internal::PopulateTree<TreeType> populate(
        *newTree, nodes.data(), leafnodeCount.data(), 0);
    tbb::parallel_reduce(tbb::blocked_range<size_t>(0, numInternalNodes), populate);

    // Transform tile values (Negative valued tiles are set to 1.0 with active state.)
    std::vector<InternalNodeType*> internalNodes;
    newTree->getNodes(internalNodes);

    tbb::parallel_for(tbb::blocked_range<size_t>(0, internalNodes.size()),
        level_set_util_internal::SDFTilesToFogVolume<TreeType, InternalNodeType>(
            tree, internalNodes.data()));

    {
        // Transform the tiles above the internal-node level, reading the
        // original tree's values to decide which are interior.
        tree::ValueAccessor<const TreeType> acc(tree);

        typename TreeType::ValueAllIter it(*newTree);
        it.setMaxDepth(TreeType::ValueAllIter::LEAF_DEPTH - 2);

        for ( ; it; ++it) {
            if (acc.getValue(it.getCoord()) < ValueType(0.0)) {
                it.setValue(ValueType(1.0));
                it.setActiveState(true);
            }
        }
    }

    // Insert missing root level tiles. (The new tree is constructed from the remaining leafnodes
    // and will therefore not contain any root level tiles that may exist in the original tree.)
    {
        typename TreeType::ValueAllIter it(tree);
        it.setMaxDepth(TreeType::ValueAllIter::ROOT_DEPTH);
        for ( ; it; ++it) {
            if (it.getValue() <  ValueType(0.0)) {
                newTree->addTile(TreeType::ValueAllIter::ROOT_LEVEL, it.getCoord(),
                    ValueType(1.0), true);
            }
        }
    }

    grid.setTree(newTree);
    grid.setGridClass(GRID_FOG_VOLUME);
}
////////////////////////////////////////
template <class GridOrTreeType>
inline typename GridOrTreeType::template ValueConverter<bool>::Type::Ptr
sdfInteriorMask(const GridOrTreeType& volume, typename GridOrTreeType::ValueType isovalue)
{
using TreeType = typename TreeAdapter<GridOrTreeType>::TreeType;
const TreeType& tree = TreeAdapter<GridOrTreeType>::tree(volume);
using BoolTreePtrType = typename TreeType::template ValueConverter<bool>::Type::Ptr;
BoolTreePtrType mask = level_set_util_internal::computeInteriorMask(tree, isovalue);
return level_set_util_internal::GridOrTreeConstructor<GridOrTreeType>::constructMask(
volume, mask);
}
template<typename GridOrTreeType>
inline typename GridOrTreeType::template ValueConverter<bool>::Type::Ptr
extractEnclosedRegion(const GridOrTreeType& volume,
typename GridOrTreeType::ValueType isovalue,
const typename TreeAdapter<GridOrTreeType>::TreeType::template ValueConverter<bool>::Type*
fillMask)
{
using TreeType = typename TreeAdapter<GridOrTreeType>::TreeType;
const TreeType& tree = TreeAdapter<GridOrTreeType>::tree(volume);
using CharTreePtrType = typename TreeType::template ValueConverter<char>::Type::Ptr;
CharTreePtrType regionMask = level_set_util_internal::computeEnclosedRegionMask(
tree, isovalue, fillMask);
using BoolTreePtrType = typename TreeType::template ValueConverter<bool>::Type::Ptr;
BoolTreePtrType mask = level_set_util_internal::computeInteriorMask(*regionMask, 0);
return level_set_util_internal::GridOrTreeConstructor<GridOrTreeType>::constructMask(
volume, mask);
}
////////////////////////////////////////
template<typename GridOrTreeType>
inline typename GridOrTreeType::template ValueConverter<bool>::Type::Ptr
extractIsosurfaceMask(const GridOrTreeType& volume, typename GridOrTreeType::ValueType isovalue)
{
using TreeType = typename TreeAdapter<GridOrTreeType>::TreeType;
const TreeType& tree = TreeAdapter<GridOrTreeType>::tree(volume);
std::vector<const typename TreeType::LeafNodeType*> nodes;
tree.getNodes(nodes);
using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
typename BoolTreeType::Ptr mask(new BoolTreeType(false));
level_set_util_internal::MaskIsovalueCrossingVoxels<TreeType> op(tree, nodes, *mask, isovalue);
tbb::parallel_reduce(tbb::blocked_range<size_t>(0, nodes.size()), op);
return level_set_util_internal::GridOrTreeConstructor<GridOrTreeType>::constructMask(
volume, mask);
}
////////////////////////////////////////
/// @brief Compute one boolean mask per topologically disjoint component of the
/// input volume's active voxels and append the masks to @a masks, sorted in
/// descending order of active voxel count.
template<typename GridOrTreeType>
inline void
extractActiveVoxelSegmentMasks(const GridOrTreeType& volume,
    std::vector<typename GridOrTreeType::template ValueConverter<bool>::Type::Ptr>& masks)
{
    using TreeType = typename TreeAdapter<GridOrTreeType>::TreeType;
    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolTreePtrType = typename BoolTreeType::Ptr;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;
    using NodeMaskSegmentType = level_set_util_internal::NodeMaskSegment<BoolLeafNodeType>;
    using NodeMaskSegmentPtrType = typename NodeMaskSegmentType::Ptr;
    using NodeMaskSegmentPtrVector = typename std::vector<NodeMaskSegmentPtrType>;
    using NodeMaskSegmentRawPtrVector = typename std::vector<NodeMaskSegmentType*>;

    /////

    const TreeType& tree = TreeAdapter<GridOrTreeType>::tree(volume);

    // Work on a voxelized boolean copy of the input topology.
    BoolTreeType topologyMask(tree, false, TopologyCopy());

    // prune out any inactive leaf nodes or inactive tiles
    tools::pruneInactive(topologyMask);

    if (topologyMask.hasActiveTiles()) {
        topologyMask.voxelizeActiveTiles();
    }

    std::vector<BoolLeafNodeType*> leafnodes;
    topologyMask.getNodes(leafnodes);

    if (leafnodes.empty()) return;

    // 1. Split node masks into disjoint segments
    // Note: The LeafNode origin coord is modified to record the 'leafnodes' array offset.

    std::unique_ptr<NodeMaskSegmentPtrVector[]> nodeSegmentArray(
        new NodeMaskSegmentPtrVector[leafnodes.size()]);

    tbb::parallel_for(tbb::blocked_range<size_t>(0, leafnodes.size()),
        level_set_util_internal::SegmentNodeMask<BoolLeafNodeType>(
            leafnodes, nodeSegmentArray.get()));

    // 2. Compute segment connectivity
    tbb::parallel_for(tbb::blocked_range<size_t>(0, leafnodes.size()),
        level_set_util_internal::ConnectNodeMaskSegments<BoolTreeType, BoolLeafNodeType>(
            topologyMask, nodeSegmentArray.get()));

    topologyMask.clear();

    size_t nodeSegmentCount = 0;
    for (size_t n = 0, N = leafnodes.size(); n < N; ++n) {
        nodeSegmentCount += nodeSegmentArray[n].size();
    }

    // 3. Group connected segments
    // Depth-first traversal of the segment connectivity graph: each group
    // collects every segment reachable from an unvisited seed segment.
    std::deque<NodeMaskSegmentRawPtrVector> nodeSegmentGroups;

    NodeMaskSegmentType* nextSegment = nodeSegmentArray[0][0].get();
    while (nextSegment) {
        nodeSegmentGroups.push_back(NodeMaskSegmentRawPtrVector());

        std::vector<NodeMaskSegmentType*>& segmentGroup = nodeSegmentGroups.back();
        segmentGroup.reserve(nodeSegmentCount);

        // Stack-based flood fill over segment connections.
        std::deque<NodeMaskSegmentType*> segmentQueue;
        segmentQueue.push_back(nextSegment);
        nextSegment = nullptr;

        while (!segmentQueue.empty()) {
            NodeMaskSegmentType* segment = segmentQueue.back();
            segmentQueue.pop_back();

            if (segment->visited) continue;
            segment->visited = true;

            segmentGroup.push_back(segment);

            // queue connected segments
            std::vector<NodeMaskSegmentType*>& connections = segment->connections;
            for (size_t n = 0, N = connections.size(); n < N; ++n) {
                if (!connections[n]->visited) segmentQueue.push_back(connections[n]);
            }
        }

        // find first unvisited segment
        for (size_t n = 0, N = leafnodes.size(); n < N; ++n) {
            NodeMaskSegmentPtrVector& nodeSegments = nodeSegmentArray[n];
            for (size_t i = 0, I = nodeSegments.size(); i < I; ++i) {
                if (!nodeSegments[i]->visited) nextSegment = nodeSegments[i].get();
            }
        }
    }

    // 4. Mask segment groups
    if (nodeSegmentGroups.size() == 1) {
        // Single connected component: the pruned/voxelized topology copy is the mask.
        BoolTreePtrType mask(new BoolTreeType(tree, false, TopologyCopy()));

        tools::pruneInactive(*mask);

        if (mask->hasActiveTiles()) {
            mask->voxelizeActiveTiles();
        }

        masks.push_back(
            level_set_util_internal::GridOrTreeConstructor<GridOrTreeType>::constructMask(
                volume, mask));

    } else if (nodeSegmentGroups.size() > 1) {
        for (size_t n = 0, N = nodeSegmentGroups.size(); n < N; ++n) {
            NodeMaskSegmentRawPtrVector& segmentGroup = nodeSegmentGroups[n];

            level_set_util_internal::MaskSegmentGroup<BoolTreeType> op(segmentGroup);
            tbb::parallel_reduce(tbb::blocked_range<size_t>(0, segmentGroup.size()), op);

            masks.push_back(
                level_set_util_internal::GridOrTreeConstructor<GridOrTreeType>::constructMask(
                    volume, op.mask()));
        }
    }

    // 5. Sort segments in descending order based on the active voxel count.
    if (masks.size() > 1) {
        const size_t segmentCount = masks.size();

        std::unique_ptr<size_t[]> segmentOrderArray(new size_t[segmentCount]);
        std::unique_ptr<size_t[]> voxelCountArray(new size_t[segmentCount]);

        for (size_t n = 0; n < segmentCount; ++n) {
            segmentOrderArray[n] = n;
        }

        tbb::parallel_for(tbb::blocked_range<size_t>(0, segmentCount),
            level_set_util_internal::ComputeActiveVoxelCount<BoolTreePtrType>(
                masks, voxelCountArray.get()));

        // Sort the index array (not the masks) by voxel count, then gather.
        size_t *begin = segmentOrderArray.get();
        tbb::parallel_sort(begin, begin + masks.size(), level_set_util_internal::GreaterCount(
            voxelCountArray.get()));

        std::vector<BoolTreePtrType> orderedMasks;
        orderedMasks.reserve(masks.size());

        for (size_t n = 0; n < segmentCount; ++n) {
            orderedMasks.push_back(masks[segmentOrderArray[n]]);
        }

        masks.swap(orderedMasks);
    }
} // extractActiveVoxelSegmentMasks()
template<typename GridOrTreeType>
inline void
segmentActiveVoxels(const GridOrTreeType& volume,
std::vector<typename GridOrTreeType::Ptr>& segments)
{
using TreeType = typename TreeAdapter<GridOrTreeType>::TreeType;
using TreePtrType = typename TreeType::Ptr;
using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
using BoolTreePtrType = typename BoolTreeType::Ptr;
const TreeType& inputTree = TreeAdapter<GridOrTreeType>::tree(volume);
// 1. Segment active topology mask
std::vector<BoolTreePtrType> maskSegmentArray;
extractActiveVoxelSegmentMasks(inputTree, maskSegmentArray);
// 2. Export segments
const size_t numSegments = std::max(size_t(1), maskSegmentArray.size());
std::vector<TreePtrType> outputSegmentArray(numSegments);
if (maskSegmentArray.empty()) {
// if no active voxels in the original volume, copy just the background
// value of the input tree
outputSegmentArray[0] = TreePtrType(new TreeType(inputTree.background()));
} else if (numSegments == 1) {
// if there's only one segment with active voxels, copy the input tree
TreePtrType segment(new TreeType(inputTree));
// however, if the leaf counts do not match due to the pruning of inactive leaf
// nodes in the mask, do a topology intersection to drop these inactive leafs
if (segment->leafCount() != inputTree.leafCount()) {
segment->topologyIntersection(*maskSegmentArray[0]);
}
outputSegmentArray[0] = segment;
} else {
const tbb::blocked_range<size_t> segmentRange(0, numSegments);
tbb::parallel_for(segmentRange,
level_set_util_internal::MaskedCopy<TreeType>(inputTree, outputSegmentArray,
maskSegmentArray));
}
for (auto& segment : outputSegmentArray) {
segments.push_back(
level_set_util_internal::GridOrTreeConstructor<GridOrTreeType>::construct(
volume, segment));
}
}
template<typename GridOrTreeType>
inline void
segmentSDF(const GridOrTreeType& volume, std::vector<typename GridOrTreeType::Ptr>& segments)
{
using TreeType = typename TreeAdapter<GridOrTreeType>::TreeType;
using TreePtrType = typename TreeType::Ptr;
using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
using BoolTreePtrType = typename BoolTreeType::Ptr;
const TreeType& inputTree = TreeAdapter<GridOrTreeType>::tree(volume);
// 1. Mask zero crossing voxels
BoolTreePtrType mask = extractIsosurfaceMask(inputTree, lsutilGridZero<GridOrTreeType>());
// 2. Segment the zero crossing mask
std::vector<BoolTreePtrType> maskSegmentArray;
extractActiveVoxelSegmentMasks(*mask, maskSegmentArray);
const size_t numSegments = std::max(size_t(1), maskSegmentArray.size());
std::vector<TreePtrType> outputSegmentArray(numSegments);
if (maskSegmentArray.empty()) {
// if no active voxels in the original volume, copy just the background
// value of the input tree
outputSegmentArray[0] = TreePtrType(new TreeType(inputTree.background()));
} else {
const tbb::blocked_range<size_t> segmentRange(0, numSegments);
// 3. Expand zero crossing mask to capture sdf narrow band
tbb::parallel_for(segmentRange,
level_set_util_internal::ExpandNarrowbandMask<TreeType>(inputTree, maskSegmentArray));
// 4. Export sdf segments
tbb::parallel_for(segmentRange, level_set_util_internal::MaskedCopy<TreeType>(
inputTree, outputSegmentArray, maskSegmentArray));
tbb::parallel_for(segmentRange,
level_set_util_internal::FloodFillSign<TreeType>(inputTree, outputSegmentArray));
}
for (auto& segment : outputSegmentArray) {
segments.push_back(
level_set_util_internal::GridOrTreeConstructor<GridOrTreeType>::construct(
volume, segment));
}
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_LEVEL_SET_UTIL_HAS_BEEN_INCLUDED
| 94,368 | C | 35.337697 | 100 | 0.589151 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Filter.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @author Ken Museth
///
/// @file tools/Filter.h
///
/// @brief Filtering of VDB volumes. Note that only the values in the
/// grid are changed, not its topology! All operations can optionally
/// be masked with another grid that acts as an alpha-mask.
#ifndef OPENVDB_TOOLS_FILTER_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_FILTER_HAS_BEEN_INCLUDED
#include <tbb/parallel_for.h>
#include <openvdb/Types.h>
#include <openvdb/math/Math.h>
#include <openvdb/math/Stencils.h>
#include <openvdb/math/Transform.h>
#include <openvdb/tree/LeafManager.h>
#include <openvdb/util/NullInterrupter.h>
#include <openvdb/Grid.h>
#include "Interpolation.h"
#include <algorithm> // for std::max()
#include <functional>
#include <type_traits>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Volume filtering (e.g., diffusion) with optional alpha masking
///
/// @note Only the values in the grid are changed, not its topology!
template<typename GridT,
         typename MaskT = typename GridT::template ValueConverter<float>::Type,
         typename InterruptT = util::NullInterrupter>
class Filter
{
public:
    using GridType = GridT;
    using MaskType = MaskT;
    using TreeType = typename GridType::TreeType;
    using LeafType = typename TreeType::LeafNodeType;
    using ValueType = typename GridType::ValueType;
    using AlphaType = typename MaskType::ValueType;
    using LeafManagerType = typename tree::LeafManager<TreeType>;
    using RangeType = typename LeafManagerType::LeafRange;
    using BufferType = typename LeafManagerType::BufferType;
    static_assert(std::is_floating_point<AlphaType>::value,
        "openvdb::tools::Filter requires a mask grid with floating-point values");

    /// Constructor
    /// @param grid Grid to be filtered.
    /// @param interrupt Optional interrupter.
    Filter(GridT& grid, InterruptT* interrupt = nullptr)
        : mGrid(&grid)
        , mTask(nullptr)
        , mInterrupter(interrupt)
        , mMask(nullptr)
        , mGrainSize(1)
        , mMinMask(0)
        , mMaxMask(1)
        , mInvertMask(false)
    {
    }

    /// @brief Shallow copy constructor called by tbb::parallel_for()
    /// threads during filtering.
    /// @param other The other Filter from which to copy.
    Filter(const Filter& other)
        : mGrid(other.mGrid)
        , mTask(other.mTask)
        , mInterrupter(other.mInterrupter)
        , mMask(other.mMask)
        , mGrainSize(other.mGrainSize)
        , mMinMask(other.mMinMask)
        , mMaxMask(other.mMaxMask)
        , mInvertMask(other.mInvertMask)
    {
    }

    /// @return the grain-size used for multi-threading
    int getGrainSize() const { return mGrainSize; }
    /// @brief Set the grain-size used for multi-threading.
    /// @note A grain size of 0 or less disables multi-threading!
    void setGrainSize(int grainsize) { mGrainSize = grainsize; }

    /// @brief Return the minimum value of the mask to be used for the
    /// derivation of a smooth alpha value.
    AlphaType minMask() const { return mMinMask; }
    /// @brief Return the maximum value of the mask to be used for the
    /// derivation of a smooth alpha value.
    AlphaType maxMask() const { return mMaxMask; }
    /// @brief Define the range for the (optional) scalar mask.
    /// @param min Minimum value of the range.
    /// @param max Maximum value of the range.
    /// @details Mask values outside the range are clamped to zero or one, and
    /// values inside the range map smoothly to 0->1 (unless the mask is inverted).
    /// @throw ValueError if @a min is not smaller than @a max.
    void setMaskRange(AlphaType min, AlphaType max)
    {
        if (!(min < max)) OPENVDB_THROW(ValueError, "Invalid mask range (expects min < max)");
        mMinMask = min;
        mMaxMask = max;
    }

    /// @brief Return true if the mask is inverted, i.e. min->max in the
    /// original mask maps to 1->0 in the inverted alpha mask.
    bool isMaskInverted() const { return mInvertMask; }
    /// @brief Invert the optional mask, i.e. min->max in the original
    /// mask maps to 1->0 in the inverted alpha mask.
    void invertMask(bool invert=true) { mInvertMask = invert; }

    /// @brief One iteration of a fast separable mean-value (i.e. box) filter.
    /// @param width The width of the mean-value filter is 2*width+1 voxels.
    /// @param iterations Number of times the mean-value filter is applied.
    /// @param mask Optional alpha mask.
    void mean(int width = 1, int iterations = 1, const MaskType* mask = nullptr);

    /// @brief One iteration of a fast separable Gaussian filter.
    ///
    /// @note This is approximated as 4 iterations of a separable mean filter
    /// which typically leads to an approximation that's better than 95%!
    /// @param width The width of the mean-value filter is 2*width+1 voxels.
    /// @param iterations Number of times the mean-value filter is applied.
    /// @param mask Optional alpha mask.
    void gaussian(int width = 1, int iterations = 1, const MaskType* mask = nullptr);

    /// @brief One iteration of a median-value filter
    ///
    /// @note This filter is not separable and is hence relatively slow!
    /// @param width The width of the median-value filter is 2*width+1 voxels.
    /// @param iterations Number of times the median-value filter is applied.
    /// @param mask Optional alpha mask.
    void median(int width = 1, int iterations = 1, const MaskType* mask = nullptr);

    /// Offsets (i.e. adds) a constant value to all active voxels.
    /// @param offset Offset in the same units as the grid.
    /// @param mask Optional alpha mask.
    void offset(ValueType offset, const MaskType* mask = nullptr);

    /// @brief Used internally by tbb::parallel_for()
    /// @param range Range of LeafNodes over which to multi-thread.
    ///
    /// @warning Never call this method directly!
    void operator()(const RangeType& range) const
    {
        if (mTask) mTask(const_cast<Filter*>(this), range);
        else OPENVDB_THROW(ValueError, "task is undefined - call median(), mean(), etc.");
    }

private:
    using LeafT = typename TreeType::LeafNodeType;
    using VoxelIterT = typename LeafT::ValueOnIter;
    using VoxelCIterT = typename LeafT::ValueOnCIter;
    using BufferT = typename tree::LeafManager<TreeType>::BufferType;
    using LeafIterT = typename RangeType::Iterator;
    using AlphaMaskT = tools::AlphaMask<GridT, MaskT>;

    // Runs mTask over all leaf nodes, then swaps in the updated leaf buffers.
    void cook(LeafManagerType& leafs);

    // Functor that averages 2*width+1 voxel values along the template Axis
    // (Avg<0> = x, Avg<1> = y, Avg<2> = z).
    template<size_t Axis>
    struct Avg {
        Avg(const GridT* grid, Int32 w): acc(grid->tree()), width(w), frac(1.f/float(2*w+1)) {}
        inline ValueType operator()(Coord xyz);
        typename GridT::ConstAccessor acc;
        const Int32 width;
        const float frac;
    };

    // Private filter methods called by tbb::parallel_for threads
    template <typename AvgT>
    void doBox( const RangeType& r, Int32 w);
    void doBoxX(const RangeType& r, Int32 w) { this->doBox<Avg<0> >(r,w); }
    void doBoxY(const RangeType& r, Int32 w) { this->doBox<Avg<1> >(r,w); }
    void doBoxZ(const RangeType& r, Int32 w) { this->doBox<Avg<2> >(r,w); }
    void doMedian(const RangeType&, int);
    void doOffset(const RangeType&, ValueType);
    /// @return true if the process was interrupted
    bool wasInterrupted();

    GridType* mGrid;              // grid being filtered (not owned)
    typename std::function<void (Filter*, const RangeType&)> mTask; // task run by operator()
    InterruptT* mInterrupter;     // optional interrupter (not owned)
    const MaskType* mMask;        // optional alpha mask (not owned)
    int mGrainSize;               // threading grain size; <= 0 means serial
    AlphaType mMinMask, mMaxMask; // mask value range mapped to alpha 0->1
    bool mInvertMask;             // invert the alpha-mask mapping
}; // end of Filter class
////////////////////////////////////////
// Helpers used by Filter::Avg::operator() to accumulate voxel values.
namespace filter_internal {
// Generic accumulation: arithmetic sum.
template<typename T> static inline void accum(T& total, T val) { total += val; }
// Overload for bool grids: accumulate with logical OR.
inline void accum(bool& total, bool val) { total = total || val; }
}
// Average 2*width+1 voxel values centered on xyz along the template Axis.
template<typename GridT, typename MaskT, typename InterruptT>
template<size_t Axis>
inline typename GridT::ValueType
Filter<GridT, MaskT, InterruptT>::Avg<Axis>::operator()(Coord xyz)
{
    ValueType total = zeroVal<ValueType>();
    // Slide the coordinate along the axis by mutating it in place.
    Int32& c = xyz[Axis];
    const Int32 last = c + width;
    for (c -= width; c <= last; ++c) {
        filter_internal::accum(total, acc.getValue(xyz));
    }
    OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
    const ValueType result = static_cast<ValueType>(total * frac);
    OPENVDB_NO_TYPE_CONVERSION_WARNING_END
    return result;
}
////////////////////////////////////////
/// Apply @a iterations passes of the separable box filter of width 2*w+1.
template<typename GridT, typename MaskT, typename InterruptT>
inline void
Filter<GridT, MaskT, InterruptT>::mean(int width, int iterations, const MaskType* mask)
{
    mMask = mask;
    if (mInterrupter) mInterrupter->start("Applying mean filter");

    const int w = std::max(1, width);
    LeafManagerType leafs(mGrid->tree(), 1, mGrainSize == 0);

    for (int i = 0; i < iterations && !this->wasInterrupted(); ++i) {
        // One axis-aligned pass per dimension. The Z pass deliberately runs
        // before the Y pass to remain backward-compatible with an indexing
        // typo in the original logic.
        for (auto pass : {&Filter::doBoxX, &Filter::doBoxZ, &Filter::doBoxY}) {
            mTask = std::bind(pass, std::placeholders::_1, std::placeholders::_2, w);
            this->cook(leafs);
        }
    }

    if (mInterrupter) mInterrupter->end();
}
/// Approximate a Gaussian filter as four box-filter sweeps per iteration.
template<typename GridT, typename MaskT, typename InterruptT>
inline void
Filter<GridT, MaskT, InterruptT>::gaussian(int width, int iterations, const MaskType* mask)
{
    mMask = mask;
    if (mInterrupter) mInterrupter->start("Applying Gaussian filter");

    const int w = std::max(1, width);
    LeafManagerType leafs(mGrid->tree(), 1, mGrainSize == 0);

    for (int i = 0; i < iterations; ++i) {
        // Four separable box-filter sweeps approximate one Gaussian convolution.
        for (int n = 0; n < 4 && !this->wasInterrupted(); ++n) {
            // Z deliberately precedes Y for backward compatibility with an
            // indexing typo in the original logic.
            for (auto pass : {&Filter::doBoxX, &Filter::doBoxZ, &Filter::doBoxY}) {
                mTask = std::bind(pass, std::placeholders::_1, std::placeholders::_2, w);
                this->cook(leafs);
            }
        }
    }

    if (mInterrupter) mInterrupter->end();
}
/// Apply @a iterations passes of the (non-separable) median filter of width 2*w+1.
template<typename GridT, typename MaskT, typename InterruptT>
inline void
Filter<GridT, MaskT, InterruptT>::median(int width, int iterations, const MaskType* mask)
{
    mMask = mask;
    if (mInterrupter) mInterrupter->start("Applying median filter");

    LeafManagerType leafs(mGrid->tree(), 1, mGrainSize == 0);

    const int w = std::max(1, width);
    mTask = std::bind(&Filter::doMedian, std::placeholders::_1, std::placeholders::_2, w);

    int pass = 0;
    while (pass < iterations && !this->wasInterrupted()) {
        this->cook(leafs);
        ++pass;
    }

    if (mInterrupter) mInterrupter->end();
}
/// Add a constant @a value to every active voxel (optionally alpha-masked).
template<typename GridT, typename MaskT, typename InterruptT>
inline void
Filter<GridT, MaskT, InterruptT>::offset(ValueType value, const MaskType* mask)
{
    mMask = mask;
    if (mInterrupter) mInterrupter->start("Applying offset");

    // No auxiliary buffer is requested: the offset is applied in place.
    LeafManagerType leafs(mGrid->tree(), 0, mGrainSize == 0);

    mTask = std::bind(&Filter::doOffset, std::placeholders::_1, std::placeholders::_2, value);
    this->cook(leafs);

    if (mInterrupter) mInterrupter->end();
}
////////////////////////////////////////
/// Private method: run the currently assigned task over every leaf node
/// (threaded when the grain size is positive, serial otherwise), then swap
/// the updated leaf buffers into place.
template<typename GridT, typename MaskT, typename InterruptT>
inline void
Filter<GridT, MaskT, InterruptT>::cook(LeafManagerType& leafs)
{
    if (mGrainSize <= 0) {
        (*this)(leafs.leafRange());
    } else {
        tbb::parallel_for(leafs.leafRange(mGrainSize), *this);
    }
    leafs.swapLeafBuffer(1, mGrainSize == 0);
}
/// One dimensional convolution of a separable box filter
/// Filtered values are written into auxiliary leaf buffer 1; cook() later
/// swaps that buffer into place.
template<typename GridT, typename MaskT, typename InterruptT>
template <typename AvgT>
inline void
Filter<GridT, MaskT, InterruptT>::doBox(const RangeType& range, Int32 w)
{
    this->wasInterrupted();
    AvgT avg(mGrid, w);
    if (mMask) {
        typename AlphaMaskT::FloatType a, b;
        AlphaMaskT alpha(*mGrid, *mMask, mMinMask, mMaxMask, mInvertMask);
        for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) {
            BufferT& buffer = leafIter.buffer(1);
            for (VoxelCIterT iter = leafIter->cbeginValueOn(); iter; ++iter) {
                const Coord xyz = iter.getCoord();
                if (alpha(xyz, a, b)) {
                    // Blend the filtered value with the original using the
                    // alpha (a) and complementary (b) mask weights.
                    OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
                    const ValueType value(b*(*iter) + a*avg(xyz));
                    OPENVDB_NO_TYPE_CONVERSION_WARNING_END
                    buffer.setValue(iter.pos(), value);
                }
            }
        }
    } else {
        // Unmasked: every active voxel receives the plain axis average.
        for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) {
            BufferT& buffer = leafIter.buffer(1);
            for (VoxelCIterT iter = leafIter->cbeginValueOn(); iter; ++iter) {
                buffer.setValue(iter.pos(), avg(iter.getCoord()));
            }
        }
    }
}
/// Performs simple but slow median-value diffusion
/// Filtered values are written into auxiliary leaf buffer 1; cook() later
/// swaps that buffer into place.
template<typename GridT, typename MaskT, typename InterruptT>
inline void
Filter<GridT, MaskT, InterruptT>::doMedian(const RangeType& range, int width)
{
    this->wasInterrupted();
    typename math::DenseStencil<GridType> stencil(*mGrid, width);//creates local cache!
    if (mMask) {
        typename AlphaMaskT::FloatType a, b;
        AlphaMaskT alpha(*mGrid, *mMask, mMinMask, mMaxMask, mInvertMask);
        for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) {
            BufferT& buffer = leafIter.buffer(1);
            for (VoxelCIterT iter = leafIter->cbeginValueOn(); iter; ++iter) {
                if (alpha(iter.getCoord(), a, b)) {
                    stencil.moveTo(iter);
                    // Blend the stencil median with the original value using
                    // the alpha (a) and complementary (b) mask weights.
                    OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
                    ValueType value(b*(*iter) + a*stencil.median());
                    OPENVDB_NO_TYPE_CONVERSION_WARNING_END
                    buffer.setValue(iter.pos(), value);
                }
            }
        }
    } else {
        // Unmasked: every active voxel receives the stencil median directly.
        for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) {
            BufferT& buffer = leafIter.buffer(1);
            for (VoxelCIterT iter = leafIter->cbeginValueOn(); iter; ++iter) {
                stencil.moveTo(iter);
                buffer.setValue(iter.pos(), stencil.median());
            }
        }
    }
}
/// Add a constant @a offset to every active voxel in @a range, attenuated by
/// the alpha mask when one is set.
template<typename GridT, typename MaskT, typename InterruptT>
inline void
Filter<GridT, MaskT, InterruptT>::doOffset(const RangeType& range, ValueType offset)
{
    this->wasInterrupted();
    if (!mMask) {
        // Unmasked: apply the full offset everywhere.
        for (LeafIterT leaf = range.begin(); leaf; ++leaf) {
            for (VoxelIterT voxel = leaf->beginValueOn(); voxel; ++voxel) {
                voxel.setValue(*voxel + offset);
            }
        }
    } else {
        AlphaMaskT alpha(*mGrid, *mMask, mMinMask, mMaxMask, mInvertMask);
        typename AlphaMaskT::FloatType a, b;
        for (LeafIterT leaf = range.begin(); leaf; ++leaf) {
            for (VoxelIterT voxel = leaf->beginValueOn(); voxel; ++voxel) {
                if (!alpha(voxel.getCoord(), a, b)) continue;
                // Scale the offset by the alpha weight (a) for this voxel.
                OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
                const ValueType newValue(*voxel + a*offset);
                OPENVDB_NO_TYPE_CONVERSION_WARNING_END
                voxel.setValue(newValue);
            }
        }
    }
}
/// @brief Poll the optional interrupter and, if an interrupt was requested,
/// cancel the enclosing TBB task group so sibling tasks stop promptly.
/// @return true if the process was interrupted
/// NOTE(review): tbb::task::self() is deprecated/removed in oneTBB — confirm
/// the supported TBB version before upgrading.
template<typename GridT, typename MaskT, typename InterruptT>
inline bool
Filter<GridT, MaskT, InterruptT>::wasInterrupted()
{
    if (util::wasInterrupted(mInterrupter)) {
        tbb::task::self().cancel_group_execution();
        return true;
    }
    return false;
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_FILTER_HAS_BEEN_INCLUDED
| 16,322 | C | 35.112832 | 99 | 0.641037 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/LevelSetMorph.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @author Ken Museth
///
/// @file tools/LevelSetMorph.h
///
/// @brief Shape morphology of level sets. Morphing from a source
/// narrow-band level sets to a target narrow-band level set.
#ifndef OPENVDB_TOOLS_LEVEL_SET_MORPH_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_LEVEL_SET_MORPH_HAS_BEEN_INCLUDED
#include "LevelSetTracker.h"
#include "Interpolation.h" // for BoxSampler, etc.
#include <openvdb/math/FiniteDifference.h>
#include <functional>
#include <limits>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Shape morphology of level sets. Morphing from a source
/// narrow-band level set to a target narrow-band level set.
///
/// @details
/// The @c InterruptType template argument below refers to any class
/// with the following interface:
/// @code
/// class Interrupter {
///   ...
/// public:
///   void start(const char* name = nullptr) // called when computations begin
///   void end()                             // called when computations end
///   bool wasInterrupted(int percent=-1)    // return true to break computation
/// };
/// @endcode
///
/// @note If no template argument is provided for this InterruptType,
/// the util::NullInterrupter is used, which implies that all interrupter
/// calls are no-ops (i.e., they incur no computational overhead).
template<typename GridT,
         typename InterruptT = util::NullInterrupter>
class LevelSetMorphing
{
public:
    using GridType = GridT;
    using TreeType = typename GridT::TreeType;
    using TrackerT = LevelSetTracker<GridT, InterruptT>;
    using LeafRange = typename TrackerT::LeafRange;
    using LeafType = typename TrackerT::LeafType;
    using BufferType = typename TrackerT::BufferType;
    using ValueType = typename TrackerT::ValueType;

    /// Main constructor
    LevelSetMorphing(GridT& sourceGrid, const GridT& targetGrid, InterruptT* interrupt = nullptr)
        : mTracker(sourceGrid, interrupt)
        , mTarget(&targetGrid)
        , mMask(nullptr)
        , mSpatialScheme(math::HJWENO5_BIAS)
        , mTemporalScheme(math::TVD_RK2)
        , mMinMask(0)
        , mDeltaMask(1)
        , mInvertMask(false)
    {
    }

    virtual ~LevelSetMorphing() {}

    /// Redefine the target level set
    void setTarget(const GridT& targetGrid) { mTarget = &targetGrid; }

    /// Define the alpha mask
    void setAlphaMask(const GridT& maskGrid) { mMask = &maskGrid; }

    /// Return the spatial finite-difference scheme
    math::BiasedGradientScheme getSpatialScheme() const { return mSpatialScheme; }
    /// Set the spatial finite-difference scheme
    void setSpatialScheme(math::BiasedGradientScheme scheme) { mSpatialScheme = scheme; }

    /// Return the temporal integration scheme
    math::TemporalIntegrationScheme getTemporalScheme() const { return mTemporalScheme; }
    /// Set the temporal integration scheme
    void setTemporalScheme(math::TemporalIntegrationScheme scheme) { mTemporalScheme = scheme; }

    /// Return the spatial finite-difference scheme used by the level-set tracker
    math::BiasedGradientScheme getTrackerSpatialScheme() const
    {
        return mTracker.getSpatialScheme();
    }
    /// Set the spatial finite-difference scheme used by the level-set tracker
    void setTrackerSpatialScheme(math::BiasedGradientScheme scheme)
    {
        mTracker.setSpatialScheme(scheme);
    }

    /// Return the temporal integration scheme used by the level-set tracker
    math::TemporalIntegrationScheme getTrackerTemporalScheme() const
    {
        return mTracker.getTemporalScheme();
    }
    /// Set the temporal integration scheme used by the level-set tracker
    void setTrackerTemporalScheme(math::TemporalIntegrationScheme scheme)
    {
        mTracker.setTemporalScheme(scheme);
    }

    /// Return the number of normalizations performed per track or normalize call.
    int getNormCount() const { return mTracker.getNormCount(); }
    /// Set the number of normalizations performed per track or normalize call.
    void setNormCount(int n) { mTracker.setNormCount(n); }

    /// Return the grain size used for multithreading
    int getGrainSize() const { return mTracker.getGrainSize(); }
    /// @brief Set the grain size used for multithreading.
    /// @note A grain size of 0 or less disables multithreading!
    void setGrainSize(int grainsize) { mTracker.setGrainSize(grainsize); }

    /// @brief Return the minimum value of the mask to be used for the
    /// derivation of a smooth alpha value.
    ValueType minMask() const { return mMinMask; }
    /// @brief Return the maximum value of the mask to be used for the
    /// derivation of a smooth alpha value.
    ValueType maxMask() const { return mDeltaMask + mMinMask; }
    /// @brief Define the range for the (optional) scalar mask.
    /// @param min Minimum value of the range.
    /// @param max Maximum value of the range.
    /// @details Mask values outside the range map to alpha values of
    /// respectively zero and one, and values inside the range map
    /// smoothly to 0->1 (unless of course the mask is inverted).
    /// @throw ValueError if @a min is not smaller than @a max.
    void setMaskRange(ValueType min, ValueType max)
    {
        if (!(min < max)) OPENVDB_THROW(ValueError, "Invalid mask range (expects min < max)");
        mMinMask = min;
        mDeltaMask = max-min;
    }

    /// @brief Return true if the mask is inverted, i.e. min->max in the
    /// original mask maps to 1->0 in the inverted alpha mask.
    bool isMaskInverted() const { return mInvertMask; }
    /// @brief Invert the optional mask, i.e. min->max in the original
    /// mask maps to 1->0 in the inverted alpha mask.
    void invertMask(bool invert=true) { mInvertMask = invert; }

    /// @brief Advect the level set from its current time, @a time0, to its
    /// final time, @a time1. If @a time0 > @a time1, perform backward advection.
    ///
    /// @return the number of CFL iterations used to advect from @a time0 to @a time1
    size_t advect(ValueType time0, ValueType time1);

private:
    // disallow copy construction and copy by assignment!
    LevelSetMorphing(const LevelSetMorphing&);// not implemented
    LevelSetMorphing& operator=(const LevelSetMorphing&);// not implemented

    // Dispatch chain: advect() selects the spatial scheme, advect1 the
    // temporal scheme and advect2 the transform's map type.
    template<math::BiasedGradientScheme SpatialScheme>
    size_t advect1(ValueType time0, ValueType time1);

    template<math::BiasedGradientScheme SpatialScheme,
             math::TemporalIntegrationScheme TemporalScheme>
    size_t advect2(ValueType time0, ValueType time1);

    template<math::BiasedGradientScheme SpatialScheme,
             math::TemporalIntegrationScheme TemporalScheme,
             typename MapType>
    size_t advect3(ValueType time0, ValueType time1);

    TrackerT mTracker;                         // interface tracker for the source grid
    const GridT *mTarget, *mMask;              // target level set and optional mask (not owned)
    math::BiasedGradientScheme mSpatialScheme;
    math::TemporalIntegrationScheme mTemporalScheme;
    ValueType mMinMask, mDeltaMask;            // alpha range is [mMinMask, mMinMask + mDeltaMask]
    bool mInvertMask;

    // This templated private class implements all the level set magic.
    template<typename MapT, math::BiasedGradientScheme SpatialScheme,
             math::TemporalIntegrationScheme TemporalScheme>
    struct Morph
    {
        /// Main constructor
        Morph(LevelSetMorphing<GridT, InterruptT>& parent);
        /// Shallow copy constructor called by tbb::parallel_for() threads
        Morph(const Morph& other);
        /// Shallow copy constructor called by tbb::parallel_reduce() threads
        Morph(Morph& other, tbb::split);
        /// destructor
        virtual ~Morph() {}
        /// Advect the level set from its current time, time0, to its final time, time1.
        /// @return number of CFL iterations
        size_t advect(ValueType time0, ValueType time1);
        /// Used internally by tbb::parallel_for()
        void operator()(const LeafRange& r) const
        {
            if (mTask) mTask(const_cast<Morph*>(this), r);
            else OPENVDB_THROW(ValueError, "task is undefined - don\'t call this method directly");
        }
        /// Used internally by tbb::parallel_reduce()
        void operator()(const LeafRange& r)
        {
            if (mTask) mTask(this, r);
            else OPENVDB_THROW(ValueError, "task is undefined - don\'t call this method directly");
        }
        /// This is only called by tbb::parallel_reduce() threads
        void join(const Morph& other) { mMaxAbsS = math::Max(mMaxAbsS, other.mMaxAbsS); }
        /// Enum to define the type of multithreading
        enum ThreadingMode { PARALLEL_FOR, PARALLEL_REDUCE }; // for internal use
        // method calling tbb
        void cook(ThreadingMode mode, size_t swapBuffer = 0);
        /// Sample field and return the CFL time step
        typename GridT::ValueType sampleSpeed(ValueType time0, ValueType time1, Index speedBuffer);
        void sampleXformedSpeed(const LeafRange& r, Index speedBuffer);
        void sampleAlignedSpeed(const LeafRange& r, Index speedBuffer);
        // Convex combination of Phi and a forward Euler advection steps:
        // Phi(result) = alpha * Phi(phi) + (1-alpha) * (Phi(0) - dt * Speed(speed)*|Grad[Phi(0)]|);
        template <int Nominator, int Denominator>
        void euler(const LeafRange&, ValueType, Index, Index, Index);
        inline void euler01(const LeafRange& r, ValueType t, Index s) {this->euler<0,1>(r,t,0,1,s);}
        inline void euler12(const LeafRange& r, ValueType t) {this->euler<1,2>(r, t, 1, 1, 2);}
        inline void euler34(const LeafRange& r, ValueType t) {this->euler<3,4>(r, t, 1, 2, 3);}
        inline void euler13(const LeafRange& r, ValueType t) {this->euler<1,3>(r, t, 1, 2, 3);}
        using FuncType = typename std::function<void (Morph*, const LeafRange&)>;
        LevelSetMorphing* mParent;    // back-pointer to the owning morphing object
        ValueType mMinAbsS, mMaxAbsS; // mMaxAbsS is combined across threads via join()
        const MapT* mMap;
        FuncType mTask;               // task dispatched by the operator() overloads
    }; // end of private Morph struct

};//end of LevelSetMorphing
/// Dispatch to advect1() based on the configured spatial finite-difference scheme.
template<typename GridT, typename InterruptT>
inline size_t
LevelSetMorphing<GridT, InterruptT>::advect(ValueType time0, ValueType time1)
{
    // Only first-order upwinding and HJ-WENO5 are currently enabled; the
    // remaining biased schemes fall through to the error below.
    switch (mSpatialScheme) {
    case math::FIRST_BIAS:
        return this->advect1<math::FIRST_BIAS>(time0, time1);
    case math::HJWENO5_BIAS:
        return this->advect1<math::HJWENO5_BIAS>(time0, time1);
    case math::SECOND_BIAS:
    case math::THIRD_BIAS:
    case math::WENO5_BIAS:
    case math::UNKNOWN_BIAS:
    default:
        OPENVDB_THROW(ValueError, "Spatial difference scheme not supported!");
    }
    return 0;
}
template<typename GridT, typename InterruptT>
template<math::BiasedGradientScheme SpatialScheme>
inline size_t
LevelSetMorphing<GridT, InterruptT>::advect1(ValueType time0, ValueType time1)
{
    // Resolve the runtime temporal integration scheme to a compile-time
    // template argument and forward to advect2().
    if (mTemporalScheme == math::TVD_RK1) {
        return this->advect2<SpatialScheme, math::TVD_RK1>(time0, time1);
    }
    if (mTemporalScheme == math::TVD_RK2) {
        return this->advect2<SpatialScheme, math::TVD_RK2>(time0, time1);
    }
    if (mTemporalScheme == math::TVD_RK3) {
        return this->advect2<SpatialScheme, math::TVD_RK3>(time0, time1);
    }
    OPENVDB_THROW(ValueError, "Temporal integration scheme not supported!");
    return 0;
}
template<typename GridT, typename InterruptT>
template<math::BiasedGradientScheme SpatialScheme,
         math::TemporalIntegrationScheme TemporalScheme>
inline size_t
LevelSetMorphing<GridT, InterruptT>::advect2(ValueType time0, ValueType time1)
{
    // Resolve the tracked grid's map type at run time and forward to the
    // map-specialized advect3(). Unsupported map types throw.
    const auto mapKind = mTracker.grid().transform().mapType();
    if (mapKind == math::UniformScaleMap::mapType()) {
        return this->advect3<SpatialScheme, TemporalScheme, math::UniformScaleMap>(time0, time1);
    }
    if (mapKind == math::UniformScaleTranslateMap::mapType()) {
        return this->advect3<SpatialScheme, TemporalScheme,
                             math::UniformScaleTranslateMap>(time0, time1);
    }
    if (mapKind == math::UnitaryMap::mapType()) {
        return this->advect3<SpatialScheme, TemporalScheme, math::UnitaryMap>(time0, time1);
    }
    if (mapKind == math::TranslationMap::mapType()) {
        return this->advect3<SpatialScheme, TemporalScheme, math::TranslationMap>(time0, time1);
    }
    OPENVDB_THROW(ValueError, "MapType not supported!");
    return 0;
}
template<typename GridT, typename InterruptT>
template<math::BiasedGradientScheme SpatialScheme,
         math::TemporalIntegrationScheme TemporalScheme,
         typename MapT>
inline size_t
LevelSetMorphing<GridT, InterruptT>::advect3(ValueType time0, ValueType time1)
{
    // All template parameters are now fixed: build the fully specialized
    // Morph functor and let it perform the time integration.
    Morph<MapT, SpatialScheme, TemporalScheme> morpher(*this);
    return morpher.advect(time0, time1);
}
///////////////////////////////////////////////////////////////////////
template<typename GridT, typename InterruptT>
template <typename MapT, math::BiasedGradientScheme SpatialScheme,
          math::TemporalIntegrationScheme TemporalScheme>
inline
LevelSetMorphing<GridT, InterruptT>::
Morph<MapT, SpatialScheme, TemporalScheme>::
Morph(LevelSetMorphing<GridT, InterruptT>& parent)
    : mParent(&parent)
    , mMinAbsS(ValueType(1e-6))
    // Bug fix: mMaxAbsS was left uninitialized here, but the copy and
    // tbb::split constructors copy it and join() reads it - i.e. an
    // indeterminate value could be read before sampleSpeed() first assigns
    // it. Initialize it to the same floor value sampleSpeed() resets it to.
    , mMaxAbsS(mMinAbsS)
    , mMap(parent.mTracker.grid().transform().template constMap<MapT>().get())
    , mTask(nullptr)
{
}
// Copy constructor: shares the parent tool and map, and duplicates the
// current speed bounds and task so the copy can run independently.
template<typename GridT, typename InterruptT>
template <typename MapT, math::BiasedGradientScheme SpatialScheme,
          math::TemporalIntegrationScheme TemporalScheme>
inline
LevelSetMorphing<GridT, InterruptT>::
Morph<MapT, SpatialScheme, TemporalScheme>::
Morph(const Morph& other)
    : mParent(other.mParent)
    , mMinAbsS(other.mMinAbsS)
    , mMaxAbsS(other.mMaxAbsS)
    , mMap(other.mMap)
    , mTask(other.mTask)
{
}
// Splitting constructor required by tbb::parallel_reduce: each split starts
// from the current reduction state and results are merged back via join().
template<typename GridT, typename InterruptT>
template <typename MapT, math::BiasedGradientScheme SpatialScheme,
          math::TemporalIntegrationScheme TemporalScheme>
inline
LevelSetMorphing<GridT, InterruptT>::
Morph<MapT, SpatialScheme, TemporalScheme>::
Morph(Morph& other, tbb::split)
    : mParent(other.mParent)
    , mMinAbsS(other.mMinAbsS)
    , mMaxAbsS(other.mMaxAbsS)
    , mMap(other.mMap)
    , mTask(other.mTask)
{
}
// Integrate the morphing PDE from time0 to time1 using CFL-limited sub-steps
// and the TVD Runge-Kutta scheme selected at compile time. After each sub-step
// the narrow band is re-tracked. Returns the number of CFL sub-steps taken.
template<typename GridT, typename InterruptT>
template <typename MapT, math::BiasedGradientScheme SpatialScheme,
          math::TemporalIntegrationScheme TemporalScheme>
inline size_t
LevelSetMorphing<GridT, InterruptT>::
Morph<MapT, SpatialScheme, TemporalScheme>::
advect(ValueType time0, ValueType time1)
{
    namespace ph = std::placeholders;
    // Make sure we have enough temporal auxiliary buffers for the time
    // integration AS WELL AS an extra buffer with the speed function!
    static const Index auxBuffers = 1 + (TemporalScheme == math::TVD_RK3 ? 2 : 1);
    size_t countCFL = 0;
    while (time0 < time1 && mParent->mTracker.checkInterrupter()) {
        mParent->mTracker.leafs().rebuildAuxBuffers(auxBuffers);
        // Sample the speed field into the last aux buffer and derive the CFL time step.
        const ValueType dt = this->sampleSpeed(time0, time1, auxBuffers);
        if ( math::isZero(dt) ) break;//V is essentially zero so terminate
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN //switch is resolved at compile-time
        switch(TemporalScheme) {
        case math::TVD_RK1:
            // Perform one explicit Euler step: t1 = t0 + dt
            // Phi_t1(1) = Phi_t0(0) - dt * Speed(2) * |Grad[Phi(0)]|
            mTask = std::bind(&Morph::euler01, ph::_1, ph::_2, dt, /*speed*/2);
            // Cook and swap buffer 0 and 1 such that Phi_t1(0) and Phi_t0(1)
            this->cook(PARALLEL_FOR, 1);
            break;
        case math::TVD_RK2:
            // Perform one explicit Euler step: t1 = t0 + dt
            // Phi_t1(1) = Phi_t0(0) - dt * Speed(2) * |Grad[Phi(0)]|
            mTask = std::bind(&Morph::euler01, ph::_1, ph::_2, dt, /*speed*/2);
            // Cook and swap buffer 0 and 1 such that Phi_t1(0) and Phi_t0(1)
            this->cook(PARALLEL_FOR, 1);
            // Convex combine explicit Euler step: t2 = t0 + dt
            // Phi_t2(1) = 1/2 * Phi_t0(1) + 1/2 * (Phi_t1(0) - dt * Speed(2) * |Grad[Phi(0)]|)
            mTask = std::bind(&Morph::euler12, ph::_1, ph::_2, dt);
            // Cook and swap buffer 0 and 1 such that Phi_t2(0) and Phi_t1(1)
            this->cook(PARALLEL_FOR, 1);
            break;
        case math::TVD_RK3:
            // Perform one explicit Euler step: t1 = t0 + dt
            // Phi_t1(1) = Phi_t0(0) - dt * Speed(3) * |Grad[Phi(0)]|
            mTask = std::bind(&Morph::euler01, ph::_1, ph::_2, dt, /*speed*/3);
            // Cook and swap buffer 0 and 1 such that Phi_t1(0) and Phi_t0(1)
            this->cook(PARALLEL_FOR, 1);
            // Convex combine explicit Euler step: t2 = t0 + dt/2
            // Phi_t2(2) = 3/4 * Phi_t0(1) + 1/4 * (Phi_t1(0) - dt * Speed(3) * |Grad[Phi(0)]|)
            mTask = std::bind(&Morph::euler34, ph::_1, ph::_2, dt);
            // Cook and swap buffer 0 and 2 such that Phi_t2(0) and Phi_t1(2)
            this->cook(PARALLEL_FOR, 2);
            // Convex combine explicit Euler step: t3 = t0 + dt
            // Phi_t3(2) = 1/3 * Phi_t0(1) + 2/3 * (Phi_t2(0) - dt * Speed(3) * |Grad[Phi(0)]|)
            mTask = std::bind(&Morph::euler13, ph::_1, ph::_2, dt);
            // Cook and swap buffer 0 and 2 such that Phi_t3(0) and Phi_t2(2)
            this->cook(PARALLEL_FOR, 2);
            break;
        case math::UNKNOWN_TIS:
        default:
            OPENVDB_THROW(ValueError, "Temporal integration scheme not supported!");
        }//end of compile-time resolved switch
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
        time0 += dt;
        ++countCFL;
        mParent->mTracker.leafs().removeAuxBuffers();
        // Track the narrow band
        mParent->mTracker.track();
    }//end while-loop over time
    return countCFL;//number of CFL propagation steps
}
// Sample the speed function (target - current, optionally mask-modulated)
// into the given aux buffer and return a CFL-limited time step. Returns zero
// when there are no leaf nodes, the time interval is empty, or the sampled
// speed is essentially zero everywhere.
template<typename GridT, typename InterruptT>
template<typename MapT, math::BiasedGradientScheme SpatialScheme,
         math::TemporalIntegrationScheme TemporalScheme>
inline typename GridT::ValueType
LevelSetMorphing<GridT, InterruptT>::
Morph<MapT, SpatialScheme, TemporalScheme>::
sampleSpeed(ValueType time0, ValueType time1, Index speedBuffer)
{
    namespace ph = std::placeholders;
    mMaxAbsS = mMinAbsS;
    const size_t leafCount = mParent->mTracker.leafs().leafCount();
    if (leafCount==0 || time0 >= time1) return ValueType(0);
    // Choose the fast aligned sampler when the target (and mask, if any)
    // share the tracked grid's transform; otherwise sample in world space.
    const math::Transform& xform = mParent->mTracker.grid().transform();
    if (mParent->mTarget->transform() == xform &&
        (mParent->mMask == nullptr || mParent->mMask->transform() == xform)) {
        mTask = std::bind(&Morph::sampleAlignedSpeed, ph::_1, ph::_2, speedBuffer);
    } else {
        mTask = std::bind(&Morph::sampleXformedSpeed, ph::_1, ph::_2, speedBuffer);
    }
    this->cook(PARALLEL_REDUCE);
    if (math::isApproxEqual(mMinAbsS, mMaxAbsS)) return ValueType(0);//speed is essentially zero
    // CFL factor depends on the temporal scheme; divide by sqrt(3) for 3D.
    static const ValueType CFL = (TemporalScheme == math::TVD_RK1 ? ValueType(0.3) :
                                  TemporalScheme == math::TVD_RK2 ? ValueType(0.9) :
                                  ValueType(1.0))/math::Sqrt(ValueType(3.0));
    const ValueType dt = math::Abs(time1 - time0), dx = mParent->mTracker.voxelSize();
    return math::Min(dt, ValueType(CFL*dx/mMaxAbsS));
}
// Compute the speed (target - current), modulated by the optional alpha mask,
// when the target/mask transforms differ from the tracked grid's: each voxel
// is mapped to world space and the target/mask are box-sampled there. The
// speed buffer is assumed to be pre-filled with the current level set values
// (the "s -= target" below turns them into differences). Leaf nodes whose
// speed is identically zero are tagged by writing max() into their first
// voxel so euler() can skip them. Runs under tbb::parallel_reduce; the
// per-thread max |speed| is merged via join().
template<typename GridT, typename InterruptT>
template <typename MapT, math::BiasedGradientScheme SpatialScheme,
          math::TemporalIntegrationScheme TemporalScheme>
inline void
LevelSetMorphing<GridT, InterruptT>::
Morph<MapT, SpatialScheme, TemporalScheme>::
sampleXformedSpeed(const LeafRange& range, Index speedBuffer)
{
    using VoxelIterT = typename LeafType::ValueOnCIter;
    using SamplerT = tools::GridSampler<typename GridT::ConstAccessor, tools::BoxSampler>;
    const MapT& map = *mMap;
    mParent->mTracker.checkInterrupter();
    typename GridT::ConstAccessor targetAcc = mParent->mTarget->getAccessor();
    SamplerT target(targetAcc, mParent->mTarget->transform());
    if (mParent->mMask == nullptr) {
        for (typename LeafRange::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
            ValueType* speed = leafIter.buffer(speedBuffer).data();
            bool isZero = true;
            for (VoxelIterT voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter) {
                ValueType& s = speed[voxelIter.pos()];
                s -= target.wsSample(map.applyMap(voxelIter.getCoord().asVec3d()));
                if (!math::isApproxZero(s)) isZero = false;
                mMaxAbsS = math::Max(mMaxAbsS, math::Abs(s));
            }
            if (isZero) speed[0] = std::numeric_limits<ValueType>::max();//tag first voxel
        }
    } else {
        // Mask-modulated case: remap the mask value to [0,1] with a smooth
        // step over [mMinMask, mMinMask + mDeltaMask] and scale the speed.
        const ValueType min = mParent->mMinMask, invNorm = 1.0f/(mParent->mDeltaMask);
        const bool invMask = mParent->isMaskInverted();
        typename GridT::ConstAccessor maskAcc = mParent->mMask->getAccessor();
        SamplerT mask(maskAcc, mParent->mMask->transform());
        for (typename LeafRange::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
            ValueType* speed = leafIter.buffer(speedBuffer).data();
            bool isZero = true;
            for (VoxelIterT voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter) {
                const Vec3R xyz = map.applyMap(voxelIter.getCoord().asVec3d());//world space
                const ValueType a = math::SmoothUnitStep((mask.wsSample(xyz)-min)*invNorm);
                ValueType& s = speed[voxelIter.pos()];
                s -= target.wsSample(xyz);
                s *= invMask ? 1 - a : a;
                if (!math::isApproxZero(s)) isZero = false;
                mMaxAbsS = math::Max(mMaxAbsS, math::Abs(s));
            }
            if (isZero) speed[0] = std::numeric_limits<ValueType>::max();//tag first voxel
        }
    }
}
// Same as sampleXformedSpeed(), but for the fast path where the target (and
// optional mask) share the tracked grid's transform, so values can be read
// directly by index-space coordinate without interpolation. Zero-speed leaf
// nodes are tagged via their first voxel; runs under tbb::parallel_reduce.
template<typename GridT, typename InterruptT>
template <typename MapT, math::BiasedGradientScheme SpatialScheme,
          math::TemporalIntegrationScheme TemporalScheme>
inline void
LevelSetMorphing<GridT, InterruptT>::
Morph<MapT, SpatialScheme, TemporalScheme>::
sampleAlignedSpeed(const LeafRange& range, Index speedBuffer)
{
    using VoxelIterT = typename LeafType::ValueOnCIter;
    mParent->mTracker.checkInterrupter();
    typename GridT::ConstAccessor target = mParent->mTarget->getAccessor();
    if (mParent->mMask == nullptr) {
        for (typename LeafRange::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
            ValueType* speed = leafIter.buffer(speedBuffer).data();
            bool isZero = true;
            for (VoxelIterT voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter) {
                ValueType& s = speed[voxelIter.pos()];
                s -= target.getValue(voxelIter.getCoord());
                if (!math::isApproxZero(s)) isZero = false;
                mMaxAbsS = math::Max(mMaxAbsS, math::Abs(s));
            }
            if (isZero) speed[0] = std::numeric_limits<ValueType>::max();//tag first voxel
        }
    } else {
        // Mask-modulated case: smooth-step the mask value into [0,1] and
        // scale the speed (inverted if the mask is flagged as inverted).
        const ValueType min = mParent->mMinMask, invNorm = 1.0f/(mParent->mDeltaMask);
        const bool invMask = mParent->isMaskInverted();
        typename GridT::ConstAccessor mask = mParent->mMask->getAccessor();
        for (typename LeafRange::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
            ValueType* speed = leafIter.buffer(speedBuffer).data();
            bool isZero = true;
            for (VoxelIterT voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter) {
                const Coord ijk = voxelIter.getCoord();//index space
                const ValueType a = math::SmoothUnitStep((mask.getValue(ijk)-min)*invNorm);
                ValueType& s = speed[voxelIter.pos()];
                s -= target.getValue(ijk);
                s *= invMask ? 1 - a : a;
                if (!math::isApproxZero(s)) isZero = false;
                mMaxAbsS = math::Max(mMaxAbsS, math::Abs(s));
            }
            if (isZero) speed[0] = std::numeric_limits<ValueType>::max();//tag first voxel
        }
    }
}
// Execute the currently bound mTask over all leaf nodes, either serially
// (grain size 0), with tbb::parallel_for, or with tbb::parallel_reduce,
// then swap leaf buffer @a swapBuffer with buffer 0.
template<typename GridT, typename InterruptT>
template <typename MapT, math::BiasedGradientScheme SpatialScheme,
          math::TemporalIntegrationScheme TemporalScheme>
inline void
LevelSetMorphing<GridT, InterruptT>::
Morph<MapT, SpatialScheme, TemporalScheme>::
cook(ThreadingMode mode, size_t swapBuffer)
{
    mParent->mTracker.startInterrupter("Morphing level set");
    const int grainSize = mParent->mTracker.getGrainSize();
    const LeafRange range = mParent->mTracker.leafs().leafRange(grainSize);
    if (mParent->mTracker.getGrainSize()==0) {
        (*this)(range); // serial execution
    } else if (mode == PARALLEL_FOR) {
        tbb::parallel_for(range, *this);
    } else if (mode == PARALLEL_REDUCE) {
        tbb::parallel_reduce(range, *this);
    } else {
        OPENVDB_THROW(ValueError, "expected threading mode " << int(PARALLEL_FOR)
            << " or " << int(PARALLEL_REDUCE) << ", got " << int(mode));
    }
    mParent->mTracker.leafs().swapLeafBuffer(swapBuffer, grainSize == 0);
    mParent->mTracker.endInterrupter();
}
// One (possibly convex-combined) forward Euler step of the morphing PDE:
//   result = Alpha * phi + (1 - Alpha) * (current - dt * speed * |grad(current)|)
// where Alpha = Nominator/Denominator is the TVD-RK blending weight (Alpha==0
// reduces to a plain Euler step). Leaf nodes tagged as zero-speed (first
// speed value == max()) and individual zero-speed voxels are skipped.
template<typename GridT, typename InterruptT>
template<typename MapT, math::BiasedGradientScheme SpatialScheme,
         math::TemporalIntegrationScheme TemporalScheme>
template <int Nominator, int Denominator>
inline void
LevelSetMorphing<GridT,InterruptT>::
Morph<MapT, SpatialScheme, TemporalScheme>::
euler(const LeafRange& range, ValueType dt,
      Index phiBuffer, Index resultBuffer, Index speedBuffer)
{
    using SchemeT = math::BIAS_SCHEME<SpatialScheme>;
    using StencilT = typename SchemeT::template ISStencil<GridType>::StencilType;
    using VoxelIterT = typename LeafType::ValueOnCIter;
    using NumGrad = math::GradientNormSqrd<MapT, SpatialScheme>;
    static const ValueType Alpha = ValueType(Nominator)/ValueType(Denominator);
    static const ValueType Beta  = ValueType(1) - Alpha;
    mParent->mTracker.checkInterrupter();
    const MapT& map = *mMap;
    StencilT stencil(mParent->mTracker.grid());
    for (typename LeafRange::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
        const ValueType* speed = leafIter.buffer(speedBuffer).data();
        // Skip leaf nodes tagged as zero-speed by the speed-sampling pass.
        if (math::isExactlyEqual(speed[0], std::numeric_limits<ValueType>::max())) continue;
        const ValueType* phi = leafIter.buffer(phiBuffer).data();
        ValueType* result = leafIter.buffer(resultBuffer).data();
        for (VoxelIterT voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter) {
            const Index n = voxelIter.pos();
            if (math::isApproxZero(speed[n])) continue;
            stencil.moveTo(voxelIter);
            const ValueType v = stencil.getValue() - dt * speed[n] * NumGrad::result(map, stencil);
            result[n] = Nominator ? Alpha * phi[n] + Beta * v : v;
        }//loop over active voxels in the leaf of the mask
    }//loop over leafs of the level set
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_LEVEL_SET_MORPH_HAS_BEEN_INCLUDED
| 27,359 | C | 41.418605 | 100 | 0.656128 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/GridTransformer.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file GridTransformer.h
/// @author Peter Cucka
#ifndef OPENVDB_TOOLS_GRIDTRANSFORMER_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_GRIDTRANSFORMER_HAS_BEEN_INCLUDED
#include <openvdb/Grid.h>
#include <openvdb/Types.h>
#include <openvdb/math/Math.h> // for isApproxEqual()
#include <openvdb/util/NullInterrupter.h>
#include "ChangeBackground.h"
#include "Interpolation.h"
#include "LevelSetRebuild.h" // for doLevelSetRebuild()
#include "SignedFloodFill.h" // for signedFloodFill
#include "Prune.h" // for pruneLevelSet
#include <tbb/blocked_range.h>
#include <tbb/parallel_reduce.h>
#include <cmath>
#include <functional>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Resample an input grid into an output grid of the same type such that,
/// after resampling, the input and output grids coincide (apart from sampling
/// artifacts), but the output grid's transform is unchanged.
/// @details Specifically, this function resamples the input grid into the output
/// grid's index space, using a sampling kernel like PointSampler, BoxSampler,
/// or QuadraticSampler.
/// @param inGrid the grid to be resampled
/// @param outGrid the grid into which to write the resampled voxel data
/// @param interrupter an object adhering to the util::NullInterrupter interface
/// @par Example:
/// @code
/// // Create an input grid with the default identity transform
/// // and populate it with a level-set sphere.
/// FloatGrid::ConstPtr src = tools::makeSphere(...);
/// // Create an output grid and give it a uniform-scale transform.
/// FloatGrid::Ptr dest = FloatGrid::create();
/// const float voxelSize = 0.5;
/// dest->setTransform(math::Transform::createLinearTransform(voxelSize));
/// // Resample the input grid into the output grid, reproducing
/// // the level-set sphere at a smaller voxel size.
/// MyInterrupter interrupter = ...;
/// tools::resampleToMatch<tools::QuadraticSampler>(*src, *dest, interrupter);
/// @endcode
template<typename Sampler, typename Interrupter, typename GridType>
inline void
resampleToMatch(const GridType& inGrid, GridType& outGrid, Interrupter& interrupter);
/// @brief Resample an input grid into an output grid of the same type such that,
/// after resampling, the input and output grids coincide (apart from sampling
/// artifacts), but the output grid's transform is unchanged.
/// @details Specifically, this function resamples the input grid into the output
/// grid's index space, using a sampling kernel like PointSampler, BoxSampler,
/// or QuadraticSampler.
/// @param inGrid the grid to be resampled
/// @param outGrid the grid into which to write the resampled voxel data
/// @par Example:
/// @code
/// // Create an input grid with the default identity transform
/// // and populate it with a level-set sphere.
/// FloatGrid::ConstPtr src = tools::makeSphere(...);
/// // Create an output grid and give it a uniform-scale transform.
/// FloatGrid::Ptr dest = FloatGrid::create();
/// const float voxelSize = 0.5;
/// dest->setTransform(math::Transform::createLinearTransform(voxelSize));
/// // Resample the input grid into the output grid, reproducing
/// // the level-set sphere at a smaller voxel size.
/// tools::resampleToMatch<tools::QuadraticSampler>(*src, *dest);
/// @endcode
template<typename Sampler, typename GridType>
inline void
resampleToMatch(const GridType& inGrid, GridType& outGrid);
////////////////////////////////////////
namespace internal {
/// @brief A TileSampler wraps a grid sampler of another type (BoxSampler,
/// QuadraticSampler, etc.), and for samples that fall within a given tile
/// of the grid, it returns a cached tile value instead of accessing the grid.
template<typename Sampler, typename TreeT>
class TileSampler: public Sampler
{
public:
    using ValueT = typename TreeT::ValueType;

    /// @param b       the index-space bounding box of a particular grid tile
    /// @param tileVal the tile's value
    /// @param on      the tile's active state
    TileSampler(const CoordBBox& b, const ValueT& tileVal, bool on):
        mBBox(b.min().asVec3d(), b.max().asVec3d()), mVal(tileVal), mActive(on), mEmpty(false)
    {
        mBBox.expand(-this->radius()); // shrink the bounding box by the sample radius
        mEmpty = mBBox.empty(); // if the tile is smaller than the kernel, always defer to Sampler
    }

    /// @brief Return the cached tile value when @a inCoord lies far enough
    /// inside the tile that the sampling kernel cannot touch its boundary;
    /// otherwise fall back to the wrapped sampler's tree traversal.
    bool sample(const TreeT& inTree, const Vec3R& inCoord, ValueT& result) const
    {
        if (!mEmpty && mBBox.isInside(inCoord)) { result = mVal; return mActive; }
        return Sampler::sample(inTree, inCoord, result);
    }

protected:
    BBoxd mBBox;   // tile bounds shrunk by the kernel radius
    ValueT mVal;   // the tile's constant value
    bool mActive, mEmpty;
};
/// @brief For point sampling, tree traversal is less expensive than testing
/// bounding box membership.
template<typename TreeT>
class TileSampler<PointSampler, TreeT>: public PointSampler {
public:
    // No-op specialization: for point sampling, plain tree traversal is
    // cheaper than the bounding-box test, so the tile cache is disabled.
    TileSampler(const CoordBBox&, const typename TreeT::ValueType&, bool) {}
};
/// @brief For point sampling, tree traversal is less expensive than testing
/// bounding box membership.
template<typename TreeT>
class TileSampler<StaggeredPointSampler, TreeT>: public StaggeredPointSampler {
public:
    // No-op specialization: for staggered point sampling, plain tree
    // traversal is cheaper than the bounding-box test, so no tile cache.
    TileSampler(const CoordBBox&, const typename TreeT::ValueType&, bool) {}
};
} // namespace internal
////////////////////////////////////////
/// A GridResampler applies a geometric transformation to an
/// input grid using one of several sampling schemes, and stores
/// the result in an output grid.
///
/// Usage:
/// @code
/// GridResampler resampler();
/// resampler.transformGrid<BoxSampler>(xform, inGrid, outGrid);
/// @endcode
/// where @c xform is a functor that implements the following methods:
/// @code
/// bool isAffine() const
/// openvdb::Vec3d transform(const openvdb::Vec3d&) const
/// openvdb::Vec3d invTransform(const openvdb::Vec3d&) const
/// @endcode
/// @note When the transform is affine and can be expressed as a 4 x 4 matrix,
/// a GridTransformer is much more efficient than a GridResampler.
class GridResampler
{
public:
    using Ptr = SharedPtr<GridResampler>;
    using InterruptFunc = std::function<bool (void)>;

    GridResampler(): mThreaded(true), mTransformTiles(true) {}
    virtual ~GridResampler() {}
    GridResampler(const GridResampler&) = default;
    GridResampler& operator=(const GridResampler&) = default;

    /// Enable or disable threading.  (Threading is enabled by default.)
    void setThreaded(bool b) { mThreaded = b; }
    /// Return @c true if threading is enabled.
    bool threaded() const { return mThreaded; }
    /// Enable or disable processing of tiles. (Enabled by default, except for level set grids.)
    void setTransformTiles(bool b) { mTransformTiles = b; }
    /// Return @c true if tile processing is enabled.
    bool transformTiles() const { return mTransformTiles; }

    /// @brief Allow processing to be aborted by providing an interrupter object.
    /// The interrupter will be queried periodically during processing.
    /// @see util/NullInterrupter.h for interrupter interface requirements.
    template<typename InterrupterType> void setInterrupter(InterrupterType&);

    /// @brief Resample @a inGrid through the given Transformer functor
    /// and write the result into @a outGrid using the Sampler kernel.
    template<typename Sampler, typename GridT, typename Transformer>
    void transformGrid(const Transformer&,
        const GridT& inGrid, GridT& outGrid) const;

protected:
    // Worker shared with derived classes (e.g. GridTransformer).
    template<typename Sampler, typename GridT, typename Transformer>
    void applyTransform(const Transformer&, const GridT& inGrid, GridT& outGrid) const;

    // Return true if an interrupter is installed and requests an abort.
    bool interrupt() const { return mInterrupt && mInterrupt(); }

private:
    // Resample the contents of one index-space bounding box of the input tree.
    template<typename Sampler, typename InTreeT, typename OutTreeT, typename Transformer>
    static void transformBBox(const Transformer&, const CoordBBox& inBBox,
        const InTreeT& inTree, OutTreeT& outTree, const InterruptFunc&,
        const Sampler& = Sampler());

    // tbb range body used by applyTransform()
    template<typename Sampler, typename TreeT, typename Transformer>
    class RangeProcessor;

    bool mThreaded, mTransformTiles;
    InterruptFunc mInterrupt; // type-erased interrupter callback (empty if unset)
};
////////////////////////////////////////
/// @brief A GridTransformer applies a geometric transformation to an
/// input grid using one of several sampling schemes, and stores
/// the result in an output grid.
///
/// @note GridTransformer is optimized for affine transformations.
///
/// Usage:
/// @code
/// Mat4R xform = ...;
/// GridTransformer transformer(xform);
/// transformer.transformGrid<BoxSampler>(inGrid, outGrid);
/// @endcode
/// or
/// @code
/// Vec3R pivot = ..., scale = ..., rotate = ..., translate = ...;
/// GridTransformer transformer(pivot, scale, rotate, translate);
/// transformer.transformGrid<QuadraticSampler>(inGrid, outGrid);
/// @endcode
class GridTransformer: public GridResampler
{
public:
    using Ptr = SharedPtr<GridTransformer>;

    /// Construct from a 4x4 affine transformation matrix.
    GridTransformer(const Mat4R& xform);
    /// @brief Construct from decomposed transform components.
    /// @param pivot          center of scaling and rotation
    /// @param scale          per-axis scale factors
    /// @param rotate         per-axis rotation angles
    /// @param translate      translation vector
    /// @param xformOrder     three-character order of translate/scale/rotate (e.g. "tsr")
    /// @param rotationOrder  three-character axis order for rotation (e.g. "zyx")
    GridTransformer(
        const Vec3R& pivot,
        const Vec3R& scale,
        const Vec3R& rotate,
        const Vec3R& translate,
        const std::string& xformOrder = "tsr",
        const std::string& rotationOrder = "zyx");
    ~GridTransformer() override = default;
    GridTransformer(const GridTransformer&) = default;
    GridTransformer& operator=(const GridTransformer&) = default;

    const Mat4R& getTransform() const { return mTransform; }

    /// Resample @a inGrid into @a outGrid using the Sampler kernel.
    template<class Sampler, class GridT>
    void transformGrid(const GridT& inGrid, GridT& outGrid) const;

private:
    struct MatrixTransform;

    // Shared constructor body: validates the order strings and sets up
    // the transform and the mipmap-style pre/post scale decomposition.
    inline void init(const Vec3R& pivot, const Vec3R& scale,
        const Vec3R& rotate, const Vec3R& translate,
        const std::string& xformOrder, const std::string& rotOrder);

    Vec3R mPivot;       // center of scaling/rotation
    Vec3i mMipLevels;   // per-axis count of halvings used for downsampling
    Mat4R mTransform, mPreScaleTransform, mPostScaleTransform;
};
////////////////////////////////////////
namespace local_util {
enum { DECOMP_INVALID = 0, DECOMP_VALID = 1, DECOMP_UNIQUE = 2 };
/// @brief Decompose an affine transform into scale, rotation (XYZ order),
/// and translation components.
/// @return DECOMP_INVALID if the given matrix is not affine or cannot
/// be decomposed, DECOMP_UNIQUE if the matrix has a unique decomposition,
/// DECOMP_VALID otherwise
template<typename T>
inline int
decompose(const math::Mat4<T>& m, math::Vec3<T>& scale,
    math::Vec3<T>& rotate, math::Vec3<T>& translate)
{
    if (!math::isAffine(m)) return DECOMP_INVALID;

    // This is the translation in world space
    translate = m.getTranslation();
    // Extract the upper-left 3x3 (rotation/scale/shear) part.
    const math::Mat3<T> xform = m.getMat3();

    // Per-axis scale magnitudes (row lengths of the 3x3 matrix).
    const math::Vec3<T> unsignedScale(
        (math::Vec3<T>(1, 0, 0) * xform).length(),
        (math::Vec3<T>(0, 1, 0) * xform).length(),
        (math::Vec3<T>(0, 0, 1) * xform).length());

    const bool hasUniformScale = unsignedScale.eq(math::Vec3<T>(unsignedScale[0]));

    bool hasRotation = false;
    bool validDecomposition = false;

    T minAngle = std::numeric_limits<T>::max();

    // If the transformation matrix contains a reflection, test different negative scales
    // to find a decomposition that favors the optimal resampling algorithm.
    for (size_t n = 0; n < 8; ++n) {
        const math::Vec3<T> signedScale(
            n & 0x1 ? -unsignedScale.x() : unsignedScale.x(),
            n & 0x2 ? -unsignedScale.y() : unsignedScale.y(),
            n & 0x4 ? -unsignedScale.z() : unsignedScale.z());

        // Remove the candidate (possibly reflecting) scale to isolate a pure rotation.
        const math::Mat3<T> mat = xform * math::scale<math::Mat3<T> >(signedScale).inverse();
        if (mat.det() < T(0.0)) continue; // Skip if mat contains a reflection.

        const math::Vec3<T> tmpAngle = math::eulerAngles(mat, math::XYZ_ROTATION);

        // Rebuild the matrix from the candidate angles/scale and verify it
        // reproduces the original (rules out shear).
        const math::Mat3<T> rebuild =
            math::rotation<math::Mat3<T> >(math::Vec3<T>(0, 0, 1), tmpAngle.z()) *
            math::rotation<math::Mat3<T> >(math::Vec3<T>(0, 1, 0), tmpAngle.y()) *
            math::rotation<math::Mat3<T> >(math::Vec3<T>(1, 0, 0), tmpAngle.x()) *
            math::scale<math::Mat3<T> >(signedScale);

        if (xform.eq(rebuild)) {

            const T maxAngle = std::max(std::abs(tmpAngle[0]),
                std::max(std::abs(tmpAngle[1]), std::abs(tmpAngle[2])));

            if (!(minAngle < maxAngle)) { // Update if less or equal.

                minAngle = maxAngle;
                rotate = tmpAngle;
                scale = signedScale;

                hasRotation = !rotate.eq(math::Vec3<T>::zero());
                validDecomposition = true;

                if (hasUniformScale || !hasRotation) {
                    // Current decomposition is optimal.
                    break;
                }
            }
        }
    }

    if (!validDecomposition) {
        // The decomposition is invalid if the transformation matrix contains shear.
        return DECOMP_INVALID;
    }

    if (hasRotation && !hasUniformScale) {
        // No unique decomposition if scale is nonuniform and rotation is nonzero.
        return DECOMP_VALID;
    }

    return DECOMP_UNIQUE;
}
} // namespace local_util
////////////////////////////////////////
/// This class implements the Transformer functor interface (specifically,
/// the isAffine(), transform() and invTransform() methods) for a transform
/// that is expressed as a 4 x 4 matrix.
struct GridTransformer::MatrixTransform
{
    MatrixTransform(): mat(Mat4R::identity()), invMat(Mat4R::identity()) {}
    // Cache the inverse once so invTransform() is a single matrix multiply.
    MatrixTransform(const Mat4R& xform): mat(xform), invMat(xform.inverse()) {}

    bool isAffine() const { return math::isAffine(mat); }

    Vec3R transform(const Vec3R& pos) const { return mat.transformH(pos); }

    Vec3R invTransform(const Vec3R& pos) const { return invMat.transformH(pos); }

    Mat4R mat, invMat; // forward transform and its precomputed inverse
};
////////////////////////////////////////
/// @brief This class implements the Transformer functor interface (specifically,
/// the isAffine(), transform() and invTransform() methods) for a transform
/// that maps an A grid into a B grid's index space such that, after resampling,
/// A's index space and transform match B's index space and transform.
class ABTransform
{
public:
    /// @param aXform the A grid's transform
    /// @param bXform the B grid's transform
    /// @note Stores references to the given transforms; both must outlive
    /// this object.
    ABTransform(const math::Transform& aXform, const math::Transform& bXform):
        mAXform(aXform),
        mBXform(bXform),
        mIsAffine(mAXform.isLinear() && mBXform.isLinear()),
        mIsIdentity(mIsAffine && mAXform == mBXform)
        {}

    bool isAffine() const { return mIsAffine; }

    bool isIdentity() const { return mIsIdentity; }

    /// Map a point from A's index space to B's index space (via world space).
    openvdb::Vec3R transform(const openvdb::Vec3R& pos) const
    {
        return mBXform.worldToIndex(mAXform.indexToWorld(pos));
    }

    /// Map a point from B's index space back to A's index space.
    openvdb::Vec3R invTransform(const openvdb::Vec3R& pos) const
    {
        return mAXform.worldToIndex(mBXform.indexToWorld(pos));
    }

    const math::Transform& getA() const { return mAXform; }
    const math::Transform& getB() const { return mBXform; }

private:
    const math::Transform &mAXform, &mBXform; // non-owning references
    const bool mIsAffine;
    const bool mIsIdentity;
};
/// The normal entry points for resampling are the resampleToMatch() functions,
/// which correctly handle level set grids under scaling and shearing.
/// doResampleToMatch() is mainly for internal use but is typically faster
/// for level sets, and correct provided that no scaling or shearing is needed.
///
/// @warning Do not use this function to scale or shear a level set grid.
template<typename Sampler, typename Interrupter, typename GridType>
inline void
doResampleToMatch(const GridType& inGrid, GridType& outGrid, Interrupter& interrupter)
{
    ABTransform xform(inGrid.transform(), outGrid.transform());

    if (Sampler::consistent() && xform.isIdentity()) {
        // If the transforms of the input and output are identical, the
        // output tree is simply a deep copy of the input tree.
        outGrid.setTree(inGrid.tree().copy());
    } else if (xform.isAffine()) {
        // If the input and output transforms are both affine, create an
        // input to output transform (in:index-to-world * out:world-to-index)
        // and use the fast GridTransformer API.
        Mat4R mat = xform.getA().baseMap()->getAffineMap()->getMat4() *
            ( xform.getB().baseMap()->getAffineMap()->getMat4().inverse() );

        GridTransformer transformer(mat);
        transformer.setInterrupter(interrupter);

        // Transform the input grid and store the result in the output grid.
        transformer.transformGrid<Sampler>(inGrid, outGrid);
    } else {
        // If either the input or the output transform is non-affine,
        // use the slower GridResampler API.
        GridResampler resampler;
        resampler.setInterrupter(interrupter);

        resampler.transformGrid<Sampler>(xform, inGrid, outGrid);
    }
}
/// @brief Helper that converts a world-space background value into a
/// narrow-band half-width expressed in voxels, assuming uniform voxels
/// (only voxelSize[0] is used). Specialized for bool below.
template<typename ValueType>
struct HalfWidthOp {
    static ValueType eval(const ValueType& background, const Vec3d& voxelSize)
    {
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        const ValueType halfWidth(background * (1.0 / voxelSize[0]));
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END
        return halfWidth;
    }
}; // struct HalfWidthOp
template<>
struct HalfWidthOp<bool> {
    // Division is meaningless for bool; pass the background through unchanged.
    static bool eval(const bool& background, const Vec3d& /*voxelSize*/)
    {
        return background;
    }
}; // struct HalfWidthOp<bool>
template<typename Sampler, typename Interrupter, typename GridType>
inline void
resampleToMatch(const GridType& inGrid, GridType& outGrid, Interrupter& interrupter)
{
    if (inGrid.getGridClass() == GRID_LEVEL_SET) {
        // If the input grid is a level set, resample it using the level set rebuild tool.

        if (inGrid.constTransform() == outGrid.constTransform()) {
            // If the transforms of the input and output grids are identical,
            // the output tree is simply a deep copy of the input tree.
            outGrid.setTree(inGrid.tree().copy());
            return;
        }

        // If the output grid is a level set, resample the input grid to have the output grid's
        // background value.  Otherwise, preserve the input grid's background value.
        using ValueT = typename GridType::ValueType;
        const bool outIsLevelSet = outGrid.getGridClass() == openvdb::GRID_LEVEL_SET;

        // Convert the world-space background into a voxel-count half-width
        // relative to whichever grid's transform is being preserved.
        const ValueT halfWidth = outIsLevelSet
            ? HalfWidthOp<ValueT>::eval(outGrid.background(), outGrid.voxelSize())
            : HalfWidthOp<ValueT>::eval(inGrid.background(),  inGrid.voxelSize());

        typename GridType::Ptr tempGrid;
        try {
            tempGrid = doLevelSetRebuild(inGrid, /*iso=*/zeroVal<ValueT>(),
                /*exWidth=*/halfWidth, /*inWidth=*/halfWidth,
                &outGrid.constTransform(), &interrupter);
        } catch (TypeError&) {
            // The input grid is classified as a level set, but it has a value type
            // that is not supported by the level set rebuild tool.  Fall back to
            // using the generic resampler.
            tempGrid.reset();
        }
        if (tempGrid) {
            outGrid.setTree(tempGrid->treePtr());
            return;
        }
    }

    // If the input grid is not a level set, use the generic resampler.
    doResampleToMatch<Sampler>(inGrid, outGrid, interrupter);
}
template<typename Sampler, typename GridType>
inline void
resampleToMatch(const GridType& inGrid, GridType& outGrid)
{
util::NullInterrupter interrupter;
resampleToMatch<Sampler>(inGrid, outGrid, interrupter);
}
////////////////////////////////////////
/// @brief Construct a transformer from a 4x4 transform matrix.
/// @details If the matrix can be factored into scale, rotation and translation
/// components, init() is invoked with those components so that downsampling
/// can use the mipmapping-like scheme; otherwise the matrix is used as is.
inline
GridTransformer::GridTransformer(const Mat4R& xform):
    mPivot(0, 0, 0),
    mMipLevels(0, 0, 0),
    mTransform(xform),
    mPreScaleTransform(Mat4R::identity()),
    mPostScaleTransform(Mat4R::identity())
{
    // Attempt an affine decomposition of the matrix.
    Vec3R s, r, t;
    const bool decomposed = local_util::decompose(mTransform, s, r, t);
    if (decomposed) {
        init(mPivot, s, r, t, "rst", "zyx");
    }
}
/// @brief Construct a transformer from decomposed transform components.
/// @details All of the real work is delegated to init(), which validates
/// @a xformOrder and @a rotOrder (throwing ValueError if either is invalid)
/// and builds the transform matrices; see init() for details.
inline
GridTransformer::GridTransformer(
    const Vec3R& pivot, const Vec3R& scale,
    const Vec3R& rotate, const Vec3R& translate,
    const std::string& xformOrder, const std::string& rotOrder):
    mPivot(0, 0, 0),
    mMipLevels(0, 0, 0),
    mPreScaleTransform(Mat4R::identity()),
    mPostScaleTransform(Mat4R::identity())
{
    init(pivot, scale, rotate, translate, xformOrder, rotOrder);
}
////////////////////////////////////////
/// @brief (Re)initialize this transformer from decomposed transform components.
/// @param pivot       point about which scaling and rotation are performed
/// @param scale       per-axis scale factors
/// @param rotate      per-axis rotation angles (units not shown here —
///                    presumably radians; confirm against math::Mat4::preRotate)
/// @param translate   translation vector
/// @param xformOrder  order of operations, a permutation of "srt"
/// @param rotOrder    order of rotations, a permutation of "xyz"
/// @throw ValueError  if @a xformOrder or @a rotOrder is not a three-character
///                    string containing exactly one of each required letter
inline void
GridTransformer::init(
    const Vec3R& pivot, const Vec3R& scale,
    const Vec3R& rotate, const Vec3R& translate,
    const std::string& xformOrder, const std::string& rotOrder)
{
    if (xformOrder.size() != 3) {
        OPENVDB_THROW(ValueError, "invalid transform order (" + xformOrder + ")");
    }
    if (rotOrder.size() != 3) {
        OPENVDB_THROW(ValueError, "invalid rotation order (" + rotOrder + ")");
    }
    mPivot = pivot;
    // Scaling is handled via a mipmapping-like scheme of successive
    // halvings of the tree resolution, until the remaining scale
    // factor is greater than or equal to 1/2.
    Vec3R scaleRemainder = scale;
    for (int i = 0; i < 3; ++i) {
        double s = std::fabs(scale(i));
        if (s < 0.5) {
            // Number of halvings needed to bring |scale| up to at least 1/2.
            mMipLevels(i) = int(std::floor(-std::log(s)/std::log(2.0)));
            // Scale factor left over after mMipLevels(i) halvings.
            scaleRemainder(i) = scale(i) * (1 << mMipLevels(i));
        }
    }
    // Build pre-scale and post-scale transform matrices based on
    // the user-specified order of operations.
    // Note that we iterate over the transform order string in reverse order
    // (e.g., "t", "r", "s", given "srt"). This is because math::Mat matrices
    // postmultiply row vectors rather than premultiplying column vectors.
    mTransform = mPreScaleTransform = mPostScaleTransform = Mat4R::identity();
    Mat4R* remainder = &mPostScaleTransform;
    int rpos, spos, tpos;
    rpos = spos = tpos = 3;
    for (int ix = 2; ix >= 0; --ix) { // reverse iteration
        switch (xformOrder[ix]) {
        case 'r':
            rpos = ix;
            // Rotate about the pivot: translate to the pivot, rotate,
            // then translate back.
            mTransform.preTranslate(pivot);
            remainder->preTranslate(pivot);
            int xpos, ypos, zpos;
            xpos = ypos = zpos = 3;
            for (int ir = 2; ir >= 0; --ir) {
                switch (rotOrder[ir]) {
                case 'x':
                    xpos = ir;
                    mTransform.preRotate(math::X_AXIS, rotate.x());
                    remainder->preRotate(math::X_AXIS, rotate.x());
                    break;
                case 'y':
                    ypos = ir;
                    mTransform.preRotate(math::Y_AXIS, rotate.y());
                    remainder->preRotate(math::Y_AXIS, rotate.y());
                    break;
                case 'z':
                    zpos = ir;
                    mTransform.preRotate(math::Z_AXIS, rotate.z());
                    remainder->preRotate(math::Z_AXIS, rotate.z());
                    break;
                }
            }
            // Reject rotation order strings that don't contain exactly one
            // instance of "x", "y" and "z".
            if (xpos > 2 || ypos > 2 || zpos > 2) {
                OPENVDB_THROW(ValueError, "invalid rotation order (" + rotOrder + ")");
            }
            mTransform.preTranslate(-pivot);
            remainder->preTranslate(-pivot);
            break;
        case 's':
            spos = ix;
            mTransform.preTranslate(pivot);
            mTransform.preScale(scale);
            mTransform.preTranslate(-pivot);
            remainder->preTranslate(pivot);
            remainder->preScale(scaleRemainder);
            remainder->preTranslate(-pivot);
            // Operations that precede the scale in the order string belong
            // to the pre-scale matrix, so retarget the remainder pointer.
            remainder = &mPreScaleTransform;
            break;
        case 't':
            tpos = ix;
            mTransform.preTranslate(translate);
            remainder->preTranslate(translate);
            break;
        }
    }
    // Reject transform order strings that don't contain exactly one
    // instance of "t", "r" and "s".
    if (tpos > 2 || rpos > 2 || spos > 2) {
        OPENVDB_THROW(ValueError, "invalid transform order (" + xformOrder + ")");
    }
}
////////////////////////////////////////
/// @brief Register an interrupter whose wasInterrupted() method is polled
/// during resampling.
/// @note Only a reference to @a interrupter is captured, so the interrupter
/// must outlive this resampler.
template<typename InterrupterType>
void
GridResampler::setInterrupter(InterrupterType& interrupter)
{
    // Poll without reporting a percentage of completion (-1), exactly as
    // std::bind(&InterrupterType::wasInterrupted, &interrupter, -1) would.
    mInterrupt = [&interrupter]() {
        return interrupter.wasInterrupted(/*percent=*/-1);
    };
}
/// @brief Resample @a inGrid into @a outGrid through the given transformer.
/// @details First makes the output tree's background value match the input
/// grid's, then delegates the per-voxel work to applyTransform().
template<typename Sampler, typename GridT, typename Transformer>
void
GridResampler::transformGrid(const Transformer& xform,
    const GridT& inGrid, GridT& outGrid) const
{
    // Keep the two grids' background values consistent before sampling.
    tools::changeBackground(outGrid.tree(), inGrid.background());
    applyTransform<Sampler>(xform, inGrid, outGrid);
}
/// @brief Resample @a inGrid into @a outGrid, applying this transformer's
/// full transform.
/// @details For samplers that support mipmapping, per-axis scale factors
/// below 1/2 (recorded in mMipLevels by init()) are applied as a sequence of
/// successive halvings to reduce aliasing, sandwiched between the pre-scale
/// and post-scale transforms; otherwise the transform is applied in one pass.
template<class Sampler, class GridT>
void
GridTransformer::transformGrid(const GridT& inGrid, GridT& outGrid) const
{
    tools::changeBackground(outGrid.tree(), inGrid.background());
    if (!Sampler::mipmap() || mMipLevels == Vec3i::zero()) {
        // Skip the mipmapping step.
        const MatrixTransform xform(mTransform);
        applyTransform<Sampler>(xform, inGrid, outGrid);
    } else {
        bool firstPass = true;
        const typename GridT::ValueType background = inGrid.background();
        typename GridT::Ptr tempGrid = GridT::create(background);
        if (!mPreScaleTransform.eq(Mat4R::identity())) {
            firstPass = false;
            // Apply the pre-scale transform to the input grid
            // and store the result in a temporary grid.
            const MatrixTransform xform(mPreScaleTransform);
            applyTransform<Sampler>(xform, inGrid, *tempGrid);
        }
        // While the scale factor along one or more axes is less than 1/2,
        // scale the grid by half along those axes.
        Vec3i count = mMipLevels; // # of halvings remaining per axis
        while (count != Vec3i::zero()) {
            MatrixTransform xform;
            // Halve about the pivot, but only along axes that still have
            // halvings remaining.
            xform.mat.setTranslation(mPivot);
            xform.mat.preScale(Vec3R(
                count.x() ? .5 : 1, count.y() ? .5 : 1, count.z() ? .5 : 1));
            xform.mat.preTranslate(-mPivot);
            xform.invMat = xform.mat.inverse();
            if (firstPass) {
                firstPass = false;
                // Scale the input grid and store the result in a temporary grid.
                applyTransform<Sampler>(xform, inGrid, *tempGrid);
            } else {
                // Scale the temporary grid and store the result in a transient grid,
                // then swap the two and discard the transient grid.
                typename GridT::Ptr destGrid = GridT::create(background);
                applyTransform<Sampler>(xform, *tempGrid, *destGrid);
                tempGrid.swap(destGrid);
            }
            // (3, 2, 1) -> (2, 1, 0) -> (1, 0, 0) -> (0, 0, 0), etc.
            count = math::maxComponent(count - 1, Vec3i::zero());
        }
        // Apply the post-scale transform and store the result in the output grid.
        if (!mPostScaleTransform.eq(Mat4R::identity())) {
            const MatrixTransform xform(mPostScaleTransform);
            applyTransform<Sampler>(xform, *tempGrid, outGrid);
        } else {
            outGrid.setTree(tempGrid->treePtr());
        }
    }
}
////////////////////////////////////////
/// @brief TBB-compatible reduction body that transforms ranges of leaf nodes
/// or tiles of an input tree into an output tree.
/// @details Designed for tbb::parallel_reduce: the splitting constructor gives
/// each task a private output tree, and join() merges partial results back
/// into the parent's tree.
template<class Sampler, class TreeT, typename Transformer>
class GridResampler::RangeProcessor
{
public:
    using LeafIterT = typename TreeT::LeafCIter;
    using TileIterT = typename TreeT::ValueAllCIter;
    using LeafRange = typename tree::IteratorRange<LeafIterT>;
    using TileRange = typename tree::IteratorRange<TileIterT>;
    using InTreeAccessor = typename tree::ValueAccessor<const TreeT>;
    using OutTreeAccessor = typename tree::ValueAccessor<TreeT>;
    /// Root constructor: write directly into the caller-supplied output tree.
    RangeProcessor(const Transformer& xform, const CoordBBox& b, const TreeT& inT, TreeT& outT):
        mIsRoot(true), mXform(xform), mBBox(b),
        mInTree(inT), mOutTree(&outT), mInAcc(mInTree), mOutAcc(*mOutTree)
    {}
    /// Non-root constructor: allocate (and own) a private output tree.
    RangeProcessor(const Transformer& xform, const CoordBBox& b, const TreeT& inTree):
        mIsRoot(false), mXform(xform), mBBox(b),
        mInTree(inTree), mOutTree(new TreeT(inTree.background())),
        mInAcc(mInTree), mOutAcc(*mOutTree)
    {}
    // Only non-root processors own their output tree, so only they delete it.
    ~RangeProcessor() { if (!mIsRoot) delete mOutTree; }
    /// Splitting constructor: don't copy the original processor's output tree
    RangeProcessor(RangeProcessor& other, tbb::split):
        mIsRoot(false),
        mXform(other.mXform),
        mBBox(other.mBBox),
        mInTree(other.mInTree),
        mOutTree(new TreeT(mInTree.background())),
        mInAcc(mInTree),
        mOutAcc(*mOutTree),
        mInterrupt(other.mInterrupt)
    {}
    void setInterrupt(const InterruptFunc& f) { mInterrupt = f; }
    /// Transform each leaf node in the given range.
    void operator()(LeafRange& r)
    {
        for ( ; r; ++r) {
            if (interrupt()) break;
            LeafIterT i = r.iterator();
            // Bounding box of this leaf node in index space.
            CoordBBox bbox(i->origin(), i->origin() + Coord(i->dim()));
            if (!mBBox.empty()) {
                // Intersect the leaf node's bounding box with mBBox.
                bbox = CoordBBox(
                    Coord::maxComponent(bbox.min(), mBBox.min()),
                    Coord::minComponent(bbox.max(), mBBox.max()));
            }
            if (!bbox.empty()) {
                transformBBox<Sampler>(mXform, bbox, mInAcc, mOutAcc, mInterrupt);
            }
        }
    }
    /// Transform each non-background tile in the given range.
    void operator()(TileRange& r)
    {
        for ( ; r; ++r) {
            if (interrupt()) break;
            TileIterT i = r.iterator();
            // Skip voxels and background tiles.
            if (!i.isTileValue()) continue;
            if (!i.isValueOn() && math::isApproxEqual(*i, mOutTree->background())) continue;
            CoordBBox bbox;
            i.getBoundingBox(bbox);
            if (!mBBox.empty()) {
                // Intersect the tile's bounding box with mBBox.
                bbox = CoordBBox(
                    Coord::maxComponent(bbox.min(), mBBox.min()),
                    Coord::minComponent(bbox.max(), mBBox.max()));
            }
            if (!bbox.empty()) {
                /// @todo This samples the tile voxel-by-voxel, which is much too slow.
                /// Instead, compute the largest axis-aligned bounding box that is
                /// contained in the transformed tile (adjusted for the sampler radius)
                /// and fill it with the tile value. Then transform the remaining voxels.
                internal::TileSampler<Sampler, InTreeAccessor>
                    sampler(bbox, i.getValue(), i.isValueOn());
                transformBBox(mXform, bbox, mInAcc, mOutAcc, mInterrupt, sampler);
            }
        }
    }
    /// Merge another processor's output tree into this processor's tree.
    void join(RangeProcessor& other)
    {
        if (!interrupt()) mOutTree->merge(*other.mOutTree);
    }
private:
    bool interrupt() const { return mInterrupt && mInterrupt(); }
    const bool mIsRoot; // true if mOutTree is the top-level tree
    Transformer mXform;
    CoordBBox mBBox;
    const TreeT& mInTree;
    TreeT* mOutTree;
    InTreeAccessor mInAcc;
    OutTreeAccessor mOutAcc;
    InterruptFunc mInterrupt;
};
////////////////////////////////////////
/// @brief Transform the tiles and leaf nodes of @a inGrid through @a xform,
/// writing results into @a outGrid.
/// @details Tiles and leaf nodes are processed in separate passes, optionally
/// in parallel via tbb::parallel_reduce.  For level sets, only the active
/// narrow band is resampled, after which the output tree is pruned and
/// signed-flood-filled.
template<class Sampler, class GridT, typename Transformer>
void
GridResampler::applyTransform(const Transformer& xform,
    const GridT& inGrid, GridT& outGrid) const
{
    using TreeT = typename GridT::TreeType;
    const TreeT& inTree = inGrid.tree();
    TreeT& outTree = outGrid.tree();
    using RangeProc = RangeProcessor<Sampler, TreeT, Transformer>;
    const GridClass gridClass = inGrid.getGridClass();
    if (gridClass != GRID_LEVEL_SET && mTransformTiles) {
        // Independently transform the tiles of the input grid.
        // Note: Tiles in level sets can only be background tiles, and they
        // are handled more efficiently with a signed flood fill (see below).
        RangeProc proc(xform, CoordBBox(), inTree, outTree);
        proc.setInterrupt(mInterrupt);
        typename RangeProc::TileIterT tileIter = inTree.cbeginValueAll();
        tileIter.setMaxDepth(tileIter.getLeafDepth() - 1); // skip leaf nodes
        typename RangeProc::TileRange tileRange(tileIter);
        if (mThreaded) {
            tbb::parallel_reduce(tileRange, proc);
        } else {
            proc(tileRange);
        }
    }
    CoordBBox clipBBox;
    if (gridClass == GRID_LEVEL_SET) {
        // Inactive voxels in level sets can only be background voxels, and they
        // are handled more efficiently with a signed flood fill (see below).
        clipBBox = inGrid.evalActiveVoxelBoundingBox();
    }
    // Independently transform the leaf nodes of the input grid.
    RangeProc proc(xform, clipBBox, inTree, outTree);
    proc.setInterrupt(mInterrupt);
    typename RangeProc::LeafRange leafRange(inTree.cbeginLeaf());
    if (mThreaded) {
        tbb::parallel_reduce(leafRange, proc);
    } else {
        proc(leafRange);
    }
    // If the grid is a level set, mark inactive voxels as inside or outside.
    if (gridClass == GRID_LEVEL_SET) {
        tools::pruneLevelSet(outTree);
        tools::signedFloodFill(outTree);
    }
}
////////////////////////////////////////
//static
/// @brief Resample the voxels of @a inTree that lie inside @a bbox through
/// @a xform, writing results into @a outTree.
/// @details The index-space bounding box is first mapped through the forward
/// transform to find the region of the output tree to fill.  For non-affine
/// transforms, each output voxel is individually back-projected into the
/// input tree; for affine transforms, constant per-axis step vectors in input
/// space replace a full inverse transform per voxel.
template<class Sampler, class InTreeT, class OutTreeT, class Transformer>
void
GridResampler::transformBBox(
    const Transformer& xform,
    const CoordBBox& bbox,
    const InTreeT& inTree,
    OutTreeT& outTree,
    const InterruptFunc& interrupt,
    const Sampler& sampler)
{
    using ValueT = typename OutTreeT::ValueType;
    // Transform the corners of the input tree's bounding box
    // and compute the enclosing bounding box in the output tree.
    Vec3R
        inRMin(bbox.min().x(), bbox.min().y(), bbox.min().z()),
        inRMax(bbox.max().x()+1, bbox.max().y()+1, bbox.max().z()+1),
        outRMin = math::minComponent(xform.transform(inRMin), xform.transform(inRMax)),
        outRMax = math::maxComponent(xform.transform(inRMin), xform.transform(inRMax));
    for (int i = 0; i < 8; ++i) {
        Vec3R corner(
            i & 1 ? inRMax.x() : inRMin.x(),
            i & 2 ? inRMax.y() : inRMin.y(),
            i & 4 ? inRMax.z() : inRMin.z());
        outRMin = math::minComponent(outRMin, xform.transform(corner));
        outRMax = math::maxComponent(outRMax, xform.transform(corner));
    }
    // Pad the output bounds by the sampler's stencil radius so that every
    // output voxel whose stencil touches the region is visited.
    Vec3i
        outMin = local_util::floorVec3(outRMin) - Sampler::radius(),
        outMax = local_util::ceilVec3(outRMax) + Sampler::radius();
    if (!xform.isAffine()) {
        // If the transform is not affine, back-project each output voxel
        // into the input tree.
        Vec3R xyz, inXYZ;
        Coord outXYZ;
        int &x = outXYZ.x(), &y = outXYZ.y(), &z = outXYZ.z();
        for (x = outMin.x(); x <= outMax.x(); ++x) {
            if (interrupt && interrupt()) break;
            xyz.x() = x;
            for (y = outMin.y(); y <= outMax.y(); ++y) {
                if (interrupt && interrupt()) break;
                xyz.y() = y;
                for (z = outMin.z(); z <= outMax.z(); ++z) {
                    xyz.z() = z;
                    inXYZ = xform.invTransform(xyz);
                    ValueT result;
                    if (sampler.sample(inTree, inXYZ, result)) {
                        outTree.setValueOn(outXYZ, result);
                    } else {
                        // Note: Don't overwrite existing active values with inactive values.
                        if (!outTree.isValueOn(outXYZ)) {
                            outTree.setValueOff(outXYZ, result);
                        }
                    }
                }
            }
        }
    } else { // affine
        // Compute step sizes in the input tree that correspond to
        // unit steps in x, y and z in the output tree.
        const Vec3R
            translation = xform.invTransform(Vec3R(0, 0, 0)),
            deltaX = xform.invTransform(Vec3R(1, 0, 0)) - translation,
            deltaY = xform.invTransform(Vec3R(0, 1, 0)) - translation,
            deltaZ = xform.invTransform(Vec3R(0, 0, 1)) - translation;
#if defined(__ICC)
        /// @todo The following line is a workaround for bad code generation
        /// in opt-icc11.1_64 (but not debug or gcc) builds. It should be
        /// removed once the problem has been addressed at its source.
        const Vec3R dummy = deltaX;
#endif
        // Step by whole voxels through the output tree, sampling the
        // corresponding fractional voxels of the input tree.
        Vec3R inStartX = xform.invTransform(Vec3R(outMin));
        Coord outXYZ;
        int &x = outXYZ.x(), &y = outXYZ.y(), &z = outXYZ.z();
        for (x = outMin.x(); x <= outMax.x(); ++x, inStartX += deltaX) {
            if (interrupt && interrupt()) break;
            Vec3R inStartY = inStartX;
            for (y = outMin.y(); y <= outMax.y(); ++y, inStartY += deltaY) {
                if (interrupt && interrupt()) break;
                Vec3R inXYZ = inStartY;
                for (z = outMin.z(); z <= outMax.z(); ++z, inXYZ += deltaZ) {
                    ValueT result;
                    if (sampler.sample(inTree, inXYZ, result)) {
                        outTree.setValueOn(outXYZ, result);
                    } else {
                        // Note: Don't overwrite existing active values with inactive values.
                        if (!outTree.isValueOn(outXYZ)) {
                            outTree.setValueOff(outXYZ, result);
                        }
                    }
                }
            }
        }
    }
} // GridResampler::transformBBox()
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_GRIDTRANSFORMER_HAS_BEEN_INCLUDED
| 37,980 | C | 35.520192 | 97 | 0.623223 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/PoissonSolver.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file PoissonSolver.h
///
/// @authors D.J. Hill, Peter Cucka
///
/// @brief Solve Poisson's equation ∇<sup><small>2</small></sup><i>x</i> = <i>b</i>
/// for <i>x</i>, where @e b is a vector comprising the values of all of the active voxels
/// in a grid.
///
/// @par Example:
/// Solve for the pressure in a cubic tank of liquid, assuming uniform boundary conditions:
/// @code
/// FloatTree source(/*background=*/0.0f);
/// // Activate voxels to indicate that they contain liquid.
/// source.fill(CoordBBox(Coord(0, -10, 0), Coord(10, 0, 10)), /*value=*/0.0f);
///
/// math::pcg::State state = math::pcg::terminationDefaults<float>();
/// FloatTree::Ptr solution = tools::poisson::solve(source, state);
/// @endcode
///
/// @par Example:
/// Solve for the pressure, <i>P</i>, in a cubic tank of liquid that is open at the top.
/// Boundary conditions are <i>P</i> = 0 at the top,
/// ∂<i>P</i>/∂<i>y</i> = −1 at the bottom
/// and ∂<i>P</i>/∂<i>x</i> = 0 at the sides:
/// <pre>
/// P = 0
/// +--------+ (N,0,N)
/// /| /|
/// (0,0,0) +--------+ |
/// | | | | dP/dx = 0
/// dP/dx = 0 | +------|-+
/// |/ |/
/// (0,-N,0) +--------+ (N,-N,0)
/// dP/dy = -1
/// </pre>
/// @code
/// const int N = 10;
/// DoubleTree source(/*background=*/0.0);
/// // Activate voxels to indicate that they contain liquid.
/// source.fill(CoordBBox(Coord(0, -N, 0), Coord(N, 0, N)), /*value=*/0.0);
///
/// auto boundary = [](const openvdb::Coord& ijk, const openvdb::Coord& neighbor,
/// double& source, double& diagonal)
/// {
/// if (neighbor.x() == ijk.x() && neighbor.z() == ijk.z()) {
/// if (neighbor.y() < ijk.y()) source -= 1.0;
/// else diagonal -= 1.0;
/// }
/// };
///
/// math::pcg::State state = math::pcg::terminationDefaults<double>();
/// util::NullInterrupter interrupter;
///
/// DoubleTree::Ptr solution = tools::poisson::solveWithBoundaryConditions(
/// source, boundary, state, interrupter);
/// @endcode
#ifndef OPENVDB_TOOLS_POISSONSOLVER_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_POISSONSOLVER_HAS_BEEN_INCLUDED
#include <openvdb/Types.h>
#include <openvdb/math/ConjGradient.h>
#include <openvdb/tree/LeafManager.h>
#include <openvdb/tree/Tree.h>
#include <openvdb/util/NullInterrupter.h>
#include "Morphology.h" // for erodeVoxels
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
namespace poisson {
// This type should be at least as wide as math::pcg::SizeType.
using VIndex = Int32;
/// The type of a matrix used to represent a three-dimensional %Laplacian operator
using LaplacianMatrix = math::pcg::SparseStencilMatrix<double, 7>;
//@{
/// @brief Solve ∇<sup><small>2</small></sup><i>x</i> = <i>b</i> for <i>x</i>,
/// where @e b is a vector comprising the values of all of the active voxels
/// in the input tree.
/// @return a new tree, with the same active voxel topology as the input tree,
/// whose voxel values are the elements of the solution vector <i>x</i>.
/// @details On input, the State object should specify convergence criteria
/// (minimum error and maximum number of iterations); on output, it gives
/// the actual termination conditions.
/// @details The solution is computed using the conjugate gradient method
/// with (where possible) incomplete Cholesky preconditioning, falling back
/// to Jacobi preconditioning.
/// @sa solveWithBoundaryConditions
template<typename TreeType>
inline typename TreeType::Ptr
solve(const TreeType&, math::pcg::State&, bool staggered = false);
template<typename TreeType, typename Interrupter>
inline typename TreeType::Ptr
solve(const TreeType&, math::pcg::State&, Interrupter&, bool staggered = false);
//@}
//@{
/// @brief Solve ∇<sup><small>2</small></sup><i>x</i> = <i>b</i> for <i>x</i>
/// with user-specified boundary conditions, where @e b is a vector comprising
/// the values of all of the active voxels in the input tree or domain mask if provided
/// @return a new tree, with the same active voxel topology as the input tree,
/// whose voxel values are the elements of the solution vector <i>x</i>.
/// @details On input, the State object should specify convergence criteria
/// (minimum error and maximum number of iterations); on output, it gives
/// the actual termination conditions.
/// @details The solution is computed using the conjugate gradient method with
/// the specified type of preconditioner (default: incomplete Cholesky),
/// falling back to Jacobi preconditioning if necessary.
/// @details Each thread gets its own copy of the BoundaryOp, which should be
/// a functor of the form
/// @code
/// struct BoundaryOp {
/// using ValueType = LaplacianMatrix::ValueType;
/// void operator()(
/// const Coord& ijk, // coordinates of a boundary voxel
/// const Coord& ijkNeighbor, // coordinates of an exterior neighbor of ijk
/// ValueType& source, // element of b corresponding to ijk
/// ValueType& diagonal // element of Laplacian matrix corresponding to ijk
/// ) const;
/// };
/// @endcode
/// The functor is called for each of the exterior neighbors of each boundary voxel @a ijk,
/// and it must specify a boundary condition for @a ijk by modifying one or both of two
/// provided values: the entry in the source vector @e b corresponding to @a ijk and
/// the weighting coefficient for @a ijk in the Laplacian operator matrix.
///
/// @sa solve
template<typename TreeType, typename BoundaryOp, typename Interrupter>
inline typename TreeType::Ptr
solveWithBoundaryConditions(
const TreeType&,
const BoundaryOp&,
math::pcg::State&,
Interrupter&,
bool staggered = false);
template<
typename PreconditionerType,
typename TreeType,
typename BoundaryOp,
typename Interrupter>
inline typename TreeType::Ptr
solveWithBoundaryConditionsAndPreconditioner(
const TreeType&,
const BoundaryOp&,
math::pcg::State&,
Interrupter&,
bool staggered = false);
template<
typename PreconditionerType,
typename TreeType,
typename DomainTreeType,
typename BoundaryOp,
typename Interrupter>
inline typename TreeType::Ptr
solveWithBoundaryConditionsAndPreconditioner(
const TreeType&,
const DomainTreeType&,
const BoundaryOp&,
math::pcg::State&,
Interrupter&,
bool staggered = false);
//@}
/// @name Low-level functions
//@{
// The following are low-level routines that can be used to assemble custom solvers.
/// @brief Overwrite each active voxel in the given scalar tree
/// with a sequential index, starting from zero.
template<typename VIndexTreeType>
inline void populateIndexTree(VIndexTreeType&);
/// @brief Iterate over the active voxels of the input tree and for each one
/// assign its index in the iteration sequence to the corresponding voxel
/// of an integer-valued output tree.
template<typename TreeType>
inline typename TreeType::template ValueConverter<VIndex>::Type::Ptr
createIndexTree(const TreeType&);
/// @brief Return a vector of the active voxel values of the scalar-valued @a source tree.
/// @details The <i>n</i>th element of the vector corresponds to the voxel whose value
/// in the @a index tree is @e n.
/// @param source a tree with a scalar value type
/// @param index a tree of the same configuration as @a source but with
/// value type VIndex that maps voxels to elements of the output vector
template<typename VectorValueType, typename SourceTreeType>
inline typename math::pcg::Vector<VectorValueType>::Ptr
createVectorFromTree(
const SourceTreeType& source,
const typename SourceTreeType::template ValueConverter<VIndex>::Type& index);
/// @brief Return a tree with the same active voxel topology as the @a index tree
/// but whose voxel values are taken from the given vector.
/// @details The voxel whose value in the @a index tree is @e n gets assigned
/// the <i>n</i>th element of the vector.
/// @param index a tree with value type VIndex that maps voxels to elements of @a values
/// @param values a vector of values with which to populate the active voxels of the output tree
/// @param background the value for the inactive voxels of the output tree
template<typename TreeValueType, typename VIndexTreeType, typename VectorValueType>
inline typename VIndexTreeType::template ValueConverter<TreeValueType>::Type::Ptr
createTreeFromVector(
const math::pcg::Vector<VectorValueType>& values,
const VIndexTreeType& index,
const TreeValueType& background);
/// @brief Generate a sparse matrix of the index-space (Δ<i>x</i> = 1) %Laplacian operator
/// using second-order finite differences.
/// @details This construction assumes homogeneous Dirichlet boundary conditions
/// (exterior grid points are zero).
template<typename BoolTreeType>
inline LaplacianMatrix::Ptr
createISLaplacian(
const typename BoolTreeType::template ValueConverter<VIndex>::Type& vectorIndexTree,
const BoolTreeType& interiorMask,
bool staggered = false);
/// @brief Generate a sparse matrix of the index-space (Δ<i>x</i> = 1) %Laplacian operator
/// with user-specified boundary conditions using second-order finite differences.
/// @details Each thread gets its own copy of @a boundaryOp, which should be
/// a functor of the form
/// @code
/// struct BoundaryOp {
/// using ValueType = LaplacianMatrix::ValueType;
/// void operator()(
/// const Coord& ijk, // coordinates of a boundary voxel
/// const Coord& ijkNeighbor, // coordinates of an exterior neighbor of ijk
/// ValueType& source, // element of source vector corresponding to ijk
/// ValueType& diagonal // element of Laplacian matrix corresponding to ijk
/// ) const;
/// };
/// @endcode
/// The functor is called for each of the exterior neighbors of each boundary voxel @a ijk,
/// and it must specify a boundary condition for @a ijk by modifying one or both of two
/// provided values: an entry in the given @a source vector corresponding to @a ijk and
/// the weighting coefficient for @a ijk in the %Laplacian matrix.
template<typename BoolTreeType, typename BoundaryOp>
inline LaplacianMatrix::Ptr
createISLaplacianWithBoundaryConditions(
const typename BoolTreeType::template ValueConverter<VIndex>::Type& vectorIndexTree,
const BoolTreeType& interiorMask,
const BoundaryOp& boundaryOp,
typename math::pcg::Vector<LaplacianMatrix::ValueType>& source,
bool staggered = false);
/// @brief Dirichlet boundary condition functor
/// @details Useful for describing fluid/air interfaces, where the pressure
/// of the air is taken to be zero.
template<typename ValueType>
struct DirichletBoundaryOp {
    inline void operator()(const Coord& /*ijk*/, const Coord& /*ijkNeighbor*/,
        ValueType& /*source*/, ValueType& diag) const
    {
        // A zero-valued exterior neighbor leaves the source vector untouched
        // but still reduces the diagonal weight, just as an interior
        // neighbor would.
        diag -= 1;
    }
};
//@}
////////////////////////////////////////
namespace internal {
/// @brief Functor for use with LeafManager::foreach() to populate an array
/// with per-leaf active voxel counts
template<typename LeafType>
struct LeafCountOp
{
    VIndex* count; // one slot per leaf node
    LeafCountOp(VIndex* counts): count(counts) {}
    void operator()(const LeafType& leaf, size_t leafIdx) const
    {
        // Record how many active voxels this leaf contributes.
        count[leafIdx] = static_cast<VIndex>(leaf.onVoxelCount());
    }
};
/// @brief Functor for use with LeafManager::foreach() to populate
/// active leaf voxels with sequential indices
template<typename LeafType>
struct LeafIndexOp
{
const VIndex* count;
LeafIndexOp(const VIndex* count_): count(count_) {}
void operator()(LeafType& leaf, size_t leafIdx) const {
VIndex idx = (leafIdx == 0) ? 0 : count[leafIdx - 1];
for (typename LeafType::ValueOnIter it = leaf.beginValueOn(); it; ++it) {
it.setValue(idx++);
}
}
};
} // namespace internal
template<typename VIndexTreeType>
inline void
populateIndexTree(VIndexTreeType& result)
{
using LeafT = typename VIndexTreeType::LeafNodeType;
using LeafMgrT = typename tree::LeafManager<VIndexTreeType>;
// Linearize the tree.
LeafMgrT leafManager(result);
const size_t leafCount = leafManager.leafCount();
if (leafCount == 0) return;
// Count the number of active voxels in each leaf node.
std::unique_ptr<VIndex[]> perLeafCount(new VIndex[leafCount]);
VIndex* perLeafCountPtr = perLeafCount.get();
leafManager.foreach(internal::LeafCountOp<LeafT>(perLeafCountPtr));
// The starting index for each leaf node is the total number
// of active voxels in all preceding leaf nodes.
for (size_t i = 1; i < leafCount; ++i) {
perLeafCount[i] += perLeafCount[i - 1];
}
// The last accumulated value should be the total of all active voxels.
assert(Index64(perLeafCount[leafCount-1]) == result.activeVoxelCount());
// Parallelize over the leaf nodes of the tree, storing a unique index
// in each active voxel.
leafManager.foreach(internal::LeafIndexOp<LeafT>(perLeafCountPtr));
}
/// @brief Iterate over the active voxels of the input tree and for each one
/// assign its index in the iteration sequence to the corresponding voxel
/// of an integer-valued output tree.
template<typename TreeType>
inline typename TreeType::template ValueConverter<VIndex>::Type::Ptr
createIndexTree(const TreeType& tree)
{
    using VIdxTreeT = typename TreeType::template ValueConverter<VIndex>::Type;

    // Clone the input tree's active voxel topology; inactive voxels get
    // an invalid index (-1) as the background value.
    typename VIdxTreeT::Ptr result(
        new VIdxTreeT(tree, /*background=*/VIndex(-1), TopologyCopy()));

    // Active tiles represent degrees of freedom too, so expand them into
    // individual voxels before assigning indices.
    result->voxelizeActiveTiles();
    populateIndexTree(*result);
    return result;
}
////////////////////////////////////////
namespace internal {
/// @brief Functor for use with LeafManager::foreach() to populate a vector
/// with the values of a tree's active voxels
template<typename VectorValueType, typename SourceTreeType>
struct CopyToVecOp
{
using VIdxTreeT = typename SourceTreeType::template ValueConverter<VIndex>::Type;
using VIdxLeafT = typename VIdxTreeT::LeafNodeType;
using LeafT = typename SourceTreeType::LeafNodeType;
using TreeValueT = typename SourceTreeType::ValueType;
using VectorT = typename math::pcg::Vector<VectorValueType>;
const SourceTreeType* tree;
VectorT* vector;
CopyToVecOp(const SourceTreeType& t, VectorT& v): tree(&t), vector(&v) {}
void operator()(const VIdxLeafT& idxLeaf, size_t /*leafIdx*/) const
{
VectorT& vec = *vector;
if (const LeafT* leaf = tree->probeLeaf(idxLeaf.origin())) {
// If a corresponding leaf node exists in the source tree,
// copy voxel values from the source node to the output vector.
for (typename VIdxLeafT::ValueOnCIter it = idxLeaf.cbeginValueOn(); it; ++it) {
vec[*it] = leaf->getValue(it.pos());
}
} else {
// If no corresponding leaf exists in the source tree,
// fill the vector with a uniform value.
const TreeValueT& value = tree->getValue(idxLeaf.origin());
for (typename VIdxLeafT::ValueOnCIter it = idxLeaf.cbeginValueOn(); it; ++it) {
vec[*it] = value;
}
}
}
};
} // namespace internal
/// @brief Return a vector of the active voxel values of the scalar-valued
/// @a tree.
/// @details The <i>n</i>th element of the vector corresponds to the voxel
/// whose value in @a idxTree is @e n.
template<typename VectorValueType, typename SourceTreeType>
inline typename math::pcg::Vector<VectorValueType>::Ptr
createVectorFromTree(const SourceTreeType& tree,
    const typename SourceTreeType::template ValueConverter<VIndex>::Type& idxTree)
{
    using VIdxTreeT = typename SourceTreeType::template ValueConverter<VIndex>::Type;
    using VIdxLeafMgrT = tree::LeafManager<const VIdxTreeT>;
    using VectorT = typename math::pcg::Vector<VectorValueType>;

    // One vector element per active voxel of the index tree.
    const math::pcg::SizeType size =
        static_cast<math::pcg::SizeType>(idxTree.activeVoxelCount());
    typename VectorT::Ptr result(new VectorT(size));

    // In parallel over the index tree's leaf nodes, gather values from the
    // corresponding voxels of the source tree into the vector.
    VIdxLeafMgrT mgr(idxTree);
    mgr.foreach(internal::CopyToVecOp<VectorValueType, SourceTreeType>(tree, *result));
    return result;
}
////////////////////////////////////////
namespace internal {
/// @brief Functor for use with LeafManager::foreach() to populate a tree
/// with values from a vector
template<typename TreeValueType, typename VIndexTreeType, typename VectorValueType>
struct CopyFromVecOp
{
using OutTreeT = typename VIndexTreeType::template ValueConverter<TreeValueType>::Type;
using OutLeafT = typename OutTreeT::LeafNodeType;
using VIdxLeafT = typename VIndexTreeType::LeafNodeType;
using VectorT = typename math::pcg::Vector<VectorValueType>;
const VectorT* vector;
OutTreeT* tree;
CopyFromVecOp(const VectorT& v, OutTreeT& t): vector(&v), tree(&t) {}
void operator()(const VIdxLeafT& idxLeaf, size_t /*leafIdx*/) const
{
const VectorT& vec = *vector;
OutLeafT* leaf = tree->probeLeaf(idxLeaf.origin());
assert(leaf != nullptr);
for (typename VIdxLeafT::ValueOnCIter it = idxLeaf.cbeginValueOn(); it; ++it) {
leaf->setValueOnly(it.pos(), static_cast<TreeValueType>(vec[*it]));
}
}
};
} // namespace internal
/// @brief Return a tree with the same active voxel topology as @a idxTree
/// but whose voxel values are taken from @a vector.
/// @param vector      values with which to populate the active voxels
/// @param idxTree     tree of value type VIndex mapping voxels to elements
///                    of @a vector
/// @param background  value for the inactive voxels of the output tree
template<typename TreeValueType, typename VIndexTreeType, typename VectorValueType>
inline typename VIndexTreeType::template ValueConverter<TreeValueType>::Type::Ptr
createTreeFromVector(
    const math::pcg::Vector<VectorValueType>& vector,
    const VIndexTreeType& idxTree,
    const TreeValueType& background)
{
    using OutTreeT = typename VIndexTreeType::template ValueConverter<TreeValueType>::Type;
    using VIdxLeafMgrT = typename tree::LeafManager<const VIndexTreeType>;

    // Clone the index tree's active topology; all values start at background.
    typename OutTreeT::Ptr result(new OutTreeT(idxTree, background, TopologyCopy()));

    // In parallel over the index tree's leaf nodes, copy vector elements
    // into the corresponding voxels of the output tree.
    VIdxLeafMgrT mgr(idxTree);
    mgr.foreach(
        internal::CopyFromVecOp<TreeValueType, VIndexTreeType, VectorValueType>(vector, *result));
    return result;
}
////////////////////////////////////////
namespace internal {
/// @brief Functor for use with LeafManager::foreach() to populate a sparse %Laplacian matrix
/// using a second-order, 7-point stencil for a staggered velocity grid.
/// @details Interior rows (voxels active in @c interiorMask) get a fixed diagonal
/// of -6 and unit off-diagonals. Boundary rows start from a zero diagonal:
/// each in-domain neighbor contributes a unit off-diagonal and decrements the
/// diagonal, and each out-of-domain neighbor is delegated to @c boundaryOp,
/// which may modify both the source (right-hand-side) entry and the diagonal.
template<typename BoolTreeType, typename BoundaryOp>
struct ISStaggeredLaplacianOp
{
    using VIdxTreeT = typename BoolTreeType::template ValueConverter<VIndex>::Type;
    using VIdxLeafT = typename VIdxTreeT::LeafNodeType;
    using ValueT = LaplacianMatrix::ValueType;
    using VectorT = typename math::pcg::Vector<ValueT>;

    LaplacianMatrix* laplacian;       // output matrix (not owned)
    const VIdxTreeT* idxTree;         // maps active voxels to matrix rows/columns (not owned)
    const BoolTreeType* interiorMask; // active where all six neighbors lie inside the domain
    const BoundaryOp boundaryOp;      // boundary condition functor (stored by value)
    VectorT* source;                  // right-hand-side vector, adjustable by boundaryOp

    ISStaggeredLaplacianOp(LaplacianMatrix& m, const VIdxTreeT& idx,
        const BoolTreeType& mask, const BoundaryOp& op, VectorT& src):
        laplacian(&m), idxTree(&idx), interiorMask(&mask), boundaryOp(op), source(&src) {}

    /// Populate the matrix rows corresponding to the active voxels of one leaf node.
    void operator()(const VIdxLeafT& idxLeaf, size_t /*leafIdx*/) const
    {
        // Local accessors (per-invocation caches over the shared trees)
        typename tree::ValueAccessor<const BoolTreeType> interior(*interiorMask);
        typename tree::ValueAccessor<const VIdxTreeT> vectorIdx(*idxTree);

        Coord ijk;
        VIndex column;
        const ValueT diagonal = -6.f, offDiagonal = 1.f;

        // Loop over active voxels in this leaf.
        for (typename VIdxLeafT::ValueOnCIter it = idxLeaf.cbeginValueOn(); it; ++it) {
            assert(it.getValue() > -1);
            // The voxel's index-tree value is its row (and column) number.
            const math::pcg::SizeType rowNum = static_cast<math::pcg::SizeType>(it.getValue());

            LaplacianMatrix::RowEditor row = laplacian->getRowEditor(rowNum);

            ijk = it.getCoord();
            if (interior.isValueOn(ijk)) {
                // The current voxel is an interior voxel.
                // All of its neighbors are in the solution domain.

                // -x direction
                row.setValue(vectorIdx.getValue(ijk.offsetBy(-1, 0, 0)), offDiagonal);
                // -y direction
                row.setValue(vectorIdx.getValue(ijk.offsetBy(0, -1, 0)), offDiagonal);
                // -z direction
                row.setValue(vectorIdx.getValue(ijk.offsetBy(0, 0, -1)), offDiagonal);
                // diagonal
                row.setValue(rowNum, diagonal);
                // +z direction
                row.setValue(vectorIdx.getValue(ijk.offsetBy(0, 0, 1)), offDiagonal);
                // +y direction
                row.setValue(vectorIdx.getValue(ijk.offsetBy(0, 1, 0)), offDiagonal);
                // +x direction
                row.setValue(vectorIdx.getValue(ijk.offsetBy(1, 0, 0)), offDiagonal);

            } else {
                // The current voxel is a boundary voxel.
                // At least one of its neighbors is outside the solution domain.
                // Start from zero: each in-domain neighbor subtracts one, and
                // boundaryOp accounts for each missing neighbor.
                ValueT modifiedDiagonal = 0.f;

                // -x direction
                if (vectorIdx.probeValue(ijk.offsetBy(-1, 0, 0), column)) {
                    row.setValue(column, offDiagonal);
                    modifiedDiagonal -= 1;
                } else {
                    boundaryOp(ijk, ijk.offsetBy(-1, 0, 0), source->at(rowNum), modifiedDiagonal);
                }
                // -y direction
                if (vectorIdx.probeValue(ijk.offsetBy(0, -1, 0), column)) {
                    row.setValue(column, offDiagonal);
                    modifiedDiagonal -= 1;
                } else {
                    boundaryOp(ijk, ijk.offsetBy(0, -1, 0), source->at(rowNum), modifiedDiagonal);
                }
                // -z direction
                if (vectorIdx.probeValue(ijk.offsetBy(0, 0, -1), column)) {
                    row.setValue(column, offDiagonal);
                    modifiedDiagonal -= 1;
                } else {
                    boundaryOp(ijk, ijk.offsetBy(0, 0, -1), source->at(rowNum), modifiedDiagonal);
                }
                // +z direction
                if (vectorIdx.probeValue(ijk.offsetBy(0, 0, 1), column)) {
                    row.setValue(column, offDiagonal);
                    modifiedDiagonal -= 1;
                } else {
                    boundaryOp(ijk, ijk.offsetBy(0, 0, 1), source->at(rowNum), modifiedDiagonal);
                }
                // +y direction
                if (vectorIdx.probeValue(ijk.offsetBy(0, 1, 0), column)) {
                    row.setValue(column, offDiagonal);
                    modifiedDiagonal -= 1;
                } else {
                    boundaryOp(ijk, ijk.offsetBy(0, 1, 0), source->at(rowNum), modifiedDiagonal);
                }
                // +x direction
                if (vectorIdx.probeValue(ijk.offsetBy(1, 0, 0), column)) {
                    row.setValue(column, offDiagonal);
                    modifiedDiagonal -= 1;
                } else {
                    boundaryOp(ijk, ijk.offsetBy(1, 0, 0), source->at(rowNum), modifiedDiagonal);
                }
                // diagonal
                row.setValue(rowNum, modifiedDiagonal);
            }
        } // end loop over voxels
    }
};
// Stencil 1 is the correct stencil, but stencil 2 requires
// half as many comparisons and produces smoother results at boundaries.
//#define OPENVDB_TOOLS_POISSON_LAPLACIAN_STENCIL 1
#define OPENVDB_TOOLS_POISSON_LAPLACIAN_STENCIL 2
/// @brief Functor for use with LeafManager::foreach() to populate a sparse %Laplacian matrix
/// for a collocated (non-staggered) grid.
/// @details The neighbor offsets are chosen at compile time by
/// OPENVDB_TOOLS_POISSON_LAPLACIAN_STENCIL: stencil 1 samples the six
/// face-adjacent neighbors, stencil 2 samples neighbors two voxels away
/// (per the note above, it needs half as many comparisons and is smoother
/// at boundaries).
template<typename VIdxTreeT, typename BoundaryOp>
struct ISLaplacianOp
{
    using VIdxLeafT = typename VIdxTreeT::LeafNodeType;
    using ValueT = LaplacianMatrix::ValueType;
    using VectorT = typename math::pcg::Vector<ValueT>;

    LaplacianMatrix* laplacian;  // output matrix (not owned)
    const VIdxTreeT* idxTree;    // maps active voxels to matrix rows/columns (not owned)
    const BoundaryOp boundaryOp; // boundary condition functor (stored by value)
    VectorT* source;             // right-hand-side vector, adjustable by boundaryOp

    ISLaplacianOp(LaplacianMatrix& m, const VIdxTreeT& idx, const BoundaryOp& op, VectorT& src):
        laplacian(&m), idxTree(&idx), boundaryOp(op), source(&src) {}

    /// Populate the matrix rows corresponding to the active voxels of one leaf node.
    void operator()(const VIdxLeafT& idxLeaf, size_t /*leafIdx*/) const
    {
        typename tree::ValueAccessor<const VIdxTreeT> vectorIdx(*idxTree);

        const int kNumOffsets = 6;
        const Coord ijkOffset[kNumOffsets] = {
#if OPENVDB_TOOLS_POISSON_LAPLACIAN_STENCIL == 1
        Coord(-1,0,0), Coord(1,0,0), Coord(0,-1,0), Coord(0,1,0), Coord(0,0,-1), Coord(0,0,1)
#else
        Coord(-2,0,0), Coord(2,0,0), Coord(0,-2,0), Coord(0,2,0), Coord(0,0,-2), Coord(0,0,2)
#endif
        };

        // For each active voxel in this leaf...
        for (typename VIdxLeafT::ValueOnCIter it = idxLeaf.cbeginValueOn(); it; ++it) {
            assert(it.getValue() > -1);

            const Coord ijk = it.getCoord();
            // The voxel's index-tree value is its row (and column) number.
            const math::pcg::SizeType rowNum = static_cast<math::pcg::SizeType>(it.getValue());

            LaplacianMatrix::RowEditor row = laplacian->getRowEditor(rowNum);

            ValueT modifiedDiagonal = 0.f;

            // For each of the neighbors of the voxel at (i,j,k)...
            for (int dir = 0; dir < kNumOffsets; ++dir) {
                const Coord neighbor = ijk + ijkOffset[dir];
                VIndex column;
                // For collocated vector grids, the central differencing stencil requires
                // access to neighbors at a distance of two voxels in each direction
                // (-x, +x, -y, +y, -z, +z).
#if OPENVDB_TOOLS_POISSON_LAPLACIAN_STENCIL == 1
                // Stencil 1: the neighbor two voxels away must be in the domain
                // AND the intermediate voxel must be active.
                const bool ijkIsInterior = (vectorIdx.probeValue(neighbor + ijkOffset[dir], column)
                    && vectorIdx.isValueOn(neighbor));
#else
                const bool ijkIsInterior = vectorIdx.probeValue(neighbor, column);
#endif
                if (ijkIsInterior) {
                    // If (i,j,k) is sufficiently far away from the exterior,
                    // set its weight to one and adjust the center weight accordingly.
                    row.setValue(column, 1.f);
                    modifiedDiagonal -= 1.f;
                } else {
                    // If (i,j,k) is adjacent to or one voxel away from the exterior,
                    // invoke the boundary condition functor.
                    boundaryOp(ijk, neighbor, source->at(rowNum), modifiedDiagonal);
                }
            }
            // Set the (possibly modified) weight for the voxel at (i,j,k).
            row.setValue(rowNum, modifiedDiagonal);
        }
    }
};
} // namespace internal
/// @brief Assemble a sparse Laplacian matrix for the given index tree,
/// using default (Dirichlet) boundary conditions.
template<typename BoolTreeType>
inline LaplacianMatrix::Ptr
createISLaplacian(const typename BoolTreeType::template ValueConverter<VIndex>::Type& idxTree,
    const BoolTreeType& interiorMask, bool staggered)
{
    using ValueT = LaplacianMatrix::ValueType;
    // Dirichlet boundaries are the default.  The general routine requires a
    // source vector, but its contents are not used by this overload.
    DirichletBoundaryOp<ValueT> dirichlet;
    const math::pcg::SizeType numDoF =
        static_cast<math::pcg::SizeType>(idxTree.activeVoxelCount());
    math::pcg::Vector<ValueT> scratch(numDoF);
    return createISLaplacianWithBoundaryConditions(
        idxTree, interiorMask, dirichlet, scratch, staggered);
}
/// @brief Assemble a sparse Laplacian matrix for the given index tree,
/// applying @a boundaryOp at voxels adjacent to the exterior of the domain.
/// @note @a source is the right-hand-side vector; boundary conditions may
/// modify its entries during assembly.
template<typename BoolTreeType, typename BoundaryOp>
inline LaplacianMatrix::Ptr
createISLaplacianWithBoundaryConditions(
    const typename BoolTreeType::template ValueConverter<VIndex>::Type& idxTree,
    const BoolTreeType& interiorMask,
    const BoundaryOp& boundaryOp,
    typename math::pcg::Vector<LaplacianMatrix::ValueType>& source,
    bool staggered)
{
    using VIdxTreeT = typename BoolTreeType::template ValueConverter<VIndex>::Type;
    using VIdxLeafMgrT = tree::LeafManager<const VIdxTreeT>;

    // One degree of freedom (matrix row) per active voxel.
    LaplacianMatrix::Ptr result(new LaplacianMatrix(
        static_cast<math::pcg::SizeType>(idxTree.activeVoxelCount())));

    // Fill the matrix rows leaf by leaf, in parallel, using a second-order,
    // 7-point central-difference stencil.
    VIdxLeafMgrT mgr(idxTree);
    if (staggered) {
        mgr.foreach(internal::ISStaggeredLaplacianOp<BoolTreeType, BoundaryOp>(
            *result, idxTree, interiorMask, boundaryOp, source));
    } else {
        mgr.foreach(internal::ISLaplacianOp<VIdxTreeT, BoundaryOp>(
            *result, idxTree, boundaryOp, source));
    }
    return result;
}
////////////////////////////////////////
template<typename TreeType>
inline typename TreeType::Ptr
solve(const TreeType& inTree, math::pcg::State& state, bool staggered)
{
util::NullInterrupter interrupter;
return solve(inTree, state, interrupter, staggered);
}
template<typename TreeType, typename Interrupter>
inline typename TreeType::Ptr
solve(const TreeType& inTree, math::pcg::State& state, Interrupter& interrupter, bool staggered)
{
DirichletBoundaryOp<LaplacianMatrix::ValueType> boundaryOp;
return solveWithBoundaryConditions(inTree, boundaryOp, state, interrupter, staggered);
}
/// @brief Solve the Poisson equation with caller-supplied boundary conditions,
/// using the default incomplete-Cholesky preconditioner.
template<typename TreeType, typename BoundaryOp, typename Interrupter>
inline typename TreeType::Ptr
solveWithBoundaryConditions(const TreeType& inTree, const BoundaryOp& boundaryOp,
    math::pcg::State& state, Interrupter& interrupter, bool staggered)
{
    // Incomplete Cholesky is this solver's default preconditioner.
    using DefaultPrecondT = math::pcg::IncompleteCholeskyPreconditioner<LaplacianMatrix>;
    return solveWithBoundaryConditionsAndPreconditioner<DefaultPrecondT>(
        inTree, boundaryOp, state, interrupter, staggered);
}
/// @brief Solve the Poisson equation using the input tree itself as the
/// solution domain.
template<
    typename PreconditionerType,
    typename TreeType,
    typename BoundaryOp,
    typename Interrupter>
inline typename TreeType::Ptr
solveWithBoundaryConditionsAndPreconditioner(
    const TreeType& inTree,
    const BoundaryOp& boundaryOp,
    math::pcg::State& state,
    Interrupter& interrupter,
    bool staggered)
{
    // The source tree doubles as the domain mask in this overload.
    return solveWithBoundaryConditionsAndPreconditioner<PreconditionerType>(
        inTree, inTree, boundaryOp, state, interrupter, staggered);
}
/// @brief Solve the Poisson equation over the active voxels of @a domainMask,
/// taking right-hand-side values from @a inTree, boundary behavior from
/// @a boundaryOp, and the preconditioner from the template argument.
/// @return a new tree, with the topology of the domain, containing the solution.
template<
    typename PreconditionerType,
    typename TreeType,
    typename DomainTreeType,
    typename BoundaryOp,
    typename Interrupter>
inline typename TreeType::Ptr
solveWithBoundaryConditionsAndPreconditioner(
    const TreeType& inTree,
    const DomainTreeType& domainMask,
    const BoundaryOp& boundaryOp,
    math::pcg::State& state,
    Interrupter& interrupter,
    bool staggered)
{
    using TreeValueT = typename TreeType::ValueType;
    using VecValueT = LaplacianMatrix::ValueType;
    using VectorT = typename math::pcg::Vector<VecValueT>;
    using VIdxTreeT = typename TreeType::template ValueConverter<VIndex>::Type;
    using MaskTreeT = typename TreeType::template ValueConverter<bool>::Type;

    // 1. Create a mapping from active voxels of the input tree to elements of a vector.
    typename VIdxTreeT::ConstPtr idxTree = createIndexTree(domainMask);
    // 2. Populate a vector with values from the input tree.
    typename VectorT::Ptr b = createVectorFromTree<VecValueT>(inTree, *idxTree);
    // 3. Create a mask of the interior voxels of the input tree (from the densified
    // index tree), by copying the topology and eroding one voxel off the boundary.
    /// @todo Is this really needed?
    typename MaskTreeT::Ptr interiorMask(
        new MaskTreeT(*idxTree, /*background=*/false, TopologyCopy()));
    tools::erodeVoxels(*interiorMask, /*iterations=*/1, tools::NN_FACE);
    // 4. Create the Laplacian matrix.
    LaplacianMatrix::Ptr laplacian = createISLaplacianWithBoundaryConditions(
        *idxTree, *interiorMask, boundaryOp, *b, staggered);
    // 5. Solve the Poisson equation.
    laplacian->scale(-1.0); // matrix is negative-definite; solve -M x = -b
    b->scale(-1.0);
    // The initial guess is the zero vector.
    typename VectorT::Ptr x(new VectorT(b->size(), zeroVal<VecValueT>()));
    typename math::pcg::Preconditioner<VecValueT>::Ptr precond(
        new PreconditionerType(*laplacian));
    if (!precond->isValid()) {
        // Fall back to a Jacobi preconditioner when the requested
        // preconditioner reports itself invalid.
        precond.reset(new math::pcg::JacobiPreconditioner<LaplacianMatrix>(*laplacian));
    }
    state = math::pcg::solve(*laplacian, *b, *x, *precond, interrupter, state);
    // 6. Populate the output tree with values from the solution vector.
    /// @todo if (state.success) ... ?
    return createTreeFromVector<TreeValueT>(*x, *idxTree, /*background=*/zeroVal<TreeValueT>());
}
} // namespace poisson
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_POISSONSOLVER_HAS_BEEN_INCLUDED
| 32,786 | C | 37.893238 | 99 | 0.669371 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Mask.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file Mask.h
///
/// @brief Construct boolean mask grids from grids of arbitrary type
#ifndef OPENVDB_TOOLS_MASK_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_MASK_HAS_BEEN_INCLUDED
#include <openvdb/Grid.h>
#include "LevelSetUtil.h" // for tools::sdfInteriorMask()
#include <type_traits> // for std::enable_if, std::is_floating_point
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Given an input grid of any type, return a new, boolean grid
/// whose active voxel topology matches the input grid's or,
/// if the input grid is a level set, matches the input grid's interior.
/// @param grid the grid from which to construct a mask
/// @param isovalue for a level set grid, the isovalue that defines the grid's interior
/// @sa tools::sdfInteriorMask()
template<typename GridType>
inline typename GridType::template ValueConverter<bool>::Type::Ptr
interiorMask(const GridType& grid, const double isovalue = 0.0);
////////////////////////////////////////
namespace mask_internal {
/// @private
/// @brief Compile-time helpers mapping a grid type to its boolean mask type.
template<typename GridType>
struct Traits {
    // True if the input grid already stores boolean values.
    static const bool isBool = std::is_same<typename GridType::ValueType, bool>::value;
    using BoolGridType = typename GridType::template ValueConverter<bool>::Type;
    using BoolGridPtrType = typename BoolGridType::Ptr;
};
/// @private
// Floating-point grids: if the grid is tagged as a level set, build a mask
// of its interior; otherwise return an empty pointer so that the caller
// falls back to topology-based masking.
template<typename GridType>
inline typename std::enable_if<std::is_floating_point<typename GridType::ValueType>::value,
    typename mask_internal::Traits<GridType>::BoolGridPtrType>::type
doLevelSetInteriorMask(const GridType& grid, const double isovalue)
{
    using GridValueT = typename GridType::ValueType;
    using MaskGridPtrT = typename mask_internal::Traits<GridType>::BoolGridPtrType;

    if (grid.getGridClass() != GRID_LEVEL_SET) {
        // Not a level set: no interior to extract.
        return MaskGridPtrT{};
    }
    return tools::sdfInteriorMask(grid, static_cast<GridValueT>(isovalue));
}
/// @private
// Non-floating-point grids can never be level sets, so there is no interior
// mask to extract; always return an empty pointer.
template<typename GridType>
inline typename std::enable_if<!std::is_floating_point<typename GridType::ValueType>::value,
    typename mask_internal::Traits<GridType>::BoolGridPtrType>::type
doLevelSetInteriorMask(const GridType&, const double /*isovalue*/)
{
    using ResultT = typename mask_internal::Traits<GridType>::BoolGridPtrType;
    return ResultT{};
}
/// @private
/// @brief Specialization for grids that already store boolean values:
/// the mask is simply a deep copy of the input grid.
template<typename GridType>
inline typename std::enable_if<mask_internal::Traits<GridType>::isBool,
    typename mask_internal::Traits<GridType>::BoolGridPtrType>::type
doInteriorMask(const GridType& grid, const double /*isovalue*/)
{
    // If the input grid is already boolean, return a copy of it.
    return grid.deepCopy();
}
/// @private
// Non-boolean grids: produce a mask of the level-set interior when the grid
// is a level set, otherwise a mask of the grid's active voxel topology.
template<typename GridType>
inline typename std::enable_if<!(mask_internal::Traits<GridType>::isBool),
    typename mask_internal::Traits<GridType>::BoolGridPtrType>::type
doInteriorMask(const GridType& grid, const double isovalue)
{
    using MaskGridT = typename mask_internal::Traits<GridType>::BoolGridType;

    // Level-set case (returns an empty pointer if the grid is not a level set).
    auto interior = doLevelSetInteriorMask(grid, isovalue);
    if (interior) return interior;

    // Generic case: copy the active topology into a boolean grid that shares
    // the input grid's transform.
    auto mask = MaskGridT::create(/*background=*/false);
    mask->setTransform(grid.transform().copy());
    mask->topologyUnion(grid);
    return mask;
}
} // namespace mask_internal
template<typename GridType>
inline typename GridType::template ValueConverter<bool>::Type::Ptr
interiorMask(const GridType& grid, const double isovalue)
{
    // Dispatch on the grid's value type: boolean grids are deep-copied,
    // floating-point level sets are masked by their interior, and all other
    // grids are masked by their active voxel topology.
    return mask_internal::doInteriorMask(grid, isovalue);
}
////////////////////////////////////////
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_MASK_HAS_BEEN_INCLUDED
| 4,060 | C | 31.75 | 92 | 0.72734 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/PointAdvect.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @author Ken Museth, D.J. Hill (openvdb port, added staggered grid support)
///
/// @file tools/PointAdvect.h
///
/// @brief Class PointAdvect advects points (with position) in a static velocity field
#ifndef OPENVDB_TOOLS_POINT_ADVECT_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_POINT_ADVECT_HAS_BEEN_INCLUDED
#include <openvdb/openvdb.h>
#include <openvdb/math/Math.h> // min
#include <openvdb/Types.h> // Vec3 types and version number
#include <openvdb/Grid.h> // grid
#include <openvdb/util/NullInterrupter.h>
#include "Interpolation.h" // sampling
#include "VelocityFields.h" // VelocityIntegrator
#include <tbb/blocked_range.h> // threading
#include <tbb/parallel_for.h> // threading
#include <tbb/task.h> // for cancel
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// Class that holds a Vec3 grid, to be interpreted as the closest point to a constraint
/// surface. Supports a method to allow a point to be projected onto the closest point
/// on the constraint surface. Uses Caching.
template<typename CptGridT = Vec3fGrid>
class ClosestPointProjector
{
public:
    using CptGridType = CptGridT;
    using CptAccessor = typename CptGridType::ConstAccessor;
    using CptValueType = typename CptGridType::ValueType;

    /// @brief Default constructor.
    /// @warning The resulting projector references no grid (the grid pointer
    /// is null), so it must not be copied or used for projection until it has
    /// been replaced by a fully constructed projector.
    ClosestPointProjector():
        mCptGrid(nullptr), // was left uninitialized; null makes misuse deterministic
        mCptIterations(0)
    {
    }
    /// @param cptGrid closest-point-transform grid (world-space closest points;
    ///        must outlive this object)
    /// @param n number of projection iterations used to reduce sampling error
    ClosestPointProjector(const CptGridType& cptGrid, int n):
        mCptGrid(&cptGrid),
        mCptAccessor(cptGrid.getAccessor()),
        mCptIterations(n)
    {
    }
    /// @brief Copy constructor; the copy gets its own accessor into the shared grid.
    ClosestPointProjector(const ClosestPointProjector &other):
        mCptGrid(other.mCptGrid),
        mCptAccessor(mCptGrid->getAccessor()),
        mCptIterations(other.mCptIterations)
    {
    }

    void setConstraintIterations(unsigned int cptIterations) { mCptIterations = cptIterations; }
    unsigned int numIterations() { return mCptIterations; }

    /// @brief Project the world-space point @a W onto the constraint surface in place.
    template <typename LocationType>
    inline void projectToConstraintSurface(LocationType& W) const
    {
        /// Entries in the CPT tree are the closest point to the constraint surface.
        /// The interpolation step in sample introduces error so that the result
        /// of a single sample may not lie exactly on the surface. The iterations
        /// in the loop exist to minimize this error.
        CptValueType result(W[0], W[1], W[2]);
        for (unsigned int i = 0; i < mCptIterations; ++i) {
            const Vec3R location = mCptGrid->worldToIndex(Vec3R(result[0], result[1], result[2]));
            BoxSampler::sample<CptAccessor>(mCptAccessor, location, result);
        }
        W[0] = result[0];
        W[1] = result[1];
        W[2] = result[2];
    }

private:
    const CptGridType* mCptGrid; // Closest-Point-Transform vector field (not owned)
    CptAccessor mCptAccessor;    // cached accessor into mCptGrid
    unsigned int mCptIterations; // number of projection iterations
};// end of ClosestPointProjector class
////////////////////////////////////////
/// Performs passive or constrained advection of points in a velocity field
/// represented by an OpenVDB grid and an optional closest-point-transform (CPT)
/// represented in another OpenVDB grid. Note the CPT is assumed to be
/// in world coordinates and NOT index coordinates!
/// Supports both collocated velocity grids and staggered velocity grids
///
/// The @c PointListT template argument refers to any class with the following
/// interface (e.g., std::vector<openvdb::Vec3f>):
/// @code
/// class PointList {
/// ...
/// public:
/// using value_type = internal_vector3_type; // must support [] component access
/// openvdb::Index size() const; // number of points in list
/// value_type& operator[](int n); // world space position of nth point
/// };
/// @endcode
///
/// @note All methods (except size) are assumed to be thread-safe and
/// the positions are returned as non-const references since the
/// advection method needs to modify them!
template<typename GridT = Vec3fGrid,
typename PointListT = std::vector<typename GridT::ValueType>,
bool StaggeredVelocity = false,
typename InterrupterType = util::NullInterrupter>
class PointAdvect
{
public:
using GridType = GridT;
using PointListType = PointListT;
using LocationType = typename PointListT::value_type;
using VelocityFieldIntegrator = VelocityIntegrator<GridT, StaggeredVelocity>;
PointAdvect(const GridT& velGrid, InterrupterType* interrupter = nullptr):
mVelGrid(&velGrid),
mPoints(nullptr),
mIntegrationOrder(1),
mThreaded(true),
mInterrupter(interrupter)
{
}
PointAdvect(const PointAdvect &other) :
mVelGrid(other.mVelGrid),
mPoints(other.mPoints),
mDt(other.mDt),
mAdvIterations(other.mAdvIterations),
mIntegrationOrder(other.mIntegrationOrder),
mThreaded(other.mThreaded),
mInterrupter(other.mInterrupter)
{
}
virtual ~PointAdvect()
{
}
/// If the order of the integration is set to zero no advection is performed
bool earlyOut() const { return (mIntegrationOrder==0);}
/// get & set
void setThreaded(bool threaded) { mThreaded = threaded; }
bool getThreaded() { return mThreaded; }
void setIntegrationOrder(unsigned int order) {mIntegrationOrder = order;}
/// Constrained advection of a list of points over a time = dt * advIterations
void advect(PointListT& points, float dt, unsigned int advIterations = 1)
{
if (this->earlyOut()) return; // nothing to do!
mPoints = &points;
mDt = dt;
mAdvIterations = advIterations;
if (mInterrupter) mInterrupter->start("Advecting points by OpenVDB velocity field: ");
if (mThreaded) {
tbb::parallel_for(tbb::blocked_range<size_t>(0, mPoints->size()), *this);
} else {
(*this)(tbb::blocked_range<size_t>(0, mPoints->size()));
}
if (mInterrupter) mInterrupter->end();
}
/// Never call this method directly - it is use by TBB and has to be public!
void operator() (const tbb::blocked_range<size_t> &range) const
{
if (mInterrupter && mInterrupter->wasInterrupted()) {
tbb::task::self().cancel_group_execution();
}
VelocityFieldIntegrator velField(*mVelGrid);
switch (mIntegrationOrder) {
case 1:
{
for (size_t n = range.begin(); n != range.end(); ++n) {
LocationType& X0 = (*mPoints)[n];
// loop over number of time steps
for (unsigned int i = 0; i < mAdvIterations; ++i) {
velField.template rungeKutta<1>(mDt, X0);
}
}
}
break;
case 2:
{
for (size_t n = range.begin(); n != range.end(); ++n) {
LocationType& X0 = (*mPoints)[n];
// loop over number of time steps
for (unsigned int i = 0; i < mAdvIterations; ++i) {
velField.template rungeKutta<2>(mDt, X0);
}
}
}
break;
case 3:
{
for (size_t n = range.begin(); n != range.end(); ++n) {
LocationType& X0 = (*mPoints)[n];
// loop over number of time steps
for (unsigned int i = 0; i < mAdvIterations; ++i) {
velField.template rungeKutta<3>(mDt, X0);
}
}
}
break;
case 4:
{
for (size_t n = range.begin(); n != range.end(); ++n) {
LocationType& X0 = (*mPoints)[n];
// loop over number of time steps
for (unsigned int i = 0; i < mAdvIterations; ++i) {
velField.template rungeKutta<4>(mDt, X0);
}
}
}
break;
}
}
private:
// the velocity field
const GridType* mVelGrid;
// vertex list of all the points
PointListT* mPoints;
// time integration parameters
float mDt; // time step
unsigned int mAdvIterations; // number of time steps
unsigned int mIntegrationOrder;
// operational parameters
bool mThreaded;
InterrupterType* mInterrupter;
};//end of PointAdvect class
template<typename GridT = Vec3fGrid,
typename PointListT = std::vector<typename GridT::ValueType>,
bool StaggeredVelocity = false,
typename CptGridType = GridT,
typename InterrupterType = util::NullInterrupter>
class ConstrainedPointAdvect
{
public:
using GridType = GridT;
using LocationType = typename PointListT::value_type;
using VelocityIntegratorType = VelocityIntegrator<GridT, StaggeredVelocity>;
using ClosestPointProjectorType = ClosestPointProjector<CptGridType>;
using PointListType = PointListT;
ConstrainedPointAdvect(const GridType& velGrid,
const GridType& cptGrid, int cptn, InterrupterType* interrupter = nullptr):
mVelGrid(&velGrid),
mCptGrid(&cptGrid),
mCptIter(cptn),
mInterrupter(interrupter)
{
}
ConstrainedPointAdvect(const ConstrainedPointAdvect& other):
mVelGrid(other.mVelGrid),
mCptGrid(other.mCptGrid),
mCptIter(other.mCptIter),
mPoints(other.mPoints),
mDt(other.mDt),
mAdvIterations(other.mAdvIterations),
mIntegrationOrder(other.mIntegrationOrder),
mThreaded(other.mThreaded),
mInterrupter(other.mInterrupter)
{
}
virtual ~ConstrainedPointAdvect(){}
void setConstraintIterations(unsigned int cptIter) {mCptIter = cptIter;}
void setIntegrationOrder(unsigned int order) {mIntegrationOrder = order;}
void setThreaded(bool threaded) { mThreaded = threaded; }
bool getThreaded() { return mThreaded; }
/// Constrained Advection a list of points over a time = dt * advIterations
void advect(PointListT& points, float dt, unsigned int advIterations = 1)
{
mPoints = &points;
mDt = dt;
if (mIntegrationOrder==0 && mCptIter == 0) {
return; // nothing to do!
}
(mIntegrationOrder>0) ? mAdvIterations = advIterations : mAdvIterations = 1;
if (mInterrupter) mInterrupter->start("Advecting points by OpenVDB velocity field: ");
const size_t N = mPoints->size();
if (mThreaded) {
tbb::parallel_for(tbb::blocked_range<size_t>(0, N), *this);
} else {
(*this)(tbb::blocked_range<size_t>(0, N));
}
if (mInterrupter) mInterrupter->end();
}
/// Never call this method directly - it is use by TBB and has to be public!
void operator() (const tbb::blocked_range<size_t> &range) const
{
if (mInterrupter && mInterrupter->wasInterrupted()) {
tbb::task::self().cancel_group_execution();
}
VelocityIntegratorType velField(*mVelGrid);
ClosestPointProjectorType cptField(*mCptGrid, mCptIter);
switch (mIntegrationOrder) {
case 0://pure CPT projection
{
for (size_t n = range.begin(); n != range.end(); ++n) {
LocationType& X0 = (*mPoints)[n];
for (unsigned int i = 0; i < mAdvIterations; ++i) {
cptField.projectToConstraintSurface(X0);
}
}
}
break;
case 1://1'th order advection and CPT projection
{
for (size_t n = range.begin(); n != range.end(); ++n) {
LocationType& X0 = (*mPoints)[n];
for (unsigned int i = 0; i < mAdvIterations; ++i) {
velField.template rungeKutta<1>(mDt, X0);
cptField.projectToConstraintSurface(X0);
}
}
}
break;
case 2://2'nd order advection and CPT projection
{
for (size_t n = range.begin(); n != range.end(); ++n) {
LocationType& X0 = (*mPoints)[n];
for (unsigned int i = 0; i < mAdvIterations; ++i) {
velField.template rungeKutta<2>(mDt, X0);
cptField.projectToConstraintSurface(X0);
}
}
}
break;
case 3://3'rd order advection and CPT projection
{
for (size_t n = range.begin(); n != range.end(); ++n) {
LocationType& X0 = (*mPoints)[n];
for (unsigned int i = 0; i < mAdvIterations; ++i) {
velField.template rungeKutta<3>(mDt, X0);
cptField.projectToConstraintSurface(X0);
}
}
}
break;
case 4://4'th order advection and CPT projection
{
for (size_t n = range.begin(); n != range.end(); ++n) {
LocationType& X0 = (*mPoints)[n];
for (unsigned int i = 0; i < mAdvIterations; ++i) {
velField.template rungeKutta<4>(mDt, X0);
cptField.projectToConstraintSurface(X0);
}
}
}
break;
}
}
private:
const GridType* mVelGrid; // the velocity field
const GridType* mCptGrid;
int mCptIter;
PointListT* mPoints; // vertex list of all the points
// time integration parameters
float mDt; // time step
unsigned int mAdvIterations; // number of time steps
unsigned int mIntegrationOrder; // order of Runge-Kutta integration
// operational parameters
bool mThreaded;
InterrupterType* mInterrupter;
};// end of ConstrainedPointAdvect class
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_POINT_ADVECT_HAS_BEEN_INCLUDED
| 14,639 | C | 36.15736 | 98 | 0.574151 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Dense.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file Dense.h
///
/// @brief This file defines a simple dense grid and efficient
/// converters to and from VDB grids.
#ifndef OPENVDB_TOOLS_DENSE_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_DENSE_HAS_BEEN_INCLUDED
#include <openvdb/Types.h>
#include <openvdb/Grid.h>
#include <openvdb/tree/ValueAccessor.h>
#include <openvdb/Exceptions.h>
#include <openvdb/util/Formats.h>
#include "Prune.h"
#include <tbb/parallel_for.h>
#include <iostream>
#include <memory>
#include <string>
#include <utility> // for std::pair
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Populate a dense grid with the values of voxels from a sparse grid,
/// where the sparse grid intersects the dense grid.
/// @param sparse an OpenVDB grid or tree from which to copy values
/// @param dense the dense grid into which to copy values
/// @param serial if false, process voxels in parallel
template<typename DenseT, typename GridOrTreeT>
void
copyToDense(
const GridOrTreeT& sparse,
DenseT& dense,
bool serial = false);
/// @brief Populate a sparse grid with the values of all of the voxels of a dense grid.
/// @param dense the dense grid from which to copy values
/// @param sparse an OpenVDB grid or tree into which to copy values
/// @param tolerance values in the dense grid that are within this tolerance of the sparse
/// grid's background value become inactive background voxels or tiles in the sparse grid
/// @param serial if false, process voxels in parallel
template<typename DenseT, typename GridOrTreeT>
void
copyFromDense(
const DenseT& dense,
GridOrTreeT& sparse,
const typename GridOrTreeT::ValueType& tolerance,
bool serial = false);
////////////////////////////////////////
/// We currently support the following two 3D memory layouts for dense
/// volumes: XYZ, i.e. x is the fastest moving index, and ZYX, i.e. z
/// is the fastest moving index. The ZYX memory layout leads to nested
/// for-loops of the order x, y, z, which we find to be the most
/// intuitive. Hence, ZYX is the layout used throughout VDB. However,
/// other data structures, e.g. Houdini and Maya, employ the XYZ
/// layout. Clearly a dense volume with the ZYX layout converts more
/// efficiently to a VDB, but we support both for convenience.
enum MemoryLayout { LayoutXYZ, LayoutZYX };
/// @brief Base class for Dense which is defined below.
/// @note The constructor of this class is protected to prevent direct
/// instantiation.
template<typename ValueT, MemoryLayout Layout> class DenseBase;
/// @brief Partial template specialization of DenseBase for the ZYX memory
/// layout, in which z is the fastest-moving index.
/// @note ZYX is the memory-layout in VDB. It leads to nested
/// for-loops of the order x, y, z which we find to be the most intuitive.
template<typename ValueT>
class DenseBase<ValueT, LayoutZYX>
{
public:
    /// @brief Return the linear offset into this grid's value array given by
    /// unsigned coordinates (i, j, k), i.e., coordinates relative to
    /// the origin of this grid's bounding box.
    ///
    /// @warning The input coordinates are assumed to be relative to
    /// the grid's origin, i.e. minimum of its index bounding box!
    inline size_t coordToOffset(size_t i, size_t j, size_t k) const { return i*mX + j*mY + k; }

    /// @brief Return the local coordinate corresponding to the specified linear offset.
    ///
    /// @warning The returned coordinate is relative to the origin of this
    /// grid's bounding box so add dense.origin() to get absolute coordinates.
    inline Coord offsetToLocalCoord(size_t n) const
    {
        // Invert coordToOffset: peel off the x index (stride mX), then the
        // y index (stride mY); the remainder is the z index.
        const size_t x = n / mX;
        n -= mX*x;
        const size_t y = n / mY;
        return Coord(Coord::ValueType(x), Coord::ValueType(y), Coord::ValueType(n - mY*y));
    }

    /// @brief Return the stride of the array in the x direction ( = dimY*dimZ).
    /// @note This method is required by both CopyToDense and CopyFromDense.
    inline size_t xStride() const { return mX; }

    /// @brief Return the stride of the array in the y direction ( = dimZ).
    /// @note This method is required by both CopyToDense and CopyFromDense.
    inline size_t yStride() const { return mY; }

    /// @brief Return the stride of the array in the z direction ( = 1).
    /// @note This method is required by both CopyToDense and CopyFromDense.
    static size_t zStride() { return 1; }

protected:
    /// Protected constructor so as to prevent direct instantiation.
    /// @note mY is declared before mX, so mY is initialized first;
    /// mX = mY * dimY depends on that order.
    DenseBase(const CoordBBox& bbox) : mBBox(bbox), mY(bbox.dim()[2]), mX(mY*bbox.dim()[1]) {}

    const CoordBBox mBBox;//signed coordinates of the domain represented by the grid
    const size_t mY, mX;//strides in the y and x direction
};// end of DenseBase<ValueT, LayoutZYX>
/// @brief Partial template specialization of DenseBase.
/// @note This is the memory layout employed by Houdini and Maya: x is the
/// fastest-moving index, so the natural loop nesting order is z, y, x.
template<typename ValueT>
class DenseBase<ValueT, LayoutXYZ>
{
public:
    /// @brief Map unsigned coordinates (i, j, k), relative to the origin of
    /// this grid's bounding box, to a linear offset into the value array.
    /// @warning The input coordinates are assumed to be relative to the
    /// grid's origin, i.e. the minimum of its index bounding box!
    inline size_t coordToOffset(size_t i, size_t j, size_t k) const
    {
        return i + j*mY + k*mZ;
    }

    /// @brief Map a linear offset back to the corresponding local coordinate.
    /// @warning The returned coordinate is relative to the origin of this
    /// grid's bounding box, so add dense.origin() to get absolute coordinates.
    inline Coord offsetToLocalCoord(size_t n) const
    {
        const size_t k = n / mZ;
        n -= k*mZ;
        const size_t j = n / mY;
        const size_t i = n - j*mY;
        return Coord(Coord::ValueType(i), Coord::ValueType(j), Coord::ValueType(k));
    }

    /// @brief Return the stride of the array in the x direction ( = 1).
    /// @note This method is required by both CopyToDense and CopyFromDense.
    static size_t xStride() { return 1; }

    /// @brief Return the stride of the array in the y direction ( = dimX).
    /// @note This method is required by both CopyToDense and CopyFromDense.
    inline size_t yStride() const { return mY; }

    /// @brief Return the stride of the array in the z direction ( = dimX*dimY).
    /// @note This method is required by both CopyToDense and CopyFromDense.
    inline size_t zStride() const { return mZ; }

protected:
    /// Protected constructor so as to prevent direct instantiation
    DenseBase(const CoordBBox& bbox) : mBBox(bbox), mY(bbox.dim()[0]), mZ(mY*bbox.dim()[1]) {}

    const CoordBBox mBBox; // signed coordinates of the domain represented by the grid
    const size_t mY, mZ;   // strides in the y and z directions
};// end of DenseBase<ValueT, LayoutXYZ>
/// @brief Dense is a simple dense grid API used by the CopyToDense and
/// CopyFromDense classes defined below.
/// @details Use the Dense class to efficiently produce a dense in-memory
/// representation of an OpenVDB grid. However, be aware that a dense grid
/// could have a memory footprint that is orders of magnitude larger than
/// the sparse grid from which it originates.
///
/// @note This class can be used as a simple wrapper for existing dense grid
/// classes if they provide access to the raw data array.
/// @note This implementation allows for the 3D memory layout to be
/// defined by the MemoryLayout template parameter (see above for definition).
/// The default memory layout is ZYX since that's the layout used by OpenVDB grids.
template<typename ValueT, MemoryLayout Layout = LayoutZYX>
class Dense : public DenseBase<ValueT, Layout>
{
public:
    using ValueType = ValueT;
    using BaseT = DenseBase<ValueT, Layout>;
    using Ptr = SharedPtr<Dense>;
    using ConstPtr = SharedPtr<const Dense>;

    /// @brief Construct a dense grid with a given range of coordinates.
    ///
    /// @param bbox the bounding box of the (signed) coordinate range of this grid
    /// @throw ValueError if the bounding box is empty.
    /// @note The min and max coordinates of the bounding box are inclusive.
    Dense(const CoordBBox& bbox) : BaseT(bbox) { this->init(); }

    /// @brief Construct a dense grid with a given range of coordinates and initial value
    ///
    /// @param bbox the bounding box of the (signed) coordinate range of this grid
    /// @param value the initial value of the grid.
    /// @throw ValueError if the bounding box is empty.
    /// @note The min and max coordinates of the bounding box are inclusive.
    Dense(const CoordBBox& bbox, const ValueT& value) : BaseT(bbox)
    {
        this->init();
        this->fill(value);
    }

    /// @brief Construct a dense grid that wraps an external array.
    ///
    /// @param bbox the bounding box of the (signed) coordinate range of this grid
    /// @param data a raw C-style array whose size is commensurate with
    ///     the coordinate domain of @a bbox
    ///
    /// @note The data array is assumed to have a stride of one in the @e z direction.
    /// @note This grid does @e not take ownership of @a data (the internal
    ///     array stays unallocated), so the caller must keep the external
    ///     array alive for the lifetime of this grid.
    /// @throw ValueError if the bounding box is empty.
    /// @note The min and max coordinates of the bounding box are inclusive.
    Dense(const CoordBBox& bbox, ValueT* data) : BaseT(bbox), mData(data)
    {
        if (BaseT::mBBox.empty()) {
            OPENVDB_THROW(ValueError, "can't construct a dense grid with an empty bounding box");
        }
    }

    /// @brief Construct a dense grid with a given origin and dimensions.
    ///
    /// @param dim the desired dimensions of the grid
    /// @param min the signed coordinates of the first voxel in the dense grid
    /// @throw ValueError if any of the dimensions are zero.
    /// @note The @a min coordinate is inclusive, and the max coordinate will be
    /// @a min + @a dim - 1.
    Dense(const Coord& dim, const Coord& min = Coord(0))
        : BaseT(CoordBBox(min, min+dim.offsetBy(-1)))
    {
        this->init();
    }

    /// @brief Return the memory layout for this grid (see above for definitions).
    static MemoryLayout memoryLayout() { return Layout; }

    /// @brief Return a raw pointer to this grid's value array.
    /// @note This method is required by CopyToDense.
    inline ValueT* data() { return mData; }

    /// @brief Return a raw pointer to this grid's value array.
    /// @note This method is required by CopyFromDense.
    inline const ValueT* data() const { return mData; }

    /// @brief Return the bounding box of the signed index domain of this grid.
    /// @note This method is required by both CopyToDense and CopyFromDense.
    inline const CoordBBox& bbox() const { return BaseT::mBBox; }

    /// Return the grid's origin in index coordinates.
    inline const Coord& origin() const { return BaseT::mBBox.min(); }

    /// @brief Return the number of voxels contained in this grid.
    inline Index64 valueCount() const { return BaseT::mBBox.volume(); }

    /// @brief Set the value of the voxel at the given array offset.
    inline void setValue(size_t offset, const ValueT& value) { mData[offset] = value; }

    /// @brief Return a const reference to the value of the voxel at the given array offset.
    const ValueT& getValue(size_t offset) const { return mData[offset]; }

    /// @brief Return a non-const reference to the value of the voxel at the given array offset.
    ValueT& getValue(size_t offset) { return mData[offset]; }

    /// @brief Set the value of the voxel at unsigned index coordinates (i, j, k).
    /// @note This is somewhat slower than using an array offset.
    inline void setValue(size_t i, size_t j, size_t k, const ValueT& value)
    {
        mData[BaseT::coordToOffset(i,j,k)] = value;
    }

    /// @brief Return a const reference to the value of the voxel
    /// at unsigned index coordinates (i, j, k).
    /// @note This is somewhat slower than using an array offset.
    inline const ValueT& getValue(size_t i, size_t j, size_t k) const
    {
        return mData[BaseT::coordToOffset(i,j,k)];
    }

    /// @brief Return a non-const reference to the value of the voxel
    /// at unsigned index coordinates (i, j, k).
    /// @note This is somewhat slower than using an array offset.
    inline ValueT& getValue(size_t i, size_t j, size_t k)
    {
        return mData[BaseT::coordToOffset(i,j,k)];
    }

    /// @brief Set the value of the voxel at the given signed coordinates.
    /// @note This is slower than using either an array offset or unsigned index coordinates.
    inline void setValue(const Coord& xyz, const ValueT& value)
    {
        mData[this->coordToOffset(xyz)] = value;
    }

    /// @brief Return a const reference to the value of the voxel at the given signed coordinates.
    /// @note This is slower than using either an array offset or unsigned index coordinates.
    inline const ValueT& getValue(const Coord& xyz) const
    {
        return mData[this->coordToOffset(xyz)];
    }

    /// @brief Return a non-const reference to the value of the voxel
    /// at the given signed coordinates.
    /// @note This is slower than using either an array offset or unsigned index coordinates.
    inline ValueT& getValue(const Coord& xyz)
    {
        return mData[this->coordToOffset(xyz)];
    }

    /// @brief Fill this grid with a constant value.
    /// @note Performs a single linear sweep over all valueCount() values.
    inline void fill(const ValueT& value)
    {
        size_t size = this->valueCount();
        ValueT* a = mData;
        while(size--) *a++ = value;
    }

    /// @brief Return the linear offset into this grid's value array given by
    /// the specified signed coordinates, i.e., coordinates in the space of
    /// this grid's bounding box.
    ///
    /// @note This method reflects the fact that we assume the same
    /// layout of values as an OpenVDB grid, i.e., the fastest coordinate is @e z.
    inline size_t coordToOffset(const Coord& xyz) const
    {
        assert(BaseT::mBBox.isInside(xyz));
        // Translate to unsigned bbox-relative coordinates before delegating
        // to the layout-specific base-class implementation.
        return BaseT::coordToOffset(size_t(xyz[0]-BaseT::mBBox.min()[0]),
                                    size_t(xyz[1]-BaseT::mBBox.min()[1]),
                                    size_t(xyz[2]-BaseT::mBBox.min()[2]));
    }

    /// @brief Return the global coordinate corresponding to the specified linear offset.
    inline Coord offsetToCoord(size_t n) const
    {
        return this->offsetToLocalCoord(n) + BaseT::mBBox.min();
    }

    /// @brief Return the memory footprint of this Dense grid in bytes.
    /// @note The voxel buffer is counted even when this grid merely wraps an
    /// external array that it does not own.
    inline Index64 memUsage() const
    {
        return sizeof(*this) + BaseT::mBBox.volume() * sizeof(ValueType);
    }

    /// @brief Output a human-readable description of this grid to the
    /// specified stream.
    void print(const std::string& name = "", std::ostream& os = std::cout) const
    {
        const Coord dim = BaseT::mBBox.dim();
        os << "Dense Grid";
        if (!name.empty()) os << " \"" << name << "\"";
        util::printBytes(os, this->memUsage(), ":\n Memory footprint: ");
        os << " Dimensions of grid : " << dim[0] << " x " << dim[1] << " x " << dim[2] << "\n";
        os << " Number of voxels: " << util::formattedInt(this->valueCount()) << "\n";
        os << " Bounding box of voxels: " << BaseT::mBBox << "\n";
        os << " Memory layout: " << (Layout == LayoutZYX ? "ZYX (" : "XYZ (dis")
            << "similar to VDB)\n";
    }

private:
    /// @brief Private method to initialize the dense value array.
    /// @details Allocates an internal, owned buffer sized to the bounding
    /// box's volume and points mData at it.
    /// @throw ValueError if the bounding box is empty.
    void init()
    {
        if (BaseT::mBBox.empty()) {
            OPENVDB_THROW(ValueError, "can't construct a dense grid with an empty bounding box");
        }
        mArray.reset(new ValueT[BaseT::mBBox.volume()]);
        mData = mArray.get();
    }

    std::unique_ptr<ValueT[]> mArray; // owned storage; null when wrapping an external array
    ValueT* mData;//raw c-style pointer to values
};// end of Dense
////////////////////////////////////////

/// @brief Copy an OpenVDB tree into an existing dense grid.
///
/// @note Only voxels that intersect the dense grid's bounding box are copied
/// from the OpenVDB tree. But both active and inactive voxels are copied,
/// so all existing values in the dense grid are overwritten, regardless of
/// the OpenVDB tree's topology.
template<typename _TreeT, typename _DenseT = Dense<typename _TreeT::ValueType> >
class CopyToDense
{
public:
    using DenseT = _DenseT;
    using TreeT = _TreeT;
    using ValueT = typename TreeT::ValueType;

    /// @brief Bind the source tree and the destination dense grid.
    CopyToDense(const TreeT& tree, DenseT& dense)
        : mRoot(&(tree.root()))
        , mDense(&dense)
    {
    }

    /// @brief Perform the copy, multi-threaded unless @a serial is true.
    void copy(bool serial = false) const
    {
        if (!serial) {
            // Each worker copies a sub-box of the dense grid's bounding box.
            tbb::parallel_for(mDense->bbox(), *this);
        } else {
            mRoot->copyToDense(mDense->bbox(), *mDense);
        }
    }

    /// @brief Public method called by tbb::parallel_for
    void operator()(const CoordBBox& bbox) const { mRoot->copyToDense(bbox, *mDense); }

private:
    const typename TreeT::RootNodeType* mRoot; // root node of the source tree
    DenseT* mDense;                            // destination dense grid
};// CopyToDense
// Convenient wrapper function for the CopyToDense class
template<typename DenseT, typename GridOrTreeT>
void
copyToDense(const GridOrTreeT& sparse, DenseT& dense, bool serial)
{
using Adapter = TreeAdapter<GridOrTreeT>;
using TreeT = typename Adapter::TreeType;
CopyToDense<TreeT, DenseT> op(Adapter::constTree(sparse), dense);
op.copy(serial);
}
////////////////////////////////////////

/// @brief Copy the values from a dense grid into an OpenVDB tree.
///
/// @details Values in the dense grid that are within a tolerance of
/// the background value are truncated to inactive background voxels or tiles.
/// This allows the tree to form a sparse representation of the dense grid.
///
/// @note Since this class allocates leaf nodes concurrently it is recommended
/// to use a scalable implementation of @c new like the one provided by TBB,
/// rather than the mutex-protected standard library @c new.
template<typename _TreeT, typename _DenseT = Dense<typename _TreeT::ValueType> >
class CopyFromDense
{
public:
    using DenseT = _DenseT;
    using TreeT = _TreeT;
    using ValueT = typename TreeT::ValueType;
    using LeafT = typename TreeT::LeafNodeType;
    using AccessorT = tree::ValueAccessor<TreeT>;

    /// @brief Bind the source dense grid and destination tree.
    /// @param dense      source dense grid
    /// @param tree       destination sparse tree
    /// @param tolerance  values within this distance of the tree's background
    ///                   are sparsified into inactive background voxels/tiles
    /// @note An accessor is only created if the target tree is non-empty,
    /// in which case existing leaf nodes/tiles must be taken into account.
    CopyFromDense(const DenseT& dense, TreeT& tree, const ValueT& tolerance)
        : mDense(&dense),
          mTree(&tree),
          mBlocks(nullptr),
          mTolerance(tolerance),
          mAccessor(tree.empty() ? nullptr : new AccessorT(tree))
    {
    }
    /// @brief Copy constructor used by tbb::parallel_for to split the functor.
    /// @note The block list pointer is shared with @a other; each copy gets
    /// its own value accessor because accessors are not shared across threads.
    CopyFromDense(const CopyFromDense& other)
        : mDense(other.mDense),
          mTree(other.mTree),
          mBlocks(other.mBlocks),
          mTolerance(other.mTolerance),
          mAccessor(other.mAccessor.get() == nullptr ? nullptr : new AccessorT(*mTree))
    {
    }

    /// @brief Copy values from the dense grid to the sparse tree.
    /// @param serial  if true, process all blocks on the calling thread
    /// @note NOTE(review): mBlocks is a raw owning pointer allocated here and
    /// freed at the end of this method; an exception thrown in between would
    /// leak it — confirm this is acceptable or guard with RAII.
    void copy(bool serial = false)
    {
        mBlocks = new std::vector<Block>();
        const CoordBBox& bbox = mDense->bbox();
        // Pre-process: Construct a list of blocks aligned with (potential) leaf nodes
        for (CoordBBox sub=bbox; sub.min()[0] <= bbox.max()[0]; sub.min()[0] = sub.max()[0] + 1) {
            for (sub.min()[1] = bbox.min()[1]; sub.min()[1] <= bbox.max()[1];
                 sub.min()[1] = sub.max()[1] + 1)
            {
                for (sub.min()[2] = bbox.min()[2]; sub.min()[2] <= bbox.max()[2];
                     sub.min()[2] = sub.max()[2] + 1)
                {
                    // Clip each block to the bbox and align it with the
                    // leaf-node lattice (LeafT::DIM is a power of two).
                    sub.max() = Coord::minComponent(bbox.max(),
                        (sub.min()&(~(LeafT::DIM-1u))).offsetBy(LeafT::DIM-1u));
                    mBlocks->push_back(Block(sub));
                }
            }
        }
        // Multi-threaded process: Convert dense grid into leaf nodes and tiles
        if (serial) {
            (*this)(tbb::blocked_range<size_t>(0, mBlocks->size()));
        } else {
            tbb::parallel_for(tbb::blocked_range<size_t>(0, mBlocks->size()), *this);
        }
        // Post-process: Insert leaf nodes and tiles into the tree, and prune the tiles only!
        tree::ValueAccessor<TreeT> acc(*mTree);
        for (size_t m=0, size = mBlocks->size(); m<size; ++m) {
            Block& block = (*mBlocks)[m];
            if (block.leaf) {
                acc.addLeaf(block.leaf);
            } else if (block.tile.second) {//only background tiles are inactive
                acc.addTile(1, block.bbox.min(), block.tile.first, true);//leaf tile
            }
        }
        delete mBlocks;
        mBlocks = nullptr;

        tools::pruneTiles(*mTree, mTolerance);//multi-threaded
    }

    /// @brief Public method called by tbb::parallel_for
    /// @warning Never call this method directly!
    /// @note Declared const to satisfy tbb, but it mutates the Block entries
    /// in the shared *mBlocks vector; disjoint ranges make this race-free.
    void operator()(const tbb::blocked_range<size_t> &r) const
    {
        assert(mBlocks);
        // Reuse one scratch leaf; it is only handed off to a block (and a new
        // one allocated) when the block's values turn out to be non-constant.
        LeafT* leaf = new LeafT();

        for (size_t m=r.begin(), n=0, end = r.end(); m != end; ++m, ++n) {

            Block& block = (*mBlocks)[m];
            const CoordBBox &bbox = block.bbox;

            if (mAccessor.get() == nullptr) {//i.e. empty target tree
                leaf->fill(mTree->background(), false);
            } else {//account for existing leaf nodes in the target tree
                if (const LeafT* target = mAccessor->probeConstLeaf(bbox.min())) {
                    (*leaf) = (*target);
                } else {
                    ValueT value = zeroVal<ValueT>();
                    bool state = mAccessor->probeValue(bbox.min(), value);
                    leaf->fill(value, state);
                }
            }

            leaf->copyFromDense(bbox, *mDense, mTree->background(), mTolerance);

            if (!leaf->isConstant(block.tile.first, block.tile.second, mTolerance)) {
                // Non-constant block: keep the leaf and allocate a fresh scratch leaf.
                leaf->setOrigin(bbox.min() & (~(LeafT::DIM - 1)));
                block.leaf = leaf;
                leaf = new LeafT();
            }
        }// loop over blocks

        delete leaf;
    }

private:
    // One leaf-aligned sub-box of the dense grid; holds either a leaf node
    // (non-constant values) or a constant tile value + active state.
    struct Block {
        CoordBBox bbox;
        LeafT* leaf;
        std::pair<ValueT, bool> tile;
        Block(const CoordBBox& b) : bbox(b), leaf(nullptr) {}
    };

    const DenseT* mDense;         // source dense grid
    TreeT* mTree;                 // destination tree
    std::vector<Block>* mBlocks;  // shared block list; owned only during copy()
    ValueT mTolerance;            // sparsification tolerance
    std::unique_ptr<AccessorT> mAccessor; // per-functor accessor; null if tree was empty
};// CopyFromDense
// Convenient wrapper function for the CopyFromDense class
template<typename DenseT, typename GridOrTreeT>
void
copyFromDense(const DenseT& dense, GridOrTreeT& sparse,
const typename GridOrTreeT::ValueType& tolerance, bool serial)
{
using Adapter = TreeAdapter<GridOrTreeT>;
using TreeT = typename Adapter::TreeType;
CopyFromDense<TreeT, DenseT> op(dense, Adapter::tree(sparse), tolerance);
op.copy(serial);
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_DENSE_HAS_BEEN_INCLUDED
| 23,080 | C | 38.590051 | 99 | 0.639688 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/ParticlesToLevelSet.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @author Ken Museth
///
/// @file tools/ParticlesToLevelSet.h
///
/// @brief Rasterize particles with position, radius and velocity
/// into either a boolean mask grid or a narrow-band level set grid.
///
/// @details Optionally, arbitrary attributes on the particles can be transferred,
/// resulting in additional output grids with the same topology as the main grid.
///
/// @note Particle to level set conversion is intended to be combined with
/// some kind of surface postprocessing, using
/// @vdblink::tools::LevelSetFilter LevelSetFilter@endlink, for example.
/// Without such postprocessing the generated surface is typically too noisy and blobby.
/// However, it serves as a great and fast starting point for subsequent
/// level set surface processing and convolution.
///
/// @details For particle access, any class with the following interface may be used
/// (see the unit test or the From Particles Houdini SOP for practical examples):
/// @code
/// struct ParticleList
/// {
/// // Return the total number of particles in the list.
/// // Always required!
/// size_t size() const;
///
/// // Get the world-space position of the nth particle.
/// // Required by rasterizeSpheres().
/// void getPos(size_t n, Vec3R& xyz) const;
///
/// // Get the world-space position and radius of the nth particle.
/// // Required by rasterizeSpheres().
/// void getPosRad(size_t n, Vec3R& xyz, Real& radius) const;
///
/// // Get the world-space position, radius and velocity of the nth particle.
/// // Required by rasterizeTrails().
/// void getPosRadVel(size_t n, Vec3R& xyz, Real& radius, Vec3R& velocity) const;
///
/// // Get the value of the nth particle's user-defined attribute (of type @c AttributeType).
/// // Required only if attribute transfer is enabled in ParticlesToLevelSet.
/// void getAtt(size_t n, AttributeType& att) const;
/// };
/// @endcode
///
/// Some functions accept an interrupter argument. This refers to any class
/// with the following interface:
/// @code
/// struct Interrupter
/// {
/// void start(const char* name = nullptr) // called when computations begin
/// void end() // called when computations end
/// bool wasInterrupted(int percent=-1) // return true to abort computation
/// };
/// @endcode
///
/// The default interrupter is @vdblink::util::NullInterrupter NullInterrupter@endlink,
/// for which all calls are no-ops that incur no computational overhead.
#ifndef OPENVDB_TOOLS_PARTICLES_TO_LEVELSET_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_PARTICLES_TO_LEVELSET_HAS_BEEN_INCLUDED
#include <tbb/parallel_reduce.h>
#include <tbb/blocked_range.h>
#include <openvdb/Types.h>
#include <openvdb/Grid.h>
#include <openvdb/math/Math.h>
#include <openvdb/math/Transform.h>
#include <openvdb/tree/LeafManager.h>
#include <openvdb/util/logging.h>
#include <openvdb/util/NullInterrupter.h>
#include "Composite.h" // for csgUnion()
#include "PointPartitioner.h"
#include "Prune.h"
#include "SignedFloodFill.h"
#include <functional>
#include <iostream>
#include <type_traits>
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Populate a scalar, floating-point grid with CSG-unioned level set spheres
/// described by the given particle positions and radii.
/// @details For more control over the output, including attribute transfer,
/// use the ParticlesToLevelSet class directly.
template<typename GridT, typename ParticleListT, typename InterrupterT = util::NullInterrupter>
inline void particlesToSdf(const ParticleListT&, GridT&, InterrupterT* = nullptr);
/// @brief Populate a scalar, floating-point grid with fixed-size, CSG-unioned
/// level set spheres described by the given particle positions and the specified radius.
/// @details For more control over the output, including attribute transfer,
/// use the ParticlesToLevelSet class directly.
template<typename GridT, typename ParticleListT, typename InterrupterT = util::NullInterrupter>
inline void particlesToSdf(const ParticleListT&, GridT&, Real radius, InterrupterT* = nullptr);
/// @brief Populate a scalar, floating-point grid with CSG-unioned trails
/// of level set spheres with decreasing radius, where the starting position and radius
/// and the direction of each trail is given by particle attributes.
/// @details For more control over the output, including attribute transfer,
/// use the ParticlesToLevelSet class directly.
/// @note The @a delta parameter controls the distance between spheres in a trail.
/// Be careful not to use too small a value.
template<typename GridT, typename ParticleListT, typename InterrupterT = util::NullInterrupter>
inline void particleTrailsToSdf(const ParticleListT&, GridT&, Real delta=1, InterrupterT* =nullptr);
/// @brief Activate a boolean grid wherever it intersects the spheres
/// described by the given particle positions and radii.
/// @details For more control over the output, including attribute transfer,
/// use the ParticlesToLevelSet class directly.
template<typename GridT, typename ParticleListT, typename InterrupterT = util::NullInterrupter>
inline void particlesToMask(const ParticleListT&, GridT&, InterrupterT* = nullptr);
/// @brief Activate a boolean grid wherever it intersects the fixed-size spheres
/// described by the given particle positions and the specified radius.
/// @details For more control over the output, including attribute transfer,
/// use the ParticlesToLevelSet class directly.
template<typename GridT, typename ParticleListT, typename InterrupterT = util::NullInterrupter>
inline void particlesToMask(const ParticleListT&, GridT&, Real radius, InterrupterT* = nullptr);
/// @brief Activate a boolean grid wherever it intersects trails of spheres
/// with decreasing radius, where the starting position and radius and the direction
/// of each trail is given by particle attributes.
/// @details For more control over the output, including attribute transfer,
/// use the ParticlesToLevelSet class directly.
/// @note The @a delta parameter controls the distance between spheres in a trail.
/// Be careful not to use too small a value.
template<typename GridT, typename ParticleListT, typename InterrupterT = util::NullInterrupter>
inline void particleTrailsToMask(const ParticleListT&, GridT&,Real delta=1,InterrupterT* =nullptr);
////////////////////////////////////////
namespace p2ls_internal {
// This is a simple type that combines a distance value and a particle
// attribute. It's required for attribute transfer which is performed
// in the ParticlesToLevelSet::Raster member class defined below.
/// @private
template<typename VisibleT, typename BlindT> class BlindData;
}
template<typename SdfGridT,
         typename AttributeT = void,
         typename InterrupterT = util::NullInterrupter>
class ParticlesToLevelSet
{
public:
    using DisableT = typename std::is_void<AttributeT>::type;
    using InterrupterType = InterrupterT;

    using SdfGridType = SdfGridT;
    using SdfType = typename SdfGridT::ValueType;

    // When attribute transfer is disabled (AttributeT == void), AttType is a
    // placeholder (size_t) so the dependent typedefs below remain well-formed.
    using AttType = typename std::conditional<DisableT::value, size_t, AttributeT>::type;
    using AttGridType = typename SdfGridT::template ValueConverter<AttType>::Type;

    // A bool value type means the output is an activity mask, not a level set.
    static const bool OutputIsMask = std::is_same<SdfType, bool>::value;

    /// @brief Constructor using an existing boolean or narrow-band level set grid
    ///
    /// @param grid      grid into which particles are rasterized
    /// @param interrupt callback to interrupt a long-running process
    ///
    /// @details If the input grid is already populated with signed distances,
    /// particles are unioned onto the existing level set surface.
    ///
    /// @details The width in voxel units of the generated narrow band level set
    /// is given by 2×<I>background</I>/<I>dx</I>, where @a background
    /// is the background value stored in the grid and @a dx is the voxel size
    /// derived from the transform associated with the grid.
    /// Also note that −<I>background</I> corresponds to the constant value
    /// inside the generated narrow-band level set.
    ///
    /// @note If attribute transfer is enabled, i.e., if @c AttributeT is not @c void,
    /// attributes are generated only for voxels that overlap with particles,
    /// not for any other preexisting voxels (for which no attributes exist!).
    explicit ParticlesToLevelSet(SdfGridT& grid, InterrupterT* interrupt = nullptr);

    // NOTE(review): mBlindGrid is a raw owning pointer deleted here, but the
    // copy constructor and copy assignment are compiler-generated — copying
    // an instance would double-delete mBlindGrid. Confirm callers never copy.
    ~ParticlesToLevelSet() { delete mBlindGrid; }

    /// @brief This method syncs up the level set and attribute grids
    /// and therefore needs to be called before any of those grids are
    /// used and after the last call to any of the rasterizer methods.
    /// @details It has no effect or overhead if attribute transfer is disabled
    /// (i.e., if @c AttributeT is @c void) and @a prune is @c false.
    ///
    /// @note Avoid calling this method more than once, and call it only after
    /// all the particles have been rasterized.
    void finalize(bool prune = false);

    /// @brief Return a pointer to the grid containing the optional user-defined attribute.
    /// @warning If attribute transfer is disabled (i.e., if @c AttributeT is @c void)
    /// or if @link finalize() finalize@endlink is not called, the pointer will be null.
    typename AttGridType::Ptr attributeGrid() { return mAttGrid; }

    /// @brief Return the size of a voxel in world units.
    Real getVoxelSize() const { return mDx; }

    /// @brief Return the half-width of the narrow band in voxel units.
    Real getHalfWidth() const { return mHalfWidth; }

    /// @brief Return the smallest radius allowed in voxel units.
    Real getRmin() const { return mRmin; }
    /// @brief Set the smallest radius allowed in voxel units.
    void setRmin(Real Rmin) { mRmin = math::Max(Real(0),Rmin); }

    /// @brief Return the largest radius allowed in voxel units.
    Real getRmax() const { return mRmax; }
    /// @brief Set the largest radius allowed in voxel units.
    void setRmax(Real Rmax) { mRmax = math::Max(mRmin,Rmax); }

    /// @brief Return @c true if any particles were ignored due to their size.
    bool ignoredParticles() const { return mMinCount>0 || mMaxCount>0; }
    /// @brief Return the number of particles that were ignored because they were
    /// smaller than the minimum radius.
    size_t getMinCount() const { return mMinCount; }
    /// @brief Return the number of particles that were ignored because they were
    /// larger than the maximum radius.
    size_t getMaxCount() const { return mMaxCount; }

    /// @brief Return the grain size used for threading
    int getGrainSize() const { return mGrainSize; }
    /// @brief Set the grain size used for threading.
    /// @note A grain size of zero or less disables threading.
    void setGrainSize(int grainSize) { mGrainSize = grainSize; }

    /// @brief Rasterize each particle as a sphere with the particle's position and radius.
    /// @details For level set output, all spheres are CSG-unioned.
    template<typename ParticleListT>
    void rasterizeSpheres(const ParticleListT& pa);

    /// @brief Rasterize each particle as a sphere with the particle's position
    /// and a fixed radius.
    /// @details For level set output, all spheres are CSG-unioned.
    ///
    /// @param pa     particles with positions
    /// @param radius fixed sphere radius in world units.
    template<typename ParticleListT>
    void rasterizeSpheres(const ParticleListT& pa, Real radius);

    /// @brief Rasterize each particle as a trail comprising the CSG union
    /// of spheres of decreasing radius.
    ///
    /// @param pa    particles with position, radius and velocity.
    /// @param delta controls the distance between sphere instances
    ///
    /// @warning Be careful not to use too small values for @a delta,
    /// since this can lead to excessive computation per trail (which the
    /// interrupter can't stop).
    ///
    /// @note The direction of a trail is opposite to that of the velocity vector,
    /// and its length is given by the magnitude of the velocity.
    /// The radius at the head of the trail is given by the radius of the particle,
    /// and the radius at the tail is @a Rmin voxel units, which has
    /// a default value of 1.5 corresponding to the Nyquist frequency!
    template<typename ParticleListT>
    void rasterizeTrails(const ParticleListT& pa, Real delta=1.0);

private:
    // Combined (distance, attribute) value type used when attribute transfer
    // is enabled; finalize() splits it back into separate grids.
    using BlindType = p2ls_internal::BlindData<SdfType, AttType>;
    using BlindGridType = typename SdfGridT::template ValueConverter<BlindType>::Type;

    /// Class with multi-threaded implementation of particle rasterization
    template<typename ParticleListT, typename GridT> struct Raster;

    SdfGridType* mSdfGrid;              // target mask or level set grid (not owned)
    typename AttGridType::Ptr mAttGrid; // output attribute grid, set by finalize()
    BlindGridType* mBlindGrid;          // owned; non-null only when attributes are enabled
    InterrupterT* mInterrupter;         // optional interrupter (not owned)
    Real mDx, mHalfWidth;               // voxel size and narrow-band half-width
    Real mRmin, mRmax; // ignore particles outside this range of radii in voxel
    size_t mMinCount, mMaxCount; // counters for ignored particles
    int mGrainSize;                     // tbb grain size; <=0 disables threading
}; // class ParticlesToLevelSet
// Constructor: caches voxel size and half-width from the target grid and,
// when attribute transfer is enabled, allocates the intermediate blind grid.
template<typename SdfGridT, typename AttributeT, typename InterrupterT>
inline ParticlesToLevelSet<SdfGridT, AttributeT, InterrupterT>::
ParticlesToLevelSet(SdfGridT& grid, InterrupterT* interrupter) :
    mSdfGrid(&grid),
    mBlindGrid(nullptr),
    mInterrupter(interrupter),
    mDx(grid.voxelSize()[0]),
    mHalfWidth(grid.background()/mDx),
    mRmin(1.5),// corresponds to the Nyquist grid sampling frequency
    mRmax(100.0),// corresponds to a huge particle (probably too large!)
    mMinCount(0),
    mMaxCount(0),
    mGrainSize(1)
{
    // Rasterization assumes cubic voxels; reject anisotropic transforms.
    if (!mSdfGrid->hasUniformVoxels()) {
        OPENVDB_THROW(RuntimeError, "ParticlesToLevelSet only supports uniform voxels!");
    }
    // With attribute transfer enabled, rasterize into a combined
    // (distance, attribute) blind-data grid that finalize() later splits.
    if (!DisableT::value) {
        mBlindGrid = new BlindGridType(BlindType(grid.background()));
        mBlindGrid->setTransform(mSdfGrid->transform().copy());
    }
}
// Rasterize every particle as a sphere of its own radius.
template<typename SdfGridT, typename AttributeT, typename InterrupterT>
template<typename ParticleListT>
inline void ParticlesToLevelSet<SdfGridT, AttributeT, InterrupterT>::
rasterizeSpheres(const ParticleListT& pa)
{
    if (!DisableT::value) {
        // Attribute transfer enabled: rasterize into the blind-data grid so
        // the attribute rides along with the distance values.
        Raster<ParticleListT, BlindGridType> raster(*this, mBlindGrid, pa);
        raster.rasterizeSpheres();
    } else {
        // No attribute transfer: rasterize directly into the SDF/mask grid.
        Raster<ParticleListT, SdfGridT> raster(*this, mSdfGrid, pa);
        raster.rasterizeSpheres();
    }
}
// Rasterize every particle as a sphere with one fixed world-space radius,
// converted here to voxel units.
template<typename SdfGridT, typename AttributeT, typename InterrupterT>
template<typename ParticleListT>
inline void ParticlesToLevelSet<SdfGridT, AttributeT, InterrupterT>::
rasterizeSpheres(const ParticleListT& pa, Real radius)
{
    const Real voxelRadius = radius/mDx; // world units -> voxel units
    if (!DisableT::value) {
        // Attribute transfer enabled: go through the blind-data grid.
        Raster<ParticleListT, BlindGridType> raster(*this, mBlindGrid, pa);
        raster.rasterizeSpheres(voxelRadius);
    } else {
        // No attribute transfer: rasterize directly into the SDF/mask grid.
        Raster<ParticleListT, SdfGridT> raster(*this, mSdfGrid, pa);
        raster.rasterizeSpheres(voxelRadius);
    }
}
// Rasterize every particle as a trail of spheres of decreasing radius,
// spaced according to delta.
template<typename SdfGridT, typename AttributeT, typename InterrupterT>
template<typename ParticleListT>
inline void ParticlesToLevelSet<SdfGridT, AttributeT, InterrupterT>::
rasterizeTrails(const ParticleListT& pa, Real delta)
{
    if (!DisableT::value) {
        // Attribute transfer enabled: go through the blind-data grid.
        Raster<ParticleListT, BlindGridType> raster(*this, mBlindGrid, pa);
        raster.rasterizeTrails(delta);
    } else {
        // No attribute transfer: rasterize directly into the SDF/mask grid.
        Raster<ParticleListT, SdfGridT> raster(*this, mSdfGrid, pa);
        raster.rasterizeTrails(delta);
    }
}
/// @brief Finish the rasterization: when attribute transfer was enabled,
/// split the intermediate blind-data grid into the output attribute grid
/// (mAttGrid) and the output SDF/mask tree, then merge that tree into
/// mSdfGrid; otherwise just optionally prune mSdfGrid.
/// @param prune if true, prune the output tree(s) to remove redundant nodes
template<typename SdfGridT, typename AttributeT, typename InterrupterT>
inline void
ParticlesToLevelSet<SdfGridT, AttributeT, InterrupterT>::finalize(bool prune)
{
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    // No blind grid means attribute transfer was disabled and particles were
    // rasterized directly into mSdfGrid; only optional pruning remains.
    if (!mBlindGrid) {
        if (prune) {
            if (OutputIsMask) {
                tools::prune(mSdfGrid->tree());
            } else {
                tools::pruneLevelSet(mSdfGrid->tree());
            }
        }
        return;
    }
    if (prune) tools::prune(mBlindGrid->tree());
    using AttTreeT = typename AttGridType::TreeType;
    using AttLeafT = typename AttTreeT::LeafNodeType;
    using BlindTreeT = typename BlindGridType::TreeType;
    using BlindLeafIterT = typename BlindTreeT::LeafCIter;
    using BlindLeafT = typename BlindTreeT::LeafNodeType;
    using SdfTreeT = typename SdfGridType::TreeType;
    using SdfLeafT = typename SdfTreeT::LeafNodeType;
    // Use topology copy constructors since output grids have the same topology as mBlindDataGrid
    const BlindTreeT& blindTree = mBlindGrid->tree();
    // Create the output attribute grid.
    typename AttTreeT::Ptr attTree(new AttTreeT(
        blindTree, blindTree.background().blind(), openvdb::TopologyCopy()));
    // Note this overwrites any existing attribute grids!
    mAttGrid = typename AttGridType::Ptr(new AttGridType(attTree));
    mAttGrid->setTransform(mBlindGrid->transform().copy());
    typename SdfTreeT::Ptr sdfTree; // the output mask or level set tree
    // Extract the attribute grid and the mask or level set grid from mBlindDataGrid.
    if (OutputIsMask) {
        // Mask output: build a bool tree with the blind grid's topology
        // (off voxels -> 0, on voxels -> 1).
        sdfTree.reset(new SdfTreeT(blindTree,
            /*off=*/SdfType(0), /*on=*/SdfType(1), TopologyCopy()));
        // Copy leaf voxels in parallel.
        tree::LeafManager<AttTreeT> leafNodes(*attTree);
        leafNodes.foreach([&](AttLeafT& attLeaf, size_t /*leafIndex*/) {
            if (const auto* blindLeaf = blindTree.probeConstLeaf(attLeaf.origin())) {
                for (auto iter = attLeaf.beginValueOn(); iter; ++iter) {
                    const auto pos = iter.pos();
                    attLeaf.setValueOnly(pos, blindLeaf->getValue(pos).blind());
                }
            }
        });
        // Copy tiles serially.
        const auto blindAcc = mBlindGrid->getConstAccessor();
        auto iter = attTree->beginValueOn();
        // Restrict the iterator to tiles only (one level above leaf voxels).
        iter.setMaxDepth(AttTreeT::ValueOnIter::LEAF_DEPTH - 1);
        for ( ; iter; ++iter) {
            iter.modifyValue([&](AttType& v) { v = blindAcc.getValue(iter.getCoord()).blind(); });
        }
    } else {
        // Here we exploit the fact that by design level sets have no active tiles.
        // Only leaf voxels can be active.
        sdfTree.reset(new SdfTreeT(blindTree, blindTree.background().visible(), TopologyCopy()));
        for (BlindLeafIterT n = blindTree.cbeginLeaf(); n; ++n) {
            const BlindLeafT& leaf = *n;
            const openvdb::Coord xyz = leaf.origin();
            // Get leafnodes that were allocated during topology construction!
            SdfLeafT* sdfLeaf = sdfTree->probeLeaf(xyz);
            AttLeafT* attLeaf = attTree->probeLeaf(xyz);
            // Use linear offset (vs coordinate) access for better performance!
            typename BlindLeafT::ValueOnCIter m=leaf.cbeginValueOn();
            if (!m) {//no active values in leaf node so copy everything
                for (openvdb::Index k = 0; k!=BlindLeafT::SIZE; ++k) {
                    const BlindType& v = leaf.getValue(k);
                    sdfLeaf->setValueOnly(k, v.visible());
                    attLeaf->setValueOnly(k, v.blind());
                }
            } else {//only copy active values (using flood fill for the inactive values)
                for(; m; ++m) {
                    const openvdb::Index k = m.pos();
                    const BlindType& v = *m;
                    sdfLeaf->setValueOnly(k, v.visible());
                    attLeaf->setValueOnly(k, v.blind());
                }
            }
        }
        tools::signedFloodFill(*sdfTree);//required since we only transferred active voxels!
    }
    // Merge the extracted SDF/mask tree into the caller's grid.
    if (mSdfGrid->empty()) {
        // Fast path: grid was empty, so simply adopt the new tree.
        mSdfGrid->setTree(sdfTree);
    } else {
        if (OutputIsMask) {
            mSdfGrid->tree().topologyUnion(*sdfTree);
            tools::prune(mSdfGrid->tree());
        } else {
            tools::csgUnion(mSdfGrid->tree(), *sdfTree, /*prune=*/true);
        }
    }
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}
///////////////////////////////////////////////////////////
/// @brief Private functor that performs the actual rasterization of particles
/// into a grid (either the output SDF/mask grid or, when attribute transfer is
/// enabled, the intermediate blind-data grid).
///
/// Designed for tbb::parallel_reduce: the splitting constructor gives each
/// worker a private tree to rasterize into, and join() merges partial results.
template<typename SdfGridT, typename AttributeT, typename InterrupterT>
template<typename ParticleListT, typename GridT>
struct ParticlesToLevelSet<SdfGridT, AttributeT, InterrupterT>::Raster
{
    using DisableT = typename std::is_void<AttributeT>::type;
    using ParticlesToLevelSetT = ParticlesToLevelSet<SdfGridT, AttributeT, InterrupterT>;
    using SdfT = typename ParticlesToLevelSetT::SdfType; // type of signed distance values
    using AttT = typename ParticlesToLevelSetT::AttType; // type of particle attribute
    using ValueT = typename GridT::ValueType;
    using AccessorT = typename GridT::Accessor;
    using TreeT = typename GridT::TreeType;
    using LeafNodeT = typename TreeT::LeafNodeType;
    using PointPartitionerT = PointPartitioner<Index32, LeafNodeT::LOG2DIM>;

    static const bool
        OutputIsMask = std::is_same<SdfT, bool>::value, // mask grid vs. level set output
        DoAttrXfer = !DisableT::value; // true if a particle attribute is transferred

    /// @brief Main constructor
    Raster(ParticlesToLevelSetT& parent, GridT* grid, const ParticleListT& particles)
        : mParent(parent)
        , mParticles(particles)
        , mGrid(grid)
        , mMap(*(mGrid->transform().baseMap()))
        , mMinCount(0)
        , mMaxCount(0)
        , mIsCopy(false)
    {
        // The original Raster owns the point partitioner (bucketed particle
        // indices); copies share it (see the split constructor/destructor).
        mPointPartitioner = new PointPartitionerT;
        mPointPartitioner->construct(particles, mGrid->transform());
    }

    /// @brief Copy constructor called by tbb threads
    Raster(Raster& other, tbb::split)
        : mParent(other.mParent)
        , mParticles(other.mParticles)
        , mGrid(new GridT(*other.mGrid, openvdb::ShallowCopy()))
        , mMap(other.mMap)
        , mMinCount(0)
        , mMaxCount(0)
        , mTask(other.mTask)
        , mIsCopy(true)
        , mPointPartitioner(other.mPointPartitioner)
    {
        // Give this worker its own empty tree to rasterize into.
        mGrid->newTree();
    }

    virtual ~Raster()
    {
        // Copy-constructed Rasters own temporary grids that have to be deleted,
        // while the original has ownership of the bucket array.
        if (mIsCopy) {
            delete mGrid;
        } else {
            delete mPointPartitioner;
        }
    }

    /// @brief Rasterize all particles as spheres with per-particle radii.
    void rasterizeSpheres()
    {
        mMinCount = mMaxCount = 0;
        if (mParent.mInterrupter) {
            mParent.mInterrupter->start("Rasterizing particles to level set using spheres");
        }
        mTask = std::bind(&Raster::rasterSpheres, std::placeholders::_1, std::placeholders::_2);
        this->cook();
        if (mParent.mInterrupter) mParent.mInterrupter->end();
    }

    /// @brief Rasterize all particles as spheres with the given fixed radius
    /// (in voxel units); skips everything if the radius is out of range.
    void rasterizeSpheres(Real radius)
    {
        // With a uniform radius, the min/max cutoff either rejects every
        // particle or none of them.
        mMinCount = radius < mParent.mRmin ? mParticles.size() : 0;
        mMaxCount = radius > mParent.mRmax ? mParticles.size() : 0;
        if (mMinCount>0 || mMaxCount>0) {//skipping all particles!
            mParent.mMinCount = mMinCount;
            mParent.mMaxCount = mMaxCount;
        } else {
            if (mParent.mInterrupter) {
                mParent.mInterrupter->start(
                    "Rasterizing particles to level set using const spheres");
            }
            mTask = std::bind(&Raster::rasterFixedSpheres,
                std::placeholders::_1, std::placeholders::_2, radius);
            this->cook();
            if (mParent.mInterrupter) mParent.mInterrupter->end();
        }
    }

    /// @brief Rasterize all particles as velocity-aligned trails of spheres.
    void rasterizeTrails(Real delta=1.0)
    {
        mMinCount = mMaxCount = 0;
        if (mParent.mInterrupter) {
            mParent.mInterrupter->start("Rasterizing particles to level set using trails");
        }
        mTask = std::bind(&Raster::rasterTrails,
            std::placeholders::_1, std::placeholders::_2, delta);
        this->cook();
        if (mParent.mInterrupter) mParent.mInterrupter->end();
    }

    /// @brief Kick off the optionally multithreaded computation.
    /// Invoked by tbb::parallel_reduce; mTask must have been set first.
    void operator()(const tbb::blocked_range<size_t>& r)
    {
        assert(mTask);
        mTask(this, r);
        // Publish the skip counters back to the parent.
        mParent.mMinCount = mMinCount;
        mParent.mMaxCount = mMaxCount;
    }

    /// @brief Required by tbb::parallel_reduce
    void join(Raster& other)
    {
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        if (OutputIsMask) {
            if (DoAttrXfer) {
                // Blind-data mask: keep the per-voxel maximum of both trees.
                tools::compMax(*mGrid, *other.mGrid);
            } else {
                mGrid->topologyUnion(*other.mGrid);
            }
        } else {
            // Level set output: merge partial SDFs with a CSG union.
            tools::csgUnion(*mGrid, *other.mGrid, /*prune=*/true);
        }
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
        mMinCount += other.mMinCount;
        mMaxCount += other.mMaxCount;
    }

private:
    /// Disallow assignment since some of the members are references
    Raster& operator=(const Raster&) { return *this; }

    /// @return true if the particle is too small or too large
    bool ignoreParticle(Real R)
    {
        if (R < mParent.mRmin) {// below the cutoff radius
            ++mMinCount;
            return true;
        }
        if (R > mParent.mRmax) {// above the cutoff radius
            ++mMaxCount;
            return true;
        }
        return false;
    }

    /// @brief Threaded rasterization of particles as spheres with variable radius
    /// @param r range of indices into the list of particles
    void rasterSpheres(const tbb::blocked_range<size_t>& r)
    {
        AccessorT acc = mGrid->getAccessor(); // local accessor
        bool run = true; // set to false by makeSphere() when interrupted
        const Real invDx = 1 / mParent.mDx;
        AttT att;
        Vec3R pos;
        Real rad;
        // Loop over buckets
        for (size_t n = r.begin(), N = r.end(); n < N; ++n) {
            // Loop over particles in bucket n.
            typename PointPartitionerT::IndexIterator iter = mPointPartitioner->indices(n);
            for ( ; run && iter; ++iter) {
                const Index32& id = *iter;
                mParticles.getPosRad(id, pos, rad);
                const Real R = invDx * rad;// in voxel units
                if (this->ignoreParticle(R)) continue;
                const Vec3R P = mMap.applyInverseMap(pos); // world -> index space
                this->getAtt<DisableT>(id, att);
                run = this->makeSphere(P, R, att, acc);
            }//end loop over particles
        }//end loop over buckets
    }

    /// @brief Threaded rasterization of particles as spheres with a fixed radius
    /// @param r range of indices into the list of particles
    /// @param R radius of fixed-size spheres
    void rasterFixedSpheres(const tbb::blocked_range<size_t>& r, Real R)
    {
        AccessorT acc = mGrid->getAccessor(); // local accessor
        AttT att;
        Vec3R pos;
        // Loop over buckets
        for (size_t n = r.begin(), N = r.end(); n < N; ++n) {
            // Loop over particles in bucket n.
            for (auto iter = mPointPartitioner->indices(n); iter; ++iter) {
                const Index32& id = *iter;
                this->getAtt<DisableT>(id, att);
                mParticles.getPos(id, pos);
                const Vec3R P = mMap.applyInverseMap(pos);
                this->makeSphere(P, R, att, acc);
            }
        }
    }

    /// @brief Threaded rasterization of particles as spheres with velocity trails
    /// @param r range of indices into the list of particles
    /// @param delta inter-sphere spacing
    void rasterTrails(const tbb::blocked_range<size_t>& r, Real delta)
    {
        AccessorT acc = mGrid->getAccessor(); // local accessor
        bool run = true;
        AttT att;
        Vec3R pos, vel;
        Real rad;
        const Vec3R origin = mMap.applyInverseMap(Vec3R(0,0,0));
        const Real Rmin = mParent.mRmin, invDx = 1 / mParent.mDx;
        // Loop over buckets
        for (size_t n = r.begin(), N = r.end(); n < N; ++n) {
            // Loop over particles in bucket n.
            typename PointPartitionerT::IndexIterator iter = mPointPartitioner->indices(n);
            for ( ; run && iter; ++iter) {
                const Index32& id = *iter;
                mParticles.getPosRadVel(id, pos, rad, vel);
                const Real R0 = invDx * rad;
                if (this->ignoreParticle(R0)) continue;
                this->getAtt<DisableT>(id, att);
                const Vec3R P0 = mMap.applyInverseMap(pos);
                const Vec3R V = mMap.applyInverseMap(vel) - origin; // exclude translation
                const Real speed = V.length(), invSpeed = 1.0 / speed;
                const Vec3R Nrml = -V * invSpeed; // inverse normalized direction
                Vec3R P = P0; // local position of instance
                Real R = R0, d = 0; // local radius and length of trail
                // Instance spheres backward along the velocity, shrinking the
                // radius linearly from R0 toward Rmin over one velocity length.
                for (size_t m = 0; run && d <= speed ; ++m) {
                    run = this->makeSphere(P, R, att, acc);
                    P += 0.5 * delta * R * Nrml; // adaptive offset along inverse velocity direction
                    d = (P - P0).length(); // current length of trail
                    R = R0 - (R0 - Rmin) * d * invSpeed; // R = R0 -> mRmin(e.g. 1.5)
                }//end loop over sphere instances
            }//end loop over particles
        }//end loop over buckets
    }

    /// @brief Run mTask over all point buckets, threaded if a grain size is set.
    void cook()
    {
        // parallelize over the point buckets
        const Index32 bucketCount = Index32(mPointPartitioner->size());
        if (mParent.mGrainSize>0) {
            tbb::parallel_reduce(
                tbb::blocked_range<size_t>(0, bucketCount, mParent.mGrainSize), *this);
        } else {
            (*this)(tbb::blocked_range<size_t>(0, bucketCount));
        }
    }

    /// @brief Rasterize sphere at position P and radius R into
    /// a narrow-band level set with half-width, mHalfWidth.
    /// @return @c false if rasterization was interrupted
    ///
    /// @param P coordinates of the particle position in voxel units
    /// @param R radius of particle in voxel units
    /// @param att an optional user-defined attribute value to be associated with voxels
    /// @param acc grid accessor with a private copy of the grid
    ///
    /// @note For best performance all computations are performed in voxel space,
    /// with the important exception of the final level set value that is converted
    /// to world units (the grid stores the closest Euclidean signed distances
    /// measured in world units). Also note we use the convention of positive distances
    /// outside the surface and negative distances inside the surface.
    template <bool IsMaskT = OutputIsMask>
    typename std::enable_if<!IsMaskT, bool>::type
    makeSphere(const Vec3R& P, Real R, const AttT& att, AccessorT& acc)
    {
        const Real
            dx = mParent.mDx,
            w = mParent.mHalfWidth,
            max = R + w, // maximum distance in voxel units
            max2 = math::Pow2(max), // square of maximum distance in voxel units
            min2 = math::Pow2(math::Max(Real(0), R - w)); // square of minimum distance
        // Bounding box of the sphere
        const Coord
            lo(math::Floor(P[0]-max),math::Floor(P[1]-max),math::Floor(P[2]-max)),
            hi(math::Ceil( P[0]+max),math::Ceil( P[1]+max),math::Ceil( P[2]+max));
        const ValueT inside = -mGrid->background();
        ValueT v;
        size_t count = 0;
        for (Coord c = lo; c.x() <= hi.x(); ++c.x()) {
            //only check interrupter every 32'th scan in x
            if (!(count++ & ((1<<5)-1)) && util::wasInterrupted(mParent.mInterrupter)) {
                // Abort the whole parallel computation, not just this task.
                tbb::task::self().cancel_group_execution();
                return false;
            }
            const Real x2 = math::Pow2(c.x() - P[0]);
            for (c.y() = lo.y(); c.y() <= hi.y(); ++c.y()) {
                const Real x2y2 = x2 + math::Pow2(c.y() - P[1]);
                for (c.z() = lo.z(); c.z() <= hi.z(); ++c.z()) {
                    const Real x2y2z2 = x2y2 + math::Pow2(c.z()-P[2]); // squared dist from c to P
#if defined __INTEL_COMPILER
                    _Pragma("warning (push)")
                    _Pragma("warning (disable:186)") // "pointless comparison of unsigned integer with zero"
#endif
                    if (x2y2z2 >= max2 || (!acc.probeValue(c, v) && (v < ValueT(0))))
                        continue;//outside narrow band of the particle or inside existing level set
#if defined __INTEL_COMPILER
                    _Pragma("warning (pop)")
#endif
                    if (x2y2z2 <= min2) {//inside narrow band of the particle.
                        acc.setValueOff(c, inside);
                        continue;
                    }
                    // convert signed distance from voxel units to world units
                    //const ValueT d=dx*(math::Sqrt(x2y2z2) - R);
                    const ValueT d = Merge(static_cast<SdfT>(dx*(math::Sqrt(x2y2z2)-R)), att);
                    if (d < v) acc.setValue(c, d);//CSG union
                }//end loop over z
            }//end loop over y
        }//end loop over x
        return true;
    }

    /// @brief Rasterize a sphere of radius @a r at position @a p into a boolean mask grid.
    /// @return @c false if rasterization was interrupted
    template <bool IsMaskT = OutputIsMask>
    typename std::enable_if<IsMaskT, bool>::type
    makeSphere(const Vec3R& p, Real r, const AttT& att, AccessorT& acc)
    {
        const Real
            rSquared = r * r, // sphere radius squared, in voxel units
            inW = r / math::Sqrt(6.0); // half the width in voxel units of an inscribed cube
        const Coord
            // Bounding box of the sphere
            outLo(math::Floor(p[0] - r), math::Floor(p[1] - r), math::Floor(p[2] - r)),
            outHi(math::Ceil(p[0] + r), math::Ceil(p[1] + r), math::Ceil(p[2] + r)),
            // Bounds of the inscribed cube
            inLo(math::Ceil(p[0] - inW), math::Ceil(p[1] - inW), math::Ceil(p[2] - inW)),
            inHi(math::Floor(p[0] + inW), math::Floor(p[1] + inW), math::Floor(p[2] + inW));
        // Bounding boxes of regions comprising out - in
        /// @todo These could be divided further into sparsely- and densely-filled subregions.
        const std::vector<CoordBBox> padding{
            CoordBBox(outLo.x(), outLo.y(), outLo.z(), inLo.x()-1, outHi.y(), outHi.z()),
            CoordBBox(inHi.x()+1, outLo.y(), outLo.z(), outHi.x(), outHi.y(), outHi.z()),
            CoordBBox(outLo.x(), outLo.y(), outLo.z(), outHi.x(), inLo.y()-1, outHi.z()),
            CoordBBox(outLo.x(), inHi.y()+1, outLo.z(), outHi.x(), outHi.y(), outHi.z()),
            CoordBBox(outLo.x(), outLo.y(), outLo.z(), outHi.x(), outHi.y(), inLo.z()-1),
            CoordBBox(outLo.x(), outLo.y(), inHi.z()+1, outHi.x(), outHi.y(), outHi.z()),
        };
        const ValueT onValue = Merge(SdfT(1), att);
        // Sparsely fill the inscribed cube.
        /// @todo Use sparse fill only if 2r > leaf width?
        acc.tree().sparseFill(CoordBBox(inLo, inHi), onValue);
        // Densely fill the remaining regions.
        for (const auto& bbox: padding) {
            if (util::wasInterrupted(mParent.mInterrupter)) {
                tbb::task::self().cancel_group_execution();
                return false;
            }
            const Coord &bmin = bbox.min(), &bmax = bbox.max();
            Coord c;
            Real cx, cy, cz;
            for (c = bmin, cx = c.x(); c.x() <= bmax.x(); ++c.x(), cx += 1) {
                const Real x2 = math::Pow2(cx - p[0]);
                for (c.y() = bmin.y(), cy = c.y(); c.y() <= bmax.y(); ++c.y(), cy += 1) {
                    const Real x2y2 = x2 + math::Pow2(cy - p[1]);
                    for (c.z() = bmin.z(), cz = c.z(); c.z() <= bmax.z(); ++c.z(), cz += 1) {
                        const Real x2y2z2 = x2y2 + math::Pow2(cz - p[2]);
                        if (x2y2z2 < rSquared) {
                            acc.setValue(c, onValue);
                        }
                    }
                }
            }
        }
        return true;
    }

    using FuncType = typename std::function<void (Raster*, const tbb::blocked_range<size_t>&)>;

    // getAtt(): fetch the particle attribute, or do nothing when attribute
    // transfer is disabled (AttributeT == void).
    template<typename DisableType>
    typename std::enable_if<DisableType::value>::type
    getAtt(size_t, AttT&) const {}
    template<typename DisableType>
    typename std::enable_if<!DisableType::value>::type
    getAtt(size_t n, AttT& a) const { mParticles.getAtt(n, a); }

    // Merge(): combine a distance value with the attribute into the grid's
    // value type; a pass-through when the grid stores plain distances.
    template<typename T>
    typename std::enable_if<std::is_same<T, ValueT>::value, ValueT>::type
    Merge(T s, const AttT&) const { return s; }
    template<typename T>
    typename std::enable_if<!std::is_same<T, ValueT>::value, ValueT>::type
    Merge(T s, const AttT& a) const { return ValueT(s,a); }

    ParticlesToLevelSetT& mParent;
    const ParticleListT& mParticles;//list of particles
    GridT* mGrid; // output grid (owned only by tbb-split copies)
    const math::MapBase& mMap; // world <-> index transform of mGrid
    size_t mMinCount, mMaxCount;//counters for ignored particles!
    FuncType mTask; // the rasterization kernel selected by rasterize*()
    const bool mIsCopy; // true for tbb-split copies (see destructor)
    PointPartitionerT* mPointPartitioner; // shared particle buckets (owned by the original)
}; // struct ParticlesToLevelSet::Raster
///////////////////// YOU CAN SAFELY IGNORE THIS SECTION /////////////////////
namespace p2ls_internal {
// This is a simple type that combines a distance value and a particle
// attribute. It's required for attribute transfer which is defined in the
// Raster class above.
/// @private
/// @brief Pairs a "visible" value (the signed distance) with a "blind"
/// user attribute. All comparisons and arithmetic operate on the visible
/// component only, so the type can masquerade as a distance value inside
/// the tree while carrying the attribute along.
template<typename VisibleT, typename BlindT>
class BlindData
{
public:
    using type = VisibleT;
    using VisibleType = VisibleT;
    using BlindType = BlindT;

    // Default constructor leaves both members default-initialized
    // (i.e., uninitialized for built-in types).
    BlindData() {}
    explicit BlindData(VisibleT v) : mVisible(v), mBlind(zeroVal<BlindType>()) {}
    BlindData(VisibleT v, BlindT b) : mVisible(v), mBlind(b) {}
    BlindData(const BlindData&) = default;
    BlindData& operator=(const BlindData&) = default;
    const VisibleT& visible() const { return mVisible; }
    const BlindT& blind() const { return mBlind; }
    OPENVDB_NO_FP_EQUALITY_WARNING_BEGIN
    // Equality and ordering consider only the visible component.
    bool operator==(const BlindData& rhs) const { return mVisible == rhs.mVisible; }
    OPENVDB_NO_FP_EQUALITY_WARNING_END
    bool operator< (const BlindData& rhs) const { return mVisible < rhs.mVisible; }
    bool operator> (const BlindData& rhs) const { return mVisible > rhs.mVisible; }
    // Note: binary +/- intentionally drop the blind values; the result's
    // blind component is zero-initialized via the single-argument constructor.
    BlindData operator+(const BlindData& rhs) const { return BlindData(mVisible + rhs.mVisible); }
    BlindData operator-(const BlindData& rhs) const { return BlindData(mVisible - rhs.mVisible); }
    BlindData operator-() const { return BlindData(-mVisible, mBlind); }

protected:
    VisibleT mVisible; // the distance value seen by tree algorithms
    BlindT mBlind;     // the attached user attribute
};
/// @private
/// @brief Stream only the visible component of a BlindData value.
/// Required by several of the tree nodes.
template<typename VisibleT, typename BlindT>
inline std::ostream& operator<<(std::ostream& ostr, const BlindData<VisibleT, BlindT>& rhs)
{
    return ostr << rhs.visible();
}
/// @private
/// @brief Absolute value of a BlindData: applies math::Abs to the visible
/// component and carries the blind attribute through unchanged.
/// Required by math::Abs.
template<typename VisibleT, typename BlindT>
inline BlindData<VisibleT, BlindT> Abs(const BlindData<VisibleT, BlindT>& x)
{
    using ResultT = BlindData<VisibleT, BlindT>;
    return ResultT(math::Abs(x.visible()), x.blind());
}
/// @private
/// @brief Add a scalar to the visible component of a BlindData value,
/// preserving the blind attribute.
/// Required to support the (zeroVal<BlindData>() + val) idiom.
template<typename VisibleT, typename BlindT, typename T>
inline BlindData<VisibleT, BlindT>
operator+(const BlindData<VisibleT, BlindT>& x, const T& rhs)
{
    const VisibleT sum = x.visible() + static_cast<VisibleT>(rhs);
    return BlindData<VisibleT, BlindT>(sum, x.blind());
}
} // namespace p2ls_internal
//////////////////////////////////////////////////////////////////////////////
// The following are convenience functions for common use cases.
template<typename GridT, typename ParticleListT, typename InterrupterT>
inline void
particlesToSdf(const ParticleListT& plist, GridT& grid, InterrupterT* interrupt)
{
static_assert(std::is_floating_point<typename GridT::ValueType>::value,
"particlesToSdf requires an SDF grid with floating-point values");
if (grid.getGridClass() != GRID_LEVEL_SET) {
OPENVDB_LOG_WARN("particlesToSdf requires a level set grid;"
" try Grid::setGridClass(openvdb::GRID_LEVEL_SET)");
}
ParticlesToLevelSet<GridT> p2ls(grid, interrupt);
p2ls.rasterizeSpheres(plist);
tools::pruneLevelSet(grid.tree());
}
template<typename GridT, typename ParticleListT, typename InterrupterT>
inline void
particlesToSdf(const ParticleListT& plist, GridT& grid, Real radius, InterrupterT* interrupt)
{
static_assert(std::is_floating_point<typename GridT::ValueType>::value,
"particlesToSdf requires an SDF grid with floating-point values");
if (grid.getGridClass() != GRID_LEVEL_SET) {
OPENVDB_LOG_WARN("particlesToSdf requires a level set grid;"
" try Grid::setGridClass(openvdb::GRID_LEVEL_SET)");
}
ParticlesToLevelSet<GridT> p2ls(grid, interrupt);
p2ls.rasterizeSpheres(plist, radius);
tools::pruneLevelSet(grid.tree());
}
/// @brief Rasterize @a plist into @a grid as velocity-aligned trails of
/// spheres forming a narrow-band signed distance field, then prune the result.
/// @param delta inter-sphere spacing along each particle's trail
template<typename GridT, typename ParticleListT, typename InterrupterT>
inline void
particleTrailsToSdf(const ParticleListT& plist, GridT& grid, Real delta, InterrupterT* interrupt)
{
    static_assert(std::is_floating_point<typename GridT::ValueType>::value,
        "particleTrailsToSdf requires an SDF grid with floating-point values");
    if (grid.getGridClass() != GRID_LEVEL_SET) {
        // Fixed: the warning previously named particlesToSdf (copy-paste error).
        OPENVDB_LOG_WARN("particleTrailsToSdf requires a level set grid;"
            " try Grid::setGridClass(openvdb::GRID_LEVEL_SET)");
    }
    ParticlesToLevelSet<GridT> p2ls(grid, interrupt);
    p2ls.rasterizeTrails(plist, delta);
    tools::pruneLevelSet(grid.tree());
}
template<typename GridT, typename ParticleListT, typename InterrupterT>
inline void
particlesToMask(const ParticleListT& plist, GridT& grid, InterrupterT* interrupt)
{
static_assert(std::is_same<bool, typename GridT::ValueType>::value,
"particlesToMask requires a boolean-valued grid");
ParticlesToLevelSet<GridT> p2ls(grid, interrupt);
p2ls.rasterizeSpheres(plist);
tools::prune(grid.tree());
}
template<typename GridT, typename ParticleListT, typename InterrupterT>
inline void
particlesToMask(const ParticleListT& plist, GridT& grid, Real radius, InterrupterT* interrupt)
{
static_assert(std::is_same<bool, typename GridT::ValueType>::value,
"particlesToMask requires a boolean-valued grid");
ParticlesToLevelSet<GridT> p2ls(grid, interrupt);
p2ls.rasterizeSpheres(plist, radius);
tools::prune(grid.tree());
}
template<typename GridT, typename ParticleListT, typename InterrupterT>
inline void
particleTrailsToMask(const ParticleListT& plist, GridT& grid, Real delta, InterrupterT* interrupt)
{
static_assert(std::is_same<bool, typename GridT::ValueType>::value,
"particleTrailsToMask requires a boolean-valued grid");
ParticlesToLevelSet<GridT> p2ls(grid, interrupt);
p2ls.rasterizeTrails(plist, delta);
tools::prune(grid.tree());
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_PARTICLES_TO_LEVELSET_HAS_BEEN_INCLUDED
| 42,950 | C | 41.150147 | 100 | 0.64163 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/PointIndexGrid.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file PointIndexGrid.h
///
/// @brief Space-partitioning acceleration structure for points. Partitions
/// the points into voxels to accelerate range and nearest neighbor
/// searches.
///
/// @note Leaf nodes store a single point-index array and the voxels are only
/// integer offsets into that array. The actual points are never stored
/// in the acceleration structure, only offsets into an external array.
///
/// @author Mihai Alden
#ifndef OPENVDB_TOOLS_POINT_INDEX_GRID_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_POINT_INDEX_GRID_HAS_BEEN_INCLUDED
#include "PointPartitioner.h"
#include <openvdb/version.h>
#include <openvdb/Exceptions.h>
#include <openvdb/Grid.h>
#include <openvdb/Types.h>
#include <openvdb/math/Transform.h>
#include <openvdb/tree/LeafManager.h>
#include <openvdb/tree/LeafNode.h>
#include <openvdb/tree/Tree.h>
#include <tbb/atomic.h>
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>
#include <algorithm> // for std::min(), std::max()
#include <cmath> // for std::sqrt()
#include <deque>
#include <iostream>
#include <type_traits> // for std::is_same
#include <utility> // for std::pair
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tree {
template<Index, typename> struct SameLeafConfig; // forward declaration
}
namespace tools {
template<typename T, Index Log2Dim> struct PointIndexLeafNode; // forward declaration
/// Point index tree configured to match the default OpenVDB tree configuration
using PointIndexTree = tree::Tree<tree::RootNode<tree::InternalNode<tree::InternalNode
<PointIndexLeafNode<PointIndex32, 3>, 4>, 5>>>;
/// Point index grid
using PointIndexGrid = Grid<PointIndexTree>;
////////////////////////////////////////
/// @interface PointArray
/// Expected interface for the PointArray container:
/// @code
/// template<typename VectorType>
/// struct PointArray
/// {
/// // The type used to represent world-space point positions
/// using PosType = VectorType;
///
/// // Return the number of points in the array
/// size_t size() const;
///
/// // Return the world-space position of the nth point in the array.
/// void getPos(size_t n, PosType& xyz) const;
/// };
/// @endcode
////////////////////////////////////////
/// @brief Partition points into a point index grid to accelerate range and
/// nearest-neighbor searches.
///
/// @param points world-space point array conforming to the PointArray interface
/// @param voxelSize voxel size in world units
template<typename GridT, typename PointArrayT>
inline typename GridT::Ptr
createPointIndexGrid(const PointArrayT& points, double voxelSize);
/// @brief Partition points into a point index grid to accelerate range and
/// nearest-neighbor searches.
///
/// @param points world-space point array conforming to the PointArray interface
/// @param xform world-to-index-space transform
template<typename GridT, typename PointArrayT>
inline typename GridT::Ptr
createPointIndexGrid(const PointArrayT& points, const math::Transform& xform);
/// @brief Return @c true if the given point index grid represents a valid partitioning
/// of the given point array.
///
/// @param points world-space point array conforming to the PointArray interface
/// @param grid point index grid to validate
template<typename PointArrayT, typename GridT>
inline bool
isValidPartition(const PointArrayT& points, const GridT& grid);
/// Repartition the @a points if needed, otherwise return the input @a grid.
template<typename GridT, typename PointArrayT>
inline typename GridT::ConstPtr
getValidPointIndexGrid(const PointArrayT& points, const typename GridT::ConstPtr& grid);
/// Repartition the @a points if needed, otherwise return the input @a grid.
template<typename GridT, typename PointArrayT>
inline typename GridT::Ptr
getValidPointIndexGrid(const PointArrayT& points, const typename GridT::Ptr& grid);
////////////////////////////////////////
/// Accelerated range and nearest-neighbor searches for point index grids
/// Accelerated range and nearest-neighbor searches for point index grids
template<typename TreeType = PointIndexTree>
struct PointIndexIterator
{
    using ConstAccessor = tree::ValueAccessor<const TreeType>;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using ValueType = typename TreeType::ValueType;

    PointIndexIterator();
    PointIndexIterator(const PointIndexIterator& rhs);
    PointIndexIterator& operator=(const PointIndexIterator& rhs);

    /// @brief Construct an iterator over the indices of the points contained in voxel (i, j, k).
    /// @param ijk the voxel containing the points over which to iterate
    /// @param acc an accessor for the grid or tree that holds the point indices
    PointIndexIterator(const Coord& ijk, ConstAccessor& acc);

    /// @brief Construct an iterator over the indices of the points contained in
    /// the given bounding box.
    /// @param bbox the bounding box of the voxels containing the points over which to iterate
    /// @param acc an accessor for the grid or tree that holds the point indices
    /// @note The range of the @a bbox is inclusive. Thus, a bounding box with
    /// min = max is not empty but rather encloses a single voxel.
    PointIndexIterator(const CoordBBox& bbox, ConstAccessor& acc);

    /// @brief Clear the iterator and update it with the result of the given voxel query.
    /// @param ijk the voxel containing the points over which to iterate
    /// @param acc an accessor for the grid or tree that holds the point indices
    void searchAndUpdate(const Coord& ijk, ConstAccessor& acc);

    /// @brief Clear the iterator and update it with the result of the given voxel region query.
    /// @param bbox the bounding box of the voxels containing the points over which to iterate
    /// @param acc an accessor for the grid or tree that holds the point indices
    /// @note The range of the @a bbox is inclusive. Thus, a bounding box with
    /// min = max is not empty but rather encloses a single voxel.
    void searchAndUpdate(const CoordBBox& bbox, ConstAccessor& acc);

    /// @brief Clear the iterator and update it with the result of the given
    /// index-space bounding box query.
    /// @param bbox index-space bounding box
    /// @param acc an accessor for the grid or tree that holds the point indices
    /// @param points world-space point array conforming to the PointArray interface
    /// @param xform linear, uniform-scale transform (i.e., cubical voxels)
    template<typename PointArray>
    void searchAndUpdate(const BBoxd& bbox, ConstAccessor& acc,
        const PointArray& points, const math::Transform& xform);

    /// @brief Clear the iterator and update it with the result of the given
    /// index-space radial query.
    /// @param center index-space center
    /// @param radius index-space radius
    /// @param acc an accessor for the grid or tree that holds the point indices
    /// @param points world-space point array conforming to the PointArray interface
    /// @param xform linear, uniform-scale transform (i.e., cubical voxels)
    /// @param subvoxelAccuracy if true, check individual points against the search region,
    /// otherwise return all points that reside in voxels that are inside
    /// or intersect the search region
    template<typename PointArray>
    void searchAndUpdate(const Vec3d& center, double radius, ConstAccessor& acc,
        const PointArray& points, const math::Transform& xform, bool subvoxelAccuracy = true);

    /// @brief Clear the iterator and update it with the result of the given
    /// world-space bounding box query.
    /// @param bbox world-space bounding box
    /// @param acc an accessor for the grid or tree that holds the point indices
    /// @param points world-space point array conforming to the PointArray interface
    /// @param xform linear, uniform-scale transform (i.e., cubical voxels)
    template<typename PointArray>
    void worldSpaceSearchAndUpdate(const BBoxd& bbox, ConstAccessor& acc,
        const PointArray& points, const math::Transform& xform);

    /// @brief Clear the iterator and update it with the result of the given
    /// world-space radial query.
    /// @param center world-space center
    /// @param radius world-space radius
    /// @param acc an accessor for the grid or tree that holds the point indices
    /// @param points world-space point array conforming to the PointArray interface
    /// @param xform linear, uniform-scale transform (i.e., cubical voxels)
    /// @param subvoxelAccuracy if true, check individual points against the search region,
    /// otherwise return all points that reside in voxels that are inside
    /// or intersect the search region
    template<typename PointArray>
    void worldSpaceSearchAndUpdate(const Vec3d& center, double radius, ConstAccessor& acc,
        const PointArray& points, const math::Transform& xform, bool subvoxelAccuracy = true);

    /// Reset the iterator to point to the first item.
    void reset();

    /// Return a const reference to the item to which this iterator is pointing.
    const ValueType& operator*() const { return *mRange.first; }

    /// @{
    /// @brief Return @c true if this iterator is not yet exhausted.
    /// (True while the current range has items left or more ranges remain.)
    bool test() const { return mRange.first < mRange.second || mIter != mRangeList.end(); }
    operator bool() const { return this->test(); }
    /// @}

    /// Advance iterator to next item.
    void increment();

    /// Advance iterator to next item.
    void operator++() { this->increment(); }

    /// @brief Advance iterator to next item.
    /// @return @c true if this iterator is not yet exhausted.
    bool next();

    /// Return the number of point indices in the iterator range.
    size_t size() const;

    /// Return @c true if both iterators point to the same element.
    bool operator==(const PointIndexIterator& p) const { return mRange.first == p.mRange.first; }
    bool operator!=(const PointIndexIterator& p) const { return !this->operator==(p); }

private:
    using Range = std::pair<const ValueType*, const ValueType*>;
    using RangeDeque = std::deque<Range>;
    using RangeDequeCIter = typename RangeDeque::const_iterator;
    using IndexArray = std::unique_ptr<ValueType[]>;

    void clear();

    // Primary index collection
    Range mRange;            // current [begin, end) span of point indices
    RangeDeque mRangeList;   // all spans gathered by the last search
    RangeDequeCIter mIter;   // position within mRangeList
    // Secondary index collection
    IndexArray mIndexArray;      // indices filtered out of leaf arrays (e.g. by subvoxel tests)
    size_t mIndexArraySize;
}; // struct PointIndexIterator
/// @brief Selectively extract and filter point data using a custom filter operator.
///
/// @par FilterType example:
/// @interface FilterType
/// @code
/// template<typename T>
/// struct WeightedAverageAccumulator {
/// using ValueType = T;
///
/// WeightedAverageAccumulator(T const * const array, const T radius)
/// : mValues(array), mInvRadius(1.0/radius), mWeightSum(0.0), mValueSum(0.0) {}
///
/// void reset() { mWeightSum = mValueSum = T(0.0); }
///
/// // the following method is invoked by the PointIndexFilter
/// void operator()(const T distSqr, const size_t pointIndex) {
/// const T weight = T(1.0) - openvdb::math::Sqrt(distSqr) * mInvRadius;
/// mWeightSum += weight;
/// mValueSum += weight * mValues[pointIndex];
/// }
///
/// T result() const { return mWeightSum > T(0.0) ? mValueSum / mWeightSum : T(0.0); }
///
/// private:
/// T const * const mValues;
/// const T mInvRadius;
/// T mWeightSum, mValueSum;
/// }; // struct WeightedAverageAccumulator
/// @endcode
template<typename PointArray, typename TreeType = PointIndexTree>
struct PointIndexFilter
{
using PosType = typename PointArray::PosType;
using ScalarType = typename PosType::value_type;
using ConstAccessor = tree::ValueAccessor<const TreeType>;
/// @brief Constructor
/// @param points world-space point array conforming to the PointArray interface
/// @param tree a point index tree
/// @param xform linear, uniform-scale transform (i.e., cubical voxels)
PointIndexFilter(const PointArray& points, const TreeType& tree, const math::Transform& xform);
/// Thread safe copy constructor
PointIndexFilter(const PointIndexFilter& rhs);
/// @brief Perform a radial search query and apply the given filter
/// operator to the selected points.
/// @param center world-space center
/// @param radius world-space radius
/// @param op custom filter operator (see the FilterType example for interface details)
template<typename FilterType>
void searchAndApply(const PosType& center, ScalarType radius, FilterType& op);
private:
PointArray const * const mPoints;      // non-owning; caller keeps the array alive
ConstAccessor mAcc;                    // per-instance accessor (not shared across threads)
const math::Transform mXform;          // copied so the filter owns its transform
const ScalarType mInvVoxelSize;        // 1 / voxelSize[0]; assumes uniform-scale transform
PointIndexIterator<TreeType> mIter;    // reused between searchAndApply() calls
}; // struct PointIndexFilter
////////////////////////////////////////
// Internal operators and implementation details
namespace point_index_grid_internal {
/// @brief Leaf-node functor that verifies that every point index stored in a
/// leaf still maps to the voxel containing the point.  On the first mismatch
/// it sets a shared flag and cancels the remaining TBB tasks.
template<typename PointArrayT>
struct ValidPartitioningOp
{
/// @param hasChanged  shared output flag, set to @c true on the first mismatch
/// @param points      world-space point array conforming to the PointArray interface
/// @param xform       transform used to map points into index space
ValidPartitioningOp(tbb::atomic<bool>& hasChanged,
const PointArrayT& points, const math::Transform& xform)
: mPoints(&points)
, mTransform(&xform)
, mHasChanged(&hasChanged)
{
}
/// Invoked (possibly concurrently) for each leaf node by a LeafManager.
template <typename LeafT>
void operator()(LeafT &leaf, size_t /*leafIndex*/) const
{
// Another task already detected a mismatch; stop all outstanding work.
if ((*mHasChanged)) {
tbb::task::self().cancel_group_execution();
return;
}
using IndexArrayT = typename LeafT::IndexArray;
using IndexT = typename IndexArrayT::value_type;
using PosType = typename PointArrayT::PosType;
typename LeafT::ValueOnCIter iter;
Coord voxelCoord;
PosType point;
const IndexT
*begin = static_cast<IndexT*>(nullptr),
*end = static_cast<IndexT*>(nullptr);
for (iter = leaf.cbeginValueOn(); iter; ++iter) {
if ((*mHasChanged)) break; // early out if another task flagged a change
voxelCoord = iter.getCoord();
leaf.getIndices(iter.pos(), begin, end);
while (begin < end) {
mPoints->getPos(*begin, point);
// A point whose cell-centered index coordinate no longer matches
// the voxel it is bucketed in invalidates the partitioning.
if (voxelCoord != mTransform->worldToIndexCellCentered(point)) {
mHasChanged->fetch_and_store(true);
break;
}
++begin;
}
}
}
private:
PointArrayT const * const mPoints;
math::Transform const * const mTransform;
tbb::atomic<bool> * const mHasChanged;
};
/// @brief TBB range functor that builds one PointIndexLeafNode per bucket
/// produced by the PointPartitioner, using a counting sort (histogram,
/// prefix-sum scan, then rank-and-permute) over per-point voxel offsets.
template<typename LeafNodeT>
struct PopulateLeafNodesOp
{
using IndexT = uint32_t;
using Partitioner = PointPartitioner<IndexT, LeafNodeT::LOG2DIM>;
/// @param leafNodes   output array (one slot per bucket), owned by the caller
/// @param partitioner source of the bucketed point indices and voxel offsets
PopulateLeafNodesOp(std::unique_ptr<LeafNodeT*[]>& leafNodes,
const Partitioner& partitioner)
: mLeafNodes(leafNodes.get())
, mPartitioner(&partitioner)
{
}
void operator()(const tbb::blocked_range<size_t>& range) const {
using VoxelOffsetT = typename Partitioner::VoxelOffsetType;
// Size the scratch buffers once for the largest bucket in this range.
size_t maxPointCount = 0;
for (size_t n = range.begin(), N = range.end(); n != N; ++n) {
maxPointCount = std::max(maxPointCount, mPartitioner->indices(n).size());
}
const IndexT voxelCount = LeafNodeT::SIZE;
// allocate histogram buffers
std::unique_ptr<VoxelOffsetT[]> offsets{new VoxelOffsetT[maxPointCount]};
std::unique_ptr<IndexT[]> histogram{new IndexT[voxelCount]};
VoxelOffsetT const * const voxelOffsets = mPartitioner->voxelOffsets().get();
for (size_t n = range.begin(), N = range.end(); n != N; ++n) {
LeafNodeT* node = new LeafNodeT();
node->setOrigin(mPartitioner->origin(n));
typename Partitioner::IndexIterator it = mPartitioner->indices(n);
const size_t pointCount = it.size();
IndexT const * const indices = &*it;
// local copy of voxel offsets.
for (IndexT i = 0; i < pointCount; ++i) {
offsets[i] = voxelOffsets[ indices[i] ];
}
// compute voxel-offset histogram
memset(&histogram[0], 0, voxelCount * sizeof(IndexT));
for (IndexT i = 0; i < pointCount; ++i) {
++histogram[ offsets[i] ];
}
typename LeafNodeT::NodeMaskType& mask = node->getValueMask();
typename LeafNodeT::Buffer& buffer = node->buffer();
// scan histogram (all-prefix-sums)
// After the scan: buffer[i] holds the cumulative point count through
// voxel i, histogram[i] holds voxel i's start offset, and the mask is
// set for every voxel that contains at least one point.
IndexT count = 0, startOffset;
for (int i = 0; i < int(voxelCount); ++i) {
if (histogram[i] > 0) {
startOffset = count;
count += histogram[i];
histogram[i] = startOffset;
mask.setOn(i);
}
buffer.setValue(i, count);
}
// allocate point-index array
node->indices().resize(pointCount);
typename LeafNodeT::ValueType * const orderedIndices = node->indices().data();
// rank and permute
for (IndexT i = 0; i < pointCount; ++i) {
orderedIndices[ histogram[ offsets[i] ]++ ] = indices[i];
}
mLeafNodes[n] = node;
}
}
//////////
LeafNodeT* * const mLeafNodes;
Partitioner const * const mPartitioner;
};
/// @brief Construct a @c PointIndexTree by bucketing @a points into leaf
/// nodes in parallel and attaching the nodes to @a tree.
/// @param tree   output tree (leaf nodes are added via a value accessor)
/// @param xform  transform used to map points into index space
/// @param points world-space point array conforming to the PointArray interface
/// @throws LookupError if the partitioner is not using a cell-centered transform
template<typename TreeType, typename PointArray>
inline void
constructPointTree(TreeType& tree, const math::Transform& xform, const PointArray& points)
{
using LeafType = typename TreeType::LeafNodeType;
std::unique_ptr<LeafType*[]> leafNodes;
size_t leafNodeCount = 0;
{
// Important: Do not disable the cell-centered transform in the PointPartitioner.
// This interpretation is assumed in the PointIndexGrid and all related
// search algorithms.
PointPartitioner<uint32_t, LeafType::LOG2DIM> partitioner;
partitioner.construct(points, xform, /*voxelOrder=*/false, /*recordVoxelOffsets=*/true);
if (!partitioner.usingCellCenteredTransform()) {
OPENVDB_THROW(LookupError, "The PointIndexGrid requires a "
"cell-centered transform.");
}
leafNodeCount = partitioner.size();
leafNodes.reset(new LeafType*[leafNodeCount]);
const tbb::blocked_range<size_t> range(0, leafNodeCount);
tbb::parallel_for(range, PopulateLeafNodesOp<LeafType>(leafNodes, partitioner));
} // scope releases the partitioner's intermediate storage before tree assembly
// Transfer ownership of the populated leaf nodes to the tree.
tree::ValueAccessor<TreeType> acc(tree);
for (size_t n = 0; n < leafNodeCount; ++n) {
acc.addLeaf(leafNodes[n]);
}
}
////////////////////////////////////////
/// @brief Copy the contents of deque @a d into a newly allocated array @a a,
/// storing the element count in @a size.
/// @note Any array previously owned by @a a is released first.
template<typename T>
inline void
dequeToArray(const std::deque<T>& d, std::unique_ptr<T[]>& a, size_t& size)
{
    size = d.size();
    a.reset(new T[size]);
    // Copy element by element; deque storage is not contiguous.
    T* out = a.get();
    for (const T& element : d) {
        *out = element;
        ++out;
    }
}
/// @brief Decompose the region of @a bbox that lies outside the inner box
/// @a ibox into six non-overlapping face slabs and append them to @a regions.
/// @details The slabs are carved off in z, then x, then y; @a cmin / @a cmax
/// are shrunk after each axis so subsequent slabs do not overlap earlier ones.
inline void
constructExclusiveRegions(std::vector<CoordBBox>& regions,
const CoordBBox& bbox, const CoordBBox& ibox)
{
regions.clear();
regions.reserve(6);
Coord cmin = ibox.min();
Coord cmax = ibox.max();
// left-face bbox
regions.push_back(bbox);
regions.back().max().z() = cmin.z();
// right-face bbox
regions.push_back(bbox);
regions.back().min().z() = cmax.z();
--cmax.z(); // accounting for cell centered bucketing.
++cmin.z();
// front-face bbox
regions.push_back(bbox);
CoordBBox* lastRegion = &regions.back();
lastRegion->min().z() = cmin.z();
lastRegion->max().z() = cmax.z();
lastRegion->max().x() = cmin.x();
// back-face bbox
// (starts as a copy of the front-face slab so y/z extents carry over)
regions.push_back(*lastRegion);
lastRegion = &regions.back();
lastRegion->min().x() = cmax.x();
lastRegion->max().x() = bbox.max().x();
--cmax.x();
++cmin.x();
// bottom-face bbox
regions.push_back(*lastRegion);
lastRegion = &regions.back();
lastRegion->min().x() = cmin.x();
lastRegion->max().x() = cmax.x();
lastRegion->max().y() = cmin.y();
// top-face bbox
regions.push_back(*lastRegion);
lastRegion = &regions.back();
lastRegion->min().y() = cmax.y();
lastRegion->max().y() = bbox.max().y();
}
/// @brief Range filter that appends to @a indices every point whose
/// world-space position maps into the index-space box @a bbox.
/// @note Filtering is always per point; the @a ranges deque is held for
/// interface parity with RadialRangeFilter but is not written to here.
template<typename PointArray, typename IndexT>
struct BBoxFilter
{
using PosType = typename PointArray::PosType;
using ScalarType = typename PosType::value_type;
using Range = std::pair<const IndexT*, const IndexT*>;
using RangeDeque = std::deque<Range>;
using IndexDeque = std::deque<IndexT>;
BBoxFilter(RangeDeque& ranges, IndexDeque& indices, const BBoxd& bbox,
const PointArray& points, const math::Transform& xform)
: mRanges(ranges)
, mIndices(indices)
, mRegion(bbox)
, mPoints(points)
, mMap(*xform.baseMap())
{
}
/// Filter every active voxel of @a leaf.
template <typename LeafNodeType>
void filterLeafNode(const LeafNodeType& leaf)
{
typename LeafNodeType::ValueOnCIter iter;
const IndexT
*begin = static_cast<IndexT*>(nullptr),
*end = static_cast<IndexT*>(nullptr);
for (iter = leaf.cbeginValueOn(); iter; ++iter) {
leaf.getIndices(iter.pos(), begin, end);
filterVoxel(iter.getCoord(), begin, end);
}
}
/// Test each point in [begin, end) individually against the region.
void filterVoxel(const Coord&, const IndexT* begin, const IndexT* end)
{
PosType vec;
for (; begin < end; ++begin) {
mPoints.getPos(*begin, vec);
// Map the world-space point back to index space before the box test.
if (mRegion.isInside(mMap.applyInverseMap(vec))) {
mIndices.push_back(*begin);
}
}
}
private:
RangeDeque& mRanges;
IndexDeque& mIndices;
const BBoxd mRegion;
const PointArray& mPoints;
const math::MapBase& mMap;
};
/// @brief Range filter for radial queries.  Classifies leaf nodes and voxels
/// against precomputed squared distance thresholds: regions entirely inside
/// the sphere are accepted wholesale as index ranges, regions entirely
/// outside are rejected, and only the boundary shell is tested per point.
template<typename PointArray, typename IndexT>
struct RadialRangeFilter
{
using PosType = typename PointArray::PosType;
using ScalarType = typename PosType::value_type;
using Range = std::pair<const IndexT*, const IndexT*>;
using RangeDeque = std::deque<Range>;
using IndexDeque = std::deque<IndexT>;
/// @param ranges   output deque of wholesale-accepted index ranges
/// @param indices  output deque of individually accepted point indices
/// @param xyz      index-space query center
/// @param radius   index-space query radius
/// @param points   world-space point array
/// @param xform    linear, uniform-scale transform (cubical voxels)
/// @param leafNodeDim       leaf node dimension in voxels
/// @param subvoxelAccuracy  if false, boundary voxels are accepted without
///                          testing the individual points
RadialRangeFilter(RangeDeque& ranges, IndexDeque& indices, const Vec3d& xyz, double radius,
const PointArray& points, const math::Transform& xform,
const double leafNodeDim, const bool subvoxelAccuracy)
: mRanges(ranges)
, mIndices(indices)
, mCenter(xyz)
, mWSCenter(xform.indexToWorld(xyz))
, mVoxelDist1(ScalarType(0.0))
, mVoxelDist2(ScalarType(0.0))
, mLeafNodeDist1(ScalarType(0.0))
, mLeafNodeDist2(ScalarType(0.0))
, mWSRadiusSqr(ScalarType(radius * xform.voxelSize()[0]))
, mPoints(points)
, mSubvoxelAccuracy(subvoxelAccuracy)
{
// Thresholds are squared distances from the query center:
//   *Dist1 = (bounding radius of cell + query radius)^2 -> reject beyond
//   *Dist2 = (query radius - bounding radius of cell)^2 -> accept within
const ScalarType voxelRadius = ScalarType(std::sqrt(3.0) * 0.5);
mVoxelDist1 = voxelRadius + ScalarType(radius);
mVoxelDist1 *= mVoxelDist1;
if (radius > voxelRadius) {
mVoxelDist2 = ScalarType(radius) - voxelRadius;
mVoxelDist2 *= mVoxelDist2;
}
const ScalarType leafNodeRadius = ScalarType(leafNodeDim * std::sqrt(3.0) * 0.5);
mLeafNodeDist1 = leafNodeRadius + ScalarType(radius);
mLeafNodeDist1 *= mLeafNodeDist1;
if (radius > leafNodeRadius) {
mLeafNodeDist2 = ScalarType(radius) - leafNodeRadius;
mLeafNodeDist2 *= mLeafNodeDist2;
}
mWSRadiusSqr *= mWSRadiusSqr;
}
/// Classify @a leaf: reject, accept wholesale, or recurse per voxel.
template <typename LeafNodeType>
void filterLeafNode(const LeafNodeType& leaf)
{
{
// Distance from the query center to the leaf node's center.
const Coord& ijk = leaf.origin();
PosType vec;
vec[0] = ScalarType(ijk[0]);
vec[1] = ScalarType(ijk[1]);
vec[2] = ScalarType(ijk[2]);
vec += ScalarType(LeafNodeType::DIM - 1) * 0.5;
vec -= mCenter;
const ScalarType dist = vec.lengthSqr();
if (dist > mLeafNodeDist1) return; // leaf entirely outside the sphere
if (mLeafNodeDist2 > 0.0 && dist < mLeafNodeDist2) {
// Leaf entirely inside: accept all of its indices as one range.
const IndexT* begin = &leaf.indices().front();
mRanges.push_back(Range(begin, begin + leaf.indices().size()));
return;
}
}
typename LeafNodeType::ValueOnCIter iter;
const IndexT
*begin = static_cast<IndexT*>(nullptr),
*end = static_cast<IndexT*>(nullptr);
for (iter = leaf.cbeginValueOn(); iter; ++iter) {
leaf.getIndices(iter.pos(), begin, end);
filterVoxel(iter.getCoord(), begin, end);
}
}
/// Classify a voxel; only boundary voxels fall through to per-point tests.
void filterVoxel(const Coord& ijk, const IndexT* begin, const IndexT* end)
{
PosType vec;
{
vec[0] = mCenter[0] - ScalarType(ijk[0]);
vec[1] = mCenter[1] - ScalarType(ijk[1]);
vec[2] = mCenter[2] - ScalarType(ijk[2]);
const ScalarType dist = vec.lengthSqr();
if (dist > mVoxelDist1) return; // voxel entirely outside
if (!mSubvoxelAccuracy || (mVoxelDist2 > 0.0 && dist < mVoxelDist2)) {
// Accept the whole voxel; merge with the previous range if contiguous.
if (!mRanges.empty() && mRanges.back().second == begin) {
mRanges.back().second = end;
} else {
mRanges.push_back(Range(begin, end));
}
return;
}
}
// Boundary voxel: test each point in world space.
while (begin < end) {
mPoints.getPos(*begin, vec);
vec = mWSCenter - vec;
if (vec.lengthSqr() < mWSRadiusSqr) {
mIndices.push_back(*begin);
}
++begin;
}
}
private:
RangeDeque& mRanges;
IndexDeque& mIndices;
const PosType mCenter, mWSCenter;
ScalarType mVoxelDist1, mVoxelDist2, mLeafNodeDist1, mLeafNodeDist2, mWSRadiusSqr;
const PointArray& mPoints;
const bool mSubvoxelAccuracy;
}; // struct RadialRangeFilter
////////////////////////////////////////
/// @brief Apply @a filter to every voxel of @a leaf whose coordinate lies in
/// the inclusive index range [@a min, @a max].
/// @details The per-voxel index range is recovered from the leaf's cumulative
/// offsets: [getValue(pos-1), getValue(pos)).
template<typename RangeFilterType, typename LeafNodeType>
inline void
filteredPointIndexSearchVoxels(RangeFilterType& filter,
const LeafNodeType& leaf, const Coord& min, const Coord& max)
{
using PointIndexT = typename LeafNodeType::ValueType;
Index xPos(0), yPos(0), pos(0);
Coord ijk(0);
const PointIndexT* dataPtr = &leaf.indices().front();
PointIndexT beginOffset, endOffset;
for (ijk[0] = min[0]; ijk[0] <= max[0]; ++ijk[0]) {
// Masking with DIM-1 converts global coordinates to leaf-local offsets.
xPos = (ijk[0] & (LeafNodeType::DIM - 1u)) << (2 * LeafNodeType::LOG2DIM);
for (ijk[1] = min[1]; ijk[1] <= max[1]; ++ijk[1]) {
yPos = xPos + ((ijk[1] & (LeafNodeType::DIM - 1u)) << LeafNodeType::LOG2DIM);
for (ijk[2] = min[2]; ijk[2] <= max[2]; ++ijk[2]) {
pos = yPos + (ijk[2] & (LeafNodeType::DIM - 1u));
beginOffset = (pos == 0 ? PointIndexT(0) : leaf.getValue(pos - 1));
endOffset = leaf.getValue(pos);
if (endOffset > beginOffset) { // skip voxels that hold no points
filter.filterVoxel(ijk, dataPtr + beginOffset, dataPtr + endOffset);
}
}
}
}
}
/// @brief Visit every leaf node intersecting @a bbox and feed it to @a filter:
/// leaves fully inside the box are passed wholesale via filterLeafNode(),
/// partially covered leaves are processed voxel by voxel.
template<typename RangeFilterType, typename ConstAccessor>
inline void
filteredPointIndexSearch(RangeFilterType& filter, ConstAccessor& acc, const CoordBBox& bbox)
{
using LeafNodeType = typename ConstAccessor::TreeType::LeafNodeType;
Coord ijk(0), ijkMax(0), ijkA(0), ijkB(0);
// Snap the search bounds down to leaf-node origins (DIM is a power of two).
const Coord leafMin = bbox.min() & ~(LeafNodeType::DIM - 1);
const Coord leafMax = bbox.max() & ~(LeafNodeType::DIM - 1);
for (ijk[0] = leafMin[0]; ijk[0] <= leafMax[0]; ijk[0] += LeafNodeType::DIM) {
for (ijk[1] = leafMin[1]; ijk[1] <= leafMax[1]; ijk[1] += LeafNodeType::DIM) {
for (ijk[2] = leafMin[2]; ijk[2] <= leafMax[2]; ijk[2] += LeafNodeType::DIM) {
if (const LeafNodeType* leaf = acc.probeConstLeaf(ijk)) {
ijkMax = ijk;
ijkMax.offset(LeafNodeType::DIM - 1);
// intersect leaf bbox with search region.
ijkA = Coord::maxComponent(bbox.min(), ijk);
ijkB = Coord::minComponent(bbox.max(), ijkMax);
if (ijkA != ijk || ijkB != ijkMax) {
filteredPointIndexSearchVoxels(filter, *leaf, ijkA, ijkB);
} else { // leaf bbox is inside the search region
filter.filterLeafNode(*leaf);
}
}
}
}
}
}
////////////////////////////////////////
/// @brief Collect the index ranges of all voxels of @a leaf inside the
/// inclusive range [@a min, @a max], merging ranges that are contiguous in
/// the leaf's index array.
/// @details Each z-run [min[2], max[2]] maps to one contiguous span of the
/// cumulative offset table, so it is fetched with a single getValue(pos+zStride).
template<typename RangeDeque, typename LeafNodeType>
inline void
pointIndexSearchVoxels(RangeDeque& rangeList,
const LeafNodeType& leaf, const Coord& min, const Coord& max)
{
using PointIndexT = typename LeafNodeType::ValueType;
using IntT = typename PointIndexT::IntType;
using Range = typename RangeDeque::value_type;
Index xPos(0), pos(0), zStride = Index(max[2] - min[2]);
const PointIndexT* dataPtr = &leaf.indices().front();
// previousOffset starts past any valid offset so the first compare fails.
PointIndexT beginOffset(0), endOffset(0),
previousOffset(static_cast<IntT>(leaf.indices().size() + 1u));
Coord ijk(0);
for (ijk[0] = min[0]; ijk[0] <= max[0]; ++ijk[0]) {
xPos = (ijk[0] & (LeafNodeType::DIM - 1u)) << (2 * LeafNodeType::LOG2DIM);
for (ijk[1] = min[1]; ijk[1] <= max[1]; ++ijk[1]) {
pos = xPos + ((ijk[1] & (LeafNodeType::DIM - 1u)) << LeafNodeType::LOG2DIM);
pos += (min[2] & (LeafNodeType::DIM - 1u));
beginOffset = (pos == 0 ? PointIndexT(0) : leaf.getValue(pos - 1));
endOffset = leaf.getValue(pos+zStride);
if (endOffset > beginOffset) {
// Extend the previous range when this run continues it exactly.
if (beginOffset == previousOffset) {
rangeList.back().second = dataPtr + endOffset;
} else {
rangeList.push_back(Range(dataPtr + beginOffset, dataPtr + endOffset));
}
previousOffset = endOffset;
}
}
}
}
/// @brief Collect into @a rangeList the index ranges of all points whose
/// voxels lie inside @a bbox; leaves fully covered by the box contribute
/// their entire index array as a single range.
template<typename RangeDeque, typename ConstAccessor>
inline void
pointIndexSearch(RangeDeque& rangeList, ConstAccessor& acc, const CoordBBox& bbox)
{
using LeafNodeType = typename ConstAccessor::TreeType::LeafNodeType;
using PointIndexT = typename LeafNodeType::ValueType;
using Range = typename RangeDeque::value_type;
Coord ijk(0), ijkMax(0), ijkA(0), ijkB(0);
// Snap the search bounds down to leaf-node origins (DIM is a power of two).
const Coord leafMin = bbox.min() & ~(LeafNodeType::DIM - 1);
const Coord leafMax = bbox.max() & ~(LeafNodeType::DIM - 1);
for (ijk[0] = leafMin[0]; ijk[0] <= leafMax[0]; ijk[0] += LeafNodeType::DIM) {
for (ijk[1] = leafMin[1]; ijk[1] <= leafMax[1]; ijk[1] += LeafNodeType::DIM) {
for (ijk[2] = leafMin[2]; ijk[2] <= leafMax[2]; ijk[2] += LeafNodeType::DIM) {
if (const LeafNodeType* leaf = acc.probeConstLeaf(ijk)) {
ijkMax = ijk;
ijkMax.offset(LeafNodeType::DIM - 1);
// intersect leaf bbox with search region.
ijkA = Coord::maxComponent(bbox.min(), ijk);
ijkB = Coord::minComponent(bbox.max(), ijkMax);
if (ijkA != ijk || ijkB != ijkMax) {
pointIndexSearchVoxels(rangeList, *leaf, ijkA, ijkB);
} else {
// leaf bbox is inside the search region, add all indices.
const PointIndexT* begin = &leaf->indices().front();
rangeList.push_back(Range(begin, (begin + leaf->indices().size())));
}
}
}
}
}
}
} // namespace point_index_grid_internal
// PointIndexIterator implementation
/// Construct an empty (exhausted) iterator.
template<typename TreeType>
inline
PointIndexIterator<TreeType>::PointIndexIterator()
: mRange(static_cast<ValueType*>(nullptr), static_cast<ValueType*>(nullptr))
, mRangeList()
, mIter(mRangeList.begin())
, mIndexArray()
, mIndexArraySize(0)
{
}
/// @brief Copy constructor.  Deep-copies the range list and the secondary
/// index array; the new iterator is rewound to the start of its own list.
template<typename TreeType>
inline
PointIndexIterator<TreeType>::PointIndexIterator(const PointIndexIterator& rhs)
: mRange(rhs.mRange)
, mRangeList(rhs.mRangeList)
, mIter(mRangeList.begin())
, mIndexArray()
, mIndexArraySize(rhs.mIndexArraySize)
{
if (rhs.mIndexArray) {
// Deep copy so both iterators own independent index storage.
mIndexArray.reset(new ValueType[mIndexArraySize]);
memcpy(mIndexArray.get(), rhs.mIndexArray.get(), mIndexArraySize * sizeof(ValueType));
}
}
/// @brief Copy assignment.  Deep-copies the range list and secondary index
/// array (self-assignment safe); the iterator is rewound to its own list.
template<typename TreeType>
inline PointIndexIterator<TreeType>&
PointIndexIterator<TreeType>::operator=(const PointIndexIterator& rhs)
{
if (&rhs != this) {
mRange = rhs.mRange;
mRangeList = rhs.mRangeList;
mIter = mRangeList.begin();
mIndexArray.reset();
mIndexArraySize = rhs.mIndexArraySize;
if (rhs.mIndexArray) {
// Deep copy so both iterators own independent index storage.
mIndexArray.reset(new ValueType[mIndexArraySize]);
memcpy(mIndexArray.get(), rhs.mIndexArray.get(), mIndexArraySize * sizeof(ValueType));
}
}
return *this;
}
/// @brief Construct an iterator over the point indices of the single voxel
/// @a ijk; empty if the voxel's leaf is missing or the voxel holds no points.
template<typename TreeType>
inline
PointIndexIterator<TreeType>::PointIndexIterator(const Coord& ijk, ConstAccessor& acc)
: mRange(static_cast<ValueType*>(nullptr), static_cast<ValueType*>(nullptr))
, mRangeList()
, mIter(mRangeList.begin())
, mIndexArray()
, mIndexArraySize(0)
{
const LeafNodeType* leaf = acc.probeConstLeaf(ijk);
if (leaf && leaf->getIndices(ijk, mRange.first, mRange.second)) {
mRangeList.push_back(mRange);
mIter = mRangeList.begin();
}
}
/// @brief Construct an iterator over the point indices of all voxels inside
/// the index-space bounding box @a bbox.
template<typename TreeType>
inline
PointIndexIterator<TreeType>::PointIndexIterator(const CoordBBox& bbox, ConstAccessor& acc)
: mRange(static_cast<ValueType*>(nullptr), static_cast<ValueType*>(nullptr))
, mRangeList()
, mIter(mRangeList.begin())
, mIndexArray()
, mIndexArraySize(0)
{
point_index_grid_internal::pointIndexSearch(mRangeList, acc, bbox);
if (!mRangeList.empty()) {
mIter = mRangeList.begin();
mRange = mRangeList.front();
}
}
/// @brief Rewind the iterator to the first point index.
/// @details Prefers the tree-resident range list; falls back to the
/// secondary (filtered) index array, and otherwise becomes empty.
template<typename TreeType>
inline void
PointIndexIterator<TreeType>::reset()
{
    mIter = mRangeList.begin();
    if (!mRangeList.empty()) {
        mRange = mRangeList.front();
    } else if (mIndexArray) {
        const ValueType* data = mIndexArray.get();
        mRange = Range(data, data + mIndexArraySize);
    } else {
        mRange = Range(nullptr, nullptr);
    }
}
/// @brief Advance to the next point index.
/// @details Steps within the current range first; when a range is exhausted,
/// moves on to the next range in the list, and after the last range falls
/// through to the secondary (filtered) index array, if one exists.
template<typename TreeType>
inline void
PointIndexIterator<TreeType>::increment()
{
++mRange.first;
if (mRange.first >= mRange.second && mIter != mRangeList.end()) {
++mIter;
if (mIter != mRangeList.end()) {
mRange = *mIter;
} else if (mIndexArray) {
// All tree-resident ranges consumed; switch to the filtered indices.
mRange.first = mIndexArray.get();
mRange.second = mRange.first + mIndexArraySize;
}
}
}
/// @brief Advance to the next item.
/// @return @c true if the iterator is still valid after advancing.
template<typename TreeType>
inline bool
PointIndexIterator<TreeType>::next()
{
    const bool hasItems = this->test();
    if (!hasItems) return false;
    this->increment();
    return this->test();
}
/// @brief Return the total number of point indices this iterator visits.
/// @details Sums the lengths of all tree-resident ranges and adds the size
/// of the secondary (filtered) index array.
template<typename TreeType>
inline size_t
PointIndexIterator<TreeType>::size() const
{
    size_t total = mIndexArraySize;
    for (const Range& range : mRangeList) {
        total += size_t(range.second - range.first);
    }
    return total;
}
/// @brief Reset this iterator to the empty state, releasing the secondary
/// index storage and discarding all collected ranges.
template<typename TreeType>
inline void
PointIndexIterator<TreeType>::clear()
{
    mRange = Range(nullptr, nullptr);
    mRangeList.clear();
    mIter = mRangeList.end();
    mIndexArray.reset();
    mIndexArraySize = 0;
}
/// @brief Clear the iterator and repopulate it with the point indices of the
/// single voxel @a ijk; stays empty if the voxel's leaf is missing or the
/// voxel holds no points.
template<typename TreeType>
inline void
PointIndexIterator<TreeType>::searchAndUpdate(const Coord& ijk, ConstAccessor& acc)
{
this->clear();
const LeafNodeType* leaf = acc.probeConstLeaf(ijk);
if (leaf && leaf->getIndices(ijk, mRange.first, mRange.second)) {
mRangeList.push_back(mRange);
mIter = mRangeList.begin();
}
}
/// @brief Clear the iterator and repopulate it with the point indices of all
/// voxels inside the index-space bounding box @a bbox.
template<typename TreeType>
inline void
PointIndexIterator<TreeType>::searchAndUpdate(const CoordBBox& bbox, ConstAccessor& acc)
{
this->clear();
point_index_grid_internal::pointIndexSearch(mRangeList, acc, bbox);
if (!mRangeList.empty()) {
mIter = mRangeList.begin();
mRange = mRangeList.front();
}
}
/// @brief Clear the iterator and repopulate it with an index-space box query
/// filtered against the actual point positions.
/// @details For boxes more than two voxels thick in every dimension, indices
/// in the strictly interior region are accepted without per-point tests;
/// only the one-voxel-thick boundary shell is filtered point by point.
template<typename TreeType>
template<typename PointArray>
inline void
PointIndexIterator<TreeType>::searchAndUpdate(const BBoxd& bbox, ConstAccessor& acc,
const PointArray& points, const math::Transform& xform)
{
this->clear();
std::vector<CoordBBox> searchRegions;
CoordBBox region(Coord::round(bbox.min()), Coord::round(bbox.max()));
const Coord dim = region.dim();
const int minExtent = std::min(dim[0], std::min(dim[1], dim[2]));
if (minExtent > 2) {
// collect indices that don't need to be tested
CoordBBox ibox = region;
ibox.expand(-1);
point_index_grid_internal::pointIndexSearch(mRangeList, acc, ibox);
// define regions for the filtered search
ibox.expand(1);
point_index_grid_internal::constructExclusiveRegions(searchRegions, region, ibox);
} else {
searchRegions.push_back(region);
}
// filtered search
std::deque<ValueType> filteredIndices;
point_index_grid_internal::BBoxFilter<PointArray, ValueType>
filter(mRangeList, filteredIndices, bbox, points, xform);
for (size_t n = 0, N = searchRegions.size(); n < N; ++n) {
point_index_grid_internal::filteredPointIndexSearch(filter, acc, searchRegions[n]);
}
// Move the individually accepted indices into the secondary array.
point_index_grid_internal::dequeToArray(filteredIndices, mIndexArray, mIndexArraySize);
this->reset();
}
/// @brief Clear the iterator and repopulate it with an index-space radial
/// query filtered against the actual point positions.
/// @details Indices inside the sphere's inscribed box (when it is large
/// enough) are accepted wholesale; the remaining shell is processed with a
/// RadialRangeFilter, optionally testing individual points.
template<typename TreeType>
template<typename PointArray>
inline void
PointIndexIterator<TreeType>::searchAndUpdate(const Vec3d& center, double radius,
ConstAccessor& acc, const PointArray& points, const math::Transform& xform,
bool subvoxelAccuracy)
{
this->clear();
std::vector<CoordBBox> searchRegions;
// bounding box
CoordBBox bbox(
Coord::round(Vec3d(center[0] - radius, center[1] - radius, center[2] - radius)),
Coord::round(Vec3d(center[0] + radius, center[1] + radius, center[2] + radius)));
bbox.expand(1);
// Half-extent of the cube inscribed in the sphere: radius / sqrt(3).
const double iRadius = radius * double(1.0 / std::sqrt(3.0));
if (iRadius > 2.0) {
// inscribed box
CoordBBox ibox(
Coord::round(Vec3d(center[0] - iRadius, center[1] - iRadius, center[2] - iRadius)),
Coord::round(Vec3d(center[0] + iRadius, center[1] + iRadius, center[2] + iRadius)));
ibox.expand(-1);
// collect indices that don't need to be tested
point_index_grid_internal::pointIndexSearch(mRangeList, acc, ibox);
ibox.expand(1);
point_index_grid_internal::constructExclusiveRegions(searchRegions, bbox, ibox);
} else {
searchRegions.push_back(bbox);
}
// filtered search
std::deque<ValueType> filteredIndices;
const double leafNodeDim = double(TreeType::LeafNodeType::DIM);
using FilterT = point_index_grid_internal::RadialRangeFilter<PointArray, ValueType>;
FilterT filter(mRangeList, filteredIndices,
center, radius, points, xform, leafNodeDim, subvoxelAccuracy);
for (size_t n = 0, N = searchRegions.size(); n < N; ++n) {
point_index_grid_internal::filteredPointIndexSearch(filter, acc, searchRegions[n]);
}
// Move the individually accepted indices into the secondary array.
point_index_grid_internal::dequeToArray(filteredIndices, mIndexArray, mIndexArraySize);
this->reset();
}
/// @brief Perform a world-space box query by mapping the box into index
/// space and delegating to the index-space overload.
template<typename TreeType>
template<typename PointArray>
inline void
PointIndexIterator<TreeType>::worldSpaceSearchAndUpdate(const BBoxd& bbox, ConstAccessor& acc,
    const PointArray& points, const math::Transform& xform)
{
    // Transform both corners of the world-space box into index space.
    const BBoxd indexBBox(xform.worldToIndex(bbox.min()), xform.worldToIndex(bbox.max()));
    this->searchAndUpdate(indexBBox, acc, points, xform);
}
/// @brief Perform a world-space radial query by mapping the center and radius
/// into index space and delegating to the index-space overload.
/// @note Uses a single voxel-size component to convert the radius, which
/// assumes a linear, uniform-scale transform (cubical voxels).
template<typename TreeType>
template<typename PointArray>
inline void
PointIndexIterator<TreeType>::worldSpaceSearchAndUpdate(const Vec3d& center, double radius,
    ConstAccessor& acc, const PointArray& points, const math::Transform& xform,
    bool subvoxelAccuracy)
{
    const Vec3d indexCenter = xform.worldToIndex(center);
    const double indexRadius = radius / xform.voxelSize()[0];
    this->searchAndUpdate(indexCenter, indexRadius, acc, points, xform, subvoxelAccuracy);
}
////////////////////////////////////////
// PointIndexFilter implementation
/// @brief Constructor.  Caches a value accessor for @a tree, a copy of
/// @a xform and the reciprocal voxel size (assumes cubical voxels).
template<typename PointArray, typename TreeType>
inline
PointIndexFilter<PointArray, TreeType>::PointIndexFilter(
const PointArray& points, const TreeType& tree, const math::Transform& xform)
: mPoints(&points), mAcc(tree), mXform(xform), mInvVoxelSize(1.0/xform.voxelSize()[0])
{
}
/// @brief Thread-safe copy constructor: builds a fresh accessor over the same
/// tree so each copy can be used concurrently.
template<typename PointArray, typename TreeType>
inline
PointIndexFilter<PointArray, TreeType>::PointIndexFilter(const PointIndexFilter& rhs)
: mPoints(rhs.mPoints)
, mAcc(rhs.mAcc.tree())
, mXform(rhs.mXform)
, mInvVoxelSize(rhs.mInvVoxelSize)
{
}
/// @brief Radial query around @a center that invokes
/// @c op(distSqr, pointIndex) for every point strictly inside @a radius.
/// @details Small radii (fewer than 8 voxels) use a simple cell-centered box
/// search; larger radii use the world-space radial search with coarse
/// (per-voxel) accuracy, since each candidate is distance-tested here anyway.
template<typename PointArray, typename TreeType>
template<typename FilterType>
inline void
PointIndexFilter<PointArray, TreeType>::searchAndApply(
const PosType& center, ScalarType radius, FilterType& op)
{
if (radius * mInvVoxelSize < ScalarType(8.0)) {
mIter.searchAndUpdate(openvdb::CoordBBox(
mXform.worldToIndexCellCentered(center - radius),
mXform.worldToIndexCellCentered(center + radius)), mAcc);
} else {
mIter.worldSpaceSearchAndUpdate(
center, radius, mAcc, *mPoints, mXform, /*subvoxelAccuracy=*/false);
}
// Exact distance test for every candidate index.
const ScalarType radiusSqr = radius * radius;
ScalarType distSqr = 0.0;
PosType pos;
for (; mIter; ++mIter) {
mPoints->getPos(*mIter, pos);
pos -= center;
distSqr = pos.lengthSqr();
if (distSqr < radiusSqr) {
op(distSqr, *mIter);
}
}
}
////////////////////////////////////////
/// @brief Build a new point index grid that partitions @a points using the
/// given transform.
/// @return the newly constructed grid (empty tree when there are no points)
template<typename GridT, typename PointArrayT>
inline typename GridT::Ptr
createPointIndexGrid(const PointArrayT& points, const math::Transform& xform)
{
    // Start from an empty grid with a zero background value.
    typename GridT::Ptr grid = GridT::create(typename GridT::ValueType(0));
    grid->setTransform(xform.copy());
    // Only build the tree when there is at least one point to bucket.
    if (points.size() != 0) {
        point_index_grid_internal::constructPointTree(
            grid->tree(), grid->transform(), points);
    }
    return grid;
}
template<typename GridT, typename PointArrayT>
inline typename GridT::Ptr
createPointIndexGrid(const PointArrayT& points, double voxelSize)
{
math::Transform::Ptr xform = math::Transform::createLinearTransform(voxelSize);
return createPointIndexGrid<GridT>(points, *xform);
}
/// @brief Return @c true if @a grid is a valid spatial partitioning of
/// @a points: the total index count matches and every stored index still maps
/// to the voxel that contains its point (checked in parallel).
template<typename PointArrayT, typename GridT>
inline bool
isValidPartition(const PointArrayT& points, const GridT& grid)
{
tree::LeafManager<const typename GridT::TreeType> leafs(grid.tree());
// Cheap rejection: index counts must agree before any per-point checks.
size_t pointCount = 0;
for (size_t n = 0, N = leafs.leafCount(); n < N; ++n) {
pointCount += leafs.leaf(n).indices().size();
}
if (points.size() != pointCount) {
return false;
}
tbb::atomic<bool> changed;
changed = false;
point_index_grid_internal::ValidPartitioningOp<PointArrayT>
op(changed, points, grid.transform());
leafs.foreach(op);
return !bool(changed);
}
/// @brief Return @a grid if it still partitions @a points correctly,
/// otherwise construct and return a freshly partitioned replacement that
/// reuses the grid's transform.
template<typename GridT, typename PointArrayT>
inline typename GridT::ConstPtr
getValidPointIndexGrid(const PointArrayT& points, const typename GridT::ConstPtr& grid)
{
    if (!isValidPartition(points, *grid)) {
        // Stale partitioning: rebuild with the same transform.
        return createPointIndexGrid<GridT>(points, grid->transform());
    }
    return grid;
}
/// @brief Return @a grid if it still partitions @a points correctly,
/// otherwise construct and return a freshly partitioned replacement that
/// reuses the grid's transform.
template<typename GridT, typename PointArrayT>
inline typename GridT::Ptr
getValidPointIndexGrid(const PointArrayT& points, const typename GridT::Ptr& grid)
{
    if (!isValidPartition(points, *grid)) {
        // Stale partitioning: rebuild with the same transform.
        return createPointIndexGrid<GridT>(points, grid->transform());
    }
    return grid;
}
////////////////////////////////////////
template<typename T, Index Log2Dim>
struct PointIndexLeafNode : public tree::LeafNode<T, Log2Dim>
{
using LeafNodeType = PointIndexLeafNode<T, Log2Dim>;
using Ptr = SharedPtr<PointIndexLeafNode>;
using ValueType = T;
using IndexArray = std::vector<ValueType>;
IndexArray& indices() { return mIndices; }
const IndexArray& indices() const { return mIndices; }
bool getIndices(const Coord& ijk, const ValueType*& begin, const ValueType*& end) const;
bool getIndices(Index offset, const ValueType*& begin, const ValueType*& end) const;
void setOffsetOn(Index offset, const ValueType& val);
void setOffsetOnly(Index offset, const ValueType& val);
bool isEmpty(const CoordBBox& bbox) const;
private:
IndexArray mIndices;
////////////////////////////////////////
// The following methods had to be copied from the LeafNode class
// to make the derived PointIndexLeafNode class compatible with the tree structure.
public:
using BaseLeaf = tree::LeafNode<T, Log2Dim>;
using NodeMaskType = util::NodeMask<Log2Dim>;
using BaseLeaf::LOG2DIM;
using BaseLeaf::TOTAL;
using BaseLeaf::DIM;
using BaseLeaf::NUM_VALUES;
using BaseLeaf::NUM_VOXELS;
using BaseLeaf::SIZE;
using BaseLeaf::LEVEL;
/// Default constructor
PointIndexLeafNode() : BaseLeaf(), mIndices() {}
explicit
PointIndexLeafNode(const Coord& coords, const T& value = zeroVal<T>(), bool active = false)
: BaseLeaf(coords, value, active)
, mIndices()
{
}
PointIndexLeafNode(PartialCreate, const Coord& coords,
const T& value = zeroVal<T>(), bool active = false)
: BaseLeaf(PartialCreate(), coords, value, active)
, mIndices()
{
}
/// Deep copy constructor
PointIndexLeafNode(const PointIndexLeafNode& rhs) : BaseLeaf(rhs), mIndices(rhs.mIndices) {}
/// @brief Return @c true if the given node (which may have a different @c ValueType
/// than this node) has the same active value topology as this node.
template<typename OtherType, Index OtherLog2Dim>
bool hasSameTopology(const PointIndexLeafNode<OtherType, OtherLog2Dim>* other) const {
return BaseLeaf::hasSameTopology(other);
}
/// Check for buffer, state and origin equivalence.
bool operator==(const PointIndexLeafNode& other) const { return BaseLeaf::operator==(other); }
bool operator!=(const PointIndexLeafNode& other) const { return !(other == *this); }
template<MergePolicy Policy> void merge(const PointIndexLeafNode& rhs) {
BaseLeaf::merge<Policy>(rhs);
}
template<MergePolicy Policy> void merge(const ValueType& tileValue, bool tileActive) {
BaseLeaf::template merge<Policy>(tileValue, tileActive);
}
template<MergePolicy Policy>
void merge(const PointIndexLeafNode& other,
const ValueType& /*bg*/, const ValueType& /*otherBG*/)
{
BaseLeaf::template merge<Policy>(other);
}
/// @brief No-op: a leaf node has no child nodes to add.
void addLeaf(PointIndexLeafNode*) {}
template<typename AccessorT>
void addLeafAndCache(PointIndexLeafNode*, AccessorT&) {}
//@{
/// @brief Return a pointer to this node.
PointIndexLeafNode* touchLeaf(const Coord&) { return this; }
template<typename AccessorT>
PointIndexLeafNode* touchLeafAndCache(const Coord&, AccessorT&) { return this; }
/// @brief Return this node cast to @c NodeT if @c NodeT is this leaf type,
/// otherwise return @c nullptr.
template<typename NodeT, typename AccessorT>
NodeT* probeNodeAndCache(const Coord&, AccessorT&)
{
OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
// The type comparison is resolved at compile time, hence the warning guard.
if (!(std::is_same<NodeT, PointIndexLeafNode>::value)) return nullptr;
return reinterpret_cast<NodeT*>(this);
OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}
PointIndexLeafNode* probeLeaf(const Coord&) { return this; }
template<typename AccessorT>
PointIndexLeafNode* probeLeafAndCache(const Coord&, AccessorT&) { return this; }
//@}
//@{
/// @brief Return a @const pointer to this node.
const PointIndexLeafNode* probeConstLeaf(const Coord&) const { return this; }
template<typename AccessorT>
const PointIndexLeafNode* probeConstLeafAndCache(const Coord&, AccessorT&) const {return this;}
template<typename AccessorT>
const PointIndexLeafNode* probeLeafAndCache(const Coord&, AccessorT&) const { return this; }
const PointIndexLeafNode* probeLeaf(const Coord&) const { return this; }
/// @brief Const variant of probeNodeAndCache(); see above.
template<typename NodeT, typename AccessorT>
const NodeT* probeConstNodeAndCache(const Coord&, AccessorT&) const
{
OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
if (!(std::is_same<NodeT, PointIndexLeafNode>::value)) return nullptr;
return reinterpret_cast<const NodeT*>(this);
OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}
//@}
// I/O methods
/// Read the voxel buffer, mask and point-index array from the stream.
void readBuffers(std::istream& is, bool fromHalf = false);
/// Read buffers, clipping voxel values to @a bbox (see the definition for caveats).
void readBuffers(std::istream& is, const CoordBBox&, bool fromHalf = false);
/// Write the voxel buffer, mask and point-index array to the stream.
void writeBuffers(std::ostream& os, bool toHalf = false) const;
/// Return the memory footprint of this node in bytes.
Index64 memUsage() const;
////////////////////////////////////////
// Disable all write methods to avoid unintentional changes
// to the point-array offsets.
/// @brief All mutating methods below funnel through this helper so that any
/// attempt to modify voxel values trips an assertion in debug builds.
void assertNonmodifiable() {
assert(false && "Cannot modify voxel values in a PointIndexTree.");
}
void setActiveState(const Coord&, bool) { assertNonmodifiable(); }
void setActiveState(Index, bool) { assertNonmodifiable(); }
void setValueOnly(const Coord&, const ValueType&) { assertNonmodifiable(); }
void setValueOnly(Index, const ValueType&) { assertNonmodifiable(); }
void setValueOff(const Coord&) { assertNonmodifiable(); }
void setValueOff(Index) { assertNonmodifiable(); }
void setValueOff(const Coord&, const ValueType&) { assertNonmodifiable(); }
void setValueOff(Index, const ValueType&) { assertNonmodifiable(); }
void setValueOn(const Coord&) { assertNonmodifiable(); }
void setValueOn(Index) { assertNonmodifiable(); }
void setValueOn(const Coord&, const ValueType&) { assertNonmodifiable(); }
void setValueOn(Index, const ValueType&) { assertNonmodifiable(); }
void setValue(const Coord&, const ValueType&) { assertNonmodifiable(); }
void setValuesOn() { assertNonmodifiable(); }
void setValuesOff() { assertNonmodifiable(); }
template<typename ModifyOp>
void modifyValue(Index, const ModifyOp&) { assertNonmodifiable(); }
template<typename ModifyOp>
void modifyValue(const Coord&, const ModifyOp&) { assertNonmodifiable(); }
template<typename ModifyOp>
void modifyValueAndActiveState(const Coord&, const ModifyOp&) { assertNonmodifiable(); }
void clip(const CoordBBox&, const ValueType&) { assertNonmodifiable(); }
void fill(const CoordBBox&, const ValueType&, bool) { assertNonmodifiable(); }
// NOTE(review): unlike every other mutator, this overload is a silent no-op
// rather than asserting -- presumably deliberate; confirm before changing.
void fill(const ValueType&) {}
void fill(const ValueType&, bool) { assertNonmodifiable(); }
template<typename AccessorT>
void setValueOnlyAndCache(const Coord&, const ValueType&, AccessorT&) {assertNonmodifiable();}
template<typename ModifyOp, typename AccessorT>
void modifyValueAndActiveStateAndCache(const Coord&, const ModifyOp&, AccessorT&) {
assertNonmodifiable();
}
template<typename AccessorT>
void setValueOffAndCache(const Coord&, const ValueType&, AccessorT&) { assertNonmodifiable(); }
template<typename AccessorT>
void setActiveStateAndCache(const Coord&, bool, AccessorT&) { assertNonmodifiable(); }
void resetBackground(const ValueType&, const ValueType&) { assertNonmodifiable(); }
void signedFloodFill(const ValueType&) { assertNonmodifiable(); }
void signedFloodFill(const ValueType&, const ValueType&) { assertNonmodifiable(); }
void negate() { assertNonmodifiable(); }
protected:
// Tag types inherited from the base leaf, used to parameterize the iterator
// aliases declared below.
using ValueOn = typename BaseLeaf::ValueOn;
using ValueOff = typename BaseLeaf::ValueOff;
using ValueAll = typename BaseLeaf::ValueAll;
using ChildOn = typename BaseLeaf::ChildOn;
using ChildOff = typename BaseLeaf::ChildOff;
using ChildAll = typename BaseLeaf::ChildAll;
using MaskOnIterator = typename NodeMaskType::OnIterator;
using MaskOffIterator = typename NodeMaskType::OffIterator;
using MaskDenseIterator = typename NodeMaskType::DenseIterator;
// During topology-only construction, access is needed
// to protected/private members of other template instances.
template<typename, Index> friend struct PointIndexLeafNode;
friend class tree::IteratorBase<MaskOnIterator, PointIndexLeafNode>;
friend class tree::IteratorBase<MaskOffIterator, PointIndexLeafNode>;
friend class tree::IteratorBase<MaskDenseIterator, PointIndexLeafNode>;
public:
// Value iterators expose only const ValueType, consistent with this node's
// non-modifiable value policy.
using ValueOnIter = typename BaseLeaf::template ValueIter<
MaskOnIterator, PointIndexLeafNode, const ValueType, ValueOn>;
using ValueOnCIter = typename BaseLeaf::template ValueIter<
MaskOnIterator, const PointIndexLeafNode, const ValueType, ValueOn>;
using ValueOffIter = typename BaseLeaf::template ValueIter<
MaskOffIterator, PointIndexLeafNode, const ValueType, ValueOff>;
using ValueOffCIter = typename BaseLeaf::template ValueIter<
MaskOffIterator,const PointIndexLeafNode,const ValueType, ValueOff>;
using ValueAllIter = typename BaseLeaf::template ValueIter<
MaskDenseIterator, PointIndexLeafNode, const ValueType, ValueAll>;
using ValueAllCIter = typename BaseLeaf::template ValueIter<
MaskDenseIterator,const PointIndexLeafNode,const ValueType, ValueAll>;
using ChildOnIter = typename BaseLeaf::template ChildIter<
MaskOnIterator, PointIndexLeafNode, ChildOn>;
using ChildOnCIter = typename BaseLeaf::template ChildIter<
MaskOnIterator, const PointIndexLeafNode, ChildOn>;
using ChildOffIter = typename BaseLeaf::template ChildIter<
MaskOffIterator, PointIndexLeafNode, ChildOff>;
using ChildOffCIter = typename BaseLeaf::template ChildIter<
MaskOffIterator, const PointIndexLeafNode, ChildOff>;
using ChildAllIter = typename BaseLeaf::template DenseIter<
PointIndexLeafNode, ValueType, ChildAll>;
using ChildAllCIter = typename BaseLeaf::template DenseIter<
const PointIndexLeafNode, const ValueType, ChildAll>;
// Local shorthand for this node's value mask; #undef'd again at the end.
#define VMASK_ this->getValueMask()
ValueOnCIter cbeginValueOn() const { return ValueOnCIter(VMASK_.beginOn(), this); }
ValueOnCIter beginValueOn() const { return ValueOnCIter(VMASK_.beginOn(), this); }
ValueOnIter beginValueOn() { return ValueOnIter(VMASK_.beginOn(), this); }
ValueOffCIter cbeginValueOff() const { return ValueOffCIter(VMASK_.beginOff(), this); }
ValueOffCIter beginValueOff() const { return ValueOffCIter(VMASK_.beginOff(), this); }
ValueOffIter beginValueOff() { return ValueOffIter(VMASK_.beginOff(), this); }
ValueAllCIter cbeginValueAll() const { return ValueAllCIter(VMASK_.beginDense(), this); }
ValueAllCIter beginValueAll() const { return ValueAllCIter(VMASK_.beginDense(), this); }
ValueAllIter beginValueAll() { return ValueAllIter(VMASK_.beginDense(), this); }
ValueOnCIter cendValueOn() const { return ValueOnCIter(VMASK_.endOn(), this); }
ValueOnCIter endValueOn() const { return ValueOnCIter(VMASK_.endOn(), this); }
ValueOnIter endValueOn() { return ValueOnIter(VMASK_.endOn(), this); }
ValueOffCIter cendValueOff() const { return ValueOffCIter(VMASK_.endOff(), this); }
ValueOffCIter endValueOff() const { return ValueOffCIter(VMASK_.endOff(), this); }
ValueOffIter endValueOff() { return ValueOffIter(VMASK_.endOff(), this); }
ValueAllCIter cendValueAll() const { return ValueAllCIter(VMASK_.endDense(), this); }
ValueAllCIter endValueAll() const { return ValueAllCIter(VMASK_.endDense(), this); }
ValueAllIter endValueAll() { return ValueAllIter(VMASK_.endDense(), this); }
// Leaf nodes have no children, so the ChildOn/ChildOff "begin" iterators below
// are constructed from the mask's end positions and therefore yield empty ranges.
ChildOnCIter cbeginChildOn() const { return ChildOnCIter(VMASK_.endOn(), this); }
ChildOnCIter beginChildOn() const { return ChildOnCIter(VMASK_.endOn(), this); }
ChildOnIter beginChildOn() { return ChildOnIter(VMASK_.endOn(), this); }
ChildOffCIter cbeginChildOff() const { return ChildOffCIter(VMASK_.endOff(), this); }
ChildOffCIter beginChildOff() const { return ChildOffCIter(VMASK_.endOff(), this); }
ChildOffIter beginChildOff() { return ChildOffIter(VMASK_.endOff(), this); }
ChildAllCIter cbeginChildAll() const { return ChildAllCIter(VMASK_.beginDense(), this); }
ChildAllCIter beginChildAll() const { return ChildAllCIter(VMASK_.beginDense(), this); }
ChildAllIter beginChildAll() { return ChildAllIter(VMASK_.beginDense(), this); }
ChildOnCIter cendChildOn() const { return ChildOnCIter(VMASK_.endOn(), this); }
ChildOnCIter endChildOn() const { return ChildOnCIter(VMASK_.endOn(), this); }
ChildOnIter endChildOn() { return ChildOnIter(VMASK_.endOn(), this); }
ChildOffCIter cendChildOff() const { return ChildOffCIter(VMASK_.endOff(), this); }
ChildOffCIter endChildOff() const { return ChildOffCIter(VMASK_.endOff(), this); }
ChildOffIter endChildOff() { return ChildOffIter(VMASK_.endOff(), this); }
ChildAllCIter cendChildAll() const { return ChildAllCIter(VMASK_.endDense(), this); }
ChildAllCIter endChildAll() const { return ChildAllCIter(VMASK_.endDense(), this); }
ChildAllIter endChildAll() { return ChildAllIter(VMASK_.endDense(), this); }
#undef VMASK_
}; // struct PointIndexLeafNode
/// @brief Look up the point-index range for the voxel at world-leaf coordinate
/// @a ijk by converting it to a linear voxel offset and delegating.
template<typename T, Index Log2Dim>
inline bool
PointIndexLeafNode<T, Log2Dim>::getIndices(const Coord& ijk,
    const ValueType*& begin, const ValueType*& end) const
{
    const Index voxelOffset = LeafNodeType::coordToOffset(ijk);
    return this->getIndices(voxelOffset, begin, end);
}
/// @brief Set @a begin / @a end to the half-open range of point indices stored
/// for the voxel at linear offset @a offset.
/// @return @c true if the voxel is active (i.e. contains points), @c false otherwise.
/// @note The voxel buffer stores a running (inclusive) offset into mIndices, so
/// the range for voxel @a offset is [buffer[offset-1], buffer[offset]).
template<typename T, Index Log2Dim>
inline bool
PointIndexLeafNode<T, Log2Dim>::getIndices(Index offset,
    const ValueType*& begin, const ValueType*& end) const
{
    if (this->isValueMaskOn(offset)) {
        // Use data() instead of &mIndices.front(): calling front() on an empty
        // vector is undefined behavior, whereas data() is always valid (possibly
        // null) and yields a well-formed empty [begin, end) range in that case.
        const ValueType* dataPtr = mIndices.data();
        begin = dataPtr + (offset == 0 ? ValueType(0) : this->buffer()[offset - 1]);
        end = dataPtr + this->buffer()[offset];
        return true;
    }
    return false;
}
/// @brief Store the end-of-range index offset for voxel @a offset and mark
/// the voxel active.
template<typename T, Index Log2Dim>
inline void
PointIndexLeafNode<T, Log2Dim>::setOffsetOn(Index offset, const ValueType& val)
{
    auto& buf = this->buffer();
    buf.setValue(offset, val);
    this->setValueMaskOn(offset);
}
/// @brief Store the end-of-range index offset for voxel @a offset without
/// changing the voxel's active state.
template<typename T, Index Log2Dim>
inline void
PointIndexLeafNode<T, Log2Dim>::setOffsetOnly(Index offset, const ValueType& val)
{
    auto& buf = this->buffer();
    buf.setValue(offset, val);
}
/// @brief Return @c true if no points fall inside @a bbox.
/// The voxel buffer holds a running (inclusive) offset into mIndices for each
/// voxel, so a z-aligned run of voxels is non-empty exactly when the offset at
/// the run's last voxel exceeds the offset just before its first voxel.
template<typename T, Index Log2Dim>
inline bool
PointIndexLeafNode<T, Log2Dim>::isEmpty(const CoordBBox& bbox) const
{
Index xPos, pos, zStride = Index(bbox.max()[2] - bbox.min()[2]);
Coord ijk;
for (ijk[0] = bbox.min()[0]; ijk[0] <= bbox.max()[0]; ++ijk[0]) {
// Linear-offset contribution of the x coordinate within this leaf.
xPos = (ijk[0] & (DIM - 1u)) << (2 * LOG2DIM);
for (ijk[1] = bbox.min()[1]; ijk[1] <= bbox.max()[1]; ++ijk[1]) {
// Offset of the first voxel of this z-run.
pos = xPos + ((ijk[1] & (DIM - 1u)) << LOG2DIM);
pos += (bbox.min()[2] & (DIM - 1u));
// Compare end-of-run offset against the offset preceding the run.
if (this->buffer()[pos+zStride] > (pos == 0 ? T(0) : this->buffer()[pos - 1])) {
return false;
}
}
}
return true;
}
/// @brief Read the voxel buffer and mask, then the point-index array, which is
/// serialized as a 64-bit element count followed by the raw index data.
template<typename T, Index Log2Dim>
inline void
PointIndexLeafNode<T, Log2Dim>::readBuffers(std::istream& is, bool fromHalf)
{
    // Restore the base leaf's buffer and mask first.
    BaseLeaf::readBuffers(is, fromHalf);
    // Then restore the flat point-index array.
    Index64 count = Index64(0);
    is.read(reinterpret_cast<char*>(&count), sizeof(Index64));
    mIndices.resize(size_t(count));
    is.read(reinterpret_cast<char*>(mIndices.data()), count * sizeof(T));
}
/// @brief Read buffers, clipping voxel values to @a bbox. If this node does not
/// overlap @a bbox, the serialized point-index data is consumed and discarded
/// so the stream stays in sync.
template<typename T, Index Log2Dim>
inline void
PointIndexLeafNode<T, Log2Dim>::readBuffers(std::istream& is, const CoordBBox& bbox, bool fromHalf)
{
// Read and clip voxel values.
BaseLeaf::readBuffers(is, bbox, fromHalf);
// The point-index array is serialized as a 64-bit element count followed by raw data.
Index64 numIndices = Index64(0);
is.read(reinterpret_cast<char*>(&numIndices), sizeof(Index64));
const Index64 numBytes = numIndices * sizeof(T);
if (bbox.hasOverlap(this->getNodeBoundingBox())) {
mIndices.resize(size_t(numIndices));
is.read(reinterpret_cast<char*>(mIndices.data()), numBytes);
/// @todo If any voxels were deactivated as a result of clipping in the call to
/// BaseLeaf::readBuffers(), the point index list will need to be regenerated.
} else {
// Read and discard the point-index data.
std::unique_ptr<char[]> buf{new char[numBytes]};
is.read(buf.get(), numBytes);
}
// Reserved for future use
Index64 auxDataBytes = Index64(0);
is.read(reinterpret_cast<char*>(&auxDataBytes), sizeof(Index64));
if (auxDataBytes > 0) {
// For now, read and discard any auxiliary data.
std::unique_ptr<char[]> auxData{new char[auxDataBytes]};
is.read(auxData.get(), auxDataBytes);
}
}
/// @brief Write the voxel buffer and mask, then the point-index array (64-bit
/// element count followed by raw data), then a reserved auxiliary-data size.
template<typename T, Index Log2Dim>
inline void
PointIndexLeafNode<T, Log2Dim>::writeBuffers(std::ostream& os, bool toHalf) const
{
    BaseLeaf::writeBuffers(os, toHalf);
    // Serialize the point-index array.
    const Index64 count = Index64(mIndices.size());
    os.write(reinterpret_cast<const char*>(&count), sizeof(Index64));
    os.write(reinterpret_cast<const char*>(mIndices.data()), count * sizeof(T));
    // Zero bytes of auxiliary data (slot reserved for future extensions).
    const Index64 auxDataBytes = Index64(0);
    os.write(reinterpret_cast<const char*>(&auxDataBytes), sizeof(Index64));
}
/// @brief Return this node's memory footprint in bytes: the base node plus the
/// heap storage held by the index array (capacity, not size) and the vector object.
template<typename T, Index Log2Dim>
inline Index64
PointIndexLeafNode<T, Log2Dim>::memUsage() const
{
    const Index64 indexBytes = Index64(sizeof(T) * mIndices.capacity() + sizeof(mIndices));
    return BaseLeaf::memUsage() + indexBytes;
}
} // namespace tools
////////////////////////////////////////
namespace tree {
/// Helper metafunction used to implement LeafNode::SameConfiguration
/// (which, as an inner class, can't be independently specialized)
template<Index Dim1, typename T2>
struct SameLeafConfig<Dim1, openvdb::tools::PointIndexLeafNode<T2, Dim1> >
{
// A PointIndexLeafNode of matching dimension counts as an equivalent leaf configuration.
static const bool value = true;
};
} // namespace tree
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_POINT_INDEX_GRID_HAS_BEEN_INCLUDED
| 62,544 | C | 33.593473 | 99 | 0.650614 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/LevelSetAdvect.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @author Ken Museth
///
/// @file tools/LevelSetAdvect.h
///
/// @brief Hyperbolic advection of narrow-band level sets
#ifndef OPENVDB_TOOLS_LEVEL_SET_ADVECT_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_LEVEL_SET_ADVECT_HAS_BEEN_INCLUDED
#include <tbb/parallel_for.h>
#include <tbb/parallel_reduce.h>
#include <openvdb/Platform.h>
#include "LevelSetTracker.h"
#include "VelocityFields.h" // for EnrightField
#include <openvdb/math/FiniteDifference.h>
//#include <openvdb/util/CpuTimer.h>
#include <functional>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Hyperbolic advection of narrow-band level sets in an
/// external velocity field
///
/// The @c FieldType template argument below refers to any functor
/// with the following interface (see tools/VelocityFields.h
/// for examples):
///
/// @code
/// class VelocityField {
/// ...
/// public:
/// openvdb::VectorType operator() (const openvdb::Coord& xyz, ValueType time) const;
/// ...
/// };
/// @endcode
///
/// @note The functor method returns the velocity field at coordinate
/// position xyz of the advection grid, and for the specified
/// time. Note that since the velocity is returned in the local
/// coordinate space of the grid that is being advected, the functor
/// typically depends on the transformation of that grid. This design
/// is chosen for performance reasons. Finally we will assume that the
/// functor method is NOT threadsafe (typically uses a ValueAccessor)
/// and that it is lightweight enough that we can copy it per thread.
///
/// The @c InterruptType template argument below refers to any class
/// with the following interface:
/// @code
/// class Interrupter {
/// ...
/// public:
/// void start(const char* name = nullptr) // called when computations begin
/// void end() // called when computations end
/// bool wasInterrupted(int percent=-1) // return true to break computation
///};
/// @endcode
///
/// @note If no template argument is provided for this InterruptType
/// the util::NullInterrupter is used which implies that all
/// interrupter calls are no-ops (i.e. incurs no computational overhead).
///
/// @brief Drives hyperbolic advection of a narrow-band level set in an external
/// velocity field (see the file header for the FieldT and InterruptT requirements).
template<typename GridT,
typename FieldT = EnrightField<typename GridT::ValueType>,
typename InterruptT = util::NullInterrupter>
class LevelSetAdvection
{
public:
using GridType = GridT;
using TrackerT = LevelSetTracker<GridT, InterruptT>;
using LeafRange = typename TrackerT::LeafRange;
using LeafType = typename TrackerT::LeafType;
using BufferType = typename TrackerT::BufferType;
using ValueType = typename TrackerT::ValueType;
using VectorType = typename FieldT::VectorType;
/// Main constructor
LevelSetAdvection(GridT& grid, const FieldT& field, InterruptT* interrupt = nullptr):
mTracker(grid, interrupt), mField(field),
mSpatialScheme(math::HJWENO5_BIAS),
mTemporalScheme(math::TVD_RK2) {}
virtual ~LevelSetAdvection() {}
/// @brief Return the spatial finite difference scheme
math::BiasedGradientScheme getSpatialScheme() const { return mSpatialScheme; }
/// @brief Set the spatial finite difference scheme
void setSpatialScheme(math::BiasedGradientScheme scheme) { mSpatialScheme = scheme; }
/// @brief Return the temporal integration scheme
math::TemporalIntegrationScheme getTemporalScheme() const { return mTemporalScheme; }
/// @brief Set the temporal integration scheme
void setTemporalScheme(math::TemporalIntegrationScheme scheme) { mTemporalScheme = scheme; }
/// @brief Return the spatial finite difference scheme used by the level-set tracker
math::BiasedGradientScheme getTrackerSpatialScheme() const {
return mTracker.getSpatialScheme();
}
/// @brief Set the spatial finite difference scheme used by the level-set tracker
void setTrackerSpatialScheme(math::BiasedGradientScheme scheme) {
mTracker.setSpatialScheme(scheme);
}
/// @brief Return the temporal integration scheme used by the level-set tracker
math::TemporalIntegrationScheme getTrackerTemporalScheme() const {
return mTracker.getTemporalScheme();
}
/// @brief Set the temporal integration scheme used by the level-set tracker
void setTrackerTemporalScheme(math::TemporalIntegrationScheme scheme) {
mTracker.setTemporalScheme(scheme);
}
/// @brief Return the number of normalizations performed per track or
/// normalize call.
int getNormCount() const { return mTracker.getNormCount(); }
/// @brief Set the number of normalizations performed per track or
/// normalize call.
void setNormCount(int n) { mTracker.setNormCount(n); }
/// @brief Return the grain-size used for multi-threading
int getGrainSize() const { return mTracker.getGrainSize(); }
/// @brief Set the grain-size used for multi-threading.
/// @note A grain size of 0 or less disables multi-threading!
void setGrainSize(int grainsize) { mTracker.setGrainSize(grainsize); }
/// Advect the level set from its current time, time0, to its
/// final time, time1. If time0>time1 backward advection is performed.
///
/// @return number of CFL iterations used to advect from time0 to time1
size_t advect(ValueType time0, ValueType time1);
private:
// disallow copy construction and copy by assignment!
LevelSetAdvection(const LevelSetAdvection&);// not implemented
LevelSetAdvection& operator=(const LevelSetAdvection&);// not implemented
// This templated private struct implements all the level set magic.
template<typename MapT, math::BiasedGradientScheme SpatialScheme,
math::TemporalIntegrationScheme TemporalScheme>
struct Advect
{
/// Main constructor
Advect(LevelSetAdvection& parent);
/// Shallow copy constructor called by tbb::parallel_for() threads
Advect(const Advect& other);
/// Destructor: only the master instance releases the shared scratch arrays.
virtual ~Advect() { if (mIsMaster) this->clearField(); }
/// Advect the level set from its current time, time0, to its final time, time1.
/// @return number of CFL iterations
size_t advect(ValueType time0, ValueType time1);
/// Used internally by tbb::parallel_for()
void operator()(const LeafRange& r) const
{
if (mTask) mTask(const_cast<Advect*>(this), r);
else OPENVDB_THROW(ValueError, "task is undefined - don\'t call this method directly");
}
/// method calling tbb
void cook(const char* msg, size_t swapBuffer = 0);
/// Sample field and return the CFL time step
typename GridT::ValueType sampleField(ValueType time0, ValueType time1);
template <bool Aligned> void sample(const LeafRange& r, ValueType t0, ValueType t1);
inline void sampleXformed(const LeafRange& r, ValueType t0, ValueType t1)
{
this->sample<false>(r, t0, t1);
}
inline void sampleAligned(const LeafRange& r, ValueType t0, ValueType t1)
{
this->sample<true>(r, t0, t1);
}
/// Deallocate the sampled velocity and prefix-sum arrays.
void clearField();
// Convex combination of Phi and a forward Euler advection steps:
// Phi(result) = alpha * Phi(phi) + (1-alpha) * (Phi(0) - dt * Speed(speed)*|Grad[Phi(0)]|);
template <int Nominator, int Denominator>
void euler(const LeafRange&, ValueType, Index, Index);
inline void euler01(const LeafRange& r, ValueType t) {this->euler<0,1>(r, t, 0, 1);}
inline void euler12(const LeafRange& r, ValueType t) {this->euler<1,2>(r, t, 1, 1);}
inline void euler34(const LeafRange& r, ValueType t) {this->euler<3,4>(r, t, 1, 2);}
inline void euler13(const LeafRange& r, ValueType t) {this->euler<1,3>(r, t, 1, 2);}
LevelSetAdvection& mParent;
VectorType* mVelocity; // one sampled velocity per active voxel (see sampleField())
size_t* mOffsets; // per-leaf prefix sum of active-voxel counts
const MapT* mMap;
typename std::function<void (Advect*, const LeafRange&)> mTask;
const bool mIsMaster; // true only for the instance that owns the arrays above
}; // end of private Advect struct
template<math::BiasedGradientScheme SpatialScheme>
size_t advect1(ValueType time0, ValueType time1);
template<math::BiasedGradientScheme SpatialScheme,
math::TemporalIntegrationScheme TemporalScheme>
size_t advect2(ValueType time0, ValueType time1);
template<math::BiasedGradientScheme SpatialScheme,
math::TemporalIntegrationScheme TemporalScheme,
typename MapType>
size_t advect3(ValueType time0, ValueType time1);
TrackerT mTracker;
//each thread needs a deep copy of the field since it might contain a ValueAccessor
const FieldT mField;
math::BiasedGradientScheme mSpatialScheme;
math::TemporalIntegrationScheme mTemporalScheme;
};//end of LevelSetAdvection
/// @brief Dispatch the runtime spatial scheme to a compile-time specialization.
/// @return the number of CFL iterations used to advect from @a time0 to @a time1.
/// @throw ValueError if the configured spatial scheme is unsupported.
template<typename GridT, typename FieldT, typename InterruptT>
inline size_t
LevelSetAdvection<GridT, FieldT, InterruptT>::advect(ValueType time0, ValueType time1)
{
    if (mSpatialScheme == math::FIRST_BIAS) {
        return this->advect1<math::FIRST_BIAS  >(time0, time1);
    }
    if (mSpatialScheme == math::SECOND_BIAS) {
        return this->advect1<math::SECOND_BIAS >(time0, time1);
    }
    if (mSpatialScheme == math::THIRD_BIAS) {
        return this->advect1<math::THIRD_BIAS  >(time0, time1);
    }
    if (mSpatialScheme == math::WENO5_BIAS) {
        return this->advect1<math::WENO5_BIAS  >(time0, time1);
    }
    if (mSpatialScheme == math::HJWENO5_BIAS) {
        return this->advect1<math::HJWENO5_BIAS>(time0, time1);
    }
    OPENVDB_THROW(ValueError, "Spatial difference scheme not supported!");
}
/// @brief Resolve the runtime temporal integration scheme into a compile-time
/// template argument and continue the dispatch chain.
/// @throw ValueError if the configured temporal scheme is unsupported.
template<typename GridT, typename FieldT, typename InterruptT>
template<math::BiasedGradientScheme SpatialScheme>
inline size_t
LevelSetAdvection<GridT, FieldT, InterruptT>::advect1(ValueType time0, ValueType time1)
{
    if (mTemporalScheme == math::TVD_RK1) {
        return this->advect2<SpatialScheme, math::TVD_RK1>(time0, time1);
    }
    if (mTemporalScheme == math::TVD_RK2) {
        return this->advect2<SpatialScheme, math::TVD_RK2>(time0, time1);
    }
    if (mTemporalScheme == math::TVD_RK3) {
        return this->advect2<SpatialScheme, math::TVD_RK3>(time0, time1);
    }
    OPENVDB_THROW(ValueError, "Temporal integration scheme not supported!");
}
/// @brief Select a map specialization matching the grid transform's map type
/// and continue the dispatch chain.
/// @throw ValueError if the grid's map type is unsupported.
template<typename GridT, typename FieldT, typename InterruptT>
template<math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme>
inline size_t
LevelSetAdvection<GridT, FieldT, InterruptT>::advect2(ValueType time0, ValueType time1)
{
    const auto mapType = mTracker.grid().transform().mapType();
    if (mapType == math::UniformScaleMap::mapType()) {
        return this->advect3<SpatialScheme, TemporalScheme, math::UniformScaleMap>(time0, time1);
    }
    if (mapType == math::UniformScaleTranslateMap::mapType()) {
        return this->advect3<SpatialScheme, TemporalScheme, math::UniformScaleTranslateMap>(
            time0, time1);
    }
    if (mapType == math::UnitaryMap::mapType()) {
        return this->advect3<SpatialScheme, TemporalScheme, math::UnitaryMap>(time0, time1);
    }
    if (mapType == math::TranslationMap::mapType()) {
        return this->advect3<SpatialScheme, TemporalScheme, math::TranslationMap>(time0, time1);
    }
    OPENVDB_THROW(ValueError, "MapType not supported!");
}
/// @brief Terminal dispatch step: instantiate the fully specialized advection
/// functor and run it over [@a time0, @a time1].
template<typename GridT, typename FieldT, typename InterruptT>
template<
    math::BiasedGradientScheme SpatialScheme,
    math::TemporalIntegrationScheme TemporalScheme,
    typename MapT>
inline size_t
LevelSetAdvection<GridT, FieldT, InterruptT>::advect3(ValueType time0, ValueType time1)
{
    Advect<MapT, SpatialScheme, TemporalScheme> op(*this);
    return op.advect(time0, time1);
}
///////////////////////////////////////////////////////////////////////
/// @brief Master constructor: this instance owns the sampled velocity and
/// offset arrays (mIsMaster is true, so ~Advect() calls clearField()).
template<typename GridT, typename FieldT, typename InterruptT>
template<
typename MapT,
math::BiasedGradientScheme SpatialScheme,
math::TemporalIntegrationScheme TemporalScheme>
inline
LevelSetAdvection<GridT, FieldT, InterruptT>::
Advect<MapT, SpatialScheme, TemporalScheme>::
Advect(LevelSetAdvection& parent)
: mParent(parent)
, mVelocity(nullptr)
, mOffsets(nullptr)
, mMap(parent.mTracker.grid().transform().template constMap<MapT>().get())
, mTask(0)
, mIsMaster(true)
{
}
/// @brief Shallow copy constructor used by tbb::parallel_for() worker threads.
/// Copies share the master's velocity/offset arrays; mIsMaster is false so the
/// copies' destructors do not free them.
template<typename GridT, typename FieldT, typename InterruptT>
template<
typename MapT,
math::BiasedGradientScheme SpatialScheme,
math::TemporalIntegrationScheme TemporalScheme>
inline
LevelSetAdvection<GridT, FieldT, InterruptT>::
Advect<MapT, SpatialScheme, TemporalScheme>::
Advect(const Advect& other)
: mParent(other.mParent)
, mVelocity(other.mVelocity)
, mOffsets(other.mOffsets)
, mMap(other.mMap)
, mTask(other.mTask)
, mIsMaster(false)
{
}
/// @brief Advect the level set from @a time0 to @a time1 (backward if
/// time0 > time1) in CFL-limited sub-steps, re-normalizing the narrow band
/// after each step.
/// @return the number of CFL sub-steps taken.
/// Fix: the TVD_RK2 progress messages previously mislabeled the scheme as
/// "TVD_RK1"; they now report "TVD_RK2".
template<typename GridT, typename FieldT, typename InterruptT>
template<
typename MapT,
math::BiasedGradientScheme SpatialScheme,
math::TemporalIntegrationScheme TemporalScheme>
inline size_t
LevelSetAdvection<GridT, FieldT, InterruptT>::
Advect<MapT, SpatialScheme, TemporalScheme>::
advect(ValueType time0, ValueType time1)
{
namespace ph = std::placeholders;
//util::CpuTimer timer;
size_t countCFL = 0;
// Nothing to do for an empty time interval.
if ( math::isZero(time0 - time1) ) return countCFL;
const bool isForward = time0 < time1;
while ((isForward ? time0<time1 : time0>time1) && mParent.mTracker.checkInterrupter()) {
/// Make sure we have enough temporal auxiliary buffers
//timer.start( "\nallocate buffers" );
mParent.mTracker.leafs().rebuildAuxBuffers(TemporalScheme == math::TVD_RK3 ? 2 : 1);
//timer.stop();
// Sample the velocity field and derive a CFL-limited time step.
const ValueType dt = this->sampleField(time0, time1);
if ( math::isZero(dt) ) break;//V is essentially zero so terminate
OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN //switch is resolved at compile-time
switch(TemporalScheme) {
case math::TVD_RK1:
// Perform one explicit Euler step: t1 = t0 + dt
// Phi_t1(1) = Phi_t0(0) - dt * VdotG_t0(0)
mTask = std::bind(&Advect::euler01, ph::_1, ph::_2, dt);
// Cook and swap buffer 0 and 1 such that Phi_t1(0) and Phi_t0(1)
this->cook("Advecting level set using TVD_RK1", 1);
break;
case math::TVD_RK2:
// Perform one explicit Euler step: t1 = t0 + dt
// Phi_t1(1) = Phi_t0(0) - dt * VdotG_t0(0)
mTask = std::bind(&Advect::euler01, ph::_1, ph::_2, dt);
// Cook and swap buffer 0 and 1 such that Phi_t1(0) and Phi_t0(1)
this->cook("Advecting level set using TVD_RK2 (step 1 of 2)", 1);
// Convex combine explicit Euler step: t2 = t0 + dt
// Phi_t2(1) = 1/2 * Phi_t0(1) + 1/2 * (Phi_t1(0) - dt * V.Grad_t1(0))
mTask = std::bind(&Advect::euler12, ph::_1, ph::_2, dt);
// Cook and swap buffer 0 and 1 such that Phi_t2(0) and Phi_t1(1)
this->cook("Advecting level set using TVD_RK2 (step 2 of 2)", 1);
break;
case math::TVD_RK3:
// Perform one explicit Euler step: t1 = t0 + dt
// Phi_t1(1) = Phi_t0(0) - dt * VdotG_t0(0)
mTask = std::bind(&Advect::euler01, ph::_1, ph::_2, dt);
// Cook and swap buffer 0 and 1 such that Phi_t1(0) and Phi_t0(1)
this->cook("Advecting level set using TVD_RK3 (step 1 of 3)", 1);
// Convex combine explicit Euler step: t2 = t0 + dt/2
// Phi_t2(2) = 3/4 * Phi_t0(1) + 1/4 * (Phi_t1(0) - dt * V.Grad_t1(0))
mTask = std::bind(&Advect::euler34, ph::_1, ph::_2, dt);
// Cook and swap buffer 0 and 2 such that Phi_t2(0) and Phi_t1(2)
this->cook("Advecting level set using TVD_RK3 (step 2 of 3)", 2);
// Convex combine explicit Euler step: t3 = t0 + dt
// Phi_t3(2) = 1/3 * Phi_t0(1) + 2/3 * (Phi_t2(0) - dt * V.Grad_t2(0)
mTask = std::bind(&Advect::euler13, ph::_1, ph::_2, dt);
// Cook and swap buffer 0 and 2 such that Phi_t3(0) and Phi_t2(2)
this->cook("Advecting level set using TVD_RK3 (step 3 of 3)", 2);
break;
default:
OPENVDB_THROW(ValueError, "Temporal integration scheme not supported!");
}//end of compile-time resolved switch
OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
time0 += isForward ? dt : -dt;
++countCFL;
mParent.mTracker.leafs().removeAuxBuffers();
this->clearField();
/// Track the narrow band
mParent.mTracker.track();
}//end while-loop over time
return countCFL;//number of CFL propagation steps
}
/// @brief Sample the velocity field at every active voxel (into mVelocity) and
/// return a CFL-limited time step, clamped to the remaining time interval.
/// @return 0 if there are no leaf nodes or the sampled velocities are all
/// (approximately) zero.
template<typename GridT, typename FieldT, typename InterruptT>
template<
typename MapT,
math::BiasedGradientScheme SpatialScheme,
math::TemporalIntegrationScheme TemporalScheme>
inline typename GridT::ValueType
LevelSetAdvection<GridT, FieldT, InterruptT>::
Advect<MapT, SpatialScheme, TemporalScheme>::
sampleField(ValueType time0, ValueType time1)
{
namespace ph = std::placeholders;
const int grainSize = mParent.mTracker.getGrainSize();
const size_t leafCount = mParent.mTracker.leafs().leafCount();
if (leafCount==0) return ValueType(0.0);
// Compute the prefix sum of offsets to active voxels
size_t size=0, voxelCount=mParent.mTracker.leafs().getPrefixSum(mOffsets, size, grainSize);
// Sample the velocity field
// When the field shares the grid's transform, skip the map transform per voxel.
if (mParent.mField.transform() == mParent.mTracker.grid().transform()) {
mTask = std::bind(&Advect::sampleAligned, ph::_1, ph::_2, time0, time1);
} else {
mTask = std::bind(&Advect::sampleXformed, ph::_1, ph::_2, time0, time1);
}
assert(voxelCount == mParent.mTracker.grid().activeVoxelCount());
// One velocity vector per active voxel; released in clearField().
mVelocity = new VectorType[ voxelCount ];
this->cook("Sampling advection field");
// Find the extrema of the magnitude of the velocities
// NOTE: maxAbsV accumulates the *squared* maximum speed; the square root is
// applied once in the CFL formula below.
ValueType maxAbsV = 0;
VectorType* v = mVelocity;
for (size_t i = 0; i < voxelCount; ++i, ++v) {
maxAbsV = math::Max(maxAbsV, ValueType(v->lengthSqr()));
}
// Compute the CFL number
if (math::isApproxZero(maxAbsV, math::Delta<ValueType>::value())) return ValueType(0);
static const ValueType CFL = (TemporalScheme == math::TVD_RK1 ? ValueType(0.3) :
TemporalScheme == math::TVD_RK2 ? ValueType(0.9) :
ValueType(1.0))/math::Sqrt(ValueType(3.0));
const ValueType dt = math::Abs(time1 - time0), dx = mParent.mTracker.voxelSize();
return math::Min(dt, ValueType(CFL*dx/math::Sqrt(maxAbsV)));
}
/// @brief Worker body that samples the velocity field for every active voxel
/// in @a range, writing into the flat mVelocity array.
/// @tparam Aligned if true, sample in the grid's index space directly;
/// otherwise map each coordinate through the grid transform first.
template<typename GridT, typename FieldT, typename InterruptT>
template<
typename MapT,
math::BiasedGradientScheme SpatialScheme,
math::TemporalIntegrationScheme TemporalScheme>
template<bool Aligned>
inline void
LevelSetAdvection<GridT, FieldT, InterruptT>::
Advect<MapT, SpatialScheme, TemporalScheme>::
sample(const LeafRange& range, ValueType time0, ValueType time1)
{
const bool isForward = time0 < time1;
using VoxelIterT = typename LeafType::ValueOnCIter;
const MapT& map = *mMap;
// Per-thread deep copy: the field functor is not assumed to be thread-safe
// (see the requirements in the file header).
const FieldT field( mParent.mField );
mParent.mTracker.checkInterrupter();
for (typename LeafRange::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
// Destination slots for this leaf's active voxels in the flat velocity array.
VectorType* vel = mVelocity + mOffsets[ leafIter.pos() ];
for (VoxelIterT iter = leafIter->cbeginValueOn(); iter; ++iter, ++vel) {
const VectorType v = Aligned ? field(iter.getCoord(), time0) ://resolved at compile time
field(map.applyMap(iter.getCoord().asVec3d()), time0);
// Negate the velocity for backward (time0 > time1) advection.
*vel = isForward ? v : -v;
}
}
}
/// @brief Release the per-advection scratch arrays (sampled velocities and
/// the active-voxel prefix sum) allocated by sampleField().
template<typename GridT, typename FieldT, typename InterruptT>
template<
    typename MapT,
    math::BiasedGradientScheme SpatialScheme,
    math::TemporalIntegrationScheme TemporalScheme>
inline void
LevelSetAdvection<GridT, FieldT, InterruptT>::
Advect<MapT, SpatialScheme, TemporalScheme>::
clearField()
{
    delete [] mVelocity;
    delete [] mOffsets;
    mVelocity = nullptr;
    mOffsets = nullptr;
}
/// @brief Run the currently bound task (mTask) over all leaf nodes, serially
/// or via tbb::parallel_for depending on the grain size, then swap in the
/// requested auxiliary leaf buffer.
/// @param msg        progress message passed to the interrupter
/// @param swapBuffer index of the leaf buffer to swap with buffer 0 afterwards
template<typename GridT, typename FieldT, typename InterruptT>
template<
    typename MapT,
    math::BiasedGradientScheme SpatialScheme,
    math::TemporalIntegrationScheme TemporalScheme>
inline void
LevelSetAdvection<GridT, FieldT, InterruptT>::
Advect<MapT, SpatialScheme, TemporalScheme>::
cook(const char* msg, size_t swapBuffer)
{
    mParent.mTracker.startInterrupter( msg );
    const int grainSize = mParent.mTracker.getGrainSize();
    const LeafRange range = mParent.mTracker.leafs().leafRange(grainSize);
    // A grain size of zero selects serial execution on the calling thread.
    if (grainSize == 0) {
        (*this)(range);
    } else {
        tbb::parallel_for(range, *this);
    }
    mParent.mTracker.leafs().swapLeafBuffer(swapBuffer, grainSize == 0);
    mParent.mTracker.endInterrupter();
}
// Convex combination of Phi and a forward Euler advection steps:
// Phi(result) = alpha * Phi(phi) + (1-alpha) * (Phi(0) - dt * V.Grad(0));
template<typename GridT, typename FieldT, typename InterruptT>
template<
typename MapT,
math::BiasedGradientScheme SpatialScheme,
math::TemporalIntegrationScheme TemporalScheme>
template <int Nominator, int Denominator>
inline void
LevelSetAdvection<GridT, FieldT, InterruptT>::
Advect<MapT, SpatialScheme, TemporalScheme>::
euler(const LeafRange& range, ValueType dt, Index phiBuffer, Index resultBuffer)
{
using SchemeT = math::BIAS_SCHEME<SpatialScheme>;
using StencilT = typename SchemeT::template ISStencil<GridType>::StencilType;
using VoxelIterT = typename LeafType::ValueOnCIter;
using GradT = math::GradientBiased<MapT, SpatialScheme>;
// Alpha = Nominator/Denominator is the convex blending weight; Alpha == 0
// (Nominator == 0) reduces to a plain forward Euler step.
static const ValueType Alpha = ValueType(Nominator)/ValueType(Denominator);
static const ValueType Beta = ValueType(1) - Alpha;
mParent.mTracker.checkInterrupter();
const MapT& map = *mMap;
StencilT stencil(mParent.mTracker.grid());
for (typename LeafRange::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
// Pre-sampled velocities for this leaf's active voxels (see sampleField()).
const VectorType* vel = mVelocity + mOffsets[ leafIter.pos() ];
const ValueType* phi = leafIter.buffer(phiBuffer).data();
ValueType* result = leafIter.buffer(resultBuffer).data();
for (VoxelIterT voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter, ++vel) {
const Index i = voxelIter.pos();
stencil.moveTo(voxelIter);
// Upwind-biased advection term: phi - dt * (v . grad(phi))
const ValueType a =
stencil.getValue() - dt * vel->dot(GradT::result(map, stencil, *vel));
result[i] = Nominator ? Alpha * phi[i] + Beta * a : a;
}//loop over active voxels in the leaf of the mask
}//loop over leafs of the level set
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_LEVEL_SET_ADVECT_HAS_BEEN_INCLUDED
| 22,887 | C | 38.736111 | 100 | 0.673439 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/PotentialFlow.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file tools/PotentialFlow.h
///
/// @brief Tools for creating potential flow fields through solving Laplace's equation
///
/// @authors Todd Keeler, Dan Bailey
#ifndef OPENVDB_TOOLS_POTENTIAL_FLOW_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_POTENTIAL_FLOW_HAS_BEEN_INCLUDED
#include <openvdb/openvdb.h>
#include "GridOperators.h"
#include "GridTransformer.h"
#include "Mask.h" // interiorMask
#include "Morphology.h" // dilateVoxels, erodeVoxels
#include "PoissonSolver.h"
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Metafunction to convert a vector-valued grid type to a scalar grid type
template<typename VecGridT>
struct VectorToScalarGrid {
    // Scalar grid whose value type is the component type of the vector grid
    // (e.g. a Vec3-valued grid maps to a grid of its scalar components).
    using Type =
        typename VecGridT::template ValueConverter<typename VecGridT::ValueType::value_type>::Type;
    using Ptr = typename Type::Ptr;           // shared pointer to the scalar grid type
    using ConstPtr = typename Type::ConstPtr; // shared pointer to the const scalar grid type
};
/// @brief Construct a mask for the Potential Flow domain.
/// @details For a level set, this represents a rebuilt exterior narrow band.
/// For any other grid it is a new region that surrounds the active voxels.
/// @param grid source grid to use for computing the mask
/// @param dilation dilation in voxels of the source grid to form the new potential flow mask
template<typename GridT, typename MaskT = typename GridT::template ValueConverter<ValueMask>::Type>
inline typename MaskT::Ptr
createPotentialFlowMask(const GridT& grid, int dilation = 5);
/// @brief Create a Potential Flow velocities grid for the Neumann boundary.
/// @param collider a level set that represents the boundary
/// @param domain a mask to represent the potential flow domain
/// @param boundaryVelocity an optional grid pointer that stores the velocities of the boundary
/// @param backgroundVelocity a background velocity value
/// @details Typically this method involves supplying a velocity grid for the
/// collider boundary, however it can also be used for a global wind field
/// around the collider by supplying an empty boundary Velocity and a
/// non-zero background velocity.
template<typename Vec3T, typename GridT, typename MaskT>
inline typename GridT::template ValueConverter<Vec3T>::Type::Ptr
createPotentialFlowNeumannVelocities(const GridT& collider, const MaskT& domain,
const typename GridT::template ValueConverter<Vec3T>::Type::ConstPtr boundaryVelocity,
const Vec3T& backgroundVelocity);
/// @brief Compute the Potential on the domain using the Neumann boundary conditions on
/// solid boundaries
/// @param domain a mask to represent the domain in which to perform the solve
/// @param neumann the topology of this grid defines where the solid boundaries are and grid
/// values give the Neumann boundaries that should be applied there
/// @param state the solver parameters for computing the solution
/// @param interrupter pointer to an optional interrupter adhering to the
/// util::NullInterrupter interface
/// @details On input, the State object should specify convergence criteria
/// (minimum error and maximum number of iterations); on output, it gives
/// the actual termination conditions.
template<typename Vec3GridT, typename MaskT, typename InterrupterT = util::NullInterrupter>
inline typename VectorToScalarGrid<Vec3GridT>::Ptr
computeScalarPotential(const MaskT& domain, const Vec3GridT& neumann, math::pcg::State& state,
InterrupterT* interrupter = nullptr);
/// @brief Compute a vector Flow Field comprising the gradient of the potential with Neumann
/// boundary conditions applied
/// @param potential scalar potential, typically computed from computeScalarPotential()
/// @param neumann the topology of this grid defines where the solid boundaries are and grid
/// values give the Neumann boundaries that should be applied there
/// @param backgroundVelocity a background velocity value
template<typename Vec3GridT>
inline typename Vec3GridT::Ptr
computePotentialFlow(const typename VectorToScalarGrid<Vec3GridT>::Type& potential,
const Vec3GridT& neumann,
const typename Vec3GridT::ValueType backgroundVelocity =
zeroVal<typename Vec3GridT::TreeType::ValueType>());
//////////////////////////////////////////////////////////
namespace potential_flow_internal {
/// @private
// helper function for retrieving a mask that comprises the outer-most layer of voxels
template<typename GridT>
inline typename GridT::TreeType::template ValueConverter<ValueMask>::Type::Ptr
extractOuterVoxelMask(GridT& inGrid)
{
    using MaskTreeT = typename GridT::TreeType::template ValueConverter<ValueMask>::Type;

    // Duplicate the input topology twice: one copy will be eroded,
    // the other keeps the full footprint.
    typename MaskTreeT::Ptr eroded(new MaskTreeT(inGrid.tree(), false, TopologyCopy()));
    typename MaskTreeT::Ptr outerShell(new MaskTreeT(inGrid.tree(), false, TopologyCopy()));

    // Peel one face-connected voxel layer off the eroded copy, then
    // subtract it from the full copy, leaving only the outer-most layer.
    erodeVoxels(*eroded, 1, NN_FACE);
    outerShell->topologyDifference(*eroded);
    return outerShell;
}
// computes Neumann velocities through sampling the gradient and velocities
template<typename Vec3GridT, typename GradientT>
struct ComputeNeumannVelocityOp
{
    using ValueT = typename Vec3GridT::ValueType;
    using VelocityAccessor = typename Vec3GridT::ConstAccessor;
    using VelocitySamplerT = GridSampler<
        typename Vec3GridT::ConstAccessor, BoxSampler>;
    using GradientValueT = typename GradientT::TreeType::ValueType;

    // Constructor for a spatially-varying boundary velocity grid plus a
    // constant background velocity.
    // NOTE(review): mGradient and mBackgroundVelocity are held by reference,
    // so both arguments must outlive this functor.
    ComputeNeumannVelocityOp(   const GradientT& gradient,
                                const Vec3GridT& velocity,
                                const ValueT& backgroundVelocity)
        : mGradient(gradient)
        , mVelocity(&velocity)
        , mBackgroundVelocity(backgroundVelocity) { }

    // Constructor for a constant background velocity only (no boundary grid).
    ComputeNeumannVelocityOp(   const GradientT& gradient,
                                const ValueT& backgroundVelocity)
        : mGradient(gradient)
        , mBackgroundVelocity(backgroundVelocity) { }

    // Leaf functor for tree::LeafManager::foreach(): at every active voxel,
    // projects the (sampled + background) velocity onto the boundary normal,
    // i.e. the normalized level-set gradient.
    void operator()(typename Vec3GridT::TreeType::LeafNodeType& leaf, size_t) const {
        auto gradientAccessor = mGradient.getConstAccessor();

        // A sampler is only needed when a boundary velocity grid was supplied.
        std::unique_ptr<VelocityAccessor> velocityAccessor;
        std::unique_ptr<VelocitySamplerT> velocitySampler;
        if (mVelocity) {
            velocityAccessor.reset(new VelocityAccessor(mVelocity->getConstAccessor()));
            velocitySampler.reset(new VelocitySamplerT(*velocityAccessor, mVelocity->transform()));
        }

        for (auto it = leaf.beginValueOn(); it; ++it) {
            Coord ijk = it.getCoord();
            auto gradient = gradientAccessor.getValue(ijk);
            // normalize() fails for a zero-length gradient, in which case no
            // boundary normal is defined and the voxel is deactivated below.
            if (gradient.normalize()) {
                const Vec3d xyz = mGradient.transform().indexToWorld(ijk);
                const ValueT sampledVelocity = velocitySampler ?
                    velocitySampler->wsSample(xyz) : zeroVal<ValueT>();
                auto velocity = sampledVelocity + mBackgroundVelocity;
                // keep only the normal component: (v . n) * n
                auto value = gradient.dot(velocity) * gradient;
                it.setValue(value);
            }
            else {
                it.setValueOff();
            }
        }
    }

private:
    const GradientT& mGradient;           // level-set gradient (source of boundary normals)
    const Vec3GridT* mVelocity = nullptr; // optional boundary velocity grid
    const ValueT& mBackgroundVelocity;    // constant additive velocity (held by reference)
}; // struct ComputeNeumannVelocityOp
// initializes the boundary conditions for use in the Poisson Solver
template<typename Vec3GridT, typename MaskT>
struct SolveBoundaryOp
{
    SolveBoundaryOp(const Vec3GridT& velGrid, const MaskT& domainGrid)
        : mVoxelSize(domainGrid.voxelSize()[0])
        , mVelGrid(velGrid)
        , mDomainGrid(domainGrid)
    { }

    // Boundary functor invoked by the Poisson solver for a face between the
    // active voxel ijk and an inactive neighbor; accumulates into the
    // right-hand side (source) and the matrix diagonal.
    void operator()(const Coord& ijk, const Coord& neighbor,
        double& source, double& diagonal) const {

        typename Vec3GridT::ConstAccessor velGridAccessor = mVelGrid.getAccessor();
        // axis-aligned offset from neighbor to ijk; selects the face normal
        const Coord diff = (ijk - neighbor);

        if (velGridAccessor.isValueOn(ijk)) { // Neumann
            // add the prescribed flux v . n through this face, scaled by voxel size
            const typename Vec3GridT::ValueType& sampleVel = velGridAccessor.getValue(ijk);
            source += mVoxelSize*diff[0]*sampleVel[0];
            source += mVoxelSize*diff[1]*sampleVel[1];
            source += mVoxelSize*diff[2]*sampleVel[2];
        } else {
            diagonal -= 1; // Zero Dirichlet
        }

    }

    const double mVoxelSize;   // uniform voxel size of the domain grid
    const Vec3GridT& mVelGrid; // Neumann boundary velocities (active where Neumann applies)
    const MaskT& mDomainGrid;  // mask of the solve domain
}; // struct SolveBoundaryOp
} // namespace potential_flow_internal
////////////////////////////////////////////////////////////////////////////
template<typename GridT, typename MaskT>
inline typename MaskT::Ptr
createPotentialFlowMask(const GridT& grid, int dilation)
{
    using MaskTreeT = typename MaskT::TreeType;

    // dilation is measured in voxels, so anisotropic voxels are unsupported
    if (!grid.hasUniformVoxels()) {
        OPENVDB_THROW(ValueError, "Transform must have uniform voxels for Potential Flow mask.");
    }

    // mask covering the interior region of the source grid
    auto interior = interiorMask(grid);

    // duplicate the interior topology into a fresh mask grid that
    // shares the source grid's transform
    typename MaskTreeT::Ptr bandTree(new MaskTreeT(interior->tree(), false, TopologyCopy()));
    typename MaskT::Ptr band = MaskT::create(bandTree);
    band->setTransform(grid.transform().copy());

    // grow the mask outwards, then cut away the interior so that only
    // the exterior narrow band remains
    dilateActiveValues(*bandTree, dilation, NN_FACE_EDGE);
    band->tree().topologyDifference(interior->tree());

    return band;
}
template<typename Vec3T, typename GridT, typename MaskT>
typename GridT::template ValueConverter<Vec3T>::Type::Ptr createPotentialFlowNeumannVelocities(
    const GridT& collider,
    const MaskT& domain,
    const typename GridT::template ValueConverter<Vec3T>::Type::ConstPtr boundaryVelocity,
    const Vec3T& backgroundVelocity)
{
    using Vec3GridT = typename GridT::template ValueConverter<Vec3T>::Type;
    using TreeT = typename Vec3GridT::TreeType;
    using ValueT = typename TreeT::ValueType;
    using GradientT = typename ScalarToVectorConverter<GridT>::Type;

    using potential_flow_internal::ComputeNeumannVelocityOp;

    // this method requires the collider to be a level set to generate the gradient
    // use the tools::topologyToLevelset() method if you need to convert a mask into a level set
    if (collider.getGridClass() != GRID_LEVEL_SET ||
        !std::is_floating_point<typename GridT::TreeType::ValueType>::value) {
        OPENVDB_THROW(TypeError, "Potential Flow expecting the collider to be a level set.");
    }

    // return an empty grid if there are no velocities to apply
    if (backgroundVelocity == zeroVal<Vec3T>() &&
        (!boundaryVelocity || boundaryVelocity->empty())) {
        auto neumann = Vec3GridT::create();
        neumann->setTransform(collider.transform().copy());
        return neumann;
    }

    // extract the intersection between the collider and the domain;
    // this is the set of voxels on which Neumann conditions will be imposed
    using MaskTreeT = typename GridT::TreeType::template ValueConverter<ValueMask>::Type;
    typename MaskTreeT::Ptr boundary(new MaskTreeT(domain.tree(), false, TopologyCopy()));
    boundary->topologyIntersection(collider.tree());

    typename TreeT::Ptr neumannTree(new TreeT(*boundary, zeroVal<ValueT>(), TopologyCopy()));
    neumannTree->voxelizeActiveTiles();

    // compute the gradient from the collider (provides the boundary normals)
    const typename GradientT::Ptr gradient = tools::gradient(collider);

    // populate the Neumann values, with or without a boundary velocity grid
    // NOTE(review): foreach() is called with its second argument false here,
    // unlike the plain foreach() calls elsewhere in this file — confirm intent.
    typename tree::LeafManager<TreeT> leafManager(*neumannTree);

    if (boundaryVelocity && !boundaryVelocity->empty()) {
        ComputeNeumannVelocityOp<Vec3GridT, GradientT>
            neumannOp(*gradient, *boundaryVelocity, backgroundVelocity);
        leafManager.foreach(neumannOp, false);
    }
    else {
        ComputeNeumannVelocityOp<Vec3GridT, GradientT>
            neumannOp(*gradient, backgroundVelocity);
        leafManager.foreach(neumannOp, false);
    }

    // prune any inactive values (voxels deactivated where no normal was defined)
    tools::pruneInactive(*neumannTree);

    typename Vec3GridT::Ptr neumann(Vec3GridT::create(neumannTree));
    neumann->setTransform(collider.transform().copy());

    return neumann;
}
template<typename Vec3GridT, typename MaskT, typename InterrupterT>
inline typename VectorToScalarGrid<Vec3GridT>::Ptr
computeScalarPotential(const MaskT& domain, const Vec3GridT& neumann,
    math::pcg::State& state, InterrupterT* interrupter)
{
    using ScalarT = typename Vec3GridT::ValueType::value_type;
    using ScalarTreeT = typename Vec3GridT::TreeType::template ValueConverter<ScalarT>::Type;
    using ScalarGridT = typename Vec3GridT::template ValueConverter<ScalarT>::Type;

    using potential_flow_internal::SolveBoundaryOp;

    // create the solution tree and activate using domain topology
    ScalarTreeT solveTree(domain.tree(), zeroVal<ScalarT>(), TopologyCopy());
    solveTree.voxelizeActiveTiles();

    // substitute a null interrupter so the solver can always dereference one
    util::NullInterrupter nullInterrupt;
    if (!interrupter) interrupter = &nullInterrupt;

    // solve for the scalar potential; SolveBoundaryOp supplies the Neumann /
    // Dirichlet conditions at the edge of the domain
    SolveBoundaryOp<Vec3GridT, MaskT> solve(neumann, domain);
    typename ScalarTreeT::Ptr potentialTree =
        poisson::solveWithBoundaryConditions(solveTree, solve, state, *interrupter, true);

    auto potential = ScalarGridT::create(potentialTree);
    potential->setTransform(domain.transform().copy());

    return potential;
}
template<typename Vec3GridT>
inline typename Vec3GridT::Ptr
computePotentialFlow(const typename VectorToScalarGrid<Vec3GridT>::Type& potential,
    const Vec3GridT& neumann,
    const typename Vec3GridT::ValueType backgroundVelocity)
{
    using Vec3T = const typename Vec3GridT::ValueType;

    using potential_flow_internal::extractOuterVoxelMask;

    // The VDB gradient op uses the background grid value, which is zero by default, when
    // computing the gradient at the boundary. This works at the zero-dirichlet boundaries, but
    // give spurious values at Neumann ones as the potential should be non-zero there. To avoid
    // the extra error, we just substitute the Neumann condition on the boundaries.
    // Technically, we should allow for some tangential velocity, coming from the gradient of
    // potential. However, considering the voxelized nature of our solve, a decent approximation
    // to a tangential derivative isn't probably worth our time. Any tangential component will be
    // found in the next interior ring of voxels.

    auto gradient = tools::gradient(potential);

    // apply Neumann values to the gradient on the outer-most ring of voxels
    auto applyNeumann = [&gradient, &neumann] (
        const MaskGrid::TreeType::LeafNodeType& leaf, size_t)
    {
        typename Vec3GridT::Accessor gradientAccessor = gradient->getAccessor();
        typename Vec3GridT::ConstAccessor neumannAccessor = neumann.getAccessor();
        for (auto it = leaf.beginValueOn(); it; ++it) {
            const Coord ijk = it.getCoord();
            typename Vec3GridT::ValueType value;
            // probeValue() is true only where a Neumann condition is active
            if (neumannAccessor.probeValue(ijk, value)) {
                gradientAccessor.setValue(ijk, value);
            }
        }
    };

    const MaskGrid::TreeType::Ptr boundary = extractOuterVoxelMask(*gradient);
    typename tree::LeafManager<const typename MaskGrid::TreeType> leafManager(*boundary);
    leafManager.foreach(applyNeumann);

    // subtract the background value from the gradient if one was supplied,
    // converting the result back into the world (absolute) frame
    if (backgroundVelocity != zeroVal<Vec3T>()) {
        auto applyBackgroundVelocity = [&backgroundVelocity] (
            typename Vec3GridT::TreeType::LeafNodeType& leaf, size_t)
        {
            for (auto it = leaf.beginValueOn(); it; ++it) {
                it.setValue(it.getValue() - backgroundVelocity);
            }
        };

        typename tree::LeafManager<typename Vec3GridT::TreeType> leafManager2(gradient->tree());
        leafManager2.foreach(applyBackgroundVelocity);
    }

    return gradient;
}
////////////////////////////////////////
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_POTENTIAL_FLOW_HAS_BEEN_INCLUDED
| 16,024 | C | 39.467172 | 99 | 0.704631 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/RayTracer.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file RayTracer.h
///
/// @author Ken Museth
///
/// @brief Defines two simple but multithreaded renders, a level-set
/// ray tracer and a volume render. To support these renders we also define
/// perspective and orthographic cameras (both designed to mimic a Houdini camera),
/// a Film class and some rather naive shaders.
///
/// @note These classes are included mainly as reference implementations for
/// ray-tracing of OpenVDB volumes. In other words they are not intended for
/// production-quality rendering, but could be used for fast pre-visualization
/// or as a starting point for a more serious render.
#ifndef OPENVDB_TOOLS_RAYTRACER_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_RAYTRACER_HAS_BEEN_INCLUDED
#include <openvdb/Types.h>
#include <openvdb/math/BBox.h>
#include <openvdb/math/Ray.h>
#include <openvdb/math/Math.h>
#include <openvdb/tools/RayIntersector.h>
#include <openvdb/tools/Interpolation.h>
#include <deque>
#include <iostream>
#include <fstream>
#include <limits>
#include <memory>
#include <string>
#include <type_traits>
#include <vector>
#ifdef OPENVDB_TOOLS_RAYTRACER_USE_EXR
#include <OpenEXR/ImfPixelType.h>
#include <OpenEXR/ImfChannelList.h>
#include <OpenEXR/ImfOutputFile.h>
#include <OpenEXR/ImfHeader.h>
#include <OpenEXR/ImfFrameBuffer.h>
#endif
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
// Forward declarations
class BaseCamera;
class BaseShader;
/// @brief Ray-trace a volume.
/// @param pixelSamples number of jittered sub-rays per pixel; a value larger
///        than one implies anti-aliasing by jittered super-sampling
/// @param seed seed for the random jitter of the sub-rays
/// @param threaded if true, perform the render with multiple threads
template<typename GridT>
inline void rayTrace(const GridT&,
                     const BaseShader&,
                     BaseCamera&,
                     size_t pixelSamples = 1,
                     unsigned int seed = 0,
                     bool threaded = true);

/// @brief Ray-trace a volume using a given ray intersector.
/// @param pixelSamples number of jittered sub-rays per pixel; a value larger
///        than one implies anti-aliasing by jittered super-sampling
/// @param seed seed for the random jitter of the sub-rays
/// @param threaded if true, perform the render with multiple threads
template<typename GridT, typename IntersectorT>
inline void rayTrace(const GridT&,
                     const IntersectorT&,
                     const BaseShader&,
                     BaseCamera&,
                     size_t pixelSamples = 1,
                     unsigned int seed = 0,
                     bool threaded = true);
///////////////////////////////LEVEL SET RAY TRACER ///////////////////////////////////////
/// @brief A (very) simple multithreaded ray tracer specifically for narrow-band level sets.
/// @details Included primarily as a reference implementation.
template<typename GridT, typename IntersectorT = tools::LevelSetRayIntersector<GridT> >
class LevelSetRayTracer
{
public:
    using GridType = GridT;
    using Vec3Type = typename IntersectorT::Vec3Type;
    using RayType = typename IntersectorT::RayType;

    /// @brief Constructor based on an instance of the grid to be rendered.
    LevelSetRayTracer(const GridT& grid,
                      const BaseShader& shader,
                      BaseCamera& camera,
                      size_t pixelSamples = 1,
                      unsigned int seed = 0);

    /// @brief Constructor based on an instance of the intersector
    /// performing the ray-intersections.
    LevelSetRayTracer(const IntersectorT& inter,
                      const BaseShader& shader,
                      BaseCamera& camera,
                      size_t pixelSamples = 1,
                      unsigned int seed = 0);

    /// @brief Copy constructor
    LevelSetRayTracer(const LevelSetRayTracer& other);

    /// @brief Destructor
    ~LevelSetRayTracer();

    /// @brief Set the level set grid to be ray-traced
    void setGrid(const GridT& grid);

    /// @brief Set the intersector that performs the actual
    /// intersection of the rays against the narrow-band level set.
    void setIntersector(const IntersectorT& inter);

    /// @brief Set the shader derived from the abstract BaseShader class.
    ///
    /// @note The shader is not assumed to be thread-safe so each
    /// thread will get its own deep copy. For instance it could
    /// contain a ValueAccessor into another grid with auxiliary
    /// shading information. Thus, make sure it is relatively
    /// light-weight and efficient to copy (which is the case for ValueAccessors).
    void setShader(const BaseShader& shader);

    /// @brief Set the camera derived from the abstract BaseCamera class.
    void setCamera(BaseCamera& camera);

    /// @brief Set the number of pixel samples and the seed for
    /// jittered sub-rays. A value larger than one implies
    /// anti-aliasing by jittered super-sampling.
    /// @throw ValueError if pixelSamples is equal to zero.
    void setPixelSamples(size_t pixelSamples, unsigned int seed = 0);

    /// @brief Perform the actual (potentially multithreaded) ray-tracing.
    void render(bool threaded = true) const;

    /// @brief Public method required by tbb::parallel_for.
    /// @warning Never call it directly.
    void operator()(const tbb::blocked_range<size_t>& range) const;

private:
    // NOTE(review): member semantics below are inferred from the interface;
    // the definitions live elsewhere in this file — confirm against them.
    const bool mIsMaster;                      // presumably true only for the original (non-copied) instance
    double* mRand;                             // random samples for pixel jitter — TODO confirm ownership
    IntersectorT mInter;                       // performs ray/level-set intersections
    std::unique_ptr<const BaseShader> mShader; // per-instance deep copy of the shader
    BaseCamera* mCamera;                       // non-owning camera pointer
    size_t mSubPixels;                         // number of jittered sub-rays per pixel
};// LevelSetRayTracer
///////////////////////////////VOLUME RENDER ///////////////////////////////////////
/// @brief A (very) simple multithreaded volume render specifically for scalar density.
/// @details Included primarily as a reference implementation.
/// @note It will only compile if the IntersectorT is templated on a Grid with a
/// floating-point voxel type.
template <typename IntersectorT, typename SamplerT = tools::BoxSampler>
class VolumeRender
{
public:

    using GridType = typename IntersectorT::GridType;
    using RayType = typename IntersectorT::RayType;
    using ValueType = typename GridType::ValueType;
    using AccessorType = typename GridType::ConstAccessor;
    using SamplerType = tools::GridSampler<AccessorType, SamplerT>;
    static_assert(std::is_floating_point<ValueType>::value,
        "VolumeRender requires a floating-point-valued grid");

    /// @brief Constructor taking an intersector and a base camera.
    VolumeRender(const IntersectorT& inter, BaseCamera& camera);

    /// @brief Copy constructor which creates a thread-safe clone
    VolumeRender(const VolumeRender& other);

    /// @brief Perform the actual (potentially multithreaded) volume rendering.
    void render(bool threaded=true) const;

    /// @brief Set the camera derived from the abstract BaseCamera class.
    void setCamera(BaseCamera& camera) { mCamera = &camera; }

    /// @brief Set the intersector that performs the actual
    /// intersection of the rays against the volume.
    void setIntersector(const IntersectorT& inter);

    /// @brief Set the vector components of a directional light source
    /// @throw ArithmeticError if input is a null vector.
    void setLightDir(Real x, Real y, Real z) { mLightDir = Vec3R(x,y,z).unit(); }

    /// @brief Set the color of the directional light source.
    void setLightColor(Real r, Real g, Real b) { mLightColor = Vec3R(r,g,b); }

    /// @brief Set the integration step-size in voxel units for the primary ray.
    void setPrimaryStep(Real primaryStep) { mPrimaryStep = primaryStep; }

    /// @brief Set the integration step-size in voxel units for the shadow ray.
    void setShadowStep(Real shadowStep) { mShadowStep = shadowStep; }

    /// @brief Set Scattering coefficients.
    void setScattering(Real x, Real y, Real z) { mScattering = Vec3R(x,y,z); }

    /// @brief Set absorption coefficients.
    void setAbsorption(Real x, Real y, Real z) { mAbsorption = Vec3R(x,y,z); }

    /// @brief Set parameter that imitates multi-scattering. A value
    /// of zero implies no multi-scattering.
    void setLightGain(Real gain) { mLightGain = gain; }

    /// @brief Set the cut-off value for density and transmittance.
    void setCutOff(Real cutOff) { mCutOff = cutOff; }

    /// @brief Print parameters, statistics, memory usage and other information.
    /// @param os a stream to which to write textual information
    /// @param verboseLevel 1: print parameters only; 2: include grid
    /// statistics; 3: include memory usage
    void print(std::ostream& os = std::cout, int verboseLevel = 1);

    /// @brief Public method required by tbb::parallel_for.
    /// @warning Never call it directly.
    void operator()(const tbb::blocked_range<size_t>& range) const;

private:
    AccessorType mAccessor;                       // accessor into the density grid
    BaseCamera* mCamera;                          // non-owning camera pointer
    std::unique_ptr<IntersectorT> mPrimary, mShadow; // intersectors for primary and shadow rays
    Real mPrimaryStep, mShadowStep, mCutOff, mLightGain;
    Vec3R mLightDir, mLightColor, mAbsorption, mScattering;
};//VolumeRender
//////////////////////////////////////// FILM ////////////////////////////////////////
/// @brief A simple class that allows for concurrent writes to pixels in an image,
/// background initialization of the image, and PPM or EXR file output.
/// @brief A simple class that allows for concurrent writes to pixels in an image,
/// background initialization of the image, and PPM or EXR file output.
class Film
{
public:
    /// @brief Floating-point RGBA components in the range [0, 1].
    /// @details This is our preferred representation for color processing.
    struct RGBA
    {
        using ValueT = float;

        RGBA() : r(0), g(0), b(0), a(1) {}
        explicit RGBA(ValueT intensity) : r(intensity), g(intensity), b(intensity), a(1) {}
        RGBA(ValueT _r, ValueT _g, ValueT _b, ValueT _a = static_cast<ValueT>(1.0)):
            r(_r), g(_g), b(_b), a(_a)
        {}
        RGBA(double _r, double _g, double _b, double _a = 1.0)
            : r(static_cast<ValueT>(_r))
            , g(static_cast<ValueT>(_g))
            , b(static_cast<ValueT>(_b))
            , a(static_cast<ValueT>(_a))
        {}

        // Note: the arithmetic operators combine only the color channels;
        // the result's alpha is reset to the default of 1.
        RGBA  operator* (ValueT scale)  const { return RGBA(r*scale, g*scale, b*scale);}
        RGBA  operator+ (const RGBA& rhs) const { return RGBA(r+rhs.r, g+rhs.g, b+rhs.b);}
        RGBA  operator* (const RGBA& rhs) const { return RGBA(r*rhs.r, g*rhs.g, b*rhs.b);}
        RGBA& operator+=(const RGBA& rhs) { r+=rhs.r; g+=rhs.g; b+=rhs.b; a+=rhs.a; return *this;}

        /// @brief Alpha-composite this color over @a rhs in place.
        void over(const RGBA& rhs)
        {
            const float s = rhs.a*(1.0f-a);
            r = a*r+s*rhs.r;
            g = a*g+s*rhs.g;
            b = a*b+s*rhs.b;
            a = a + s;
        }

        ValueT r, g, b, a;
    };

    /// @brief Construct a width x height film with default (black, opaque) pixels.
    Film(size_t width, size_t height)
        : mWidth(width), mHeight(height), mSize(width*height), mPixels(new RGBA[mSize])
    {
    }
    /// @brief Construct a width x height film filled with the background color @a bg.
    Film(size_t width, size_t height, const RGBA& bg)
        : mWidth(width), mHeight(height), mSize(width*height), mPixels(new RGBA[mSize])
    {
        this->fill(bg);
    }

    const RGBA& pixel(size_t w, size_t h) const
    {
        assert(w < mWidth);
        assert(h < mHeight);
        return mPixels[w + h*mWidth];
    }

    RGBA& pixel(size_t w, size_t h)
    {
        assert(w < mWidth);
        assert(h < mHeight);
        return mPixels[w + h*mWidth];
    }

    /// @brief Set every pixel to @a rgb.
    void fill(const RGBA& rgb=RGBA(0)) { for (size_t i=0; i<mSize; ++i) mPixels[i] = rgb; }

    /// @brief Fill the film with a two-color checkerboard.
    /// @note The bit-mask pattern below produces square tiles only when
    /// @a size is a power of two.
    void checkerboard(const RGBA& c1=RGBA(0.3f), const RGBA& c2=RGBA(0.6f), size_t size=32)
    {
        RGBA *p = mPixels.get();
        for (size_t j = 0; j < mHeight; ++j) {
            for (size_t i = 0; i < mWidth; ++i, ++p) {
                *p = ((i & size) ^ (j & size)) ? c1 : c2;
            }
        }
    }

    /// @brief Write the image as a binary PPM (P6) file, appending a ".ppm"
    /// extension if @a fileName has none. Alpha is discarded.
    void savePPM(const std::string& fileName)
    {
        std::string name(fileName);
        if (name.find_last_of(".") == std::string::npos) name.append(".ppm");

        // Quantize a linear [0,1] component to 8 bits. Out-of-range (e.g. HDR
        // or negative) components are clamped first, since casting a value
        // outside [0,255] to unsigned char would otherwise be undefined.
        auto to8bit = [](float v) -> unsigned char {
            if (v < 0.0f) v = 0.0f; else if (v > 1.0f) v = 1.0f;
            return static_cast<unsigned char>(255.0f * v);
        };

        std::unique_ptr<unsigned char[]> buffer(new unsigned char[3*mSize]);
        unsigned char *tmp = buffer.get(), *q = tmp;
        RGBA* p = mPixels.get();
        size_t n = mSize;
        while (n--) {
            *q++ = to8bit(p->r);
            *q++ = to8bit(p->g);
            *q++ = to8bit(p->b);
            ++p;
        }

        std::ofstream os(name.c_str(), std::ios_base::binary);
        if (!os.is_open()) {
            std::cerr << "Error opening PPM file \"" << name << "\"" << std::endl;
            return;
        }
        os << "P6\n" << mWidth << " " << mHeight << "\n255\n";
        os.write(reinterpret_cast<const char*>(tmp), 3 * mSize * sizeof(unsigned char));
    }

#ifdef OPENVDB_TOOLS_RAYTRACER_USE_EXR
    /// @brief Write the image as an OpenEXR file (all four channels),
    /// appending a ".exr" extension if @a fileName has none.
    void saveEXR(const std::string& fileName, size_t compression = 2, size_t threads = 8)
    {
        std::string name(fileName);
        if (name.find_last_of(".") == std::string::npos) name.append(".exr");

        if (threads>0) Imf::setGlobalThreadCount(threads);
        Imf::Header header(mWidth, mHeight);
        if (compression==0) header.compression() = Imf::NO_COMPRESSION;
        if (compression==1) header.compression() = Imf::RLE_COMPRESSION;
        if (compression>=2) header.compression() = Imf::ZIP_COMPRESSION;
        header.channels().insert("R", Imf::Channel(Imf::FLOAT));
        header.channels().insert("G", Imf::Channel(Imf::FLOAT));
        header.channels().insert("B", Imf::Channel(Imf::FLOAT));
        header.channels().insert("A", Imf::Channel(Imf::FLOAT));

        // Each slice points into the interleaved RGBA pixel array.
        Imf::FrameBuffer framebuffer;
        framebuffer.insert("R", Imf::Slice( Imf::FLOAT, (char *) &(mPixels[0].r),
            sizeof (RGBA), sizeof (RGBA) * mWidth));
        framebuffer.insert("G", Imf::Slice( Imf::FLOAT, (char *) &(mPixels[0].g),
            sizeof (RGBA), sizeof (RGBA) * mWidth));
        framebuffer.insert("B", Imf::Slice( Imf::FLOAT, (char *) &(mPixels[0].b),
            sizeof (RGBA), sizeof (RGBA) * mWidth));
        framebuffer.insert("A", Imf::Slice( Imf::FLOAT, (char *) &(mPixels[0].a),
            sizeof (RGBA), sizeof (RGBA) * mWidth));

        Imf::OutputFile file(name.c_str(), header);
        file.setFrameBuffer(framebuffer);
        file.writePixels(mHeight);
    }
#endif

    size_t width()     const { return mWidth; }
    size_t height()    const { return mHeight; }
    size_t numPixels() const { return mSize; }
    const RGBA* pixels() const { return mPixels.get(); }

private:
    size_t mWidth, mHeight, mSize;     // image dimensions and pixel count
    std::unique_ptr<RGBA[]> mPixels;   // row-major pixel storage
};// Film
//////////////////////////////////////// CAMERAS ////////////////////////////////////////
/// Abstract base class for the perspective and orthographic cameras
class BaseCamera
{
public:
    /// @brief Construct a camera writing into @a film.
    /// @param rotation    rotation in degrees, applied in x, y, z order
    /// @param translation translation in world-space units, applied after rotation
    /// @param frameWidth  half-width of the visible screen window
    /// @param nearPlane   depth of the near clipping plane (must be > 0)
    /// @param farPlane    depth of the far clipping plane (must exceed nearPlane)
    BaseCamera(Film& film, const Vec3R& rotation, const Vec3R& translation,
               double frameWidth, double nearPlane, double farPlane)
        : mFilm(&film)
        , mScaleWidth(frameWidth)
        , mScaleHeight(frameWidth * double(film.height()) / double(film.width()))
    {
        assert(nearPlane > 0 && farPlane > nearPlane);
        // Build the screen-to-world transform: rotations (degrees -> radians)
        // about x, y, z, followed by the translation.
        mScreenToWorld.accumPostRotation(math::X_AXIS, rotation[0] * M_PI / 180.0);
        mScreenToWorld.accumPostRotation(math::Y_AXIS, rotation[1] * M_PI / 180.0);
        mScreenToWorld.accumPostRotation(math::Z_AXIS, rotation[2] * M_PI / 180.0);
        mScreenToWorld.accumPostTranslation(translation);
        this->initRay(nearPlane, farPlane);
    }

    virtual ~BaseCamera() {}

    Film::RGBA& pixel(size_t i, size_t j) { return mFilm->pixel(i, j); }

    size_t width()  const { return mFilm->width(); }
    size_t height() const { return mFilm->height(); }

    /// Rotate the camera so its negative z-axis points at xyz and its
    /// y axis is in the plane of the xyz and up vectors. In other
    /// words the camera will look at xyz and use up as the
    /// horizontal direction.
    void lookAt(const Vec3R& xyz, const Vec3R& up = Vec3R(0.0, 1.0, 0.0))
    {
        const Vec3R orig = mScreenToWorld.applyMap(Vec3R(0.0));
        const Vec3R dir  = orig - xyz;
        try {
            Mat4d xform = math::aim<Mat4d>(dir, up);
            xform.postTranslate(orig);
            mScreenToWorld = math::AffineMap(xform);
            this->initRay(mRay.t0(), mRay.t1());
        // Deliberate best-effort: if math::aim throws (e.g. for a degenerate
        // direction/up pair), the camera is simply left unchanged.
        } catch (...) {}
    }

    /// @brief Map raster (pixel) coordinates to screen space at depth @a z.
    Vec3R rasterToScreen(double i, double j, double z) const
    {
        return Vec3R( (2 * i / double(mFilm->width()) - 1)  * mScaleWidth,
                      (1 - 2 * j / double(mFilm->height())) * mScaleHeight, z );
    }

    /// @brief Return a Ray in world space given the pixel indices and
    /// optional offsets in the range [0, 1]. An offset of 0.5 corresponds
    /// to the center of the pixel.
    virtual math::Ray<double> getRay(
        size_t i, size_t j, double iOffset = 0.5, double jOffset = 0.5) const = 0;

protected:
    /// @brief (Re)initialize the template ray with the given clip depths,
    /// eye at the screen-space origin and direction along -z, both mapped
    /// into world space.
    void initRay(double t0, double t1)
    {
        mRay.setTimes(t0, t1);
        mRay.setEye(mScreenToWorld.applyMap(Vec3R(0.0)));
        mRay.setDir(mScreenToWorld.applyJacobian(Vec3R(0.0, 0.0, -1.0)));
    }

    Film* mFilm;                        // non-owning target image
    double mScaleWidth, mScaleHeight;   // half-extents of the screen window
    math::Ray<double> mRay;             // template ray reused by getRay()
    math::AffineMap mScreenToWorld;     // camera placement transform
};// BaseCamera
class PerspectiveCamera: public BaseCamera
{
  public:
    /// @brief Constructor
    /// @param film         film (i.e. image) defining the pixel resolution
    /// @param rotation     rotation in degrees of the camera in world space
    ///                     (applied in x, y, z order)
    /// @param translation  translation of the camera in world-space units,
    ///                     applied after rotation
    /// @param focalLength  focal length of the camera in mm
    ///                     (the default of 50mm corresponds to Houdini's default camera)
    /// @param aperture     width in mm of the frame, i.e., the visible field
    ///                     (the default 41.2136 mm corresponds to Houdini's default camera)
    /// @param nearPlane    depth of the near clipping plane in world-space units
    /// @param farPlane     depth of the far clipping plane in world-space units
    ///
    /// @details If no rotation or translation is provided, the camera is placed
    /// at (0,0,0) in world space and points in the direction of the negative z axis.
    PerspectiveCamera(Film& film,
                      const Vec3R& rotation    = Vec3R(0.0),
                      const Vec3R& translation = Vec3R(0.0),
                      double focalLength = 50.0,
                      double aperture    = 41.2136,
                      double nearPlane   = 1e-3,
                      double farPlane    = std::numeric_limits<double>::max())
        // The screen half-width is half the aperture/focal-length ratio,
        // i.e. tan of the half field-of-view.
        : BaseCamera(film, rotation, translation, 0.5*aperture/focalLength, nearPlane, farPlane)
    {
    }

    ~PerspectiveCamera() override = default;

    /// @brief Return a Ray in world space given the pixel indices and
    /// optional offsets in the range [0,1]. An offset of 0.5 corresponds
    /// to the center of the pixel.
    math::Ray<double> getRay(
        size_t i, size_t j, double iOffset = 0.5, double jOffset = 0.5) const override
    {
        math::Ray<double> ray(mRay);
        // Direction through the pixel on the z = -1 screen plane.
        Vec3R dir = BaseCamera::rasterToScreen(Real(i) + iOffset, Real(j) + jOffset, -1.0);
        dir = BaseCamera::mScreenToWorld.applyJacobian(dir);
        dir.normalize();
        // Rescale the ray times so the near/far clip planes keep their
        // depths along the view axis for this (normalized) direction.
        ray.scaleTimes(1.0/dir.dot(ray.dir()));
        ray.setDir(dir);
        return ray;
    }

    /// @brief Return the horizontal field of view in degrees given a
    /// focal length in mm and the specified aperture in mm.
    static double focalLengthToFieldOfView(double length, double aperture)
    {
        return 360.0 / M_PI * atan(aperture/(2.0*length));
    }
    /// @brief Return the focal length in mm given a horizontal field of
    /// view in degrees and the specified aperture in mm.
    static double fieldOfViewToFocalLength(double fov, double aperture)
    {
        return aperture/(2.0*(tan(fov * M_PI / 360.0)));
    }
};// PerspectiveCamera
class OrthographicCamera: public BaseCamera
{
public:
/// @brief Constructor
/// @param film film (i.e. image) defining the pixel resolution
/// @param rotation rotation in degrees of the camera in world space
/// (applied in x, y, z order)
/// @param translation translation of the camera in world-space units,
/// applied after rotation
/// @param frameWidth width of the frame in world-space units
/// @param nearPlane depth of the near clipping plane in world-space units
/// @param farPlane depth of the far clipping plane in world-space units
///
/// @details If no rotation or translation is provided, the camera is placed
/// at (0,0,0) in world space and points in the direction of the negative z axis.
OrthographicCamera(Film& film,
    const Vec3R& rotation = Vec3R(0.0),
    const Vec3R& translation = Vec3R(0.0),
    double frameWidth = 1.0,
    double nearPlane = 1e-3,
    double farPlane = std::numeric_limits<double>::max())
    : BaseCamera(film, rotation, translation, 0.5*frameWidth, nearPlane, farPlane)
{
}
~OrthographicCamera() override = default;
/// @brief Return a Ray in world space given the pixel indices and
/// optional offsets in the range [0,1]. All rays share the template ray's
/// direction; only the eye position varies per pixel.
math::Ray<double> getRay(
    size_t i, size_t j, double iOffset = 0.5, double jOffset = 0.5) const override
{
    const Vec3R screenPos = this->rasterToScreen(Real(i) + iOffset, Real(j) + jOffset, 0.0);
    math::Ray<double> ray(mRay);
    ray.setEye(mScreenToWorld.applyMap(screenPos));
    return ray;
}
};// OrthographicCamera
//////////////////////////////////////// SHADERS ////////////////////////////////////////
/// Abstract base class for the shaders
class BaseShader
{
public:
using RayT = math::Ray<Real>;
BaseShader() {}
BaseShader(const BaseShader&) = default;
virtual ~BaseShader() = default;
/// @brief Defines the interface of the virtual function that returns a RGB color.
/// @param xyz World position of the intersection point.
/// @param nml Normal in world space at the intersection point.
/// @param dir Direction of the ray in world space.
virtual Film::RGBA operator()(const Vec3R& xyz, const Vec3R& nml, const Vec3R& dir) const = 0;
/// @brief Polymorphic clone. The ray tracer calls this (see
/// LevelSetRayTracer's constructors below) so each tracer instance owns
/// its own shader copy.
virtual BaseShader* copy() const = 0;
};
/// @brief Shader that produces a simple matte.
///
/// @details The color can either be constant (if GridT =
/// Film::RGBA which is the default) or defined in a separate Vec3
/// color grid. Use SamplerType to define the order of interpolation
/// (default is zero order, i.e. closest-point).
template<typename GridT = Film::RGBA,
    typename SamplerType = tools::PointSampler>
class MatteShader: public BaseShader
{
public:
MatteShader(const GridT& grid) : mAcc(grid.getAccessor()), mXform(&grid.transform()) {}
MatteShader(const MatteShader&) = default;
~MatteShader() override = default;
Film::RGBA operator()(const Vec3R& xyz, const Vec3R&, const Vec3R&) const override
{
    // Sample the color grid at the hit point; normal and ray direction
    // are ignored for a matte material.
    const Vec3R ijk = mXform->worldToIndex(xyz);
    typename GridT::ValueType val = zeroVal<typename GridT::ValueType>();
    SamplerType::sample(mAcc, ijk, val);
    return Film::RGBA(val[0], val[1], val[2]);
}
BaseShader* copy() const override { return new MatteShader<GridT, SamplerType>(*this); }
private:
typename GridT::ConstAccessor mAcc;
const math::Transform* mXform;
};
// Template specialization using a constant color of the material.
template<typename SamplerType>
class MatteShader<Film::RGBA, SamplerType>: public BaseShader
{
public:
/// Construct with a uniform matte color (defaults to white).
MatteShader(const Film::RGBA& c = Film::RGBA(1.0f)): mColor(c) {}
MatteShader(const MatteShader&) = default;
~MatteShader() override = default;
/// The returned color is independent of position, normal and view direction.
Film::RGBA operator()(const Vec3R&, const Vec3R&, const Vec3R&) const override
{
    return mColor;
}
BaseShader* copy() const override { return new MatteShader<Film::RGBA, SamplerType>(*this); }
private:
const Film::RGBA mColor;
};
/// @brief Color shader that treats the surface normal (x, y, z) as an
/// RGB color.
///
/// @details The color can either be constant (if GridT =
/// Film::RGBA which is the default) or defined in a separate Vec3
/// color grid. Use SamplerType to define the order of interpolation
/// (default is zero order, i.e. closest-point).
template<typename GridT = Film::RGBA,
    typename SamplerType = tools::PointSampler>
class NormalShader: public BaseShader
{
public:
NormalShader(const GridT& grid) : mAcc(grid.getAccessor()), mXform(&grid.transform()) {}
NormalShader(const NormalShader&) = default;
~NormalShader() override = default;
Film::RGBA operator()(const Vec3R& xyz, const Vec3R& normal, const Vec3R&) const override
{
    // Sample the color grid, then modulate each channel by the matching
    // normal component remapped from [-1,1] to [0,2].
    typename GridT::ValueType val = zeroVal<typename GridT::ValueType>();
    SamplerType::sample(mAcc, mXform->worldToIndex(xyz), val);
    const Vec3R shade(normal[0] + 1.0, normal[1] + 1.0, normal[2] + 1.0);
    return Film::RGBA(val[0]*shade[0], val[1]*shade[1], val[2]*shade[2]);
}
BaseShader* copy() const override { return new NormalShader<GridT, SamplerType>(*this); }
private:
typename GridT::ConstAccessor mAcc;
const math::Transform* mXform;
};
// Template specialization using a constant color of the material.
template<typename SamplerType>
class NormalShader<Film::RGBA, SamplerType>: public BaseShader
{
public:
/// Half of the base color is stored so that the remapped normal
/// components (range [0,2]) produce at most the full color.
NormalShader(const Film::RGBA& c = Film::RGBA(1.0f)) : mRGBA(c*0.5f) {}
NormalShader(const NormalShader&) = default;
~NormalShader() override = default;
Film::RGBA operator()(const Vec3R&, const Vec3R& normal, const Vec3R&) const override
{
    const Film::RGBA shade(normal[0] + 1.0, normal[1] + 1.0, normal[2] + 1.0);
    return mRGBA * shade;
}
BaseShader* copy() const override { return new NormalShader<Film::RGBA, SamplerType>(*this); }
private:
const Film::RGBA mRGBA;
};
/// @brief Color shader that treats position (x, y, z) as an RGB color in a
/// cube defined from an axis-aligned bounding box in world space.
///
/// @details The color can either be constant (if GridT =
/// Film::RGBA which is the default) or defined in a separate Vec3
/// color grid. Use SamplerType to define the order of interpolation
/// (default is zero order, i.e. closest-point).
template<typename GridT = Film::RGBA,
    typename SamplerType = tools::PointSampler>
class PositionShader: public BaseShader
{
public:
PositionShader(const math::BBox<Vec3R>& bbox, const GridT& grid)
    : mMin(bbox.min())
    , mInvDim(1.0/bbox.extents())
    , mAcc(grid.getAccessor())
    , mXform(&grid.transform())
{
}
PositionShader(const PositionShader&) = default;
~PositionShader() override = default;
Film::RGBA operator()(const Vec3R& xyz, const Vec3R&, const Vec3R&) const override
{
    // Position normalized to the bounding box -> RGB in the unit cube.
    const Vec3R rgb = (xyz - mMin) * mInvDim;
    typename GridT::ValueType val = zeroVal<typename GridT::ValueType>();
    SamplerType::sample(mAcc, mXform->worldToIndex(xyz), val);
    return Film::RGBA(val[0], val[1], val[2]) * Film::RGBA(rgb[0], rgb[1], rgb[2]);
}
BaseShader* copy() const override { return new PositionShader<GridT, SamplerType>(*this); }
private:
const Vec3R mMin, mInvDim;
typename GridT::ConstAccessor mAcc;
const math::Transform* mXform;
};
// Template specialization using a constant color of the material.
template<typename SamplerType>
class PositionShader<Film::RGBA, SamplerType>: public BaseShader
{
public:
/// Construct from the world-space box mapped onto the RGB unit cube.
PositionShader(const math::BBox<Vec3R>& bbox, const Film::RGBA& c = Film::RGBA(1.0f))
    : mMin(bbox.min()), mInvDim(1.0/bbox.extents()), mRGBA(c) {}
PositionShader(const PositionShader&) = default;
~PositionShader() override = default;
Film::RGBA operator()(const Vec3R& xyz, const Vec3R&, const Vec3R&) const override
{
    const Vec3R rgb = (xyz - mMin)*mInvDim;
    const Film::RGBA positionColor(rgb[0], rgb[1], rgb[2]);
    return mRGBA*positionColor;
}
BaseShader* copy() const override { return new PositionShader<Film::RGBA, SamplerType>(*this); }
private:
const Vec3R mMin, mInvDim;
const Film::RGBA mRGBA;
};
/// @brief Simple diffuse Lambertian surface shader.
///
/// @details The diffuse color can either be constant (if GridT =
/// Film::RGBA which is the default) or defined in a separate Vec3
/// color grid. Lambertian implies that the (radiant) intensity is
/// directly proportional to the cosine of the angle between the
/// surface normal and the direction of the light source. Use
/// SamplerType to define the order of interpolation (default is
/// zero order, i.e. closest-point).
template<typename GridT = Film::RGBA,
    typename SamplerType = tools::PointSampler>
class DiffuseShader: public BaseShader
{
public:
DiffuseShader(const GridT& grid): mAcc(grid.getAccessor()), mXform(&grid.transform()) {}
DiffuseShader(const DiffuseShader&) = default;
~DiffuseShader() override = default;
Film::RGBA operator()(const Vec3R& xyz, const Vec3R& normal, const Vec3R& rayDir) const override
{
    // Sample the diffuse color from the grid at the hit point.
    typename GridT::ValueType val = zeroVal<typename GridT::ValueType>();
    SamplerType::sample(mAcc, mXform->worldToIndex(xyz), val);
    // The abs of the dot product corresponds to having light sources at
    // +/- rayDir, i.e. two-sided shading.
    const auto lambert = static_cast<Film::RGBA::ValueT>(math::Abs(normal.dot(rayDir)));
    return Film::RGBA(val[0], val[1], val[2]) * lambert;
}
BaseShader* copy() const override { return new DiffuseShader<GridT, SamplerType>(*this); }
private:
typename GridT::ConstAccessor mAcc;
const math::Transform* mXform;
};
// Template specialization using a constant color of the material.
template <typename SamplerType>
class DiffuseShader<Film::RGBA, SamplerType>: public BaseShader
{
public:
DiffuseShader(const Film::RGBA& d = Film::RGBA(1.0f)): mRGBA(d) {}
DiffuseShader(const DiffuseShader&) = default;
~DiffuseShader() override = default;
Film::RGBA operator()(const Vec3R&, const Vec3R& normal, const Vec3R& rayDir) const override
{
    // A single directional light source is assumed at the camera, so the
    // Lambertian term is the dot product of the surface normal with the
    // inverse ray direction. Taking the absolute value corresponds to
    // light sources at +/- rayDir, i.e. two-sided shading; strict
    // one-sided shading would clamp negative values instead:
    //return mRGBA * math::Max(0.0, normal.dot(-rayDir));
    const auto lambert = static_cast<Film::RGBA::ValueT>(math::Abs(normal.dot(rayDir)));
    return mRGBA * lambert;
}
BaseShader* copy() const override { return new DiffuseShader<Film::RGBA, SamplerType>(*this); }
private:
const Film::RGBA mRGBA;
};
//////////////////////////////////////// RAYTRACER ////////////////////////////////////////
template<typename GridT>
inline void rayTrace(const GridT& grid,
const BaseShader& shader,
BaseCamera& camera,
size_t pixelSamples,
unsigned int seed,
bool threaded)
{
LevelSetRayTracer<GridT, tools::LevelSetRayIntersector<GridT> >
tracer(grid, shader, camera, pixelSamples, seed);
tracer.render(threaded);
}
template<typename GridT, typename IntersectorT>
inline void rayTrace(const GridT&,
const IntersectorT& inter,
const BaseShader& shader,
BaseCamera& camera,
size_t pixelSamples,
unsigned int seed,
bool threaded)
{
LevelSetRayTracer<GridT, IntersectorT> tracer(inter, shader, camera, pixelSamples, seed);
tracer.render(threaded);
}
//////////////////////////////////////// LevelSetRayTracer ////////////////////////////////////////
/// @brief Construct from a grid: the intersector is built internally and
/// the shader is cloned so this (master) tracer owns its own copy.
template<typename GridT, typename IntersectorT>
inline LevelSetRayTracer<GridT, IntersectorT>::
LevelSetRayTracer(const GridT& grid,
                  const BaseShader& shader,
                  BaseCamera& camera,
                  size_t pixelSamples,
                  unsigned int seed)
    : mIsMaster(true),
      mRand(nullptr),
      mInter(grid),
      mShader(shader.copy()),
      mCamera(&camera)
{
    // Allocates the jitter table when pixelSamples > 1 (throws if zero).
    this->setPixelSamples(pixelSamples, seed);
}
/// @brief Construct from an existing intersector (copied); the shader is
/// cloned so this (master) tracer owns its own copy.
template<typename GridT, typename IntersectorT>
inline LevelSetRayTracer<GridT, IntersectorT>::
LevelSetRayTracer(const IntersectorT& inter,
                  const BaseShader& shader,
                  BaseCamera& camera,
                  size_t pixelSamples,
                  unsigned int seed)
    : mIsMaster(true),
      mRand(nullptr),
      mInter(inter),
      mShader(shader.copy()),
      mCamera(&camera)
{
    // Allocates the jitter table when pixelSamples > 1 (throws if zero).
    this->setPixelSamples(pixelSamples, seed);
}
/// @brief Copy constructor, used when the tracer functor is duplicated for
/// worker threads (see render()). Copies get their own shader clone but
/// share the master's jitter table; mIsMaster is false so copies do not
/// delete the shared table in their destructor.
template<typename GridT, typename IntersectorT>
inline LevelSetRayTracer<GridT, IntersectorT>::
LevelSetRayTracer(const LevelSetRayTracer& other) :
    mIsMaster(false),
    mRand(other.mRand),
    mInter(other.mInter),
    mShader(other.mShader->copy()),
    mCamera(other.mCamera),
    mSubPixels(other.mSubPixels)
{
}
/// @brief Destructor. Only the master instance owns the jitter table that
/// thread-local copies share, so only the master frees it.
template<typename GridT, typename IntersectorT>
inline LevelSetRayTracer<GridT, IntersectorT>::
~LevelSetRayTracer()
{
    if (mIsMaster) delete [] mRand;
}
/// @brief Replace the intersector with one built from @a grid.
/// May only be called on the master instance (asserted).
template<typename GridT, typename IntersectorT>
inline void LevelSetRayTracer<GridT, IntersectorT>::
setGrid(const GridT& grid)
{
    assert(mIsMaster);
    mInter = IntersectorT(grid);
}
/// @brief Replace the intersector. May only be called on the master
/// instance (asserted).
template<typename GridT, typename IntersectorT>
inline void LevelSetRayTracer<GridT, IntersectorT>::
setIntersector(const IntersectorT& inter)
{
    assert(mIsMaster);
    mInter = inter;
}
/// @brief Replace the shader with a clone of @a shader. May only be
/// called on the master instance (asserted).
template<typename GridT, typename IntersectorT>
inline void LevelSetRayTracer<GridT, IntersectorT>::
setShader(const BaseShader& shader)
{
    assert(mIsMaster);
    mShader.reset(shader.copy());
}
/// @brief Point this tracer at a different camera (not owned). May only
/// be called on the master instance (asserted).
template<typename GridT, typename IntersectorT>
inline void LevelSetRayTracer<GridT, IntersectorT>::
setCamera(BaseCamera& camera)
{
    assert(mIsMaster);
    mCamera = &camera;
}
/// @brief Set the number of samples per pixel and (re)build the table of
/// random offsets used for anti-aliasing by jittered super-sampling.
/// @param pixelSamples total samples per pixel (1 = primary ray only)
/// @param seed seed for the random jitter offsets
/// @throws ValueError if @a pixelSamples is zero
/// May only be called on the master instance (asserted).
template<typename GridT, typename IntersectorT>
inline void LevelSetRayTracer<GridT, IntersectorT>::
setPixelSamples(size_t pixelSamples, unsigned int seed)
{
    assert(mIsMaster);
    if (pixelSamples == 0) {
        OPENVDB_THROW(ValueError, "pixelSamples must be larger than zero!");
    }
    mSubPixels = pixelSamples - 1;
    // Release any previous jitter table and null the pointer immediately,
    // so that a throwing allocation below cannot leave mRand dangling
    // (the destructor would otherwise double-delete it).
    delete [] mRand;
    mRand = nullptr;
    if (mSubPixels > 0) {
        mRand = new double[16];
        math::Rand01<double> rand(seed);//offsets for anti-aliasing by jittered super-sampling
        for (size_t i=0; i<16; ++i) mRand[i] = rand();
    }
}
/// @brief Render the full image, optionally multi-threaded over scanlines.
template<typename GridT, typename IntersectorT>
inline void LevelSetRayTracer<GridT, IntersectorT>::
render(bool threaded) const
{
    const tbb::blocked_range<size_t> range(0, mCamera->height());
    if (threaded) {
        tbb::parallel_for(range, *this);
    } else {
        (*this)(range);
    }
}
/// @brief Render the given range of scanlines: for each pixel one primary
/// ray plus mSubPixels jittered rays are traced and the shaded colors are
/// averaged. Pixels whose rays miss keep the existing background color.
template<typename GridT, typename IntersectorT>
inline void LevelSetRayTracer<GridT, IntersectorT>::
operator()(const tbb::blocked_range<size_t>& range) const
{
    const BaseShader& shader = *mShader;
    Vec3Type xyz, nml;
    // Averaging weight: one primary sample plus mSubPixels jittered samples.
    const float frac = 1.0f / (1.0f + float(mSubPixels));
    // n walks through the 16 precomputed jitter offsets, two per sub-sample.
    for (size_t j=range.begin(), n=0, je = range.end(); j<je; ++j) {
        for (size_t i=0, ie = mCamera->width(); i<ie; ++i) {
            Film::RGBA& bg = mCamera->pixel(i,j);
            RayType ray = mCamera->getRay(i, j);//primary ray
            Film::RGBA c = mInter.intersectsWS(ray, xyz, nml) ? shader(xyz, nml, ray.dir()) : bg;
            for (size_t k=0; k<mSubPixels; ++k, n +=2 ) {
                ray = mCamera->getRay(i, j, mRand[n & 15], mRand[(n+1) & 15]);
                c += mInter.intersectsWS(ray, xyz, nml) ? shader(xyz, nml, ray.dir()) : bg;
            }//loop over sub-pixels
            bg = c*frac;
        }//loop over image width
    }//loop over image height
}
//////////////////////////////////////// VolumeRender ////////////////////////////////////////
/// @brief Construct from a volume intersector and a camera. The
/// intersector is duplicated so primary and shadow rays can march
/// independently; the remaining members are rendering defaults.
template<typename IntersectorT, typename SampleT>
inline VolumeRender<IntersectorT, SampleT>::
VolumeRender(const IntersectorT& inter, BaseCamera& camera)
    : mAccessor(inter.grid().getConstAccessor())
    , mCamera(&camera)
    , mPrimary(new IntersectorT(inter))
    , mShadow(new IntersectorT(inter))
    , mPrimaryStep(1.0)
    , mShadowStep(3.0)
    , mCutOff(0.005)
    , mLightGain(0.2)
    , mLightDir(Vec3R(0.3, 0.3, 0).unit())
    , mLightColor(0.7, 0.7, 0.7)
    , mAbsorption(0.1)
    , mScattering(1.5)
{
}
/// @brief Copy constructor, used when the functor is duplicated for worker
/// threads (see render()). Each copy gets its own primary and shadow
/// intersectors; all rendering parameters are copied verbatim.
template<typename IntersectorT, typename SampleT>
inline VolumeRender<IntersectorT, SampleT>::
VolumeRender(const VolumeRender& other)
    : mAccessor(other.mAccessor)
    , mCamera(other.mCamera)
    , mPrimary(new IntersectorT(*(other.mPrimary)))
    , mShadow(new IntersectorT(*(other.mShadow)))
    , mPrimaryStep(other.mPrimaryStep)
    , mShadowStep(other.mShadowStep)
    , mCutOff(other.mCutOff)
    , mLightGain(other.mLightGain)
    , mLightDir(other.mLightDir)
    , mLightColor(other.mLightColor)
    , mAbsorption(other.mAbsorption)
    , mScattering(other.mScattering)
{
}
/// @brief Print the rendering parameters (when verboseLevel > 0) followed
/// by the primary intersector's own diagnostics.
template<typename IntersectorT, typename SampleT>
inline void VolumeRender<IntersectorT, SampleT>::
print(std::ostream& os, int verboseLevel)
{
    if (verboseLevel>0) {
        os << "\nPrimary step: " << mPrimaryStep;
        os << "\nShadow step: " << mShadowStep;
        os << "\nCutoff: " << mCutOff;
        os << "\nLightGain: " << mLightGain;
        os << "\nLightDir: " << mLightDir;
        os << "\nLightColor: " << mLightColor;
        os << "\nAbsorption: " << mAbsorption;
        os << "\nScattering: " << mScattering << std::endl;
    }
    mPrimary->print(os, verboseLevel);
}
/// @brief Replace both the primary-ray and shadow-ray intersectors with
/// copies of @a inter.
template<typename IntersectorT, typename SampleT>
inline void VolumeRender<IntersectorT, SampleT>::
setIntersector(const IntersectorT& inter)
{
    mPrimary.reset(new IntersectorT(inter));
    mShadow.reset( new IntersectorT(inter));
}
/// @brief Render the full image, optionally multi-threaded over scanlines.
template<typename IntersectorT, typename SampleT>
inline void VolumeRender<IntersectorT, SampleT>::
render(bool threaded) const
{
    const tbb::blocked_range<size_t> range(0, mCamera->height());
    if (threaded) {
        tbb::parallel_for(range, *this);
    } else {
        (*this)(range);
    }
}
/// @brief Render the given range of scanlines by single-scattering volume
/// ray marching: along each primary ray the density is sampled at fixed
/// steps; at every step a shadow ray toward the light accumulates
/// transmittance, and the in-scattered luminance is composited into the
/// pixel. Marching terminates early once transmittance drops below cutoff.
template<typename IntersectorT, typename SampleT>
inline void VolumeRender<IntersectorT, SampleT>::
operator()(const tbb::blocked_range<size_t>& range) const
{
    SamplerType sampler(mAccessor, mShadow->grid().transform());//light-weight wrapper
    // Any variable prefixed with p (or s) means it's associated with a primary (or shadow) ray
    const Vec3R extinction = -mScattering-mAbsorption, One(1.0);
    const Vec3R albedo = mLightColor*mScattering/(mScattering+mAbsorption);//single scattering
    const Real sGain = mLightGain;//in-scattering along shadow ray
    const Real pStep = mPrimaryStep;//Integration step along primary ray in voxel units
    const Real sStep = mShadowStep;//Integration step along shadow ray in voxel units
    const Real cutoff = mCutOff;//Cutoff for density and transmittance
    // For the sake of completeness we show how to use two different
    // methods (hits/march) in VolumeRayIntersector that produce
    // segments along the ray that intersects active values. Comment out
    // the line below to use VolumeRayIntersector::march instead of
    // VolumeRayIntersector::hits.
#define USE_HITS
#ifdef USE_HITS
    std::vector<typename RayType::TimeSpan> pTS, sTS;
    //std::deque<typename RayType::TimeSpan> pTS, sTS;
#endif
    // Shadow-ray template; its eye is moved to each primary sample point.
    RayType sRay(Vec3R(0), mLightDir);//Shadow ray
    for (size_t j=range.begin(), je = range.end(); j<je; ++j) {
        for (size_t i=0, ie = mCamera->width(); i<ie; ++i) {
            Film::RGBA& bg = mCamera->pixel(i, j);
            bg.a = bg.r = bg.g = bg.b = 0;
            RayType pRay = mCamera->getRay(i, j);// Primary ray
            if( !mPrimary->setWorldRay(pRay)) continue;
            // pTrans: transmittance along the primary ray; pLumi: luminance.
            Vec3R pTrans(1.0), pLumi(0.0);
#ifndef USE_HITS
            Real pT0, pT1;
            while (mPrimary->march(pT0, pT1)) {
                for (Real pT = pStep*ceil(pT0/pStep); pT <= pT1; pT += pStep) {
#else
            mPrimary->hits(pTS);
            for (size_t k=0; k<pTS.size(); ++k) {
                Real pT = pStep*ceil(pTS[k].t0/pStep), pT1=pTS[k].t1;
                for (; pT <= pT1; pT += pStep) {
#endif
                    Vec3R pPos = mPrimary->getWorldPos(pT);
                    const Real density = sampler.wsSample(pPos);
                    if (density < cutoff) continue;
                    // Per-step transmittance: exp(extinction * density * step).
                    const Vec3R dT = math::Exp(extinction * density * pStep);
                    Vec3R sTrans(1.0);
                    sRay.setEye(pPos);
                    if( !mShadow->setWorldRay(sRay)) continue;
#ifndef USE_HITS
                    Real sT0, sT1;
                    while (mShadow->march(sT0, sT1)) {
                        for (Real sT = sStep*ceil(sT0/sStep); sT <= sT1; sT+= sStep) {
#else
                    mShadow->hits(sTS);
                    for (size_t l=0; l<sTS.size(); ++l) {
                        Real sT = sStep*ceil(sTS[l].t0/sStep), sT1=sTS[l].t1;
                        for (; sT <= sT1; sT+= sStep) {
#endif
                            const Real d = sampler.wsSample(mShadow->getWorldPos(sT));
                            if (d < cutoff) continue;
                            sTrans *= math::Exp(extinction * d * sStep/(1.0+sT*sGain));
                            if (sTrans.lengthSqr()<cutoff) goto Luminance;//Terminate sRay
                        }//Integration over shadow segment
                    }// Shadow ray march
                Luminance:
                    pLumi += albedo * sTrans * pTrans * (One-dT);
                    pTrans *= dT;
                    if (pTrans.lengthSqr()<cutoff) goto Pixel; // Terminate Ray
                }//Integration over primary segment
            }// Primary ray march
        Pixel:
            bg.r = static_cast<Film::RGBA::ValueT>(pLumi[0]);
            bg.g = static_cast<Film::RGBA::ValueT>(pLumi[1]);
            bg.b = static_cast<Film::RGBA::ValueT>(pLumi[2]);
            // Alpha is the average opacity over the three channels.
            bg.a = static_cast<Film::RGBA::ValueT>(1.0f - pTrans.sum()/3.0f);
        }//Horizontal pixel scan
    }//Vertical pixel scan
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_RAYTRACER_HAS_BEEN_INCLUDED
| 42,232 | C | 37.56895 | 100 | 0.623129 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Merge.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file Merge.h
///
/// @brief Functions to efficiently merge grids
///
/// @author Dan Bailey
#ifndef OPENVDB_TOOLS_MERGE_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_MERGE_HAS_BEEN_INCLUDED
#include <openvdb/Platform.h>
#include <openvdb/Exceptions.h>
#include <openvdb/Types.h>
#include <openvdb/Grid.h>
#include <openvdb/tree/NodeManager.h>

#include <deque>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Convenience class that contains a pointer to a tree to be stolen or
/// deep copied depending on the tag dispatch class used and a subset of
/// methods to retrieve data from the tree.
///
/// @details The primary purpose of this class is to be able to create an array
/// of TreeToMerge objects that each store a tree to be stolen or a tree to be
/// deep-copied in an arbitrary order. Certain operations such as floating-point
/// addition are non-associative so the order in which they are merged is
/// important for the operation to remain deterministic regardless of how the
/// data is being extracted from the tree.
///
/// @note Stealing data requires a non-const tree pointer. There is a constructor
/// to pass in a tree shared pointer for cases where it is desirable for this class
/// to maintain shared ownership.
template <typename TreeT>
struct TreeToMerge
{
using TreeType = std::remove_const_t<TreeT>;
using RootNodeType = typename TreeType::RootNodeType;
using ValueType = typename TreeType::ValueType;
using MaskTreeType = typename TreeT::template ValueConverter<ValueMask>::Type;
TreeToMerge() = delete;
/// @brief Non-const pointer tree constructor for stealing data.
TreeToMerge(TreeType& tree, Steal)
    : mTree(&tree), mSteal(true) { }
/// @brief Non-const shared pointer tree constructor for stealing data.
/// This class participates in ownership of the tree.
TreeToMerge(typename TreeType::Ptr treePtr, Steal)
    : mTreePtr(treePtr), mTree(mTreePtr.get()), mSteal(true) { }
/// @brief Const tree pointer constructor for deep-copying data. As the
/// tree is not mutable and thus cannot be pruned, a lightweight mask tree
/// with the same topology is created that can be pruned to use as a
/// reference. Initialization of this mask tree can optionally be disabled
/// for delayed construction.
TreeToMerge(const TreeType& tree, DeepCopy, bool initialize = true)
    : mTree(&tree), mSteal(false)
{
    if (mTree && initialize) this->initializeMask();
}
/// @brief Non-const tree pointer constructor for deep-copying data. The
/// tree is not intended to be modified so is not pruned, instead a
/// lightweight mask tree with the same topology is created that can be
/// pruned to use as a reference. Initialization of this mask tree can
/// optionally be disabled for delayed construction.
TreeToMerge(TreeType& tree, DeepCopy tag, bool initialize = true)
    : TreeToMerge(static_cast<const TreeType&>(tree), tag, initialize) { }
/// @brief Reset the non-const tree shared pointer. This is primarily
/// used to preserve the order of trees to merge in a container but have
/// the data in the tree be lazily loaded or resampled.
void reset(typename TreeType::Ptr treePtr, Steal);
/// @brief Return a pointer to the tree to be stolen.
TreeType* treeToSteal() { return mSteal ? const_cast<TreeType*>(mTree) : nullptr; }
/// @brief Return a pointer to the tree to be deep-copied.
const TreeType* treeToDeepCopy() { return mSteal ? nullptr : mTree; }
/// @brief Retrieve a const pointer to the root node.
const RootNodeType* rootPtr() const;
/// @brief Return a pointer to the node of type @c NodeT that contains
/// voxel (x, y, z). If no such node exists, return @c nullptr.
template<typename NodeT>
const NodeT* probeConstNode(const Coord& ijk) const;
/// @brief Return a pointer to the node of type @c NodeT that contains voxel (x, y, z).
/// If the tree is non-const, steal the node and replace it with an inactive
/// background-value tile.
/// If the tree is const, deep-copy the node and modify the mask tree to prune the node.
template <typename NodeT>
std::unique_ptr<NodeT> stealOrDeepCopyNode(const Coord& ijk);
/// @brief Add a tile containing voxel (x, y, z) at the level of NodeT,
/// deleting the existing branch if necessary.
template <typename NodeT>
void addTile(const Coord& ijk, const ValueType& value, bool active);
// build a lightweight mask using a union of the const tree where leaf nodes
// are converted into active tiles
void initializeMask();
// returns true if mask has been initialized
bool hasMask() const;
// returns MaskTree pointer or nullptr
MaskTreeType* mask() { return mMaskTree.ptr.get(); }
const MaskTreeType* mask() const { return mMaskTree.ptr.get(); }
private:
struct MaskPtr;
struct MaskUnionOp;
typename TreeType::Ptr mTreePtr; // optional shared ownership of the tree
const TreeType* mTree; // tree to steal from or deep-copy
MaskPtr mMaskTree; // prunable topology mask (deep-copy mode only)
bool mSteal; // true => steal, false => deep-copy
}; // struct TreeToMerge
/// @brief Wrapper around unique_ptr that deep-copies mask on copy construction
template <typename TreeT>
struct TreeToMerge<TreeT>::MaskPtr
{
std::unique_ptr<MaskTreeType> ptr;
MaskPtr() = default;
~MaskPtr() = default;
MaskPtr(MaskPtr&& other) = default;
MaskPtr& operator=(MaskPtr&& other) = default;
/// Deep-copy the mask (if any) so each copy owns an independent tree.
MaskPtr(const MaskPtr& other)
    : ptr(bool(other.ptr) ? std::make_unique<MaskTreeType>(*other.ptr) : nullptr) { }
MaskPtr& operator=(const MaskPtr& other)
{
    // Note: unique_ptr::reset() takes a raw pointer, not a unique_ptr,
    // so the freshly allocated deep copy must be move-assigned instead.
    if (this != &other) {
        ptr = bool(other.ptr) ? std::make_unique<MaskTreeType>(*other.ptr) : nullptr;
    }
    return *this;
}
};
/// @brief DynamicNodeManager operator used to generate a mask of the input
/// tree, but with dense leaf nodes replaced with active tiles for compactness
template <typename TreeT>
struct TreeToMerge<TreeT>::MaskUnionOp
{
using MaskT = MaskTreeType;
using RootT = typename MaskT::RootNodeType;
using LeafT = typename MaskT::LeafNodeType;
explicit MaskUnionOp(const TreeT& tree) : mTree(tree) { }
bool operator()(RootT& root, size_t) const;
template<typename NodeT>
bool operator()(NodeT& node, size_t) const;
// Nothing to do at leaf level (dense leaves are represented as active
// tiles at the parent level); returning false signals no further work.
bool operator()(LeafT&, size_t) const { return false; }
private:
const TreeT& mTree; // source tree whose topology is being masked
}; // struct TreeToMerge<TreeT>::MaskUnionOp
////////////////////////////////////////
/// @brief DynamicNodeManager operator to merge trees using a CSG union or intersection.
/// @note This class modifies the topology of the tree so is designed to be used
/// from DynamicNodeManager::foreachTopDown().
/// @details A union and an intersection are opposite operations to each other so
/// implemented in a combined class. Use the CsgUnionOp and CsgIntersectionOp aliases
/// for convenience.
template<typename TreeT, bool Union>
struct CsgUnionOrIntersectionOp
{
using ValueT = typename TreeT::ValueType;
using RootT = typename TreeT::RootNodeType;
using LeafT = typename TreeT::LeafNodeType;
/// @brief Convenience constructor to CSG union or intersect a single
/// non-const tree with another. This constructor takes a Steal or DeepCopy
/// tag dispatch class.
template <typename TagT>
CsgUnionOrIntersectionOp(TreeT& tree, TagT tag) { mTreesToMerge.emplace_back(tree, tag); }
/// @brief Convenience constructor to CSG union or intersect a single
/// const tree with another. This constructor requires a DeepCopy tag
/// dispatch class.
CsgUnionOrIntersectionOp(const TreeT& tree, DeepCopy tag) { mTreesToMerge.emplace_back(tree, tag); }
/// @brief Constructor to CSG union or intersect a container of multiple
/// const or non-const tree pointers. A Steal tag requires a container of
/// non-const trees, a DeepCopy tag will accept either const or non-const
/// trees.
template <typename TreesT, typename TagT>
CsgUnionOrIntersectionOp(TreesT& trees, TagT tag)
{
    // Null pointers in the container are silently skipped.
    for (auto* tree : trees) {
        if (tree) {
            mTreesToMerge.emplace_back(*tree, tag);
        }
    }
}
/// @brief Constructor to accept a vector of TreeToMerge objects, primarily
/// used when mixing const/non-const trees.
/// @note Union/intersection order is preserved.
explicit CsgUnionOrIntersectionOp(const std::vector<TreeToMerge<TreeT>>& trees)
    : mTreesToMerge(trees) { }
/// @brief Constructor to accept a deque of TreeToMerge objects, primarily
/// used when mixing const/non-const trees.
/// @note Union/intersection order is preserved.
explicit CsgUnionOrIntersectionOp(const std::deque<TreeToMerge<TreeT>>& trees)
    : mTreesToMerge(trees.cbegin(), trees.cend()) { }
/// @brief Return true if no trees being merged
bool empty() const { return mTreesToMerge.empty(); }
/// @brief Return the number of trees being merged
size_t size() const { return mTreesToMerge.size(); }
// Processes the root node. Required by the NodeManager
bool operator()(RootT& root, size_t idx) const;
// Processes the internal nodes. Required by the NodeManager
template<typename NodeT>
bool operator()(NodeT& node, size_t idx) const;
// Processes the leaf nodes. Required by the NodeManager
bool operator()(LeafT& leaf, size_t idx) const;
private:
// on processing the root node, the background value is stored, retrieve it
// and check that the root node has already been processed
const ValueT& background() const;
mutable std::vector<TreeToMerge<TreeT>> mTreesToMerge;
mutable const ValueT* mBackground = nullptr;
}; // struct CsgUnionOrIntersectionOp
// Convenience aliases selecting the union or intersection behavior.
template <typename TreeT>
using CsgUnionOp = CsgUnionOrIntersectionOp<TreeT, /*Union=*/true>;
template <typename TreeT>
using CsgIntersectionOp = CsgUnionOrIntersectionOp<TreeT, /*Union=*/false>;
/// @brief DynamicNodeManager operator to merge two trees using a CSG difference.
/// @note This class modifies the topology of the tree so is designed to be used
/// from DynamicNodeManager::foreachTopDown().
template<typename TreeT>
struct CsgDifferenceOp
{
using ValueT = typename TreeT::ValueType;
using RootT = typename TreeT::RootNodeType;
using LeafT = typename TreeT::LeafNodeType;
/// @brief Convenience constructor to CSG difference a single non-const
/// tree from another. This constructor takes a Steal or DeepCopy tag
/// dispatch class.
template <typename TagT>
CsgDifferenceOp(TreeT& tree, TagT tag) : mTree(tree, tag) { }
/// @brief Convenience constructor to CSG difference a single const
/// tree from another. This constructor requires an explicit DeepCopy tag
/// dispatch class.
CsgDifferenceOp(const TreeT& tree, DeepCopy tag) : mTree(tree, tag) { }
/// @brief Constructor to CSG difference the tree in a TreeToMerge object
/// from another.
explicit CsgDifferenceOp(TreeToMerge<TreeT>& tree) : mTree(tree) { }
/// @brief Return the number of trees being merged (only ever 1)
size_t size() const { return 1; }
// Processes the root node. Required by the NodeManager
bool operator()(RootT& root, size_t idx) const;
// Processes the internal nodes. Required by the NodeManager
template<typename NodeT>
bool operator()(NodeT& node, size_t idx) const;
// Processes the leaf nodes. Required by the NodeManager
bool operator()(LeafT& leaf, size_t idx) const;
private:
// on processing the root node, the background values are stored, retrieve them
// and check that the root nodes have already been processed
const ValueT& background() const;
const ValueT& otherBackground() const;
// note that this vector is copied in NodeTransformer every time a foreach call is made,
// however in typical use cases this cost will be dwarfed by the actual merge algorithm
mutable TreeToMerge<TreeT> mTree;
mutable const ValueT* mBackground = nullptr;
mutable const ValueT* mOtherBackground = nullptr;
}; // struct CsgDifferenceOp
////////////////////////////////////////
// Build the topology mask used in deep-copy mode to track which nodes of the
// source tree have not yet been consumed by the merge. In steal mode no mask
// is needed because nodes are removed from the source tree itself.
template<typename TreeT>
void TreeToMerge<TreeT>::initializeMask()
{
    if (mSteal) return;
    mMaskTree.ptr.reset(new MaskTreeType);
    // union the source tree's node topology into the new mask tree
    MaskUnionOp op(*mTree);
    tree::DynamicNodeManager<MaskTreeType, MaskTreeType::RootNodeType::LEVEL-1> manager(*this->mask());
    manager.foreachTopDown(op);
}
// Report whether a topology mask has been allocated (deep-copy mode only).
template<typename TreeT>
bool TreeToMerge<TreeT>::hasMask() const
{
    return static_cast<bool>(mMaskTree.ptr);
}
// Rebind this object to a new non-const tree whose nodes may be stolen.
// Throws if the shared pointer is empty.
template<typename TreeT>
void TreeToMerge<TreeT>::reset(typename TreeType::Ptr treePtr, Steal)
{
    if (!treePtr) OPENVDB_THROW(RuntimeError, "Cannot reset with empty Tree shared pointer.");
    mSteal = true;
    mTreePtr = treePtr;
    mTree = mTreePtr.get();
}
// Expose the root node of the tree being merged.
template<typename TreeT>
const typename TreeToMerge<TreeT>::RootNodeType*
TreeToMerge<TreeT>::rootPtr() const
{
    return &(mTree->root());
}
// Look up a node of type NodeT at ijk in the source tree. In deep-copy mode
// the topology mask is consulted first: a node whose mask entry is off has
// already been consumed by the merge, so it is reported as absent even though
// it still exists in the source tree.
template<typename TreeT>
template<typename NodeT>
const NodeT*
TreeToMerge<TreeT>::probeConstNode(const Coord& ijk) const
{
    if (!mSteal) {
        if (!this->mask()->isValueOn(ijk)) return nullptr;
    }
    return mTree->template probeConstNode<NodeT>(ijk);
}
// Return ownership of the node of type NodeT at ijk, either by detaching it
// from the source tree (steal mode) or by deep-copying it and pruning the
// topology mask (deep-copy mode). Returns an empty pointer if no such node
// exists (or it has already been consumed).
template<typename TreeT>
template<typename NodeT>
std::unique_ptr<NodeT>
TreeToMerge<TreeT>::stealOrDeepCopyNode(const Coord& ijk)
{
    if (mSteal) {
        // const_cast is safe here: steal mode is only enabled when the object
        // was constructed from a non-const tree
        TreeType* tree = const_cast<TreeType*>(mTree);
        return std::unique_ptr<NodeT>(
            tree->root().template stealNode<NodeT>(ijk, mTree->root().background(), false)
        );
    } else {
        auto* child = this->probeConstNode<NodeT>(ijk);
        if (child) {
            assert(this->hasMask());
            auto result = std::make_unique<NodeT>(*child);
            // prune mask tree so this node is not handed out twice
            this->mask()->addTile(NodeT::LEVEL, ijk, false, false);
            return result;
        }
    }
    return std::unique_ptr<NodeT>();
}
// Replace the node of type NodeT at ijk with a tile. In steal mode the tile is
// written into the source tree itself; in deep-copy mode the source tree is
// left untouched and only the topology mask is pruned (marking the node as
// consumed). Leaf-level requests are ignored since a leaf "tile" is a value.
template<typename TreeT>
template<typename NodeT>
void
TreeToMerge<TreeT>::addTile(const Coord& ijk, const ValueType& value, bool active)
{
    // ignore leaf node tiles (values)
    if (NodeT::LEVEL == 0) return;

    if (mSteal) {
        // const_cast is safe: steal mode implies a non-const source tree
        TreeType* tree = const_cast<TreeType*>(mTree);
        auto* node = tree->template probeNode<NodeT>(ijk);
        if (node) {
            const Index pos = NodeT::coordToOffset(ijk);
            node->addTile(pos, value, active);
        }
    } else {
        auto* node = mTree->template probeConstNode<NodeT>(ijk);
        // prune mask tree
        if (node) {
            assert(this->hasMask());
            this->mask()->addTile(NodeT::LEVEL, ijk, false, false);
        }
    }
}
////////////////////////////////////////
// Root-level pass of the mask-union: allocate one fully-active mask child per
// child of the source tree's root (in parallel), then attach them to the mask
// root serially (root insertion is not thread-safe).
template <typename TreeT>
bool TreeToMerge<TreeT>::MaskUnionOp::operator()(RootT& root, size_t /*idx*/) const
{
    using ChildT = typename RootT::ChildNodeType;

    const Index count = mTree.root().childCount();

    std::vector<std::unique_ptr<ChildT>> children(count);

    // allocate new root children
    tbb::parallel_for(
        tbb::blocked_range<Index>(0, count),
        [&](tbb::blocked_range<Index>& range)
        {
            for (Index i = range.begin(); i < range.end(); i++) {
                // Coord::max() is a placeholder origin, corrected below
                children[i] = std::make_unique<ChildT>(Coord::max(), true, true);
            }
        }
    );

    // apply origins and add root children to new root node
    size_t i = 0;
    for (auto iter = mTree.root().cbeginChildOn(); iter; ++iter) {
        children[i]->setOrigin(iter->origin());
        root.addChild(children[i].release());
        i++;
    }

    return true;
}
// Internal-node pass of the mask-union: mirror the source tree's child
// topology into the mask. Returns false (stopping recursion) when the source
// tree has no corresponding node.
template <typename TreeT>
template <typename NodeT>
bool TreeToMerge<TreeT>::MaskUnionOp::operator()(NodeT& node, size_t /*idx*/) const
{
    using ChildT = typename NodeT::ChildNodeType;

    const auto* otherNode = mTree.template probeConstNode<NodeT>(node.origin());
    if (!otherNode) return false;

    // this mask tree stores active tiles in place of leaf nodes for compactness
    if (NodeT::LEVEL == 1) {
        for (auto iter = otherNode->cbeginChildOn(); iter; ++iter) {
            node.addTile(iter.pos(), true, true);
        }
    } else {
        for (auto iter = otherNode->cbeginChildOn(); iter; ++iter) {
            auto* child = new ChildT(iter->origin(), true, true);
            node.addChild(child);
        }
    }

    return true;
}
////////////////////////////////////////
namespace merge_internal {

// Helper to handle leaf buffers that were only partially constructed
// (delay-loaded and neither out-of-core nor allocated yet).
template <typename BufferT, typename ValueT>
struct UnallocatedBuffer
{
    // allocate the buffer and fill it with the background value if it is
    // in-core but empty
    static void allocateAndFill(BufferT& buffer, const ValueT& background)
    {
        if (!buffer.isOutOfCore() && buffer.empty()) {
            buffer.allocate();
            buffer.fill(background);
        }
    }

    // a buffer that is in-core yet empty can only be partially constructed
    static bool isPartiallyConstructed(const BufferT& buffer)
    {
        return !buffer.isOutOfCore() && buffer.empty();
    }
}; // struct UnallocatedBuffer

// bool specialization
template <typename BufferT>
struct UnallocatedBuffer<BufferT, bool>
{
    // do nothing for bool buffers as they cannot be unallocated
    static void allocateAndFill(BufferT&, const bool&) { }
    static bool isPartiallyConstructed(const BufferT&) { return false; }
}; // struct UnallocatedBuffer

} // namespace merge_internal
////////////////////////////////////////
// Root-level pass of the CSG union/intersection merge.
//
// Fix: the original called childPtr->resetBackground() BEFORE testing childPtr
// for null, dereferencing a potentially empty unique_ptr returned by
// stealOrDeepCopyNode(). The call is now guarded by the null check.
template <typename TreeT, bool Union>
bool CsgUnionOrIntersectionOp<TreeT, Union>::operator()(RootT& root, size_t) const
{
    const bool Intersect = !Union;

    if (this->empty()) return false;

    // store the background value
    if (!mBackground) mBackground = &root.background();

    // does the key exist in the root node?
    auto keyExistsInRoot = [&](const Coord& key) -> bool
    {
        return root.getValueDepth(key) > -1;
    };

    // does the key exist in all merge tree root nodes?
    auto keyExistsInAllTrees = [&](const Coord& key) -> bool
    {
        for (TreeToMerge<TreeT>& mergeTree : mTreesToMerge) {
            const auto* mergeRoot = mergeTree.rootPtr();
            if (!mergeRoot) return false;
            if (mergeRoot->getValueDepth(key) == -1) return false;
        }
        return true;
    };

    // delete any background tiles
    root.eraseBackgroundTiles();

    // for intersection, delete any root node keys that are not present in all trees
    if (Intersect) {
        // find all tile coordinates to delete
        std::vector<Coord> toDelete;
        for (auto valueIter = root.cbeginValueAll(); valueIter; ++valueIter) {
            const Coord& key = valueIter.getCoord();
            if (!keyExistsInAllTrees(key)) toDelete.push_back(key);
        }
        // find all child coordinates to delete
        for (auto childIter = root.cbeginChildOn(); childIter; ++childIter) {
            const Coord& key = childIter.getCoord();
            if (!keyExistsInAllTrees(key)) toDelete.push_back(key);
        }
        // only mechanism to delete elements in root node is to delete background tiles,
        // so insert background tiles (which will replace any child nodes) and then delete
        for (Coord& key : toDelete) root.addTile(key, *mBackground, false);
        root.eraseBackgroundTiles();
    }

    // find all tile values in this root and track inside/outside and active state
    // note that level sets should never contain active tiles, but we handle them anyway
    constexpr uint8_t ACTIVE_TILE = 0x1;
    constexpr uint8_t INSIDE_TILE = 0x2;
    constexpr uint8_t OUTSIDE_TILE = 0x4;

    constexpr uint8_t INSIDE_STATE = Union ? INSIDE_TILE : OUTSIDE_TILE;
    constexpr uint8_t OUTSIDE_STATE = Union ? OUTSIDE_TILE : INSIDE_TILE;

    const ValueT insideBackground = Union ? -this->background() : this->background();
    const ValueT outsideBackground = -insideBackground;

    auto getTileFlag = [&](auto& valueIter) -> uint8_t
    {
        uint8_t flag(0);
        const ValueT& value = valueIter.getValue();
        if (value < zeroVal<ValueT>())      flag |= INSIDE_TILE;
        else if (value > zeroVal<ValueT>()) flag |= OUTSIDE_TILE;
        if (valueIter.isValueOn())          flag |= ACTIVE_TILE;
        return flag;
    };

    std::unordered_map<Coord, /*flags*/uint8_t> tiles;

    if (root.getTableSize() > 0) {
        for (auto valueIter = root.cbeginValueAll(); valueIter; ++valueIter) {
            const Coord& key = valueIter.getCoord();
            tiles.insert({key, getTileFlag(valueIter)});
        }
    }

    // find all tiles values in other roots and replace outside tiles with inside tiles
    for (TreeToMerge<TreeT>& mergeTree : mTreesToMerge) {
        const auto* mergeRoot = mergeTree.rootPtr();
        if (!mergeRoot) continue;
        for (auto valueIter = mergeRoot->cbeginValueAll(); valueIter; ++valueIter) {
            const Coord& key = valueIter.getCoord();
            auto it = tiles.find(key);
            if (it == tiles.end()) {
                // if no tile with this key, insert it
                tiles.insert({key, getTileFlag(valueIter)});
            } else {
                // replace an outside tile with an inside tile
                const uint8_t flag = it->second;
                if (flag & OUTSIDE_STATE) {
                    const uint8_t newFlag = getTileFlag(valueIter);
                    if (newFlag & INSIDE_STATE) {
                        it->second = newFlag;
                    }
                }
            }
        }
    }

    // insert all inside tiles
    for (auto it : tiles) {
        const uint8_t flag = it.second;
        if (flag & INSIDE_STATE) {
            const Coord& key = it.first;
            const bool state = flag & ACTIVE_TILE;
            // for intersection, only add the tile if the key already exists in the tree
            if (Union || keyExistsInRoot(key)) {
                root.addTile(key, insideBackground, state);
            }
        }
    }

    std::unordered_set<Coord> children;

    if (root.getTableSize() > 0) {
        for (auto childIter = root.cbeginChildOn(); childIter; ++childIter) {
            const Coord& key = childIter.getCoord();
            children.insert(key);
        }
    }

    bool continueRecurse = false;

    // find all children in other roots and insert them if a child or tile with this key
    // does not already exist or if the child will replace an outside tile
    for (TreeToMerge<TreeT>& mergeTree : mTreesToMerge) {
        const auto* mergeRoot = mergeTree.rootPtr();
        if (!mergeRoot) continue;
        for (auto childIter = mergeRoot->cbeginChildOn(); childIter; ++childIter) {
            const Coord& key = childIter.getCoord();

            // for intersection, only add child nodes if the key already exists in the tree
            if (Intersect && !keyExistsInRoot(key)) continue;

            // if child already exists, merge recursion will need to continue to resolve conflict
            if (children.count(key)) {
                continueRecurse = true;
                continue;
            }

            // if an inside tile exists, do nothing
            auto it = tiles.find(key);
            if (it != tiles.end() && it->second == INSIDE_STATE) continue;

            auto childPtr = mergeTree.template stealOrDeepCopyNode<typename RootT::ChildNodeType>(key);
            // guard against an empty pointer before dereferencing (bug fix)
            if (childPtr) {
                childPtr->resetBackground(mergeRoot->background(), root.background());
                root.addChild(childPtr.release());
            }

            children.insert(key);
        }
    }

    // insert all outside tiles that don't replace an inside tile or a child node
    for (auto it : tiles) {
        const uint8_t flag = it.second;
        if (flag & OUTSIDE_STATE) {
            const Coord& key = it.first;
            if (!children.count(key)) {
                const bool state = flag & ACTIVE_TILE;
                // for intersection, only add the tile if the key already exists in the tree
                if (Union || keyExistsInRoot(key)) {
                    root.addTile(key, outsideBackground, state);
                }
            }
        }
    }

    // finish by removing any background tiles
    root.eraseBackgroundTiles();

    return continueRecurse;
}
// Internal-node pass of the CSG union/intersection merge. A "valid" tile is
// one whose sign already wins the CSG operation (inside for union, outside for
// intersection); an "invalid" tile is one the merge trees may overwrite.
template<typename TreeT, bool Union>
template<typename NodeT>
bool CsgUnionOrIntersectionOp<TreeT, Union>::operator()(NodeT& node, size_t) const
{
    using NonConstNodeT = typename std::remove_const<NodeT>::type;

    if (this->empty()) return false;

    const ValueT insideBackground = Union ? -this->background() : this->background();
    const ValueT outsideBackground = -insideBackground;

    using NodeMaskT = typename NodeT::NodeMaskType;

    // store temporary masks to track inside and outside tile states
    NodeMaskT validTile;
    NodeMaskT invalidTile;

    auto isValid = [](const ValueT& value)
    {
        return Union ? value < zeroVal<ValueT>() : value > zeroVal<ValueT>();
    };

    auto isInvalid = [](const ValueT& value)
    {
        return Union ? value > zeroVal<ValueT>() : value < zeroVal<ValueT>();
    };

    for (auto iter = node.cbeginValueAll(); iter; ++iter) {
        if (isValid(iter.getValue())) {
            validTile.setOn(iter.pos());
        } else if (isInvalid(iter.getValue())) {
            invalidTile.setOn(iter.pos());
        }
    }

    bool continueRecurse = false;

    for (TreeToMerge<TreeT>& mergeTree : mTreesToMerge) {

        auto* mergeNode = mergeTree.template probeConstNode<NonConstNodeT>(node.origin());
        if (!mergeNode) continue;

        // iterate over all tiles
        for (auto iter = mergeNode->cbeginValueAll(); iter; ++iter) {
            Index pos = iter.pos();
            // source node contains an inside tile, so ignore
            if (validTile.isOn(pos)) continue;
            // this node contains an inside tile, so turn into an inside tile
            if (isValid(iter.getValue())) {
                node.addTile(pos, insideBackground, iter.isValueOn());
                validTile.setOn(pos);
            }
        }

        // iterate over all child nodes
        for (auto iter = mergeNode->cbeginChildOn(); iter; ++iter) {
            Index pos = iter.pos();
            const Coord& ijk = iter.getCoord();
            // source node contains an inside tile, so ensure other node has no child
            if (validTile.isOn(pos)) {
                mergeTree.template addTile<NonConstNodeT>(ijk, outsideBackground, false);
            } else if (invalidTile.isOn(pos)) {
                // a "losing" tile here is replaced by the merge tree's child
                auto childPtr = mergeTree.template stealOrDeepCopyNode<typename NodeT::ChildNodeType>(ijk);
                if (childPtr) {
                    childPtr->resetBackground(mergeTree.rootPtr()->background(), this->background());
                    node.addChild(childPtr.release());
                }
                invalidTile.setOff(pos);
            } else {
                // if both source and target are child nodes, merge recursion needs to continue
                // along this branch to resolve the conflict
                continueRecurse = true;
            }
        }
    }

    return continueRecurse;
}
// Leaf-level pass of the CSG union/intersection merge: a per-voxel min
// (union) or max (intersection) of signed distances across all merge trees.
template <typename TreeT, bool Union>
bool CsgUnionOrIntersectionOp<TreeT, Union>::operator()(LeafT& leaf, size_t) const
{
    using LeafT = typename TreeT::LeafNodeType;
    using ValueT = typename LeafT::ValueType;
    using BufferT = typename LeafT::Buffer;

    if (this->empty()) return false;

    const ValueT fillValue = Union ? this->background() : -this->background();

    // if buffer is not out-of-core and empty, leaf node must have only been
    // partially constructed, so allocate and fill with background value
    merge_internal::UnallocatedBuffer<BufferT, ValueT>::allocateAndFill(
        leaf.buffer(), fillValue);

    for (TreeToMerge<TreeT>& mergeTree : mTreesToMerge) {
        const LeafT* otherLeaf = mergeTree.template probeConstNode<LeafT>(leaf.origin());
        if (!otherLeaf) continue;
        // skip merge when the other leaf was itself only partially constructed
        if (merge_internal::UnallocatedBuffer<BufferT, ValueT>::isPartiallyConstructed(
            otherLeaf->buffer())) {
            continue;
        }
        for (Index i = 0; i < LeafT::SIZE; ++i) {
            const ValueT& candidate = otherLeaf->getValue(i);
            const ValueT& current = leaf.getValue(i);
            const bool take = Union ? (candidate < current) : (candidate > current);
            if (take) {
                leaf.setValueOnly(i, candidate);
                leaf.setActiveState(i, otherLeaf->isValueOn(i));
            }
        }
    }

    // leaf processing never needs further recursion
    return false;
}
// Accessor for the background captured during root-node processing.
// Valid only when driven by foreachTopDown(), which visits the root first.
template <typename TreeT, bool Union>
const typename CsgUnionOrIntersectionOp<TreeT, Union>::ValueT&
CsgUnionOrIntersectionOp<TreeT, Union>::background() const
{
    assert(mBackground != nullptr);
    return *mBackground;
}
////////////////////////////////////////
// Root-level pass of the CSG difference merge: subtract the merge tree's
// interior from this tree.
//
// Cleanup: the original recomputed getTileFlag(valueIter) when inserting into
// the flags map even though the result was already cached in `flag`; the
// cached value is reused here (behavior-identical).
template <typename TreeT>
bool CsgDifferenceOp<TreeT>::operator()(RootT& root, size_t) const
{
    // store the background values
    if (!mBackground) mBackground = &root.background();
    if (!mOtherBackground) mOtherBackground = &mTree.rootPtr()->background();

    // find all tile values in this root and track inside/outside and active state
    // note that level sets should never contain active tiles, but we handle them anyway
    constexpr uint8_t ACTIVE_TILE = 0x1;
    constexpr uint8_t INSIDE_TILE = 0x2;
    constexpr uint8_t CHILD = 0x4;

    auto getTileFlag = [&](auto& valueIter) -> uint8_t
    {
        uint8_t flag(0);
        const ValueT& value = valueIter.getValue();
        if (value < zeroVal<ValueT>()) flag |= INSIDE_TILE;
        if (valueIter.isValueOn())     flag |= ACTIVE_TILE;
        return flag;
    };

    // delete any background tiles
    root.eraseBackgroundTiles();

    std::unordered_map<Coord, /*flags*/uint8_t> flags;

    if (root.getTableSize() > 0) {
        // record this root's inside tiles ...
        for (auto valueIter = root.cbeginValueAll(); valueIter; ++valueIter) {
            const Coord& key = valueIter.getCoord();
            const uint8_t flag = getTileFlag(valueIter);
            if (flag & INSIDE_TILE) {
                flags.insert({key, flag});
            }
        }
        // ... and its children
        for (auto childIter = root.cbeginChildOn(); childIter; ++childIter) {
            const Coord& key = childIter.getCoord();
            flags.insert({key, CHILD});
        }
    }

    bool continueRecurse = false;

    const auto* mergeRoot = mTree.rootPtr();

    if (mergeRoot) {
        // an inside tile in the merge tree hollows out whatever is here
        for (auto valueIter = mergeRoot->cbeginValueAll(); valueIter; ++valueIter) {
            const Coord& key = valueIter.getCoord();
            const uint8_t flag = getTileFlag(valueIter);
            if (flag & INSIDE_TILE) {
                auto it = flags.find(key);
                if (it != flags.end()) {
                    const bool state = flag & ACTIVE_TILE;
                    root.addTile(key, this->background(), state);
                }
            }
        }

        for (auto childIter = mergeRoot->cbeginChildOn(); childIter; ++childIter) {
            const Coord& key = childIter.getCoord();
            auto it = flags.find(key);
            if (it != flags.end()) {
                const uint8_t otherFlag = it->second;
                if (otherFlag & CHILD) {
                    // if child already exists, merge recursion will need to continue to resolve conflict
                    continueRecurse = true;
                } else if (otherFlag & INSIDE_TILE) {
                    // an inside tile here is carved by the (negated) merge child
                    auto childPtr = mTree.template stealOrDeepCopyNode<typename RootT::ChildNodeType>(key);
                    if (childPtr) {
                        childPtr->resetBackground(this->otherBackground(), this->background());
                        childPtr->negate();
                        root.addChild(childPtr.release());
                    }
                }
            }
        }
    }

    // finish by removing any background tiles
    root.eraseBackgroundTiles();

    return continueRecurse;
}
// Internal-node pass of the CSG difference merge: inside tiles of the merge
// tree erase matching tiles/children here; inside tiles here are replaced by
// the merge tree's negated children.
template<typename TreeT>
template<typename NodeT>
bool CsgDifferenceOp<TreeT>::operator()(NodeT& node, size_t) const
{
    using NonConstNodeT = typename std::remove_const<NodeT>::type;

    using NodeMaskT = typename NodeT::NodeMaskType;

    // store temporary mask to track inside tile state
    NodeMaskT insideTile;
    for (auto iter = node.cbeginValueAll(); iter; ++iter) {
        if (iter.getValue() < zeroVal<ValueT>()) {
            insideTile.setOn(iter.pos());
        }
    }

    bool continueRecurse = false;

    auto* mergeNode = mTree.template probeConstNode<NonConstNodeT>(node.origin());

    if (!mergeNode) return continueRecurse;

    // iterate over all tiles
    for (auto iter = mergeNode->cbeginValueAll(); iter; ++iter) {
        Index pos = iter.pos();
        if (iter.getValue() < zeroVal<ValueT>()) {
            // merge-tree interior removes material: reset to background
            if (insideTile.isOn(pos) || node.isChildMaskOn(pos)) {
                node.addTile(pos, this->background(), iter.isValueOn());
            }
        }
    }

    // iterate over all children
    for (auto iter = mergeNode->cbeginChildOn(); iter; ++iter) {
        Index pos = iter.pos();
        const Coord& ijk = iter.getCoord();
        if (insideTile.isOn(pos)) {
            // interior tile here is carved by the negated merge child
            auto childPtr = mTree.template stealOrDeepCopyNode<typename NodeT::ChildNodeType>(ijk);
            if (childPtr) {
                childPtr->resetBackground(this->otherBackground(), this->background());
                childPtr->negate();
                node.addChild(childPtr.release());
            }
        } else if (node.isChildMaskOn(pos)) {
            // if both source and target are child nodes, merge recursion needs to continue
            // along this branch to resolve the conflict
            continueRecurse = true;
        }
    }

    return continueRecurse;
}
// Leaf-level pass of the CSG difference merge: per-voxel a = max(a, -b),
// taking b's active state whenever b wins.
template <typename TreeT>
bool CsgDifferenceOp<TreeT>::operator()(LeafT& leaf, size_t) const
{
    using LeafT = typename TreeT::LeafNodeType;
    using ValueT = typename LeafT::ValueType;
    using BufferT = typename LeafT::Buffer;

    // if buffer is not out-of-core and empty, leaf node must have only been
    // partially constructed, so allocate and fill with background value
    merge_internal::UnallocatedBuffer<BufferT, ValueT>::allocateAndFill(
        leaf.buffer(), this->background());

    const LeafT* otherLeaf = mTree.template probeConstNode<LeafT>(leaf.origin());
    if (!otherLeaf) return false;

    // skip merge when the other leaf was itself only partially constructed
    if (merge_internal::UnallocatedBuffer<BufferT, ValueT>::isPartiallyConstructed(
        otherLeaf->buffer())) {
        return false;
    }

    for (Index i = 0; i < LeafT::SIZE; ++i) {
        const ValueT negated = math::negative(otherLeaf->getValue(i));
        if (leaf.getValue(i) < negated) { // a = max(a, -b)
            leaf.setValueOnly(i, negated);
            leaf.setActiveState(i, otherLeaf->isValueOn(i));
        }
    }

    // leaf processing never needs further recursion
    return false;
}
// Accessor for this tree's background captured during root-node processing.
// Valid only when driven by foreachTopDown(), which visits the root first.
template <typename TreeT>
const typename CsgDifferenceOp<TreeT>::ValueT&
CsgDifferenceOp<TreeT>::background() const
{
    assert(mBackground != nullptr);
    return *mBackground;
}
// Accessor for the merge tree's background captured during root-node
// processing. Valid only when driven by foreachTopDown().
template <typename TreeT>
const typename CsgDifferenceOp<TreeT>::ValueT&
CsgDifferenceOp<TreeT>::otherBackground() const
{
    assert(mOtherBackground != nullptr);
    return *mOtherBackground;
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_MERGE_HAS_BEEN_INCLUDED
| 35,978 | C | 34.273529 | 107 | 0.635666 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/LevelSetRebuild.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
#ifndef OPENVDB_TOOLS_LEVELSETREBUILD_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_LEVELSETREBUILD_HAS_BEEN_INCLUDED
#include <openvdb/Grid.h>
#include <openvdb/Exceptions.h>
#include <openvdb/math/Math.h>
#include <openvdb/math/Transform.h>
#include <openvdb/tools/VolumeToMesh.h>
#include <openvdb/tools/MeshToVolume.h>
#include <openvdb/util/NullInterrupter.h>
#include <openvdb/util/Util.h>
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>
#include <type_traits>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Return a new grid of type @c GridType that contains a narrow-band level set
/// representation of an isosurface of a given grid.
///
/// @param grid a scalar, floating-point grid with one or more disjoint,
/// closed isosurfaces at the given @a isovalue
/// @param isovalue the isovalue that defines the implicit surface (defaults to zero,
/// which is typical if the input grid is already a level set or a SDF).
/// @param halfWidth half the width of the narrow band, in voxel units
/// (defaults to 3 voxels, which is required for some level set operations)
/// @param xform optional transform for the output grid
/// (if not provided, the transform of the input @a grid will be matched)
///
/// @throw TypeError if @a grid is not scalar or not floating-point
///
/// @note If the input grid contains overlapping isosurfaces, interior edges will be lost.
template<class GridType>
inline typename GridType::Ptr
levelSetRebuild(const GridType& grid, float isovalue = 0,
float halfWidth = float(LEVEL_SET_HALF_WIDTH), const math::Transform* xform = nullptr);
/// @brief Return a new grid of type @c GridType that contains a narrow-band level set
/// representation of an isosurface of a given grid.
///
/// @param grid a scalar, floating-point grid with one or more disjoint,
/// closed isosurfaces at the given @a isovalue
/// @param isovalue the isovalue that defines the implicit surface
/// @param exBandWidth the exterior narrow-band width in voxel units
/// @param inBandWidth the interior narrow-band width in voxel units
/// @param xform optional transform for the output grid
/// (if not provided, the transform of the input @a grid will be matched)
///
/// @throw TypeError if @a grid is not scalar or not floating-point
///
/// @note If the input grid contains overlapping isosurfaces, interior edges will be lost.
template<class GridType>
inline typename GridType::Ptr
levelSetRebuild(const GridType& grid, float isovalue, float exBandWidth, float inBandWidth,
const math::Transform* xform = nullptr);
/// @brief Return a new grid of type @c GridType that contains a narrow-band level set
/// representation of an isosurface of a given grid.
///
/// @param grid a scalar, floating-point grid with one or more disjoint,
/// closed isosurfaces at the given @a isovalue
/// @param isovalue the isovalue that defines the implicit surface
/// @param exBandWidth the exterior narrow-band width in voxel units
/// @param inBandWidth the interior narrow-band width in voxel units
/// @param xform optional transform for the output grid
/// (if not provided, the transform of the input @a grid will be matched)
/// @param interrupter optional interrupter object
///
/// @throw TypeError if @a grid is not scalar or not floating-point
///
/// @note If the input grid contains overlapping isosurfaces, interior edges will be lost.
template<class GridType, typename InterruptT>
inline typename GridType::Ptr
levelSetRebuild(const GridType& grid, float isovalue, float exBandWidth, float inBandWidth,
const math::Transform* xform = nullptr, InterruptT* interrupter = nullptr);
////////////////////////////////////////
// Internal utility objects and implementation details
namespace internal {
/// TBB functor that maps mesh points from world space into the index space of
/// a target transform (MeshToVolume consumes index-space points).
class PointListTransform
{
public:
    PointListTransform(const PointList& pointsIn, std::vector<Vec3s>& pointsOut,
        const math::Transform& xform)
        : mPointsIn(pointsIn)
        , mPointsOut(&pointsOut)
        , mXform(xform)
    {
    }

    void runParallel()
    {
        tbb::parallel_for(tbb::blocked_range<size_t>(0, mPointsOut->size()), *this);
    }

    void runSerial()
    {
        (*this)(tbb::blocked_range<size_t>(0, mPointsOut->size()));
    }

    inline void operator()(const tbb::blocked_range<size_t>& range) const
    {
        std::vector<Vec3s>& points = *mPointsOut;
        for (size_t i = range.begin(), e = range.end(); i < e; ++i) {
            points[i] = Vec3s(mXform.worldToIndex(mPointsIn[i]));
        }
    }

private:
    const PointList& mPointsIn;
    std::vector<Vec3s> * const mPointsOut;
    const math::Transform& mXform;
};
/// TBB functor that flattens a PolygonPoolList into a single quad array.
/// Triangles are widened to quads with an INVALID_IDX fourth vertex.
/// @note indexList[n] holds the precomputed output offset of pool n, so the
/// pools can be copied in parallel without contention.
class PrimCpy
{
public:
    PrimCpy(const PolygonPoolList& primsIn, const std::vector<size_t>& indexList,
        std::vector<Vec4I>& primsOut)
        : mPrimsIn(primsIn)
        , mIndexList(indexList)
        , mPrimsOut(&primsOut)
    {
    }

    void runParallel()
    {
        tbb::parallel_for(tbb::blocked_range<size_t>(0, mIndexList.size()), *this);
    }

    void runSerial()
    {
        (*this)(tbb::blocked_range<size_t>(0, mIndexList.size()));
    }

    inline void operator()(const tbb::blocked_range<size_t>& range) const
    {
        openvdb::Vec4I quad;
        quad[3] = openvdb::util::INVALID_IDX; // marks triangles widened to quads
        std::vector<Vec4I>& primsOut = *mPrimsOut;

        for (size_t n = range.begin(); n < range.end(); ++n) {
            size_t index = mIndexList[n];
            PolygonPool& polygons = mPrimsIn[n];

            // Copy quads
            for (size_t i = 0, I = polygons.numQuads(); i < I; ++i) {
                primsOut[index++] = polygons.quad(i);
            }
            // free the source pool as we go to reduce peak memory
            polygons.clearQuads();

            // Copy triangles (adaptive mesh)
            for (size_t i = 0, I = polygons.numTriangles(); i < I; ++i) {
                const openvdb::Vec3I& triangle = polygons.triangle(i);
                quad[0] = triangle[0];
                quad[1] = triangle[1];
                quad[2] = triangle[2];
                primsOut[index++] = quad;
            }

            polygons.clearTriangles();
        }
    }

private:
    const PolygonPoolList& mPrimsIn;
    const std::vector<size_t>& mIndexList;
    std::vector<Vec4I> * const mPrimsOut;
};
} // namespace internal
////////////////////////////////////////
//{
/// @cond OPENVDB_LEVEL_SET_REBUILD_INTERNAL
/// The normal entry points for level set rebuild are the levelSetRebuild() functions.
/// doLevelSetRebuild() is mainly for internal use, but when the isovalue and half band
/// widths are given in ValueType units (for example, if they are queried from
/// a grid), it might be more convenient to call this function directly.
///
/// @internal This overload is enabled only for grids with a scalar, floating-point ValueType.
// Rebuild a level set by extracting the isosurface mesh (VolumeToMesh) and
// re-scanning it back into a narrow-band SDF (meshToVolume). Renormalization
// is disabled because the scan conversion already produces a valid SDF.
template<class GridType, typename InterruptT>
inline typename std::enable_if<
    std::is_floating_point<typename GridType::ValueType>::value, typename GridType::Ptr>::type
doLevelSetRebuild(const GridType& grid, typename GridType::ValueType iso,
    typename GridType::ValueType exWidth, typename GridType::ValueType inWidth,
    const math::Transform* xform, InterruptT* interrupter)
{
    const float
        isovalue = float(iso),
        exBandWidth = float(exWidth),
        inBandWidth = float(inWidth);

    tools::VolumeToMesh mesher(isovalue);
    mesher(grid);

    // output transform defaults to the input grid's transform
    math::Transform::Ptr transform = (xform != nullptr) ? xform->copy() : grid.transform().copy();

    std::vector<Vec3s> points(mesher.pointListSize());

    { // Copy and transform (required for MeshToVolume) points to grid space.
        internal::PointListTransform ptnXForm(mesher.pointList(), points, *transform);
        ptnXForm.runParallel();
        // release the mesher's point storage early to reduce peak memory
        mesher.pointList().reset(nullptr);
    }

    std::vector<Vec4I> primitives;

    { // Copy primitives.
        PolygonPoolList& polygonPoolList = mesher.polygonPoolList();

        // prefix-sum the per-pool primitive counts into output offsets
        size_t numPrimitives = 0;
        std::vector<size_t> indexlist(mesher.polygonPoolListSize());

        for (size_t n = 0, N = mesher.polygonPoolListSize(); n < N; ++n) {
            const openvdb::tools::PolygonPool& polygons = polygonPoolList[n];
            indexlist[n] = numPrimitives;
            numPrimitives += polygons.numQuads();
            numPrimitives += polygons.numTriangles();
        }

        primitives.resize(numPrimitives);
        internal::PrimCpy primCpy(polygonPoolList, indexlist, primitives);
        primCpy.runParallel();
    }

    QuadAndTriangleDataAdapter<Vec3s, Vec4I> mesh(points, primitives);

    if (interrupter) {
        return meshToVolume<GridType>(*interrupter, mesh, *transform, exBandWidth, inBandWidth,
            DISABLE_RENORMALIZATION, nullptr);
    }

    return meshToVolume<GridType>(mesh, *transform, exBandWidth, inBandWidth,
        DISABLE_RENORMALIZATION, nullptr);
}
/// @internal This overload is enabled only for grids that do not have a scalar,
/// floating-point ValueType.
/// @internal This overload is selected for grids without a scalar,
/// floating-point ValueType and always throws: level set rebuild is only
/// defined for signed-distance data.
template<class GridType, typename InterruptT>
inline typename std::enable_if<
    !std::is_floating_point<typename GridType::ValueType>::value, typename GridType::Ptr>::type
doLevelSetRebuild(const GridType&, typename GridType::ValueType /*isovalue*/,
    typename GridType::ValueType /*exWidth*/, typename GridType::ValueType /*inWidth*/,
    const math::Transform*, InterruptT*)
{
    OPENVDB_THROW(TypeError,
        "level set rebuild is supported only for scalar, floating-point grids");
}
/// @endcond
//}
////////////////////////////////////////
// Public entry point with explicit band widths and an interrupter: promote the
// float arguments to the grid's value type and dispatch.
template<class GridType, typename InterruptT>
inline typename GridType::Ptr
levelSetRebuild(const GridType& grid, float iso, float exWidth, float inWidth,
    const math::Transform* xform, InterruptT* interrupter)
{
    using ValueT = typename GridType::ValueType;
    const ValueT isovalue = zeroVal<ValueT>() + ValueT(iso);
    const ValueT exBandWidth = zeroVal<ValueT>() + ValueT(exWidth);
    const ValueT inBandWidth = zeroVal<ValueT>() + ValueT(inWidth);
    return doLevelSetRebuild(grid, isovalue, exBandWidth, inBandWidth, xform, interrupter);
}
// Public entry point with explicit band widths and no interrupter.
template<class GridType>
inline typename GridType::Ptr
levelSetRebuild(const GridType& grid, float iso, float exWidth, float inWidth,
    const math::Transform* xform)
{
    using ValueT = typename GridType::ValueType;
    const ValueT isovalue = zeroVal<ValueT>() + ValueT(iso);
    const ValueT exBandWidth = zeroVal<ValueT>() + ValueT(exWidth);
    const ValueT inBandWidth = zeroVal<ValueT>() + ValueT(inWidth);
    return doLevelSetRebuild<GridType, util::NullInterrupter>(
        grid, isovalue, exBandWidth, inBandWidth, xform, nullptr);
}
// Public entry point with a symmetric half-band width.
template<class GridType>
inline typename GridType::Ptr
levelSetRebuild(const GridType& grid, float iso, float halfVal, const math::Transform* xform)
{
    using ValueT = typename GridType::ValueType;
    const ValueT isovalue = zeroVal<ValueT>() + ValueT(iso);
    const ValueT halfWidth = zeroVal<ValueT>() + ValueT(halfVal);
    return doLevelSetRebuild<GridType, util::NullInterrupter>(
        grid, isovalue, halfWidth, halfWidth, xform, nullptr);
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_LEVELSETREBUILD_HAS_BEEN_INCLUDED
| 11,442 | C | 33.887195 | 98 | 0.668852 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/GridOperators.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file tools/GridOperators.h
///
/// @brief Apply an operator to an input grid to produce an output grid
/// with the same active voxel topology but a potentially different value type.
#ifndef OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED
#include <openvdb/Grid.h>
#include <openvdb/math/Operators.h>
#include <openvdb/util/NullInterrupter.h>
#include <openvdb/tree/LeafManager.h>
#include <openvdb/tree/ValueAccessor.h>
#include "ValueTransformer.h" // for tools::foreach()
#include <tbb/parallel_for.h>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief VectorToScalarConverter<VectorGridType>::Type is the type of a grid
/// having the same tree configuration as VectorGridType but a scalar value type, T,
/// where T is the type of the original vector components.
/// @details For example, VectorToScalarConverter<Vec3DGrid>::Type is equivalent to DoubleGrid.
template<typename VectorGridType> struct VectorToScalarConverter {
    using VecComponentValueT = typename VectorGridType::ValueType::value_type;
    using Type = typename VectorGridType::template ValueConverter<VecComponentValueT>::Type;
};
/// @brief ScalarToVectorConverter<ScalarGridType>::Type is the type of a grid
/// having the same tree configuration as ScalarGridType but value type Vec3<T>
/// where T is ScalarGridType::ValueType.
/// @details For example, ScalarToVectorConverter<DoubleGrid>::Type is equivalent to Vec3DGrid.
template<typename ScalarGridType> struct ScalarToVectorConverter {
    using VectorValueT = math::Vec3<typename ScalarGridType::ValueType>;
    using Type = typename ScalarGridType::template ValueConverter<VectorValueT>::Type;
};
/// @brief Compute the Closest-Point Transform (CPT) from a distance field.
/// @return a new vector-valued grid with the same numerical precision as the input grid
/// (for example, if the input grid is a DoubleGrid, the output grid will be a Vec3DGrid)
/// @details When a mask grid is specified, the solution is calculated only in
/// the intersection of the mask active topology and the input active topology
/// independent of the transforms associated with either grid.
template<typename GridType, typename InterruptT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType& grid, bool threaded, InterruptT* interrupt);

template<typename GridType, typename MaskT, typename InterruptT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

/// @brief Convenience overload with no interrupter.
template<typename GridType> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType& grid, bool threaded = true)
{
    return cpt<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

/// @brief Convenience overload with a mask and no interrupter.
template<typename GridType, typename MaskT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return cpt<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
/// @brief Compute the curl of the given vector-valued grid.
/// @return a new vector-valued grid
/// @details When a mask grid is specified, the solution is calculated only in
/// the intersection of the mask active topology and the input active topology
/// independent of the transforms associated with either grid.
template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
curl(const GridType& grid, bool threaded, InterruptT* interrupt);

template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
curl(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

/// @brief Convenience overload with no interrupter.
template<typename GridType> inline
typename GridType::Ptr
curl(const GridType& grid, bool threaded = true)
{
    return curl<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

/// @brief Convenience overload with a mask and no interrupter.
template<typename GridType, typename MaskT> inline
typename GridType::Ptr
curl(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return curl<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
/// @brief Compute the divergence of the given vector-valued grid.
/// @return a new scalar-valued grid with the same numerical precision as the input grid
/// (for example, if the input grid is a Vec3DGrid, the output grid will be a DoubleGrid)
/// @details When a mask grid is specified, the solution is calculated only in
/// the intersection of the mask active topology and the input active topology
/// independent of the transforms associated with either grid.
template<typename GridType, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, bool threaded, InterruptT* interrupt);

template<typename GridType, typename MaskT, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

/// @brief Convenience overload with no interrupter.
template<typename GridType> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, bool threaded = true)
{
    return divergence<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

/// @brief Convenience overload with a mask and no interrupter.
template<typename GridType, typename MaskT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return divergence<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
/// @brief Compute the gradient of the given scalar grid.
/// @return a new vector-valued grid with the same numerical precision as the input grid
/// (for example, if the input grid is a DoubleGrid, the output grid will be a Vec3DGrid)
/// @details When a mask grid is specified, the solution is calculated only in
/// the intersection of the mask active topology and the input active topology
/// independent of the transforms associated with either grid.
template<typename GridType, typename InterruptT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, bool threaded, InterruptT* interrupt);

template<typename GridType, typename MaskT, typename InterruptT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

/// @brief Convenience overload with no interrupter.
template<typename GridType> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, bool threaded = true)
{
    return gradient<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

/// @brief Convenience overload with a mask and no interrupter.
template<typename GridType, typename MaskT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return gradient<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
/// @brief Compute the Laplacian of the given scalar grid.
/// @return a new scalar grid
/// @details When a mask grid is specified, the solution is calculated only in
/// the intersection of the mask active topology and the input active topology
/// independent of the transforms associated with either grid.
template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
laplacian(const GridType& grid, bool threaded, InterruptT* interrupt);

template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
laplacian(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

/// @brief Convenience overload with no interrupter.
template<typename GridType> inline
typename GridType::Ptr
laplacian(const GridType& grid, bool threaded = true)
{
    return laplacian<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

/// @brief Convenience overload with a mask and no interrupter.
/// @note The mask is now passed by const reference: the previous by-value
/// parameter (`const MaskT mask`) copied the entire mask grid on every call
/// and was inconsistent with every other masked overload in this header.
template<typename GridType, typename MaskT> inline
typename GridType::Ptr
laplacian(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return laplacian<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
/// @brief Compute the mean curvature of the given grid.
/// @return a new grid
/// @details When a mask grid is specified, the solution is calculated only in
/// the intersection of the mask active topology and the input active topology
/// independent of the transforms associated with either grid.
template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
meanCurvature(const GridType& grid, bool threaded, InterruptT* interrupt);

template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
meanCurvature(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

/// @brief Convenience overload with no interrupter.
template<typename GridType> inline
typename GridType::Ptr
meanCurvature(const GridType& grid, bool threaded = true)
{
    return meanCurvature<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

/// @brief Convenience overload with a mask and no interrupter.
template<typename GridType, typename MaskT> inline
typename GridType::Ptr
meanCurvature(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return meanCurvature<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
/// @brief Compute the magnitudes of the vectors of the given vector-valued grid.
/// @return a new scalar-valued grid with the same numerical precision as the input grid
/// (for example, if the input grid is a Vec3DGrid, the output grid will be a DoubleGrid)
/// @details When a mask grid is specified, the solution is calculated only in
/// the intersection of the mask active topology and the input active topology
/// independent of the transforms associated with either grid.
template<typename GridType, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, bool threaded, InterruptT* interrupt);

template<typename GridType, typename MaskT, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

/// @brief Convenience overload with no interrupter.
template<typename GridType> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, bool threaded = true)
{
    return magnitude<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

/// @brief Convenience overload with a mask and no interrupter.
template<typename GridType, typename MaskT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return magnitude<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
/// @brief Normalize the vectors of the given vector-valued grid.
/// @return a new vector-valued grid
/// @details When a mask grid is specified, the solution is calculated only in
/// the intersection of the mask active topology and the input active topology
/// independent of the transforms associated with either grid.
template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
normalize(const GridType& grid, bool threaded, InterruptT* interrupt);

template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
normalize(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

/// @brief Convenience overload with no interrupter.
template<typename GridType> inline
typename GridType::Ptr
normalize(const GridType& grid, bool threaded = true)
{
    return normalize<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

/// @brief Convenience overload with a mask and no interrupter.
template<typename GridType, typename MaskT> inline
typename GridType::Ptr
normalize(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return normalize<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
////////////////////////////////////////
namespace gridop {
/// @brief ToMaskGrid<T>::Type is the type of a grid having the same
/// tree hierarchy as grid type T but a value equal to its active state.
/// @details For example, ToMaskGrid<FloatGrid>::Type is equivalent to MaskGrid.
template<typename GridType>
struct ToMaskGrid {
    using Type = Grid<typename GridType::TreeType::template ValueConverter<ValueMask>::Type>;
};
/// @brief Apply an operator to an input grid to produce an output grid
/// with the same active voxel topology but a potentially different value type.
/// @details To facilitate inlining, this class is also templated on a Map type.
///
/// @note This is a helper class and should never be used directly.
template<
    typename InGridT,
    typename MaskGridType,
    typename OutGridT,
    typename MapT,
    typename OperatorT,
    typename InterruptT = util::NullInterrupter>
class GridOperator
{
public:
    typedef typename OutGridT::TreeType OutTreeT;
    typedef typename OutTreeT::LeafNodeType OutLeafT;
    typedef typename tree::LeafManager<OutTreeT> LeafManagerT;

    /// @param grid       input grid whose values the operator reads
    /// @param mask       optional mask; when non-null, the output's active topology
    ///                   is intersected with the mask's active topology
    /// @param interrupt  optional interrupter, polled during processing (may be null)
    /// @param densify    if true, voxelize the active tiles of the output tree before
    ///                   applying the operator; if false, tiles are processed directly
    ///                   in process()
    GridOperator(const InGridT& grid, const MaskGridType* mask, const MapT& map,
        InterruptT* interrupt = nullptr, bool densify = true)
        : mAcc(grid.getConstAccessor())
        , mMap(map)
        , mInterrupt(interrupt)
        , mMask(mask)
        , mDensify(densify) ///< @todo consider adding a "NeedsDensification" operator trait
    {
    }
    GridOperator(const GridOperator&) = default;
    GridOperator& operator=(const GridOperator&) = default;
    virtual ~GridOperator() = default;

    /// @brief Apply OperatorT at every active value of the input grid and
    /// return a new grid of the results, sharing the input's topology and transform.
    typename OutGridT::Ptr process(bool threaded = true)
    {
        if (mInterrupt) mInterrupt->start("Processing grid");

        // Derive background value of the output grid
        typename InGridT::TreeType tmp(mAcc.tree().background());
        typename OutGridT::ValueType backg = OperatorT::result(mMap, tmp, math::Coord(0));

        // The output tree is topology copy, optionally densified, of the input tree.
        // (Densification is necessary for some operators because applying the operator to
        // a constant tile produces distinct output values, particularly along tile borders.)
        /// @todo Can tiles be handled correctly without densification, or by densifying
        /// only to the width of the operator stencil?
        typename OutTreeT::Ptr tree(new OutTreeT(mAcc.tree(), backg, TopologyCopy()));
        if (mDensify) tree->voxelizeActiveTiles();

        // create grid with output tree and unit transform
        typename OutGridT::Ptr result(new OutGridT(tree));

        // Modify the solution area if a mask was supplied.
        if (mMask) {
            result->topologyIntersection(*mMask);
        }

        // transform of output grid = transform of input grid
        result->setTransform(math::Transform::Ptr(new math::Transform( mMap.copy() )));

        LeafManagerT leafManager(*tree);

        // Process leaf voxels, in parallel via operator() when requested.
        if (threaded) {
            tbb::parallel_for(leafManager.leafRange(), *this);
        } else {
            (*this)(leafManager.leafRange());
        }

        // If the tree wasn't densified, it might have active tiles that need to be processed.
        if (!mDensify) {
            using TileIter = typename OutTreeT::ValueOnIter;

            TileIter tileIter = tree->beginValueOn();
            tileIter.setMaxDepth(tileIter.getLeafDepth() - 1); // skip leaf values (i.e., voxels)

            AccessorT inAcc = mAcc; // each thread needs its own accessor, captured by value
            auto tileOp = [this, inAcc](const TileIter& it) {
                // Apply the operator to the input grid's tile value at the iterator's
                // current coordinates, and set the output tile's value to the result.
                it.setValue(OperatorT::result(this->mMap, inAcc, it.getCoord()));
            };

            // Apply the operator to tile values, optionally in parallel.
            // (But don't share the functor; each thread needs its own accessor.)
            tools::foreach(tileIter, tileOp, threaded, /*shareFunctor=*/false);
        }

        // Densification may have produced uniform leaf nodes; collapse them back to tiles.
        if (mDensify) tree->prune();

        if (mInterrupt) mInterrupt->end();
        return result;
    }

    /// @brief Iterate sequentially over LeafNodes and voxels in the output
    /// grid and apply the operator using a value accessor for the input grid.
    ///
    /// @note Never call this public method directly - it is called by
    /// TBB threads only!
    void operator()(const typename LeafManagerT::LeafRange& range) const
    {
        if (util::wasInterrupted(mInterrupt)) tbb::task::self().cancel_group_execution();

        for (typename LeafManagerT::LeafRange::Iterator leaf=range.begin(); leaf; ++leaf) {
            for (typename OutLeafT::ValueOnIter value=leaf->beginValueOn(); value; ++value) {
                value.setValue(OperatorT::result(mMap, mAcc, value.getCoord()));
            }
        }
    }

protected:
    typedef typename InGridT::ConstAccessor AccessorT;
    mutable AccessorT mAcc;    // mutable so it can be used inside the const operator()
    const MapT& mMap;          // reference member: the map must outlive this operator
    InterruptT* mInterrupt;    // may be null
    const MaskGridType* mMask; // may be null
    const bool mDensify;
}; // end of GridOperator class
} // namespace gridop
////////////////////////////////////////
/// @brief Compute the closest-point transform of a scalar grid.
template<
    typename InGridT,
    typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
    typename InterruptT = util::NullInterrupter>
class Cpt
{
public:
    typedef InGridT InGridType;
    typedef typename ScalarToVectorConverter<InGridT>::Type OutGridType;

    /// Construct an operator over the full active topology of @a grid.
    Cpt(const InGridType& grid, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
    {
    }

    /// Construct an operator whose output topology is intersected with @a mask.
    Cpt(const InGridType& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
    {
    }

    /// @brief Compute and return the closest-point transform grid,
    /// tagged with VEC_CONTRAVARIANT_ABSOLUTE.
    /// @param threaded           if true, process leaf nodes in parallel
    /// @param useWorldTransform  if true, evaluate math::CPT_RANGE;
    ///                           otherwise evaluate math::CPT
    typename OutGridType::Ptr process(bool threaded = true, bool useWorldTransform = true)
    {
        Functor functor(mInputGrid, mMask, threaded, useWorldTransform, mInterrupt);
        processTypedMap(mInputGrid.transform(), functor);
        if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_CONTRAVARIANT_ABSOLUTE);
        return functor.mOutputGrid;
    }

private:
    // Index-space operator (math::CPT).
    struct IsOpT
    {
        template<typename MapT, typename AccT>
        static typename OutGridType::ValueType
        result(const MapT& map, const AccT& acc, const Coord& xyz)
        {
            return math::CPT<MapT, math::CD_2ND>::result(map, acc, xyz);
        }
    };

    // World-space operator (math::CPT_RANGE).
    struct WsOpT
    {
        template<typename MapT, typename AccT>
        static typename OutGridType::ValueType
        result(const MapT& map, const AccT& acc, const Coord& xyz)
        {
            return math::CPT_RANGE<MapT, math::CD_2ND>::result(map, acc, xyz);
        }
    };

    // Invoked by processTypedMap() with the grid's concrete map type;
    // runs a GridOperator (densify=false: active tiles are processed directly)
    // and caches the result in mOutputGrid.
    struct Functor
    {
        Functor(const InGridType& grid, const MaskGridType* mask,
            bool threaded, bool worldspace, InterruptT* interrupt)
            : mThreaded(threaded)
            , mWorldSpace(worldspace)
            , mInputGrid(grid)
            , mInterrupt(interrupt)
            , mMask(mask)
        {}

        template<typename MapT>
        void operator()(const MapT& map)
        {
            if (mWorldSpace) {
                gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, WsOpT, InterruptT>
                    op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
                mOutputGrid = op.process(mThreaded); // cache the result
            } else {
                gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, IsOpT, InterruptT>
                    op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
                mOutputGrid = op.process(mThreaded); // cache the result
            }
        }

        const bool mThreaded;
        const bool mWorldSpace;
        const InGridType& mInputGrid;
        typename OutGridType::Ptr mOutputGrid;
        InterruptT* mInterrupt;
        const MaskGridType* mMask;
    };

    const InGridType& mInputGrid;
    InterruptT* mInterrupt;
    const MaskGridType* mMask;
}; // end of Cpt class
////////////////////////////////////////
/// @brief Compute the curl of a vector grid.
template<
typename GridT,
typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
typename InterruptT = util::NullInterrupter>
class Curl
{
public:
typedef GridT InGridType;
typedef GridT OutGridType;
Curl(const GridT& grid, InterruptT* interrupt = nullptr):
mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
{
}
Curl(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
{
}
typename GridT::Ptr process(bool threaded = true)
{
Functor functor(mInputGrid, mMask, threaded, mInterrupt);
processTypedMap(mInputGrid.transform(), functor);
if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
return functor.mOutputGrid;
}
private:
struct Functor
{
Functor(const GridT& grid, const MaskGridType* mask,
bool threaded, InterruptT* interrupt):
mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
template<typename MapT>
void operator()(const MapT& map)
{
typedef math::Curl<MapT, math::CD_2ND> OpT;
gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
op(mInputGrid, mMask, map, mInterrupt);
mOutputGrid = op.process(mThreaded); // cache the result
}
const bool mThreaded;
const GridT& mInputGrid;
typename GridT::Ptr mOutputGrid;
InterruptT* mInterrupt;
const MaskGridType* mMask;
}; // Private Functor
const GridT& mInputGrid;
InterruptT* mInterrupt;
const MaskGridType* mMask;
}; // end of Curl class
////////////////////////////////////////
/// @brief Compute the divergence of a vector grid.
template<
    typename InGridT,
    typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
    typename InterruptT = util::NullInterrupter>
class Divergence
{
public:
    typedef InGridT InGridType;
    typedef typename VectorToScalarConverter<InGridT>::Type OutGridType;

    /// Construct an operator over the full active topology of @a grid.
    Divergence(const InGridT& grid, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
    {
    }

    /// Construct an operator whose output topology is intersected with @a mask.
    Divergence(const InGridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
    {
    }

    /// @brief Compute and return the divergence grid.
    /// @details Staggered grids (GRID_STAGGERED) are differenced with a
    /// first-order scheme (FD_1ST); all other grids use second-order
    /// central differences (CD_2ND).
    typename OutGridType::Ptr process(bool threaded = true)
    {
        if (mInputGrid.getGridClass() == GRID_STAGGERED) {
            Functor<math::FD_1ST> functor(mInputGrid, mMask, threaded, mInterrupt);
            processTypedMap(mInputGrid.transform(), functor);
            return functor.mOutputGrid;
        } else {
            Functor<math::CD_2ND> functor(mInputGrid, mMask, threaded, mInterrupt);
            processTypedMap(mInputGrid.transform(), functor);
            return functor.mOutputGrid;
        }
    }

protected:
    // Invoked by processTypedMap() with the grid's concrete map type;
    // applies math::Divergence with the chosen difference scheme and
    // caches the result in mOutputGrid.
    template<math::DScheme DiffScheme>
    struct Functor
    {
        Functor(const InGridT& grid, const MaskGridType* mask,
            bool threaded, InterruptT* interrupt):
            mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}

        template<typename MapT>
        void operator()(const MapT& map)
        {
            typedef math::Divergence<MapT, DiffScheme> OpT;
            gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, OpT, InterruptT>
                op(mInputGrid, mMask, map, mInterrupt);
            mOutputGrid = op.process(mThreaded); // cache the result
        }

        const bool mThreaded;
        const InGridType& mInputGrid;
        typename OutGridType::Ptr mOutputGrid;
        InterruptT* mInterrupt;
        const MaskGridType* mMask;
    }; // Private Functor

    const InGridType& mInputGrid;
    InterruptT* mInterrupt;
    const MaskGridType* mMask;
}; // end of Divergence class
////////////////////////////////////////
/// @brief Compute the gradient of a scalar grid.
template<
    typename InGridT,
    typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
    typename InterruptT = util::NullInterrupter>
class Gradient
{
public:
    typedef InGridT InGridType;
    typedef typename ScalarToVectorConverter<InGridT>::Type OutGridType;

    /// Construct an operator over the full active topology of @a grid.
    Gradient(const InGridT& grid, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
    {
    }

    /// Construct an operator whose output topology is intersected with @a mask.
    Gradient(const InGridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
    {
    }

    /// @brief Compute and return the gradient grid (second-order central
    /// differences), tagged with VEC_COVARIANT.
    typename OutGridType::Ptr process(bool threaded = true)
    {
        Functor functor(mInputGrid, mMask, threaded, mInterrupt);
        processTypedMap(mInputGrid.transform(), functor);
        if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
        return functor.mOutputGrid;
    }

protected:
    // Invoked by processTypedMap() with the grid's concrete map type;
    // runs a GridOperator and caches the result in mOutputGrid.
    struct Functor
    {
        Functor(const InGridT& grid, const MaskGridType* mask,
            bool threaded, InterruptT* interrupt):
            mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}

        template<typename MapT>
        void operator()(const MapT& map)
        {
            typedef math::Gradient<MapT, math::CD_2ND> OpT;
            gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, OpT, InterruptT>
                op(mInputGrid, mMask, map, mInterrupt);
            mOutputGrid = op.process(mThreaded); // cache the result
        }

        const bool mThreaded;
        const InGridT& mInputGrid;
        typename OutGridType::Ptr mOutputGrid;
        InterruptT* mInterrupt;
        const MaskGridType* mMask;
    }; // Private Functor

    const InGridT& mInputGrid;
    InterruptT* mInterrupt;
    const MaskGridType* mMask;
}; // end of Gradient class
////////////////////////////////////////
/// @brief Compute the Laplacian of a grid.
template<
    typename GridT,
    typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
    typename InterruptT = util::NullInterrupter>
class Laplacian
{
public:
    typedef GridT InGridType;
    typedef GridT OutGridType;

    /// Construct an operator over the full active topology of @a grid.
    Laplacian(const GridT& grid, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
    {
    }

    /// Construct an operator whose output topology is intersected with @a mask.
    Laplacian(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
    {
    }

    /// @brief Compute and return the Laplacian grid.
    /// @note The output is tagged with VEC_COVARIANT, as in the other
    /// operators here - NOTE(review): looks redundant for a scalar GridT; confirm.
    typename GridT::Ptr process(bool threaded = true)
    {
        Functor functor(mInputGrid, mMask, threaded, mInterrupt);
        processTypedMap(mInputGrid.transform(), functor);
        if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
        return functor.mOutputGrid;
    }

protected:
    // Invoked by processTypedMap() with the grid's concrete map type;
    // applies math::Laplacian (CD_SECOND) and caches the result in mOutputGrid.
    struct Functor
    {
        Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
            mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}

        template<typename MapT>
        void operator()(const MapT& map)
        {
            typedef math::Laplacian<MapT, math::CD_SECOND> OpT;
            gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
                op(mInputGrid, mMask, map, mInterrupt);
            mOutputGrid = op.process(mThreaded); // cache the result
        }

        const bool mThreaded;
        const GridT& mInputGrid;
        typename GridT::Ptr mOutputGrid;
        InterruptT* mInterrupt;
        const MaskGridType* mMask;
    }; // Private Functor

    const GridT& mInputGrid;
    InterruptT* mInterrupt;
    const MaskGridType* mMask;
}; // end of Laplacian class
////////////////////////////////////////
/// @brief Compute the mean curvature of a grid.
template<
    typename GridT,
    typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
    typename InterruptT = util::NullInterrupter>
class MeanCurvature
{
public:
    typedef GridT InGridType;
    typedef GridT OutGridType;

    /// Construct an operator over the full active topology of @a grid.
    MeanCurvature(const GridT& grid, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
    {
    }

    /// Construct an operator whose output topology is intersected with @a mask.
    MeanCurvature(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
    {
    }

    /// @brief Compute and return the mean-curvature grid.
    /// @note The output is tagged with VEC_COVARIANT, as in the other
    /// operators here - NOTE(review): looks redundant for a scalar GridT; confirm.
    typename GridT::Ptr process(bool threaded = true)
    {
        Functor functor(mInputGrid, mMask, threaded, mInterrupt);
        processTypedMap(mInputGrid.transform(), functor);
        if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
        return functor.mOutputGrid;
    }

protected:
    // Invoked by processTypedMap() with the grid's concrete map type;
    // applies math::MeanCurvature (CD_SECOND, CD_2ND) and caches the result.
    struct Functor
    {
        Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
            mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}

        template<typename MapT>
        void operator()(const MapT& map)
        {
            typedef math::MeanCurvature<MapT, math::CD_SECOND, math::CD_2ND> OpT;
            gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
                op(mInputGrid, mMask, map, mInterrupt);
            mOutputGrid = op.process(mThreaded); // cache the result
        }

        const bool mThreaded;
        const GridT& mInputGrid;
        typename GridT::Ptr mOutputGrid;
        InterruptT* mInterrupt;
        const MaskGridType* mMask;
    }; // Private Functor

    const GridT& mInputGrid;
    InterruptT* mInterrupt;
    const MaskGridType* mMask;
}; // end of MeanCurvature class
////////////////////////////////////////
/// @brief Compute the magnitudes of the vectors of a vector grid.
template<
    typename InGridT,
    typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
    typename InterruptT = util::NullInterrupter>
class Magnitude
{
public:
    typedef InGridT InGridType;
    typedef typename VectorToScalarConverter<InGridT>::Type OutGridType;

    /// Construct an operator over the full active topology of @a grid.
    Magnitude(const InGridType& grid, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
    {
    }

    /// Construct an operator whose output topology is intersected with @a mask.
    Magnitude(const InGridType& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
    {
    }

    /// @brief Compute and return the grid of vector magnitudes.
    typename OutGridType::Ptr process(bool threaded = true)
    {
        Functor functor(mInputGrid, mMask, threaded, mInterrupt);
        processTypedMap(mInputGrid.transform(), functor);
        return functor.mOutputGrid;
    }

protected:
    // Value-local operator: the output at a voxel is the length of that
    // voxel's vector; the map is unused (no stencil).
    struct OpT
    {
        template<typename MapT, typename AccT>
        static typename OutGridType::ValueType
        result(const MapT&, const AccT& acc, const Coord& xyz) { return acc.getValue(xyz).length();}
    };

    // Invoked by processTypedMap() with the grid's concrete map type;
    // runs a GridOperator (densify=false: tiles are processed directly,
    // since OpT depends only on the value at each coordinate).
    struct Functor
    {
        Functor(const InGridT& grid, const MaskGridType* mask,
            bool threaded, InterruptT* interrupt):
            mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}

        template<typename MapT>
        void operator()(const MapT& map)
        {
            gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, OpT, InterruptT>
                op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
            mOutputGrid = op.process(mThreaded); // cache the result
        }

        const bool mThreaded;
        const InGridType& mInputGrid;
        typename OutGridType::Ptr mOutputGrid;
        InterruptT* mInterrupt;
        const MaskGridType* mMask;
    }; // Private Functor

    const InGridType& mInputGrid;
    InterruptT* mInterrupt;
    const MaskGridType* mMask;
}; // end of Magnitude class
////////////////////////////////////////
/// @brief Normalize the vectors of a vector grid.
template<
    typename GridT,
    typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
    typename InterruptT = util::NullInterrupter>
class Normalize
{
public:
    typedef GridT InGridType;
    typedef GridT OutGridType;

    /// Construct an operator over the full active topology of @a grid.
    Normalize(const GridT& grid, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
    {
    }

    /// Construct an operator whose output topology is intersected with @a mask.
    Normalize(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
        mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
    {
    }

    /// @brief Compute and return the grid of normalized vectors.
    /// @details A VEC_COVARIANT input is retagged VEC_COVARIANT_NORMALIZE;
    /// any other vector type is carried over unchanged.
    typename GridT::Ptr process(bool threaded = true)
    {
        Functor functor(mInputGrid, mMask, threaded, mInterrupt);
        processTypedMap(mInputGrid.transform(), functor);
        if (typename GridT::Ptr outGrid = functor.mOutputGrid) {
            const VecType vecType = mInputGrid.getVectorType();
            if (vecType == VEC_COVARIANT) {
                outGrid->setVectorType(VEC_COVARIANT_NORMALIZE);
            } else {
                outGrid->setVectorType(vecType);
            }
        }
        return functor.mOutputGrid;
    }

protected:
    // Value-local operator: returns the unit vector at each voxel, or the
    // zero vector when normalize() fails; the map is unused (no stencil).
    struct OpT
    {
        template<typename MapT, typename AccT>
        static typename OutGridType::ValueType
        result(const MapT&, const AccT& acc, const Coord& xyz)
        {
            typename OutGridType::ValueType vec = acc.getValue(xyz);
            if ( !vec.normalize() ) vec.setZero();
            return vec;
        }
    };

    // Invoked by processTypedMap() with the grid's concrete map type;
    // runs a GridOperator (densify=false: tiles are processed directly,
    // since OpT depends only on the value at each coordinate).
    struct Functor
    {
        Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
            mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}

        template<typename MapT>
        void operator()(const MapT& map)
        {
            gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
                op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
            mOutputGrid = op.process(mThreaded); // cache the result
        }

        const bool mThreaded;
        const GridT& mInputGrid;
        typename GridT::Ptr mOutputGrid;
        InterruptT* mInterrupt;
        const MaskGridType* mMask;
    }; // Private Functor

    const GridT& mInputGrid;
    InterruptT* mInterrupt;
    const MaskGridType* mMask;
}; // end of Normalize class
////////////////////////////////////////
/// @brief Compute the Closest-Point Transform of @a grid.
template<typename GridType, typename InterruptT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType& grid, bool threaded, InterruptT* interrupt)
{
    using MaskGridT = typename gridop::ToMaskGrid<GridType>::Type;
    Cpt<GridType, MaskGridT, InterruptT> cptOp(grid, interrupt);
    return cptOp.process(threaded);
}

/// @brief Compute the Closest-Point Transform of @a grid within the active topology of @a mask.
template<typename GridType, typename MaskT, typename InterruptT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
{
    Cpt<GridType, MaskT, InterruptT> cptOp(grid, mask, interrupt);
    return cptOp.process(threaded);
}
template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
curl(const GridType& grid, bool threaded, InterruptT* interrupt)
{
Curl<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt);
return op.process(threaded);
}
template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
curl(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
{
Curl<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
return op.process(threaded);
}
/// @brief Compute the divergence of @a grid.
template<typename GridType, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, bool threaded, InterruptT* interrupt)
{
    using MaskGridT = typename gridop::ToMaskGrid<GridType>::Type;
    Divergence<GridType, MaskGridT, InterruptT> divOp(grid, interrupt);
    return divOp.process(threaded);
}

/// @brief Compute the divergence of @a grid within the active topology of @a mask.
template<typename GridType, typename MaskT, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
{
    Divergence<GridType, MaskT, InterruptT> divOp(grid, mask, interrupt);
    return divOp.process(threaded);
}
/// @brief Compute the gradient of @a grid.
template<typename GridType, typename InterruptT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, bool threaded, InterruptT* interrupt)
{
    using MaskGridT = typename gridop::ToMaskGrid<GridType>::Type;
    Gradient<GridType, MaskGridT, InterruptT> gradOp(grid, interrupt);
    return gradOp.process(threaded);
}

/// @brief Compute the gradient of @a grid within the active topology of @a mask.
template<typename GridType, typename MaskT, typename InterruptT> inline
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
{
    Gradient<GridType, MaskT, InterruptT> gradOp(grid, mask, interrupt);
    return gradOp.process(threaded);
}
template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
laplacian(const GridType& grid, bool threaded, InterruptT* interrupt)
{
Laplacian<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
op(grid, interrupt);
return op.process(threaded);
}
template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
laplacian(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
{
Laplacian<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
return op.process(threaded);
}
template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
meanCurvature(const GridType& grid, bool threaded, InterruptT* interrupt)
{
MeanCurvature<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
op(grid, interrupt);
return op.process(threaded);
}
template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
meanCurvature(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
{
MeanCurvature<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
return op.process(threaded);
}
/// @brief Computes the per-voxel magnitude of the vector-valued @a grid via
/// the Magnitude functor, instantiated with the default ToMaskGrid mask type.
/// @param grid       vector-valued input grid
/// @param threaded   toggle multi-threaded processing
/// @param interrupt  optional interrupter, may be null
template<typename GridType, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, bool threaded, InterruptT* interrupt)
{
    using MaskGridT = typename gridop::ToMaskGrid<GridType>::Type;
    Magnitude<GridType, MaskGridT, InterruptT> magnitudeOp(grid, interrupt);
    return magnitudeOp.process(threaded);
}
/// @brief Masked overload: computes the magnitude of @a grid by running the
/// Magnitude functor with the supplied @a mask.
/// @param grid       vector-valued input grid
/// @param mask       mask forwarded to the Magnitude functor
/// @param threaded   toggle multi-threaded processing
/// @param interrupt  optional interrupter, may be null
template<typename GridType, typename MaskT, typename InterruptT> inline
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
{
    using OpT = Magnitude<GridType, MaskT, InterruptT>;
    OpT magnitudeOp(grid, mask, interrupt);
    return magnitudeOp.process(threaded);
}
template<typename GridType, typename InterruptT> inline
typename GridType::Ptr
normalize(const GridType& grid, bool threaded, InterruptT* interrupt)
{
Normalize<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
op(grid, interrupt);
return op.process(threaded);
}
template<typename GridType, typename MaskT, typename InterruptT> inline
typename GridType::Ptr
normalize(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
{
Normalize<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
return op.process(threaded);
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED
| 39,697 | C | 35.621771 | 100 | 0.687709 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/VolumeToMesh.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/// @file VolumeToMesh.h
///
/// @brief Extract polygonal surfaces from scalar volumes.
///
/// @author Mihai Alden
#ifndef OPENVDB_TOOLS_VOLUME_TO_MESH_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_VOLUME_TO_MESH_HAS_BEEN_INCLUDED
#include <openvdb/Platform.h>
#include <openvdb/math/Operators.h> // for ISGradient
#include <openvdb/tree/ValueAccessor.h>
#include <openvdb/util/Util.h> // for INVALID_IDX
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>
#include <tbb/parallel_reduce.h>
#include <tbb/task_scheduler_init.h>
#include <cmath> // for std::isfinite()
#include <map>
#include <memory>
#include <set>
#include <type_traits>
#include <vector>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
////////////////////////////////////////
// Wrapper functions for the VolumeToMesh converter
/// @brief Uniformly mesh any scalar grid that has a continuous isosurface.
///
/// @param grid a scalar grid to mesh
/// @param points output list of world space points
/// @param quads output quad index list
/// @param isovalue determines which isosurface to mesh
///
/// @throw TypeError if @a grid does not have a scalar value type
template<typename GridType>
inline void
volumeToMesh(
const GridType& grid,
std::vector<Vec3s>& points,
std::vector<Vec4I>& quads,
double isovalue = 0.0);
/// @brief Adaptively mesh any scalar grid that has a continuous isosurface.
///
/// @param grid a scalar grid to mesh
/// @param points output list of world space points
/// @param triangles output triangle index list
/// @param quads output quad index list
/// @param isovalue determines which isosurface to mesh
/// @param adaptivity surface adaptivity threshold [0 to 1]
/// @param relaxDisorientedTriangles toggle relaxing disoriented triangles during
/// adaptive meshing.
///
/// @throw TypeError if @a grid does not have a scalar value type
template<typename GridType>
inline void
volumeToMesh(
const GridType& grid,
std::vector<Vec3s>& points,
std::vector<Vec3I>& triangles,
std::vector<Vec4I>& quads,
double isovalue = 0.0,
double adaptivity = 0.0,
bool relaxDisorientedTriangles = true);
////////////////////////////////////////
/// @brief Polygon flags, used for reference based meshing.
/// - POLYFLAG_EXTERIOR: polygon is coincident with the reference surface
///   (see VolumeToMesh::setRefGrid below).
/// - POLYFLAG_FRACTURE_SEAM: polygon lies in proximity to a seam line.
/// - POLYFLAG_SUBDIVIDED: presumably marks polygons produced by
///   subdivision — confirm against the code that sets this flag.
enum { POLYFLAG_EXTERIOR = 0x1, POLYFLAG_FRACTURE_SEAM = 0x2, POLYFLAG_SUBDIVIDED = 0x4 };
/// @brief Collection of quads and triangles
///
/// Stores quad (Vec4I) and triangle (Vec3I) index arrays, each paired with
/// a parallel per-polygon @c char flag array (see the POLYFLAG_* bit values
/// declared above this class).
class PolygonPool
{
public:

    inline PolygonPool();
    inline PolygonPool(const size_t numQuads, const size_t numTriangles);

    inline void copy(const PolygonPool& rhs);

    // (Re)allocate or release quad storage.
    inline void resetQuads(size_t size);
    inline void clearQuads();

    // (Re)allocate or release triangle storage.
    inline void resetTriangles(size_t size);
    inline void clearTriangles();

    // polygon accessor methods
    const size_t& numQuads() const { return mNumQuads; }

    openvdb::Vec4I& quad(size_t n) { return mQuads[n]; }
    const openvdb::Vec4I& quad(size_t n) const { return mQuads[n]; }

    const size_t& numTriangles() const { return mNumTriangles; }

    openvdb::Vec3I& triangle(size_t n) { return mTriangles[n]; }
    const openvdb::Vec3I& triangle(size_t n) const { return mTriangles[n]; }

    // polygon flags accessor methods
    char& quadFlags(size_t n) { return mQuadFlags[n]; }
    const char& quadFlags(size_t n) const { return mQuadFlags[n]; }

    char& triangleFlags(size_t n) { return mTriangleFlags[n]; }
    const char& triangleFlags(size_t n) const { return mTriangleFlags[n]; }

    // reduce the polygon containers, n has to
    // be smaller than the current container size.
    inline bool trimQuads(const size_t n, bool reallocate = false);
    // NOTE: the method name carries a historical spelling mistake
    // ("Trinagles"); it is kept as-is for source compatibility with
    // existing callers.
    inline bool trimTrinagles(const size_t n, bool reallocate = false);

private:
    // disallow copy by assignment
    void operator=(const PolygonPool&) {}

    size_t mNumQuads, mNumTriangles; // element counts for the arrays below
    std::unique_ptr<openvdb::Vec4I[]> mQuads;
    std::unique_ptr<openvdb::Vec3I[]> mTriangles;
    std::unique_ptr<char[]> mQuadFlags, mTriangleFlags; // one flag byte per polygon
};
/// @{
/// @brief Point and primitive list types.
using PointList = std::unique_ptr<openvdb::Vec3s[]>;
using PolygonPoolList = std::unique_ptr<PolygonPool[]>;
/// @}
////////////////////////////////////////
/// @brief Mesh any scalar grid that has a continuous isosurface.
struct VolumeToMesh
{
    /// @param isovalue Determines which isosurface to mesh.
    /// @param adaptivity Adaptivity threshold [0 to 1]
    /// @param relaxDisorientedTriangles Toggle relaxing disoriented triangles during
    ///                                  adaptive meshing.
    VolumeToMesh(double isovalue = 0, double adaptivity = 0, bool relaxDisorientedTriangles = true);

    //////////

    /// @{
    // Mesh data accessors
    size_t pointListSize() const { return mPointListSize; }
    PointList& pointList() { return mPoints; }
    const PointList& pointList() const { return mPoints; }

    size_t polygonPoolListSize() const { return mPolygonPoolListSize; }
    PolygonPoolList& polygonPoolList() { return mPolygons; }
    const PolygonPoolList& polygonPoolList() const { return mPolygons; }

    std::vector<uint8_t>& pointFlags() { return mPointFlags; }
    const std::vector<uint8_t>& pointFlags() const { return mPointFlags; }
    /// @}

    //////////

    /// @brief Main call
    /// @note Call with scalar typed grid.
    template<typename InputGridType>
    void operator()(const InputGridType&);

    //////////

    /// @brief When surfacing fractured SDF fragments, the original unfractured
    /// SDF grid can be used to eliminate seam lines and tag polygons that are
    /// coincident with the reference surface with the @c POLYFLAG_EXTERIOR
    /// flag and polygons that are in proximity to the seam lines with the
    /// @c POLYFLAG_FRACTURE_SEAM flag. (The performance cost for using this
    /// reference based scheme compared to the regular meshing scheme is
    /// approximately 15% for the first fragment and neglect-able for
    /// subsequent fragments.)
    ///
    /// @note Attributes from the original asset such as uv coordinates, normals etc.
    /// are typically transfered to polygons that are marked with the
    /// @c POLYFLAG_EXTERIOR flag. Polygons that are not marked with this flag
    /// are interior to reference surface and might need projected UV coordinates
    /// or a different material. Polygons marked as @c POLYFLAG_FRACTURE_SEAM can
    /// be used to drive secondary elements such as debris and dust in a FX pipeline.
    ///
    /// @param grid reference surface grid of @c GridT type.
    /// @param secAdaptivity Secondary adaptivity threshold [0 to 1]. Used in regions
    ///                      that do not exist in the reference grid. (Parts of the
    ///                      fragment surface that are not coincident with the
    ///                      reference surface.)
    void setRefGrid(const GridBase::ConstPtr& grid, double secAdaptivity = 0);

    /// @param mask A boolean grid whose active topology defines the region to mesh.
    /// @param invertMask Toggle to mesh the complement of the mask.
    /// @note The mask's tree configuration has to match @c GridT's tree configuration.
    void setSurfaceMask(const GridBase::ConstPtr& mask, bool invertMask = false);

    /// @param grid A scalar grid used as a spatial multiplier for the adaptivity threshold.
    /// @note The grid's tree configuration has to match @c GridT's tree configuration.
    void setSpatialAdaptivity(const GridBase::ConstPtr& grid);

    /// @param tree A boolean tree whose active topology defines the adaptivity mask.
    /// @note The tree configuration has to match @c GridT's tree configuration.
    void setAdaptivityMask(const TreeBase::ConstPtr& tree);

private:
    // Disallow copying
    VolumeToMesh(const VolumeToMesh&);
    VolumeToMesh& operator=(const VolumeToMesh&);

    // Output mesh data (exposed through the accessors above).
    PointList mPoints;
    PolygonPoolList mPolygons;
    size_t mPointListSize, mSeamPointListSize, mPolygonPoolListSize;

    // Meshing parameters supplied via the constructor / setRefGrid().
    double mIsovalue, mPrimAdaptivity, mSecAdaptivity;

    // Optional reference, surface-mask and adaptivity inputs (see setters).
    GridBase::ConstPtr mRefGrid, mSurfaceMaskGrid, mAdaptivityGrid;
    TreeBase::ConstPtr mAdaptivityMaskTree;
    TreeBase::Ptr mRefSignTree, mRefIdxTree;
    bool mInvertSurfaceMask, mRelaxDisorientedTriangles;

    // Quantized seam points (see packPoint/unpackPoint below) and
    // per-output-point status flags.
    std::unique_ptr<uint32_t[]> mQuantizedSeamPoints;
    std::vector<uint8_t> mPointFlags;
}; // struct VolumeToMesh
////////////////////////////////////////
/// @brief Given a set of tangent elements, @c points with corresponding
/// @c normals, this method returns the intersection point of all tangent
/// elements.
///
/// The system A^T A x = A^T b (rows of A are the normals, b[i] = n_i . p_i
/// relative to the point centroid) is solved with an eigen-decomposition
/// based pseudo inverse; eigenvalues below 1% of the largest magnitude are
/// zeroed so flat or edge-like neighborhoods stay numerically stable.
/// If the whole spectrum is clamped, the centroid is returned.
///
/// @note Used to extract surfaces with sharp edges and corners from volume data,
///       see the following paper for details: "Feature Sensitive Surface
///       Extraction from Volume Data, Kobbelt et al. 2001".
inline Vec3d findFeaturePoint(
    const std::vector<Vec3d>& points,
    const std::vector<Vec3d>& normals)
{
    using Mat3d = math::Mat3d;

    // Centroid of the input points; also the fallback result.
    Vec3d centroid(0.0);
    if (points.empty()) return centroid;

    const size_t count = points.size();
    for (size_t i = 0; i < count; ++i) centroid += points[i];
    centroid /= double(count);

    // Accumulate the six unique entries of the symmetric 3x3 matrix A^T A
    // and the right-hand side A^T b (positions taken relative to centroid).
    double a00 = 0.0, a01 = 0.0, a02 = 0.0, a11 = 0.0, a12 = 0.0, a22 = 0.0;
    Vec3d atb(0.0);

    for (size_t i = 0; i < count; ++i) {
        const Vec3d& nrm = normals[i];

        a00 += nrm[0] * nrm[0]; // diagonal
        a11 += nrm[1] * nrm[1];
        a22 += nrm[2] * nrm[2];

        a01 += nrm[0] * nrm[1]; // upper triangle
        a02 += nrm[0] * nrm[2];
        a12 += nrm[1] * nrm[2];

        atb += nrm * nrm.dot(points[i] - centroid);
    }

    const Mat3d A(a00, a01, a02,
                  a01, a11, a12,
                  a02, a12, a22);

    // Eigen decomposition of the symmetric system matrix.
    Mat3d eigenVectors;
    Vec3d eigenValues;
    diagonalizeSymmetricMatrix(A, eigenVectors, eigenValues, 300);

    // Invert the spectrum, zeroing eigenvalues whose magnitude falls below
    // 1% of the largest one.
    double cutoff = std::max(std::abs(eigenValues[0]), std::abs(eigenValues[1]));
    cutoff = std::max(cutoff, std::abs(eigenValues[2]));
    cutoff *= 0.01;

    Mat3d invDiag = Mat3d::identity();
    int zeroed = 0;
    for (int i = 0; i < 3; ++i) {
        if (std::abs(eigenValues[i]) < cutoff) {
            invDiag[i][i] = 0.0;
            ++zeroed;
        } else {
            invDiag[i][i] = 1.0 / eigenValues[i];
        }
    }

    // Fully degenerate system: no usable constraint directions.
    if (zeroed == 3) return centroid;

    // Assemble the pseudo inverse and compute the intersection point.
    const Mat3d pseudoInv = eigenVectors * invDiag * eigenVectors.transpose();
    return centroid + pseudoInv * atb;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Internal utility objects and implementation details
namespace volume_to_mesh_internal {
/// @brief TBB body object that assigns a constant value to a sub-range of
/// a raw array. Used by fillArray() below.
template<typename ValueType>
struct FillArray
{
    FillArray(ValueType* array, const ValueType& v) : mArray(array), mValue(v) { }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        const ValueType fillValue = mValue; // local copy, avoids re-reading the member
        for (size_t i = range.begin(), last = range.end(); i < last; ++i) {
            mArray[i] = fillValue;
        }
    }

    ValueType * const mArray; // destination buffer (not owned)
    const ValueType mValue;   // value stamped into every element
};
/// @brief Fills the first @a length elements of @a array with @a val,
/// in parallel via TBB.
template<typename ValueType>
inline void
fillArray(ValueType* array, const ValueType& val, const size_t length)
{
    // A grain size of at least 1024 elements keeps per-task overhead small.
    const auto grainSize = std::max<size_t>(
        length / tbb::task_scheduler_init::default_num_threads(), 1024);
    tbb::parallel_for(tbb::blocked_range<size_t>(0, length, grainSize),
        FillArray<ValueType>(array, val), tbb::simple_partitioner());
}
/// @brief Bit-flags used to classify cells.
/// SIGNS masks the low byte holding the 8-bit corner-sign configuration;
/// EDGES is the union of the three per-axis edge flags
/// (XEDGE | YEDGE | ZEDGE = 0xE00).
enum { SIGNS = 0xFF, EDGES = 0xE00, INSIDE = 0x100,
       XEDGE = 0x200, YEDGE = 0x400, ZEDGE = 0x800, SEAM = 0x1000};
/// @brief Used to quickly determine if a given cell is adaptable.
/// Indexed by the 8-bit corner-sign configuration (see evalCellSigns);
/// a nonzero entry marks a configuration that passes the first test in
/// isNonManifold() below.
const bool sAdaptable[256] = {
    1,1,1,1,1,0,1,1,1,1,0,1,1,1,1,1,1,1,0,1,0,0,0,1,0,1,0,1,0,1,0,1,
    1,0,1,1,0,0,1,1,0,0,0,1,0,0,1,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,0,1,
    1,0,0,0,1,0,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    1,0,1,1,1,0,1,1,0,0,0,0,1,0,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,1,
    1,0,0,0,0,0,0,0,1,1,0,1,1,1,1,1,1,1,0,1,0,0,0,0,1,1,0,1,1,1,0,1,
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,0,1,0,0,0,1,
    1,0,0,0,1,0,1,0,1,1,0,0,1,1,1,1,1,1,0,0,1,0,0,0,1,1,0,0,1,1,0,1,
    1,0,1,0,1,0,1,0,1,0,0,0,1,0,1,1,1,1,1,1,1,0,1,1,1,1,0,1,1,1,1,1};
/// @brief Contains the ambiguous face index for certain cell configuration.
/// Indexed by the 8-bit corner-sign configuration; zero means no ambiguous
/// face, 1-6 identify the face (see correctCellSigns() below for how the
/// face index maps to a neighbor direction).
const unsigned char sAmbiguousFace[256] = {
    0,0,0,0,0,5,0,0,0,0,5,0,0,0,0,0,0,0,1,0,0,5,1,0,4,0,0,0,4,0,0,0,
    0,1,0,0,2,0,0,0,0,1,5,0,2,0,0,0,0,0,0,0,2,0,0,0,4,0,0,0,0,0,0,0,
    0,0,2,2,0,5,0,0,3,3,0,0,0,0,0,0,6,6,0,0,6,0,0,0,0,0,0,0,0,0,0,0,
    0,1,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,4,0,4,3,0,3,0,0,0,5,0,0,0,0,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,
    6,0,6,0,0,0,0,0,6,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,4,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
/// @brief Lookup table for different cell sign configurations. The first entry specifies
/// the total number of points that need to be generated inside a cell and the
/// remaining 12 entries indicate different edge groups.
/// Indexed by the 8-bit corner-sign configuration (see evalCellSigns); a
/// nonzero edge entry assigns that edge to one of the point groups 1..N.
const unsigned char sEdgeGroupTable[256][13] = {
    {0,0,0,0,0,0,0,0,0,0,0,0,0},{1,1,0,0,1,0,0,0,0,1,0,0,0},{1,1,1,0,0,0,0,0,0,0,1,0,0},
    {1,0,1,0,1,0,0,0,0,1,1,0,0},{1,0,1,1,0,0,0,0,0,0,0,1,0},{1,1,1,1,1,0,0,0,0,1,0,1,0},
    {1,1,0,1,0,0,0,0,0,0,1,1,0},{1,0,0,1,1,0,0,0,0,1,1,1,0},{1,0,0,1,1,0,0,0,0,0,0,0,1},
    {1,1,0,1,0,0,0,0,0,1,0,0,1},{1,1,1,1,1,0,0,0,0,0,1,0,1},{1,0,1,1,0,0,0,0,0,1,1,0,1},
    {1,0,1,0,1,0,0,0,0,0,0,1,1},{1,1,1,0,0,0,0,0,0,1,0,1,1},{1,1,0,0,1,0,0,0,0,0,1,1,1},
    {1,0,0,0,0,0,0,0,0,1,1,1,1},{1,0,0,0,0,1,0,0,1,1,0,0,0},{1,1,0,0,1,1,0,0,1,0,0,0,0},
    {1,1,1,0,0,1,0,0,1,1,1,0,0},{1,0,1,0,1,1,0,0,1,0,1,0,0},{2,0,1,1,0,2,0,0,2,2,0,1,0},
    {1,1,1,1,1,1,0,0,1,0,0,1,0},{1,1,0,1,0,1,0,0,1,1,1,1,0},{1,0,0,1,1,1,0,0,1,0,1,1,0},
    {1,0,0,1,1,1,0,0,1,1,0,0,1},{1,1,0,1,0,1,0,0,1,0,0,0,1},{2,2,1,1,2,1,0,0,1,2,1,0,1},
    {1,0,1,1,0,1,0,0,1,0,1,0,1},{1,0,1,0,1,1,0,0,1,1,0,1,1},{1,1,1,0,0,1,0,0,1,0,0,1,1},
    {2,1,0,0,1,2,0,0,2,1,2,2,2},{1,0,0,0,0,1,0,0,1,0,1,1,1},{1,0,0,0,0,1,1,0,0,0,1,0,0},
    {1,1,0,0,1,1,1,0,0,1,1,0,0},{1,1,1,0,0,1,1,0,0,0,0,0,0},{1,0,1,0,1,1,1,0,0,1,0,0,0},
    {1,0,1,1,0,1,1,0,0,0,1,1,0},{2,2,2,1,1,1,1,0,0,1,2,1,0},{1,1,0,1,0,1,1,0,0,0,0,1,0},
    {1,0,0,1,1,1,1,0,0,1,0,1,0},{2,0,0,2,2,1,1,0,0,0,1,0,2},{1,1,0,1,0,1,1,0,0,1,1,0,1},
    {1,1,1,1,1,1,1,0,0,0,0,0,1},{1,0,1,1,0,1,1,0,0,1,0,0,1},{1,0,1,0,1,1,1,0,0,0,1,1,1},
    {2,1,1,0,0,2,2,0,0,2,1,2,2},{1,1,0,0,1,1,1,0,0,0,0,1,1},{1,0,0,0,0,1,1,0,0,1,0,1,1},
    {1,0,0,0,0,0,1,0,1,1,1,0,0},{1,1,0,0,1,0,1,0,1,0,1,0,0},{1,1,1,0,0,0,1,0,1,1,0,0,0},
    {1,0,1,0,1,0,1,0,1,0,0,0,0},{1,0,1,1,0,0,1,0,1,1,1,1,0},{2,1,1,2,2,0,2,0,2,0,1,2,0},
    {1,1,0,1,0,0,1,0,1,1,0,1,0},{1,0,0,1,1,0,1,0,1,0,0,1,0},{1,0,0,1,1,0,1,0,1,1,1,0,1},
    {1,1,0,1,0,0,1,0,1,0,1,0,1},{2,1,2,2,1,0,2,0,2,1,0,0,2},{1,0,1,1,0,0,1,0,1,0,0,0,1},
    {2,0,2,0,2,0,1,0,1,2,2,1,1},{2,2,2,0,0,0,1,0,1,0,2,1,1},{2,2,0,0,2,0,1,0,1,2,0,1,1},
    {1,0,0,0,0,0,1,0,1,0,0,1,1},{1,0,0,0,0,0,1,1,0,0,0,1,0},{2,1,0,0,1,0,2,2,0,1,0,2,0},
    {1,1,1,0,0,0,1,1,0,0,1,1,0},{1,0,1,0,1,0,1,1,0,1,1,1,0},{1,0,1,1,0,0,1,1,0,0,0,0,0},
    {1,1,1,1,1,0,1,1,0,1,0,0,0},{1,1,0,1,0,0,1,1,0,0,1,0,0},{1,0,0,1,1,0,1,1,0,1,1,0,0},
    {1,0,0,1,1,0,1,1,0,0,0,1,1},{1,1,0,1,0,0,1,1,0,1,0,1,1},{2,1,2,2,1,0,1,1,0,0,1,2,1},
    {2,0,1,1,0,0,2,2,0,2,2,1,2},{1,0,1,0,1,0,1,1,0,0,0,0,1},{1,1,1,0,0,0,1,1,0,1,0,0,1},
    {1,1,0,0,1,0,1,1,0,0,1,0,1},{1,0,0,0,0,0,1,1,0,1,1,0,1},{1,0,0,0,0,1,1,1,1,1,0,1,0},
    {1,1,0,0,1,1,1,1,1,0,0,1,0},{2,1,1,0,0,2,2,1,1,1,2,1,0},{2,0,2,0,2,1,1,2,2,0,1,2,0},
    {1,0,1,1,0,1,1,1,1,1,0,0,0},{2,2,2,1,1,2,2,1,1,0,0,0,0},{2,2,0,2,0,1,1,2,2,2,1,0,0},
    {2,0,0,1,1,2,2,1,1,0,2,0,0},{2,0,0,1,1,1,1,2,2,1,0,1,2},{2,2,0,2,0,2,2,1,1,0,0,2,1},
    {4,3,2,2,3,4,4,1,1,3,4,2,1},{3,0,2,2,0,1,1,3,3,0,1,2,3},{2,0,2,0,2,2,2,1,1,2,0,0,1},
    {2,1,1,0,0,1,1,2,2,0,0,0,2},{3,1,0,0,1,2,2,3,3,1,2,0,3},{2,0,0,0,0,1,1,2,2,0,1,0,2},
    {1,0,0,0,0,1,0,1,0,0,1,1,0},{1,1,0,0,1,1,0,1,0,1,1,1,0},{1,1,1,0,0,1,0,1,0,0,0,1,0},
    {1,0,1,0,1,1,0,1,0,1,0,1,0},{1,0,1,1,0,1,0,1,0,0,1,0,0},{2,1,1,2,2,2,0,2,0,2,1,0,0},
    {1,1,0,1,0,1,0,1,0,0,0,0,0},{1,0,0,1,1,1,0,1,0,1,0,0,0},{1,0,0,1,1,1,0,1,0,0,1,1,1},
    {2,2,0,2,0,1,0,1,0,1,2,2,1},{2,2,1,1,2,2,0,2,0,0,0,1,2},{2,0,2,2,0,1,0,1,0,1,0,2,1},
    {1,0,1,0,1,1,0,1,0,0,1,0,1},{2,2,2,0,0,1,0,1,0,1,2,0,1},{1,1,0,0,1,1,0,1,0,0,0,0,1},
    {1,0,0,0,0,1,0,1,0,1,0,0,1},{1,0,0,0,0,0,0,1,1,1,1,1,0},{1,1,0,0,1,0,0,1,1,0,1,1,0},
    {1,1,1,0,0,0,0,1,1,1,0,1,0},{1,0,1,0,1,0,0,1,1,0,0,1,0},{1,0,1,1,0,0,0,1,1,1,1,0,0},
    {2,2,2,1,1,0,0,1,1,0,2,0,0},{1,1,0,1,0,0,0,1,1,1,0,0,0},{1,0,0,1,1,0,0,1,1,0,0,0,0},
    {2,0,0,2,2,0,0,1,1,2,2,2,1},{2,1,0,1,0,0,0,2,2,0,1,1,2},{3,2,1,1,2,0,0,3,3,2,0,1,3},
    {2,0,1,1,0,0,0,2,2,0,0,1,2},{2,0,1,0,1,0,0,2,2,1,1,0,2},{2,1,1,0,0,0,0,2,2,0,1,0,2},
    {2,1,0,0,1,0,0,2,2,1,0,0,2},{1,0,0,0,0,0,0,1,1,0,0,0,1},{1,0,0,0,0,0,0,1,1,0,0,0,1},
    {1,1,0,0,1,0,0,1,1,1,0,0,1},{2,1,1,0,0,0,0,2,2,0,1,0,2},{1,0,1,0,1,0,0,1,1,1,1,0,1},
    {1,0,1,1,0,0,0,1,1,0,0,1,1},{2,1,1,2,2,0,0,1,1,1,0,1,2},{1,1,0,1,0,0,0,1,1,0,1,1,1},
    {2,0,0,1,1,0,0,2,2,2,2,2,1},{1,0,0,1,1,0,0,1,1,0,0,0,0},{1,1,0,1,0,0,0,1,1,1,0,0,0},
    {1,1,1,1,1,0,0,1,1,0,1,0,0},{1,0,1,1,0,0,0,1,1,1,1,0,0},{1,0,1,0,1,0,0,1,1,0,0,1,0},
    {1,1,1,0,0,0,0,1,1,1,0,1,0},{1,1,0,0,1,0,0,1,1,0,1,1,0},{1,0,0,0,0,0,0,1,1,1,1,1,0},
    {1,0,0,0,0,1,0,1,0,1,0,0,1},{1,1,0,0,1,1,0,1,0,0,0,0,1},{1,1,1,0,0,1,0,1,0,1,1,0,1},
    {1,0,1,0,1,1,0,1,0,0,1,0,1},{1,0,1,1,0,1,0,1,0,1,0,1,1},{2,2,2,1,1,2,0,2,0,0,0,2,1},
    {2,1,0,1,0,2,0,2,0,1,2,2,1},{2,0,0,2,2,1,0,1,0,0,1,1,2},{1,0,0,1,1,1,0,1,0,1,0,0,0},
    {1,1,0,1,0,1,0,1,0,0,0,0,0},{2,1,2,2,1,2,0,2,0,1,2,0,0},{1,0,1,1,0,1,0,1,0,0,1,0,0},
    {1,0,1,0,1,1,0,1,0,1,0,1,0},{1,1,1,0,0,1,0,1,0,0,0,1,0},{2,2,0,0,2,1,0,1,0,2,1,1,0},
    {1,0,0,0,0,1,0,1,0,0,1,1,0},{1,0,0,0,0,1,1,1,1,0,1,0,1},{2,1,0,0,1,2,1,1,2,2,1,0,1},
    {1,1,1,0,0,1,1,1,1,0,0,0,1},{2,0,2,0,2,1,2,2,1,1,0,0,2},{2,0,1,1,0,1,2,2,1,0,1,2,1},
    {4,1,1,3,3,2,4,4,2,2,1,4,3},{2,2,0,2,0,2,1,1,2,0,0,1,2},{3,0,0,1,1,2,3,3,2,2,0,3,1},
    {1,0,0,1,1,1,1,1,1,0,1,0,0},{2,2,0,2,0,1,2,2,1,1,2,0,0},{2,2,1,1,2,2,1,1,2,0,0,0,0},
    {2,0,1,1,0,2,1,1,2,2,0,0,0},{2,0,2,0,2,2,1,1,2,0,2,1,0},{3,1,1,0,0,3,2,2,3,3,1,2,0},
    {2,1,0,0,1,1,2,2,1,0,0,2,0},{2,0,0,0,0,2,1,1,2,2,0,1,0},{1,0,0,0,0,0,1,1,0,1,1,0,1},
    {1,1,0,0,1,0,1,1,0,0,1,0,1},{1,1,1,0,0,0,1,1,0,1,0,0,1},{1,0,1,0,1,0,1,1,0,0,0,0,1},
    {2,0,2,2,0,0,1,1,0,2,2,1,2},{3,1,1,2,2,0,3,3,0,0,1,3,2},{2,1,0,1,0,0,2,2,0,1,0,2,1},
    {2,0,0,1,1,0,2,2,0,0,0,2,1},{1,0,0,1,1,0,1,1,0,1,1,0,0},{1,1,0,1,0,0,1,1,0,0,1,0,0},
    {2,2,1,1,2,0,1,1,0,2,0,0,0},{1,0,1,1,0,0,1,1,0,0,0,0,0},{2,0,1,0,1,0,2,2,0,1,1,2,0},
    {2,1,1,0,0,0,2,2,0,0,1,2,0},{2,1,0,0,1,0,2,2,0,1,0,2,0},{1,0,0,0,0,0,1,1,0,0,0,1,0},
    {1,0,0,0,0,0,1,0,1,0,0,1,1},{1,1,0,0,1,0,1,0,1,1,0,1,1},{1,1,1,0,0,0,1,0,1,0,1,1,1},
    {2,0,2,0,2,0,1,0,1,1,1,2,2},{1,0,1,1,0,0,1,0,1,0,0,0,1},{2,2,2,1,1,0,2,0,2,2,0,0,1},
    {1,1,0,1,0,0,1,0,1,0,1,0,1},{2,0,0,2,2,0,1,0,1,1,1,0,2},{1,0,0,1,1,0,1,0,1,0,0,1,0},
    {1,1,0,1,0,0,1,0,1,1,0,1,0},{2,2,1,1,2,0,2,0,2,0,2,1,0},{2,0,2,2,0,0,1,0,1,1,1,2,0},
    {1,0,1,0,1,0,1,0,1,0,0,0,0},{1,1,1,0,0,0,1,0,1,1,0,0,0},{1,1,0,0,1,0,1,0,1,0,1,0,0},
    {1,0,0,0,0,0,1,0,1,1,1,0,0},{1,0,0,0,0,1,1,0,0,1,0,1,1},{1,1,0,0,1,1,1,0,0,0,0,1,1},
    {2,2,2,0,0,1,1,0,0,2,1,2,2},{2,0,1,0,1,2,2,0,0,0,2,1,1},{1,0,1,1,0,1,1,0,0,1,0,0,1},
    {2,1,1,2,2,1,1,0,0,0,0,0,2},{2,1,0,1,0,2,2,0,0,1,2,0,1},{2,0,0,2,2,1,1,0,0,0,1,0,2},
    {1,0,0,1,1,1,1,0,0,1,0,1,0},{1,1,0,1,0,1,1,0,0,0,0,1,0},{3,1,2,2,1,3,3,0,0,1,3,2,0},
    {2,0,1,1,0,2,2,0,0,0,2,1,0},{1,0,1,0,1,1,1,0,0,1,0,0,0},{1,1,1,0,0,1,1,0,0,0,0,0,0},
    {2,2,0,0,2,1,1,0,0,2,1,0,0},{1,0,0,0,0,1,1,0,0,0,1,0,0},{1,0,0,0,0,1,0,0,1,0,1,1,1},
    {2,2,0,0,2,1,0,0,1,1,2,2,2},{1,1,1,0,0,1,0,0,1,0,0,1,1},{2,0,1,0,1,2,0,0,2,2,0,1,1},
    {1,0,1,1,0,1,0,0,1,0,1,0,1},{3,1,1,3,3,2,0,0,2,2,1,0,3},{1,1,0,1,0,1,0,0,1,0,0,0,1},
    {2,0,0,2,2,1,0,0,1,1,0,0,2},{1,0,0,1,1,1,0,0,1,0,1,1,0},{2,1,0,1,0,2,0,0,2,2,1,1,0},
    {2,1,2,2,1,1,0,0,1,0,0,2,0},{2,0,1,1,0,2,0,0,2,2,0,1,0},{1,0,1,0,1,1,0,0,1,0,1,0,0},
    {2,1,1,0,0,2,0,0,2,2,1,0,0},{1,1,0,0,1,1,0,0,1,0,0,0,0},{1,0,0,0,0,1,0,0,1,1,0,0,0},
    {1,0,0,0,0,0,0,0,0,1,1,1,1},{1,1,0,0,1,0,0,0,0,0,1,1,1},{1,1,1,0,0,0,0,0,0,1,0,1,1},
    {1,0,1,0,1,0,0,0,0,0,0,1,1},{1,0,1,1,0,0,0,0,0,1,1,0,1},{2,1,1,2,2,0,0,0,0,0,1,0,2},
    {1,1,0,1,0,0,0,0,0,1,0,0,1},{1,0,0,1,1,0,0,0,0,0,0,0,1},{1,0,0,1,1,0,0,0,0,1,1,1,0},
    {1,1,0,1,0,0,0,0,0,0,1,1,0},{2,1,2,2,1,0,0,0,0,1,0,2,0},{1,0,1,1,0,0,0,0,0,0,0,1,0},
    {1,0,1,0,1,0,0,0,0,1,1,0,0},{1,1,1,0,0,0,0,0,0,0,1,0,0},{1,1,0,0,1,0,0,0,0,1,0,0,0},
    {0,0,0,0,0,0,0,0,0,0,0,0,0}};
////////////////////////////////////////
inline bool
isPlanarQuad(
const Vec3d& p0, const Vec3d& p1,
const Vec3d& p2, const Vec3d& p3,
double epsilon = 0.001)
{
// compute representative plane
Vec3d normal = (p2-p0).cross(p1-p3);
normal.normalize();
const Vec3d centroid = (p0 + p1 + p2 + p3);
const double d = centroid.dot(normal) * 0.25;
// test vertice distance to plane
double absDist = std::abs(p0.dot(normal) - d);
if (absDist > epsilon) return false;
absDist = std::abs(p1.dot(normal) - d);
if (absDist > epsilon) return false;
absDist = std::abs(p2.dot(normal) - d);
if (absDist > epsilon) return false;
absDist = std::abs(p3.dot(normal) - d);
if (absDist > epsilon) return false;
return true;
}
////////////////////////////////////////
/// @{
/// @brief Utility methods for point quantization.
enum {
    MASK_FIRST_10_BITS = 0x000003FF, // one quantized component (10 bits)
    MASK_DIRTY_BIT = 0x80000000,     // NOTE(review): presumably marks modified points — confirm against users of this bit
    MASK_INVALID_BIT = 0x40000000    // NOTE(review): presumably marks invalid/unused points — confirm against users of this bit
};
/// @brief Quantizes a point in the unit cube to three 10-bit component
/// fields packed as x|y|z into a single 32-bit word.
inline uint32_t
packPoint(const Vec3d& v)
{
    // values are expected to be in the [0.0 to 1.0] range.
    assert(!(v.x() > 1.0) && !(v.y() > 1.0) && !(v.z() > 1.0));
    assert(!(v.x() < 0.0) && !(v.y() < 0.0) && !(v.z() < 0.0));

    const uint32_t x = uint32_t(v.x() * 1023.0) & MASK_FIRST_10_BITS;
    const uint32_t y = uint32_t(v.y() * 1023.0) & MASK_FIRST_10_BITS;
    const uint32_t z = uint32_t(v.z() * 1023.0) & MASK_FIRST_10_BITS;

    return (x << 20) | (y << 10) | z;
}
/// @brief Inverse of packPoint(): expands the three 10-bit component fields
/// back into a point in the unit cube.
inline Vec3d
unpackPoint(uint32_t data)
{
    const double scale = 0.0009775171; // ~= 1/1023
    Vec3d result;
    result.x() = double((data >> 20) & MASK_FIRST_10_BITS) * scale;
    result.y() = double((data >> 10) & MASK_FIRST_10_BITS) * scale;
    result.z() = double(data & MASK_FIRST_10_BITS) * scale;
    return result;
}
/// @}
////////////////////////////////////////
/// @brief Compile-time check: true only when the template parameter is bool.
template<typename T>
inline bool isBoolValue() { return false; }

template<>
inline bool isBoolValue<bool>() { return true; }
/// @brief Classifies a voxel value as inside the isosurface.
/// Scalar values are inside when strictly below the isovalue; boolean
/// values are inside when set (the isovalue is ignored).
template<typename T>
inline bool isInsideValue(T value, T isovalue)
{
    return value < isovalue;
}

template<>
inline bool isInsideValue<bool>(bool value, bool /*isovalue*/)
{
    return value;
}
/// @brief Reads the eight corner values of the cell whose origin is @a ijk
/// into @a values, in the corner order annotated on each line below.
/// @note @a ijk is taken by value and mutated as a scratch cursor.
template<typename AccessorT>
inline void
getCellVertexValues(const AccessorT& accessor, Coord ijk,
    math::Tuple<8, typename AccessorT::ValueType>& values)
{
    values[0] = accessor.getValue(ijk); // i, j, k
    ++ijk[0];
    values[1] = accessor.getValue(ijk); // i+1, j, k
    ++ijk[2];
    values[2] = accessor.getValue(ijk); // i+1, j, k+1
    --ijk[0];
    values[3] = accessor.getValue(ijk); // i, j, k+1
    --ijk[2]; ++ijk[1];
    values[4] = accessor.getValue(ijk); // i, j+1, k
    ++ijk[0];
    values[5] = accessor.getValue(ijk); // i+1, j+1, k
    ++ijk[2];
    values[6] = accessor.getValue(ijk); // i+1, j+1, k+1
    --ijk[0];
    values[7] = accessor.getValue(ijk); // i, j+1, k+1
}
template<typename LeafT>
inline void
getCellVertexValues(const LeafT& leaf, const Index offset,
math::Tuple<8, typename LeafT::ValueType>& values)
{
values[0] = leaf.getValue(offset); // i, j, k
values[3] = leaf.getValue(offset + 1); // i, j, k+1
values[4] = leaf.getValue(offset + LeafT::DIM); // i, j+1, k
values[7] = leaf.getValue(offset + LeafT::DIM + 1); // i, j+1, k+1
values[1] = leaf.getValue(offset + (LeafT::DIM * LeafT::DIM)); // i+1, j, k
values[2] = leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) + 1); // i+1, j, k+1
values[5] = leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) + LeafT::DIM); // i+1, j+1, k
values[6] = leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) + LeafT::DIM + 1); // i+1, j+1, k+1
}
/// @brief Computes the 8-bit corner-sign configuration from the eight cell
/// corner values in @a values; corner n maps to bit n.
template<typename ValueType>
inline uint8_t
computeSignFlags(const math::Tuple<8, ValueType>& values, const ValueType iso)
{
    unsigned signs = 0;
    for (unsigned n = 0; n < 8; ++n) {
        if (isInsideValue(values[n], iso)) signs |= (1u << n);
    }
    return uint8_t(signs);
}
/// @brief General method that computes the cell-sign configuration at the given
/// @c ijk coordinate.
template<typename AccessorT>
inline uint8_t
evalCellSigns(const AccessorT& accessor, const Coord& ijk, typename AccessorT::ValueType iso)
{
unsigned signs = 0;
Coord coord = ijk; // i, j, k
if (isInsideValue(accessor.getValue(coord), iso)) signs |= 1u;
coord[0] += 1; // i+1, j, k
if (isInsideValue(accessor.getValue(coord), iso)) signs |= 2u;
coord[2] += 1; // i+1, j, k+1
if (isInsideValue(accessor.getValue(coord), iso)) signs |= 4u;
coord[0] = ijk[0]; // i, j, k+1
if (isInsideValue(accessor.getValue(coord), iso)) signs |= 8u;
coord[1] += 1; coord[2] = ijk[2]; // i, j+1, k
if (isInsideValue(accessor.getValue(coord), iso)) signs |= 16u;
coord[0] += 1; // i+1, j+1, k
if (isInsideValue(accessor.getValue(coord), iso)) signs |= 32u;
coord[2] += 1; // i+1, j+1, k+1
if (isInsideValue(accessor.getValue(coord), iso)) signs |= 64u;
coord[0] = ijk[0]; // i, j+1, k+1
if (isInsideValue(accessor.getValue(coord), iso)) signs |= 128u;
return uint8_t(signs);
}
/// @brief Leaf node optimized method that computes the cell-sign configuration
/// at the given local @c offset
template<typename LeafT>
inline uint8_t
evalCellSigns(const LeafT& leaf, const Index offset, typename LeafT::ValueType iso)
{
unsigned signs = 0;
// i, j, k
if (isInsideValue(leaf.getValue(offset), iso)) signs |= 1u;
// i, j, k+1
if (isInsideValue(leaf.getValue(offset + 1), iso)) signs |= 8u;
// i, j+1, k
if (isInsideValue(leaf.getValue(offset + LeafT::DIM), iso)) signs |= 16u;
// i, j+1, k+1
if (isInsideValue(leaf.getValue(offset + LeafT::DIM + 1), iso)) signs |= 128u;
// i+1, j, k
if (isInsideValue(leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) ), iso)) signs |= 2u;
// i+1, j, k+1
if (isInsideValue(leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) + 1), iso)) signs |= 4u;
// i+1, j+1, k
if (isInsideValue(leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) + LeafT::DIM), iso)) signs |= 32u;
// i+1, j+1, k+1
if (isInsideValue(leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) + LeafT::DIM + 1), iso)) signs |= 64u;
return uint8_t(signs);
}
/// @brief Used to correct topological ambiguities related to two adjacent cells
/// that share an ambiguous face.
///
/// @a face identifies which face (1-6) of the cell at @a ijk is ambiguous.
/// The sign configuration of the neighbor cell across that face is
/// evaluated; if the neighbor's own ambiguous face (per sAmbiguousFace) is
/// the matching opposite face, @a signs is replaced by its bitwise
/// complement so both cells resolve the shared face consistently.
template<class AccessorT>
inline void
correctCellSigns(uint8_t& signs, uint8_t face,
    const AccessorT& acc, Coord ijk, typename AccessorT::ValueType iso)
{
    switch (int(face)) {
    case 1: // neighbor at k-1; its opposite face is 3
        ijk[2] -= 1;
        if (sAmbiguousFace[evalCellSigns(acc, ijk, iso)] == 3) signs = uint8_t(~signs);
        break;
    case 2: // neighbor at i+1; its opposite face is 4
        ijk[0] += 1;
        if (sAmbiguousFace[evalCellSigns(acc, ijk, iso)] == 4) signs = uint8_t(~signs);
        break;
    case 3: // neighbor at k+1; its opposite face is 1
        ijk[2] += 1;
        if (sAmbiguousFace[evalCellSigns(acc, ijk, iso)] == 1) signs = uint8_t(~signs);
        break;
    case 4: // neighbor at i-1; its opposite face is 2
        ijk[0] -= 1;
        if (sAmbiguousFace[evalCellSigns(acc, ijk, iso)] == 2) signs = uint8_t(~signs);
        break;
    case 5: // neighbor at j-1; its opposite face is 6
        ijk[1] -= 1;
        if (sAmbiguousFace[evalCellSigns(acc, ijk, iso)] == 6) signs = uint8_t(~signs);
        break;
    case 6: // neighbor at j+1; its opposite face is 5
        ijk[1] += 1;
        if (sAmbiguousFace[evalCellSigns(acc, ijk, iso)] == 5) signs = uint8_t(~signs);
        break;
    default:
        break;
    }
}
/// @brief Returns true if the @a dim sized cell anchored at @a ijk has a
/// sign configuration that is not in the adaptable set (sAdaptable), or if
/// any midpoint sample — on an edge, at a face center, or at the cell
/// center — disagrees with every corner/face value bounding it. Used to
/// decide whether a region of cells can be merged during adaptive meshing.
template<class AccessorT>
inline bool
isNonManifold(const AccessorT& accessor, const Coord& ijk,
    typename AccessorT::ValueType isovalue, const int dim)
{
    int hDim = dim >> 1; // half the cell size, for midpoint sampling
    bool m, p[8]; // Corner signs
    Coord coord = ijk; // i, j, k
    p[0] = isInsideValue(accessor.getValue(coord), isovalue);
    coord[0] += dim; // i+dim, j, k
    p[1] = isInsideValue(accessor.getValue(coord), isovalue);
    coord[2] += dim; // i+dim, j, k+dim
    p[2] = isInsideValue(accessor.getValue(coord), isovalue);
    coord[0] = ijk[0]; // i, j, k+dim
    p[3] = isInsideValue(accessor.getValue(coord), isovalue);
    coord[1] += dim; coord[2] = ijk[2]; // i, j+dim, k
    p[4] = isInsideValue(accessor.getValue(coord), isovalue);
    coord[0] += dim; // i+dim, j+dim, k
    p[5] = isInsideValue(accessor.getValue(coord), isovalue);
    coord[2] += dim; // i+dim, j+dim, k+dim
    p[6] = isInsideValue(accessor.getValue(coord), isovalue);
    coord[0] = ijk[0]; // i, j+dim, k+dim
    p[7] = isInsideValue(accessor.getValue(coord), isovalue);
    // Check if the corner sign configuration is ambiguous
    unsigned signs = 0;
    if (p[0]) signs |= 1u;
    if (p[1]) signs |= 2u;
    if (p[2]) signs |= 4u;
    if (p[3]) signs |= 8u;
    if (p[4]) signs |= 16u;
    if (p[5]) signs |= 32u;
    if (p[6]) signs |= 64u;
    if (p[7]) signs |= 128u;
    if (!sAdaptable[signs]) return true;
    // Manifold check
    // Evaluate edges
    // (each edge midpoint must agree with at least one of its two corners)
    int i = ijk[0], ip = ijk[0] + hDim, ipp = ijk[0] + dim;
    int j = ijk[1], jp = ijk[1] + hDim, jpp = ijk[1] + dim;
    int k = ijk[2], kp = ijk[2] + hDim, kpp = ijk[2] + dim;
    // edge 1
    coord.reset(ip, j, k);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[0] != m && p[1] != m) return true;
    // edge 2
    coord.reset(ipp, j, kp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[1] != m && p[2] != m) return true;
    // edge 3
    coord.reset(ip, j, kpp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[2] != m && p[3] != m) return true;
    // edge 4
    coord.reset(i, j, kp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[0] != m && p[3] != m) return true;
    // edge 5
    coord.reset(ip, jpp, k);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[4] != m && p[5] != m) return true;
    // edge 6
    coord.reset(ipp, jpp, kp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[5] != m && p[6] != m) return true;
    // edge 7
    coord.reset(ip, jpp, kpp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[6] != m && p[7] != m) return true;
    // edge 8
    coord.reset(i, jpp, kp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[7] != m && p[4] != m) return true;
    // edge 9
    coord.reset(i, jp, k);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[0] != m && p[4] != m) return true;
    // edge 10
    coord.reset(ipp, jp, k);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[1] != m && p[5] != m) return true;
    // edge 11
    coord.reset(ipp, jp, kpp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[2] != m && p[6] != m) return true;
    // edge 12
    coord.reset(i, jp, kpp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[3] != m && p[7] != m) return true;
    // Evaluate faces
    // (each face center must agree with at least one of its four corners)
    // face 1
    coord.reset(ip, jp, k);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[0] != m && p[1] != m && p[4] != m && p[5] != m) return true;
    // face 2
    coord.reset(ipp, jp, kp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[1] != m && p[2] != m && p[5] != m && p[6] != m) return true;
    // face 3
    coord.reset(ip, jp, kpp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[2] != m && p[3] != m && p[6] != m && p[7] != m) return true;
    // face 4
    coord.reset(i, jp, kp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[0] != m && p[3] != m && p[4] != m && p[7] != m) return true;
    // face 5
    coord.reset(ip, j, kp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[0] != m && p[1] != m && p[2] != m && p[3] != m) return true;
    // face 6
    coord.reset(ip, jpp, kp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[4] != m && p[5] != m && p[6] != m && p[7] != m) return true;
    // test cube center
    // (the center must agree with at least one of the eight corners)
    coord.reset(ip, jp, kp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[0] != m && p[1] != m && p[2] != m && p[3] != m &&
        p[4] != m && p[5] != m && p[6] != m && p[7] != m) return true;
    return false;
}
////////////////////////////////////////
template <class LeafType>
inline void
mergeVoxels(LeafType& leaf, const Coord& start, int dim, int regionId)
{
Coord ijk, end = start;
end[0] += dim;
end[1] += dim;
end[2] += dim;
for (ijk[0] = start[0]; ijk[0] < end[0]; ++ijk[0]) {
for (ijk[1] = start[1]; ijk[1] < end[1]; ++ijk[1]) {
for (ijk[2] = start[2]; ijk[2] < end[2]; ++ijk[2]) {
leaf.setValueOnly(ijk, regionId);
}
}
}
}
// Note that we must use ValueType::value_type or else Visual C++ gets confused
// thinking that it is a constructor.
template <class LeafType>
inline bool
isMergable(LeafType& leaf, const Coord& start, int dim,
typename LeafType::ValueType::value_type adaptivity)
{
if (adaptivity < 1e-6) return false;
using VecT = typename LeafType::ValueType;
Coord ijk, end = start;
end[0] += dim;
end[1] += dim;
end[2] += dim;
std::vector<VecT> norms;
for (ijk[0] = start[0]; ijk[0] < end[0]; ++ijk[0]) {
for (ijk[1] = start[1]; ijk[1] < end[1]; ++ijk[1]) {
for (ijk[2] = start[2]; ijk[2] < end[2]; ++ijk[2]) {
if(!leaf.isValueOn(ijk)) continue;
norms.push_back(leaf.getValue(ijk));
}
}
}
size_t N = norms.size();
for (size_t ni = 0; ni < N; ++ni) {
VecT n_i = norms[ni];
for (size_t nj = 0; nj < N; ++nj) {
VecT n_j = norms[nj];
if ((1.0 - n_i.dot(n_j)) > adaptivity) return false;
}
}
return true;
}
////////////////////////////////////////
/// @brief Linear interpolation: fractional position of the isovalue @a iso
///        along the segment between samples @a v0 and @a v1.
inline double
evalZeroCrossing(double v0, double v1, double iso)
{
    return (iso - v0) / (v1 - v0);
}
/// @brief Extracts the eight corner values for leaf inclusive cells.
template<typename LeafT>
inline void
collectCornerValues(const LeafT& leaf, const Index offset, std::vector<double>& values)
{
values[0] = double(leaf.getValue(offset)); // i, j, k
values[3] = double(leaf.getValue(offset + 1)); // i, j, k+1
values[4] = double(leaf.getValue(offset + LeafT::DIM)); // i, j+1, k
values[7] = double(leaf.getValue(offset + LeafT::DIM + 1)); // i, j+1, k+1
values[1] = double(leaf.getValue(offset + (LeafT::DIM * LeafT::DIM))); // i+1, j, k
values[2] = double(leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) + 1)); // i+1, j, k+1
values[5] = double(leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) + LeafT::DIM)); // i+1, j+1, k
values[6] = double(leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) + LeafT::DIM + 1)); // i+1, j+1, k+1
}
/// @brief Extracts the eight corner values for a cell starting at the given @ijk coordinate.
template<typename AccessorT>
inline void
collectCornerValues(const AccessorT& acc, const Coord& ijk, std::vector<double>& values)
{
Coord coord = ijk;
values[0] = double(acc.getValue(coord)); // i, j, k
coord[0] += 1;
values[1] = double(acc.getValue(coord)); // i+1, j, k
coord[2] += 1;
values[2] = double(acc.getValue(coord)); // i+i, j, k+1
coord[0] = ijk[0];
values[3] = double(acc.getValue(coord)); // i, j, k+1
coord[1] += 1; coord[2] = ijk[2];
values[4] = double(acc.getValue(coord)); // i, j+1, k
coord[0] += 1;
values[5] = double(acc.getValue(coord)); // i+1, j+1, k
coord[2] += 1;
values[6] = double(acc.getValue(coord)); // i+1, j+1, k+1
coord[0] = ijk[0];
values[7] = double(acc.getValue(coord)); // i, j+1, k+1
}
/// @brief Computes the average cell point for a given edge group.
inline Vec3d
computePoint(const std::vector<double>& values, unsigned char signs,
    unsigned char edgeGroup, double iso)
{
    // Static description of the 12 cube edges: the two corner samples whose
    // zero crossing is interpolated, the axis of interpolation (0 = x, 1 = y,
    // 2 = z), and the fixed cell-local offsets on the two remaining axes.
    struct Edge { int c0, c1, axis; double dx, dy, dz; };
    static const Edge sEdges[12] = {
        { 0, 1, 0,  0.0, 0.0, 0.0 }, // edge 1:  0 - 1
        { 1, 2, 2,  1.0, 0.0, 0.0 }, // edge 2:  1 - 2
        { 3, 2, 0,  0.0, 0.0, 1.0 }, // edge 3:  3 - 2
        { 0, 3, 2,  0.0, 0.0, 0.0 }, // edge 4:  0 - 3
        { 4, 5, 0,  0.0, 1.0, 0.0 }, // edge 5:  4 - 5
        { 5, 6, 2,  1.0, 1.0, 0.0 }, // edge 6:  5 - 6
        { 7, 6, 0,  0.0, 1.0, 1.0 }, // edge 7:  7 - 6
        { 4, 7, 2,  0.0, 1.0, 0.0 }, // edge 8:  4 - 7
        { 0, 4, 1,  0.0, 0.0, 0.0 }, // edge 9:  0 - 4
        { 1, 5, 1,  1.0, 0.0, 0.0 }, // edge 10: 1 - 5
        { 2, 6, 1,  1.0, 0.0, 1.0 }, // edge 11: 2 - 6
        { 3, 7, 1,  0.0, 0.0, 1.0 }  // edge 12: 3 - 7
    };

    Vec3d avg(0.0, 0.0, 0.0);
    int samples = 0;

    // Accumulate the intersection point of every edge that belongs to the
    // requested edge group of this sign configuration.
    for (int n = 0; n < 12; ++n) {
        if (sEdgeGroupTable[signs][n + 1] != edgeGroup) continue;
        const Edge& e = sEdges[n];
        avg[0] += e.dx;
        avg[1] += e.dy;
        avg[2] += e.dz;
        avg[e.axis] += evalZeroCrossing(values[e.c0], values[e.c1], iso);
        ++samples;
    }

    if (samples > 1) {
        const double w = 1.0 / double(samples);
        avg[0] *= w;
        avg[1] *= w;
        avg[2] *= w;
    }

    return avg;
}
/// @brief Computes the average cell point for a given edge group, ignoring edge
///        samples present in the @c signsMask configuration.
/// @return The number of edge samples that contributed to @a avg.
inline int
computeMaskedPoint(Vec3d& avg, const std::vector<double>& values, unsigned char signs,
    unsigned char signsMask, unsigned char edgeGroup, double iso)
{
    // Same cube-edge table as computePoint(): corner pair, interpolation axis
    // (0 = x, 1 = y, 2 = z) and fixed offsets on the remaining axes.
    struct Edge { int c0, c1, axis; double dx, dy, dz; };
    static const Edge sEdges[12] = {
        { 0, 1, 0,  0.0, 0.0, 0.0 }, // edge 1:  0 - 1
        { 1, 2, 2,  1.0, 0.0, 0.0 }, // edge 2:  1 - 2
        { 3, 2, 0,  0.0, 0.0, 1.0 }, // edge 3:  3 - 2
        { 0, 3, 2,  0.0, 0.0, 0.0 }, // edge 4:  0 - 3
        { 4, 5, 0,  0.0, 1.0, 0.0 }, // edge 5:  4 - 5
        { 5, 6, 2,  1.0, 1.0, 0.0 }, // edge 6:  5 - 6
        { 7, 6, 0,  0.0, 1.0, 1.0 }, // edge 7:  7 - 6
        { 4, 7, 2,  0.0, 1.0, 0.0 }, // edge 8:  4 - 7
        { 0, 4, 1,  0.0, 0.0, 0.0 }, // edge 9:  0 - 4
        { 1, 5, 1,  1.0, 0.0, 0.0 }, // edge 10: 1 - 5
        { 2, 6, 1,  1.0, 0.0, 1.0 }, // edge 11: 2 - 6
        { 3, 7, 1,  0.0, 0.0, 1.0 }  // edge 12: 3 - 7
    };

    avg = Vec3d(0.0, 0.0, 0.0);
    int samples = 0;

    for (int n = 0; n < 12; ++n) {
        // Edge must belong to the requested group ...
        if (sEdgeGroupTable[signs][n + 1] != edgeGroup) continue;
        // ... and must not be claimed by any group of the mask configuration.
        if (sEdgeGroupTable[signsMask][n + 1] != 0) continue;
        const Edge& e = sEdges[n];
        avg[0] += e.dx;
        avg[1] += e.dy;
        avg[2] += e.dz;
        avg[e.axis] += evalZeroCrossing(values[e.c0], values[e.c1], iso);
        ++samples;
    }

    if (samples > 1) {
        const double w = 1.0 / double(samples);
        avg[0] *= w;
        avg[1] *= w;
        avg[2] *= w;
    }

    return samples;
}
/// @brief Computes the average cell point for a given edge group, by computing
///        convex weights based on the distance from the sample point @c p.
inline Vec3d
computeWeightedPoint(const Vec3d& p, const std::vector<double>& values,
    unsigned char signs, unsigned char edgeGroup, double iso)
{
    // Same cube-edge table as computePoint(): corner pair, interpolation axis
    // (0 = x, 1 = y, 2 = z) and fixed offsets on the remaining axes.
    struct Edge { int c0, c1, axis; double dx, dy, dz; };
    static const Edge sEdges[12] = {
        { 0, 1, 0,  0.0, 0.0, 0.0 }, // edge 1:  0 - 1
        { 1, 2, 2,  1.0, 0.0, 0.0 }, // edge 2:  1 - 2
        { 3, 2, 0,  0.0, 0.0, 1.0 }, // edge 3:  3 - 2
        { 0, 3, 2,  0.0, 0.0, 0.0 }, // edge 4:  0 - 3
        { 4, 5, 0,  0.0, 1.0, 0.0 }, // edge 5:  4 - 5
        { 5, 6, 2,  1.0, 1.0, 0.0 }, // edge 6:  5 - 6
        { 7, 6, 0,  0.0, 1.0, 1.0 }, // edge 7:  7 - 6
        { 4, 7, 2,  0.0, 1.0, 0.0 }, // edge 8:  4 - 7
        { 0, 4, 1,  0.0, 0.0, 0.0 }, // edge 9:  0 - 4
        { 1, 5, 1,  1.0, 0.0, 0.0 }, // edge 10: 1 - 5
        { 2, 6, 1,  1.0, 0.0, 1.0 }, // edge 11: 2 - 6
        { 3, 7, 1,  0.0, 0.0, 1.0 }  // edge 12: 3 - 7
    };

    std::vector<Vec3d> samples;
    samples.reserve(8);
    std::vector<double> weights;
    weights.reserve(8);

    // Collect the intersection point of every edge in the requested group,
    // along with its squared distance to the reference point @a p.
    for (int n = 0; n < 12; ++n) {
        if (sEdgeGroupTable[signs][n + 1] != edgeGroup) continue;
        const Edge& e = sEdges[n];
        Vec3d pos(e.dx, e.dy, e.dz);
        pos[e.axis] = evalZeroCrossing(values[e.c0], values[e.c1], iso);
        samples.push_back(pos);
        weights.push_back((pos - p).lengthSqr());
    }

    // Invert the distances so that the closest sample gets the largest weight.
    double minWeight = std::numeric_limits<double>::max();
    double maxWeight = -std::numeric_limits<double>::max();
    for (size_t i = 0, I = weights.size(); i < I; ++i) {
        minWeight = std::min(minWeight, weights[i]);
        maxWeight = std::max(maxWeight, weights[i]);
    }

    const double offset = maxWeight + minWeight * 0.1;
    double weightSum = 0.0;
    for (size_t i = 0, I = weights.size(); i < I; ++i) {
        weights[i] = offset - weights[i];
        weightSum += weights[i];
    }

    // Convex combination of the samples.
    Vec3d avg(0.0, 0.0, 0.0);
    if (samples.size() > 1) {
        for (size_t i = 0, I = samples.size(); i < I; ++i) {
            avg += samples[i] * (weights[i] / weightSum);
        }
    } else {
        avg = samples.front();
    }

    return avg;
}
/// @brief Computes the average cell points defined by the sign configuration
/// @c signs and the given corner values @c values.
inline void
computeCellPoints(std::vector<Vec3d>& points,
const std::vector<double>& values, unsigned char signs, double iso)
{
for (size_t n = 1, N = sEdgeGroupTable[signs][0] + 1; n < N; ++n) {
points.push_back(computePoint(values, signs, uint8_t(n), iso));
}
}
/// @brief Given a sign configuration @c lhsSigns and an edge group @c groupId,
///        finds the corresponding edge group in a different sign configuration
///        @c rhsSigns. Returns -1 if no match is found.
inline int
matchEdgeGroup(unsigned char groupId, unsigned char lhsSigns, unsigned char rhsSigns)
{
    for (size_t i = 1; i <= 12; ++i) {
        if (sEdgeGroupTable[lhsSigns][i] != groupId) continue;
        // Shared edge found; report the rhs group unless the edge is unused there.
        const int rhsGroup = sEdgeGroupTable[rhsSigns][i];
        if (rhsGroup != 0) return rhsGroup;
    }
    return -1;
}
/// @brief Computes the average cell points defined by the sign configuration
/// @c signs and the given corner values @c values. Combines data from
/// two different level sets to eliminate seam lines when meshing
/// fractured segments.
inline void
computeCellPoints(std::vector<Vec3d>& points, std::vector<bool>& weightedPointMask,
const std::vector<double>& lhsValues, const std::vector<double>& rhsValues,
unsigned char lhsSigns, unsigned char rhsSigns,
double iso, size_t pointIdx, const uint32_t * seamPointArray)
{
for (size_t n = 1, N = sEdgeGroupTable[lhsSigns][0] + 1; n < N; ++n) {
int id = matchEdgeGroup(uint8_t(n), lhsSigns, rhsSigns);
if (id != -1) {
const unsigned char e = uint8_t(id);
const uint32_t& quantizedPoint = seamPointArray[pointIdx + (id - 1)];
if ((quantizedPoint & MASK_DIRTY_BIT) && !(quantizedPoint & MASK_INVALID_BIT)) {
Vec3d p = unpackPoint(quantizedPoint);
points.push_back(computeWeightedPoint(p, rhsValues, rhsSigns, e, iso));
weightedPointMask.push_back(true);
} else {
points.push_back(computePoint(rhsValues, rhsSigns, e, iso));
weightedPointMask.push_back(false);
}
} else {
points.push_back(computePoint(lhsValues, lhsSigns, uint8_t(n), iso));
weightedPointMask.push_back(false);
}
}
}
/// @brief TBB body object that computes a world-space mesh point position for
///        every surface-intersecting cell and writes it into a preallocated
///        point array. Definitions of the constructor and operator() follow
///        this declaration.
template <typename InputTreeType>
struct ComputePoints
{
    using InputLeafNodeType = typename InputTreeType::LeafNodeType;
    using InputValueType = typename InputLeafNodeType::ValueType;
    // Companion tree holding per-cell sign/edge flag data.
    using Int16TreeType = typename InputTreeType::template ValueConverter<Int16>::Type;
    using Int16LeafNodeType = typename Int16TreeType::LeafNodeType;
    // Companion tree holding per-cell point indices.
    using Index32TreeType = typename InputTreeType::template ValueConverter<Index32>::Type;
    using Index32LeafNodeType = typename Index32TreeType::LeafNodeType;
    /// @param pointArray           output array receiving the computed points
    /// @param inputTree            tree to mesh
    /// @param pointIndexLeafNodes  leaf nodes that receive per-voxel point offsets
    /// @param signFlagsLeafNodes   leaf nodes with precomputed sign/edge flags
    /// @param leafNodeOffsets      per-leaf starting offsets into @a pointArray
    /// @param xform                index-to-world transform
    /// @param iso                  isovalue to mesh
    ComputePoints(Vec3s * pointArray,
        const InputTreeType& inputTree,
        const std::vector<Index32LeafNodeType*>& pointIndexLeafNodes,
        const std::vector<Int16LeafNodeType*>& signFlagsLeafNodes,
        const std::unique_ptr<Index32[]>& leafNodeOffsets,
        const math::Transform& xform,
        double iso);
    /// @brief Supplies the reference (fractured segment) data used for
    ///        seam-line aware point placement; optional.
    void setRefData(const InputTreeType& refInputTree,
        const Index32TreeType& refPointIndexTree,
        const Int16TreeType& refSignFlagsTree,
        const uint32_t * quantizedSeamLinePoints,
        uint8_t * seamLinePointsFlags);
    void operator()(const tbb::blocked_range<size_t>&) const;
private:
    Vec3s * const mPoints;                                   // output point array
    InputTreeType const * const mInputTree;
    Index32LeafNodeType * const * const mPointIndexNodes;    // mutated: voxels receive point offsets
    Int16LeafNodeType const * const * const mSignFlagsNodes;
    Index32 const * const mNodeOffsets;                      // per-leaf offsets into mPoints
    math::Transform const mTransform;
    double const mIsovalue;
    // reference meshing data (null unless setRefData() was called)
    InputTreeType const * mRefInputTree;
    Index32TreeType const * mRefPointIndexTree;
    Int16TreeType const * mRefSignFlagsTree;
    uint32_t const * mQuantizedSeamLinePoints;
    uint8_t * mSeamLinePointsFlags;
}; // struct ComputePoints
// ComputePoints constructor: caches raw pointers/handles to all inputs; the
// reference-data members start out null and are filled in by setRefData().
template <typename InputTreeType>
ComputePoints<InputTreeType>::ComputePoints(
    Vec3s * pointArray,
    const InputTreeType& inputTree,
    const std::vector<Index32LeafNodeType*>& pointIndexLeafNodes,
    const std::vector<Int16LeafNodeType*>& signFlagsLeafNodes,
    const std::unique_ptr<Index32[]>& leafNodeOffsets,
    const math::Transform& xform,
    double iso)
    : mPoints(pointArray)
    , mInputTree(&inputTree)
    , mPointIndexNodes(pointIndexLeafNodes.empty() ? nullptr : &pointIndexLeafNodes.front())
    , mSignFlagsNodes(signFlagsLeafNodes.empty() ? nullptr : &signFlagsLeafNodes.front())
    , mNodeOffsets(leafNodeOffsets.get())
    , mTransform(xform)
    , mIsovalue(iso)
    , mRefInputTree(nullptr)
    , mRefPointIndexTree(nullptr)
    , mRefSignFlagsTree(nullptr)
    , mQuantizedSeamLinePoints(nullptr)
    , mSeamLinePointsFlags(nullptr)
{
}
// Installs the reference meshing data; after this call operator() performs
// seam-line aware point placement for voxels flagged with SEAM.
template <typename InputTreeType>
void
ComputePoints<InputTreeType>::setRefData(
    const InputTreeType& refInputTree,
    const Index32TreeType& refPointIndexTree,
    const Int16TreeType& refSignFlagsTree,
    const uint32_t * quantizedSeamLinePoints,
    uint8_t * seamLinePointsFlags)
{
    mRefInputTree = &refInputTree;
    mRefPointIndexTree = &refPointIndexTree;
    mRefSignFlagsTree = &refSignFlagsTree;
    mQuantizedSeamLinePoints = quantizedSeamLinePoints;
    mSeamLinePointsFlags = seamLinePointsFlags;
}
// Computes world-space surface points for the leaf nodes in [range.begin(),
// range.end()): each active point-index voxel is replaced by its running
// point-array offset, and the corresponding points are written to mPoints.
// Voxels carrying a nonzero region id are collected and emitted afterwards
// as a single averaged point per region.
template <typename InputTreeType>
void
ComputePoints<InputTreeType>::operator()(const tbb::blocked_range<size_t>& range) const
{
    using InputTreeAccessor = tree::ValueAccessor<const InputTreeType>;
    using Index32TreeAccessor = tree::ValueAccessor<const Index32TreeType>;
    using Int16TreeAccessor = tree::ValueAccessor<const Int16TreeType>;
    using IndexType = typename Index32TreeType::ValueType;
    using IndexArray = std::vector<Index>;
    using IndexArrayMap = std::map<IndexType, IndexArray>;
    InputTreeAccessor inputAcc(*mInputTree);
    Vec3d xyz;
    Coord ijk;
    // Scratch buffers reused across all cells (cleared per cell).
    std::vector<Vec3d> points(4);
    std::vector<bool> weightedPointMask(4);
    std::vector<double> values(8), refValues(8);
    const double iso = mIsovalue;
    // reference data accessors
    std::unique_ptr<InputTreeAccessor> refInputAcc;
    std::unique_ptr<Index32TreeAccessor> refPointIndexAcc;
    std::unique_ptr<Int16TreeAccessor> refSignFlagsAcc;
    const bool hasReferenceData = mRefInputTree && mRefPointIndexTree && mRefSignFlagsTree;
    if (hasReferenceData) {
        refInputAcc.reset(new InputTreeAccessor(*mRefInputTree));
        refPointIndexAcc.reset(new Index32TreeAccessor(*mRefPointIndexTree));
        refSignFlagsAcc.reset(new Int16TreeAccessor(*mRefSignFlagsTree));
    }
    for (size_t n = range.begin(), N = range.end(); n != N; ++n) {
        Index32LeafNodeType& pointIndexNode = *mPointIndexNodes[n];
        const Coord& origin = pointIndexNode.origin();
        const Int16LeafNodeType& signFlagsNode = *mSignFlagsNodes[n];
        const InputLeafNodeType * inputNode = inputAcc.probeConstLeaf(origin);
        // get reference data
        const InputLeafNodeType * refInputNode = nullptr;
        const Index32LeafNodeType * refPointIndexNode = nullptr;
        const Int16LeafNodeType * refSignFlagsNode = nullptr;
        if (hasReferenceData) {
            refInputNode = refInputAcc->probeConstLeaf(origin);
            refPointIndexNode = refPointIndexAcc->probeConstLeaf(origin);
            refSignFlagsNode = refSignFlagsAcc->probeConstLeaf(origin);
        }
        // Running offset into mPoints for this leaf node.
        IndexType pointOffset = IndexType(mNodeOffsets[n]);
        IndexArrayMap regions;
        for (auto it = pointIndexNode.beginValueOn(); it; ++it) {
            const Index offset = it.pos();
            const IndexType id = it.getValue();
            if (id != 0) {
                // Nonzero id marks a voxel belonging to a collapsed (merged)
                // region; defer it to the region pass below.
                if (id != IndexType(util::INVALID_IDX)) {
                    regions[id].push_back(offset);
                }
                continue;
            }
            // Replace the placeholder id with this voxel's point-array offset.
            pointIndexNode.setValueOnly(offset, pointOffset);
            const Int16 flags = signFlagsNode.getValue(offset);
            uint8_t signs = uint8_t(SIGNS & flags);
            uint8_t refSigns = 0;
            if ((flags & SEAM) && refPointIndexNode && refSignFlagsNode) {
                if (refSignFlagsNode->isValueOn(offset)) {
                    refSigns = uint8_t(SIGNS & refSignFlagsNode->getValue(offset));
                }
            }
            ijk = Index32LeafNodeType::offsetToLocalCoord(offset);
            // A cell is "inclusive" if all eight corners lie inside this leaf
            // node, allowing the faster offset-based corner gather.
            const bool inclusiveCell = inputNode &&
                ijk[0] < int(Index32LeafNodeType::DIM - 1) &&
                ijk[1] < int(Index32LeafNodeType::DIM - 1) &&
                ijk[2] < int(Index32LeafNodeType::DIM - 1);
            ijk += origin;
            if (inclusiveCell) collectCornerValues(*inputNode, offset, values);
            else collectCornerValues(inputAcc, ijk, values);
            points.clear();
            weightedPointMask.clear();
            if (refSigns == 0) {
                computeCellPoints(points, values, signs, iso);
            } else {
                // Seam-line case: blend with the reference level set.
                if (inclusiveCell && refInputNode) {
                    collectCornerValues(*refInputNode, offset, refValues);
                } else {
                    collectCornerValues(*refInputAcc, ijk, refValues);
                }
                computeCellPoints(points, weightedPointMask, values, refValues, signs, refSigns,
                    iso, refPointIndexNode->getValue(offset), mQuantizedSeamLinePoints);
            }
            xyz[0] = double(ijk[0]);
            xyz[1] = double(ijk[1]);
            xyz[2] = double(ijk[2]);
            for (size_t i = 0, I = points.size(); i < I; ++i) {
                Vec3d& point = points[i];
                // Checks for both NaN and inf vertex positions, i.e. any value that is not finite.
                if (!std::isfinite(point[0]) ||
                    !std::isfinite(point[1]) ||
                    !std::isfinite(point[2]))
                {
                    OPENVDB_THROW(ValueError,
                        "VolumeToMesh encountered NaNs or infs in the input VDB!"
                        " Hint: Check the input and consider using the \"Diagnostics\" tool "
                        "to detect and resolve the NaNs.");
                }
                // Cell-local position -> index space -> world space.
                point += xyz;
                point = mTransform.indexToWorld(point);
                Vec3s& pos = mPoints[pointOffset];
                pos[0] = float(point[0]);
                pos[1] = float(point[1]);
                pos[2] = float(point[2]);
                if (mSeamLinePointsFlags && !weightedPointMask.empty() && weightedPointMask[i]) {
                    mSeamLinePointsFlags[pointOffset] = uint8_t(1);
                }
                ++pointOffset;
            }
        }
        // generate collapsed region points
        for (typename IndexArrayMap::iterator it = regions.begin(); it != regions.end(); ++it) {
            // NOTE(review): the local 'point' below is unused.
            Vec3d avg(0.0), point(0.0);
            int count = 0;
            const IndexArray& voxels = it->second;
            for (size_t i = 0, I = voxels.size(); i < I; ++i) {
                const Index offset = voxels[i];
                ijk = Index32LeafNodeType::offsetToLocalCoord(offset);
                const bool inclusiveCell = inputNode &&
                    ijk[0] < int(Index32LeafNodeType::DIM - 1) &&
                    ijk[1] < int(Index32LeafNodeType::DIM - 1) &&
                    ijk[2] < int(Index32LeafNodeType::DIM - 1);
                ijk += origin;
                // Every voxel of the region points at the single shared point.
                pointIndexNode.setValueOnly(offset, pointOffset);
                uint8_t signs = uint8_t(SIGNS & signFlagsNode.getValue(offset));
                if (inclusiveCell) collectCornerValues(*inputNode, offset, values);
                else collectCornerValues(inputAcc, ijk, values);
                points.clear();
                computeCellPoints(points, values, signs, iso);
                // Accumulate the first cell point of each voxel in index space.
                avg[0] += double(ijk[0]) + points[0][0];
                avg[1] += double(ijk[1]) + points[0][1];
                avg[2] += double(ijk[2]) + points[0][2];
                ++count;
            }
            if (count > 1) {
                double w = 1.0 / double(count);
                avg[0] *= w;
                avg[1] *= w;
                avg[2] *= w;
            }
            avg = mTransform.indexToWorld(avg);
            Vec3s& pos = mPoints[pointOffset];
            pos[0] = float(avg[0]);
            pos[1] = float(avg[1]);
            pos[2] = float(avg[2]);
            ++pointOffset;
        }
    }
} // ComputePoints::operator()
////////////////////////////////////////
/// @brief TBB body object that, for every SEAM-flagged voxel, resolves the
///        quantized seam-line point of each matching edge group against the
///        reference sign configuration and stores it in the shared point array.
template <typename InputTreeType>
struct SeamLineWeights
{
    using InputLeafNodeType = typename InputTreeType::LeafNodeType;
    using InputValueType = typename InputLeafNodeType::ValueType;
    using Int16TreeType = typename InputTreeType::template ValueConverter<Int16>::Type;
    using Int16LeafNodeType = typename Int16TreeType::LeafNodeType;
    using Index32TreeType = typename InputTreeType::template ValueConverter<Index32>::Type;
    using Index32LeafNodeType = typename Index32TreeType::LeafNodeType;

    SeamLineWeights(const std::vector<Int16LeafNodeType*>& signFlagsLeafNodes,
        const InputTreeType& inputTree,
        const Index32TreeType& refPointIndexTree,
        const Int16TreeType& refSignFlagsTree,
        uint32_t * quantizedPoints,
        InputValueType iso)
        : mSignFlagsNodes(signFlagsLeafNodes.empty() ? nullptr : &signFlagsLeafNodes.front())
        , mInputTree(&inputTree)
        , mRefPointIndexTree(&refPointIndexTree)
        , mRefSignFlagsTree(&refSignFlagsTree)
        , mQuantizedPoints(quantizedPoints)
        , mIsovalue(iso)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        tree::ValueAccessor<const InputTreeType> inputAcc(*mInputTree);
        tree::ValueAccessor<const Index32TreeType> pointIndexAcc(*mRefPointIndexTree);
        tree::ValueAccessor<const Int16TreeType> signFlagsAcc(*mRefSignFlagsTree);

        std::vector<double> cornerValues(8);
        const double iso = double(mIsovalue);
        Coord ijk;
        Vec3d pos;

        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

            const Int16LeafNodeType& signFlagsNode = *mSignFlagsNodes[n];
            const Coord& origin = signFlagsNode.origin();

            // The reference data must overlap this leaf node.
            const Int16LeafNodeType * refSignNode = signFlagsAcc.probeConstLeaf(origin);
            if (!refSignNode) continue;

            const Index32LeafNodeType * refPointIndexNode =
                pointIndexAcc.probeConstLeaf(origin);
            if (!refPointIndexNode) continue;

            const InputLeafNodeType * inputNode = inputAcc.probeConstLeaf(origin);

            for (typename Int16LeafNodeType::ValueOnCIter it = signFlagsNode.cbeginValueOn();
                it; ++it)
            {
                const Index offset = it.pos();

                ijk = Index32LeafNodeType::offsetToLocalCoord(offset);
                // All eight cell corners inside this leaf -> fast offset gather.
                const bool inclusiveCell = inputNode &&
                    ijk[0] < int(Index32LeafNodeType::DIM - 1) &&
                    ijk[1] < int(Index32LeafNodeType::DIM - 1) &&
                    ijk[2] < int(Index32LeafNodeType::DIM - 1);
                ijk += origin;

                if (!(it.getValue() & SEAM) || !refSignNode->isValueOn(offset)) continue;

                const uint8_t lhsSigns = uint8_t(SIGNS & it.getValue());
                const uint8_t rhsSigns = uint8_t(SIGNS & refSignNode->getValue(offset));

                if (inclusiveCell) {
                    collectCornerValues(*inputNode, offset, cornerValues);
                } else {
                    collectCornerValues(inputAcc, ijk, cornerValues);
                }

                for (unsigned i = 1, I = sEdgeGroupTable[lhsSigns][0] + 1; i < I; ++i) {
                    const int id = matchEdgeGroup(uint8_t(i), lhsSigns, rhsSigns);
                    if (id == -1) continue;
                    uint32_t& data = mQuantizedPoints[
                        refPointIndexNode->getValue(offset) + (id - 1)];
                    // Only the first writer initializes each seam point.
                    if (data & MASK_DIRTY_BIT) continue;
                    const int samples = computeMaskedPoint(
                        pos, cornerValues, lhsSigns, rhsSigns, uint8_t(i), iso);
                    if (samples > 0) data = packPoint(pos);
                    else data = MASK_INVALID_BIT;
                    data |= MASK_DIRTY_BIT;
                } // end point group loop
            } // end value on loop
        } // end leaf node loop
    }

private:
    Int16LeafNodeType const * const * const mSignFlagsNodes;
    InputTreeType const * const mInputTree;
    Index32TreeType const * const mRefPointIndexTree;
    Int16TreeType const * const mRefSignFlagsTree;
    uint32_t * const mQuantizedPoints;
    InputValueType const mIsovalue;
}; // struct SeamLineWeights
/// @brief TBB body object that ORs the SEAM flag into every active voxel whose
///        sign configuration differs from the reference sign configuration.
template <typename TreeType>
struct SetSeamLineFlags
{
    using LeafNodeType = typename TreeType::LeafNodeType;

    SetSeamLineFlags(const std::vector<LeafNodeType*>& signFlagsLeafNodes,
        const TreeType& refSignFlagsTree)
        : mSignFlagsNodes(signFlagsLeafNodes.empty() ? nullptr : &signFlagsLeafNodes.front())
        , mRefSignFlagsTree(&refSignFlagsTree)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        tree::ValueAccessor<const TreeType> refAcc(*mRefSignFlagsTree);

        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

            LeafNodeType& signFlagsNode = *mSignFlagsNodes[n];
            const LeafNodeType * refSignNode = refAcc.probeConstLeaf(signFlagsNode.origin());
            if (!refSignNode) continue;

            for (auto it = signFlagsNode.cbeginValueOn(); it; ++it) {
                const Index offset = it.pos();

                const uint8_t rhsSigns = uint8_t(refSignNode->getValue(offset) & SIGNS);
                // Skip reference cells that produce no surface points.
                if (sEdgeGroupTable[rhsSigns][0] == 0) continue;

                const typename LeafNodeType::ValueType value = it.getValue();
                if (uint8_t(value & SIGNS) != rhsSigns) {
                    signFlagsNode.setValueOnly(offset, value | SEAM);
                }
            } // end value on loop
        } // end leaf node loop
    }

private:
    LeafNodeType * const * const mSignFlagsNodes;
    TreeType const * const mRefSignFlagsTree;
}; // struct SetSeamLineFlags
/// @brief TBB body object that ORs the SEAM flag into every active sign-flag
///        voxel that is also active in the given topology mask.
template <typename BoolTreeType, typename SignDataType>
struct TransferSeamLineFlags
{
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;
    using SignDataTreeType = typename BoolTreeType::template ValueConverter<SignDataType>::Type;
    using SignDataLeafNodeType = typename SignDataTreeType::LeafNodeType;

    TransferSeamLineFlags(const std::vector<SignDataLeafNodeType*>& signFlagsLeafNodes,
        const BoolTreeType& maskTree)
        : mSignFlagsNodes(signFlagsLeafNodes.empty() ? nullptr : &signFlagsLeafNodes.front())
        , mMaskTree(&maskTree)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        tree::ValueAccessor<const BoolTreeType> maskAcc(*mMaskTree);

        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

            SignDataLeafNodeType& signFlagsNode = *mSignFlagsNodes[n];
            const BoolLeafNodeType * maskNode =
                maskAcc.probeConstLeaf(signFlagsNode.origin());
            if (!maskNode) continue;

            using ValueOnCIter = typename SignDataLeafNodeType::ValueOnCIter;
            for (ValueOnCIter it = signFlagsNode.cbeginValueOn(); it; ++it) {
                const Index offset = it.pos();
                if (!maskNode->isValueOn(offset)) continue;
                signFlagsNode.setValueOnly(offset, it.getValue() | SEAM);
            } // end value on loop
        } // end leaf node loop
    }

private:
    SignDataLeafNodeType * const * const mSignFlagsNodes;
    BoolTreeType const * const mMaskTree;
}; // struct TransferSeamLineFlags
/// @brief TBB reduction body that activates, in a bool mask tree, every edge
///        voxel that is not itself SEAM-flagged but has a SEAM-flagged
///        neighbour sharing one of its intersected edges.
template <typename TreeType>
struct MaskSeamLineVoxels
{
    using LeafNodeType = typename TreeType::LeafNodeType;
    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;

    MaskSeamLineVoxels(const std::vector<LeafNodeType*>& signFlagsLeafNodes,
        const TreeType& signFlagsTree,
        BoolTreeType& mask)
        : mSignFlagsNodes(signFlagsLeafNodes.empty() ? nullptr : &signFlagsLeafNodes.front())
        , mSignFlagsTree(&signFlagsTree)
        , mTempMask(false)
        , mMask(&mask)
    {
    }

    /// Splitting constructor: each task accumulates into its own temporary mask.
    MaskSeamLineVoxels(MaskSeamLineVoxels& rhs, tbb::split)
        : mSignFlagsNodes(rhs.mSignFlagsNodes)
        , mSignFlagsTree(rhs.mSignFlagsTree)
        , mTempMask(false)
        , mMask(&mTempMask)
    {
    }

    void join(MaskSeamLineVoxels& rhs) { mMask->merge(*rhs.mMask); }

    void operator()(const tbb::blocked_range<size_t>& range)
    {
        using ValueOnCIter = typename LeafNodeType::ValueOnCIter;
        using ValueType = typename LeafNodeType::ValueType;

        tree::ValueAccessor<const TreeType> signFlagsAcc(*mSignFlagsTree);
        tree::ValueAccessor<BoolTreeType> maskAcc(*mMask);

        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

            LeafNodeType& signFlagsNode = *mSignFlagsNodes[n];

            for (ValueOnCIter it = signFlagsNode.cbeginValueOn(); it; ++it) {

                const ValueType flags = it.getValue();
                // Only unflagged voxels that intersect the surface are candidates.
                if ((flags & SEAM) || !(flags & EDGES)) continue;

                const Coord xyz = it.getCoord();
                const int x = xyz[0], y = xyz[1], z = xyz[2];
                bool seamNeighbour = false;

                // For each intersected edge, test the three face/edge neighbour
                // cells that share that edge.
                if (flags & XEDGE) {
                    seamNeighbour =
                           (signFlagsAcc.getValue(Coord(x, y - 1, z    )) & SEAM)
                        || (signFlagsAcc.getValue(Coord(x, y - 1, z - 1)) & SEAM)
                        || (signFlagsAcc.getValue(Coord(x, y,     z - 1)) & SEAM);
                }

                if (!seamNeighbour && (flags & YEDGE)) {
                    seamNeighbour =
                           (signFlagsAcc.getValue(Coord(x,     y, z - 1)) & SEAM)
                        || (signFlagsAcc.getValue(Coord(x - 1, y, z - 1)) & SEAM)
                        || (signFlagsAcc.getValue(Coord(x - 1, y, z    )) & SEAM);
                }

                if (!seamNeighbour && (flags & ZEDGE)) {
                    seamNeighbour =
                           (signFlagsAcc.getValue(Coord(x,     y - 1, z)) & SEAM)
                        || (signFlagsAcc.getValue(Coord(x - 1, y - 1, z)) & SEAM)
                        || (signFlagsAcc.getValue(Coord(x - 1, y,     z)) & SEAM);
                }

                if (seamNeighbour) {
                    maskAcc.setValue(xyz, true);
                }
            } // end value on loop
        } // end leaf node loop
    }

private:
    LeafNodeType * const * const mSignFlagsNodes;
    TreeType const * const mSignFlagsTree;
    BoolTreeType mTempMask; // per-task mask used by split copies
    BoolTreeType * const mMask;
}; // struct MaskSeamLineVoxels
/// @brief Tags, in @a signFlagsTree, the voxels whose sign configuration
///        differs from @a refSignFlagsTree with the SEAM flag, then dilates
///        the tag to neighbouring edge voxels.
template<typename SignDataTreeType>
inline void
markSeamLineData(SignDataTreeType& signFlagsTree, const SignDataTreeType& refSignFlagsTree)
{
    using SignDataType = typename SignDataTreeType::ValueType;
    using SignDataLeafNodeType = typename SignDataTreeType::LeafNodeType;
    using BoolTreeType = typename SignDataTreeType::template ValueConverter<bool>::Type;

    std::vector<SignDataLeafNodeType*> signFlagsLeafNodes;
    signFlagsTree.getNodes(signFlagsLeafNodes);

    const tbb::blocked_range<size_t> nodeRange(0, signFlagsLeafNodes.size());

    // Pass 1: flag voxels whose signs disagree with the reference.
    tbb::parallel_for(nodeRange,
        SetSeamLineFlags<SignDataTreeType>(signFlagsLeafNodes, refSignFlagsTree));

    // Pass 2: collect edge voxels adjacent to the newly flagged ones.
    BoolTreeType seamLineMaskTree(false);
    MaskSeamLineVoxels<SignDataTreeType>
        maskSeamLine(signFlagsLeafNodes, signFlagsTree, seamLineMaskTree);
    tbb::parallel_reduce(nodeRange, maskSeamLine);

    // Pass 3: propagate the SEAM flag to the collected neighbours.
    tbb::parallel_for(nodeRange,
        TransferSeamLineFlags<BoolTreeType, SignDataType>(signFlagsLeafNodes, seamLineMaskTree));
}
////////////////////////////////////////
/// @brief Parallel kernel that collapses mergeable voxels into adaptive
/// regions, writing a unique region id per merged block into the point-index
/// tree.  Intended to be run with tbb::parallel_for over the sign-flag leaf
/// nodes.
template <typename InputGridType>
struct MergeVoxelRegions
{
    using InputTreeType = typename InputGridType::TreeType;
    using InputLeafNodeType = typename InputTreeType::LeafNodeType;
    using InputValueType = typename InputLeafNodeType::ValueType;

    using FloatTreeType = typename InputTreeType::template ValueConverter<float>::Type;
    using FloatLeafNodeType = typename FloatTreeType::LeafNodeType;
    using FloatGridType = Grid<FloatTreeType>;

    using Int16TreeType = typename InputTreeType::template ValueConverter<Int16>::Type;
    using Int16LeafNodeType = typename Int16TreeType::LeafNodeType;

    using Index32TreeType = typename InputTreeType::template ValueConverter<Index32>::Type;
    using Index32LeafNodeType = typename Index32TreeType::LeafNodeType;

    using BoolTreeType = typename InputTreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;

    /// @note Caches raw pointers to the caller-owned inputs; nothing is copied.
    MergeVoxelRegions(const InputGridType& inputGrid,
        const Index32TreeType& pointIndexTree,
        const std::vector<Index32LeafNodeType*>& pointIndexLeafNodes,
        const std::vector<Int16LeafNodeType*>& signFlagsLeafNodes,
        InputValueType iso,
        float adaptivity,
        bool invertSurfaceOrientation);

    /// Optionally weight the adaptivity per voxel with a spatial grid.
    void setSpatialAdaptivity(const FloatGridType& grid)
    {
        mSpatialAdaptivityTree = &grid.tree();
        mSpatialAdaptivityTransform = &grid.transform();
    }

    /// Optionally exclude voxels in @a mask from merging.
    void setAdaptivityMask(const BoolTreeType& mask)
    {
        mMaskTree = &mask;
    }

    /// Optionally use a different adaptivity for leaf nodes that are not
    /// present in the reference sign data (i.e. interior regions).
    void setRefSignFlagsData(const Int16TreeType& signFlagsData, float internalAdaptivity)
    {
        mRefSignFlagsTree = &signFlagsData;
        mInternalAdaptivity = internalAdaptivity;
    }

    void operator()(const tbb::blocked_range<size_t>&) const;

private:
    InputTreeType const * const mInputTree;
    math::Transform const * const mInputTransform;

    Index32TreeType const * const mPointIndexTree;
    Index32LeafNodeType * const * const mPointIndexNodes;
    Int16LeafNodeType const * const * const mSignFlagsNodes;

    InputValueType mIsovalue;
    float mSurfaceAdaptivity, mInternalAdaptivity;
    bool mInvertSurfaceOrientation;

    // Optional reference data; null unless the corresponding setter was called.
    FloatTreeType const * mSpatialAdaptivityTree;
    BoolTreeType const * mMaskTree;
    Int16TreeType const * mRefSignFlagsTree;
    math::Transform const * mSpatialAdaptivityTransform;
}; // struct MergeVoxelRegions
/// @brief Constructor: caches raw pointers into caller-owned grids and leaf
/// node arrays; the internal adaptivity defaults to the surface adaptivity.
template <typename InputGridType>
MergeVoxelRegions<InputGridType>::MergeVoxelRegions(
    const InputGridType& inputGrid,
    const Index32TreeType& pointIndexTree,
    const std::vector<Index32LeafNodeType*>& pointIndexLeafNodes,
    const std::vector<Int16LeafNodeType*>& signFlagsLeafNodes,
    InputValueType iso,
    float adaptivity,
    bool invertSurfaceOrientation)
    : mInputTree(&inputGrid.tree())
    , mInputTransform(&inputGrid.transform())
    , mPointIndexTree(&pointIndexTree)
    , mPointIndexNodes(pointIndexLeafNodes.empty() ? nullptr : &pointIndexLeafNodes.front())
    , mSignFlagsNodes(signFlagsLeafNodes.empty() ? nullptr : &signFlagsLeafNodes.front())
    , mIsovalue(iso)
    , mSurfaceAdaptivity(adaptivity)
    , mInternalAdaptivity(adaptivity)
    , mInvertSurfaceOrientation(invertSurfaceOrientation)
    , mSpatialAdaptivityTree(nullptr)
    , mMaskTree(nullptr)
    , mRefSignFlagsTree(nullptr)
    , mSpatialAdaptivityTransform(nullptr)
{
}
/// @brief Per-leafnode kernel: masks off voxels that must not be merged
/// (seam-adjacent, topologically ambiguous or non-manifold regions), then
/// merges the remaining voxels into progressively larger 2^k blocks, writing
/// a unique region id into the point-index leaf for every successful merge.
template <typename InputGridType>
void
MergeVoxelRegions<InputGridType>::operator()(const tbb::blocked_range<size_t>& range) const
{
    using Vec3sType = math::Vec3<float>;
    using Vec3sLeafNodeType = typename InputLeafNodeType::template ValueConverter<Vec3sType>::Type;

    using InputTreeAccessor = tree::ValueAccessor<const InputTreeType>;
    using FloatTreeAccessor = tree::ValueAccessor<const FloatTreeType>;
    using Index32TreeAccessor = tree::ValueAccessor<const Index32TreeType>;
    using Int16TreeAccessor = tree::ValueAccessor<const Int16TreeType>;
    using BoolTreeAccessor = tree::ValueAccessor<const BoolTreeType>;

    // Optional accessors; only constructed when the corresponding reference
    // data has been registered via the set*() methods.
    std::unique_ptr<FloatTreeAccessor> spatialAdaptivityAcc;
    if (mSpatialAdaptivityTree && mSpatialAdaptivityTransform) {
        spatialAdaptivityAcc.reset(new FloatTreeAccessor(*mSpatialAdaptivityTree));
    }

    std::unique_ptr<BoolTreeAccessor> maskAcc;
    if (mMaskTree) {
        maskAcc.reset(new BoolTreeAccessor(*mMaskTree));
    }

    std::unique_ptr<Int16TreeAccessor> refSignFlagsAcc;
    if (mRefSignFlagsTree) {
        refSignFlagsAcc.reset(new Int16TreeAccessor(*mRefSignFlagsTree));
    }

    InputTreeAccessor inputAcc(*mInputTree);
    Index32TreeAccessor pointIndexAcc(*mPointIndexTree);

    // Scratch leaf reused across iterations: active == "do not merge".
    BoolLeafNodeType mask;

    // Bool grids have no meaningful gradient direction; treat as inverted.
    const bool invertGradientDir = mInvertSurfaceOrientation || isBoolValue<InputValueType>();
    std::unique_ptr<Vec3sLeafNodeType> gradientNode;

    Coord ijk, end;
    const int LeafDim = InputLeafNodeType::DIM;

    for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

        mask.setValuesOff();

        const Int16LeafNodeType& signFlagsNode = *mSignFlagsNodes[n];
        Index32LeafNodeType& pointIndexNode = *mPointIndexNodes[n];

        const Coord& origin = pointIndexNode.origin();
        end[0] = origin[0] + LeafDim;
        end[1] = origin[1] + LeafDim;
        end[2] = origin[2] + LeafDim;

        // Mask off seam line adjacent voxels
        if (maskAcc) {
            const BoolLeafNodeType* maskLeaf = maskAcc->probeConstLeaf(origin);
            if (maskLeaf != nullptr) {
                for (typename BoolLeafNodeType::ValueOnCIter it = maskLeaf->cbeginValueOn();
                    it; ++it)
                {
                    // "& ~1u" snaps the coordinate to its 2x2x2 sub-block.
                    mask.setActiveState(it.getCoord() & ~1u, true);
                }
            }
        }

        // Leaf nodes absent from the reference sign data are interior and may
        // use a different (usually higher) adaptivity.
        float adaptivity = (refSignFlagsAcc && !refSignFlagsAcc->probeConstLeaf(origin)) ?
            mInternalAdaptivity : mSurfaceAdaptivity;

        bool useGradients = adaptivity < 1.0f;

        // Set region adaptivity
        FloatLeafNodeType adaptivityLeaf(origin, adaptivity);

        if (spatialAdaptivityAcc) {
            useGradients = false;
            for (Index offset = 0; offset < FloatLeafNodeType::NUM_VALUES; ++offset) {
                ijk = adaptivityLeaf.offsetToGlobalCoord(offset);
                // Sample the spatial adaptivity grid in its own index space.
                ijk = mSpatialAdaptivityTransform->worldToIndexCellCentered(
                    mInputTransform->indexToWorld(ijk));
                float weight = spatialAdaptivityAcc->getValue(ijk);
                float adaptivityValue = weight * adaptivity;
                if (adaptivityValue < 1.0f) useGradients = true;
                adaptivityLeaf.setValueOnly(offset, adaptivityValue);
            }
        }

        // Mask off ambiguous voxels
        for (auto it = signFlagsNode.cbeginValueOn(); it; ++it) {
            const Int16 flags = it.getValue();
            const unsigned char signs = static_cast<unsigned char>(SIGNS & int(flags));

            // Non-adaptable sign configurations and multi-point cells cannot
            // be merged; neither can seam voxels.
            if ((flags & SEAM) || !sAdaptable[signs] || sEdgeGroupTable[signs][0] > 1) {
                mask.setActiveState(it.getCoord() & ~1u, true);
            } else if (flags & EDGES) {

                bool maskRegion = false;

                ijk = it.getCoord();
                if (!pointIndexAcc.isValueOn(ijk)) maskRegion = true;

                // Each active edge is shared with three neighbouring voxels;
                // a missing point index in any of them vetoes the merge.
                if (!maskRegion && flags & XEDGE) {
                    ijk[1] -= 1;
                    if (!maskRegion && !pointIndexAcc.isValueOn(ijk)) maskRegion = true;
                    ijk[2] -= 1;
                    if (!maskRegion && !pointIndexAcc.isValueOn(ijk)) maskRegion = true;
                    ijk[1] += 1;
                    if (!maskRegion && !pointIndexAcc.isValueOn(ijk)) maskRegion = true;
                    ijk[2] += 1;
                }

                if (!maskRegion && flags & YEDGE) {
                    ijk[2] -= 1;
                    if (!maskRegion && !pointIndexAcc.isValueOn(ijk)) maskRegion = true;
                    ijk[0] -= 1;
                    if (!maskRegion && !pointIndexAcc.isValueOn(ijk)) maskRegion = true;
                    ijk[2] += 1;
                    if (!maskRegion && !pointIndexAcc.isValueOn(ijk)) maskRegion = true;
                    ijk[0] += 1;
                }

                if (!maskRegion && flags & ZEDGE) {
                    ijk[1] -= 1;
                    if (!maskRegion && !pointIndexAcc.isValueOn(ijk)) maskRegion = true;
                    ijk[0] -= 1;
                    if (!maskRegion && !pointIndexAcc.isValueOn(ijk)) maskRegion = true;
                    ijk[1] += 1;
                    if (!maskRegion && !pointIndexAcc.isValueOn(ijk)) maskRegion = true;
                    ijk[0] += 1;
                }

                if (maskRegion) {
                    mask.setActiveState(it.getCoord() & ~1u, true);
                }
            }
        }

        // Mask off topologically ambiguous 2x2x2 voxel sub-blocks
        int dim = 2;
        for (ijk[0] = origin[0]; ijk[0] < end[0]; ijk[0] += dim) {
            for (ijk[1] = origin[1]; ijk[1] < end[1]; ijk[1] += dim) {
                for (ijk[2] = origin[2]; ijk[2] < end[2]; ijk[2] += dim) {
                    if (!mask.isValueOn(ijk) && isNonManifold(inputAcc, ijk, mIsovalue, dim)) {
                        mask.setActiveState(ijk, true);
                    }
                }
            }
        }

        // Compute the gradient for the remaining voxels
        if (useGradients) {
            if (gradientNode) {
                gradientNode->setValuesOff();
            } else {
                gradientNode.reset(new Vec3sLeafNodeType());
            }

            for (auto it = signFlagsNode.cbeginValueOn(); it; ++it) {
                ijk = it.getCoord();
                if (!mask.isValueOn(ijk & ~1u)) {
                    Vec3sType dir(math::ISGradient<math::CD_2ND>::result(inputAcc, ijk));
                    dir.normalize();
                    if (invertGradientDir) {
                        dir = -dir;
                    }
                    gradientNode->setValueOn(it.pos(), dir);
                }
            }
        }

        // Merge regions: double the block size each round (dim continues from
        // the 2x2x2 pass above), masking parent blocks whose children failed.
        int regionId = 1;
        for ( ; dim <= LeafDim; dim = dim << 1) {
            const unsigned coordMask = ~((dim << 1) - 1); // next-level block origin
            for (ijk[0] = origin[0]; ijk[0] < end[0]; ijk[0] += dim) {
                for (ijk[1] = origin[1]; ijk[1] < end[1]; ijk[1] += dim) {
                    for (ijk[2] = origin[2]; ijk[2] < end[2]; ijk[2] += dim) {

                        adaptivity = adaptivityLeaf.getValue(ijk);

                        if (mask.isValueOn(ijk)
                            || isNonManifold(inputAcc, ijk, mIsovalue, dim)
                            || (useGradients && !isMergable(*gradientNode, ijk, dim, adaptivity)))
                        {
                            mask.setActiveState(ijk & coordMask, true);
                        } else {
                            mergeVoxels(pointIndexNode, ijk, dim, regionId++);
                        }
                    }
                }
            }
        }
    }
} // MergeVoxelRegions::operator()
////////////////////////////////////////
// Constructs quads
/// Appends quads to a PolygonPool; used for non-adaptive (uniform) meshing
/// where every primitive is a quad.
struct UniformPrimBuilder
{
    UniformPrimBuilder(): mIdx(0), mPolygonPool(nullptr) {}

    /// Bind the output pool and reserve room for up to @a upperBound quads.
    void init(const size_t upperBound, PolygonPool& quadPool)
    {
        mPolygonPool = &quadPool;
        mPolygonPool->resetQuads(upperBound);
        mIdx = 0;
    }

    /// Append one quad; @a reverse flips the winding order.
    template<typename IndexType>
    void addPrim(const math::Vec4<IndexType>& verts, bool reverse, char flags = 0)
    {
        Vec4I& quad = mPolygonPool->quad(mIdx);
        if (reverse) {
            quad[0] = verts[3];
            quad[1] = verts[2];
            quad[2] = verts[1];
            quad[3] = verts[0];
        } else {
            quad = verts;
        }
        mPolygonPool->quadFlags(mIdx) = flags;
        ++mIdx;
    }

    /// Release the unused tail of the reservation.
    void done()
    {
        mPolygonPool->trimQuads(mIdx);
    }

private:
    size_t mIdx;
    PolygonPool* mPolygonPool;
};
// Constructs quads and triangles
/// Appends quads and triangles to a PolygonPool; used for adaptive meshing,
/// where a quad with exactly one pair of identical vertices degenerates into
/// a triangle.
struct AdaptivePrimBuilder
{
    AdaptivePrimBuilder() : mQuadIdx(0), mTriangleIdx(0), mPolygonPool(nullptr) {}

    /// Bind the output pool and reserve @a upperBound slots for both quads
    /// and triangles (each list is trimmed to its real size in done()).
    void init(const size_t upperBound, PolygonPool& polygonPool)
    {
        mPolygonPool = &polygonPool;
        mPolygonPool->resetQuads(upperBound);
        mPolygonPool->resetTriangles(upperBound);

        mQuadIdx = 0;
        mTriangleIdx = 0;
    }

    /// Append one primitive.  Four distinct vertices emit a quad; exactly one
    /// coincident pair emits a triangle; other degeneracies are dropped.
    /// @a reverse flips the winding order.
    template<typename IndexType>
    void addPrim(const math::Vec4<IndexType>& verts, bool reverse, char flags = 0)
    {
        // All four vertices distinct -> quad.
        if (verts[0] != verts[1] && verts[0] != verts[2] && verts[0] != verts[3]
            && verts[1] != verts[2] && verts[1] != verts[3] && verts[2] != verts[3]) {
            mPolygonPool->quadFlags(mQuadIdx) = flags;
            addQuad(verts, reverse);
        } else if (
            // verts[0] == verts[3]: collapse to triangle (0,1,2).
            verts[0] == verts[3] &&
            verts[1] != verts[2] &&
            verts[1] != verts[0] &&
            verts[2] != verts[0]) {
            mPolygonPool->triangleFlags(mTriangleIdx) = flags;
            addTriangle(verts[0], verts[1], verts[2], reverse);
        } else if (
            // verts[1] == verts[2]: collapse to triangle (0,1,3).
            verts[1] == verts[2] &&
            verts[0] != verts[3] &&
            verts[0] != verts[1] &&
            verts[3] != verts[1]) {
            mPolygonPool->triangleFlags(mTriangleIdx) = flags;
            addTriangle(verts[0], verts[1], verts[3], reverse);
        } else if (
            // verts[0] == verts[1]: collapse to triangle (0,2,3).
            verts[0] == verts[1] &&
            verts[2] != verts[3] &&
            verts[2] != verts[0] &&
            verts[3] != verts[0]) {
            mPolygonPool->triangleFlags(mTriangleIdx) = flags;
            addTriangle(verts[0], verts[2], verts[3], reverse);
        } else if (
            // verts[2] == verts[3]: collapse to triangle (0,1,2).
            verts[2] == verts[3] &&
            verts[0] != verts[1] &&
            verts[0] != verts[2] &&
            verts[1] != verts[2]) {
            mPolygonPool->triangleFlags(mTriangleIdx) = flags;
            addTriangle(verts[0], verts[1], verts[2], reverse);
        }
    }

    /// Trim both primitive lists down to the number actually emitted.
    /// @note "trimTrinagles" is the PolygonPool API's historical spelling.
    void done()
    {
        mPolygonPool->trimQuads(mQuadIdx, /*reallocate=*/true);
        mPolygonPool->trimTrinagles(mTriangleIdx, /*reallocate=*/true);
    }

private:
    // Write one quad, optionally with reversed winding.
    template<typename IndexType>
    void addQuad(const math::Vec4<IndexType>& verts, bool reverse)
    {
        if (!reverse) {
            mPolygonPool->quad(mQuadIdx) = verts;
        } else {
            Vec4I& quad = mPolygonPool->quad(mQuadIdx);
            quad[0] = verts[3];
            quad[1] = verts[2];
            quad[2] = verts[1];
            quad[3] = verts[0];
        }
        ++mQuadIdx;
    }

    // Write one triangle; reversal swaps v0 and v2 (v1 is the pivot).
    void addTriangle(unsigned v0, unsigned v1, unsigned v2, bool reverse)
    {
        Vec3I& prim = mPolygonPool->triangle(mTriangleIdx);

        prim[1] = v1;

        if (!reverse) {
            prim[0] = v0;
            prim[2] = v2;
        } else {
            prim[0] = v2;
            prim[2] = v0;
        }
        ++mTriangleIdx;
    }

    size_t mQuadIdx, mTriangleIdx;
    PolygonPool *mPolygonPool;
};
/// @brief Emits the polygons spanned by the active edges (+x, +y, +z) of the
/// voxel at @a ijk.  Each active edge produces a quad connecting the mesh
/// points of this voxel and its three edge-sharing neighbours.
/// @param flags     sign flags of this voxel (edge bits, INSIDE, SEAM)
/// @param refFlags  sign flags from the reference mesh; a set edge bit there
///                  tags the emitted primitive as exterior
/// @param offsets   per-axis point offsets selecting this voxel's own point
///                  when its cell has multiple edge groups
template<typename SignAccT, typename IdxAccT, typename PrimBuilder>
inline void
constructPolygons(
    bool invertSurfaceOrientation,
    Int16 flags,
    Int16 refFlags,
    const Vec3i& offsets,
    const Coord& ijk,
    const SignAccT& signAcc,
    const IdxAccT& idxAcc,
    PrimBuilder& mesher)
{
    using IndexType = typename IdxAccT::ValueType;

    // Base point index of this voxel; bail out if no point was generated.
    IndexType v0 = IndexType(util::INVALID_IDX);
    const bool isActive = idxAcc.probeValue(ijk, v0);
    if (isActive == false || v0 == IndexType(util::INVALID_IDX)) return;

    // tag[0]: interior primitive flags, tag[1]: exterior (reference boundary).
    char tag[2];
    tag[0] = (flags & SEAM) ? POLYFLAG_FRACTURE_SEAM : 0;
    tag[1] = tag[0] | char(POLYFLAG_EXTERIOR);

    bool isInside = flags & INSIDE;
    isInside = invertSurfaceOrientation ? !isInside : isInside;

    Coord coord = ijk;
    math::Vec4<IndexType> quad(0,0,0,0);

    if (flags & XEDGE) {

        quad[0] = v0 + offsets[0];

        // i, j-1, k
        coord[1]--;
        bool activeValues = idxAcc.probeValue(coord, quad[1]);
        uint8_t cell = uint8_t(SIGNS & signAcc.getValue(coord));
        // Multi-point cells: select the point of the edge group crossing
        // this edge (column index in sEdgeGroupTable identifies the edge).
        quad[1] += sEdgeGroupTable[cell][0] > 1 ? sEdgeGroupTable[cell][5] - 1 : 0;

        // i, j-1, k-1
        coord[2]--;
        activeValues = activeValues && idxAcc.probeValue(coord, quad[2]);
        cell = uint8_t(SIGNS & signAcc.getValue(coord));
        quad[2] += sEdgeGroupTable[cell][0] > 1 ? sEdgeGroupTable[cell][7] - 1 : 0;

        // i, j, k-1
        coord[1]++;
        activeValues = activeValues && idxAcc.probeValue(coord, quad[3]);
        cell = uint8_t(SIGNS & signAcc.getValue(coord));
        quad[3] += sEdgeGroupTable[cell][0] > 1 ? sEdgeGroupTable[cell][3] - 1 : 0;

        // Emit only if all four corner voxels have points.
        if (activeValues) {
            mesher.addPrim(quad, isInside, tag[bool(refFlags & XEDGE)]);
        }

        coord[2]++; // i, j, k
    }

    if (flags & YEDGE) {

        quad[0] = v0 + offsets[1];

        // i, j, k-1
        coord[2]--;
        bool activeValues = idxAcc.probeValue(coord, quad[1]);
        uint8_t cell = uint8_t(SIGNS & signAcc.getValue(coord));
        quad[1] += sEdgeGroupTable[cell][0] > 1 ? sEdgeGroupTable[cell][12] - 1 : 0;

        // i-1, j, k-1
        coord[0]--;
        activeValues = activeValues && idxAcc.probeValue(coord, quad[2]);
        cell = uint8_t(SIGNS & signAcc.getValue(coord));
        quad[2] += sEdgeGroupTable[cell][0] > 1 ? sEdgeGroupTable[cell][11] - 1 : 0;

        // i-1, j, k
        coord[2]++;
        activeValues = activeValues && idxAcc.probeValue(coord, quad[3]);
        cell = uint8_t(SIGNS & signAcc.getValue(coord));
        quad[3] += sEdgeGroupTable[cell][0] > 1 ? sEdgeGroupTable[cell][10] - 1 : 0;

        if (activeValues) {
            mesher.addPrim(quad, isInside, tag[bool(refFlags & YEDGE)]);
        }

        coord[0]++; // i, j, k
    }

    if (flags & ZEDGE) {

        quad[0] = v0 + offsets[2];

        // i, j-1, k
        coord[1]--;
        bool activeValues = idxAcc.probeValue(coord, quad[1]);
        uint8_t cell = uint8_t(SIGNS & signAcc.getValue(coord));
        quad[1] += sEdgeGroupTable[cell][0] > 1 ? sEdgeGroupTable[cell][8] - 1 : 0;

        // i-1, j-1, k
        coord[0]--;
        activeValues = activeValues && idxAcc.probeValue(coord, quad[2]);
        cell = uint8_t(SIGNS & signAcc.getValue(coord));
        quad[2] += sEdgeGroupTable[cell][0] > 1 ? sEdgeGroupTable[cell][6] - 1 : 0;

        // i-1, j, k
        coord[1]++;
        activeValues = activeValues && idxAcc.probeValue(coord, quad[3]);
        cell = uint8_t(SIGNS & signAcc.getValue(coord));
        quad[3] += sEdgeGroupTable[cell][0] > 1 ? sEdgeGroupTable[cell][2] - 1 : 0;

        // Note the inverted winding (!isInside) for the z edge.
        if (activeValues) {
            mesher.addPrim(quad, !isInside, tag[bool(refFlags & ZEDGE)]);
        }
    }
}
////////////////////////////////////////
/// @brief Parallel reduction that deactivates (fills with false) border
/// regions of @a mask around active tiles whose faces may intersect the
/// isosurface.
template<typename InputTreeType>
struct MaskTileBorders
{
    using InputValueType = typename InputTreeType::ValueType;
    using BoolTreeType = typename InputTreeType::template ValueConverter<bool>::Type;

    /// @param tileArray  array of tile records (min_x, min_y, min_z, edge_length)
    MaskTileBorders(const InputTreeType& inputTree, InputValueType iso,
        BoolTreeType& mask, const Vec4i* tileArray)
        : mInputTree(&inputTree)
        , mIsovalue(iso)
        , mTempMask(false)
        , mMask(&mask)
        , mTileArray(tileArray)
    {
    }

    /// Splitting constructor: each split accumulates into its own mask,
    /// merged back in join().
    MaskTileBorders(MaskTileBorders& rhs, tbb::split)
        : mInputTree(rhs.mInputTree)
        , mIsovalue(rhs.mIsovalue)
        , mTempMask(false)
        , mMask(&mTempMask)
        , mTileArray(rhs.mTileArray)
    {
    }

    void join(MaskTileBorders& rhs) { mMask->merge(*rhs.mMask); }

    void operator()(const tbb::blocked_range<size_t>&);

private:
    InputTreeType const * const mInputTree;
    InputValueType const mIsovalue;
    BoolTreeType mTempMask;
    BoolTreeType * const mMask;   // points at mTempMask for split instances
    Vec4i const * const mTileArray;
}; // MaskTileBorders
/// @brief For each tile in @a range, examines its six faces.  Where the
/// neighbouring value differs in inside/outside classification (a potential
/// surface crossing), a one-voxel-thick slab straddling that face is filled
/// (inactive, value false) in the mask tree.
template<typename InputTreeType>
void
MaskTileBorders<InputTreeType>::operator()(const tbb::blocked_range<size_t>& range)
{
    tree::ValueAccessor<const InputTreeType> inputTreeAcc(*mInputTree);

    CoordBBox region, bbox;
    Coord ijk, nijk;

    for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

        // Reconstruct the tile bounding box from its (origin, edge) record.
        const Vec4i& tile = mTileArray[n];

        bbox.min()[0] = tile[0];
        bbox.min()[1] = tile[1];
        bbox.min()[2] = tile[2];
        bbox.max() = bbox.min();
        bbox.max().offset(tile[3]);

        InputValueType value = mInputTree->background();

        const bool isInside = isInsideValue(inputTreeAcc.getValue(bbox.min()), mIsovalue);
        const int valueDepth = inputTreeAcc.getValueDepth(bbox.min());

        // eval x-edges

        // +x face: compare against the neighbour just past the face.  Only
        // neighbours at the same depth or deeper (smaller tiles / voxels)
        // are considered; coarser tiles handle the border themselves.
        ijk = bbox.max();
        nijk = ijk;
        ++nijk[0];

        bool processRegion = true;
        if (valueDepth >= inputTreeAcc.getValueDepth(nijk)) {
            processRegion = isInside != isInsideValue(inputTreeAcc.getValue(nijk), mIsovalue);
        }

        if (processRegion) {
            region = bbox;
            region.expand(1);
            region.min()[0] = region.max()[0] = ijk[0];
            mMask->fill(region, false);
        }

        // -x face: additionally require the neighbour value to be inactive
        // (probeValue returns the active state and fetches the value).
        ijk = bbox.min();
        --ijk[0];

        processRegion = true;
        if (valueDepth >= inputTreeAcc.getValueDepth(ijk)) {
            processRegion = (!inputTreeAcc.probeValue(ijk, value)
                && isInside != isInsideValue(value, mIsovalue));
        }

        if (processRegion) {
            region = bbox;
            region.expand(1);
            region.min()[0] = region.max()[0] = ijk[0];
            mMask->fill(region, false);
        }

        // eval y-edges

        ijk = bbox.max();
        nijk = ijk;
        ++nijk[1];

        processRegion = true;
        if (valueDepth >= inputTreeAcc.getValueDepth(nijk)) {
            processRegion = isInside != isInsideValue(inputTreeAcc.getValue(nijk), mIsovalue);
        }

        if (processRegion) {
            region = bbox;
            region.expand(1);
            region.min()[1] = region.max()[1] = ijk[1];
            mMask->fill(region, false);
        }

        ijk = bbox.min();
        --ijk[1];

        processRegion = true;
        if (valueDepth >= inputTreeAcc.getValueDepth(ijk)) {
            processRegion = (!inputTreeAcc.probeValue(ijk, value)
                && isInside != isInsideValue(value, mIsovalue));
        }

        if (processRegion) {
            region = bbox;
            region.expand(1);
            region.min()[1] = region.max()[1] = ijk[1];
            mMask->fill(region, false);
        }

        // eval z-edges

        ijk = bbox.max();
        nijk = ijk;
        ++nijk[2];

        processRegion = true;
        if (valueDepth >= inputTreeAcc.getValueDepth(nijk)) {
            processRegion = isInside != isInsideValue(inputTreeAcc.getValue(nijk), mIsovalue);
        }

        if (processRegion) {
            region = bbox;
            region.expand(1);
            region.min()[2] = region.max()[2] = ijk[2];
            mMask->fill(region, false);
        }

        ijk = bbox.min();
        --ijk[2];

        processRegion = true;
        if (valueDepth >= inputTreeAcc.getValueDepth(ijk)) {
            processRegion = (!inputTreeAcc.probeValue(ijk, value)
                && isInside != isInsideValue(value, mIsovalue));
        }

        if (processRegion) {
            region = bbox;
            region.expand(1);
            region.min()[2] = region.max()[2] = ijk[2];
            mMask->fill(region, false);
        }
    }
} // MaskTileBorders::operator()
/// @brief Records every active tile of @a inputTree (excluding leaf voxels)
/// and runs MaskTileBorders over them in parallel, marking border regions in
/// @a mask where the surface may cross a tile face.
template<typename InputTreeType>
inline void
maskActiveTileBorders(const InputTreeType& inputTree, typename InputTreeType::ValueType iso,
    typename InputTreeType::template ValueConverter<bool>::Type& mask)
{
    // Restrict the iterator to tiles (everything above the leaf level).
    typename InputTreeType::ValueOnCIter tileIter(inputTree);
    tileIter.setMaxDepth(InputTreeType::ValueOnCIter::LEAF_DEPTH - 1);

    // First pass: count the tiles so storage can be allocated up front.
    size_t tileCount = 0;
    for ( ; tileIter; ++tileIter) {
        ++tileCount;
    }
    if (tileCount == 0) return;

    // Second pass: record each tile as (min_x, min_y, min_z, edge_length).
    std::unique_ptr<Vec4i[]> tiles(new Vec4i[tileCount]);

    CoordBBox bbox;
    size_t idx = 0;

    tileIter = inputTree.cbeginValueOn();
    tileIter.setMaxDepth(InputTreeType::ValueOnCIter::LEAF_DEPTH - 1);

    for ( ; tileIter; ++tileIter) {
        tileIter.getBoundingBox(bbox);
        Vec4i& tile = tiles[idx++];
        tile[0] = bbox.min()[0];
        tile[1] = bbox.min()[1];
        tile[2] = bbox.min()[2];
        tile[3] = bbox.max()[0] - bbox.min()[0];
    }

    MaskTileBorders<InputTreeType> op(inputTree, iso, mask, tiles.get());
    tbb::parallel_reduce(tbb::blocked_range<size_t>(0, tileCount), op);
}
////////////////////////////////////////
// Utility class for the volumeToMesh wrapper
/// Parallel copy of a PointList into a std::vector<Vec3s>
/// (used by the volumeToMesh convenience wrapper).
class PointListCopy
{
public:
    PointListCopy(const PointList& pointsIn, std::vector<Vec3s>& pointsOut)
        : mPointsIn(pointsIn)
        , mPointsOut(pointsOut)
    {
    }

    /// Copy the points covered by @a range.
    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        for (size_t i = range.begin(), last = range.end(); i != last; ++i) {
            mPointsOut[i] = mPointsIn[i];
        }
    }

private:
    const PointList& mPointsIn;
    std::vector<Vec3s>& mPointsOut;
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// @brief Precomputed linear voxel offsets for a leaf node: the interior
/// core, the six boundary faces, and the voxels that have an in-node
/// neighbour in the +x, +y or +z direction.  Populate with
/// constructOffsetList() before use.
struct LeafNodeVoxelOffsets
{
    using IndexVector = std::vector<Index>;

    /// Populate all offset lists for the given leaf node type.
    template<typename LeafNodeType>
    void constructOffsetList();

    /// Return internal core voxel offsets.
    const IndexVector& core() const { return mCore; }

    /// Return front face voxel offsets.
    const IndexVector& minX() const { return mMinX; }

    /// Return back face voxel offsets.
    const IndexVector& maxX() const { return mMaxX; }

    /// Return bottom face voxel offsets.
    const IndexVector& minY() const { return mMinY; }

    /// Return top face voxel offsets.
    const IndexVector& maxY() const { return mMaxY; }

    /// Return left face voxel offsets.
    const IndexVector& minZ() const { return mMinZ; }

    /// Return right face voxel offsets.
    const IndexVector& maxZ() const { return mMaxZ; }

    /// Return voxel offsets with internal neighbours in x + 1.
    const IndexVector& internalNeighborsX() const { return mInternalNeighborsX; }

    /// Return voxel offsets with internal neighbours in y + 1.
    const IndexVector& internalNeighborsY() const { return mInternalNeighborsY; }

    /// Return voxel offsets with internal neighbours in z + 1.
    const IndexVector& internalNeighborsZ() const { return mInternalNeighborsZ; }

private:
    IndexVector mCore, mMinX, mMaxX, mMinY, mMaxY, mMinZ, mMaxZ,
        mInternalNeighborsX, mInternalNeighborsY, mInternalNeighborsZ;
}; // struct LeafNodeVoxelOffsets
/// @brief Builds every offset list for a leaf of the given type.  Offsets are
/// linear voxel indices: (x << 2*LOG2DIM) + (y << LOG2DIM) + z.
template<typename LeafNodeType>
inline void
LeafNodeVoxelOffsets::constructOffsetList()
{
    // internal core voxels
    mCore.clear();
    // NOTE(review): the core holds (DIM-2)^3 voxels but only (DIM-2)^2 are
    // reserved; harmless (the vector grows), yet looks like an oversight.
    mCore.reserve((LeafNodeType::DIM - 2) * (LeafNodeType::DIM - 2));

    for (Index x = 1; x < (LeafNodeType::DIM - 1); ++x) {
        const Index offsetX = x << (2 * LeafNodeType::LOG2DIM);
        for (Index y = 1; y < (LeafNodeType::DIM - 1); ++y) {
            const Index offsetXY = offsetX + (y << LeafNodeType::LOG2DIM);
            for (Index z = 1; z < (LeafNodeType::DIM - 1); ++z) {
                mCore.push_back(offsetXY + z);
            }
        }
    }

    // internal neighbors in x + 1: all voxels except the max-x face.
    mInternalNeighborsX.clear();
    mInternalNeighborsX.reserve(LeafNodeType::SIZE - (LeafNodeType::DIM * LeafNodeType::DIM));

    for (Index x = 0; x < (LeafNodeType::DIM - 1); ++x) {
        const Index offsetX = x << (2 * LeafNodeType::LOG2DIM);
        for (Index y = 0; y < LeafNodeType::DIM; ++y) {
            const Index offsetXY = offsetX + (y << LeafNodeType::LOG2DIM);
            for (Index z = 0; z < LeafNodeType::DIM; ++z) {
                mInternalNeighborsX.push_back(offsetXY + z);
            }
        }
    }

    // internal neighbors in y + 1: all voxels except the max-y face.
    mInternalNeighborsY.clear();
    mInternalNeighborsY.reserve(LeafNodeType::SIZE - (LeafNodeType::DIM * LeafNodeType::DIM));

    for (Index x = 0; x < LeafNodeType::DIM; ++x) {
        const Index offsetX = x << (2 * LeafNodeType::LOG2DIM);
        for (Index y = 0; y < (LeafNodeType::DIM - 1); ++y) {
            const Index offsetXY = offsetX + (y << LeafNodeType::LOG2DIM);
            for (Index z = 0; z < LeafNodeType::DIM; ++z) {
                mInternalNeighborsY.push_back(offsetXY + z);
            }
        }
    }

    // internal neighbors in z + 1: all voxels except the max-z face.
    mInternalNeighborsZ.clear();
    mInternalNeighborsZ.reserve(LeafNodeType::SIZE - (LeafNodeType::DIM * LeafNodeType::DIM));

    for (Index x = 0; x < LeafNodeType::DIM; ++x) {
        const Index offsetX = x << (2 * LeafNodeType::LOG2DIM);
        for (Index y = 0; y < LeafNodeType::DIM; ++y) {
            const Index offsetXY = offsetX + (y << LeafNodeType::LOG2DIM);
            for (Index z = 0; z < (LeafNodeType::DIM - 1); ++z) {
                mInternalNeighborsZ.push_back(offsetXY + z);
            }
        }
    }

    // min x (x == 0 face)
    mMinX.clear();
    mMinX.reserve(LeafNodeType::DIM * LeafNodeType::DIM);
    {
        for (Index y = 0; y < LeafNodeType::DIM; ++y) {
            const Index offsetXY = (y << LeafNodeType::LOG2DIM);
            for (Index z = 0; z < LeafNodeType::DIM; ++z) {
                mMinX.push_back(offsetXY + z);
            }
        }
    }

    // max x (x == DIM-1 face); ordered to pair element-wise with mMinX.
    mMaxX.clear();
    mMaxX.reserve(LeafNodeType::DIM * LeafNodeType::DIM);
    {
        const Index offsetX = (LeafNodeType::DIM - 1) << (2 * LeafNodeType::LOG2DIM);
        for (Index y = 0; y < LeafNodeType::DIM; ++y) {
            const Index offsetXY = offsetX + (y << LeafNodeType::LOG2DIM);
            for (Index z = 0; z < LeafNodeType::DIM; ++z) {
                mMaxX.push_back(offsetXY + z);
            }
        }
    }

    // min y (y == 0 face)
    mMinY.clear();
    mMinY.reserve(LeafNodeType::DIM * LeafNodeType::DIM);
    {
        for (Index x = 0; x < LeafNodeType::DIM; ++x) {
            const Index offsetX = x << (2 * LeafNodeType::LOG2DIM);
            // NOTE(review): unlike the other faces, this z loop stops at
            // DIM-1, so the z == DIM-1 column of the y faces is never
            // recorded and the DIM*DIM reservation is not filled.  mMinY and
            // mMaxY remain pairwise consistent with each other, but this is
            // inconsistent with minX/maxX/minZ/maxZ -- TODO confirm intent.
            for (Index z = 0; z < (LeafNodeType::DIM - 1); ++z) {
                mMinY.push_back(offsetX + z);
            }
        }
    }

    // max y (y == DIM-1 face); ordered to pair element-wise with mMinY.
    mMaxY.clear();
    mMaxY.reserve(LeafNodeType::DIM * LeafNodeType::DIM);
    {
        const Index offsetY = (LeafNodeType::DIM - 1) << LeafNodeType::LOG2DIM;
        for (Index x = 0; x < LeafNodeType::DIM; ++x) {
            const Index offsetX = x << (2 * LeafNodeType::LOG2DIM);
            // NOTE(review): same DIM-1 bound as mMinY above -- TODO confirm.
            for (Index z = 0; z < (LeafNodeType::DIM - 1); ++z) {
                mMaxY.push_back(offsetX + offsetY + z);
            }
        }
    }

    // min z (z == 0 face)
    mMinZ.clear();
    mMinZ.reserve(LeafNodeType::DIM * LeafNodeType::DIM);
    {
        for (Index x = 0; x < LeafNodeType::DIM; ++x) {
            const Index offsetX = x << (2 * LeafNodeType::LOG2DIM);
            for (Index y = 0; y < LeafNodeType::DIM; ++y) {
                const Index offsetXY = offsetX + (y << LeafNodeType::LOG2DIM);
                mMinZ.push_back(offsetXY);
            }
        }
    }

    // max z (z == DIM-1 face); ordered to pair element-wise with mMinZ.
    mMaxZ.clear();
    mMaxZ.reserve(LeafNodeType::DIM * LeafNodeType::DIM);
    {
        for (Index x = 0; x < LeafNodeType::DIM; ++x) {
            const Index offsetX = x << (2 * LeafNodeType::LOG2DIM);
            for (Index y = 0; y < LeafNodeType::DIM; ++y) {
                const Index offsetXY = offsetX + (y << LeafNodeType::LOG2DIM);
                mMaxZ.push_back(offsetXY + (LeafNodeType::DIM - 1));
            }
        }
    }
}
////////////////////////////////////////
/// Utility class that marks all voxels sharing a given voxel edge.
/// Activates the four voxels that share the edge emanating from a voxel in
/// the +x, +y or +z direction; the direction is fixed at compile time by the
/// _AXIS template parameter (0 = x, 1 = y, 2 = z).
template<typename AccessorT, int _AXIS>
struct VoxelEdgeAccessor {

    enum { AXIS = _AXIS };
    AccessorT& acc;

    VoxelEdgeAccessor(AccessorT& _acc) : acc(_acc) {}

    void set(Coord ijk) {
        // The voxel at ijk always touches its own edge.
        acc.setActiveState(ijk);
        if (_AXIS == 0) { // edge along x + 1
            --ijk[1]; // i, j-1, k
            acc.setActiveState(ijk);
            --ijk[2]; // i, j-1, k-1
            acc.setActiveState(ijk);
            ++ijk[1]; // i, j, k-1
            acc.setActiveState(ijk);
        } else if (_AXIS == 1) { // edge along y + 1
            --ijk[2]; // i, j, k-1
            acc.setActiveState(ijk);
            --ijk[0]; // i-1, j, k-1
            acc.setActiveState(ijk);
            ++ijk[2]; // i-1, j, k
            acc.setActiveState(ijk);
        } else { // edge along z + 1
            --ijk[1]; // i, j-1, k
            acc.setActiveState(ijk);
            --ijk[0]; // i-1, j-1, k
            acc.setActiveState(ijk);
            ++ijk[1]; // i-1, j, k
            acc.setActiveState(ijk);
        }
    }
};
/// Utility method to check for sign changes along the x + 1, y + 1 or z + 1 directions.
/// The direction is determined by the @a edgeAcc parameter. Only voxels that have internal
/// neighbours are evaluated.
template<typename VoxelEdgeAcc, typename LeafNode>
void
evalInternalVoxelEdges(VoxelEdgeAcc& edgeAcc, const LeafNode& leafnode,
const LeafNodeVoxelOffsets& voxels, const typename LeafNode::ValueType iso)
{
Index nvo = 1; // neighbour voxel offset, z + 1 direction assumed initially.
const std::vector<Index>* offsets = &voxels.internalNeighborsZ();
if (VoxelEdgeAcc::AXIS == 0) { // x + 1 direction
nvo = LeafNode::DIM * LeafNode::DIM;
offsets = &voxels.internalNeighborsX();
} else if (VoxelEdgeAcc::AXIS == 1) { // y + 1 direction
nvo = LeafNode::DIM;
offsets = &voxels.internalNeighborsY();
}
for (size_t n = 0, N = offsets->size(); n < N; ++n) {
const Index& pos = (*offsets)[n];
bool isActive = leafnode.isValueOn(pos) || leafnode.isValueOn(pos + nvo);
if (isActive && (isInsideValue(leafnode.getValue(pos), iso) !=
isInsideValue(leafnode.getValue(pos + nvo), iso))) {
edgeAcc.set(leafnode.offsetToGlobalCoord(pos));
}
}
}
/// Utility method to check for sign changes along the x + 1, y + 1 or z + 1 directions.
/// The direction is determined by the @a edgeAcc parameter. All voxels that reside in the
/// specified leafnode face: back, top or right are evaluated.
template<typename LeafNode, typename TreeAcc, typename VoxelEdgeAcc>
void
evalExtrenalVoxelEdges(VoxelEdgeAcc& edgeAcc, TreeAcc& acc, const LeafNode& lhsNode,
const LeafNodeVoxelOffsets& voxels, const typename LeafNode::ValueType iso)
{
const std::vector<Index>* lhsOffsets = &voxels.maxX();
const std::vector<Index>* rhsOffsets = &voxels.minX();
Coord ijk = lhsNode.origin();
if (VoxelEdgeAcc::AXIS == 0) { // back leafnode face
ijk[0] += LeafNode::DIM;
} else if (VoxelEdgeAcc::AXIS == 1) { // top leafnode face
ijk[1] += LeafNode::DIM;
lhsOffsets = &voxels.maxY();
rhsOffsets = &voxels.minY();
} else if (VoxelEdgeAcc::AXIS == 2) { // right leafnode face
ijk[2] += LeafNode::DIM;
lhsOffsets = &voxels.maxZ();
rhsOffsets = &voxels.minZ();
}
typename LeafNode::ValueType value;
const LeafNode* rhsNodePt = acc.probeConstLeaf(ijk);
if (rhsNodePt) {
for (size_t n = 0, N = lhsOffsets->size(); n < N; ++n) {
const Index& pos = (*lhsOffsets)[n];
bool isActive = lhsNode.isValueOn(pos) || rhsNodePt->isValueOn((*rhsOffsets)[n]);
if (isActive && (isInsideValue(lhsNode.getValue(pos), iso) !=
isInsideValue(rhsNodePt->getValue((*rhsOffsets)[n]), iso))) {
edgeAcc.set(lhsNode.offsetToGlobalCoord(pos));
}
}
} else if (!acc.probeValue(ijk, value)) {
const bool inside = isInsideValue(value, iso);
for (size_t n = 0, N = lhsOffsets->size(); n < N; ++n) {
const Index& pos = (*lhsOffsets)[n];
if (lhsNode.isValueOn(pos) && (inside != isInsideValue(lhsNode.getValue(pos), iso))) {
edgeAcc.set(lhsNode.offsetToGlobalCoord(pos));
}
}
}
}
/// Utility method to check for sign changes along the x - 1, y - 1 or z - 1 directions.
/// The direction is determined by the @a edgeAcc parameter. All voxels that reside in the
/// specified leafnode face: front, bottom or left are evaluated.
template<typename LeafNode, typename TreeAcc, typename VoxelEdgeAcc>
void
evalExtrenalVoxelEdgesInv(VoxelEdgeAcc& edgeAcc, TreeAcc& acc, const LeafNode& leafnode,
const LeafNodeVoxelOffsets& voxels, const typename LeafNode::ValueType iso)
{
Coord ijk = leafnode.origin();
if (VoxelEdgeAcc::AXIS == 0) --ijk[0]; // front leafnode face
else if (VoxelEdgeAcc::AXIS == 1) --ijk[1]; // bottom leafnode face
else if (VoxelEdgeAcc::AXIS == 2) --ijk[2]; // left leafnode face
typename LeafNode::ValueType value;
if (!acc.probeConstLeaf(ijk) && !acc.probeValue(ijk, value)) {
const std::vector<Index>* offsets = &voxels.internalNeighborsX();
if (VoxelEdgeAcc::AXIS == 1) offsets = &voxels.internalNeighborsY();
else if (VoxelEdgeAcc::AXIS == 2) offsets = &voxels.internalNeighborsZ();
const bool inside = isInsideValue(value, iso);
for (size_t n = 0, N = offsets->size(); n < N; ++n) {
const Index& pos = (*offsets)[n];
if (leafnode.isValueOn(pos)
&& (inside != isInsideValue(leafnode.getValue(pos), iso)))
{
ijk = leafnode.offsetToGlobalCoord(pos);
if (VoxelEdgeAcc::AXIS == 0) --ijk[0];
else if (VoxelEdgeAcc::AXIS == 1) --ijk[1];
else if (VoxelEdgeAcc::AXIS == 2) --ijk[2];
edgeAcc.set(ijk);
}
}
}
}
/// @brief Parallel reduction that activates, in a bool tree, every voxel
/// whose +x/+y/+z edge crosses the isosurface (evaluated per leaf node,
/// including edges that straddle leaf-node boundaries).
template<typename InputTreeType>
struct IdentifyIntersectingVoxels
{
    using InputLeafNodeType = typename InputTreeType::LeafNodeType;
    using InputValueType = typename InputLeafNodeType::ValueType;

    using BoolTreeType = typename InputTreeType::template ValueConverter<bool>::Type;

    IdentifyIntersectingVoxels(
        const InputTreeType& inputTree,
        const std::vector<const InputLeafNodeType*>& inputLeafNodes,
        BoolTreeType& intersectionTree,
        InputValueType iso);

    /// Splitting constructor: the split writes into its own private tree.
    IdentifyIntersectingVoxels(IdentifyIntersectingVoxels&, tbb::split);
    void operator()(const tbb::blocked_range<size_t>&);
    void join(const IdentifyIntersectingVoxels& rhs) {
        mIntersectionAccessor.tree().merge(rhs.mIntersectionAccessor.tree());
    }

private:
    tree::ValueAccessor<const InputTreeType> mInputAccessor;
    InputLeafNodeType const * const * const mInputNodes;

    BoolTreeType mIntersectionTree;   // only used by split instances
    tree::ValueAccessor<BoolTreeType> mIntersectionAccessor;

    LeafNodeVoxelOffsets mOffsetData;
    const LeafNodeVoxelOffsets* mOffsets; // shared; owned by the main instance

    InputValueType mIsovalue;
}; // struct IdentifyIntersectingVoxels
/// Main constructor: writes directly into the caller-supplied
/// @a intersectionTree and builds the leaf-local neighbor-offset tables.
template<typename InputTreeType>
IdentifyIntersectingVoxels<InputTreeType>::IdentifyIntersectingVoxels(
    const InputTreeType& inputTree,
    const std::vector<const InputLeafNodeType*>& inputLeafNodes,
    BoolTreeType& intersectionTree,
    InputValueType iso)
    : mInputAccessor(inputTree)
    , mInputNodes(inputLeafNodes.empty() ? nullptr : &inputLeafNodes.front())
    , mIntersectionTree(false)
    , mIntersectionAccessor(intersectionTree)
    , mOffsetData()
    , mOffsets(&mOffsetData)
    , mIsovalue(iso)
{
    mOffsetData.constructOffsetList<InputLeafNodeType>();
}
/// Split constructor (tbb::parallel_reduce): writes into a private
/// intersection tree and shares the main instance's offset tables.
template<typename InputTreeType>
IdentifyIntersectingVoxels<InputTreeType>::IdentifyIntersectingVoxels(
    IdentifyIntersectingVoxels& rhs, tbb::split)
    : mInputAccessor(rhs.mInputAccessor.tree())
    , mInputNodes(rhs.mInputNodes)
    , mIntersectionTree(false)
    , mIntersectionAccessor(mIntersectionTree) // use local tree.
    , mOffsetData()
    , mOffsets(rhs.mOffsets) // reference data from main instance.
    , mIsovalue(rhs.mIsovalue)
{
}
/// For every leaf node in the range, mark all voxels whose +x/+y/+z edges
/// cross the isovalue, including edges that extend into neighboring leaf
/// nodes; the -x/-y/-z faces are handled by the "Inv" variants below.
template<typename InputTreeType>
void
IdentifyIntersectingVoxels<InputTreeType>::operator()(const tbb::blocked_range<size_t>& range)
{
    // One edge accessor per axis, all writing into the same bool tree.
    VoxelEdgeAccessor<tree::ValueAccessor<BoolTreeType>, 0> xEdgeAcc(mIntersectionAccessor);
    VoxelEdgeAccessor<tree::ValueAccessor<BoolTreeType>, 1> yEdgeAcc(mIntersectionAccessor);
    VoxelEdgeAccessor<tree::ValueAccessor<BoolTreeType>, 2> zEdgeAcc(mIntersectionAccessor);
    for (size_t n = range.begin(); n != range.end(); ++n) {
        const InputLeafNodeType& node = *mInputNodes[n];
        // internal x + 1 voxel edges
        evalInternalVoxelEdges(xEdgeAcc, node, *mOffsets, mIsovalue);
        // internal y + 1 voxel edges
        evalInternalVoxelEdges(yEdgeAcc, node, *mOffsets, mIsovalue);
        // internal z + 1 voxel edges
        evalInternalVoxelEdges(zEdgeAcc, node, *mOffsets, mIsovalue);
        // external x + 1 voxels edges (back face)
        evalExtrenalVoxelEdges(xEdgeAcc, mInputAccessor, node, *mOffsets, mIsovalue);
        // external y + 1 voxels edges (top face)
        evalExtrenalVoxelEdges(yEdgeAcc, mInputAccessor, node, *mOffsets, mIsovalue);
        // external z + 1 voxels edges (right face)
        evalExtrenalVoxelEdges(zEdgeAcc, mInputAccessor, node, *mOffsets, mIsovalue);
        // The remaining edges are only checked if the leafnode neighbour, in the
        // corresponding direction, is an inactive tile.
        // external x - 1 voxels edges (front face)
        evalExtrenalVoxelEdgesInv(xEdgeAcc, mInputAccessor, node, *mOffsets, mIsovalue);
        // external y - 1 voxels edges (bottom face)
        evalExtrenalVoxelEdgesInv(yEdgeAcc, mInputAccessor, node, *mOffsets, mIsovalue);
        // external z - 1 voxels edges (left face)
        evalExtrenalVoxelEdgesInv(zEdgeAcc, mInputAccessor, node, *mOffsets, mIsovalue);
    }
} // IdentifyIntersectingVoxels::operator()
/// @brief Populate @a intersectionTree with all voxels of @a inputTree that
///   intersect the isosurface defined by @a isovalue, including the borders
///   of active tiles.
template<typename InputTreeType>
inline void
identifySurfaceIntersectingVoxels(
    typename InputTreeType::template ValueConverter<bool>::Type& intersectionTree,
    const InputTreeType& inputTree,
    typename InputTreeType::ValueType isovalue)
{
    using InputLeafNodeType = typename InputTreeType::LeafNodeType;
    std::vector<const InputLeafNodeType*> inputLeafNodes;
    inputTree.getNodes(inputLeafNodes);
    // Leaf-level intersections, evaluated in parallel.
    IdentifyIntersectingVoxels<InputTreeType> op(
        inputTree, inputLeafNodes, intersectionTree, isovalue);
    tbb::parallel_reduce(tbb::blocked_range<size_t>(0, inputLeafNodes.size()), op);
    // Tile-level intersections along active tile borders.
    maskActiveTileBorders(inputTree, isovalue, intersectionTree);
}
////////////////////////////////////////
/// @brief TBB reduction body that re-evaluates isosurface intersections for
///   the voxels of a given set of bool mask nodes, writing the result into a
///   new bool intersection tree.
/// @details Split copies accumulate into a private tree that join() merges.
template<typename InputTreeType>
struct MaskIntersectingVoxels
{
using InputLeafNodeType = typename InputTreeType::LeafNodeType;
using InputValueType = typename InputLeafNodeType::ValueType;
using BoolTreeType = typename InputTreeType::template ValueConverter<bool>::Type;
using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;
/// @param nodes mask leaf nodes whose voxels are re-tested against @a iso
MaskIntersectingVoxels(
    const InputTreeType& inputTree,
    const std::vector<BoolLeafNodeType*>& nodes,
    BoolTreeType& intersectionTree,
    InputValueType iso);
MaskIntersectingVoxels(MaskIntersectingVoxels&, tbb::split);
void operator()(const tbb::blocked_range<size_t>&);
/// Merge the other body's private intersection tree into this one's.
void join(const MaskIntersectingVoxels& rhs) {
    mIntersectionAccessor.tree().merge(rhs.mIntersectionAccessor.tree());
}
private:
tree::ValueAccessor<const InputTreeType> mInputAccessor;
BoolLeafNodeType const * const * const mNodes;
BoolTreeType mIntersectionTree; // private tree, used only by split copies
tree::ValueAccessor<BoolTreeType> mIntersectionAccessor;
InputValueType mIsovalue;
}; // struct MaskIntersectingVoxels
/// Main constructor: writes directly into the caller-supplied
/// @a intersectionTree.
template<typename InputTreeType>
MaskIntersectingVoxels<InputTreeType>::MaskIntersectingVoxels(
    const InputTreeType& inputTree,
    const std::vector<BoolLeafNodeType*>& nodes,
    BoolTreeType& intersectionTree,
    InputValueType iso)
    : mInputAccessor(inputTree)
    , mNodes(nodes.empty() ? nullptr : &nodes.front())
    , mIntersectionTree(false)
    , mIntersectionAccessor(intersectionTree)
    , mIsovalue(iso)
{
}
/// Split constructor (tbb::parallel_reduce): writes into a private tree.
template<typename InputTreeType>
MaskIntersectingVoxels<InputTreeType>::MaskIntersectingVoxels(
    MaskIntersectingVoxels& rhs, tbb::split)
    : mInputAccessor(rhs.mInputAccessor.tree())
    , mNodes(rhs.mNodes)
    , mIntersectionTree(false)
    , mIntersectionAccessor(mIntersectionTree) // use local tree.
    , mIsovalue(rhs.mIsovalue)
{
}
/// Re-test each active mask voxel whose value is false against the input
/// tree, activating the corresponding +x/+y/+z edge voxels that straddle
/// the isovalue.  Active voxels with value true are skipped.
template<typename InputTreeType>
void
MaskIntersectingVoxels<InputTreeType>::operator()(const tbb::blocked_range<size_t>& range)
{
    VoxelEdgeAccessor<tree::ValueAccessor<BoolTreeType>, 0> xEdgeAcc(mIntersectionAccessor);
    VoxelEdgeAccessor<tree::ValueAccessor<BoolTreeType>, 1> yEdgeAcc(mIntersectionAccessor);
    VoxelEdgeAccessor<tree::ValueAccessor<BoolTreeType>, 2> zEdgeAcc(mIntersectionAccessor);
    Coord ijk(0, 0, 0);
    InputValueType iso(mIsovalue);
    for (size_t n = range.begin(); n != range.end(); ++n) {
        const BoolLeafNodeType& node = *mNodes[n];
        for (typename BoolLeafNodeType::ValueOnCIter it = node.cbeginValueOn(); it; ++it) {
            if (!it.getValue()) {
                ijk = it.getCoord();
                // Compare the voxel's inside/outside state against each
                // positive-axis neighbor; a state change means the edge
                // crosses the isosurface.
                const bool inside = isInsideValue(mInputAccessor.getValue(ijk), iso);
                if (inside != isInsideValue(mInputAccessor.getValue(ijk.offsetBy(1, 0, 0)), iso)) {
                    xEdgeAcc.set(ijk);
                }
                if (inside != isInsideValue(mInputAccessor.getValue(ijk.offsetBy(0, 1, 0)), iso)) {
                    yEdgeAcc.set(ijk);
                }
                if (inside != isInsideValue(mInputAccessor.getValue(ijk.offsetBy(0, 0, 1)), iso)) {
                    zEdgeAcc.set(ijk);
                }
            }
        }
    }
} // MaskIntersectingVoxels::operator()
/// @brief TBB reduction body that activates, in @a borderTree, every mask
///   voxel that has at least one neighbor (from a fixed 7-neighbor walk)
///   with a different mask value, i.e. voxels on the masked/unmasked border.
/// @details Split copies write into a private tree merged back by join().
template<typename BoolTreeType>
struct MaskBorderVoxels
{
using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;
MaskBorderVoxels(const BoolTreeType& maskTree,
    const std::vector<BoolLeafNodeType*>& maskNodes,
    BoolTreeType& borderTree)
    : mMaskTree(&maskTree)
    , mMaskNodes(maskNodes.empty() ? nullptr : &maskNodes.front())
    , mTmpBorderTree(false)
    , mBorderTree(&borderTree)
    {
    }
MaskBorderVoxels(MaskBorderVoxels& rhs, tbb::split)
    : mMaskTree(rhs.mMaskTree)
    , mMaskNodes(rhs.mMaskNodes)
    , mTmpBorderTree(false)
    , mBorderTree(&mTmpBorderTree) // split copies write into the private tree
    {
    }
void join(MaskBorderVoxels& rhs) { mBorderTree->merge(*rhs.mBorderTree); }
void operator()(const tbb::blocked_range<size_t>& range)
    {
        tree::ValueAccessor<const BoolTreeType> maskAcc(*mMaskTree);
        tree::ValueAccessor<BoolTreeType> borderAcc(*mBorderTree);
        Coord ijk(0, 0, 0);
        for (size_t n = range.begin(); n != range.end(); ++n) {
            const BoolLeafNodeType& node = *mMaskNodes[n];
            for (typename BoolLeafNodeType::ValueOnCIter it = node.cbeginValueOn(); it; ++it) {
                ijk = it.getCoord();
                const bool lhs = it.getValue();
                bool rhs = lhs;
                bool isEdgeVoxel = false;
                // Walk seven neighbors by incrementally offsetting ijk; the
                // trailing comments track the current offset from (i,j,k).
                // NB: the mutation order matters — each step reuses ijk.
                ijk[2] += 1; // i, j, k+1
                isEdgeVoxel = (maskAcc.probeValue(ijk, rhs) && lhs != rhs);
                ijk[1] += 1; // i, j+1, k+1
                isEdgeVoxel = isEdgeVoxel || (maskAcc.probeValue(ijk, rhs) && lhs != rhs);
                ijk[0] += 1; // i+1, j+1, k+1
                isEdgeVoxel = isEdgeVoxel || (maskAcc.probeValue(ijk, rhs) && lhs != rhs);
                ijk[1] -= 1; // i+1, j, k+1
                isEdgeVoxel = isEdgeVoxel || (maskAcc.probeValue(ijk, rhs) && lhs != rhs);
                ijk[2] -= 1; // i+1, j, k
                isEdgeVoxel = isEdgeVoxel || (maskAcc.probeValue(ijk, rhs) && lhs != rhs);
                ijk[1] += 1; // i+1, j+1, k
                isEdgeVoxel = isEdgeVoxel || (maskAcc.probeValue(ijk, rhs) && lhs != rhs);
                ijk[0] -= 1; // i, j+1, k
                isEdgeVoxel = isEdgeVoxel || (maskAcc.probeValue(ijk, rhs) && lhs != rhs);
                if (isEdgeVoxel) {
                    ijk[1] -= 1; // i, j, k
                    borderAcc.setValue(ijk, true);
                }
            }
        }
    }
private:
BoolTreeType const * const mMaskTree;
BoolLeafNodeType const * const * const mMaskNodes;
BoolTreeType mTmpBorderTree; // private tree, used only by split copies
BoolTreeType * const mBorderTree;
}; // struct MaskBorderVoxels
/// @brief TBB body that forwards "true" states from a reference mask tree:
///   for every active voxel of each output node, the value is forced to
///   true when the matching voxel in the mask tree holds a true value.
template<typename BoolTreeType>
struct SyncMaskValues
{
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;

    /// @param nodes output leaf nodes to update
    /// @param mask  reference tree whose true values are propagated
    SyncMaskValues(const std::vector<BoolLeafNodeType*>& nodes, const BoolTreeType& mask)
        : mNodes(nodes.empty() ? nullptr : &nodes.front())
        , mMaskTree(&mask)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        tree::ValueAccessor<const BoolTreeType> refAcc(*mMaskTree);
        for (size_t idx = range.begin(); idx != range.end(); ++idx) {
            BoolLeafNodeType& outputNode = *mNodes[idx];
            // Nodes without a counterpart in the mask tree are left untouched.
            const BoolLeafNodeType* refNode = refAcc.probeConstLeaf(outputNode.origin());
            if (!refNode) continue;
            for (auto iter = outputNode.beginValueOn(); iter; ++iter) {
                const Index offset = iter.pos();
                if (refNode->getValue(offset)) {
                    outputNode.setValueOnly(offset, true);
                }
            }
        }
    }

private:
    BoolLeafNodeType * const * const mNodes;
    BoolTreeType const * const mMaskTree;
}; // struct SyncMaskValues
////////////////////////////////////////
/// @brief TBB body that applies a user-supplied surface mask to a set of
///   bool leaf nodes: active voxels whose mask state equals @a invert are
///   tagged with the value true (presumably meaning "excluded from the
///   surface" — callers re-test them later; see applySurfaceMask).
template<typename BoolTreeType>
struct MaskSurface
{
using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;
/// @param invert when true, the mask's meaning is inverted
MaskSurface(const std::vector<BoolLeafNodeType*>& nodes, const BoolTreeType& mask,
    const math::Transform& inputTransform, const math::Transform& maskTransform, bool invert)
    : mNodes(nodes.empty() ? nullptr : &nodes.front())
    , mMaskTree(&mask)
    , mInputTransform(inputTransform)
    , mMaskTransform(maskTransform)
    , mInvertMask(invert)
    {
    }
void operator()(const tbb::blocked_range<size_t>& range) const
    {
        using ValueOnIter = typename BoolLeafNodeType::ValueOnIter;
        tree::ValueAccessor<const BoolTreeType> maskTreeAcc(*mMaskTree);
        // When the transforms match, index coordinates can be compared
        // directly; otherwise each voxel is mapped through world space.
        const bool matchingTransforms = mInputTransform == mMaskTransform;
        const bool maskState = mInvertMask;
        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {
            BoolLeafNodeType& node = *mNodes[n];
            if (matchingTransforms) {
                const BoolLeafNodeType * maskNode = maskTreeAcc.probeConstLeaf(node.origin());
                if (maskNode) {
                    // Per-voxel comparison against the co-located mask node.
                    for (ValueOnIter it = node.beginValueOn(); it; ++it) {
                        const Index pos = it.pos();
                        if (maskNode->isValueOn(pos) == maskState) {
                            node.setValueOnly(pos, true);
                        }
                    }
                } else {
                    // No mask leaf: the whole node is covered by a tile,
                    // so one lookup decides all voxels.
                    if (maskTreeAcc.isValueOn(node.origin()) == maskState) {
                        for (ValueOnIter it = node.beginValueOn(); it; ++it) {
                            node.setValueOnly(it.pos(), true);
                        }
                    }
                }
            } else {
                // Mismatched transforms: resample through world space.
                Coord ijk(0, 0, 0);
                for (ValueOnIter it = node.beginValueOn(); it; ++it) {
                    ijk = mMaskTransform.worldToIndexCellCentered(
                        mInputTransform.indexToWorld(it.getCoord()));
                    if (maskTreeAcc.isValueOn(ijk) == maskState) {
                        node.setValueOnly(it.pos(), true);
                    }
                }
            }
        }
    }
private:
BoolLeafNodeType * const * const mNodes;
BoolTreeType const * const mMaskTree;
math::Transform const mInputTransform;
math::Transform const mMaskTransform;
bool const mInvertMask;
}; // struct MaskSurface
/// @brief Apply an optional bool surface mask to the isosurface-intersection
///   topology.
/// @details Tags masked-out voxels, records the masked/unmasked border in
///   @a borderTree, then rebuilds the intersection mask so that only the
///   surviving surface voxels remain active.  No-op unless @a maskGrid is a
///   bool grid.
template<typename InputGridType>
inline void
applySurfaceMask(
    typename InputGridType::TreeType::template ValueConverter<bool>::Type& intersectionTree,
    typename InputGridType::TreeType::template ValueConverter<bool>::Type& borderTree,
    const InputGridType& inputGrid,
    const GridBase::ConstPtr& maskGrid,
    bool invertMask,
    typename InputGridType::ValueType isovalue)
{
    using InputTreeType = typename InputGridType::TreeType;
    using BoolTreeType = typename InputTreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;
    using BoolGridType = Grid<BoolTreeType>;
    if (maskGrid && maskGrid->type() == BoolGridType::gridType()) {
        const math::Transform& transform = inputGrid.transform();
        const InputTreeType& inputTree = inputGrid.tree();
        const BoolGridType * surfaceMask = static_cast<const BoolGridType*>(maskGrid.get());
        const BoolTreeType& maskTree = surfaceMask->tree();
        const math::Transform& maskTransform = surfaceMask->transform();
        // mark masked voxels
        std::vector<BoolLeafNodeType*> intersectionLeafNodes;
        intersectionTree.getNodes(intersectionLeafNodes);
        tbb::parallel_for(tbb::blocked_range<size_t>(0, intersectionLeafNodes.size()),
            MaskSurface<BoolTreeType>(
                intersectionLeafNodes, maskTree, transform, maskTransform, invertMask));
        // mask surface-mask border
        MaskBorderVoxels<BoolTreeType> borderOp(
            intersectionTree, intersectionLeafNodes, borderTree);
        tbb::parallel_reduce(tbb::blocked_range<size_t>(0, intersectionLeafNodes.size()), borderOp);
        // recompute isosurface intersection mask
        BoolTreeType tmpIntersectionTree(false);
        MaskIntersectingVoxels<InputTreeType> op(
            inputTree, intersectionLeafNodes, tmpIntersectionTree, isovalue);
        tbb::parallel_reduce(tbb::blocked_range<size_t>(0, intersectionLeafNodes.size()), op);
        std::vector<BoolLeafNodeType*> tmpIntersectionLeafNodes;
        tmpIntersectionTree.getNodes(tmpIntersectionLeafNodes);
        // Carry the "masked" tags over into the rebuilt intersection mask.
        tbb::parallel_for(tbb::blocked_range<size_t>(0, tmpIntersectionLeafNodes.size()),
            SyncMaskValues<BoolTreeType>(tmpIntersectionLeafNodes, intersectionTree));
        // Replace the old mask with the recomputed one.
        intersectionTree.clear();
        intersectionTree.merge(tmpIntersectionTree);
    }
}
////////////////////////////////////////
/// @brief TBB reduction body that derives, for every surface-intersecting
///   voxel, a packed Int16 of cell sign/edge flags and allocates a matching
///   (zero-initialized) entry in a point-index tree.
/// @details Split copies write into private trees that join() merges.
template<typename InputTreeType>
struct ComputeAuxiliaryData
{
using InputLeafNodeType = typename InputTreeType::LeafNodeType;
using InputValueType = typename InputLeafNodeType::ValueType;
using BoolLeafNodeType = tree::LeafNode<bool, InputLeafNodeType::LOG2DIM>;
using Int16TreeType = typename InputTreeType::template ValueConverter<Int16>::Type;
using Index32TreeType = typename InputTreeType::template ValueConverter<Index32>::Type;
ComputeAuxiliaryData(const InputTreeType& inputTree,
    const std::vector<const BoolLeafNodeType*>& intersectionLeafNodes,
    Int16TreeType& signFlagsTree,
    Index32TreeType& pointIndexTree,
    InputValueType iso);
ComputeAuxiliaryData(ComputeAuxiliaryData&, tbb::split);
void operator()(const tbb::blocked_range<size_t>&);
/// Merge the other body's private sign-flag and point-index trees.
void join(const ComputeAuxiliaryData& rhs) {
    mSignFlagsAccessor.tree().merge(rhs.mSignFlagsAccessor.tree());
    mPointIndexAccessor.tree().merge(rhs.mPointIndexAccessor.tree());
}
private:
tree::ValueAccessor<const InputTreeType> mInputAccessor;
BoolLeafNodeType const * const * const mIntersectionNodes;
Int16TreeType mSignFlagsTree; // private tree, used only by split copies
tree::ValueAccessor<Int16TreeType> mSignFlagsAccessor;
Index32TreeType mPointIndexTree; // private tree, used only by split copies
tree::ValueAccessor<Index32TreeType> mPointIndexAccessor;
const InputValueType mIsovalue;
};
/// Main constructor: outputs go to the caller-supplied sign-flags and
/// point-index trees; the point-index tree's background is reset to the
/// "invalid index" sentinel so unassigned voxels are recognizable.
template<typename InputTreeType>
ComputeAuxiliaryData<InputTreeType>::ComputeAuxiliaryData(
    const InputTreeType& inputTree,
    const std::vector<const BoolLeafNodeType*>& intersectionLeafNodes,
    Int16TreeType& signFlagsTree,
    Index32TreeType& pointIndexTree,
    InputValueType iso)
    : mInputAccessor(inputTree)
    // Calling front() on an empty vector is undefined behavior; guard the
    // empty case like the other functors in this file do.
    , mIntersectionNodes(intersectionLeafNodes.empty() ? nullptr : &intersectionLeafNodes.front())
    , mSignFlagsTree(0)
    , mSignFlagsAccessor(signFlagsTree)
    , mPointIndexTree(std::numeric_limits<Index32>::max())
    , mPointIndexAccessor(pointIndexTree)
    , mIsovalue(iso)
{
    pointIndexTree.root().setBackground(std::numeric_limits<Index32>::max(), false);
}
/// Split constructor (tbb::parallel_reduce): writes into private trees.
template<typename InputTreeType>
ComputeAuxiliaryData<InputTreeType>::ComputeAuxiliaryData(ComputeAuxiliaryData& rhs, tbb::split)
    : mInputAccessor(rhs.mInputAccessor.tree())
    , mIntersectionNodes(rhs.mIntersectionNodes)
    , mSignFlagsTree(0)
    , mSignFlagsAccessor(mSignFlagsTree)
    , mPointIndexTree(std::numeric_limits<Index32>::max())
    , mPointIndexAccessor(mPointIndexTree)
    , mIsovalue(rhs.mIsovalue)
{
}
/// For each intersection-mask node, sample the eight cell-corner values of
/// every active voxel, compute the marching-cubes sign configuration and
/// edge-intersection flags, and record them in the sign-flags tree; a
/// matching zero-filled leaf is created in the point-index tree.
template<typename InputTreeType>
void
ComputeAuxiliaryData<InputTreeType>::operator()(const tbb::blocked_range<size_t>& range)
{
    using Int16LeafNodeType = typename Int16TreeType::LeafNodeType;
    Coord ijk;
    math::Tuple<8, InputValueType> cellVertexValues;
    // Scratch leaf node, reused (re-originated) for each mask node and
    // released into the sign-flags tree only when it received data.
    typename std::unique_ptr<Int16LeafNodeType> signsNodePt(new Int16LeafNodeType(ijk, 0));
    for (size_t n = range.begin(), N = range.end(); n != N; ++n) {
        const BoolLeafNodeType& maskNode = *mIntersectionNodes[n];
        const Coord& origin = maskNode.origin();
        const InputLeafNodeType *leafPt = mInputAccessor.probeConstLeaf(origin);
        if (!signsNodePt.get()) signsNodePt.reset(new Int16LeafNodeType(origin, 0));
        else signsNodePt->setOrigin(origin);
        bool updatedNode = false;
        for (typename BoolLeafNodeType::ValueOnCIter it = maskNode.cbeginValueOn(); it; ++it) {
            const Index pos = it.pos();
            ijk = BoolLeafNodeType::offsetToLocalCoord(pos);
            // Interior voxels can gather all eight corners from the local
            // input leaf; boundary voxels go through the accessor.
            if (leafPt &&
                ijk[0] < int(BoolLeafNodeType::DIM - 1) &&
                ijk[1] < int(BoolLeafNodeType::DIM - 1) &&
                ijk[2] < int(BoolLeafNodeType::DIM - 1) ) {
                getCellVertexValues(*leafPt, pos, cellVertexValues);
            } else {
                getCellVertexValues(mInputAccessor, origin + ijk, cellVertexValues);
            }
            uint8_t signFlags = computeSignFlags(cellVertexValues, mIsovalue);
            // 0 and 0xFF mean all corners on one side: no intersection.
            if (signFlags != 0 && signFlags != 0xFF) {
                const bool inside = signFlags & 0x1;
                int edgeFlags = inside ? INSIDE : 0;
                // Mask voxels tagged true (e.g. masked-out border voxels)
                // contribute no edge flags, only the sign configuration.
                if (!it.getValue()) {
                    edgeFlags |= inside != ((signFlags & 0x02) != 0) ? XEDGE : 0;
                    edgeFlags |= inside != ((signFlags & 0x10) != 0) ? YEDGE : 0;
                    edgeFlags |= inside != ((signFlags & 0x08) != 0) ? ZEDGE : 0;
                }
                const uint8_t ambiguousCellFlags = sAmbiguousFace[signFlags];
                if (ambiguousCellFlags != 0) {
                    correctCellSigns(signFlags, ambiguousCellFlags, mInputAccessor,
                        origin + ijk, mIsovalue);
                }
                edgeFlags |= int(signFlags);
                signsNodePt->setValueOn(pos, Int16(edgeFlags));
                updatedNode = true;
            }
        }
        if (updatedNode) {
            // Mirror the sign-flag topology into the point-index tree and
            // initialize the new entries to zero.
            typename Index32TreeType::LeafNodeType* idxNode = mPointIndexAccessor.touchLeaf(origin);
            idxNode->topologyUnion(*signsNodePt);
            // zero fill
            for (auto it = idxNode->beginValueOn(); it; ++it) {
                idxNode->setValueOnly(it.pos(), 0);
            }
            mSignFlagsAccessor.addLeaf(signsNodePt.release());
        }
    }
} // ComputeAuxiliaryData::operator()
template<typename InputTreeType>
inline void
computeAuxiliaryData(
typename InputTreeType::template ValueConverter<Int16>::Type& signFlagsTree,
typename InputTreeType::template ValueConverter<Index32>::Type& pointIndexTree,
const typename InputTreeType::template ValueConverter<bool>::Type& intersectionTree,
const InputTreeType& inputTree,
typename InputTreeType::ValueType isovalue)
{
using BoolTreeType = typename InputTreeType::template ValueConverter<bool>::Type;
using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;
std::vector<const BoolLeafNodeType*> intersectionLeafNodes;
intersectionTree.getNodes(intersectionLeafNodes);
ComputeAuxiliaryData<InputTreeType> op(
inputTree, intersectionLeafNodes, signFlagsTree, pointIndexTree, isovalue);
tbb::parallel_reduce(tbb::blocked_range<size_t>(0, intersectionLeafNodes.size()), op);
}
////////////////////////////////////////
/// @brief TBB body that computes, per sign-flag leaf node, the number of
///   surface points its cells will generate (sum of per-cell point counts
///   from the edge-group table).
template<Index32 LeafNodeLog2Dim>
struct LeafNodePointCount
{
    using Int16LeafNodeType = tree::LeafNode<Int16, LeafNodeLog2Dim>;

    /// @param leafNodes     sign-flag leaf nodes to count
    /// @param leafNodeCount output array, one count per node
    LeafNodePointCount(const std::vector<Int16LeafNodeType*>& leafNodes,
        std::unique_ptr<Index32[]>& leafNodeCount)
        : mLeafNodes(leafNodes.empty() ? nullptr : &leafNodes.front())
        , mData(leafNodeCount.get())
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {
            const Int16* signs = mLeafNodes[n]->buffer().data();
            Index32 total = 0;
            // Each voxel's SIGNS bits index into the edge-group table,
            // whose first entry is the point count for that configuration.
            for (Index i = 0; i < Int16LeafNodeType::SIZE; ++i) {
                total += Index32(sEdgeGroupTable[(SIGNS & signs[i])][0]);
            }
            mData[n] = total;
        }
    }

private:
    Int16LeafNodeType * const * const mLeafNodes;
    Index32 *mData;
}; // struct LeafNodePointCount
/// @brief TBB body for adaptive meshing: computes the per-node point count,
///   where voxels with index 0 contribute their cell's point count and
///   voxels carrying a region id each contribute one shared point per
///   unique region.
template<typename PointIndexLeafNode>
struct AdaptiveLeafNodePointCount
{
using Int16LeafNodeType = tree::LeafNode<Int16, PointIndexLeafNode::LOG2DIM>;
AdaptiveLeafNodePointCount(const std::vector<PointIndexLeafNode*>& pointIndexNodes,
    const std::vector<Int16LeafNodeType*>& signDataNodes,
    std::unique_ptr<Index32[]>& leafNodeCount)
    : mPointIndexNodes(pointIndexNodes.empty() ? nullptr : &pointIndexNodes.front())
    , mSignDataNodes(signDataNodes.empty() ? nullptr : &signDataNodes.front())
    , mData(leafNodeCount.get())
    {
    }
void operator()(const tbb::blocked_range<size_t>& range) const
    {
        using IndexType = typename PointIndexLeafNode::ValueType;
        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {
            const PointIndexLeafNode& node = *mPointIndexNodes[n];
            const Int16LeafNodeType& signNode = *mSignDataNodes[n];
            size_t count = 0;
            std::set<IndexType> uniqueRegions;
            for (typename PointIndexLeafNode::ValueOnCIter it = node.cbeginValueOn(); it; ++it) {
                IndexType id = it.getValue();
                if (id == 0) {
                    // Unmerged voxel: emits one point per cell edge group.
                    count += size_t(sEdgeGroupTable[(SIGNS & signNode.getValue(it.pos()))][0]);
                } else if (id != IndexType(util::INVALID_IDX)) {
                    // Merged voxel: region emits a single shared point.
                    uniqueRegions.insert(id);
                }
            }
            mData[n] = Index32(count + uniqueRegions.size());
        }
    }
private:
PointIndexLeafNode const * const * const mPointIndexNodes;
Int16LeafNodeType const * const * const mSignDataNodes;
Index32 *mData;
}; // struct AdaptiveLeafNodePointCount
/// @brief TBB body that assigns global point indices: each node starts at
///   its precomputed offset, and every active voxel takes the running
///   offset, which then advances by that cell's point count.
template<typename PointIndexLeafNode>
struct MapPoints
{
    using Int16LeafNodeType = tree::LeafNode<Int16, PointIndexLeafNode::LOG2DIM>;

    /// @param leafNodeCount per-node starting offsets (prefix sums)
    MapPoints(const std::vector<PointIndexLeafNode*>& pointIndexNodes,
        const std::vector<Int16LeafNodeType*>& signDataNodes,
        std::unique_ptr<Index32[]>& leafNodeCount)
        : mPointIndexNodes(pointIndexNodes.empty() ? nullptr : &pointIndexNodes.front())
        , mSignDataNodes(signDataNodes.empty() ? nullptr : &signDataNodes.front())
        , mData(leafNodeCount.get())
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {
            PointIndexLeafNode& idxNode = *mPointIndexNodes[n];
            const Int16LeafNodeType& signNode = *mSignDataNodes[n];
            Index32 runningOffset = mData[n]; // this node's first point index
            for (auto iter = idxNode.beginValueOn(); iter; ++iter) {
                const Index voxel = iter.pos();
                idxNode.setValueOnly(voxel, runningOffset);
                // Advance by the number of points this cell generates.
                const int cell = SIGNS & int(signNode.getValue(voxel));
                runningOffset += Index32(sEdgeGroupTable[cell][0]);
            }
        }
    }

private:
    PointIndexLeafNode * const * const mPointIndexNodes;
    Int16LeafNodeType const * const * const mSignDataNodes;
    Index32 * const mData;
}; // struct MapPoints
/// @brief TBB body that generates the output polygons for each sign-flag
///   leaf node, using the point-index tree to resolve vertex ids and an
///   optional reference sign tree for seam-consistent meshing.
template<typename TreeType, typename PrimBuilder>
struct ComputePolygons
{
using Int16TreeType = typename TreeType::template ValueConverter<Int16>::Type;
using Int16LeafNodeType = typename Int16TreeType::LeafNodeType;
using Index32TreeType = typename TreeType::template ValueConverter<Index32>::Type;
using Index32LeafNodeType = typename Index32TreeType::LeafNodeType;
ComputePolygons(
    const std::vector<Int16LeafNodeType*>& signFlagsLeafNodes,
    const Int16TreeType& signFlagsTree,
    const Index32TreeType& idxTree,
    PolygonPoolList& polygons,
    bool invertSurfaceOrientation);
/// Optional reference sign tree used to propagate seam-line flags.
void setRefSignTree(const Int16TreeType * r) { mRefSignFlagsTree = r; }
void operator()(const tbb::blocked_range<size_t>&) const;
private:
Int16LeafNodeType * const * const mSignFlagsLeafNodes;
Int16TreeType const * const mSignFlagsTree;
Int16TreeType const * mRefSignFlagsTree; // may be null
Index32TreeType const * const mIndexTree;
PolygonPoolList * const mPolygonPoolList; // one pool per leaf node
bool const mInvertSurfaceOrientation;
}; // struct ComputePolygons
/// Constructor; the reference sign tree defaults to null and can be set
/// later via setRefSignTree().
template<typename TreeType, typename PrimBuilder>
ComputePolygons<TreeType, PrimBuilder>::ComputePolygons(
    const std::vector<Int16LeafNodeType*>& signFlagsLeafNodes,
    const Int16TreeType& signFlagsTree,
    const Index32TreeType& idxTree,
    PolygonPoolList& polygons,
    bool invertSurfaceOrientation)
    : mSignFlagsLeafNodes(signFlagsLeafNodes.empty() ? nullptr : &signFlagsLeafNodes.front())
    , mSignFlagsTree(&signFlagsTree)
    , mRefSignFlagsTree(nullptr)
    , mIndexTree(&idxTree)
    , mPolygonPoolList(&polygons)
    , mInvertSurfaceOrientation(invertSurfaceOrientation)
{
}
/// For each sign-flag leaf node, size the node's polygon pool from an edge
/// count, then emit polygons for every voxel with edge flags set, choosing
/// the fast leaf-local path when the cell lies strictly inside the node.
template<typename InputTreeType, typename PrimBuilder>
void
ComputePolygons<InputTreeType, PrimBuilder>::operator()(const tbb::blocked_range<size_t>& range) const
{
    using Int16ValueAccessor = tree::ValueAccessor<const Int16TreeType>;
    Int16ValueAccessor signAcc(*mSignFlagsTree);
    tree::ValueAccessor<const Index32TreeType> idxAcc(*mIndexTree);
    const bool invertSurfaceOrientation = mInvertSurfaceOrientation;
    PrimBuilder mesher;
    size_t edgeCount;
    Coord ijk, origin;
    // reference data
    std::unique_ptr<Int16ValueAccessor> refSignAcc;
    if (mRefSignFlagsTree) refSignAcc.reset(new Int16ValueAccessor(*mRefSignFlagsTree));
    for (size_t n = range.begin(); n != range.end(); ++n) {
        const Int16LeafNodeType& node = *mSignFlagsLeafNodes[n];
        origin = node.origin();
        // Get an upper bound on the number of primitives.
        edgeCount = 0;
        typename Int16LeafNodeType::ValueOnCIter iter = node.cbeginValueOn();
        for (; iter; ++iter) {
            if (iter.getValue() & XEDGE) ++edgeCount;
            if (iter.getValue() & YEDGE) ++edgeCount;
            if (iter.getValue() & ZEDGE) ++edgeCount;
        }
        if(edgeCount == 0) continue;
        mesher.init(edgeCount, (*mPolygonPoolList)[n]);
        const Int16LeafNodeType *signleafPt = signAcc.probeConstLeaf(origin);
        const Index32LeafNodeType *idxLeafPt = idxAcc.probeConstLeaf(origin);
        if (!signleafPt || !idxLeafPt) continue;
        const Int16LeafNodeType *refSignLeafPt = nullptr;
        if (refSignAcc) refSignLeafPt = refSignAcc->probeConstLeaf(origin);
        Vec3i offsets;
        for (iter = node.cbeginValueOn(); iter; ++iter) {
            ijk = iter.getCoord();
            Int16 flags = iter.getValue();
            // Skip voxels with no edge-intersection flags (0xE00 covers
            // the XEDGE/YEDGE/ZEDGE bits).
            if (!(flags & 0xE00)) continue;
            Int16 refFlags = 0;
            if (refSignLeafPt) {
                refFlags = refSignLeafPt->getValue(iter.pos());
            }
            offsets[0] = 0;
            offsets[1] = 0;
            offsets[2] = 0;
            // Cells with multiple edge groups need per-axis point offsets
            // taken from the edge-group table.
            const uint8_t cell = uint8_t(SIGNS & flags);
            if (sEdgeGroupTable[cell][0] > 1) {
                offsets[0] = (sEdgeGroupTable[cell][1] - 1);
                offsets[1] = (sEdgeGroupTable[cell][9] - 1);
                offsets[2] = (sEdgeGroupTable[cell][4] - 1);
            }
            // Interior cells can use the cached leaf pointers; cells on the
            // node's lower faces must go through the accessors.
            if (ijk[0] > origin[0] && ijk[1] > origin[1] && ijk[2] > origin[2]) {
                constructPolygons(invertSurfaceOrientation,
                    flags, refFlags, offsets, ijk, *signleafPt, *idxLeafPt, mesher);
            } else {
                constructPolygons(invertSurfaceOrientation,
                    flags, refFlags, offsets, ijk, signAcc, idxAcc, mesher);
            }
        }
        mesher.done();
    }
} // ComputePolygons::operator()
////////////////////////////////////////
/// @brief TBB body that copies a contiguous input array into an output
///   array at a fixed element offset.
template<typename T>
struct CopyArray
{
    /// @param outputOffset index in @a outputArray where element 0 lands
    CopyArray(T * outputArray, const T * inputArray, size_t outputOffset = 0)
        : mOutputArray(outputArray), mInputArray(inputArray), mOutputOffset(outputOffset)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& inputArrayRange) const
    {
        const size_t base = mOutputOffset;
        for (size_t idx = inputArrayRange.begin(); idx != inputArrayRange.end(); ++idx) {
            mOutputArray[base + idx] = mInputArray[idx];
        }
    }

private:
    T * const mOutputArray;
    T const * const mInputArray;
    size_t const mOutputOffset;
}; // struct CopyArray
/// @brief TBB body that tags nonplanar, non-exterior seam-line quads with
///   POLYFLAG_SUBDIVIDED and records, per polygon pool, how many quads will
///   be subdivided (consumed by SubdivideQuads).
struct FlagAndCountQuadsToSubdivide
{
FlagAndCountQuadsToSubdivide(PolygonPoolList& polygons,
    const std::vector<uint8_t>& pointFlags,
    std::unique_ptr<openvdb::Vec3s[]>& points,
    std::unique_ptr<unsigned[]>& numQuadsToDivide)
    : mPolygonPoolList(&polygons)
    , mPointFlags(pointFlags.empty() ? nullptr : &pointFlags.front())
    , mPoints(points.get())
    , mNumQuadsToDivide(numQuadsToDivide.get())
    {
    }
void operator()(const tbb::blocked_range<size_t>& range) const
    {
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            PolygonPool& polygons = (*mPolygonPoolList)[n];
            unsigned count = 0;
            // count and tag nonplanar seam line quads.
            for (size_t i = 0, I = polygons.numQuads(); i < I; ++i) {
                char& flags = polygons.quadFlags(i);
                if ((flags & POLYFLAG_FRACTURE_SEAM) && !(flags & POLYFLAG_EXTERIOR)) {
                    Vec4I& quad = polygons.quad(i);
                    // Only quads that touch at least one flagged (seam-line)
                    // point are candidates.
                    const bool edgePoly = mPointFlags[quad[0]] || mPointFlags[quad[1]]
                        || mPointFlags[quad[2]] || mPointFlags[quad[3]];
                    if (!edgePoly) continue;
                    const Vec3s& p0 = mPoints[quad[0]];
                    const Vec3s& p1 = mPoints[quad[1]];
                    const Vec3s& p2 = mPoints[quad[2]];
                    const Vec3s& p3 = mPoints[quad[3]];
                    if (!isPlanarQuad(p0, p1, p2, p3, 1e-6f)) {
                        flags |= POLYFLAG_SUBDIVIDED;
                        count++;
                    }
                }
            }
            mNumQuadsToDivide[n] = count;
        }
    }
private:
PolygonPoolList * const mPolygonPoolList;
uint8_t const * const mPointFlags; // per-point seam-line flags
Vec3s const * const mPoints;
unsigned * const mNumQuadsToDivide; // output: one count per polygon pool
}; // struct FlagAndCountQuadsToSubdivide
/// @brief TBB body that replaces each quad tagged POLYFLAG_SUBDIVIDED with
///   four triangles fanned around a new centroid point.
/// @details Centroids are written into a separate array at per-pool offsets
///   (see subdivideNonplanarSeamLineQuads); new point ids start at
///   @a pointCount, after the existing points.
struct SubdivideQuads
{
SubdivideQuads(PolygonPoolList& polygons,
    const std::unique_ptr<openvdb::Vec3s[]>& points,
    size_t pointCount,
    std::unique_ptr<openvdb::Vec3s[]>& centroids,
    std::unique_ptr<unsigned[]>& numQuadsToDivide,
    std::unique_ptr<unsigned[]>& centroidOffsets)
    : mPolygonPoolList(&polygons)
    , mPoints(points.get())
    , mCentroids(centroids.get())
    , mNumQuadsToDivide(numQuadsToDivide.get())
    , mCentroidOffsets(centroidOffsets.get())
    , mPointCount(pointCount)
    {
    }
void operator()(const tbb::blocked_range<size_t>& range) const
    {
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            PolygonPool& polygons = (*mPolygonPoolList)[n];
            const size_t nonplanarCount = size_t(mNumQuadsToDivide[n]);
            if (nonplanarCount > 0) {
                // Rebuild the pool: each subdivided quad is removed and
                // contributes four triangles instead.
                PolygonPool tmpPolygons;
                tmpPolygons.resetQuads(polygons.numQuads() - nonplanarCount);
                tmpPolygons.resetTriangles(polygons.numTriangles() + size_t(4) * nonplanarCount);
                size_t offset = mCentroidOffsets[n];
                size_t triangleIdx = 0;
                for (size_t i = 0, I = polygons.numQuads(); i < I; ++i) {
                    const char quadFlags = polygons.quadFlags(i);
                    if (!(quadFlags & POLYFLAG_SUBDIVIDED)) continue;
                    // New centroid point: stored at the pool's running
                    // offset; its global id comes after all existing points.
                    unsigned newPointIdx = unsigned(offset + mPointCount);
                    openvdb::Vec4I& quad = polygons.quad(i);
                    mCentroids[offset] = (mPoints[quad[0]] + mPoints[quad[1]] +
                        mPoints[quad[2]] + mPoints[quad[3]]) * 0.25f;
                    ++offset;
                    // Fan out four triangles around the centroid,
                    // preserving the quad's winding order.
                    {
                        Vec3I& triangle = tmpPolygons.triangle(triangleIdx);
                        triangle[0] = quad[0];
                        triangle[1] = newPointIdx;
                        triangle[2] = quad[3];
                        tmpPolygons.triangleFlags(triangleIdx) = quadFlags;
                    }
                    ++triangleIdx;
                    {
                        Vec3I& triangle = tmpPolygons.triangle(triangleIdx);
                        triangle[0] = quad[0];
                        triangle[1] = quad[1];
                        triangle[2] = newPointIdx;
                        tmpPolygons.triangleFlags(triangleIdx) = quadFlags;
                    }
                    ++triangleIdx;
                    {
                        Vec3I& triangle = tmpPolygons.triangle(triangleIdx);
                        triangle[0] = quad[1];
                        triangle[1] = quad[2];
                        triangle[2] = newPointIdx;
                        tmpPolygons.triangleFlags(triangleIdx) = quadFlags;
                    }
                    ++triangleIdx;
                    {
                        Vec3I& triangle = tmpPolygons.triangle(triangleIdx);
                        triangle[0] = quad[2];
                        triangle[1] = quad[3];
                        triangle[2] = newPointIdx;
                        tmpPolygons.triangleFlags(triangleIdx) = quadFlags;
                    }
                    ++triangleIdx;
                    quad[0] = util::INVALID_IDX; // mark for deletion
                }
                // Carry over the pre-existing triangles...
                for (size_t i = 0, I = polygons.numTriangles(); i < I; ++i) {
                    tmpPolygons.triangle(triangleIdx) = polygons.triangle(i);
                    tmpPolygons.triangleFlags(triangleIdx) = polygons.triangleFlags(i);
                    ++triangleIdx;
                }
                // ...and the quads that were not subdivided.
                size_t quadIdx = 0;
                for (size_t i = 0, I = polygons.numQuads(); i < I; ++i) {
                    openvdb::Vec4I& quad = polygons.quad(i);
                    if (quad[0] != util::INVALID_IDX) { // ignore invalid quads
                        tmpPolygons.quad(quadIdx) = quad;
                        tmpPolygons.quadFlags(quadIdx) = polygons.quadFlags(i);
                        ++quadIdx;
                    }
                }
                polygons.copy(tmpPolygons);
            }
        }
    }
private:
PolygonPoolList * const mPolygonPoolList;
Vec3s const * const mPoints;
Vec3s * const mCentroids; // output centroid points, at per-pool offsets
unsigned * const mNumQuadsToDivide;
unsigned * const mCentroidOffsets; // per-pool prefix sums into mCentroids
size_t const mPointCount; // number of points before subdivision
}; // struct SubdivideQuads
/// @brief Split nonplanar seam-line quads into triangle fans around new
///   centroid points, appending those centroids to @a pointList.
/// @post @a pointListSize and @a pointFlags are grown to cover the new
///   centroid points (centroid flags default to 0).
inline void
subdivideNonplanarSeamLineQuads(
    PolygonPoolList& polygonPoolList,
    size_t polygonPoolListSize,
    PointList& pointList,
    size_t& pointListSize,
    std::vector<uint8_t>& pointFlags)
{
    const tbb::blocked_range<size_t> polygonPoolListRange(0, polygonPoolListSize);
    // Tag candidate quads and count them per pool.
    std::unique_ptr<unsigned[]> numQuadsToDivide(new unsigned[polygonPoolListSize]);
    tbb::parallel_for(polygonPoolListRange,
        FlagAndCountQuadsToSubdivide(polygonPoolList, pointFlags, pointList, numQuadsToDivide));
    // Prefix-sum the per-pool counts into centroid array offsets.
    std::unique_ptr<unsigned[]> centroidOffsets(new unsigned[polygonPoolListSize]);
    size_t centroidCount = 0;
    {
        unsigned sum = 0;
        for (size_t n = 0, N = polygonPoolListSize; n < N; ++n) {
            centroidOffsets[n] = sum;
            sum += numQuadsToDivide[n];
        }
        centroidCount = size_t(sum);
    }
    // Perform the subdivision, producing one centroid per tagged quad.
    std::unique_ptr<Vec3s[]> centroidList(new Vec3s[centroidCount]);
    tbb::parallel_for(polygonPoolListRange,
        SubdivideQuads(polygonPoolList, pointList, pointListSize,
            centroidList, numQuadsToDivide, centroidOffsets));
    if (centroidCount > 0) {
        // Append the centroids to the point list and grow the flag array.
        const size_t newPointListSize = centroidCount + pointListSize;
        std::unique_ptr<openvdb::Vec3s[]> newPointList(new openvdb::Vec3s[newPointListSize]);
        tbb::parallel_for(tbb::blocked_range<size_t>(0, pointListSize),
            CopyArray<Vec3s>(newPointList.get(), pointList.get()));
        tbb::parallel_for(tbb::blocked_range<size_t>(0, newPointListSize - pointListSize),
            CopyArray<Vec3s>(newPointList.get(), centroidList.get(), pointListSize));
        pointListSize = newPointListSize;
        pointList.swap(newPointList);
        pointFlags.resize(pointListSize, 0);
    }
}
/// @brief TBB body that removes the fracture-seam flag from any polygon whose
/// vertices do not touch a seam-line point.
struct ReviseSeamLineFlags
{
    ReviseSeamLineFlags(PolygonPoolList& polygons,
        const std::vector<uint8_t>& pointFlags)
        : mPools(&polygons)
        , mFlags(pointFlags.empty() ? nullptr : &pointFlags.front())
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        for (size_t poolIdx = range.begin(); poolIdx < range.end(); ++poolIdx) {

            PolygonPool& pool = (*mPools)[poolIdx];

            const size_t quadCount = pool.numQuads();
            for (size_t q = 0; q < quadCount; ++q) {

                char& quadFlags = pool.quadFlags(q);
                if (!(quadFlags & POLYFLAG_FRACTURE_SEAM)) continue;

                const openvdb::Vec4I& quad = pool.quad(q);
                const bool onSeam = mFlags[quad[0]] || mFlags[quad[1]]
                    || mFlags[quad[2]] || mFlags[quad[3]];

                // No vertex lies on the seam line: the flag is stale.
                if (!onSeam) quadFlags &= ~POLYFLAG_FRACTURE_SEAM;
            }

            const size_t triCount = pool.numTriangles();
            for (size_t t = 0; t < triCount; ++t) {

                char& triFlags = pool.triangleFlags(t);
                if (!(triFlags & POLYFLAG_FRACTURE_SEAM)) continue;

                const openvdb::Vec3I& tri = pool.triangle(t);
                const bool onSeam = mFlags[tri[0]] || mFlags[tri[1]] || mFlags[tri[2]];

                if (!onSeam) triFlags &= ~POLYFLAG_FRACTURE_SEAM;
            }
        }
    }

private:
    PolygonPoolList * const mPools;
    uint8_t const * const mFlags;
}; // struct ReviseSeamLineFlags
/// @brief Parallel driver that strips stale fracture-seam flags from all
/// polygon pools (see ReviseSeamLineFlags).
inline void
reviseSeamLineFlags(PolygonPoolList& polygonPoolList, size_t polygonPoolListSize,
    std::vector<uint8_t>& pointFlags)
{
    const tbb::blocked_range<size_t> poolRange(0, polygonPoolListSize);
    ReviseSeamLineFlags op(polygonPoolList, pointFlags);
    tbb::parallel_for(poolRange, op);
}
////////////////////////////////////////
/// @brief TBB body that flags the points of triangles whose face normal
/// strongly disagrees with the local gradient of the input volume
/// (i.e. disoriented triangles).
template<typename InputTreeType>
struct MaskDisorientedTrianglePoints
{
    MaskDisorientedTrianglePoints(const InputTreeType& inputTree, const PolygonPoolList& polygons,
        const PointList& pointList, std::unique_ptr<uint8_t[]>& pointMask,
        const math::Transform& transform, bool invertSurfaceOrientation)
        : mInputTree(&inputTree)
        , mPolygonPoolList(&polygons)
        , mPointList(&pointList)
        , mPointMask(pointMask.get())
        , mTransform(transform)
        , mInvertSurfaceOrientation(invertSurfaceOrientation)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        using ValueType = typename InputTreeType::LeafNodeType::ValueType;

        tree::ValueAccessor<const InputTreeType> inputAcc(*mInputTree);
        Vec3s centroid, normal;
        Coord ijk;

        // Boolean volumes use the opposite inside/outside classification,
        // so their gradient direction is flipped as well.
        const bool invertGradientDir = mInvertSurfaceOrientation || isBoolValue<ValueType>();

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            const PolygonPool& polygons = (*mPolygonPoolList)[n];

            for (size_t i = 0, I = polygons.numTriangles(); i < I; ++i) {

                const Vec3I& verts = polygons.triangle(i);

                const Vec3s& v0 = (*mPointList)[verts[0]];
                const Vec3s& v1 = (*mPointList)[verts[1]];
                const Vec3s& v2 = (*mPointList)[verts[2]];

                // Face normal from the triangle's winding order.
                normal = (v2 - v0).cross((v1 - v0));
                normal.normalize();

                centroid = (v0 + v1 + v2) * (1.0f / 3.0f);
                ijk = mTransform.worldToIndexCellCentered(centroid);

                // Second-order central-difference gradient sampled at the
                // triangle centroid.
                Vec3s dir( math::ISGradient<math::CD_2ND>::result(inputAcc, ijk) );
                dir.normalize();

                if (invertGradientDir) {
                    dir = -dir;
                }

                // check if the angle is obtuse (normal vs. gradient > 120 deg)
                if (dir.dot(normal) < -0.5f) {
                    // Concurrent writes to same memory address can occur, but
                    // all threads are writing the same value and char is atomic.
                    // (It is extremely rare that disoriented triangles share points,
                    // false sharing related performance impacts are not a concern.)
                    mPointMask[verts[0]] = 1;
                    mPointMask[verts[1]] = 1;
                    mPointMask[verts[2]] = 1;
                }

            } // end triangle loop

        } // end polygon pool loop
    }

private:
    InputTreeType const * const mInputTree;
    PolygonPoolList const * const mPolygonPoolList;
    PointList const * const mPointList;
    uint8_t * const mPointMask;          // shared output mask, one byte per point
    math::Transform const mTransform;
    bool const mInvertSurfaceOrientation;
}; // struct MaskDisorientedTrianglePoints
/// @brief Smooth out the points of triangles that are oriented against the
/// volume gradient by replacing each flagged point with the average of the
/// vertex positions of all polygons that reference it.
template<typename InputTree>
inline void
relaxDisorientedTriangles(
    bool invertSurfaceOrientation,
    const InputTree& inputTree,
    const math::Transform& transform,
    PolygonPoolList& polygonPoolList,
    size_t polygonPoolListSize,
    PointList& pointList,
    const size_t pointListSize)
{
    const tbb::blocked_range<size_t> polygonPoolListRange(0, polygonPoolListSize);

    // Mark the points of disoriented triangles.
    std::unique_ptr<uint8_t[]> pointMask(new uint8_t[pointListSize]);
    fillArray(pointMask.get(), uint8_t(0), pointListSize);

    tbb::parallel_for(polygonPoolListRange,
        MaskDisorientedTrianglePoints<InputTree>(
            inputTree, polygonPoolList, pointList, pointMask, transform, invertSurfaceOrientation));

    // For every masked point, accumulate the vertex positions of each incident
    // polygon; pointUpdates counts the contributing vertices per point.
    // NOTE(review): pointUpdates is uint8_t and could wrap for a point shared
    // by very many polygons -- confirm vertex valence stays below 256/4.
    std::unique_ptr<uint8_t[]> pointUpdates(new uint8_t[pointListSize]);
    fillArray(pointUpdates.get(), uint8_t(0), pointListSize);

    std::unique_ptr<Vec3s[]> newPoints(new Vec3s[pointListSize]);
    fillArray(newPoints.get(), Vec3s(0.0f, 0.0f, 0.0f), pointListSize);

    for (size_t n = 0, N = polygonPoolListSize; n < N; ++n) {

        PolygonPool& polygons = polygonPoolList[n];

        for (size_t i = 0, I = polygons.numQuads(); i < I; ++i) {
            openvdb::Vec4I& verts = polygons.quad(i);

            for (int v = 0; v < 4; ++v) {
                const unsigned pointIdx = verts[v];

                if (pointMask[pointIdx] == 1) {
                    newPoints[pointIdx] +=
                        pointList[verts[0]] + pointList[verts[1]] +
                        pointList[verts[2]] + pointList[verts[3]];

                    pointUpdates[pointIdx] = uint8_t(pointUpdates[pointIdx] + 4);
                }
            }
        }

        for (size_t i = 0, I = polygons.numTriangles(); i < I; ++i) {
            openvdb::Vec3I& verts = polygons.triangle(i);

            for (int v = 0; v < 3; ++v) {
                const unsigned pointIdx = verts[v];

                if (pointMask[pointIdx] == 1) {
                    newPoints[pointIdx] +=
                        pointList[verts[0]] + pointList[verts[1]] + pointList[verts[2]];

                    pointUpdates[pointIdx] = uint8_t(pointUpdates[pointIdx] + 3);
                }
            }
        }
    }

    // Replace each flagged point with the mean of the accumulated positions.
    for (size_t n = 0, N = pointListSize; n < N; ++n) {
        if (pointUpdates[n] > 0) {
            const double weight = 1.0 / double(pointUpdates[n]);
            pointList[n] = newPoints[n] * float(weight);
        }
    }
}
} // volume_to_mesh_internal namespace
////////////////////////////////////////
/// @brief Construct an empty polygon pool that owns no quad or triangle storage.
inline
PolygonPool::PolygonPool()
    : mNumQuads(0)
    , mNumTriangles(0)
    , mQuads(nullptr)
    , mTriangles(nullptr)
    , mQuadFlags(nullptr)
    , mTriangleFlags(nullptr)
{
}
/// @brief Construct a pool with uninitialized storage for @a numQuads quads
/// and @a numTriangles triangles, plus one flag byte per primitive.
inline
PolygonPool::PolygonPool(const size_t numQuads, const size_t numTriangles)
    : mNumQuads(numQuads)
    , mNumTriangles(numTriangles)
    , mQuads(new openvdb::Vec4I[mNumQuads])
    , mTriangles(new openvdb::Vec3I[mNumTriangles])
    , mQuadFlags(new char[mNumQuads])
    , mTriangleFlags(new char[mNumTriangles])
{
}
/// @brief Deep-copy all primitives and flags from @a rhs into this pool,
/// reallocating local storage to match the source sizes.
inline void
PolygonPool::copy(const PolygonPool& rhs)
{
    // Reallocate so the destination capacities match the source.
    resetQuads(rhs.numQuads());
    resetTriangles(rhs.numTriangles());

    for (size_t q = 0; q < mNumQuads; ++q) {
        mQuads[q] = rhs.mQuads[q];
        mQuadFlags[q] = rhs.mQuadFlags[q];
    }

    for (size_t t = 0; t < mNumTriangles; ++t) {
        mTriangles[t] = rhs.mTriangles[t];
        mTriangleFlags[t] = rhs.mTriangleFlags[t];
    }
}
/// @brief Discard the current quad storage and allocate @a size uninitialized
/// quads and flag bytes.
inline void
PolygonPool::resetQuads(size_t size)
{
    mNumQuads = size;
    mQuads.reset(new openvdb::Vec4I[size]);
    mQuadFlags.reset(new char[size]);
}
/// @brief Release all quad storage and reset the quad count to zero.
inline void
PolygonPool::clearQuads()
{
    mQuads.reset();
    mQuadFlags.reset();
    mNumQuads = 0;
}
/// @brief Discard the current triangle storage and allocate @a size
/// uninitialized triangles and flag bytes.
inline void
PolygonPool::resetTriangles(size_t size)
{
    mNumTriangles = size;
    mTriangles.reset(new openvdb::Vec3I[size]);
    mTriangleFlags.reset(new char[size]);
}
/// @brief Release all triangle storage and reset the triangle count to zero.
inline void
PolygonPool::clearTriangles()
{
    mTriangles.reset();
    mTriangleFlags.reset();
    mNumTriangles = 0;
}
/// @brief Reduce the number of quads to @a n.
/// @param n          new quad count; must be smaller than the current count
/// @param reallocate if true, shrink (or release) the underlying arrays to fit
/// @return false, without changes, unless @a n is smaller than the current count
inline bool
PolygonPool::trimQuads(const size_t n, bool reallocate)
{
    if (!(n < mNumQuads)) return false;

    if (reallocate) {

        if (n == 0) {
            // Also release the flag array, mirroring clearQuads(); previously
            // it stayed allocated even though the quad count dropped to zero.
            mQuads.reset(nullptr);
            mQuadFlags.reset(nullptr);
        } else {

            std::unique_ptr<openvdb::Vec4I[]> quads(new openvdb::Vec4I[n]);
            std::unique_ptr<char[]> flags(new char[n]);

            // Retain only the leading n primitives and their flags.
            for (size_t i = 0; i < n; ++i) {
                quads[i] = mQuads[i];
                flags[i] = mQuadFlags[i];
            }

            mQuads.swap(quads);
            mQuadFlags.swap(flags);
        }
    }

    mNumQuads = n;
    return true;
}
/// @brief Reduce the number of triangles to @a n.
/// @param n          new triangle count; must be smaller than the current count
/// @param reallocate if true, shrink (or release) the underlying arrays to fit
/// @return false, without changes, unless @a n is smaller than the current count
/// @note The misspelled name ("Trinagles") is preserved for public API
/// compatibility.
inline bool
PolygonPool::trimTrinagles(const size_t n, bool reallocate)
{
    if (!(n < mNumTriangles)) return false;

    if (reallocate) {

        if (n == 0) {
            // Also release the flag array, mirroring clearTriangles();
            // previously it stayed allocated with a zero triangle count.
            mTriangles.reset(nullptr);
            mTriangleFlags.reset(nullptr);
        } else {

            std::unique_ptr<openvdb::Vec3I[]> triangles(new openvdb::Vec3I[n]);
            std::unique_ptr<char[]> flags(new char[n]);

            // Retain only the leading n primitives and their flags.
            for (size_t i = 0; i < n; ++i) {
                triangles[i] = mTriangles[i];
                flags[i] = mTriangleFlags[i];
            }

            mTriangles.swap(triangles);
            mTriangleFlags.swap(flags);
        }
    }

    mNumTriangles = n;
    return true;
}
////////////////////////////////////////
/// @brief Construct a mesher.
/// @param isovalue                  level of the isosurface to extract
/// @param adaptivity                primary mesh adaptivity; values at or
///                                  below 1e-7 produce a uniform mesh
///                                  (see operator())
/// @param relaxDisorientedTriangles if true, smooth triangles that oppose the
///                                  volume gradient (adaptive meshing only)
inline
VolumeToMesh::VolumeToMesh(double isovalue, double adaptivity, bool relaxDisorientedTriangles)
    : mPoints(nullptr)
    , mPolygons()
    , mPointListSize(0)
    , mSeamPointListSize(0)
    , mPolygonPoolListSize(0)
    , mIsovalue(isovalue)
    , mPrimAdaptivity(adaptivity)
    , mSecAdaptivity(0.0)
    , mRefGrid(GridBase::ConstPtr())
    , mSurfaceMaskGrid(GridBase::ConstPtr())
    , mAdaptivityGrid(GridBase::ConstPtr())
    , mAdaptivityMaskTree(TreeBase::ConstPtr())
    , mRefSignTree(TreeBase::Ptr())
    , mRefIdxTree(TreeBase::Ptr())
    , mInvertSurfaceMask(false)
    , mRelaxDisorientedTriangles(relaxDisorientedTriangles)
    , mQuantizedSeamPoints(nullptr)
    , mPointFlags(0)
{
}
/// @brief Set the reference grid used for seam-aware meshing and the
/// secondary (seam-line) adaptivity, discarding any cached reference data.
inline void
VolumeToMesh::setRefGrid(const GridBase::ConstPtr& grid, double secAdaptivity)
{
    mRefGrid = grid;
    mSecAdaptivity = secAdaptivity;

    // Invalidate auxiliary data cached from any previous reference grid;
    // it will be rebuilt lazily on the next meshing pass.
    mQuantizedSeamPoints.reset();
    mSeamPointListSize = 0;
    mRefSignTree.reset();
    mRefIdxTree.reset();
}
/// @brief Set the grid whose active topology masks the surface, optionally
/// inverting the mask.
inline void
VolumeToMesh::setSurfaceMask(const GridBase::ConstPtr& mask, bool invertMask)
{
    mInvertSurfaceMask = invertMask;
    mSurfaceMaskGrid = mask;
}
/// @brief Set a float grid that provides spatially varying adaptivity
/// (consumed in operator() when its type matches the input's float tree).
inline void
VolumeToMesh::setSpatialAdaptivity(const GridBase::ConstPtr& grid)
{
    mAdaptivityGrid = grid;
}
/// @brief Set a bool tree whose active topology marks regions where
/// adaptive meshing is permitted (see operator()).
inline void
VolumeToMesh::setAdaptivityMask(const TreeBase::ConstPtr& tree)
{
    mAdaptivityMaskTree = tree;
}
/// @brief Extract an isosurface mesh from @a inputGrid, storing the result
/// in this object's point list and polygon pool list.
/// @details Stages: surface-voxel detection, optional surface masking,
/// auxiliary sign/index tree construction, optional reference-surface seam
/// handling, optional adaptive region merging, point and polygon generation,
/// and post-processing (triangle relaxation, seam-line quad subdivision and
/// seam-flag cleanup).
template<typename InputGridType>
inline void
VolumeToMesh::operator()(const InputGridType& inputGrid)
{
    // input data types
    using InputTreeType = typename InputGridType::TreeType;
    using InputLeafNodeType = typename InputTreeType::LeafNodeType;
    using InputValueType = typename InputLeafNodeType::ValueType;

    // auxiliary data types
    using FloatTreeType = typename InputTreeType::template ValueConverter<float>::Type;
    using FloatGridType = Grid<FloatTreeType>;
    using BoolTreeType = typename InputTreeType::template ValueConverter<bool>::Type;
    using Int16TreeType = typename InputTreeType::template ValueConverter<Int16>::Type;
    using Int16LeafNodeType = typename Int16TreeType::LeafNodeType;
    using Index32TreeType = typename InputTreeType::template ValueConverter<Index32>::Type;
    using Index32LeafNodeType = typename Index32TreeType::LeafNodeType;

    // clear old data
    mPointListSize = 0;
    mPoints.reset();
    mPolygonPoolListSize = 0;
    mPolygons.reset();
    mPointFlags.clear();

    // settings
    const math::Transform& transform = inputGrid.transform();
    const InputValueType isovalue = InputValueType(mIsovalue);
    const float adaptivityThreshold = float(mPrimAdaptivity);
    const bool adaptive = mPrimAdaptivity > 1e-7 || mSecAdaptivity > 1e-7;

    // The default surface orientation is setup for level set and bool/mask grids.
    // Boolean grids are handled correctly by their value type. Signed distance fields,
    // unsigned distance fields and fog volumes have the same value type but use different
    // inside value classifications.
    const bool invertSurfaceOrientation = (!volume_to_mesh_internal::isBoolValue<InputValueType>()
        && (inputGrid.getGridClass() != openvdb::GRID_LEVEL_SET));

    // references, masks and auxiliary data
    const InputTreeType& inputTree = inputGrid.tree();

    BoolTreeType intersectionTree(false), adaptivityMask(false);

    // Import a user-supplied adaptivity mask, if its tree type matches.
    if (mAdaptivityMaskTree && mAdaptivityMaskTree->type() == BoolTreeType::treeType()) {
        const BoolTreeType *refAdaptivityMask =
            static_cast<const BoolTreeType*>(mAdaptivityMaskTree.get());
        adaptivityMask.topologyUnion(*refAdaptivityMask);
    }

    Int16TreeType signFlagsTree(0);
    Index32TreeType pointIndexTree(std::numeric_limits<Index32>::max());

    // collect auxiliary data
    volume_to_mesh_internal::identifySurfaceIntersectingVoxels(
        intersectionTree, inputTree, isovalue);

    volume_to_mesh_internal::applySurfaceMask(intersectionTree, adaptivityMask,
        inputGrid, mSurfaceMaskGrid, mInvertSurfaceMask, isovalue);

    // Nothing intersects the isosurface: leave the output empty.
    if (intersectionTree.empty()) return;

    volume_to_mesh_internal::computeAuxiliaryData(
        signFlagsTree, pointIndexTree, intersectionTree, inputTree, isovalue);

    intersectionTree.clear();

    std::vector<Index32LeafNodeType*> pointIndexLeafNodes;
    pointIndexTree.getNodes(pointIndexLeafNodes);

    std::vector<Int16LeafNodeType*> signFlagsLeafNodes;
    signFlagsTree.getNodes(signFlagsLeafNodes);

    const tbb::blocked_range<size_t> auxiliaryLeafNodeRange(0, signFlagsLeafNodes.size());

    // optionally collect auxiliary data from a reference volume.
    Int16TreeType* refSignFlagsTree = nullptr;
    Index32TreeType* refPointIndexTree = nullptr;
    InputTreeType const* refInputTree = nullptr;

    if (mRefGrid && mRefGrid->type() == InputGridType::gridType()) {

        const InputGridType* refGrid = static_cast<const InputGridType*>(mRefGrid.get());
        refInputTree = &refGrid->tree();

        if (!mRefSignTree && !mRefIdxTree) {

            // first time, collect and cache auxiliary data.

            typename Int16TreeType::Ptr refSignFlagsTreePt(new Int16TreeType(0));
            typename Index32TreeType::Ptr refPointIndexTreePt(
                new Index32TreeType(std::numeric_limits<Index32>::max()));

            BoolTreeType refIntersectionTree(false);

            volume_to_mesh_internal::identifySurfaceIntersectingVoxels(
                refIntersectionTree, *refInputTree, isovalue);

            volume_to_mesh_internal::computeAuxiliaryData(*refSignFlagsTreePt,
                *refPointIndexTreePt, refIntersectionTree, *refInputTree, isovalue);

            mRefSignTree = refSignFlagsTreePt;
            mRefIdxTree = refPointIndexTreePt;
        }

        if (mRefSignTree && mRefIdxTree) {
            // get cached auxiliary data
            refSignFlagsTree = static_cast<Int16TreeType*>(mRefSignTree.get());
            refPointIndexTree = static_cast<Index32TreeType*>(mRefIdxTree.get());
        }

        if (refSignFlagsTree && refPointIndexTree) {

            // generate seam line sample points
            volume_to_mesh_internal::markSeamLineData(signFlagsTree, *refSignFlagsTree);

            if (mSeamPointListSize == 0) {

                // count unique points on reference surface
                std::vector<Int16LeafNodeType*> refSignFlagsLeafNodes;
                refSignFlagsTree->getNodes(refSignFlagsLeafNodes);

                std::unique_ptr<Index32[]> leafNodeOffsets(
                    new Index32[refSignFlagsLeafNodes.size()]);

                tbb::parallel_for(tbb::blocked_range<size_t>(0, refSignFlagsLeafNodes.size()),
                    volume_to_mesh_internal::LeafNodePointCount<Int16LeafNodeType::LOG2DIM>(
                        refSignFlagsLeafNodes, leafNodeOffsets));

                {
                    // Exclusive prefix sum of the per-leaf point counts.
                    Index32 count = 0;
                    for (size_t n = 0, N = refSignFlagsLeafNodes.size(); n < N; ++n) {
                        const Index32 tmp = leafNodeOffsets[n];
                        leafNodeOffsets[n] = count;
                        count += tmp;
                    }
                    mSeamPointListSize = size_t(count);
                }

                if (mSeamPointListSize != 0) {

                    mQuantizedSeamPoints.reset(new uint32_t[mSeamPointListSize]);
                    memset(mQuantizedSeamPoints.get(), 0, sizeof(uint32_t) * mSeamPointListSize);

                    std::vector<Index32LeafNodeType*> refPointIndexLeafNodes;
                    refPointIndexTree->getNodes(refPointIndexLeafNodes);

                    tbb::parallel_for(tbb::blocked_range<size_t>(0, refPointIndexLeafNodes.size()),
                        volume_to_mesh_internal::MapPoints<Index32LeafNodeType>(
                            refPointIndexLeafNodes, refSignFlagsLeafNodes, leafNodeOffsets));
                }
            }

            if (mSeamPointListSize != 0) {

                tbb::parallel_for(auxiliaryLeafNodeRange,
                    volume_to_mesh_internal::SeamLineWeights<InputTreeType>(
                        signFlagsLeafNodes, inputTree, *refPointIndexTree, *refSignFlagsTree,
                        mQuantizedSeamPoints.get(), isovalue));
            }
        }
    }

    const bool referenceMeshing = refSignFlagsTree && refPointIndexTree && refInputTree;

    // adapt and count unique points
    std::unique_ptr<Index32[]> leafNodeOffsets(new Index32[signFlagsLeafNodes.size()]);

    if (adaptive) {
        volume_to_mesh_internal::MergeVoxelRegions<InputGridType> mergeOp(
            inputGrid, pointIndexTree, pointIndexLeafNodes, signFlagsLeafNodes,
            isovalue, adaptivityThreshold, invertSurfaceOrientation);

        // Optional spatially varying adaptivity from a float grid.
        if (mAdaptivityGrid && mAdaptivityGrid->type() == FloatGridType::gridType()) {
            const FloatGridType* adaptivityGrid =
                static_cast<const FloatGridType*>(mAdaptivityGrid.get());
            mergeOp.setSpatialAdaptivity(*adaptivityGrid);
        }

        if (!adaptivityMask.empty()) {
            mergeOp.setAdaptivityMask(adaptivityMask);
        }

        if (referenceMeshing) {
            mergeOp.setRefSignFlagsData(*refSignFlagsTree, float(mSecAdaptivity));
        }

        tbb::parallel_for(auxiliaryLeafNodeRange, mergeOp);

        volume_to_mesh_internal::AdaptiveLeafNodePointCount<Index32LeafNodeType>
            op(pointIndexLeafNodes, signFlagsLeafNodes, leafNodeOffsets);

        tbb::parallel_for(auxiliaryLeafNodeRange, op);

    } else {

        volume_to_mesh_internal::LeafNodePointCount<Int16LeafNodeType::LOG2DIM>
            op(signFlagsLeafNodes, leafNodeOffsets);

        tbb::parallel_for(auxiliaryLeafNodeRange, op);
    }

    {
        // Exclusive prefix sum: leafNodeOffsets[n] becomes the index of the
        // first point generated by leaf node n.
        Index32 pointCount = 0;
        for (size_t n = 0, N = signFlagsLeafNodes.size(); n < N; ++n) {
            const Index32 tmp = leafNodeOffsets[n];
            leafNodeOffsets[n] = pointCount;
            pointCount += tmp;
        }

        mPointListSize = size_t(pointCount);
        mPoints.reset(new openvdb::Vec3s[mPointListSize]);
        mPointFlags.clear();
    }

    // compute points
    {
        volume_to_mesh_internal::ComputePoints<InputTreeType>
            op(mPoints.get(), inputTree, pointIndexLeafNodes,
                signFlagsLeafNodes, leafNodeOffsets, transform, mIsovalue);

        if (referenceMeshing) {
            mPointFlags.resize(mPointListSize);
            op.setRefData(*refInputTree, *refPointIndexTree, *refSignFlagsTree,
                mQuantizedSeamPoints.get(), &mPointFlags.front());
        }

        tbb::parallel_for(auxiliaryLeafNodeRange, op);
    }

    // compute polygons
    mPolygonPoolListSize = signFlagsLeafNodes.size();
    mPolygons.reset(new PolygonPool[mPolygonPoolListSize]);

    if (adaptive) {

        using PrimBuilder = volume_to_mesh_internal::AdaptivePrimBuilder;

        volume_to_mesh_internal::ComputePolygons<Int16TreeType, PrimBuilder>
            op(signFlagsLeafNodes, signFlagsTree, pointIndexTree,
                mPolygons, invertSurfaceOrientation);

        if (referenceMeshing) {
            op.setRefSignTree(refSignFlagsTree);
        }

        tbb::parallel_for(auxiliaryLeafNodeRange, op);

    } else {

        using PrimBuilder = volume_to_mesh_internal::UniformPrimBuilder;

        volume_to_mesh_internal::ComputePolygons<Int16TreeType, PrimBuilder>
            op(signFlagsLeafNodes, signFlagsTree, pointIndexTree,
                mPolygons, invertSurfaceOrientation);

        if (referenceMeshing) {
            op.setRefSignTree(refSignFlagsTree);
        }

        tbb::parallel_for(auxiliaryLeafNodeRange, op);
    }

    signFlagsTree.clear();
    pointIndexTree.clear();

    // post-processing
    if (adaptive && mRelaxDisorientedTriangles) {
        volume_to_mesh_internal::relaxDisorientedTriangles(invertSurfaceOrientation,
            inputTree, transform, mPolygons, mPolygonPoolListSize, mPoints, mPointListSize);
    }

    if (referenceMeshing) {
        volume_to_mesh_internal::subdivideNonplanarSeamLineQuads(
            mPolygons, mPolygonPoolListSize, mPoints, mPointListSize, mPointFlags);

        volume_to_mesh_internal::reviseSeamLineFlags(mPolygons, mPolygonPoolListSize, mPointFlags);
    }
}
////////////////////////////////////////
//{
/// @cond OPENVDB_VOLUME_TO_MESH_INTERNAL
/// @internal This overload is enabled only for grids with a scalar ValueType.
template<typename GridType>
inline typename std::enable_if<std::is_scalar<typename GridType::ValueType>::value, void>::type
doVolumeToMesh(
const GridType& grid,
std::vector<Vec3s>& points,
std::vector<Vec3I>& triangles,
std::vector<Vec4I>& quads,
double isovalue,
double adaptivity,
bool relaxDisorientedTriangles)
{
VolumeToMesh mesher(isovalue, adaptivity, relaxDisorientedTriangles);
mesher(grid);
// Preallocate the point list
points.clear();
points.resize(mesher.pointListSize());
{ // Copy points
volume_to_mesh_internal::PointListCopy ptnCpy(mesher.pointList(), points);
tbb::parallel_for(tbb::blocked_range<size_t>(0, points.size()), ptnCpy);
mesher.pointList().reset(nullptr);
}
PolygonPoolList& polygonPoolList = mesher.polygonPoolList();
{ // Preallocate primitive lists
size_t numQuads = 0, numTriangles = 0;
for (size_t n = 0, N = mesher.polygonPoolListSize(); n < N; ++n) {
openvdb::tools::PolygonPool& polygons = polygonPoolList[n];
numTriangles += polygons.numTriangles();
numQuads += polygons.numQuads();
}
triangles.clear();
triangles.resize(numTriangles);
quads.clear();
quads.resize(numQuads);
}
// Copy primitives
size_t qIdx = 0, tIdx = 0;
for (size_t n = 0, N = mesher.polygonPoolListSize(); n < N; ++n) {
openvdb::tools::PolygonPool& polygons = polygonPoolList[n];
for (size_t i = 0, I = polygons.numQuads(); i < I; ++i) {
quads[qIdx++] = polygons.quad(i);
}
for (size_t i = 0, I = polygons.numTriangles(); i < I; ++i) {
triangles[tIdx++] = polygons.triangle(i);
}
}
}
/// @internal This overload is enabled only for grids that do not have a scalar ValueType.
template<typename GridType>
inline typename std::enable_if<!std::is_scalar<typename GridType::ValueType>::value, void>::type
doVolumeToMesh(
    const GridType&,        // grid (unused)
    std::vector<Vec3s>&,    // points (unused)
    std::vector<Vec3I>&,    // triangles (unused)
    std::vector<Vec4I>&,    // quads (unused)
    double,                 // isovalue (unused)
    double,                 // adaptivity (unused)
    bool)                   // relaxDisorientedTriangles (unused)
{
    // Vector-valued and other non-scalar grids cannot be meshed.
    OPENVDB_THROW(TypeError, "volume to mesh conversion is supported only for scalar grids");
}
/// @endcond
//}
/// @brief Adaptively mesh a scalar grid, producing points, triangles and quads.
/// @throws TypeError (via doVolumeToMesh) if the grid's value type is not scalar.
template<typename GridType>
inline void
volumeToMesh(
    const GridType& grid,
    std::vector<Vec3s>& points,
    std::vector<Vec3I>& triangles,
    std::vector<Vec4I>& quads,
    double isovalue,
    double adaptivity,
    bool relaxDisorientedTriangles)
{
    // Dispatches on the grid's value type; non-scalar grids throw TypeError.
    doVolumeToMesh(grid, points, triangles, quads, isovalue, adaptivity, relaxDisorientedTriangles);
}
/// @brief Uniformly mesh a scalar grid (adaptivity 0), returning only points
/// and quads; any triangle output is discarded by this overload.
template<typename GridType>
inline void
volumeToMesh(
    const GridType& grid,
    std::vector<Vec3s>& points,
    std::vector<Vec4I>& quads,
    double isovalue)
{
    std::vector<Vec3I> triangles; // discarded: this overload exposes quads only
    doVolumeToMesh(grid, points, triangles, quads, isovalue, 0.0, true);
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_VOLUME_TO_MESH_HAS_BEEN_INCLUDED
| 179,054 | C | 33.021471 | 109 | 0.585237 |
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/LevelSetFilter.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @author Ken Museth
///
/// @file tools/LevelSetFilter.h
///
/// @brief Performs various types of level set deformations with
/// interface tracking. These unrestricted deformations include
/// surface smoothing (e.g., Laplacian flow), filtering (e.g., mean
/// value) and morphological operations (e.g., morphological opening).
/// All these operations can optionally be masked with another grid that
/// acts as an alpha-mask.
#ifndef OPENVDB_TOOLS_LEVELSETFILTER_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_LEVELSETFILTER_HAS_BEEN_INCLUDED
#include "LevelSetTracker.h"
#include "Interpolation.h"
#include <algorithm> // for std::max()
#include <functional>
#include <type_traits>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Filtering (e.g. diffusion) of narrow-band level sets. An
/// optional scalar field can be used to produce a (smooth) alpha mask
/// for the filtering.
///
/// @note This class performs proper interface tracking which allows
/// for unrestricted surface deformations
template<typename GridT,
    typename MaskT = typename GridT::template ValueConverter<float>::Type,
    typename InterruptT = util::NullInterrupter>
class LevelSetFilter : public LevelSetTracker<GridT, InterruptT>
{
public:
    using BaseType = LevelSetTracker<GridT, InterruptT>;
    using GridType = GridT;
    using MaskType = MaskT;
    using TreeType = typename GridType::TreeType;
    using ValueType = typename TreeType::ValueType;
    // Alpha (blend weight) values are derived from the mask grid's value type.
    using AlphaType = typename MaskType::ValueType;
    static_assert(std::is_floating_point<AlphaType>::value,
        "LevelSetFilter requires a mask grid with floating-point values");

    /// @brief Main constructor from a grid
    /// @param grid The level set to be filtered.
    /// @param interrupt Optional interrupter.
    LevelSetFilter(GridType& grid, InterruptT* interrupt = nullptr)
        : BaseType(grid, interrupt)
        , mMinMask(0)
        , mMaxMask(1)
        , mInvertMask(false)
    {
    }

    /// @brief Default destructor
    ~LevelSetFilter() override {}

    /// @brief Return the minimum value of the mask to be used for the
    /// derivation of a smooth alpha value.
    AlphaType minMask() const { return mMinMask; }
    /// @brief Return the maximum value of the mask to be used for the
    /// derivation of a smooth alpha value.
    AlphaType maxMask() const { return mMaxMask; }

    /// @brief Define the range for the (optional) scalar mask.
    /// @param min Minimum value of the range.
    /// @param max Maximum value of the range.
    /// @details Mask values outside the range maps to alpha values of
    /// respectively zero and one, and values inside the range maps
    /// smoothly to 0->1 (unless of course the mask is inverted).
    /// @throw ValueError if @a min is not smaller than @a max.
    void setMaskRange(AlphaType min, AlphaType max)
    {
        if (!(min < max)) OPENVDB_THROW(ValueError, "Invalid mask range (expects min < max)");
        mMinMask = min;
        mMaxMask = max;
    }

    /// @brief Return true if the mask is inverted, i.e. min->max in the
    /// original mask maps to 1->0 in the inverted alpha mask.
    bool isMaskInverted() const { return mInvertMask; }
    /// @brief Invert the optional mask, i.e. min->max in the original
    /// mask maps to 1->0 in the inverted alpha mask.
    void invertMask(bool invert=true) { mInvertMask = invert; }

    /// @brief One iteration of mean-curvature flow of the level set.
    /// @param mask Optional alpha mask.
    void meanCurvature(const MaskType* mask = nullptr)
    {
        Filter f(this, mask); f.meanCurvature();
    }

    /// @brief One iteration of Laplacian flow of the level set.
    /// @param mask Optional alpha mask.
    void laplacian(const MaskType* mask = nullptr)
    {
        Filter f(this, mask); f.laplacian();
    }

    /// @brief One iteration of a fast separable Gaussian filter.
    /// @param width Width of the Gaussian kernel in voxel units.
    /// @param mask Optional alpha mask.
    ///
    /// @note This is approximated as 4 iterations of a separable mean filter
    /// which typically leads an approximation that's better than 95%!
    void gaussian(int width = 1, const MaskType* mask = nullptr)
    {
        Filter f(this, mask); f.gaussian(width);
    }

    /// @brief Offset the level set by the specified (world) distance.
    /// @param offset Value of the offset.
    /// @param mask Optional alpha mask.
    void offset(ValueType offset, const MaskType* mask = nullptr)
    {
        Filter f(this, mask); f.offset(offset);
    }

    /// @brief One iteration of median-value flow of the level set.
    /// @param width Width of the median-value kernel in voxel units.
    /// @param mask Optional alpha mask.
    ///
    /// @warning This filter is not separable and is hence relatively
    /// slow!
    void median(int width = 1, const MaskType* mask = nullptr)
    {
        Filter f(this, mask); f.median(width);
    }

    /// @brief One iteration of mean-value flow of the level set.
    /// @param width Width of the mean-value kernel in voxel units.
    /// @param mask Optional alpha mask.
    ///
    /// @note This filter is separable so it's fast!
    void mean(int width = 1, const MaskType* mask = nullptr)
    {
        Filter f(this, mask); f.mean(width);
    }

private:
    // disallow copy construction and copy by assignment!
    LevelSetFilter(const LevelSetFilter&);// not implemented
    LevelSetFilter& operator=(const LevelSetFilter&);// not implemented

    // Private struct that implements all the filtering.
    // A Filter is a short-lived worker: each public method above constructs
    // one, and its mTask member selects which *Impl kernel cook() runs over
    // the leaf nodes.
    struct Filter
    {
        using LeafT = typename TreeType::LeafNodeType;
        using VoxelIterT = typename LeafT::ValueOnIter;
        using VoxelCIterT = typename LeafT::ValueOnCIter;
        using BufferT = typename tree::LeafManager<TreeType>::BufferType;
        using LeafRange = typename tree::LeafManager<TreeType>::LeafRange;
        using LeafIterT = typename LeafRange::Iterator;
        using AlphaMaskT = tools::AlphaMask<GridT, MaskT>;

        Filter(LevelSetFilter* parent, const MaskType* mask) : mParent(parent), mMask(mask) {}
        Filter(const Filter&) = default;
        virtual ~Filter() {}

        void box(int width);
        void median(int width);
        void mean(int width);
        void gaussian(int width);
        void laplacian();
        void meanCurvature();
        void offset(ValueType value);

        // TBB body: dispatches the currently selected kernel on a leaf range.
        void operator()(const LeafRange& r) const
        {
            if (mTask) mTask(const_cast<Filter*>(this), r);
            else OPENVDB_THROW(ValueError, "task is undefined - don\'t call this method directly");
        }

        // Run mTask over all leaf nodes (in parallel when the grain size is
        // positive) and optionally swap in the auxiliary result buffer.
        void cook(bool swap)
        {
            const int n = mParent->getGrainSize();
            if (n>0) {
                tbb::parallel_for(mParent->leafs().leafRange(n), *this);
            } else {
                (*this)(mParent->leafs().leafRange());
            }
            if (swap) mParent->leafs().swapLeafBuffer(1, n==0);
        }

        // One-dimensional box average of width 2w+1 along the given axis
        // (0 = x, 1 = y, 2 = z).
        template <size_t Axis>
        struct Avg {
            Avg(const GridT& grid, Int32 w) :
                acc(grid.tree()), width(w), frac(1/ValueType(2*w+1)) {}
            inline ValueType operator()(Coord xyz)
            {
                ValueType sum = zeroVal<ValueType>();
                Int32& i = xyz[Axis], j = i + width; // j is the inclusive upper bound
                for (i -= width; i <= j; ++i) sum += acc.getValue(xyz);
                return sum*frac;
            }
            typename GridT::ConstAccessor acc;
            const Int32 width;
            const ValueType frac;
        };

        template<typename AvgT>
        void boxImpl(const LeafRange& r, Int32 w);

        // NOTE(review): the Y and Z labels below are swapped relative to the
        // Avg axis they use (Avg<1> averages along y, Avg<2> along z).  This
        // is harmless because box() always applies all three passes, but the
        // names are misleading.
        void boxXImpl(const LeafRange& r, Int32 w) { this->boxImpl<Avg<0> >(r,w); }
        void boxZImpl(const LeafRange& r, Int32 w) { this->boxImpl<Avg<1> >(r,w); }
        void boxYImpl(const LeafRange& r, Int32 w) { this->boxImpl<Avg<2> >(r,w); }

        void medianImpl(const LeafRange&, int);
        void meanCurvatureImpl(const LeafRange&);
        void laplacianImpl(const LeafRange&);
        void offsetImpl(const LeafRange&, ValueType);

        LevelSetFilter* mParent;   // the filter/tracker that owns the grid
        const MaskType* mMask;     // optional alpha mask (may be null)
        // Kernel to execute; set by the driver methods before calling cook().
        typename std::function<void (Filter*, const LeafRange&)> mTask;
    }; // end of private Filter struct

    AlphaType mMinMask, mMaxMask;  // mask range mapped to alpha 0 -> 1
    bool mInvertMask;              // if true the alpha mapping is reversed
}; // end of LevelSetFilter class
////////////////////////////////////////
/// @brief One pass of median-value flow with interface tracking.
template<typename GridT, typename MaskT, typename InterruptT>
inline void
LevelSetFilter<GridT, MaskT, InterruptT>::Filter::median(int width)
{
    mParent->startInterrupter("Median-value flow of level set");

    // One auxiliary buffer receives the filtered values.
    mParent->leafs().rebuildAuxBuffers(1, mParent->getGrainSize()==0);

    const int w = std::max(1, width);
    mTask = [w](Filter* self, const LeafRange& r) { self->medianImpl(r, w); };
    this->cook(true);

    mParent->track();
    mParent->endInterrupter();
}
/// @brief One pass of mean-value flow: a single separable box filter.
template<typename GridT, typename MaskT, typename InterruptT>
inline void
LevelSetFilter<GridT, MaskT, InterruptT>::Filter::mean(int width)
{
    mParent->startInterrupter("Mean-value flow of level set");

    this->box(width); // box() also performs the interface tracking

    mParent->endInterrupter();
}
/// @brief Approximate Gaussian smoothing via repeated box filtering.
template<typename GridT, typename MaskT, typename InterruptT>
inline void
LevelSetFilter<GridT, MaskT, InterruptT>::Filter::gaussian(int width)
{
    mParent->startInterrupter("Gaussian flow of level set");

    // Four consecutive box-filter passes closely approximate a Gaussian kernel.
    int pass = 0;
    while (pass < 4) {
        this->box(width);
        ++pass;
    }

    mParent->endInterrupter();
}
/// @brief Separable box filter: one averaging sweep per axis, swapping the
/// leaf buffers between sweeps, followed by interface tracking.
template<typename GridT, typename MaskT, typename InterruptT>
inline void
LevelSetFilter<GridT, MaskT, InterruptT>::Filter::box(int width)
{
    mParent->leafs().rebuildAuxBuffers(1, mParent->getGrainSize()==0);

    const int w = std::max(1, width);

    mTask = [w](Filter* self, const LeafRange& r) { self->boxXImpl(r, w); };
    this->cook(true);

    mTask = [w](Filter* self, const LeafRange& r) { self->boxYImpl(r, w); };
    this->cook(true);

    mTask = [w](Filter* self, const LeafRange& r) { self->boxZImpl(r, w); };
    this->cook(true);

    mParent->track();
}
/// @brief One pass of mean-curvature flow with interface tracking.
template<typename GridT, typename MaskT, typename InterruptT>
inline void
LevelSetFilter<GridT, MaskT, InterruptT>::Filter::meanCurvature()
{
    mParent->startInterrupter("Mean-curvature flow of level set");

    mParent->leafs().rebuildAuxBuffers(1, mParent->getGrainSize()==0);

    mTask = [](Filter* self, const LeafRange& r) { self->meanCurvatureImpl(r); };
    this->cook(true);

    mParent->track();
    mParent->endInterrupter();
}
/// @brief One pass of Laplacian flow with interface tracking.
template<typename GridT, typename MaskT, typename InterruptT>
inline void
LevelSetFilter<GridT, MaskT, InterruptT>::Filter::laplacian()
{
    mParent->startInterrupter("Laplacian flow of level set");

    mParent->leafs().rebuildAuxBuffers(1, mParent->getGrainSize()==0);

    mTask = [](Filter* self, const LeafRange& r) { self->laplacianImpl(r); };
    this->cook(true);

    mParent->track();
    mParent->endInterrupter();
}
/// @brief Offset the level set by @a value (world units) in CFL-limited
/// increments, retracking the interface after each step.
template<typename GridT, typename MaskT, typename InterruptT>
inline void
LevelSetFilter<GridT, MaskT, InterruptT>::Filter::offset(ValueType value)
{
    mParent->startInterrupter("Offsetting level set");

    mParent->leafs().removeAuxBuffers();// no auxiliary buffers required

    // Each step moves the surface at most half a voxel so the narrow band
    // remains valid before retracking.
    const ValueType CFL = ValueType(0.5) * mParent->voxelSize(), offset = openvdb::math::Abs(value);
    ValueType dist = 0.0;
    while (offset-dist > ValueType(0.001)*CFL && mParent->checkInterrupter()) {
        const ValueType delta = openvdb::math::Min(offset-dist, CFL);
        dist += delta;

        // copysign restores the sign (direction) of the requested offset.
        mTask = std::bind(&Filter::offsetImpl,
            std::placeholders::_1, std::placeholders::_2, copysign(delta, value));
        this->cook(false);

        mParent->track();
    }

    mParent->endInterrupter();
}
///////////////////////// PRIVATE METHODS //////////////////////
/// Performs parabolic mean-curvature diffusion
template<typename GridT, typename MaskT, typename InterruptT>
inline void
LevelSetFilter<GridT, MaskT, InterruptT>::Filter::meanCurvatureImpl(const LeafRange& range)
{
    mParent->checkInterrupter();
    // Explicit update with a fixed time step dt = dx^2/3.
    //const float CFL = 0.9f, dt = CFL * mDx * mDx / 6.0f;
    const ValueType dx = mParent->voxelSize();
    const ValueType dt = math::Pow2(dx) / ValueType(3.0);
    math::CurvatureStencil<GridType> stencil(mParent->grid(), dx);
    if (mMask) {
        // Masked flow: blend old and updated values with the alpha weights.
        typename AlphaMaskT::FloatType a, b;
        AlphaMaskT alpha(mParent->grid(), *mMask, mParent->minMask(),
                         mParent->maxMask(), mParent->isMaskInverted());
        for (LeafIterT leaf = range.begin(); leaf; ++leaf) {
            ValueType* result = leaf.buffer(1).data();// auxiliary output buffer
            for (VoxelCIterT voxel = leaf->cbeginValueOn(); voxel; ++voxel) {
                if (alpha(voxel.getCoord(), a, b)) {
                    stencil.moveTo(voxel);
                    const ValueType oldV = *voxel;
                    const ValueType newV = oldV + dt*stencil.meanCurvatureNormGrad();
                    result[voxel.pos()] = b * oldV + a * newV;
                }
            }
        }
    } else {
        for (LeafIterT leaf = range.begin(); leaf; ++leaf) {
            ValueType* result = leaf.buffer(1).data();// auxiliary output buffer
            for (VoxelCIterT voxel = leaf->cbeginValueOn(); voxel; ++voxel) {
                stencil.moveTo(voxel);
                result[voxel.pos()] = *voxel + dt*stencil.meanCurvatureNormGrad();
            }
        }
    }
}
/// Performs Laplacian diffusion. Note if the grids contains a true
/// signed distance field (e.g. a solution to the Eikonal equation)
/// Laplacian diffusions (e.g. geometric heat equation) is actually
/// identical to mean curvature diffusion, yet less computationally
/// expensive! In other words if you're performing renormalization
/// anyway (e.g. rebuilding the narrow-band) you should consider
/// performing Laplacian diffusion over mean curvature flow!
template<typename GridT, typename MaskT, typename InterruptT>
inline void
LevelSetFilter<GridT, MaskT, InterruptT>::Filter::laplacianImpl(const LeafRange& range)
{
    mParent->checkInterrupter();
    // Explicit update with a fixed time step dt = dx^2/6.
    //const float CFL = 0.9f, half_dt = CFL * mDx * mDx / 12.0f;
    const ValueType dx = mParent->voxelSize();
    const ValueType dt = math::Pow2(dx) / ValueType(6.0);
    math::GradStencil<GridType> stencil(mParent->grid(), dx);
    if (mMask) {
        // Masked flow: blend old and updated values with the alpha weights.
        typename AlphaMaskT::FloatType a, b;
        AlphaMaskT alpha(mParent->grid(), *mMask, mParent->minMask(),
                         mParent->maxMask(), mParent->isMaskInverted());
        for (LeafIterT leaf = range.begin(); leaf; ++leaf) {
            ValueType* result = leaf.buffer(1).data();// auxiliary output buffer
            for (VoxelCIterT voxel = leaf->cbeginValueOn(); voxel; ++voxel) {
                if (alpha(voxel.getCoord(), a, b)) {
                    stencil.moveTo(voxel);
                    const ValueType oldV = *voxel;
                    const ValueType newV = oldV + dt*stencil.laplacian();
                    result[voxel.pos()] = b * oldV + a * newV;
                }
            }
        }
    } else {
        for (LeafIterT leaf = range.begin(); leaf; ++leaf) {
            ValueType* result = leaf.buffer(1).data();// auxiliary output buffer
            for (VoxelCIterT voxel = leaf->cbeginValueOn(); voxel; ++voxel) {
                stencil.moveTo(voxel);
                result[voxel.pos()] = *voxel + dt*stencil.laplacian();
            }
        }
    }
}
/// Offsets the values by a constant
template<typename GridT, typename MaskT, typename InterruptT>
inline void
LevelSetFilter<GridT, MaskT, InterruptT>::Filter::offsetImpl(
    const LeafRange& range, ValueType offset)
{
    mParent->checkInterrupter();
    // In-place update: no auxiliary buffers, so voxels are mutated directly
    // through a non-const iterator.
    if (mMask) {
        typename AlphaMaskT::FloatType a, b;
        AlphaMaskT alpha(mParent->grid(), *mMask, mParent->minMask(),
                         mParent->maxMask(), mParent->isMaskInverted());
        // Masked: scale the offset by each voxel's alpha weight.
        for (LeafIterT leaf = range.begin(); leaf; ++leaf) {
            for (VoxelIterT voxel = leaf->beginValueOn(); voxel; ++voxel) {
                if (alpha(voxel.getCoord(), a, b)) voxel.setValue(*voxel + a*offset);
            }
        }
    } else {
        for (LeafIterT leaf = range.begin(); leaf; ++leaf) {
            for (VoxelIterT voxel = leaf->beginValueOn(); voxel; ++voxel) {
                voxel.setValue(*voxel + offset);
            }
        }
    }
}
/// Performs simple but slow median-value diffusion
template<typename GridT, typename MaskT, typename InterruptT>
inline void
LevelSetFilter<GridT, MaskT, InterruptT>::Filter::medianImpl(const LeafRange& range, int width)
{
    mParent->checkInterrupter();
    // The dense stencil caches a local neighborhood of the given width.
    typename math::DenseStencil<GridType> stencil(mParent->grid(), width);
    if (mMask) {
        // Masked flow: blend old value and stencil median with alpha weights.
        typename AlphaMaskT::FloatType a, b;
        AlphaMaskT alpha(mParent->grid(), *mMask, mParent->minMask(),
                         mParent->maxMask(), mParent->isMaskInverted());
        for (LeafIterT leaf = range.begin(); leaf; ++leaf) {
            ValueType* result = leaf.buffer(1).data();// auxiliary output buffer
            for (VoxelCIterT voxel = leaf->cbeginValueOn(); voxel; ++voxel) {
                if (alpha(voxel.getCoord(), a, b)) {
                    stencil.moveTo(voxel);
                    result[voxel.pos()] = b * (*voxel) + a * stencil.median();
                }
            }
        }
    } else {
        for (LeafIterT leaf = range.begin(); leaf; ++leaf) {
            ValueType* result = leaf.buffer(1).data();// auxiliary output buffer
            for (VoxelCIterT voxel = leaf->cbeginValueOn(); voxel; ++voxel) {
                stencil.moveTo(voxel);
                result[voxel.pos()] = stencil.median();
            }
        }
    }
}
/// One dimensional convolution of a separable box filter
template<typename GridT, typename MaskT, typename InterruptT>
template <typename AvgT>
inline void
LevelSetFilter<GridT, MaskT, InterruptT>::Filter::boxImpl(const LeafRange& range, Int32 w)
{
    mParent->checkInterrupter();
    AvgT avg(mParent->grid(), w);// axis-specific 1-D averaging operator
    if (mMask) {
        // Masked flow: blend old value and axis average with alpha weights.
        typename AlphaMaskT::FloatType a, b;
        AlphaMaskT alpha(mParent->grid(), *mMask, mParent->minMask(),
                         mParent->maxMask(), mParent->isMaskInverted());
        for (LeafIterT leaf = range.begin(); leaf; ++leaf) {
            ValueType* result = leaf.buffer(1).data();// auxiliary output buffer
            for (VoxelCIterT voxel = leaf->cbeginValueOn(); voxel; ++voxel) {
                const Coord xyz = voxel.getCoord();
                if (alpha(xyz, a, b)) result[voxel.pos()] = b * (*voxel) + a * avg(xyz);
            }
        }
    } else {
        for (LeafIterT leaf = range.begin(); leaf; ++leaf) {
            ValueType* result = leaf.buffer(1).data();// auxiliary output buffer
            for (VoxelCIterT voxel = leaf->cbeginValueOn(); voxel; ++voxel) {
                result[voxel.pos()] = avg(voxel.getCoord());
            }
        }
    }
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_LEVELSETFILTER_HAS_BEEN_INCLUDED
| 19,133 | C | 36.444227 | 100 | 0.629122 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.