NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringPatternGeneratorImpl.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.

#ifndef NVBLASTEXTAUTHORINGPATTERNGENERATORIMPL_H
#define NVBLASTEXTAUTHORINGPATTERNGENERATORIMPL_H

#include "NvBlastExtAuthoringTypes.h"
#include "NvBlastExtAuthoringPatternGenerator.h"

namespace Nv
{
namespace Blast
{

class PatternGeneratorImpl : public PatternGenerator
{
public:
    virtual DamagePattern* generateUniformPattern(const UniformPatternDesc* desc) override;
    virtual DamagePattern* generateBeamPattern(const BeamPatternDesc* desc) override;
    virtual DamagePattern* generateRegularRadialPattern(const RegularRadialPatternDesc* desc) override;
    virtual void release() override;
    virtual DamagePattern* generateVoronoiPattern(uint32_t pointCount, const NvcVec3* points, int32_t interiorMaterialId) override;

private:
    DamagePattern* generateVoronoiPatternInternal(uint32_t pointCount, const NvcVec3* points, int32_t interiorMaterialId, float angle = 0.0f);
};

} // namespace Blast
} // namespace Nv

#endif // ifndef NVBLASTEXTAUTHORINGPATTERNGENERATORIMPL_H
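A minimal usage sketch (not part of the SDK source): instantiate the concrete generator declared above and request a Voronoi damage pattern from explicit seed points. The seed coordinates and interior material id below are illustrative assumptions, and `DamagePattern`/`NvcVec3` are assumed to come from the included authoring headers.

```cpp
#include "NvBlastExtAuthoringPatternGeneratorImpl.h"

// Hypothetical driver code; ownership of the returned pattern follows
// NvBlastExtAuthoringPatternGenerator.h and is not shown here.
Nv::Blast::DamagePattern* makeSampleVoronoiPattern()
{
    // Three illustrative seed points; real tooling would derive these from the damage site.
    const NvcVec3 seeds[] = { { 0.0f, 0.0f, 0.0f }, { 1.0f, 0.0f, 0.0f }, { 0.0f, 1.0f, 0.0f } };

    Nv::Blast::PatternGeneratorImpl generator;
    // interiorMaterialId = 0 is a placeholder value.
    return generator.generateVoronoiPattern(3, seeds, 0);
}
```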
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringBooleanToolImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastGlobals.h" #include "NvBlastExtAuthoringBooleanToolImpl.h" #include "NvBlastExtAuthoringBooleanTool.h" #include "NvBlastExtAuthoringMeshImpl.h" #include "NvBlastExtAuthoringAcceleratorImpl.h" #include <NvBlastNvSharedHelpers.h> #include <math.h> #include <set> #include <algorithm> using nvidia::NvBounds3; namespace Nv { namespace Blast { /* Linear interpolation of vectors */ NV_FORCE_INLINE void vec3Lerp(const NvcVec3& a, const NvcVec3& b, NvcVec3& out, float t) { out.x = (b.x - a.x) * t + a.x; out.y = (b.y - a.y) * t + a.y; out.z = (b.z - a.z) * t + a.z; } NV_FORCE_INLINE void vec2Lerp(const NvcVec2& a, const NvcVec2& b, NvcVec2& out, float t) { out.x = (b.x - a.x) * t + a.x; out.y = (b.y - a.y) * t + a.y; } NV_FORCE_INLINE int32_t BooleanEvaluator::addIfNotExist(const Vertex& p) { mVerticesAggregate.push_back(p); return static_cast<int32_t>(mVerticesAggregate.size()) - 1; } NV_FORCE_INLINE void BooleanEvaluator::addEdgeIfValid(const EdgeWithParent& ed) { mEdgeAggregate.push_back(ed); } /** Vertex level shadowing functions */ NV_FORCE_INLINE int32_t vertexShadowing(const NvcVec3& a, const NvcVec3& b) { return (b.x >= a.x) ? 
1 : 0; } /** Vertex-edge status functions */ NV_FORCE_INLINE int32_t veStatus01(const NvcVec3& sEdge, const NvcVec3& eEdge, const NvcVec3& p) { return vertexShadowing(p, eEdge) - vertexShadowing(p, sEdge); } NV_FORCE_INLINE int32_t veStatus10(const NvcVec3& sEdge, const NvcVec3& eEdge, const NvcVec3& p) { return -vertexShadowing(eEdge, p) + vertexShadowing(sEdge, p); } bool shouldSwap(const NvcVec3& a, const NvcVec3& b) { if (a.x < b.x) return false; if (a.x > b.x) return true; if (a.y < b.y) return false; if (a.y > b.y) return true; if (a.z < b.z) return false; if (a.z > b.z) return true; return false; } /** Vertex-edge shadowing functions */ int32_t shadowing01(Vertex sEdge, Vertex eEdge, const NvcVec3& p, Vertex& onEdgePoint, bool& hasOnEdge) { int32_t winding = veStatus01(sEdge.p, eEdge.p, p); if (sEdge.p.x > eEdge.p.x) { std::swap(sEdge, eEdge); } if (winding != 0) { float t = (p.x - sEdge.p.x) / (eEdge.p.x - sEdge.p.x); if (t >= 1) { onEdgePoint = eEdge; } else if (t <= 0) { onEdgePoint = sEdge; } else { vec3Lerp(sEdge.p, eEdge.p, onEdgePoint.p, t); vec3Lerp(sEdge.n, eEdge.n, onEdgePoint.n, t); vec2Lerp(sEdge.uv[0], eEdge.uv[0], onEdgePoint.uv[0], t); } hasOnEdge = true; if (onEdgePoint.p.y >= p.y) { return winding; } } else { hasOnEdge = false; } return 0; } int32_t shadowing10(Vertex sEdge, Vertex eEdge, const NvcVec3& p, Vertex& onEdgePoint, bool& hasOnEdge) { int32_t winding = veStatus10(sEdge.p, eEdge.p, p); if (sEdge.p.x > eEdge.p.x) { std::swap(sEdge, eEdge); } if (winding != 0) { float t = (p.x - sEdge.p.x) / (eEdge.p.x - sEdge.p.x); if (t >= 1) { onEdgePoint = eEdge; } else if (t <= 0) { onEdgePoint = sEdge; } else { vec3Lerp(sEdge.p, eEdge.p, onEdgePoint.p, t); vec3Lerp(sEdge.n, eEdge.n, onEdgePoint.n, t); vec2Lerp(sEdge.uv[0], eEdge.uv[0], onEdgePoint.uv[0], t); } hasOnEdge = true; if (onEdgePoint.p.y < p.y) { return winding; } } else { hasOnEdge = false; } return 0; } int32_t shadowing01(NvcVec3 sEdge, NvcVec3 eEdge, const NvcVec3& p) { int32_t winding = veStatus01(sEdge, eEdge, p); if (winding != 0) { if (sEdge.x > eEdge.x) { std::swap(sEdge, eEdge); } float t = ((p.x - sEdge.x) / (eEdge.x - sEdge.x)); NvcVec3 onEdgePoint; if (t >= 1) onEdgePoint = eEdge; else if (t <= 0) onEdgePoint = sEdge; else vec3Lerp(sEdge, eEdge, onEdgePoint, t); if (onEdgePoint.y >= p.y) { return winding; } } return 0; } int32_t shadowing10(NvcVec3 sEdge, NvcVec3 eEdge, const NvcVec3& p) { int32_t winding = veStatus10(sEdge, eEdge, p); if (winding != 0) { if (sEdge.x > eEdge.x) { std::swap(sEdge, eEdge); } float t = ((p.x - sEdge.x) / (eEdge.x - sEdge.x)); NvcVec3 onEdgePoint; if (t >= 1) onEdgePoint = eEdge; else if (t <= 0) onEdgePoint = sEdge; else vec3Lerp(sEdge, eEdge, onEdgePoint, t); if (onEdgePoint.y < p.y) { return winding; } } return 0; } /** Vertex-facet shadowing functions */ int32_t vfStatus02(const NvcVec3& p, const Vertex* points, const Edge* edges, int32_t edgesCount, Vertex* out) { int32_t val = 0; Vertex pnt; bool hasOnEdge = false; out[0].p.y = -MAXIMUM_EXTENT; out[1].p.y = MAXIMUM_EXTENT; for (int32_t i = 0; i < edgesCount; ++i) { val -= shadowing01(points[edges->s], points[edges->e], p, pnt, hasOnEdge); if (hasOnEdge != 0) { if (p.y > pnt.p.y && pnt.p.y > out[0].p.y) { out[0] = pnt; } if (p.y <= pnt.p.y && pnt.p.y < out[1].p.y) { out[1] = pnt; } } ++edges; } return val; } int32_t shadowing02(const NvcVec3& p, const Vertex* points, const Edge* edges, int edgesCount, bool& hasOnFacetPoint, Vertex& onFacetPoint) { Vertex outp[2]; int32_t stat = vfStatus02(p, points, edges, 
edgesCount, outp); float z = 0; hasOnFacetPoint = false; if (stat != 0) { Vertex& p1 = outp[0]; Vertex& p2 = outp[1]; NvcVec3 vc = p2.p - p1.p; float t = 0; t = (std::abs(vc.x) > std::abs(vc.y)) ? (p.x - p1.p.x) / vc.x : (p.y - p1.p.y) / vc.y; t = nvidia::NvClamp(t, 0.0f, 1.0f); z = t * vc.z + p1.p.z; hasOnFacetPoint = true; onFacetPoint.p.x = p.x; onFacetPoint.p.y = p.y; onFacetPoint.p.z = z; vec2Lerp(p1.uv[0], p2.uv[0], onFacetPoint.uv[0], t); vec3Lerp(p1.n, p2.n, onFacetPoint.n, t); if (z >= p.z) { return stat; } } return 0; } int32_t vfStatus20(const NvcVec3& p, const Vertex* points, const Edge* edges, int32_t edgesCount, Vertex* out) { int32_t val = 0; Vertex pnt; bool hasOnEdge = false; out[0].p.y = -MAXIMUM_EXTENT; out[1].p.y = MAXIMUM_EXTENT; for (int32_t i = 0; i < edgesCount; ++i) { val += shadowing10(points[edges->s], points[edges->e], p, pnt, hasOnEdge); if (hasOnEdge != 0) { if (p.y > pnt.p.y && pnt.p.y > out[0].p.y) { out[0] = pnt; } if (p.y <= pnt.p.y && pnt.p.y < out[1].p.y) { out[1] = pnt; } } ++edges; } return val; } int32_t shadowing20(const NvcVec3& p, const Vertex* points, const Edge* edges, int edgesCount, bool& hasOnFacetPoint, Vertex& onFacetPoint) { Vertex outp[2]; int32_t stat = vfStatus20(p, points, edges, edgesCount, outp); hasOnFacetPoint = false; if (stat != 0) { Vertex& p1 = outp[0]; Vertex& p2 = outp[1]; NvcVec3 vc = p2.p - p1.p; float t = 0; t = (std::abs(vc.x) > std::abs(vc.y)) ? (p.x - p1.p.x) / vc.x : (p.y - p1.p.y) / vc.y; t = nvidia::NvClamp(t, 0.0f, 1.0f); hasOnFacetPoint = true; onFacetPoint.p.x = p.x; onFacetPoint.p.y = p.y; onFacetPoint.p.z = t * vc.z + p1.p.z; vec2Lerp(p1.uv[0], p2.uv[0], onFacetPoint.uv[0], t); vec3Lerp(p1.n, p2.n, onFacetPoint.n, t); if (onFacetPoint.p.z < p.z) { return stat; } } return 0; } NV_FORCE_INLINE int32_t edgesCrossCheck(const NvcVec3& eAs, const NvcVec3& eAe, const NvcVec3& eBs, const NvcVec3& eBe) { return shadowing01(eBs, eBe, eAe) - shadowing01(eBs, eBe, eAs) + shadowing10(eAs, eAe, eBe) - shadowing10(eAs, eAe, eBs); } int32_t edgesIntersection(const Vertex& eAs, const Vertex& eAe, const Vertex& eBs, const Vertex& eBe, Vertex& intersectionA, Vertex& intersectionB, bool& hasPoints) { int32_t status = edgesCrossCheck(eAs.p, eAe.p, eBs.p, eBe.p); hasPoints = false; if (status == 0) { return 0; } Vertex tempPoint; Vertex bShadowingPair[2]; Vertex aShadowingPair[2]; bool hasOnEdge = false; bool aShadowing = false; bool bShadowing = false; /** Search for two pairs where parts of A shadows B, and where B shadows A. Needed for search intersection point. 
*/ for (auto p : { &eBs, &eBe }) { int32_t shadowingType = shadowing10(eAs, eAe, p->p, tempPoint, hasOnEdge); if (shadowingType == 0 && !aShadowing && hasOnEdge) { aShadowing = true; aShadowingPair[0] = *p; aShadowingPair[1] = tempPoint; } else { if ((shadowingType == 1 || shadowingType == -1) && !bShadowing) { bShadowing = true; bShadowingPair[0] = *p; bShadowingPair[1] = tempPoint; } } } if (!aShadowing || !bShadowing) { for (auto p : { &eAs, &eAe }) { int32_t shadowingType = shadowing01(eBs, eBe, p->p, tempPoint, hasOnEdge); if (shadowingType == 0 && !aShadowing && hasOnEdge) { aShadowing = true; aShadowingPair[1] = *p; aShadowingPair[0] = tempPoint; } else { if ((shadowingType == 1 || shadowingType == -1) && !bShadowing) { bShadowing = true; bShadowingPair[1] = *p; bShadowingPair[0] = tempPoint; } } } } float deltaPlus = bShadowingPair[0].p.y - bShadowingPair[1].p.y; float deltaMinus = aShadowingPair[0].p.y - aShadowingPair[1].p.y; float div = 0; if (deltaPlus > 0) div = deltaPlus / (deltaPlus - deltaMinus); else div = 0; intersectionA.p = bShadowingPair[1].p - div * (bShadowingPair[1].p - aShadowingPair[1].p); intersectionA.n = bShadowingPair[1].n - div * (bShadowingPair[1].n - aShadowingPair[1].n); intersectionA.uv[0] = bShadowingPair[1].uv[0] - (bShadowingPair[1].uv[0] - aShadowingPair[1].uv[0]) * div; intersectionB.p = intersectionA.p; intersectionB.p.z = bShadowingPair[0].p.z - div * (bShadowingPair[0].p.z - aShadowingPair[0].p.z); intersectionB.n = bShadowingPair[0].n - div * (bShadowingPair[0].n - aShadowingPair[0].n); intersectionB.uv[0] = bShadowingPair[0].uv[0] - (bShadowingPair[0].uv[0] - aShadowingPair[0].uv[0]) * div; hasPoints = true; return status; } NV_FORCE_INLINE int32_t edgeEdgeShadowing(const Vertex& eAs, const Vertex& eAe, const Vertex& eBs, const Vertex& eBe, Vertex& intersectionA, Vertex& intersectionB, bool& hasPoints) { int32_t status = edgesIntersection(eAs, eAe, eBs, eBe, intersectionA, intersectionB, hasPoints); if (intersectionB.p.z >= intersectionA.p.z) { return status; } return 0; } int32_t edgeFacetIntersection12(const Vertex& edSt, const Vertex& edEnd, const Vertex* points, const Edge* edges, int edgesCount, Vertex& intersectionA, Vertex& intersectionB) { int32_t status = 0; Vertex p1, p2; Vertex bShadowingPair[2]; Vertex aShadowingPair[2]; bool hasPoint = false; bool aShadowing = false; bool bShadowing = false; int32_t mlt = -1; int32_t shadowingType; for (auto p : { &edEnd, &edSt }) { shadowingType = shadowing02(p->p, points, edges, edgesCount, hasPoint, p1); status += mlt * shadowingType; if (shadowingType == 0 && !aShadowing && hasPoint) { aShadowing = true; aShadowingPair[0] = p1; aShadowingPair[1] = *p; } else if ((shadowingType == 1 || shadowingType == -1) && !bShadowing) { bShadowing = true; bShadowingPair[0] = p1; bShadowingPair[1] = *p; } mlt = 1; } for (int32_t ed = 0; ed < edgesCount; ++ed) { if (shouldSwap(points[edges[ed].s].p, points[edges[ed].e].p)) { shadowingType = -edgeEdgeShadowing(edSt, edEnd, points[edges[ed].e], points[edges[ed].s], p1, p2, hasPoint); } else { shadowingType = edgeEdgeShadowing(edSt, edEnd, points[edges[ed].s], points[edges[ed].e], p1, p2, hasPoint); } status -= shadowingType; if (shadowingType == 0 && !aShadowing && hasPoint) { aShadowing = true; aShadowingPair[0] = p2; aShadowingPair[1] = p1; } else if ((shadowingType == 1 || shadowingType == -1) && !bShadowing) { bShadowing = true; bShadowingPair[0] = p2; bShadowingPair[1] = p1; } } if (!status || !bShadowing || !aShadowing) { return 0; } float deltaPlus = 
bShadowingPair[0].p.z - bShadowingPair[1].p.z; float div = 0; if (deltaPlus != 0) { float deltaMinus = aShadowingPair[0].p.z - aShadowingPair[1].p.z; div = deltaPlus / (deltaPlus - deltaMinus); } intersectionA.p = bShadowingPair[1].p - div * (bShadowingPair[1].p - aShadowingPair[1].p); intersectionA.n = bShadowingPair[1].n - div * (bShadowingPair[1].n - aShadowingPair[1].n); intersectionA.uv[0] = bShadowingPair[1].uv[0] - (bShadowingPair[1].uv[0] - aShadowingPair[1].uv[0]) * div; intersectionB.p = intersectionA.p; intersectionB.n = bShadowingPair[0].n - div * (bShadowingPair[0].n - aShadowingPair[0].n); intersectionB.uv[0] = bShadowingPair[0].uv[0] - (bShadowingPair[0].uv[0] - aShadowingPair[0].uv[0]) * div; return status; } int32_t edgeFacetIntersection21(const Vertex& edSt, const Vertex& edEnd, const Vertex* points, const Edge* edges, int edgesCount, Vertex& intersectionA, Vertex& intersectionB) { int32_t status = 0; Vertex p1, p2; Vertex bShadowingPair[2]; Vertex aShadowingPair[2]; bool hasPoint = false; bool aShadowing = false; bool bShadowing = false; int32_t shadowingType; int32_t mlt = 1; for (auto p : { &edEnd, &edSt }) { shadowingType = shadowing20(p->p, points, edges, edgesCount, hasPoint, p1); status += mlt * shadowingType; if (shadowingType == 0 && !aShadowing && hasPoint) { aShadowing = true; aShadowingPair[0] = *p; aShadowingPair[1] = p1; } else if ((shadowingType == 1 || shadowingType == -1) && !bShadowing) { bShadowing = true; bShadowingPair[0] = *p; bShadowingPair[1] = p1; } mlt = -1; } for (int32_t ed = 0; ed < edgesCount; ++ed) { if (shouldSwap(points[edges[ed].s].p, points[edges[ed].e].p)) { shadowingType = -edgeEdgeShadowing(points[edges[ed].e], points[edges[ed].s], edSt, edEnd, p1, p2, hasPoint); } else { shadowingType = edgeEdgeShadowing(points[edges[ed].s], points[edges[ed].e], edSt, edEnd, p1, p2, hasPoint); } status -= shadowingType; if (shadowingType == 0) { if (!aShadowing && hasPoint) { aShadowing = true; aShadowingPair[0] = p2; aShadowingPair[1] = p1; } } else { if ((shadowingType == 1 || shadowingType == -1) && !bShadowing) { bShadowing = true; bShadowingPair[0] = p2; bShadowingPair[1] = p1; } } } if (!status || !bShadowing || !aShadowing) { return 0; } float deltaPlus = bShadowingPair[0].p.z - bShadowingPair[1].p.z; float div = 0; if (deltaPlus != 0) { float deltaMinus = aShadowingPair[0].p.z - aShadowingPair[1].p.z; div = deltaPlus / (deltaPlus - deltaMinus); } intersectionA.p = bShadowingPair[1].p - div * (bShadowingPair[1].p - aShadowingPair[1].p); intersectionA.n = bShadowingPair[1].n - div * (bShadowingPair[1].n - aShadowingPair[1].n); intersectionA.uv[0] = bShadowingPair[1].uv[0] - (bShadowingPair[1].uv[0] - aShadowingPair[1].uv[0]) * div; intersectionB.p = intersectionA.p; intersectionB.n = bShadowingPair[0].n - div * (bShadowingPair[0].n - aShadowingPair[0].n); intersectionB.uv[0] = bShadowingPair[0].uv[0] - (bShadowingPair[0].uv[0] - aShadowingPair[0].uv[0]) * div; return status; } int32_t BooleanEvaluator::vertexMeshStatus03(const NvcVec3& p, const Mesh* mesh) { int32_t status = 0; Vertex pnt; bool hasPoint = false; mAcceleratorB->setState(p); int32_t facet = mAcceleratorB->getNextFacet(); while (facet != -1) { const Edge* ed = mesh->getEdges() + mesh->getFacet(facet)->firstEdgeNumber; status += shadowing02(p, mesh->getVertices(), ed, mesh->getFacet(facet)->edgesCount, hasPoint, pnt); facet = mAcceleratorB->getNextFacet(); } return status; } int32_t BooleanEvaluator::vertexMeshStatus30(const NvcVec3& p, const Mesh* mesh) { int32_t status = 0; bool 
hasPoints = false; Vertex point; mAcceleratorA->setState(p); int32_t facet = mAcceleratorA->getNextFacet(); while ( facet != -1) { const Edge* ed = mesh->getEdges() + mesh->getFacet(facet)->firstEdgeNumber; status -= shadowing20(p, mesh->getVertices(), ed, mesh->getFacet(facet)->edgesCount, hasPoints, point); facet = mAcceleratorA->getNextFacet(); } return status; } NV_FORCE_INLINE int32_t inclusionValue03(const BooleanConf& conf, int32_t xValue) { return conf.ca + conf.ci * xValue; } NV_FORCE_INLINE int32_t inclusionValueEdgeFace(const BooleanConf& conf, int32_t xValue) { return conf.ci * xValue; } NV_FORCE_INLINE int32_t inclusionValue30(const BooleanConf& conf, int32_t xValue) { return conf.cb + conf.ci * xValue; } struct VertexComparator { VertexComparator(NvcVec3 base = NvcVec3()) : basePoint(base) {}; NvcVec3 basePoint; bool operator()(const Vertex& a, const Vertex& b) { return ((b.p - a.p) | basePoint) > 0.0; } }; struct VertexPairComparator { VertexPairComparator(NvcVec3 base = NvcVec3()) : basePoint(base) {}; NvcVec3 basePoint; bool operator()(const std::pair<Vertex, Vertex>& a, const std::pair<Vertex, Vertex>& b) { return ((b.first.p - a.first.p) | basePoint) > 0.0; } }; int32_t BooleanEvaluator::isPointContainedInMesh(const Mesh* msh, const NvcVec3& point) { if (msh == nullptr) { return 0; } DummyAccelerator dmAccel(msh->getFacetCount()); mAcceleratorA = &dmAccel; return vertexMeshStatus30(point, msh); } int32_t BooleanEvaluator::isPointContainedInMesh(const Mesh* msh, SpatialAccelerator* spAccel, const NvcVec3& point) { if (msh == nullptr) { return 0; } mAcceleratorA = spAccel; return vertexMeshStatus30(point, msh); } void BooleanEvaluator::buildFaceFaceIntersections(const BooleanConf& mode) { int32_t statusValue = 0; int32_t inclusionValue = 0; std::vector<std::pair<Vertex, Vertex> > retainedStarts; std::vector<std::pair<Vertex, Vertex>> retainedEnds; VertexPairComparator comp; Vertex newPointA; Vertex newPointB; const Vertex* meshAPoints = mMeshA->getVertices(); const Vertex* meshBPoints = mMeshB->getVertices(); EdgeWithParent newEdge; mEdgeFacetIntersectionData12.clear(); mEdgeFacetIntersectionData21.clear(); mEdgeFacetIntersectionData12.resize(mMeshA->getFacetCount()); mEdgeFacetIntersectionData21.resize(mMeshB->getFacetCount()); for (uint32_t facetB = 0; facetB < mMeshB->getFacetCount(); ++facetB) { mAcceleratorA->setState(meshBPoints, mMeshB->getEdges(), *mMeshB->getFacet(facetB)); int32_t facetA = mAcceleratorA->getNextFacet(); while (facetA != -1) { const Edge* facetBEdges = mMeshB->getEdges() + mMeshB->getFacet(facetB)->firstEdgeNumber; const Edge* facetAEdges = mMeshA->getEdges() + mMeshA->getFacet(facetA)->firstEdgeNumber; const Edge* fbe = facetBEdges; const Edge* fae = facetAEdges; retainedStarts.clear(); retainedEnds.clear(); NvcVec3 compositeEndPoint = {0, 0, 0}; NvcVec3 compositeStartPoint = {0, 0, 0}; uint32_t facetAEdgeCount = mMeshA->getFacet(facetA)->edgesCount; uint32_t facetBEdgeCount = mMeshB->getFacet(facetB)->edgesCount; int32_t ic = 0; for (uint32_t i = 0; i < facetAEdgeCount; ++i) { if (shouldSwap(meshAPoints[fae->e].p, meshAPoints[fae->s].p)) { statusValue = -edgeFacetIntersection12(meshAPoints[fae->e], meshAPoints[fae->s], meshBPoints, facetBEdges, facetBEdgeCount, newPointA, newPointB); } else { statusValue = edgeFacetIntersection12(meshAPoints[fae->s], meshAPoints[fae->e], meshBPoints, facetBEdges, facetBEdgeCount, newPointA, newPointB); } inclusionValue = -inclusionValueEdgeFace(mode, statusValue); if (inclusionValue > 0) { for (ic = 0; ic < 
inclusionValue; ++ic) { retainedEnds.push_back(std::make_pair(newPointA, newPointB)); compositeEndPoint = compositeEndPoint + newPointA.p; } mEdgeFacetIntersectionData12[facetA].push_back(EdgeFacetIntersectionData(i, statusValue, newPointA)); } if (inclusionValue < 0) { for (ic = 0; ic < -inclusionValue; ++ic) { retainedStarts.push_back(std::make_pair(newPointA, newPointB)); compositeStartPoint = compositeStartPoint + newPointA.p; } mEdgeFacetIntersectionData12[facetA].push_back(EdgeFacetIntersectionData(i, statusValue, newPointA)); } fae++; } for (uint32_t i = 0; i < facetBEdgeCount; ++i) { if (shouldSwap(meshBPoints[fbe->e].p, meshBPoints[fbe->s].p)) { statusValue = -edgeFacetIntersection21(meshBPoints[fbe->e], meshBPoints[fbe->s], meshAPoints, facetAEdges, facetAEdgeCount, newPointA, newPointB); } else { statusValue = edgeFacetIntersection21(meshBPoints[fbe->s], meshBPoints[fbe->e], meshAPoints, facetAEdges, facetAEdgeCount, newPointA, newPointB); } inclusionValue = inclusionValueEdgeFace(mode, statusValue); if (inclusionValue > 0) { for (ic = 0; ic < inclusionValue; ++ic) { retainedEnds.push_back(std::make_pair(newPointA, newPointB)); compositeEndPoint = compositeEndPoint + newPointB.p; } mEdgeFacetIntersectionData21[facetB].push_back(EdgeFacetIntersectionData( i, statusValue, newPointB)); } if (inclusionValue < 0) { for (ic = 0; ic < -inclusionValue; ++ic) { retainedStarts.push_back(std::make_pair(newPointA, newPointB)); compositeStartPoint = compositeStartPoint + newPointB.p; } mEdgeFacetIntersectionData21[facetB].push_back(EdgeFacetIntersectionData(i, statusValue, newPointB)); } fbe++; } if (retainedStarts.size() != retainedEnds.size()) { NVBLAST_LOG_ERROR("Not equal number of starting and ending vertices! Probably input mesh has open edges."); return; } for (uint32_t rv = 0; rv < retainedStarts.size(); ++rv) { newEdge.s = addIfNotExist(retainedStarts[rv].first); newEdge.e = addIfNotExist(retainedEnds[rv].first); newEdge.parent = facetA; addEdgeIfValid(newEdge); newEdge.parent = facetB + mMeshA->getFacetCount(); newEdge.e = addIfNotExist(retainedStarts[rv].second); newEdge.s = addIfNotExist(retainedEnds[rv].second); addEdgeIfValid(newEdge); } facetA = mAcceleratorA->getNextFacet(); } // while (*iter != -1) } // for (uint32_t facetB = 0; facetB < mMeshB->getFacetCount(); ++facetB) } void BooleanEvaluator::buildFastFaceFaceIntersection(const BooleanConf& mode) { int32_t statusValue = 0; int32_t inclusionValue = 0; std::vector<std::pair<Vertex, Vertex> > retainedStarts; std::vector<std::pair<Vertex, Vertex>> retainedEnds; VertexPairComparator comp; Vertex newPointA; Vertex newPointB; const Vertex* meshAPoints = mMeshA->getVertices(); const Vertex* meshBPoints = mMeshB->getVertices(); EdgeWithParent newEdge; mEdgeFacetIntersectionData12.clear(); mEdgeFacetIntersectionData21.clear(); mEdgeFacetIntersectionData12.resize(mMeshA->getFacetCount()); mEdgeFacetIntersectionData21.resize(mMeshB->getFacetCount()); for (uint32_t facetA = 0; facetA < mMeshA->getFacetCount(); ++facetA) { const Edge* facetAEdges = mMeshA->getEdges() + mMeshA->getFacet(facetA)->firstEdgeNumber; int32_t facetB = 0; const Edge* facetBEdges = mMeshB->getEdges() + mMeshB->getFacet(facetB)->firstEdgeNumber; const Edge* fae = facetAEdges; retainedStarts.clear(); retainedEnds.clear(); NvcVec3 compositeEndPoint = {0, 0, 0}; NvcVec3 compositeStartPoint = {0, 0, 0}; uint32_t facetAEdgeCount = mMeshA->getFacet(facetA)->edgesCount; uint32_t facetBEdgeCount = mMeshB->getFacet(facetB)->edgesCount; int32_t ic = 0; for (uint32_t i = 
0; i < facetAEdgeCount; ++i) { if (shouldSwap(meshAPoints[fae->e].p, meshAPoints[fae->s].p)) { statusValue = -edgeFacetIntersection12(meshAPoints[fae->e], meshAPoints[fae->s], meshBPoints, facetBEdges, facetBEdgeCount, newPointA, newPointB); } else { statusValue = edgeFacetIntersection12(meshAPoints[fae->s], meshAPoints[fae->e], meshBPoints, facetBEdges, facetBEdgeCount, newPointA, newPointB); } inclusionValue = -inclusionValueEdgeFace(mode, statusValue); if (inclusionValue > 0) { for (ic = 0; ic < inclusionValue; ++ic) { retainedEnds.push_back(std::make_pair(newPointA, newPointB)); compositeEndPoint = compositeEndPoint + newPointA.p; } mEdgeFacetIntersectionData12[facetA].push_back(EdgeFacetIntersectionData(i, statusValue, newPointA)); } if (inclusionValue < 0) { for (ic = 0; ic < -inclusionValue; ++ic) { retainedStarts.push_back(std::make_pair(newPointA, newPointB)); compositeStartPoint = compositeStartPoint + newPointA.p; } mEdgeFacetIntersectionData12[facetA].push_back(EdgeFacetIntersectionData(i, statusValue, newPointA)); } fae++; } if (retainedStarts.size() != retainedEnds.size()) { NVBLAST_LOG_ERROR("Not equal number of starting and ending vertices! Probably input mesh has open edges."); return; } if (retainedStarts.size() > 1) { comp.basePoint = compositeEndPoint - compositeStartPoint; std::sort(retainedStarts.begin(), retainedStarts.end(), comp); std::sort(retainedEnds.begin(), retainedEnds.end(), comp); } for (uint32_t rv = 0; rv < retainedStarts.size(); ++rv) { newEdge.s = addIfNotExist(retainedStarts[rv].first); newEdge.e = addIfNotExist(retainedEnds[rv].first); newEdge.parent = facetA; addEdgeIfValid(newEdge); newEdge.parent = facetB + mMeshA->getFacetCount(); newEdge.e = addIfNotExist(retainedStarts[rv].second); newEdge.s = addIfNotExist(retainedEnds[rv].second); addEdgeIfValid(newEdge); } } } void BooleanEvaluator::collectRetainedPartsFromA(const BooleanConf& mode) { int32_t statusValue = 0; int32_t inclusionValue = 0; const Vertex* vertices = mMeshA->getVertices(); VertexComparator comp; const NvBounds3& bMeshBoudning = toNvShared(mMeshB->getBoundingBox()); const Edge* facetEdges = mMeshA->getEdges(); std::vector<Vertex> retainedStartVertices; std::vector<Vertex> retainedEndVertices; retainedStartVertices.reserve(255); retainedEndVertices.reserve(255); int32_t ic = 0; for (uint32_t facetId = 0; facetId < mMeshA->getFacetCount(); ++facetId) { retainedStartVertices.clear(); retainedEndVertices.clear(); for (uint32_t i = 0; i < mMeshA->getFacet(facetId)->edgesCount; ++i) { NvcVec3 compositeEndPoint = {0, 0, 0}; NvcVec3 compositeStartPoint = {0, 0, 0}; int32_t lastPos = static_cast<int32_t>(retainedEndVertices.size()); /* Test start and end point of edge against mesh */ if (bMeshBoudning.contains(toNvShared(vertices[facetEdges->s].p))) { statusValue = vertexMeshStatus03(vertices[facetEdges->s].p, mMeshB); } else { statusValue = 0; } inclusionValue = -inclusionValue03(mode, statusValue); if (inclusionValue > 0) { for (ic = 0; ic < inclusionValue; ++ic) { retainedEndVertices.push_back(vertices[facetEdges->s]); compositeEndPoint = compositeEndPoint + vertices[facetEdges->s].p; } } else if (inclusionValue < 0) { for (ic = 0; ic < -inclusionValue; ++ic) { retainedStartVertices.push_back(vertices[facetEdges->s]); compositeStartPoint = compositeStartPoint + vertices[facetEdges->s].p; } } if (bMeshBoudning.contains(toNvShared(vertices[facetEdges->e].p))) { statusValue = vertexMeshStatus03(vertices[facetEdges->e].p, mMeshB); } else { statusValue = 0; } inclusionValue = 
inclusionValue03(mode, statusValue); if (inclusionValue > 0) { for (ic = 0; ic < inclusionValue; ++ic) { retainedEndVertices.push_back(vertices[facetEdges->e]); compositeEndPoint = compositeEndPoint + vertices[facetEdges->e].p; } } else if (inclusionValue < 0) { for (ic = 0; ic < -inclusionValue; ++ic) { retainedStartVertices.push_back(vertices[facetEdges->e]); compositeStartPoint = compositeStartPoint + vertices[facetEdges->e].p; } } /* Test edge intersection with mesh*/ for (uint32_t intrs = 0; intrs < mEdgeFacetIntersectionData12[facetId].size(); ++intrs) { const EdgeFacetIntersectionData& intr = mEdgeFacetIntersectionData12[facetId][intrs]; if (intr.edId != (int32_t)i) continue; inclusionValue = inclusionValueEdgeFace(mode, intr.intersectionType); if (inclusionValue > 0) { for (ic = 0; ic < inclusionValue; ++ic) { retainedEndVertices.push_back(intr.intersectionPoint); compositeEndPoint = compositeEndPoint + intr.intersectionPoint.p; } } else if (inclusionValue < 0) { for (ic = 0; ic < -inclusionValue; ++ic) { retainedStartVertices.push_back(intr.intersectionPoint); compositeStartPoint = compositeStartPoint + intr.intersectionPoint.p; } } } facetEdges++; if (retainedStartVertices.size() != retainedEndVertices.size()) { NVBLAST_LOG_ERROR("Not equal number of starting and ending vertices! Probably input mesh has open edges."); return; } if (retainedEndVertices.size() - lastPos > 1) { comp.basePoint = compositeEndPoint - compositeStartPoint; std::sort(retainedStartVertices.begin() + lastPos, retainedStartVertices.end(), comp); std::sort(retainedEndVertices.begin() + lastPos, retainedEndVertices.end(), comp); } } EdgeWithParent newEdge; for (uint32_t rv = 0; rv < retainedStartVertices.size(); ++rv) { newEdge.s = addIfNotExist(retainedStartVertices[rv]); newEdge.e = addIfNotExist(retainedEndVertices[rv]); newEdge.parent = facetId; addEdgeIfValid(newEdge); } } return; } void BooleanEvaluator::collectRetainedPartsFromB(const BooleanConf& mode) { int32_t statusValue = 0; int32_t inclusionValue = 0; const Vertex* vertices = mMeshB->getVertices(); VertexComparator comp; const NvBounds3& aMeshBoudning = toNvShared(mMeshA->getBoundingBox()); const Edge* facetEdges = mMeshB->getEdges(); std::vector<Vertex> retainedStartVertices; std::vector<Vertex> retainedEndVertices; retainedStartVertices.reserve(255); retainedEndVertices.reserve(255); int32_t ic = 0; for (uint32_t facetId = 0; facetId < mMeshB->getFacetCount(); ++facetId) { retainedStartVertices.clear(); retainedEndVertices.clear(); for (uint32_t i = 0; i < mMeshB->getFacet(facetId)->edgesCount; ++i) { NvcVec3 compositeEndPoint = {0, 0, 0}; NvcVec3 compositeStartPoint = {0, 0, 0}; int32_t lastPos = static_cast<int32_t>(retainedEndVertices.size()); if (aMeshBoudning.contains(toNvShared(vertices[facetEdges->s].p))) { statusValue = vertexMeshStatus30(vertices[facetEdges->s].p, mMeshA); } else { statusValue = 0; } inclusionValue = -inclusionValue30(mode, statusValue); if (inclusionValue > 0) { for (ic = 0; ic < inclusionValue; ++ic) { retainedEndVertices.push_back(vertices[facetEdges->s]); compositeEndPoint = compositeEndPoint + vertices[facetEdges->s].p; } } else if (inclusionValue < 0) { for (ic = 0; ic < -inclusionValue; ++ic) { retainedStartVertices.push_back(vertices[facetEdges->s]); compositeStartPoint = compositeStartPoint + vertices[facetEdges->s].p; } } if (aMeshBoudning.contains(toNvShared(vertices[facetEdges->e].p))) { statusValue = vertexMeshStatus30(vertices[facetEdges->e].p, mMeshA); } else { statusValue = 0; } inclusionValue = 
inclusionValue30(mode, statusValue); if (inclusionValue > 0) { for (ic = 0; ic < inclusionValue; ++ic) { retainedEndVertices.push_back(vertices[facetEdges->e]); compositeEndPoint = compositeEndPoint + vertices[facetEdges->e].p; } } else if (inclusionValue < 0) { for (ic = 0; ic < -inclusionValue; ++ic) { retainedStartVertices.push_back(vertices[facetEdges->e]); compositeStartPoint = compositeStartPoint + vertices[facetEdges->e].p; } } for (uint32_t intrs = 0; intrs < mEdgeFacetIntersectionData21[facetId].size(); ++intrs) { const EdgeFacetIntersectionData& intr = mEdgeFacetIntersectionData21[facetId][intrs]; if (intr.edId != (int32_t)i) continue; inclusionValue = inclusionValueEdgeFace(mode, intr.intersectionType); if (inclusionValue > 0) { for (ic = 0; ic < inclusionValue; ++ic) { retainedEndVertices.push_back(intr.intersectionPoint); compositeEndPoint = compositeEndPoint + intr.intersectionPoint.p; } } else if (inclusionValue < 0) { for (ic = 0; ic < -inclusionValue; ++ic) { retainedStartVertices.push_back(intr.intersectionPoint); compositeStartPoint = compositeStartPoint + intr.intersectionPoint.p; } } } facetEdges++; if (retainedStartVertices.size() != retainedEndVertices.size()) { NVBLAST_LOG_ERROR("Not equal number of starting and ending vertices! Probably input mesh has open edges."); return; } if (retainedEndVertices.size() - lastPos > 1) { comp.basePoint = compositeEndPoint - compositeStartPoint; std::sort(retainedStartVertices.begin() + lastPos, retainedStartVertices.end(), comp); std::sort(retainedEndVertices.begin() + lastPos, retainedEndVertices.end(), comp); } } EdgeWithParent newEdge; for (uint32_t rv = 0; rv < retainedStartVertices.size(); ++rv) { newEdge.s = addIfNotExist(retainedStartVertices[rv]); newEdge.e = addIfNotExist(retainedEndVertices[rv]); newEdge.parent = facetId + mMeshA->getFacetCount(); addEdgeIfValid(newEdge); } } return; } bool EdgeWithParentSortComp(const EdgeWithParent& a, const EdgeWithParent& b) { return a.parent < b.parent; } void BooleanEvaluator::performBoolean(const Mesh* meshA, const Mesh* meshB, SpatialAccelerator* spAccelA, SpatialAccelerator* spAccelB, const BooleanConf& mode) { reset(); mMeshA = meshA; mMeshB = meshB; mAcceleratorA = spAccelA; mAcceleratorB = spAccelB; buildFaceFaceIntersections(mode); collectRetainedPartsFromA(mode); collectRetainedPartsFromB(mode); mAcceleratorA = nullptr; mAcceleratorB = nullptr; } void BooleanEvaluator::performBoolean(const Mesh* meshA, const Mesh* meshB, const BooleanConf& mode) { reset(); mMeshA = meshA; mMeshB = meshB; DummyAccelerator ac = DummyAccelerator(mMeshA->getFacetCount()); DummyAccelerator bc = DummyAccelerator(mMeshB->getFacetCount()); performBoolean(meshA, meshB, &ac, &bc, mode); } void BooleanEvaluator::performFastCutting(const Mesh* meshA, const Mesh* meshB, SpatialAccelerator* spAccelA, SpatialAccelerator* spAccelB, const BooleanConf& mode) { reset(); mMeshA = meshA; mMeshB = meshB; mAcceleratorA = spAccelA; mAcceleratorB = spAccelB; buildFastFaceFaceIntersection(mode); collectRetainedPartsFromA(mode); mAcceleratorA = nullptr; mAcceleratorB = nullptr; } void BooleanEvaluator::performFastCutting(const Mesh* meshA, const Mesh* meshB, const BooleanConf& mode) { reset(); mMeshA = meshA; mMeshB = meshB; DummyAccelerator ac = DummyAccelerator(mMeshA->getFacetCount()); DummyAccelerator bc = DummyAccelerator(mMeshB->getFacetCount()); performFastCutting(meshA, meshB, &ac, &bc, mode); } BooleanEvaluator::BooleanEvaluator() { mMeshA = nullptr; mMeshB = nullptr; mAcceleratorA = nullptr; mAcceleratorB 
= nullptr; } BooleanEvaluator::~BooleanEvaluator() { reset(); } Mesh* BooleanEvaluator::createNewMesh() { if (mEdgeAggregate.size() == 0) { return nullptr; } std::sort(mEdgeAggregate.begin(), mEdgeAggregate.end(), EdgeWithParentSortComp); std::vector<Facet> newFacets; std::vector<Edge> newEdges(mEdgeAggregate.size()); int32_t lastPos = 0; uint32_t lastParent = mEdgeAggregate[0].parent; uint32_t collected = 0; int64_t userData = 0; int32_t materialId = 0; int32_t smoothingGroup = 0; for (uint32_t i = 0; i < mEdgeAggregate.size(); ++i) { if (mEdgeAggregate[i].parent != lastParent) { if (lastParent < mMeshA->getFacetCount()) { userData = mMeshA->getFacet(lastParent)->userData; materialId = mMeshA->getFacet(lastParent)->materialId; smoothingGroup = mMeshA->getFacet(lastParent)->smoothingGroup; } else { userData = mMeshB->getFacet(lastParent - mMeshA->getFacetCount())->userData; materialId = mMeshB->getFacet(lastParent - mMeshA->getFacetCount())->materialId; smoothingGroup = mMeshB->getFacet(lastParent - mMeshA->getFacetCount())->smoothingGroup; } newFacets.push_back({ lastPos, collected, userData, materialId, smoothingGroup }); lastPos = i; lastParent = mEdgeAggregate[i].parent; collected = 0; } collected++; newEdges[i].s = mEdgeAggregate[i].s; newEdges[i].e = mEdgeAggregate[i].e; } if (lastParent < mMeshA->getFacetCount()) { userData = mMeshA->getFacet(lastParent)->userData; materialId = mMeshA->getFacet(lastParent)->materialId; smoothingGroup = mMeshA->getFacet(lastParent)->smoothingGroup; } else { uint32_t pr = lastParent - mMeshA->getFacetCount(); userData = mMeshB->getFacet(pr)->userData; materialId = mMeshB->getFacet(pr)->materialId; smoothingGroup = mMeshB->getFacet(pr)->smoothingGroup; } newFacets.push_back({ lastPos, collected, userData, materialId, smoothingGroup }); return new MeshImpl(mVerticesAggregate.data(), newEdges.data(), newFacets.data(), static_cast<uint32_t>(mVerticesAggregate.size()), static_cast<uint32_t>(mEdgeAggregate.size()), static_cast<uint32_t>(newFacets.size())); } void BooleanEvaluator::reset() { mMeshA = nullptr; mMeshB = nullptr; mAcceleratorA = nullptr; mAcceleratorB = nullptr; mEdgeAggregate.clear(); mVerticesAggregate.clear(); mEdgeFacetIntersectionData12.clear(); mEdgeFacetIntersectionData21.clear(); } /// BooleanTool void BooleanToolImpl::release() { delete this; } Mesh* BooleanToolImpl::performBoolean(const Mesh* meshA, SpatialAccelerator* accelA, const Mesh* meshB, SpatialAccelerator* accelB, BooleanTool::Op op) { const BooleanConf modes[] = { BooleanConfigurations::BOOLEAN_INTERSECTION(), BooleanConfigurations::BOOLEAN_UNION(), BooleanConfigurations::BOOLEAN_DIFFERENCE(), }; constexpr size_t modeCount = sizeof(modes)/sizeof(modes[0]); if (op < 0 || op >= modeCount) { NVBLAST_LOG_ERROR("Illegal mode passed into BooleanToolImpl::performBoolean."); return nullptr; } if (!meshA || !meshB) { NVBLAST_LOG_ERROR("Null mesh pointer passed into BooleanToolImpl::performBoolean."); return nullptr; } DummyAccelerator dmAccelA(meshA->getFacetCount()); DummyAccelerator dmAccelB(meshA->getFacetCount()); m_evaluator.performBoolean(meshA, meshB, accelA ? accelA : &dmAccelA, accelB ? accelB : &dmAccelB, modes[op]); return m_evaluator.createNewMesh(); } bool BooleanToolImpl::pointInMesh(const Mesh* mesh, SpatialAccelerator* accel, const NvcVec3& point) { if (!mesh) { NVBLAST_LOG_ERROR("Null mesh pointer passed into BooleanToolImpl::pointInMesh."); return false; } DummyAccelerator dmAccel(mesh->getFacetCount()); return m_evaluator.isPointContainedInMesh(mesh, accel ? 
accel : &dmAccel, point); } } // namespace Blast } // namespace Nv
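As a usage sketch (hypothetical caller, not part of the file above): the accelerator-free `performBoolean` overload plus `createNewMesh` is enough to intersect two closed meshes, mirroring what `BooleanToolImpl::performBoolean` does with explicit accelerators. It assumes `BooleanEvaluator`, `BooleanConfigurations`, and `Mesh` are visible through the Impl header, as in the translation unit above.

```cpp
#include "NvBlastExtAuthoringBooleanToolImpl.h"

// meshA and meshB are assumed to be valid, closed Mesh instances created elsewhere
// in the authoring extension.
Nv::Blast::Mesh* intersectMeshes(const Nv::Blast::Mesh* meshA, const Nv::Blast::Mesh* meshB)
{
    Nv::Blast::BooleanEvaluator evaluator;
    // This overload internally builds DummyAccelerators over all facets of both meshes.
    evaluator.performBoolean(meshA, meshB, Nv::Blast::BooleanConfigurations::BOOLEAN_INTERSECTION());
    return evaluator.createNewMesh();  // nullptr if no retained edges were produced
}
```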
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringVSA.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTEXTAUTHORINGVSA_H #define NVBLASTEXTAUTHORINGVSA_H namespace Nv { namespace Blast { /* This code copied from APEX GSA */ namespace VSA { typedef float real; struct VS3D_Halfspace_Set { virtual real farthest_halfspace(real plane[4], const real point[4]) = 0; }; // Simple types and operations for internal calculations struct Vec3 { real x, y, z; }; // 3-vector inline Vec3 vec3(real x, real y, real z) { Vec3 r; r.x = x; r.y = y; r.z = z; return r; } // vector builder inline Vec3 operator + (const Vec3& a, const Vec3& b) { return vec3(a.x + b.x, a.y + b.y, a.z + b.z); } // vector addition inline Vec3 operator * (real s, const Vec3& v) { return vec3(s*v.x, s*v.y, s*v.z); } // scalar multiplication inline real operator | (const Vec3& a, const Vec3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; } // dot product inline Vec3 operator ^ (const Vec3& a, const Vec3& b) { return vec3(a.y*b.z - b.y*a.z, a.z*b.x - b.z*a.x, a.x*b.y - b.x*a.y); } // cross product struct Vec4 { Vec3 v; real w; }; // 4-vector split into 3-vector and scalar parts inline Vec4 vec4(const Vec3& v, real w) { Vec4 r; r.v = v; r.w = w; return r; } // vector builder inline real operator | (const Vec4& a, const Vec4& b) { return (a.v | b.v) + a.w*b.w; } // dot product // More accurate perpendicular inline Vec3 perp(const Vec3& a, const Vec3& b) { Vec3 c = a^b; // Cross-product gives perpendicular #if VS3D_HIGH_ACCURACY || REAL_DOUBLE const real c2 = c | c; if (c2 != 0) c = c + (1 / c2)*((a | c)*(c^b) + (b | c)*(a^c)); // Improvement to (a b)^T(c) = (0) #endif return c; } // Square inline real sq(real x) { return x*x; } // Returns index of the extremal element in a three-element set {e0, e1, e2} based upon comparisons c_ij. The extremal index m is such that c_mn is true, or e_m == e_n, for all n. 
inline int ext_index(int c_10, int c_21, int c_20) { return c_10 << c_21 | (c_21&c_20) << 1; } // Returns index (0, 1, or 2) of minimum argument inline int index_of_min(real x0, real x1, real x2) { return ext_index((int)(x1 < x0), (int)(x2 < x1), (int)(x2 < x0)); } // Compare fractions with positive deominators. Returns a_num*sqrt(a_rden2) > b_num*sqrt(b_rden2) inline bool frac_gt(real a_num, real a_rden2, real b_num, real b_rden2) { const bool a_num_neg = a_num < 0; const bool b_num_neg = b_num < 0; return a_num_neg != b_num_neg ? b_num_neg : ((a_num*a_num*a_rden2 > b_num*b_num*b_rden2) != a_num_neg); } // Returns index (0, 1, or 2) of maximum fraction with positive deominators inline int index_of_max_frac(real x0_num, real x0_rden2, real x1_num, real x1_rden2, real x2_num, real x2_rden2) { return ext_index((int)frac_gt(x1_num, x1_rden2, x0_num, x0_rden2), (int)frac_gt(x2_num, x2_rden2, x1_num, x1_rden2), (int)frac_gt(x2_num, x2_rden2, x0_num, x0_rden2)); } // Compare values given their signs and squares. Returns a > b. a2 and b2 may have any constant offset applied to them. inline bool sgn_sq_gt(real sgn_a, real a2, real sgn_b, real b2) { return sgn_a*sgn_b < 0 ? (sgn_b < 0) : ((a2 > b2) != (sgn_a < 0)); } // Returns index (0, 1, or 2) of maximum value given their signs and squares. sq_x0, sq_x1, and sq_x2 may have any constant offset applied to them. inline int index_of_max_sgn_sq(real sgn_x0, real sq_x0, real sgn_x1, real sq_x1, real sgn_x2, real sq_x2) { return ext_index((int)sgn_sq_gt(sgn_x1, sq_x1, sgn_x0, sq_x0), (int)sgn_sq_gt(sgn_x2, sq_x2, sgn_x1, sq_x1), (int)sgn_sq_gt(sgn_x2, sq_x2, sgn_x0, sq_x0)); } // Project 2D (homogeneous) vector onto 2D half-space boundary inline void project2D(Vec3& r, const Vec3& plane, real delta, real recip_n2, real eps2) { r = r + (-delta*recip_n2)*vec3(plane.x, plane.y, 0); r = r + (-(r | plane)*recip_n2)*vec3(plane.x, plane.y, 0); // Second projection for increased accuracy if ((r | r) > eps2) return; r = (-plane.z*recip_n2)*vec3(plane.x, plane.y, 0); r.z = 1; } // Update function for vs3d_test static bool vs3d_update(Vec4& p, Vec4 S[4], int& plane_count, const Vec4& q, real eps2) { // h plane is the last plane const Vec4& h = S[plane_count - 1]; // Handle plane_count == 1 specially (optimization; this could be commented out) if (plane_count == 1) { // Solution is objective projected onto h plane p = q; p.v = p.v + -(p | h)*h.v; if ((p | p) <= eps2) p = vec4(-h.w*h.v, 1); // If p == 0 then q is a direction vector, any point in h is a support point return true; } // Create basis in the h plane const int min_i = index_of_min(h.v.x*h.v.x, h.v.y*h.v.y, h.v.z*h.v.z); const Vec3 y = h.v^vec3((real)(min_i == 0), (real)(min_i == 1), (real)(min_i == 2)); const Vec3 x = y^h.v; // Use reduced vector r instead of p Vec3 r = { x | q.v, y | q.v, q.w*(y | y) }; // (x|x) = (y|y) = square of plane basis scale // If r == 0 (within epsilon), then it is a direction vector, and we have a bounded solution if ((r | r) <= eps2) r.z = 1; // Create plane equations in the h plane. These will not be normalized in general. 
int N = 0; // Plane count in h subspace Vec3 R[3]; // Planes in h subspace real recip_n2[3]; // Plane normal vector reciprocal lengths squared real delta[3]; // Signed distance of objective to the planes int index[3]; // Keep track of original plane indices for (int i = 0; i < plane_count - 1; ++i) { const Vec3& vi = S[i].v; const real cos_theta = h.v | vi; R[N] = vec3(x | vi, y | vi, S[i].w - h.w*cos_theta); index[N] = i; const real n2 = R[N].x*R[N].x + R[N].y*R[N].y; if (n2 >= eps2) { const real lin_norm = (real)1.5 - (real)0.5*n2; // 1st-order approximation to 1/sqrt(n2) expanded about n2 = 1 R[N] = lin_norm*R[N]; // We don't need normalized plane equations, but rescaling (even with an approximate normalization) gives better numerical behavior recip_n2[N] = 1 / (R[N].x*R[N].x + R[N].y*R[N].y); delta[N] = r | R[N]; ++N; // Keep this plane } else if (cos_theta < 0) return false; // Parallel cases are redundant and rejected, anti-parallel cases are 1D voids } // Now work with the N-sized R array of half-spaces in the h plane switch (N) { case 1: one_plane : if (delta[0] < 0) N = 0; // S[0] is redundant, eliminate it else project2D(r, R[0], delta[0], recip_n2[0], eps2); break; case 2: two_planes : if (delta[0] < 0 && delta[1] < 0) N = 0; // S[0] and S[1] are redundant, eliminate them else { const int max_d_index = (int)frac_gt(delta[1], recip_n2[1], delta[0], recip_n2[0]); project2D(r, R[max_d_index], delta[max_d_index], recip_n2[max_d_index], eps2); const int min_d_index = max_d_index ^ 1; const real new_delta_min = r | R[min_d_index]; if (new_delta_min < 0) { index[0] = index[max_d_index]; N = 1; // S[min_d_index] is redundant, eliminate it } else { // Set r to the intersection of R[0] and R[1] and keep both r = perp(R[0], R[1]); if (r.z*r.z*recip_n2[0] * recip_n2[1] < eps2) { if (R[0].x*R[1].x + R[0].y*R[1].y < 0) return false; // 2D void found goto one_plane; } r = (1 / r.z)*r; // We could just as well multiply r by sgn(r.z); we just need to ensure r.z > 0 } } break; case 3: if (delta[0] < 0 && delta[1] < 0 && delta[2] < 0) N = 0; // S[0], S[1], and S[2] are redundant, eliminate them else { const Vec3 row_x = { R[0].x, R[1].x, R[2].x }; const Vec3 row_y = { R[0].y, R[1].y, R[2].y }; const Vec3 row_w = { R[0].z, R[1].z, R[2].z }; const Vec3 cof_w = perp(row_x, row_y); const bool detR_pos = (row_w | cof_w) > 0; const int nrw_sgn0 = cof_w.x*cof_w.x*recip_n2[1] * recip_n2[2] < eps2 ? 0 : (((int)((cof_w.x > 0) == detR_pos) << 1) - 1); const int nrw_sgn1 = cof_w.y*cof_w.y*recip_n2[2] * recip_n2[0] < eps2 ? 0 : (((int)((cof_w.y > 0) == detR_pos) << 1) - 1); const int nrw_sgn2 = cof_w.z*cof_w.z*recip_n2[0] * recip_n2[1] < eps2 ? 0 : (((int)((cof_w.z > 0) == detR_pos) << 1) - 1); if ((nrw_sgn0 | nrw_sgn1 | nrw_sgn2) >= 0) return false; // 3D void found const int positive_width_count = ((nrw_sgn0 >> 1) & 1) + ((nrw_sgn1 >> 1) & 1) + ((nrw_sgn2 >> 1) & 1); if (positive_width_count == 1) { // A single positive width results from a redundant plane. Eliminate it and peform N = 2 calculation. const int pos_width_index = ((nrw_sgn1 >> 1) & 1) | (nrw_sgn2 & 2); // Calculates which index corresponds to the positive-width side R[pos_width_index] = R[2]; recip_n2[pos_width_index] = recip_n2[2]; delta[pos_width_index] = delta[2]; index[pos_width_index] = index[2]; N = 2; goto two_planes; } // Find the max dot product of r and R[i]/|R_normal[i]|. For numerical accuracy when the angle between r and the i^{th} plane normal is small, we take some care below: const int max_d_index = r.z != 0 ? 
index_of_max_frac(delta[0], recip_n2[0], delta[1], recip_n2[1], delta[2], recip_n2[2]) // displacement term resolves small-angle ambiguity, just use dot product : index_of_max_sgn_sq(delta[0], -sq(r.x*R[0].y - r.y*R[0].x)*recip_n2[0], delta[1], -sq(r.x*R[1].y - r.y*R[1].x)*recip_n2[1], delta[2], -sq(r.x*R[2].y - r.y*R[2].x)*recip_n2[2]); // No displacement term. Use wedge product to find the sine of the angle. // Project r onto max-d plane project2D(r, R[max_d_index], delta[max_d_index], recip_n2[max_d_index], eps2); N = 1; // Unless we use a vertex in the loop below const int index_max = index[max_d_index]; // The number of finite widths should be >= 2. If not, it should be 0, but in any case it implies three parallel lines in the plane, which we should not have here. // If we do have three parallel lines (# of finite widths < 2), we've picked the line corresponding to the half-plane farthest from r, which is correct. const int finite_width_count = (nrw_sgn0 & 1) + (nrw_sgn1 & 1) + (nrw_sgn2 & 1); if (finite_width_count >= 2) { const int i_remaining[2] = { (1 << max_d_index) & 3, (3 >> max_d_index) ^ 1 }; // = {(max_d_index+1)%3, (max_d_index+2)%3} const int i_select = (int)frac_gt(delta[i_remaining[1]], recip_n2[i_remaining[1]], delta[i_remaining[0]], recip_n2[i_remaining[0]]); // Select the greater of the remaining dot products for (int i = 0; i < 2; ++i) { const int j = i_remaining[i_select^i]; // i = 0 => the next-greatest, i = 1 => the least if ((r | R[j]) >= 0) { r = perp(R[max_d_index], R[j]); r = (1 / r.z)*r; // We could just as well multiply r by sgn(r.z); we just need to ensure r.z > 0 index[1] = index[j]; N = 2; break; } } } index[0] = index_max; } break; } // Transform r back to 3D space p = vec4(r.x*x + r.y*y + (-r.z*h.w)*h.v, r.z); // Pack S array with kept planes if (N < 2 || index[1] != 0) { for (int i = 0; i < N; ++i) S[i] = S[index[i]]; } // Safe to copy columns in order else { const Vec4 temp = S[0]; S[0] = S[index[0]]; S[1] = temp; } // Otherwise use temp storage to avoid overwrite S[N] = h; plane_count = N + 1; return true; } // Performs the VS algorithm for D = 3 inline int vs3d_test(VS3D_Halfspace_Set& halfspace_set, real* q = nullptr) { // Objective = q if it is not NULL, otherwise it is the origin represented in homogeneous coordinates const Vec4 objective = q ? (q[3] != 0 ? vec4((1 / q[3])*vec3(q[0], q[1], q[2]), 1) : *(Vec4*)q) : vec4(vec3(0, 0, 0), 1); // Tolerance for 3D void simplex algorithm const real eps_f = (real)1 / (sizeof(real) == 4 ? (1L << 23) : (1LL << 52)); // Floating-point epsilon #if VS3D_HIGH_ACCURACY || REAL_DOUBLE const real eps = 8 * eps_f; #else const real eps = 80 * eps_f; #endif const real eps2 = eps*eps; // Using epsilon squared // Maximum allowed iterations of main loop. 
If exceeded, error code is returned const int max_iteration_count = 50; // State Vec4 S[4]; // Up to 4 planes int plane_count = 0; // Number of valid planes Vec4 p = objective; // Test point, initialized to objective // Default result, changed to valid result if found in loop below int result = -1; // Iterate until a stopping condition is met or the maximum number of iterations is reached for (int i = 0; result < 0 && i < max_iteration_count; ++i) { Vec4& plane = S[plane_count++]; real delta = halfspace_set.farthest_halfspace(&plane.v.x, &p.v.x); #if VS3D_UNNORMALIZED_PLANE_HANDLING != 0 const real recip_norm = vs3d_recip_sqrt(plane.v | plane.v); plane = vec4(recip_norm*plane.v, recip_norm*plane.w); delta *= recip_norm; #endif if (delta <= 0 || delta*delta <= eps2*(p | p)) result = 1; // Intersection found else if (!vs3d_update(p, S, plane_count, objective, eps2)) result = 0; // Void simplex found } // If q is given, fill it with the solution (normalize p.w if it is not zero) if (q) *(Vec4*)q = (p.w != 0) ? vec4((1 / p.w)*p.v, 1) : p; return result; } } // namespace VSA } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTEXTAUTHORINGVSA_H
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtApexSharedParts.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtApexSharedParts.h" #include "NvBlastGlobals.h" #include "NvBlastMemory.h" #include "NvBlastAssert.h" #include "NsVecMath.h" #include "NvMat44.h" #include "NvBounds3.h" #include "NsVecMath.h" #include <vector> using namespace nvidia; using namespace nvidia::shdfnd::aos; namespace Nv { namespace Blast { NV_NOALIAS NV_FORCE_INLINE BoolV PointOutsideOfPlane4(const Vec3VArg _a, const Vec3VArg _b, const Vec3VArg _c, const Vec3VArg _d) { // this is not 0 because of the following scenario: // All the points lie on the same plane and the plane goes through the origin (0,0,0). // On the Wii U, the math below has the problem that when point A gets projected on the // plane cumputed by A, B, C, the distance to the plane might not be 0 for the mentioned // scenario but a small positive or negative value. This can lead to the wrong boolean // results. Using a small negative value as threshold is more conservative but safer. 
const Vec4V zero = V4Load(-1e-6f); const Vec3V ab = V3Sub(_b, _a); const Vec3V ac = V3Sub(_c, _a); const Vec3V ad = V3Sub(_d, _a); const Vec3V bd = V3Sub(_d, _b); const Vec3V bc = V3Sub(_c, _b); const Vec3V v0 = V3Cross(ab, ac); const Vec3V v1 = V3Cross(ac, ad); const Vec3V v2 = V3Cross(ad, ab); const Vec3V v3 = V3Cross(bd, bc); const FloatV signa0 = V3Dot(v0, _a); const FloatV signa1 = V3Dot(v1, _a); const FloatV signa2 = V3Dot(v2, _a); const FloatV signd3 = V3Dot(v3, _a); const FloatV signd0 = V3Dot(v0, _d); const FloatV signd1 = V3Dot(v1, _b); const FloatV signd2 = V3Dot(v2, _c); const FloatV signa3 = V3Dot(v3, _b); const Vec4V signa = V4Merge(signa0, signa1, signa2, signa3); const Vec4V signd = V4Merge(signd0, signd1, signd2, signd3); return V4IsGrtrOrEq(V4Mul(signa, signd), zero);//same side, outside of the plane } NV_NOALIAS NV_FORCE_INLINE Vec3V closestPtPointSegment(const Vec3VArg a, const Vec3VArg b) { const FloatV zero = FZero(); const FloatV one = FOne(); //Test degenerated case const Vec3V ab = V3Sub(b, a); const FloatV denom = V3Dot(ab, ab); const Vec3V ap = V3Neg(a);//V3Sub(origin, a); const FloatV nom = V3Dot(ap, ab); const BoolV con = FIsEq(denom, zero); const FloatV tValue = FClamp(FDiv(nom, denom), zero, one); const FloatV t = FSel(con, zero, tValue); return V3Sel(con, a, V3ScaleAdd(ab, t, a)); } NV_NOALIAS NV_FORCE_INLINE Vec3V closestPtPointSegment(const Vec3VArg Q0, const Vec3VArg Q1, const Vec3VArg A0, const Vec3VArg A1, const Vec3VArg B0, const Vec3VArg B1, uint32_t& size, Vec3V& closestA, Vec3V& closestB) { const Vec3V a = Q0; const Vec3V b = Q1; const BoolV bTrue = BTTTT(); const FloatV zero = FZero(); const FloatV one = FOne(); //Test degenerated case const Vec3V ab = V3Sub(b, a); const FloatV denom = V3Dot(ab, ab); const Vec3V ap = V3Neg(a);//V3Sub(origin, a); const FloatV nom = V3Dot(ap, ab); const BoolV con = FIsEq(denom, zero); if (BAllEq(con, bTrue)) { size = 1; closestA = A0; closestB = B0; return Q0; } const Vec3V v = V3Sub(A1, A0); const Vec3V w = V3Sub(B1, B0); const FloatV tValue = FClamp(FDiv(nom, denom), zero, one); const FloatV t = FSel(con, zero, tValue); const Vec3V tempClosestA = V3ScaleAdd(v, t, A0); const Vec3V tempClosestB = V3ScaleAdd(w, t, B0); closestA = tempClosestA; closestB = tempClosestB; return V3Sub(tempClosestA, tempClosestB); } NV_NOALIAS Vec3V closestPtPointSegmentTesselation(const Vec3VArg Q0, const Vec3VArg Q1, const Vec3VArg A0, const Vec3VArg A1, const Vec3VArg B0, const Vec3VArg B1, uint32_t& size, Vec3V& closestA, Vec3V& closestB) { const FloatV half = FHalf(); const FloatV targetSegmentLengthSq = FLoad(10000.f);//100 unit Vec3V q0 = Q0; Vec3V q1 = Q1; Vec3V a0 = A0; Vec3V a1 = A1; Vec3V b0 = B0; Vec3V b1 = B1; for (;;) { const Vec3V midPoint = V3Scale(V3Add(q0, q1), half); const Vec3V midA = V3Scale(V3Add(a0, a1), half); const Vec3V midB = V3Scale(V3Add(b0, b1), half); const Vec3V v = V3Sub(midPoint, q0); const FloatV sqV = V3Dot(v, v); if (FAllGrtr(targetSegmentLengthSq, sqV)) break; //split the segment into half const Vec3V tClos0 = closestPtPointSegment(q0, midPoint); const FloatV sqDist0 = V3Dot(tClos0, tClos0); const Vec3V tClos1 = closestPtPointSegment(q1, midPoint); const FloatV sqDist1 = V3Dot(tClos1, tClos1); //const BoolV con = FIsGrtr(sqDist0, sqDist1); if (FAllGrtr(sqDist0, sqDist1)) { //segment [m, q1] q0 = midPoint; a0 = midA; b0 = midB; } else { //segment [q0, m] q1 = midPoint; a1 = midA; b1 = midB; } } return closestPtPointSegment(q0, q1, a0, a1, b0, b1, size, closestA, closestB); } NV_NOALIAS Vec3V 
closestPtPointTriangleTesselation(const Vec3V* NV_RESTRICT Q, const Vec3V* NV_RESTRICT A, const Vec3V* NV_RESTRICT B, const uint32_t* NV_RESTRICT indices, uint32_t& size, Vec3V& closestA, Vec3V& closestB) { size = 3; const FloatV zero = FZero(); const FloatV eps = FEps(); const FloatV half = FHalf(); const BoolV bTrue = BTTTT(); const FloatV four = FLoad(4.f); const FloatV sixty = FLoad(100.f); const uint32_t ind0 = indices[0]; const uint32_t ind1 = indices[1]; const uint32_t ind2 = indices[2]; const Vec3V a = Q[ind0]; const Vec3V b = Q[ind1]; const Vec3V c = Q[ind2]; Vec3V ab_ = V3Sub(b, a); Vec3V ac_ = V3Sub(c, a); Vec3V bc_ = V3Sub(b, c); const FloatV dac_ = V3Dot(ac_, ac_); const FloatV dbc_ = V3Dot(bc_, bc_); if (FAllGrtrOrEq(eps, FMin(dac_, dbc_))) { //degenerate size = 2; return closestPtPointSegment(Q[ind0], Q[ind1], A[ind0], A[ind1], B[ind0], B[ind1], size, closestA, closestB); } Vec3V ap = V3Neg(a); Vec3V bp = V3Neg(b); Vec3V cp = V3Neg(c); FloatV d1 = V3Dot(ab_, ap); // snom FloatV d2 = V3Dot(ac_, ap); // tnom FloatV d3 = V3Dot(ab_, bp); // -sdenom FloatV d4 = V3Dot(ac_, bp); // unom = d4 - d3 FloatV d5 = V3Dot(ab_, cp); // udenom = d5 - d6 FloatV d6 = V3Dot(ac_, cp); // -tdenom /* FloatV unom = FSub(d4, d3); FloatV udenom = FSub(d5, d6);*/ FloatV va = FNegScaleSub(d5, d4, FMul(d3, d6));//edge region of BC FloatV vb = FNegScaleSub(d1, d6, FMul(d5, d2));//edge region of AC FloatV vc = FNegScaleSub(d3, d2, FMul(d1, d4));//edge region of AB //check if p in vertex region outside a const BoolV con00 = FIsGrtrOrEq(zero, d1); // snom <= 0 const BoolV con01 = FIsGrtrOrEq(zero, d2); // tnom <= 0 const BoolV con0 = BAnd(con00, con01); // vertex region a if (BAllEq(con0, bTrue)) { //size = 1; closestA = A[ind0]; closestB = B[ind0]; return Q[ind0]; } //check if p in vertex region outside b const BoolV con10 = FIsGrtrOrEq(d3, zero); const BoolV con11 = FIsGrtrOrEq(d3, d4); const BoolV con1 = BAnd(con10, con11); // vertex region b if (BAllEq(con1, bTrue)) { /*size = 1; indices[0] = ind1;*/ closestA = A[ind1]; closestB = B[ind1]; return Q[ind1]; } //check if p in vertex region outside of c const BoolV con20 = FIsGrtrOrEq(d6, zero); const BoolV con21 = FIsGrtrOrEq(d6, d5); const BoolV con2 = BAnd(con20, con21); // vertex region c if (BAllEq(con2, bTrue)) { closestA = A[ind2]; closestB = B[ind2]; return Q[ind2]; } //check if p in edge region of AB const BoolV con30 = FIsGrtrOrEq(zero, vc); const BoolV con31 = FIsGrtrOrEq(d1, zero); const BoolV con32 = FIsGrtrOrEq(zero, d3); const BoolV con3 = BAnd(con30, BAnd(con31, con32)); if (BAllEq(con3, bTrue)) { //size = 2; //p in edge region of AB, split AB return closestPtPointSegmentTesselation(Q[ind0], Q[ind1], A[ind0], A[ind1], B[ind0], B[ind1], size, closestA, closestB); } //check if p in edge region of BC const BoolV con40 = FIsGrtrOrEq(zero, va); const BoolV con41 = FIsGrtrOrEq(d4, d3); const BoolV con42 = FIsGrtrOrEq(d5, d6); const BoolV con4 = BAnd(con40, BAnd(con41, con42)); if (BAllEq(con4, bTrue)) { //p in edge region of BC, split BC return closestPtPointSegmentTesselation(Q[ind1], Q[ind2], A[ind1], A[ind2], B[ind1], B[ind2], size, closestA, closestB); } //check if p in edge region of AC const BoolV con50 = FIsGrtrOrEq(zero, vb); const BoolV con51 = FIsGrtrOrEq(d2, zero); const BoolV con52 = FIsGrtrOrEq(zero, d6); const BoolV con5 = BAnd(con50, BAnd(con51, con52)); if (BAllEq(con5, bTrue)) { //p in edge region of AC, split AC return closestPtPointSegmentTesselation(Q[ind0], Q[ind2], A[ind0], A[ind2], B[ind0], B[ind2], size, closestA, 
closestB); } size = 3; Vec3V q0 = Q[ind0]; Vec3V q1 = Q[ind1]; Vec3V q2 = Q[ind2]; Vec3V a0 = A[ind0]; Vec3V a1 = A[ind1]; Vec3V a2 = A[ind2]; Vec3V b0 = B[ind0]; Vec3V b1 = B[ind1]; Vec3V b2 = B[ind2]; for (;;) { const Vec3V ab = V3Sub(q1, q0); const Vec3V ac = V3Sub(q2, q0); const Vec3V bc = V3Sub(q2, q1); const FloatV dab = V3Dot(ab, ab); const FloatV dac = V3Dot(ac, ac); const FloatV dbc = V3Dot(bc, bc); const FloatV fMax = FMax(dab, FMax(dac, dbc)); const FloatV fMin = FMin(dab, FMin(dac, dbc)); const Vec3V w = V3Cross(ab, ac); const FloatV area = V3Length(w); const FloatV ratio = FDiv(FSqrt(fMax), FSqrt(fMin)); if (FAllGrtr(four, ratio) && FAllGrtr(sixty, area)) break; //calculate the triangle normal const Vec3V triNormal = V3Normalize(w); NVBLAST_ASSERT(V3AllEq(triNormal, V3Zero()) == 0); //split the longest edge if (FAllGrtrOrEq(dab, dac) && FAllGrtrOrEq(dab, dbc)) { //split edge q0q1 const Vec3V midPoint = V3Scale(V3Add(q0, q1), half); const Vec3V midA = V3Scale(V3Add(a0, a1), half); const Vec3V midB = V3Scale(V3Add(b0, b1), half); const Vec3V v = V3Sub(midPoint, q2); const Vec3V n = V3Normalize(V3Cross(v, triNormal)); const FloatV d = FNeg(V3Dot(n, midPoint)); const FloatV dp = FAdd(V3Dot(n, q0), d); const FloatV sum = FMul(d, dp); if (FAllGrtr(sum, zero)) { //q0 and origin at the same side, split triangle[q0, m, q2] q1 = midPoint; a1 = midA; b1 = midB; } else { //q1 and origin at the same side, split triangle[m, q1, q2] q0 = midPoint; a0 = midA; b0 = midB; } } else if (FAllGrtrOrEq(dac, dbc)) { //split edge q0q2 const Vec3V midPoint = V3Scale(V3Add(q0, q2), half); const Vec3V midA = V3Scale(V3Add(a0, a2), half); const Vec3V midB = V3Scale(V3Add(b0, b2), half); const Vec3V v = V3Sub(midPoint, q1); const Vec3V n = V3Normalize(V3Cross(v, triNormal)); const FloatV d = FNeg(V3Dot(n, midPoint)); const FloatV dp = FAdd(V3Dot(n, q0), d); const FloatV sum = FMul(d, dp); if (FAllGrtr(sum, zero)) { //q0 and origin at the same side, split triangle[q0, q1, m] q2 = midPoint; a2 = midA; b2 = midB; } else { //q2 and origin at the same side, split triangle[m, q1, q2] q0 = midPoint; a0 = midA; b0 = midB; } } else { //split edge q1q2 const Vec3V midPoint = V3Scale(V3Add(q1, q2), half); const Vec3V midA = V3Scale(V3Add(a1, a2), half); const Vec3V midB = V3Scale(V3Add(b1, b2), half); const Vec3V v = V3Sub(midPoint, q0); const Vec3V n = V3Normalize(V3Cross(v, triNormal)); const FloatV d = FNeg(V3Dot(n, midPoint)); const FloatV dp = FAdd(V3Dot(n, q1), d); const FloatV sum = FMul(d, dp); if (FAllGrtr(sum, zero)) { //q1 and origin at the same side, split triangle[q0, q1, m] q2 = midPoint; a2 = midA; b2 = midB; } else { //q2 and origin at the same side, split triangle[q0, m, q2] q1 = midPoint; a1 = midA; b1 = midB; } } } //P must project inside face region. 
Compute Q using Barycentric coordinates ab_ = V3Sub(q1, q0); ac_ = V3Sub(q2, q0); ap = V3Neg(q0); bp = V3Neg(q1); cp = V3Neg(q2); d1 = V3Dot(ab_, ap); // snom d2 = V3Dot(ac_, ap); // tnom d3 = V3Dot(ab_, bp); // -sdenom d4 = V3Dot(ac_, bp); // unom = d4 - d3 d5 = V3Dot(ab_, cp); // udenom = d5 - d6 d6 = V3Dot(ac_, cp); // -tdenom va = FNegScaleSub(d5, d4, FMul(d3, d6));//edge region of BC vb = FNegScaleSub(d1, d6, FMul(d5, d2));//edge region of AC vc = FNegScaleSub(d3, d2, FMul(d1, d4));//edge region of AB const FloatV toRecipD = FAdd(va, FAdd(vb, vc)); const FloatV denom = FRecip(toRecipD);//V4GetW(recipTmp); const Vec3V v0 = V3Sub(a1, a0); const Vec3V v1 = V3Sub(a2, a0); const Vec3V w0 = V3Sub(b1, b0); const Vec3V w1 = V3Sub(b2, b0); const FloatV t = FMul(vb, denom); const FloatV w = FMul(vc, denom); const Vec3V vA1 = V3Scale(v1, w); const Vec3V vB1 = V3Scale(w1, w); const Vec3V tempClosestA = V3Add(a0, V3ScaleAdd(v0, t, vA1)); const Vec3V tempClosestB = V3Add(b0, V3ScaleAdd(w0, t, vB1)); closestA = tempClosestA; closestB = tempClosestB; return V3Sub(tempClosestA, tempClosestB); } NV_NOALIAS Vec3V closestPtPointTetrahedronTesselation(Vec3V* NV_RESTRICT Q, Vec3V* NV_RESTRICT A, Vec3V* NV_RESTRICT B, uint32_t& size, Vec3V& closestA, Vec3V& closestB) { const FloatV eps = FEps(); const Vec3V zeroV = V3Zero(); uint32_t tempSize = size; FloatV bestSqDist = FLoad(NV_MAX_F32); const Vec3V a = Q[0]; const Vec3V b = Q[1]; const Vec3V c = Q[2]; const Vec3V d = Q[3]; const BoolV bTrue = BTTTT(); const BoolV bFalse = BFFFF(); //degenerated const Vec3V ad = V3Sub(d, a); const Vec3V bd = V3Sub(d, b); const Vec3V cd = V3Sub(d, c); const FloatV dad = V3Dot(ad, ad); const FloatV dbd = V3Dot(bd, bd); const FloatV dcd = V3Dot(cd, cd); const FloatV fMin = FMin(dad, FMin(dbd, dcd)); if (FAllGrtr(eps, fMin)) { size = 3; uint32_t tempIndices[] = { 0, 1, 2 }; return closestPtPointTriangleTesselation(Q, A, B, tempIndices, size, closestA, closestB); } Vec3V _Q[] = { Q[0], Q[1], Q[2], Q[3] }; Vec3V _A[] = { A[0], A[1], A[2], A[3] }; Vec3V _B[] = { B[0], B[1], B[2], B[3] }; uint32_t indices[3] = { 0, 1, 2 }; const BoolV bIsOutside4 = PointOutsideOfPlane4(a, b, c, d); if (BAllEq(bIsOutside4, bFalse)) { //origin is inside the tetrahedron, we are done return zeroV; } Vec3V result = zeroV; Vec3V tempClosestA, tempClosestB; if (BAllEq(BGetX(bIsOutside4), bTrue)) { uint32_t tempIndices[] = { 0, 1, 2 }; uint32_t _size = 3; result = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB); const FloatV sqDist = V3Dot(result, result); bestSqDist = sqDist; indices[0] = tempIndices[0]; indices[1] = tempIndices[1]; indices[2] = tempIndices[2]; tempSize = _size; closestA = tempClosestA; closestB = tempClosestB; } if (BAllEq(BGetY(bIsOutside4), bTrue)) { uint32_t tempIndices[] = { 0, 2, 3 }; uint32_t _size = 3; const Vec3V q = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB); const FloatV sqDist = V3Dot(q, q); const BoolV con = FIsGrtr(bestSqDist, sqDist); if (BAllEq(con, bTrue)) { result = q; bestSqDist = sqDist; indices[0] = tempIndices[0]; indices[1] = tempIndices[1]; indices[2] = tempIndices[2]; tempSize = _size; closestA = tempClosestA; closestB = tempClosestB; } } if (BAllEq(BGetZ(bIsOutside4), bTrue)) { uint32_t tempIndices[] = { 0, 3, 1 }; uint32_t _size = 3; const Vec3V q = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB); const FloatV sqDist = V3Dot(q, q); const BoolV con = FIsGrtr(bestSqDist, 
sqDist); if (BAllEq(con, bTrue)) { result = q; bestSqDist = sqDist; indices[0] = tempIndices[0]; indices[1] = tempIndices[1]; indices[2] = tempIndices[2]; tempSize = _size; closestA = tempClosestA; closestB = tempClosestB; } } if (BAllEq(BGetW(bIsOutside4), bTrue)) { uint32_t tempIndices[] = { 1, 3, 2 }; uint32_t _size = 3; const Vec3V q = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB); const FloatV sqDist = V3Dot(q, q); const BoolV con = FIsGrtr(bestSqDist, sqDist); if (BAllEq(con, bTrue)) { result = q; bestSqDist = sqDist; indices[0] = tempIndices[0]; indices[1] = tempIndices[1]; indices[2] = tempIndices[2]; tempSize = _size; closestA = tempClosestA; closestB = tempClosestB; } } A[0] = _A[indices[0]]; A[1] = _A[indices[1]]; A[2] = _A[indices[2]]; B[0] = _B[indices[0]]; B[1] = _B[indices[1]]; B[2] = _B[indices[2]]; Q[0] = _Q[indices[0]]; Q[1] = _Q[indices[1]]; Q[2] = _Q[indices[2]]; size = tempSize; return result; } NV_NOALIAS NV_FORCE_INLINE Vec3V doTesselation(Vec3V* NV_RESTRICT Q, Vec3V* NV_RESTRICT A, Vec3V* NV_RESTRICT B, const Vec3VArg support, const Vec3VArg supportA, const Vec3VArg supportB, uint32_t& size, Vec3V& closestA, Vec3V& closestB) { switch (size) { case 1: { closestA = supportA; closestB = supportB; return support; } case 2: { return closestPtPointSegmentTesselation(Q[0], support, A[0], supportA, B[0], supportB, size, closestA, closestB); } case 3: { uint32_t tempIndices[3] = { 0, 1, 2 }; return closestPtPointTriangleTesselation(Q, A, B, tempIndices, size, closestA, closestB); } case 4: { return closestPtPointTetrahedronTesselation(Q, A, B, size, closestA, closestB); } default: NVBLAST_ASSERT(0); } return support; } enum Status { STATUS_NON_INTERSECT, STATUS_CONTACT, STATUS_DEGENERATE, }; struct Output { /// Get the normal to push apart in direction from A to B NV_FORCE_INLINE Vec3V getNormal() const { return V3Normalize(V3Sub(mClosestB, mClosestA)); } Vec3V mClosestA; ///< Closest point on A Vec3V mClosestB; ///< Closest point on B FloatV mDistSq; }; struct ConvexV { void calcExtent(const Vec3V& dir, float& minOut, float& maxOut) const { // Expand const Vec4V x = Vec4V_From_FloatV(V3GetX(dir)); const Vec4V y = Vec4V_From_FloatV(V3GetY(dir)); const Vec4V z = Vec4V_From_FloatV(V3GetZ(dir)); const Vec4V* src = mAovVertices; const Vec4V* end = src + mNumAovVertices * 3; // Do first step Vec4V max = V4MulAdd(x, src[0], V4MulAdd(y, src[1], V4Mul(z, src[2]))); Vec4V min = max; src += 3; // Do the rest for (; src < end; src += 3) { const Vec4V dot = V4MulAdd(x, src[0], V4MulAdd(y, src[1], V4Mul(z, src[2]))); max = V4Max(dot, max); min = V4Min(dot, min); } FStore(V4ExtractMax(max), &maxOut); FStore(V4ExtractMin(min), &minOut); } Vec3V calcSupport(const Vec3V& dir) const { // Expand const Vec4V x = Vec4V_From_FloatV(V3GetX(dir)); const Vec4V y = Vec4V_From_FloatV(V3GetY(dir)); const Vec4V z = Vec4V_From_FloatV(V3GetZ(dir)); NV_ALIGN(16, static const float index4const[]) = { 0.0f, 1.0f, 2.0f, 3.0f }; Vec4V index4 = *(const Vec4V*)index4const; NV_ALIGN(16, static const float delta4const[]) = { 4.0f, 4.0f, 4.0f, 4.0f }; const Vec4V delta4 = *(const Vec4V*)delta4const; const Vec4V* src = mAovVertices; const Vec4V* end = src + mNumAovVertices * 3; // Do first step Vec4V max = V4MulAdd(x, src[0], V4MulAdd(y, src[1], V4Mul(z, src[2]))); Vec4V maxIndex = index4; index4 = V4Add(index4, delta4); src += 3; // Do the rest for (; src < end; src += 3) { const Vec4V dot = V4MulAdd(x, src[0], V4MulAdd(y, src[1], V4Mul(z, src[2]))); const BoolV cmp = 
V4IsGrtr(dot, max); max = V4Max(dot, max); maxIndex = V4Sel(cmp, index4, maxIndex); index4 = V4Add(index4, delta4); } Vec4V horiMax = Vec4V_From_FloatV(V4ExtractMax(max)); uint32_t mask = BGetBitMask(V4IsEq(horiMax, max)); const uint32_t simdIndex = (0x12131210 >> (mask + mask)) & uint32_t(3); /// NOTE! Could be load hit store /// Would be better to have all simd. NV_ALIGN(16, float f[4]); V4StoreA(maxIndex, f); uint32_t index = uint32_t(uint32_t(f[simdIndex])); const Vec4V* aovIndex = (mAovVertices + (index >> 2) * 3); const float* aovOffset = ((const float*)aovIndex) + (index & 3); return Vec3V_From_Vec4V(V4LoadXYZW(aovOffset[0], aovOffset[4], aovOffset[8], 1.0f)); } const Vec4V* mAovVertices; ///< Vertices storex x,x,x,x, y,y,y,y, z,z,z,z uint32_t mNumAovVertices; ///< Number of groups of 4 of vertices }; Status Collide(const Vec3V& initialDir, const ConvexV& convexA, const Mat34V& bToA, const ConvexV& convexB, Output& out) { Vec3V Q[4]; Vec3V A[4]; Vec3V B[4]; Mat33V aToB = M34Trnsps33(bToA); uint32_t size = 0; const Vec3V zeroV = V3Zero(); const BoolV bTrue = BTTTT(); //Vec3V v = V3UnitX(); Vec3V v = V3Sel(FIsGrtr(V3Dot(initialDir, initialDir), FZero()), initialDir, V3UnitX()); //const FloatV minMargin = zero; //const FloatV eps2 = FMul(minMargin, FLoad(0.01f)); //FloatV eps2 = zero; FloatV eps2 = FLoad(1e-6f); const FloatV epsRel = FLoad(0.000225f); Vec3V closA(zeroV), closB(zeroV); FloatV sDist = FMax(); FloatV minDist = sDist; Vec3V closAA = zeroV; Vec3V closBB = zeroV; BoolV bNotTerminated = bTrue; BoolV bCon = bTrue; do { minDist = sDist; closAA = closA; closBB = closB; uint32_t index = size++; NVBLAST_ASSERT(index < 4); const Vec3V supportA = convexA.calcSupport(V3Neg(v)); const Vec3V supportB = M34MulV3(bToA, convexB.calcSupport(M33MulV3(aToB, v))); const Vec3V support = Vec3V_From_Vec4V(Vec4V_From_Vec3V(V3Sub(supportA, supportB))); A[index] = supportA; B[index] = supportB; Q[index] = support; const FloatV signDist = V3Dot(v, support); const FloatV tmp0 = FSub(sDist, signDist); if (FAllGrtr(FMul(epsRel, sDist), tmp0)) { out.mClosestA = closA; out.mClosestB = closB; out.mDistSq = sDist; return STATUS_NON_INTERSECT; } //calculate the closest point between two convex hull v = doTesselation(Q, A, B, support, supportA, supportB, size, closA, closB); sDist = V3Dot(v, v); bCon = FIsGrtr(minDist, sDist); bNotTerminated = BAnd(FIsGrtr(sDist, eps2), bCon); } while (BAllEq(bNotTerminated, bTrue)); out.mClosestA = V3Sel(bCon, closA, closAA); out.mClosestB = V3Sel(bCon, closB, closBB); out.mDistSq = FSel(bCon, sDist, minDist); return Status(BAllEq(bCon, bTrue) == 1 ? STATUS_CONTACT : STATUS_DEGENERATE); } static void _calcSeparation(const ConvexV& convexA, const nvidia::NvTransform& aToWorldIn, const Mat34V& bToA, ConvexV& convexB, const Vec3V& centroidAToB, Output& out, Separation& sep) { Mat33V aToB = M34Trnsps33(bToA); Vec3V normalA = out.getNormal(); FloatV vEpsilon = FLoad(1e-6f); if (BAllEqFFFF(FIsGrtr(out.mDistSq, vEpsilon))) { if (BAllEqTTTT(FIsGrtr(V3Dot(centroidAToB, centroidAToB), vEpsilon))) { normalA = V3Normalize(centroidAToB); } else { normalA = V3UnitX(); } } convexA.calcExtent(normalA, sep.min0, sep.max0); Vec3V normalB = M33MulV3(aToB, normalA); convexB.calcExtent(normalB, sep.min1, sep.max1); { // Offset the min max taking into account transform // Distance of origin from B's space in As space in direction of the normal in As space should fix it... 
float fix; FStore(V3Dot(bToA.col3, normalA), &fix); sep.min1 += fix; sep.max1 += fix; } // Looks like it's the plane at the midpoint Vec3V center = V3Scale(V3Add(out.mClosestA, out.mClosestB), FLoad(0.5f)); // Transform to world space Mat34V aToWorld; *(NvMat44*)&aToWorld = aToWorldIn; // Put the normal in world space Vec3V worldCenter = M34MulV3(aToWorld, center); Vec3V worldNormal = M34Mul33V3(aToWorld, normalA); FloatV dist = V3Dot(worldNormal, worldCenter); V3StoreU(worldNormal, sep.plane.n); FStore(dist, &sep.plane.d); sep.plane.d = -sep.plane.d; } static void _arrayVec3ToVec4(const NvVec3* src, Vec4V* dst, uint32_t num) { const uint32_t num4 = num >> 2; for (uint32_t i = 0; i < num4; i++, dst += 3, src += 4) { Vec3V v0 = V3LoadU(&src[0].x); Vec3V v1 = V3LoadU(&src[1].x); Vec3V v2 = V3LoadU(&src[2].x); Vec3V v3 = V3LoadU(&src[3].x); // Transpose V4Transpose(v0, v1, v2, v3); // Save dst[0] = v0; dst[1] = v1; dst[2] = v2; } const uint32_t remain = num & 3; if (remain) { Vec3V work[4]; uint32_t i = 0; for (; i < remain; i++) work[i] = V3LoadU(&src[i].x); for (; i < 4; i++) work[i] = work[remain - 1]; V4Transpose(work[0], work[1], work[2], work[3]); dst[0] = work[0]; dst[1] = work[1]; dst[2] = work[2]; } } static void _arrayVec3ToVec4(const NvVec3* src, const Vec3V& scale, Vec4V* dst, uint32_t num) { // If no scale - use the faster version if (V3AllEq(scale, V3One())) { return _arrayVec3ToVec4(src, dst, num); } const uint32_t num4 = num >> 2; for (uint32_t i = 0; i < num4; i++, dst += 3, src += 4) { Vec3V v0 = V3Mul(scale, V3LoadU(&src[0].x)); Vec3V v1 = V3Mul(scale, V3LoadU(&src[1].x)); Vec3V v2 = V3Mul(scale, V3LoadU(&src[2].x)); Vec3V v3 = V3Mul(scale, V3LoadU(&src[3].x)); // Transpose V4Transpose(v0, v1, v2, v3); // Save dst[0] = v0; dst[1] = v1; dst[2] = v2; } const uint32_t remain = num & 3; if (remain) { Vec3V work[4]; uint32_t i = 0; for (; i < remain; i++) work[i] = V3Mul(scale, V3LoadU(&src[i].x)); for (; i < 4; i++) work[i] = work[remain - 1]; V4Transpose(work[0], work[1], work[2], work[3]); dst[0] = work[0]; dst[1] = work[1]; dst[2] = work[2]; } } // TODO: move this to a better long term home // scope based helper struct to pick between stack and heap alloc based on the size of the request struct ScopeMemoryAllocator { public: ScopeMemoryAllocator() : mAlloc(nullptr) {}; ~ScopeMemoryAllocator() { this->free(); } void* alloc(size_t buffSize) { if (mAlloc == nullptr) { mAlloc = NVBLAST_ALLOC(buffSize); return mAlloc; } return nullptr; } void free() { if (mAlloc != nullptr) { NVBLAST_FREE(mAlloc); mAlloc = nullptr; } } private: void* mAlloc; }; #define STACK_ALLOC_LIMIT (100 * 1024) #define ALLOCATE_TEMP_MEMORY(_out, buffSize) \ ScopeMemoryAllocator _out##Allocator; \ _out = (buffSize < STACK_ALLOC_LIMIT ? 
NvBlastAlloca(buffSize) : _out##Allocator.alloc(buffSize)) bool importerHullsInProximityApexFree(uint32_t hull0Count, const NvVec3* hull0, NvBounds3& hull0Bounds, const nvidia::NvTransform& localToWorldRT0In, const nvidia::NvVec3& scale0In, uint32_t hull1Count, const NvVec3* hull1, NvBounds3& hull1Bounds, const nvidia::NvTransform& localToWorldRT1In, const nvidia::NvVec3& scale1In, float maxDistance, Separation* separation) { const uint32_t numVerts0 = static_cast<uint32_t>(hull0Count); const uint32_t numVerts1 = static_cast<uint32_t>(hull1Count); const uint32_t numAov0 = (numVerts0 + 3) >> 2; const uint32_t numAov1 = (numVerts1 + 3) >> 2; const uint32_t buffSize = (numAov0 + numAov1) * sizeof(Vec4V) * 3; void* buff = nullptr; ALLOCATE_TEMP_MEMORY(buff, buffSize); Vec4V* verts0 = (Vec4V*)buff; // Make sure it's aligned NVBLAST_ASSERT((size_t(verts0) & 0xf) == 0); Vec4V* verts1 = verts0 + (numAov0 * 3); const Vec3V scale0 = V3LoadU(&scale0In.x); const Vec3V scale1 = V3LoadU(&scale1In.x); std::vector<NvVec3> vert0(numVerts0); for (uint32_t i = 0; i < numVerts0; ++i) { vert0[i] = hull0[i]; } std::vector<NvVec3> vert1(numVerts1); for (uint32_t i = 0; i < numVerts1; ++i) { vert1[i] = hull1[i]; } _arrayVec3ToVec4(vert0.data(), scale0, verts0, numVerts0); _arrayVec3ToVec4(vert1.data(), scale1, verts1, numVerts1); const NvTransform trans1To0 = localToWorldRT0In.transformInv(localToWorldRT1In); // Load into simd mat Mat34V bToA; *(NvMat44*)&bToA = trans1To0; (*(NvMat44*)&bToA).column3.w = 0.0f; // AOS wants the 4th component of Vec3V to be 0 to work properly ConvexV convexA; ConvexV convexB; convexA.mNumAovVertices = numAov0; convexA.mAovVertices = verts0; convexB.mNumAovVertices = numAov1; convexB.mAovVertices = verts1; const nvidia::NvVec3 hullACenter = hull0Bounds.getCenter(); const nvidia::NvVec3 hullBCenter = hull1Bounds.getCenter(); const Vec3V centroidA = V3LoadU(&hullACenter.x); const Vec3V centroidB = M34MulV3(bToA, V3LoadU(&hullBCenter.x)); // Take the origin of B in As space as the inital direction as it is 'the difference in transform origins B-A in A's space' // Should be a good first guess // Use centroid information const Vec3V initialDir = V3Sub(centroidB, centroidA); Output output; Status status = Collide(initialDir, convexA, bToA, convexB, output); if (status == STATUS_DEGENERATE) { // Calculate the tolerance from the extents const NvVec3 extents0 = hull0Bounds.getExtents(); const NvVec3 extents1 = hull1Bounds.getExtents(); const FloatV tolerance0 = V3ExtractMin(V3Mul(V3LoadU(&extents0.x), scale0)); const FloatV tolerance1 = V3ExtractMin(V3Mul(V3LoadU(&extents1.x), scale1)); const FloatV tolerance = FMul(FAdd(tolerance0, tolerance1), FLoad(0.01f)); const FloatV sqTolerance = FMul(tolerance, tolerance); status = FAllGrtr(sqTolerance, output.mDistSq) ? STATUS_CONTACT : STATUS_NON_INTERSECT; } switch (status) { case STATUS_CONTACT: { if (separation) { _calcSeparation(convexA, localToWorldRT0In, bToA, convexB, initialDir, output, *separation); } return true; } default: case STATUS_NON_INTERSECT: { if (separation) { _calcSeparation(convexA, localToWorldRT0In, bToA, convexB, initialDir, output, *separation); } float val; FStore(output.mDistSq, &val); return val < (maxDistance * maxDistance); } } } } // namespace Blast } // namespace Nv
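A minimal, hypothetical usage sketch of importerHullsInProximityApexFree as declared above (not part of the original file). It assumes that the function and the Separation struct live in the Nv::Blast namespace of NvBlastExtApexSharedParts.h, and that the NvShared math types (NvVec3, NvBounds3, NvTransform) offer their usual constructors; the cube geometry and the 0.1 distance threshold are made-up example values.

// Hypothetical usage sketch: tests whether two unit cubes, offset along X, come within 0.1 units of each other.
// Assumptions: Separation and importerHullsInProximityApexFree are declared in NvBlastExtApexSharedParts.h;
// NvVec3/NvBounds3/NvTransform follow the usual NvShared constructors.
#include "NvBlastExtApexSharedParts.h"

static bool cubesAreClose()
{
    using namespace nvidia;

    const NvVec3 cube[8] = {
        NvVec3(-0.5f, -0.5f, -0.5f), NvVec3(0.5f, -0.5f, -0.5f),
        NvVec3(0.5f, 0.5f, -0.5f),   NvVec3(-0.5f, 0.5f, -0.5f),
        NvVec3(-0.5f, -0.5f, 0.5f),  NvVec3(0.5f, -0.5f, 0.5f),
        NvVec3(0.5f, 0.5f, 0.5f),    NvVec3(-0.5f, 0.5f, 0.5f)
    };
    NvBounds3 bounds(NvVec3(-0.5f, -0.5f, -0.5f), NvVec3(0.5f, 0.5f, 0.5f));

    NvTransform a(NvVec3(0.0f, 0.0f, 0.0f));    // hull 0 at the origin
    NvTransform b(NvVec3(1.05f, 0.0f, 0.0f));   // hull 1 shifted along X
    const NvVec3 unitScale(1.0f, 1.0f, 1.0f);

    Nv::Blast::Separation sep;
    return Nv::Blast::importerHullsInProximityApexFree(
        8, cube, bounds, a, unitScale,           // hull 0: verts, bounds, transform, scale
        8, cube, bounds, b, unitScale,           // hull 1
        0.1f,                                    // maximum distance still reported as "in proximity"
        &sep);
}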
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringCollisionBuilderImpl.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.

#ifndef NVBLASTEXTAUTHORINGCOLLISIONBUILDERIIMPL_H
#define NVBLASTEXTAUTHORINGCOLLISIONBUILDERIIMPL_H

#include "NvBlastExtAuthoringConvexMeshBuilder.h"
#include "NvBlastExtAuthoringTypes.h"

namespace Nv
{
namespace Blast
{

void trimCollisionGeometry(ConvexMeshBuilder& cmb, uint32_t chunksCount, CollisionHull** in, const uint32_t* chunkDepth);

int32_t buildMeshConvexDecomposition(ConvexMeshBuilder& cmb, const Triangle* mesh, uint32_t triangleCount,
                                     const ConvexDecompositionParams& params, CollisionHull**& convexes);

} // namespace Blast
} // namespace Nv

#endif // ifndef NVBLASTEXTAUTHORINGCOLLISIONBUILDERIIMPL_H
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringMeshCleanerImpl.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.

#ifndef NVBLASTEXTAUTHORINGMESHCLEANERIMPL_H
#define NVBLASTEXTAUTHORINGMESHCLEANERIMPL_H

#include "NvBlastExtAuthoringMeshCleaner.h"

namespace Nv
{
namespace Blast
{

class Mesh;

class MeshCleanerImpl : public MeshCleaner
{
public:
    /**
        Tries to remove self intersections and open edges in the interior of the mesh.
        \param[in] mesh Mesh to be cleaned.
        \return Cleaned mesh or nullptr if failed.
    */
    virtual Mesh* cleanMesh(const Nv::Blast::Mesh* mesh) override;

    virtual void release() override;

    ~MeshCleanerImpl() {}
};

} // namespace Blast
} // namespace Nv

#endif // NVBLASTEXTAUTHORINGMESHCLEANERIMPL_H
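A short, hypothetical usage sketch for the cleaner above (not part of the original header). It assumes the NvBlastExtAuthoringCreateMeshCleaner() factory from NvBlastExtAuthoring.h is the intended way to obtain a MeshCleaner instance.

// Hypothetical usage sketch: clean an authoring Mesh before fracturing it.
// Assumption: NvBlastExtAuthoringCreateMeshCleaner() is the factory exposed by NvBlastExtAuthoring.h.
#include "NvBlastExtAuthoring.h"
#include "NvBlastExtAuthoringMeshCleaner.h"

static Nv::Blast::Mesh* cleanOrKeep(Nv::Blast::Mesh* mesh)
{
    Nv::Blast::MeshCleaner* cleaner = NvBlastExtAuthoringCreateMeshCleaner();
    Nv::Blast::Mesh* cleaned = cleaner->cleanMesh(mesh);   // returns nullptr on failure
    cleaner->release();
    return cleaned ? cleaned : mesh;                        // fall back to the original mesh if cleaning failed
}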
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtTriangleProcessor.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.

#ifndef NVBLASTEXTTRIANGLEPROCESSOR_H
#define NVBLASTEXTTRIANGLEPROCESSOR_H

#include "NvVec2.h"
#include "NvVec3.h"
#include <vector>
#include <algorithm>

using namespace nvidia;

namespace Nv
{
namespace Blast
{

/**
    Triangle processor internal triangle representation. Contains only vertex positions.
*/
struct TrPrcTriangle
{
    NvVec3 points[3];

    TrPrcTriangle(NvVec3 a = NvVec3(0.0f), NvVec3 b = NvVec3(0.0f), NvVec3 c = NvVec3(0.0f))
    {
        points[0] = a;
        points[1] = b;
        points[2] = c;
    }

    TrPrcTriangle& operator=(const TrPrcTriangle& b)
    {
        points[0] = b.points[0];
        points[1] = b.points[1];
        points[2] = b.points[2];
        return *this;
    }

    TrPrcTriangle(const TrPrcTriangle& b)
    {
        points[0] = b.points[0];
        points[1] = b.points[1];
        points[2] = b.points[2];
    }

    NvVec3 getNormal() const
    {
        return (points[1] - points[0]).cross(points[2] - points[0]);
    }
};

/**
    Triangle processor internal 2D triangle representation. Contains only vertex positions.
*/
struct TrPrcTriangle2d
{
    NvVec2 points[3];

    TrPrcTriangle2d(NvVec2 a = NvVec2(0.0f), NvVec2 b = NvVec2(0.0f), NvVec2 c = NvVec2(0.0f))
    {
        points[0] = a;
        points[1] = b;
        points[2] = c;
    }

    TrPrcTriangle2d operator=(const TrPrcTriangle2d& b)
    {
        points[0] = b.points[0];
        points[1] = b.points[1];
        points[2] = b.points[2];
        return *this;
    }

    TrPrcTriangle2d(const TrPrcTriangle2d& b)
    {
        points[0] = b.points[0];
        points[1] = b.points[1];
        points[2] = b.points[2];
    }
};

class TriangleProcessor
{
public:
    TriangleProcessor() {}
    ~TriangleProcessor() {}

    /**
        Build the intersection between two triangles.
        \param[in]  a                  First triangle (A)
        \param[in]  aProjected         Projected triangle A
        \param[in]  b                  Second triangle (B)
        \param[in]  centroid           Centroid of the first triangle (A)
        \param[out] intersectionBuffer Resulting intersection polygon
        \param[in]  normal             Normal vector to the triangle (common for both A and B).
        \return 1 if an intersection is found.
    */
    uint32_t getTriangleIntersection(TrPrcTriangle& a, TrPrcTriangle2d& aProjected, TrPrcTriangle& b, NvVec3& centroid,
                                     std::vector<NvVec3>& intersectionBuffer, NvVec3 normal);

    /**
        Test whether the bounding boxes of two triangles intersect.
        \param[in] a First triangle (A)
        \param[in] b Second triangle (B)
        \return true if they intersect
    */
    bool triangleBoundingBoxIntersection(TrPrcTriangle2d& a, TrPrcTriangle2d& b);

    /**
        Test whether a point is inside of a triangle.
        \param[in] point    Point coordinates in 2d space.
        \param[in] triangle Triangle in 2d space.
        \return 1 if inside, 2 if on an edge, 0 if neither inside nor on an edge.
    */
    uint32_t isPointInside(const NvVec2& point, const TrPrcTriangle2d& triangle);

    /**
        Segment intersection point.
        \param[in]  s1 Segment-1 start point
        \param[in]  e1 Segment-1 end point
        \param[in]  s2 Segment-2 start point
        \param[in]  e2 Segment-2 end point
        \param[out] t1 Intersection point parameter relative to Segment-1, lies in the [0.0, 1.0] range.
        \return 0 if there is no intersection, 1 if an intersection is found.
    */
    uint32_t getSegmentIntersection(const NvVec2& s1, const NvVec2& e1, const NvVec2& s2, const NvVec2& e2, float& t1);

    /**
        Sort vertices of a polygon in CCW order.
    */
    void sortToCCW(std::vector<NvVec3>& points, NvVec3& normal);

    /**
        Builds a convex polygon for a given set of points. The points should be coplanar.
        \param[in]  points     Input array of points
        \param[out] convexHull Output polygon
        \param[in]  normal     Normal vector to the polygon.
    */
    void buildConvexHull(std::vector<NvVec3>& points, std::vector<NvVec3>& convexHull, const NvVec3& normal);
};

} // namespace Blast
} // namespace Nv

#endif // NVBLASTEXTTRIANGLEPROCESSOR_H
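A small, hypothetical usage sketch for the 2D helpers documented above (not part of the original header); the example points are arbitrary and the return-code meanings simply restate the doxygen comments.

// Hypothetical usage sketch of TriangleProcessor's 2D queries.
#include "NvBlastExtTriangleProcessor.h"

static void triangleProcessorExample()
{
    Nv::Blast::TriangleProcessor prc;

    // Point-in-triangle test: 1 = inside, 2 = on an edge, 0 = outside.
    Nv::Blast::TrPrcTriangle2d tri(nvidia::NvVec2(0.0f, 0.0f),
                                   nvidia::NvVec2(1.0f, 0.0f),
                                   nvidia::NvVec2(0.0f, 1.0f));
    const uint32_t inside = prc.isPointInside(nvidia::NvVec2(0.25f, 0.25f), tri);

    // Segment/segment intersection: t1 is the parameter along the first segment in [0, 1].
    float t1 = 0.0f;
    const uint32_t hit = prc.getSegmentIntersection(
        nvidia::NvVec2(0.0f, 0.0f), nvidia::NvVec2(1.0f, 0.0f),
        nvidia::NvVec2(0.5f, -1.0f), nvidia::NvVec2(0.5f, 1.0f), t1);

    (void)inside;
    (void)hit;
}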
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringTriangulator.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.

#ifndef NVBLASTEXTAUTHORINGTRIANGULATOR_H
#define NVBLASTEXTAUTHORINGTRIANGULATOR_H

#include <vector>
#include <map>

#include "NvBlastExtAuthoringTypes.h"
#include "NvBlastExtAuthoringMesh.h"
#include "NvBlastExtAuthoringInternalCommon.h"

namespace Nv
{
namespace Blast
{

/**
    Tool for triangulating meshes as a post-processing step of authoring.
*/
class Triangulator
{
public:
    /**
        Triangulates the provided mesh and saves the result internally. Uses the ear-clipping algorithm.
        \param[in] mesh Mesh for triangulation
    */
    void triangulate(const Mesh* mesh);

    /**
        \return Array of triangles of the base mesh.
    */
    std::vector<Triangle>& getBaseMesh()
    {
        return mBaseMeshUVFittedTriangles;
    }

    std::vector<Triangle>& getBaseMeshNotFitted()
    {
        return mBaseMeshResultTriangles;
    }

    /**
        \return Array of TriangleIndexed of the base mesh. Each TriangleIndexed contains the index of the
        corresponding vertex in the internal vertex buffer.
    */
    std::vector<TriangleIndexed>& getBaseMeshIndexed()
    {
        return mBaseMeshTriangles;
    }

    /**
        \return Mapping from vertices of the input Mesh to the internal vertex buffer. Used for island detection.
    */
    std::vector<uint32_t>& getBaseMapping()
    {
        return mBaseMapping;
    }

    /**
        \return Mapping from vertices of the input Mesh to the internal vertex buffer, where only positions are
        taken into account. Used for island detection.
    */
    std::vector<int32_t>& getPositionedMapping()
    {
        return mPositionMappedVrt;
    }

    /**
        \return Internal vertex buffer size. Vertices are welded internally with some threshold.
    */
    uint32_t getWeldedVerticesCount()
    {
        return static_cast<uint32_t>(mVertices.size());
    }

    /**
        Removes all information about mesh triangulation.
    */
    void reset();

    int32_t& getParentChunkId()
    {
        return parentChunkId;
    }

private:
    int32_t parentChunkId;

    int32_t addVerticeIfNotExist(const Vertex& p);
    void    addEdgeIfValid(EdgeWithParent& ed);

    /* Data used before triangulation to build polygon loops */
    std::vector<Vertex>                               mVertices;
    std::vector<EdgeWithParent>                       mBaseMeshEdges;
    std::map<Vertex, int32_t, VrtComp>                mVertMap;
    std::map<EdgeWithParent, int32_t, EdgeComparator> mEdgeMap;
    std::vector<uint32_t>                             mBaseMapping;
    std::vector<int32_t>                              mPositionMappedVrt;
    /* ------------------------------------------------------------ */

    /**
        Unite all nearly identical vertices and update edges according to these changes.
    */
    void prepare(const Mesh* mesh);

    void triangulatePolygonWithEarClipping(const std::vector<uint32_t>& inputPolygon, const Vertex* vert,
                                           const ProjectionDirections& dir);
    void buildPolygonAndTriangulate(std::vector<Edge>& edges, Vertex* vertices, int32_t userData, int32_t materialId,
                                    int32_t smoothingGroup);
    void computePositionedMapping();

    std::vector<TriangleIndexed> mBaseMeshTriangles;

    /** Final triangles */
    std::vector<Triangle> mBaseMeshResultTriangles;
    std::vector<Triangle> mBaseMeshUVFittedTriangles;
};

} // namespace Blast
} // namespace Nv

#endif // ifndef NVBLASTEXTAUTHORINGTRIANGULATOR_H
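A brief, hypothetical usage sketch for the class above (not part of the original header): triangulate an authoring Mesh and read back the resulting triangle list. The helper function name is made up for illustration.

// Hypothetical usage sketch: triangulate a Mesh and count the resulting triangles.
#include "NvBlastExtAuthoringTriangulator.h"
#include <vector>

static uint32_t triangulateMesh(const Nv::Blast::Mesh* mesh)
{
    Nv::Blast::Triangulator triangulator;
    triangulator.triangulate(mesh);   // ear-clipping over the mesh facets

    // UV-fitted output triangles; getBaseMeshNotFitted()/getBaseMeshIndexed() expose the other views.
    std::vector<Nv::Blast::Triangle>& tris = triangulator.getBaseMesh();
    return static_cast<uint32_t>(tris.size());
}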
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringFractureToolImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtAuthoringFractureToolImpl.h" #include "NvBlastExtAuthoringMeshImpl.h" #include "NvBlastExtAuthoringMeshUtils.h" // This warning arises when using some stl containers with older versions of VC // c:\program files (x86)\microsoft visual studio 12.0\vc\include\xtree(1826): warning C4702: unreachable code #if NV_VC && NV_VC < 14 #pragma warning(disable : 4702) #endif #include <queue> #include <vector> #include <map> #include <stack> #include <functional> #include "NvBlastExtAuthoringVSA.h" #include <float.h> #include "NvBlastExtAuthoring.h" #include "NvBlastExtAuthoringTriangulator.h" #include "NvBlastExtAuthoringBooleanToolImpl.h" #include "NvBlastExtAuthoringAcceleratorImpl.h" #include "NvBlastExtAuthoringCutout.h" #include "NvBlast.h" #include "NvBlastGlobals.h" #include "NvBlastExtAuthoringPerlinNoise.h" #include <NvBlastAssert.h> #include <NvBlastNvSharedHelpers.h> #ifndef SAFE_DELETE #define SAFE_DELETE(p) \ { \ if (p) \ { \ delete (p); \ (p) = NULL; \ } \ } #endif namespace Nv { namespace Blast { /* Vector operations using TransformST */ inline TransformST createCubeTMFromBounds(const NvcBounds3& bounds) { // scale = max extent, translation = center const NvcVec3 center = 0.5f*(bounds.maximum + bounds.minimum); const NvcVec3 extent = 0.5f*(bounds.maximum - bounds.minimum); const float maxExtent = std::max(extent.x, std::max(extent.y, extent.z)); return {center, maxExtent > 0.0f ? 
maxExtent : 1.0f}; // Keep the transformation from being singular } ////////////////////////////////////////// struct Halfspace_partitioning : public VSA::VS3D_Halfspace_Set { std::vector<NvcPlane> planes; VSA::real farthest_halfspace(VSA::real plane[4], const VSA::real point[4]) { float biggest_d = -FLT_MAX; for (uint32_t i = 0; i < planes.size(); ++i) { float d = planes[i].n.x * point[0] + planes[i].n.y * point[1] + planes[i].n.z * point[2] + planes[i].d * point[3]; if (d > biggest_d) { biggest_d = d; plane[0] = planes[i].n.x; plane[1] = planes[i].n.y; plane[2] = planes[i].n.z; plane[3] = planes[i].d; } } return biggest_d; }; }; int32_t findCellBasePlanes(const std::vector<NvcVec3>& sites, std::vector<std::vector<std::pair<int32_t, int32_t>>>& neighbors) { Halfspace_partitioning prt; std::vector<NvcPlane>& planes = prt.planes; int32_t neighborGlobalIndex = 0; neighbors.resize(sites.size()); for (uint32_t cellId = 0; cellId + 1 < sites.size(); ++cellId) { planes.clear(); planes.resize(sites.size() - 1 - cellId); std::vector<NvcVec3> midpoints(sites.size() - 1); int32_t collected = 0; for (uint32_t i = cellId + 1; i < sites.size(); ++i) { NvcVec3 midpoint = 0.5 * (sites[i] + sites[cellId]); NvcVec3 direction = fromNvShared(toNvShared(sites[i] - sites[cellId]).getNormalized()); planes[collected].n = direction; planes[collected].d = -(direction | midpoint); midpoints[collected] = midpoint; ++collected; } for (uint32_t i = 0; i < planes.size(); ++i) { planes[i].n = -planes[i].n; planes[i].d = -planes[i].d; if (VSA::vs3d_test(prt)) { const uint32_t nId = i + cellId + 1; neighbors[cellId].push_back(std::pair<int32_t, int32_t>(nId, neighborGlobalIndex)); neighbors[nId].push_back(std::pair<int32_t, int32_t>(cellId, neighborGlobalIndex)); ++neighborGlobalIndex; }; planes[i].n = -planes[i].n; planes[i].d = -planes[i].d; } } return neighborGlobalIndex; } #define SITE_BOX_SIZE 4 #define CUTTING_BOX_SIZE 40 Mesh* getCellMesh(BooleanEvaluator& eval, int32_t planeIndexerOffset, int32_t cellId, const std::vector<NvcVec3>& sites, const std::vector<std::vector<std::pair<int32_t, int32_t>>>& neighbors, int32_t interiorMaterialId, NvcVec3 origin) { Mesh* cell = getBigBox(toNvShared(origin), SITE_BOX_SIZE, interiorMaterialId); Mesh* cuttingMesh = getCuttingBox(NvVec3(0, 0, 0), NvVec3(1, 1, 1), CUTTING_BOX_SIZE, 0, interiorMaterialId); for (uint32_t i = 0; i < neighbors[cellId].size(); ++i) { std::pair<int32_t, int32_t> neighbor = neighbors[cellId][i]; int32_t nCell = neighbor.first; NvVec3 midpoint = 0.5 * toNvShared(sites[nCell] + sites[cellId]); NvVec3 direction = toNvShared(sites[nCell] - sites[cellId]).getNormalized(); int32_t planeIndex = neighbor.second + planeIndexerOffset; if (nCell < cellId) planeIndex = -planeIndex; setCuttingBox(midpoint, -direction, cuttingMesh, CUTTING_BOX_SIZE, planeIndex); eval.performFastCutting(cell, cuttingMesh, BooleanConfigurations::BOOLEAN_INTERSECTION()); Mesh* newCell = eval.createNewMesh(); delete cell; cell = newCell; if (cell == nullptr) break; } delete cuttingMesh; return cell; } #define MAX_VORONOI_ATTEMPT_NUMBER 450 VoronoiSitesGeneratorImpl::VoronoiSitesGeneratorImpl(const Mesh* mesh, RandomGeneratorBase* rnd) { mMesh = mesh; mRnd = rnd; mAccelerator = new BBoxBasedAccelerator(mMesh, kBBoxBasedAcceleratorDefaultResolution); mStencil = nullptr; } void VoronoiSitesGeneratorImpl::setBaseMesh(const Mesh* m) { mGeneratedSites.clear(); delete mAccelerator; mMesh = m; mAccelerator = new BBoxBasedAccelerator(mMesh, kBBoxBasedAcceleratorDefaultResolution); } 
VoronoiSitesGeneratorImpl::~VoronoiSitesGeneratorImpl() { delete mAccelerator; mAccelerator = nullptr; } void VoronoiSitesGeneratorImpl::release() { delete this; } void VoronoiSitesGeneratorImpl::setStencil(const Mesh* stencil) { mStencil = stencil; } void VoronoiSitesGeneratorImpl::clearStencil() { mStencil = nullptr; } void VoronoiSitesGeneratorImpl::uniformlyGenerateSitesInMesh(const uint32_t sitesCount) { BooleanEvaluator voronoiMeshEval; NvcVec3 mn = mMesh->getBoundingBox().minimum; NvcVec3 mx = mMesh->getBoundingBox().maximum; NvcVec3 vc = mx - mn; uint32_t attemptNumber = 0; uint32_t generatedSites = 0; while (generatedSites < sitesCount && attemptNumber < MAX_VORONOI_ATTEMPT_NUMBER) { float rn1 = mRnd->getRandomValue() * vc.x; float rn2 = mRnd->getRandomValue() * vc.y; float rn3 = mRnd->getRandomValue() * vc.z; if (voronoiMeshEval.isPointContainedInMesh(mMesh, NvcVec3{ rn1, rn2, rn3 } + mn) && (mStencil == nullptr || voronoiMeshEval.isPointContainedInMesh(mStencil, NvcVec3{ rn1, rn2, rn3 } + mn))) { generatedSites++; mGeneratedSites.push_back(NvcVec3{ rn1, rn2, rn3 } + mn); attemptNumber = 0; } else { attemptNumber++; if (attemptNumber > MAX_VORONOI_ATTEMPT_NUMBER) break; } } } void VoronoiSitesGeneratorImpl::clusteredSitesGeneration(const uint32_t numberOfClusters, const uint32_t sitesPerCluster, float clusterRadius) { BooleanEvaluator voronoiMeshEval; NvcVec3 mn = mMesh->getBoundingBox().minimum; NvcVec3 mx = mMesh->getBoundingBox().maximum; NvcVec3 middle = (mx + mn) * 0.5; NvcVec3 vc = (mx - mn) * 0.5; uint32_t attemptNumber = 0; uint32_t generatedSites = 0; std::vector<NvcVec3> tempPoints; while (generatedSites < numberOfClusters) { float rn1 = mRnd->getRandomValue() * 2 - 1; float rn2 = mRnd->getRandomValue() * 2 - 1; float rn3 = mRnd->getRandomValue() * 2 - 1; NvcVec3 p = { middle.x + rn1 * vc.x, middle.y + rn2 * vc.y, middle.z + rn3 * vc.z }; if (voronoiMeshEval.isPointContainedInMesh(mMesh, p) && (mStencil == nullptr || voronoiMeshEval.isPointContainedInMesh(mStencil, p))) { generatedSites++; tempPoints.push_back(p); attemptNumber = 0; } else { attemptNumber++; if (attemptNumber > MAX_VORONOI_ATTEMPT_NUMBER) break; } } int32_t totalCount = 0; for (; tempPoints.size() > 0; tempPoints.pop_back()) { uint32_t unif = sitesPerCluster; generatedSites = 0; while (generatedSites < unif) { NvcVec3 p = tempPoints.back() + fromNvShared(NvVec3(mRnd->getRandomValue() * 2 - 1, mRnd->getRandomValue() * 2 - 1, mRnd->getRandomValue() * 2 - 1) .getNormalized()) * (mRnd->getRandomValue() + 0.001f) * clusterRadius; if (voronoiMeshEval.isPointContainedInMesh(mMesh, p) && (mStencil == nullptr || voronoiMeshEval.isPointContainedInMesh(mStencil, p))) { totalCount++; generatedSites++; mGeneratedSites.push_back(p); attemptNumber = 0; } else { attemptNumber++; if (attemptNumber > MAX_VORONOI_ATTEMPT_NUMBER) break; } } } } #define IN_SPHERE_ATTEMPT_NUMBER 20 void VoronoiSitesGeneratorImpl::addSite(const NvcVec3& site) { mGeneratedSites.push_back(site); } void VoronoiSitesGeneratorImpl::generateInSphere(const uint32_t count, const float radius, const NvcVec3& center) { BooleanEvaluator voronoiMeshEval; uint32_t attemptNumber = 0; uint32_t generatedSites = 0; std::vector<NvcVec3> tempPoints; float radiusSquared = radius * radius; while (generatedSites < count && attemptNumber < MAX_VORONOI_ATTEMPT_NUMBER) { float rn1 = (mRnd->getRandomValue() - 0.5f) * 2.f * radius; float rn2 = (mRnd->getRandomValue() - 0.5f) * 2.f * radius; float rn3 = (mRnd->getRandomValue() - 0.5f) * 2.f * radius; NvcVec3 point = { 
rn1, rn2, rn3 }; if (toNvShared(point).magnitudeSquared() < radiusSquared && voronoiMeshEval.isPointContainedInMesh(mMesh, point + center) && (mStencil == nullptr || voronoiMeshEval.isPointContainedInMesh(mStencil, point + center))) { generatedSites++; mGeneratedSites.push_back(point + center); attemptNumber = 0; } else { attemptNumber++; if (attemptNumber > MAX_VORONOI_ATTEMPT_NUMBER) break; } } } void VoronoiSitesGeneratorImpl::deleteInSphere(const float radius, const NvcVec3& center, float deleteProbability) { float r2 = radius * radius; for (uint32_t i = 0; i < mGeneratedSites.size(); ++i) { if (toNvShared(mGeneratedSites[i] - center).magnitudeSquared() < r2 && mRnd->getRandomValue() <= deleteProbability) { std::swap(mGeneratedSites[i], mGeneratedSites.back()); mGeneratedSites.pop_back(); --i; } } } void VoronoiSitesGeneratorImpl::radialPattern(const NvcVec3& center, const NvcVec3& normal, float radius, int32_t angularSteps, int32_t radialSteps, float angleOffset, float variability) { // mGeneratedSites.push_back(center); NvVec3 t1, t2; if (std::abs(normal.z) < 0.9) { t1 = toNvShared(normal).cross(NvVec3(0, 0, 1)); } else { t1 = toNvShared(normal).cross(NvVec3(1, 0, 0)); } t2 = t1.cross(toNvShared(normal)); t1.normalize(); t2.normalize(); float radStep = radius / radialSteps; int32_t cCr = 0; float angleStep = nvidia::NvPi * 2 / angularSteps; for (float cRadius = radStep; cRadius < radius; cRadius += radStep) { float cAngle = angleOffset * cCr; for (int32_t i = 0; i < angularSteps; ++i) { float angVars = mRnd->getRandomValue() * variability + (1.0f - 0.5f * variability); float radVars = mRnd->getRandomValue() * variability + (1.0f - 0.5f * variability); NvcVec3 nPos = fromNvShared(std::cos(cAngle * angVars) * t1 + std::sin(cAngle * angVars) * t2) * cRadius * radVars + center; mGeneratedSites.push_back(nPos); cAngle += angleStep; } ++cCr; } } uint32_t VoronoiSitesGeneratorImpl::getVoronoiSites(const NvcVec3*& sites) { if (mGeneratedSites.size()) { sites = &mGeneratedSites[0]; } return (uint32_t)mGeneratedSites.size(); } int32_t FractureToolImpl::voronoiFracturing(uint32_t chunkId, uint32_t cellCount, const NvcVec3* cellPointsIn, bool replaceChunk) { if (chunkId == 0 && replaceChunk) { return 1; } int32_t chunkInfoIndex = getChunkInfoIndex(chunkId); if (chunkInfoIndex == -1 || cellCount < 2) { return 1; } if (!mChunkData[chunkInfoIndex].isLeaf) { deleteChunkSubhierarchy(chunkId); } chunkInfoIndex = getChunkInfoIndex(chunkId); Mesh* mesh = mChunkData[chunkInfoIndex].getMesh(); const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld(); std::vector<NvcVec3> cellPoints(cellCount); for (uint32_t i = 0; i < cellCount; ++i) { cellPoints[i] = tm.invTransformPos(cellPointsIn[i]); } /** Prebuild accelerator structure */ BooleanEvaluator eval; BooleanEvaluator voronoiMeshEval; BBoxBasedAccelerator spAccel = BBoxBasedAccelerator(mesh, kBBoxBasedAcceleratorDefaultResolution); std::vector<std::vector<std::pair<int32_t, int32_t>>> neighbors; const int32_t neighborCount = findCellBasePlanes(cellPoints, neighbors); /** Fracture */ int32_t parentChunkId = replaceChunk ? 
mChunkData[chunkInfoIndex].parentChunkId : chunkId; std::vector<uint32_t> newlyCreatedChunksIds; for (uint32_t i = 0; i < cellPoints.size(); ++i) { Mesh* cell = getCellMesh(eval, mPlaneIndexerOffset, i, cellPoints, neighbors, mInteriorMaterialId, cellPoints[i]); if (cell == nullptr) { continue; } DummyAccelerator dmAccel(cell->getFacetCount()); voronoiMeshEval.performBoolean(mesh, cell, &spAccel, &dmAccel, BooleanConfigurations::BOOLEAN_INTERSECTION()); Mesh* resultMesh = voronoiMeshEval.createNewMesh(); if (resultMesh) { uint32_t ncidx = createNewChunk(parentChunkId); mChunkData[ncidx].isLeaf = true; setChunkInfoMesh(mChunkData[ncidx], resultMesh); newlyCreatedChunksIds.push_back(mChunkData[ncidx].chunkId); } eval.reset(); delete cell; } mChunkData[chunkInfoIndex].isLeaf = false; if (replaceChunk) { deleteChunkSubhierarchy(chunkId, true); } mPlaneIndexerOffset += neighborCount; if (mRemoveIslands) { for (auto chunkToCheck : newlyCreatedChunksIds) { islandDetectionAndRemoving(chunkToCheck); } } return 0; } template<typename Cmp> static void compactifyAndTransformVertexBuffer ( std::vector<Nv::Blast::Vertex>& vertexBuffer, Edge* edges, const Nv::Blast::Vertex* sourceVertices, uint32_t numSourceVerts, uint32_t numEdges, const TransformST& tm ) { std::vector<uint32_t> indexMap; indexMap.reserve(numSourceVerts); std::map<Vertex, uint32_t, Cmp> vertexMapping; for (uint32_t i = 0; i < numSourceVerts; i++) { const auto& vert = sourceVertices[i]; auto it = vertexMapping.find(vert); if (it == vertexMapping.end()) { const uint32_t size = static_cast<uint32_t>(vertexBuffer.size()); vertexMapping[vert] = size; // transform the position and normalZ back to world space before storing it Nv::Blast::Vertex transformedVert = vert; transformedVert.p = tm.transformPos(vert.p); vertexBuffer.push_back(transformedVert); indexMap.push_back(size); } else { indexMap.push_back(it->second); } } // now we need convert the list of edges to be based on the compacted vertex buffer for (uint32_t i = 0; i < numEdges; i++) { Edge &edge = edges[i]; edge.s = indexMap[edges[i].s]; edge.e = indexMap[edges[i].e]; } } Mesh* FractureToolImpl::createChunkMesh(int32_t chunkInfoIndex, bool splitUVs /* = true */) { // make sure the chunk is valid if (chunkInfoIndex < 0 || uint32_t(chunkInfoIndex) >= this->getChunkCount()) { return nullptr; } // grab the original source mesh const auto sourceMesh = this->getChunkInfo(chunkInfoIndex).getMesh(); if (!sourceMesh) { return nullptr; } const Nv::Blast::Vertex* sourceVertices = sourceMesh->getVertices(); const uint32_t numSourceVerts = sourceMesh->getVerticesCount(); const auto sourceEdges = sourceMesh->getEdges(); const auto numEdges = sourceMesh->getEdgesCount(); const auto edgeBufferSize = numEdges * sizeof(Edge); Edge* edges = reinterpret_cast<Edge*>(NVBLAST_ALLOC(edgeBufferSize)); memcpy(edges, sourceEdges, edgeBufferSize); const TransformST& tm = this->getChunkInfo(chunkInfoIndex).getTmToWorld(); std::vector<Vertex> _vertexBuffer; if (splitUVs) compactifyAndTransformVertexBuffer<VrtComp>(_vertexBuffer, edges, sourceVertices, numSourceVerts, numEdges, tm); else compactifyAndTransformVertexBuffer<VrtCompNoUV>(_vertexBuffer, edges, sourceVertices, numSourceVerts, numEdges, tm); // now fix the order of the edges // compacting the vertex buffer can put them out of order // the end of one edge needs to be the start of the next const auto facets = sourceMesh->getFacetsBuffer(); const auto facetsCount = sourceMesh->getFacetCount(); Vertex* vertices = 
reinterpret_cast<Vertex*>(_vertexBuffer.data()); const auto numVerts = static_cast<uint32_t>(_vertexBuffer.size()); nvidia::NvBounds3 bnd; bnd.setEmpty(); std::set<int32_t> vertUVsToFix; for (uint32_t f = 0; f < facetsCount; f++) { const Facet& facet = facets[f]; uint32_t nextIndex = edges[facet.firstEdgeNumber].e; for (uint32_t edge = 1; edge < facet.edgesCount; edge++) { for (uint32_t test = edge; test < facet.edgesCount; test++) { if (nextIndex == edges[facet.firstEdgeNumber + test].s) { if (test != edge) { std::swap(edges[facet.firstEdgeNumber + edge], edges[facet.firstEdgeNumber + test]); } nextIndex = edges[facet.firstEdgeNumber + edge].e; break; } } // make sure the last edge wraps around and points back at the first edge NVBLAST_ASSERT(edges[facet.firstEdgeNumber + edge - 1].e == edges[facet.firstEdgeNumber + edge].s); } // we need to de-normalize the UVs for interior faces // build a set of interior vertex indices as we inflate the bounds to include all the UVs if (facet.userData != 0) { for (uint32_t edge = 0; edge < facet.edgesCount; edge++) { const int32_t v1 = edges[facet.firstEdgeNumber + edge].s; if (vertUVsToFix.insert(v1).second) { bnd.include(NvVec3(vertices[v1].uv[0].x, vertices[v1].uv[0].y, 0.0f)); } const int32_t v2 = edges[facet.firstEdgeNumber + edge].e; if (vertUVsToFix.insert(v2).second) { bnd.include(NvVec3(vertices[v2].uv[0].x, vertices[v2].uv[0].y, 0.0f)); } } } } const float xscale = (bnd.maximum.x - bnd.minimum.x); const float yscale = (bnd.maximum.y - bnd.minimum.y); const float scale = 1.0f / std::min(xscale, yscale); // To have uniform scaling for (auto vertIdx: vertUVsToFix) { NVBLAST_ASSERT(uint32_t(vertIdx) < numVerts); auto& vert = vertices[vertIdx]; vert.uv[0].x = (vert.uv[0].x - bnd.minimum.x) * scale; vert.uv[0].y = (vert.uv[0].y - bnd.minimum.y) * scale; } // build a new mesh from the converted data Mesh* chunkMesh = new MeshImpl(vertices, edges, facets, numVerts, numEdges, facetsCount); NVBLAST_FREE(edges); return chunkMesh; } bool FractureToolImpl::isMeshContainOpenEdges(const Mesh* input) { std::map<NvcVec3, int32_t, VrtPositionComparator> vertexMapping; std::vector<int32_t> vertexRemappingArray(input->getVerticesCount()); std::vector<Edge> remappedEdges(input->getEdgesCount()); /** Remap vertices */ const Vertex* vrx = input->getVertices(); for (uint32_t i = 0; i < input->getVerticesCount(); ++i) { auto it = vertexMapping.find(vrx->p); if (it == vertexMapping.end()) { vertexMapping[vrx->p] = i; vertexRemappingArray[i] = i; } else { vertexRemappingArray[i] = it->second; } ++vrx; } const Edge* ed = input->getEdges(); for (uint32_t i = 0; i < input->getEdgesCount(); ++i) { remappedEdges[i].s = vertexRemappingArray[ed->s]; remappedEdges[i].e = vertexRemappingArray[ed->e]; if (remappedEdges[i].e < remappedEdges[i].s) { std::swap(remappedEdges[i].s, remappedEdges[i].e); } ++ed; } std::sort(remappedEdges.begin(), remappedEdges.end()); int32_t collected = 1; for (uint32_t i = 1; i < remappedEdges.size(); ++i) { if (remappedEdges[i - 1].s == remappedEdges[i].s && remappedEdges[i - 1].e == remappedEdges[i].e) { collected++; } else { if (collected & 1) { return true; } else { collected = 1; } } } return collected & 1; } int32_t FractureToolImpl::voronoiFracturing(uint32_t chunkId, uint32_t cellCount, const NvcVec3* cellPointsIn, const NvcVec3& scale, const NvcQuat& rotation, bool replaceChunk) { if (chunkId == 0 && replaceChunk) { return 1; } int32_t chunkInfoIndex = getChunkInfoIndex(chunkId); if (chunkInfoIndex == -1 || cellCount < 2) { return 1; } if 
(!mChunkData[chunkInfoIndex].isLeaf) { deleteChunkSubhierarchy(chunkId); } chunkInfoIndex = getChunkInfoIndex(chunkId); Mesh* mesh = mChunkData[chunkInfoIndex].getMesh(); const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld(); std::vector<NvcVec3> cellPoints(cellCount); for (uint32_t i = 0; i < cellCount; ++i) { cellPoints[i] = tm.invTransformPos(cellPointsIn[i]); toNvShared(cellPoints[i]) = toNvShared(rotation).rotateInv(toNvShared(cellPoints[i])); cellPoints[i].x *= (1.0f / scale.x); cellPoints[i].y *= (1.0f / scale.y); cellPoints[i].z *= (1.0f / scale.z); } /** Prebuild accelerator structure */ BooleanEvaluator eval; BooleanEvaluator voronoiMeshEval; BBoxBasedAccelerator spAccel = BBoxBasedAccelerator(mesh, kBBoxBasedAcceleratorDefaultResolution); std::vector<std::vector<std::pair<int32_t, int32_t>>> neighbors; const int32_t neighborCount = findCellBasePlanes(cellPoints, neighbors); /** Fracture */ int32_t parentChunkId = replaceChunk ? mChunkData[chunkInfoIndex].parentChunkId : chunkId; std::vector<uint32_t> newlyCreatedChunksIds; for (uint32_t i = 0; i < cellPoints.size(); ++i) { Mesh* cell = getCellMesh(eval, mPlaneIndexerOffset, i, cellPoints, neighbors, mInteriorMaterialId, cellPoints[i]); if (cell == nullptr) { continue; } for (uint32_t v = 0; v < cell->getVerticesCount(); ++v) { cell->getVerticesWritable()[v].p.x *= scale.x; cell->getVerticesWritable()[v].p.y *= scale.y; cell->getVerticesWritable()[v].p.z *= scale.z; toNvShared(cell->getVerticesWritable()[v].p) = toNvShared(rotation).rotate(toNvShared(cell->getVerticesWritable()[v].p)); } cell->recalculateBoundingBox(); DummyAccelerator dmAccel(cell->getFacetCount()); voronoiMeshEval.performBoolean(mesh, cell, &spAccel, &dmAccel, BooleanConfigurations::BOOLEAN_INTERSECTION()); Mesh* resultMesh = voronoiMeshEval.createNewMesh(); if (resultMesh) { uint32_t ncidx = createNewChunk(parentChunkId); mChunkData[ncidx].isLeaf = true; setChunkInfoMesh(mChunkData[ncidx], resultMesh); newlyCreatedChunksIds.push_back(mChunkData[ncidx].chunkId); } eval.reset(); delete cell; } mChunkData[chunkInfoIndex].isLeaf = false; if (replaceChunk) { deleteChunkSubhierarchy(chunkId, true); } mPlaneIndexerOffset += neighborCount; if (mRemoveIslands) { for (auto chunkToCheck : newlyCreatedChunksIds) { islandDetectionAndRemoving(chunkToCheck); } } return 0; } int32_t FractureToolImpl::slicing(uint32_t chunkId, const SlicingConfiguration& conf, bool replaceChunk, RandomGeneratorBase* rnd) { if (conf.noise.amplitude != 0) { return slicingNoisy(chunkId, conf, replaceChunk, rnd); } if (replaceChunk && chunkId == 0) { return 1; } int32_t chunkInfoIndex = getChunkInfoIndex(chunkId); if (chunkInfoIndex == -1) { return 1; } if (!mChunkData[chunkInfoIndex].isLeaf) { deleteChunkSubhierarchy(chunkId); } chunkInfoIndex = getChunkInfoIndex(chunkId); Mesh* mesh = new MeshImpl(*reinterpret_cast<MeshImpl*>(mChunkData[chunkInfoIndex].getMesh())); BooleanEvaluator bTool; int32_t x_slices = conf.x_slices; int32_t y_slices = conf.y_slices; int32_t z_slices = conf.z_slices; const nvidia::NvBounds3 sourceBBox = toNvShared(mesh->getBoundingBox()); NvVec3 center = {mesh->getBoundingBox().minimum.x, 0, 0}; float x_offset = (sourceBBox.maximum.x - sourceBBox.minimum.x) * (1.0f / (x_slices + 1)); float y_offset = (sourceBBox.maximum.y - sourceBBox.minimum.y) * (1.0f / (y_slices + 1)); float z_offset = (sourceBBox.maximum.z - sourceBBox.minimum.z) * (1.0f / (z_slices + 1)); center.x += x_offset; NvVec3 dir = {1, 0, 0}; Mesh* slBox = getCuttingBox(center, dir, 20, 0, 
mInteriorMaterialId); ChunkInfo ch; ch.isLeaf = true; ch.isChanged = true; ch.flags = ChunkInfo::NO_FLAGS; ch.parentChunkId = replaceChunk ? mChunkData[chunkInfoIndex].parentChunkId : chunkId; std::vector<Mesh*> xSlicedChunks; std::vector<Mesh*> ySlicedChunks; std::vector<uint32_t> newlyCreatedChunksIds; /** Slice along x direction */ for (int32_t slice = 0; slice < x_slices; ++slice) { NvVec3 randVect = NvVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1); NvVec3 lDir = dir + randVect * conf.angle_variations; setCuttingBox(center, -lDir, slBox, 20, mPlaneIndexerOffset); bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_INTERSECTION()); Mesh* xSlice = bTool.createNewMesh(); if (xSlice != nullptr) { xSlicedChunks.push_back(xSlice); } inverseNormalAndIndices(slBox); ++mPlaneIndexerOffset; bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_DIFFERENCE()); Mesh* result = bTool.createNewMesh(); delete mesh; mesh = result; if (mesh == nullptr) { break; } center.x += x_offset + (rnd->getRandomValue()) * conf.offset_variations * x_offset; } if (mesh != nullptr) { xSlicedChunks.push_back(mesh); } for (uint32_t chunk = 0; chunk < xSlicedChunks.size(); ++chunk) { center = NvVec3(0, sourceBBox.minimum.y, 0); center.y += y_offset; dir = NvVec3(0, 1, 0); mesh = xSlicedChunks[chunk]; for (int32_t slice = 0; slice < y_slices; ++slice) { NvVec3 randVect = NvVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1); NvVec3 lDir = dir + randVect * conf.angle_variations; setCuttingBox(center, -lDir, slBox, 20, mPlaneIndexerOffset); bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_INTERSECTION()); Mesh* ySlice = bTool.createNewMesh(); if (ySlice != nullptr) { ySlicedChunks.push_back(ySlice); } inverseNormalAndIndices(slBox); ++mPlaneIndexerOffset; bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_DIFFERENCE()); Mesh* result = bTool.createNewMesh(); delete mesh; mesh = result; if (mesh == nullptr) { break; } center.y += y_offset + (rnd->getRandomValue()) * conf.offset_variations * y_offset; } if (mesh != nullptr) { ySlicedChunks.push_back(mesh); } } for (uint32_t chunk = 0; chunk < ySlicedChunks.size(); ++chunk) { center = NvVec3(0, 0, sourceBBox.minimum.z); center.z += z_offset; dir = NvVec3(0, 0, 1); mesh = ySlicedChunks[chunk]; for (int32_t slice = 0; slice < z_slices; ++slice) { NvVec3 randVect = NvVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1); NvVec3 lDir = dir + randVect * conf.angle_variations; setCuttingBox(center, -lDir, slBox, 20, mPlaneIndexerOffset); bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_INTERSECTION()); Mesh* ySlice = bTool.createNewMesh(); if (ySlice != nullptr) { setChunkInfoMesh(ch, ySlice); ch.chunkId = createId(); newlyCreatedChunksIds.push_back(ch.chunkId); mChunkData.push_back(ch); } inverseNormalAndIndices(slBox); ++mPlaneIndexerOffset; bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_DIFFERENCE()); Mesh* result = bTool.createNewMesh(); delete mesh; mesh = result; if (mesh == nullptr) { break; } center.z += z_offset + (rnd->getRandomValue()) * conf.offset_variations * z_offset; } if (mesh != nullptr) { setChunkInfoMesh(ch, mesh); ch.chunkId = createId(); newlyCreatedChunksIds.push_back(ch.chunkId); mChunkData.push_back(ch); } } delete slBox; mChunkData[chunkInfoIndex].isLeaf = false; if (replaceChunk) { 
deleteChunkSubhierarchy(chunkId, true); } if (mRemoveIslands) { for (auto chunkToCheck : newlyCreatedChunksIds) { islandDetectionAndRemoving(chunkToCheck); } } return 0; } int32_t FractureToolImpl::slicingNoisy(uint32_t chunkId, const SlicingConfiguration& conf, bool replaceChunk, RandomGeneratorBase* rnd) { if (replaceChunk && chunkId == 0) { return 1; } int32_t chunkInfoIndex = getChunkInfoIndex(chunkId); if (chunkInfoIndex == -1) { return 1; } if (!mChunkData[chunkInfoIndex].isLeaf) { deleteChunkSubhierarchy(chunkId); } chunkInfoIndex = getChunkInfoIndex(chunkId); Mesh* mesh = new MeshImpl(*reinterpret_cast<MeshImpl*>(mChunkData[chunkInfoIndex].getMesh())); const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld(); BooleanEvaluator bTool; int32_t x_slices = conf.x_slices; int32_t y_slices = conf.y_slices; int32_t z_slices = conf.z_slices; const nvidia::NvBounds3 sourceBBox = toNvShared(mesh->getBoundingBox()); NvVec3 center = NvVec3(mesh->getBoundingBox().minimum.x, 0, 0); float x_offset = (sourceBBox.maximum.x - sourceBBox.minimum.x) * (1.0f / (x_slices + 1)); float y_offset = (sourceBBox.maximum.y - sourceBBox.minimum.y) * (1.0f / (y_slices + 1)); float z_offset = (sourceBBox.maximum.z - sourceBBox.minimum.z) * (1.0f / (z_slices + 1)); NvVec3 resolution(tm.s / conf.noise.samplingInterval.x, tm.s / conf.noise.samplingInterval.y, tm.s / conf.noise.samplingInterval.z); center.x += x_offset; NvVec3 dir(1, 0, 0); Mesh* slBox = nullptr; ChunkInfo ch; ch.isLeaf = true; ch.isChanged = true; ch.flags = ChunkInfo::NO_FLAGS; ch.parentChunkId = replaceChunk ? mChunkData[chunkInfoIndex].parentChunkId : chunkId; std::vector<Mesh*> xSlicedChunks; std::vector<Mesh*> ySlicedChunks; std::vector<uint32_t> newlyCreatedChunksIds; float noisyPartSize = 1.2f; // int32_t acceleratorRes = 8; /** Slice along x direction */ for (int32_t slice = 0; slice < x_slices; ++slice) { NvVec3 randVect = NvVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1); NvVec3 lDir = dir + randVect * conf.angle_variations; slBox = getNoisyCuttingBoxPair(center, lDir, 40, noisyPartSize, resolution, mPlaneIndexerOffset, conf.noise.amplitude, conf.noise.frequency, conf.noise.octaveNumber, rnd->getRandomValue(), mInteriorMaterialId); // DummyAccelerator accel(mesh->getFacetCount()); SweepingAccelerator accel(mesh); SweepingAccelerator dummy(slBox); bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_DIFFERENCE()); Mesh* xSlice = bTool.createNewMesh(); if (xSlice != nullptr) { xSlicedChunks.push_back(xSlice); } inverseNormalAndIndices(slBox); ++mPlaneIndexerOffset; bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_INTERSECTION()); Mesh* result = bTool.createNewMesh(); delete slBox; delete mesh; mesh = result; if (mesh == nullptr) { break; } center.x += x_offset + (rnd->getRandomValue()) * conf.offset_variations * x_offset; } if (mesh != nullptr) { xSlicedChunks.push_back(mesh); } slBox = getCuttingBox(center, dir, 20, 0, mInteriorMaterialId); uint32_t slicedChunkSize = xSlicedChunks.size(); for (uint32_t chunk = 0; chunk < slicedChunkSize; ++chunk) { center = NvVec3(0, sourceBBox.minimum.y, 0); center.y += y_offset; dir = NvVec3(0, 1, 0); mesh = xSlicedChunks[chunk]; for (int32_t slice = 0; slice < y_slices; ++slice) { NvVec3 randVect = NvVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1); NvVec3 lDir = dir + randVect * conf.angle_variations; slBox = getNoisyCuttingBoxPair(center, 
lDir, 40, noisyPartSize, resolution, mPlaneIndexerOffset, conf.noise.amplitude, conf.noise.frequency, conf.noise.octaveNumber, rnd->getRandomValue(), mInteriorMaterialId); // DummyAccelerator accel(mesh->getFacetCount()); SweepingAccelerator accel(mesh); SweepingAccelerator dummy(slBox); bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_DIFFERENCE()); Mesh* ySlice = bTool.createNewMesh(); if (ySlice != nullptr) { ySlicedChunks.push_back(ySlice); } inverseNormalAndIndices(slBox); ++mPlaneIndexerOffset; bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_INTERSECTION()); Mesh* result = bTool.createNewMesh(); delete slBox; delete mesh; mesh = result; if (mesh == nullptr) { break; } center.y += y_offset + (rnd->getRandomValue()) * conf.offset_variations * y_offset; } if (mesh != nullptr) { ySlicedChunks.push_back(mesh); } } for (uint32_t chunk = 0; chunk < ySlicedChunks.size(); ++chunk) { center = NvVec3(0, 0, sourceBBox.minimum.z); center.z += z_offset; dir = NvVec3(0, 0, 1); mesh = ySlicedChunks[chunk]; for (int32_t slice = 0; slice < z_slices; ++slice) { NvVec3 randVect = NvVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1); NvVec3 lDir = dir + randVect * conf.angle_variations; slBox = getNoisyCuttingBoxPair(center, lDir, 40, noisyPartSize, resolution, mPlaneIndexerOffset, conf.noise.amplitude, conf.noise.frequency, conf.noise.octaveNumber, rnd->getRandomValue(), mInteriorMaterialId); // DummyAccelerator accel(mesh->getFacetCount()); SweepingAccelerator accel(mesh); SweepingAccelerator dummy(slBox); bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_DIFFERENCE()); Mesh* ySlice = bTool.createNewMesh(); if (ySlice != nullptr) { setChunkInfoMesh(ch, ySlice); ch.chunkId = createId(); mChunkData.push_back(ch); newlyCreatedChunksIds.push_back(ch.chunkId); } inverseNormalAndIndices(slBox); ++mPlaneIndexerOffset; bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_INTERSECTION()); Mesh* result = bTool.createNewMesh(); delete mesh; delete slBox; mesh = result; if (mesh == nullptr) { break; } center.z += z_offset + (rnd->getRandomValue()) * conf.offset_variations * z_offset; } if (mesh != nullptr) { setChunkInfoMesh(ch, mesh); ch.chunkId = createId(); newlyCreatedChunksIds.push_back(ch.chunkId); mChunkData.push_back(ch); } } // delete slBox; mChunkData[chunkInfoIndex].isLeaf = false; if (replaceChunk) { deleteChunkSubhierarchy(chunkId, true); } if (mRemoveIslands) { for (auto chunkToCheck : newlyCreatedChunksIds) { islandDetectionAndRemoving(chunkToCheck); } } return 0; } int32_t FractureToolImpl::cut(uint32_t chunkId, const NvcVec3& normal, const NvcVec3& point, const NoiseConfiguration& noise, bool replaceChunk, RandomGeneratorBase* rnd) { if (replaceChunk && chunkId == 0) { return 1; } int32_t chunkInfoIndex = getChunkInfoIndex(chunkId); if (chunkInfoIndex == -1) { return 1; } if (!mChunkData[chunkInfoIndex].isLeaf) { deleteChunkSubhierarchy(chunkId); } chunkInfoIndex = getChunkInfoIndex(chunkId); Mesh* mesh = new MeshImpl(*reinterpret_cast<MeshImpl*>(mChunkData[chunkInfoIndex].getMesh())); BooleanEvaluator bTool; const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld(); ChunkInfo ch; ch.chunkId = -1; ch.isLeaf = true; ch.isChanged = true; ch.flags = ChunkInfo::NO_FLAGS; ch.parentChunkId = replaceChunk ? 
mChunkData[chunkInfoIndex].parentChunkId : chunkId; float noisyPartSize = 1.2f; NvVec3 resolution(tm.s / noise.samplingInterval.x, tm.s / noise.samplingInterval.y, tm.s / noise.samplingInterval.z); // Perform cut Mesh* slBox = getNoisyCuttingBoxPair(toNvShared(tm.invTransformPos(point)), toNvShared(normal), // tm doesn't change normals (up to normalization) 40, noisyPartSize, resolution, mPlaneIndexerOffset, noise.amplitude, noise.frequency, noise.octaveNumber, rnd->getRandomValue(), mInteriorMaterialId); SweepingAccelerator accel(mesh); SweepingAccelerator dummy(slBox); bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_DIFFERENCE()); setChunkInfoMesh(ch, bTool.createNewMesh()); inverseNormalAndIndices(slBox); ++mPlaneIndexerOffset; bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_INTERSECTION()); Mesh* result = bTool.createNewMesh(); delete slBox; delete mesh; mesh = result; if (mesh == 0) // Return if it doesn't cut specified chunk { return 1; } if (!mChunkData[chunkInfoIndex].isLeaf) { deleteChunkSubhierarchy(chunkId); } chunkInfoIndex = getChunkInfoIndex(chunkId); int32_t firstChunkId = -1; if (ch.getMesh() != 0) { ch.chunkId = createId(); mChunkData.push_back(ch); firstChunkId = ch.chunkId; } if (mesh != 0) { ch.chunkId = createId(); setChunkInfoMesh(ch, mesh); mChunkData.push_back(ch); } mChunkData[chunkInfoIndex].isLeaf = false; if (replaceChunk) { deleteChunkSubhierarchy(chunkId, true); } if (mRemoveIslands && firstChunkId >= 0) { islandDetectionAndRemoving(firstChunkId); if (mesh != 0) { islandDetectionAndRemoving(ch.chunkId); } } return 0; } bool CmpVec::operator()(const NvVec3& v1, const NvVec3& v2) const { auto v = (v2 - v1).abs(); if (v.x < 1e-5) { if (v.y < 1e-5) { return v1.z < v2.z; } return v1.y < v2.y; } return v1.x < v2.x; } int32_t FractureToolImpl::cutout(uint32_t chunkId, CutoutConfiguration conf, bool replaceChunk, RandomGeneratorBase* rnd) { if ((replaceChunk && chunkId == 0) || conf.cutoutSet == nullptr) { return 1; } int32_t chunkInfoIndex = getChunkInfoIndex(chunkId); if (chunkInfoIndex == -1) { return 1; } if (!mChunkData[chunkInfoIndex].isLeaf) { deleteChunkSubhierarchy(chunkId); } chunkInfoIndex = getChunkInfoIndex(chunkId); Nv::Blast::CutoutSet& cutoutSet = *conf.cutoutSet; const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld(); Mesh* mesh = new MeshImpl(*reinterpret_cast<MeshImpl*>(mChunkData[chunkInfoIndex].getMesh())); float extrusionLength = toNvShared(mesh->getBoundingBox()).getDimensions().magnitude(); auto scale = toNvShared(conf.scale); conf.transform.p = tm.invTransformPos(conf.transform.p); if (scale.x < 0.f || scale.y < 0.f) { scale = { extrusionLength, extrusionLength }; } if (conf.isRelativeTransform) { toNvShared(conf.transform.p) += toNvShared(mesh->getBoundingBox()).getCenter() / tm.s; } conf.noise.samplingInterval = conf.noise.samplingInterval / tm.s; float xDim = cutoutSet.getDimensions().x; float yDim = cutoutSet.getDimensions().y; if (conf.cutoutSet->isPeriodic()) // cutout with periodic boundary do not support noise and conicity { conf.aperture = 0.f; conf.noise.amplitude = 0.f; } BooleanEvaluator bTool; ChunkInfo ch; ch.isLeaf = true; ch.isChanged = true; ch.flags = ChunkInfo::NO_FLAGS; ch.parentChunkId = replaceChunk ? 
mChunkData[chunkInfoIndex].parentChunkId : chunkId; std::vector<uint32_t> newlyCreatedChunksIds; SharedFacesMap sharedFacesMap; std::vector<std::vector<NvVec3> > verts; std::vector<std::set<int32_t> > smoothingGroups; std::vector<uint32_t> cutoutStarts; for (uint32_t c = 0; c < cutoutSet.getCutoutCount(); c++) { cutoutStarts.push_back(verts.size()); for (uint32_t l = 0; l < cutoutSet.getCutoutLoopCount(c); l++) { uint32_t vertCount = cutoutSet.getCutoutVertexCount(c, l); verts.push_back(std::vector<NvVec3>(vertCount)); smoothingGroups.push_back(std::set<int32_t>()); for (uint32_t v = 0; v < vertCount; v++) { auto vert = cutoutSet.getCutoutVertex(c, l, v); vert.x = (vert.x / xDim - 0.5f) * scale.x; vert.y = (vert.y / yDim - 0.5f) * scale.y; verts.back()[v] = toNvShared(vert); if (cutoutSet.isCutoutVertexToggleSmoothingGroup(c, l, v)) { smoothingGroups.back().insert(v); } } } } float dimension = scale.magnitude(); float conicityMultiplierBot = 1.f + 2.f * extrusionLength / dimension * nvidia::NvTan(nvidia::NvClamp(conf.aperture, -179.f, 179.f) * nvidia::NvPi / 360.f); float conicityMultiplierTop = 2.f - conicityMultiplierBot; float heightBot = extrusionLength, heightTop = extrusionLength; if (conicityMultiplierBot < 0.f) { conicityMultiplierBot = 0.f; heightBot = 0.5f * dimension / std::abs(nvidia::NvTan(conf.aperture * nvidia::NvPi / 360.f)); } if (conicityMultiplierTop < 0.f) { conicityMultiplierTop = 0.f; heightTop = 0.5f * dimension / std::abs(nvidia::NvTan(conf.aperture * nvidia::NvPi / 360.f)); } uint32_t seed = rnd->getRandomValue(); buildCuttingConeFaces(conf, verts, heightBot, heightTop, conicityMultiplierBot, conicityMultiplierTop, mPlaneIndexerOffset, seed, mInteriorMaterialId, sharedFacesMap); std::vector<std::vector<Mesh*> > cutoutMeshes; for (uint32_t c = 0; c < cutoutSet.getCutoutCount(); c++) { cutoutMeshes.push_back(std::vector<Mesh*>()); for (uint32_t l = 0; l < cutoutSet.getCutoutLoopCount(c); l++) { if (verts[cutoutStarts[c] + l].size() < 4) { continue; } cutoutMeshes.back().push_back( getCuttingCone(conf, verts[cutoutStarts[c] + l], smoothingGroups[cutoutStarts[c] + l], heightBot, heightTop, conicityMultiplierBot, conicityMultiplierTop, mPlaneIndexerOffset, seed, mInteriorMaterialId, sharedFacesMap, l != 0)); } } std::stack<std::pair<int32_t, int32_t> > cellsStack; std::set<std::pair<int32_t, int32_t> > visited; cellsStack.push(std::make_pair(0, 0)); while (!cellsStack.empty()) { auto cell = cellsStack.top(); auto transformedCell = toNvShared(conf.transform).rotate(NvVec3(cell.first * scale.x, cell.second * scale.y, 0)); cellsStack.pop(); if (visited.find(cell) != visited.end()) { continue; } visited.insert(cell); bool hasCutout = false; for (uint32_t c = 0; c < cutoutMeshes.size(); c++) { setChunkInfoMesh(ch, nullptr); for (uint32_t l = 0; l < cutoutMeshes[c].size(); l++) { Mesh* cutoutMesh = cutoutMeshes[c][l]; if (cutoutMesh == nullptr) { continue; } auto vertices = cutoutMesh->getVerticesWritable(); for (uint32_t v = 0; v < cutoutMesh->getVerticesCount(); v++) { toNvShared(vertices[v].p) += transformedCell; } toNvShared(cutoutMesh->getBoundingBoxWritable().minimum) += transformedCell; toNvShared(cutoutMesh->getBoundingBoxWritable().maximum) += transformedCell; if (l == 0) { SweepingAccelerator accel(mesh); SweepingAccelerator dummy(cutoutMesh); bTool.performBoolean(mesh, cutoutMesh, &accel, &dummy, BooleanConfigurations::BOOLEAN_INTERSECTION()); setChunkInfoMesh(ch, bTool.createNewMesh()); } else { SweepingAccelerator accel(ch.getMesh()); SweepingAccelerator 
dummy(cutoutMesh); bTool.performBoolean(ch.getMesh(), cutoutMesh, &accel, &dummy, BooleanConfigurations::BOOLEAN_DIFFERENCE()); setChunkInfoMesh(ch, bTool.createNewMesh()); } for (uint32_t v = 0; v < cutoutMesh->getVerticesCount(); v++) { toNvShared(vertices[v].p) -= transformedCell; } toNvShared(cutoutMesh->getBoundingBoxWritable().minimum )-= transformedCell; toNvShared(cutoutMesh->getBoundingBoxWritable().maximum) -= transformedCell; } if (ch.getMesh() != 0) { ch.chunkId = createId(); newlyCreatedChunksIds.push_back(ch.chunkId); mChunkData.push_back(ch); hasCutout = true; } } if (hasCutout && cutoutSet.isPeriodic()) { for (int32_t i = 0; i < 4; ++i) { const int32_t i0 = i & 1; const int32_t i1 = (i >> 1) & 1; auto newCell = std::make_pair(cell.first + i0 - i1, cell.second + i0 + i1 - 1); if (visited.find(newCell) == visited.end()) { cellsStack.push(newCell); } } } } for (uint32_t c = 0; c < cutoutMeshes.size(); c++) { for (uint32_t l = 0; l < cutoutMeshes[c].size(); l++) { SAFE_DELETE(cutoutMeshes[c][l]); } } SAFE_DELETE(mesh); mChunkData[chunkInfoIndex].isLeaf = false; if (replaceChunk) { deleteChunkSubhierarchy(chunkId, true); } if (mRemoveIslands) { for (auto chunkToCheck : newlyCreatedChunksIds) { islandDetectionAndRemoving(chunkToCheck); } } return 0; } int32_t FractureToolImpl::getChunkInfoIndex(int32_t chunkId) const { for (uint32_t i = 0; i < mChunkData.size(); ++i) { if (mChunkData[i].chunkId == chunkId) { return i; } } return -1; } int32_t FractureToolImpl::getChunkDepth(int32_t chunkId) const { int32_t chunkInfoIndex = getChunkInfoIndex(chunkId); if (chunkInfoIndex == -1) { return -1; } int32_t depth = 0; while (mChunkData[chunkInfoIndex].parentChunkId != -1) { ++depth; chunkInfoIndex = getChunkInfoIndex(mChunkData[chunkInfoIndex].parentChunkId); } return depth; } uint32_t FractureToolImpl::getChunksIdAtDepth(uint32_t depth, int32_t*& chunkIds) const { std::vector<int32_t> _chunkIds; for (uint32_t i = 0; i < mChunkData.size(); ++i) { if (getChunkDepth(mChunkData[i].chunkId) == (int32_t)depth) { _chunkIds.push_back(mChunkData[i].chunkId); } } chunkIds = new int32_t[_chunkIds.size()]; memcpy(chunkIds, _chunkIds.data(), _chunkIds.size() * sizeof(int32_t)); return (uint32_t)_chunkIds.size(); } bool FractureToolImpl::setSourceMeshes(Mesh const * const * meshes, uint32_t meshesSize, const int32_t* ids /* = nullptr */) { if (meshes == nullptr) { return false; } reset(); for (uint32_t m = 0; m < meshesSize; m++) { const auto mesh = meshes[m]; const int32_t chunkId = (ids ? 
ids[m] : -1); const int32_t id = setChunkMesh(mesh, -1, chunkId); // if any mesh fails to get set up correctly, // wipe the data so it isn't in a bad state and report failure if (id < 0) { reset(); return false; } } // all source meshes were set up correctly, report success return true; } int32_t FractureToolImpl::setChunkMesh(const Mesh* meshInput, int32_t parentId, int32_t chunkId /* = -1 */) { if (chunkId < 0) { // allocate a new chunk ID chunkId = createId(); if (chunkId < 0) { return -1; } } else { // make sure the supplied chunk ID gets reserved if (!reserveId(chunkId)) { return -1; } } const int32_t parentInfoIndex = getChunkInfoIndex(parentId); if (meshInput == nullptr || (parentInfoIndex == -1 && parentId != -1)) { return -1; } mChunkData.push_back(ChunkInfo()); auto& chunk = mChunkData.back(); chunk.chunkId = chunkId; chunk.parentChunkId = parentId; chunk.isLeaf = true; chunk.isChanged = true; chunk.flags = ChunkInfo::NO_FLAGS; /** Set mesh; move to origin and scale to unit cube */ Mesh* mesh = new MeshImpl(*reinterpret_cast<const MeshImpl*>(meshInput)); setChunkInfoMesh(chunk, mesh, false); if ((size_t)parentInfoIndex < mChunkData.size()) { mChunkData[parentInfoIndex].isLeaf = false; } // Make sure our fracturing surface ID base is greater than any existing ID for (uint32_t i = 0; i < mesh->getFacetCount(); ++i) { const int64_t splitId = std::abs(mesh->getFacet(i)->userData); mPlaneIndexerOffset = std::max(mPlaneIndexerOffset, splitId + 1); } return chunk.chunkId; } void FractureToolImpl::release() { delete this; } void FractureToolImpl::reset() { for (uint32_t i = 0; i < mChunkPostprocessors.size(); ++i) { delete mChunkPostprocessors[i]; } mChunkPostprocessors.clear(); for (uint32_t i = 0; i < mChunkData.size(); ++i) { delete mChunkData[i].getMesh(); } mChunkData.clear(); mPlaneIndexerOffset = 1; mNextChunkId = 0; mChunkIdsUsed.clear(); mInteriorMaterialId = kMaterialInteriorId; } void FractureToolImpl::setInteriorMaterialId(int32_t materialId) { mInteriorMaterialId = materialId; } bool FractureToolImpl::isAncestorForChunk(int32_t ancestorId, int32_t chunkId) { if (ancestorId == chunkId) { return false; } while (chunkId != -1) { if (ancestorId == chunkId) { return true; } const int32_t chunkInfoIndex = getChunkInfoIndex(chunkId); if (chunkInfoIndex == -1) { return false; } chunkId = mChunkData[chunkInfoIndex].parentChunkId; } return false; } bool FractureToolImpl::deleteChunkSubhierarchy(int32_t chunkId, bool deleteRoot /*= false*/) { std::vector<int32_t> chunkToDelete; for (uint32_t i = 0; i < mChunkData.size(); ++i) { if (isAncestorForChunk(chunkId, mChunkData[i].chunkId) || (deleteRoot && chunkId == mChunkData[i].chunkId)) { chunkToDelete.push_back(i); } } for (int32_t i = (int32_t)chunkToDelete.size() - 1; i >= 0; --i) { int32_t m = chunkToDelete[i]; delete mChunkData[m].getMesh(); std::swap(mChunkData.back(), mChunkData[m]); mChunkData.pop_back(); } markLeaves(); return chunkToDelete.size() > 0; } void FractureToolImpl::finalizeFracturing() { std::vector<Triangulator*> oldTriangulators = mChunkPostprocessors; std::map<int32_t, int32_t> chunkIdToTriangulator; std::set<uint32_t> newChunkMask; for (uint32_t i = 0; i < oldTriangulators.size(); ++i) { chunkIdToTriangulator[oldTriangulators[i]->getParentChunkId()] = i; } mChunkPostprocessors.clear(); mChunkPostprocessors.resize(mChunkData.size()); newChunkMask.insert(0xffffffff); // To trigger masking mode, if newChunkMask will happen to be empty, all UVs will // be updated. 
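// Descriptive note on the loop below: it rebuilds the per-chunk triangulators. Chunks flagged
// isChanged (or chunks that have no cached Triangulator yet) get a fresh Triangulator and are
// re-triangulated from their Mesh; all other chunks reuse the Triangulator kept from the
// previous finalizeFracturing() call.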
for (uint32_t i = 0; i < mChunkPostprocessors.size(); ++i) { auto it = chunkIdToTriangulator.find(mChunkData[i].chunkId); if (mChunkData[i].isChanged || it == chunkIdToTriangulator.end()) { if (it != chunkIdToTriangulator.end()) { delete oldTriangulators[it->second]; oldTriangulators[it->second] = nullptr; } mChunkPostprocessors[i] = new Triangulator(); mChunkPostprocessors[i]->triangulate(mChunkData[i].getMesh()); mChunkPostprocessors[i]->getParentChunkId() = mChunkData[i].chunkId; newChunkMask.insert(mChunkData[i].chunkId); mChunkData[i].isChanged = false; } else { mChunkPostprocessors[i] = oldTriangulators[it->second]; } } std::vector<int32_t> badOnes; for (uint32_t i = 0; i < mChunkPostprocessors.size(); ++i) { if (mChunkPostprocessors[i]->getBaseMesh().empty()) { badOnes.push_back(i); } } for (int32_t i = (int32_t)badOnes.size() - 1; i >= 0; --i) { int32_t chunkId = mChunkData[badOnes[i]].chunkId; for (uint32_t j = 0; j < mChunkData.size(); ++j) { if (mChunkData[j].parentChunkId == chunkId) mChunkData[j].parentChunkId = mChunkData[badOnes[i]].parentChunkId; } std::swap(mChunkPostprocessors[badOnes[i]], mChunkPostprocessors.back()); mChunkPostprocessors.pop_back(); std::swap(mChunkData[badOnes[i]], mChunkData.back()); mChunkData.pop_back(); } if (!mChunkPostprocessors.empty()) // Failsafe to prevent infinite loop (leading to stack overflow) { fitAllUvToRect(1.0f, newChunkMask); } } uint32_t FractureToolImpl::getChunkCount() const { return (uint32_t)mChunkData.size(); } const ChunkInfo& FractureToolImpl::getChunkInfo(int32_t chunkInfoIndex) { return mChunkData[chunkInfoIndex]; } uint32_t FractureToolImpl::getBaseMesh(int32_t chunkInfoIndex, Triangle*& output) { NVBLAST_ASSERT(mChunkPostprocessors.size() > 0); if (mChunkPostprocessors.size() == 0) { return 0; // finalizeFracturing() should be called before getting mesh! } auto& baseMesh = mChunkPostprocessors[chunkInfoIndex]->getBaseMesh(); output = new Triangle[baseMesh.size()]; memcpy(output, baseMesh.data(), baseMesh.size() * sizeof(Triangle)); /* Scale mesh back */ const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld(); for (uint32_t i = 0; i < baseMesh.size(); ++i) { Triangle& triangle = output[i]; triangle.a.p = tm.transformPos(triangle.a.p); triangle.b.p = tm.transformPos(triangle.b.p); triangle.c.p = tm.transformPos(triangle.c.p); } return baseMesh.size(); } uint32_t FractureToolImpl::updateBaseMesh(int32_t chunkInfoIndex, Triangle* output) { NVBLAST_ASSERT(mChunkPostprocessors.size() > 0); if (mChunkPostprocessors.size() == 0) { return 0; // finalizeFracturing() should be called before getting mesh! 
} auto& baseMesh = mChunkPostprocessors[chunkInfoIndex]->getBaseMesh(); memcpy(output, baseMesh.data(), baseMesh.size() * sizeof(Triangle)); /* Scale mesh back */ const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld(); for (uint32_t i = 0; i < baseMesh.size(); ++i) { Triangle& triangle = output[i]; triangle.a.p = tm.transformPos(triangle.a.p); triangle.b.p = tm.transformPos(triangle.b.p); triangle.c.p = tm.transformPos(triangle.c.p); } return baseMesh.size(); } float getVolume(std::vector<Triangle>& triangles) { if (triangles.size() == 0) { return 0.0f; } // Find an approximate centroid for a more accurate calculation NvcVec3 centroid = { 0.0f, 0.0f, 0.0f }; for (size_t i = 0; i < triangles.size(); ++i) { centroid = centroid + triangles[i].a.p + triangles[i].b.p + triangles[i].c.p; } centroid = centroid / (3 * triangles.size()); float volume = 0.0f; for (size_t i = 0; i < triangles.size(); ++i) { const NvcVec3 a = triangles[i].a.p - centroid; const NvcVec3 b = triangles[i].b.p - centroid; const NvcVec3 c = triangles[i].c.p - centroid; volume += (a.x * b.y * c.z - a.x * b.z * c.y - a.y * b.x * c.z + a.y * b.z * c.x + a.z * b.x * c.y - a.z * b.y * c.x); } return (1.0f / 6.0f) * std::abs(volume); } float FractureToolImpl::getMeshOverlap(const Mesh& meshA, const Mesh& meshB) { BooleanEvaluator bTool; bTool.performBoolean(&meshA, &meshB, BooleanConfigurations::BOOLEAN_INTERSECTION()); Mesh* result = bTool.createNewMesh(); if (result == nullptr) { return 0.0f; } Triangulator postProcessor; postProcessor.triangulate(&meshA); float baseVolume = getVolume(postProcessor.getBaseMesh()); if (baseVolume == 0) { return 0.0f; } postProcessor.triangulate(result); float intrsVolume = getVolume(postProcessor.getBaseMesh()); delete result; return intrsVolume / baseVolume; } void weldVertices(std::map<Vertex, uint32_t, VrtComp>& vertexMapping, std::vector<Vertex>& vertexBuffer, std::vector<uint32_t>& indexBuffer, std::vector<Triangle>& trb) { for (uint32_t i = 0; i < trb.size(); ++i) { auto it = vertexMapping.find(trb[i].a); if (it == vertexMapping.end()) { indexBuffer.push_back(static_cast<uint32_t>(vertexBuffer.size())); vertexMapping[trb[i].a] = static_cast<uint32_t>(vertexBuffer.size()); vertexBuffer.push_back(trb[i].a); } else { indexBuffer.push_back(it->second); } it = vertexMapping.find(trb[i].b); if (it == vertexMapping.end()) { indexBuffer.push_back(static_cast<uint32_t>(vertexBuffer.size())); vertexMapping[trb[i].b] = static_cast<uint32_t>(vertexBuffer.size()); vertexBuffer.push_back(trb[i].b); } else { indexBuffer.push_back(it->second); } it = vertexMapping.find(trb[i].c); if (it == vertexMapping.end()) { indexBuffer.push_back(static_cast<uint32_t>(vertexBuffer.size())); vertexMapping[trb[i].c] = static_cast<uint32_t>(vertexBuffer.size()); vertexBuffer.push_back(trb[i].c); } else { indexBuffer.push_back(it->second); } } } void FractureToolImpl::setRemoveIslands(bool isRemoveIslands) { mRemoveIslands = isRemoveIslands; } int32_t FractureToolImpl::islandDetectionAndRemoving(int32_t chunkId, bool createAtNewDepth) { if (chunkId == 0 && createAtNewDepth == false) { return 0; } int32_t chunkInfoIndex = getChunkInfoIndex(chunkId); Triangulator prc; prc.triangulate(mChunkData[chunkInfoIndex].getMesh()); Mesh* chunk = mChunkData[chunkInfoIndex].getMesh(); std::vector<uint32_t>& mapping = prc.getBaseMapping(); std::vector<TriangleIndexed>& trs = prc.getBaseMeshIndexed(); std::vector<std::vector<uint32_t> > graph(prc.getWeldedVerticesCount()); std::vector<int32_t>& pm = prc.getPositionedMapping(); 
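// Island detection overview: triangulate the chunk, then build a vertex-adjacency graph over
// position-welded vertices (triangle edges plus the original mesh edges), flood-fill it with a
// BFS to label connected components, and finally split the chunk's vertices/edges/facets per
// component -- either splitting in place (component 0 keeps the chunk's mesh, the rest become
// siblings) or, when createAtNewDepth is set, creating one child chunk per island.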
if (pm.size() == 0) { return 0; } /** Chunk graph */ for (uint32_t i = 0; i < trs.size(); ++i) { graph[pm[trs[i].ea]].push_back(pm[trs[i].eb]); graph[pm[trs[i].ea]].push_back(pm[trs[i].ec]); graph[pm[trs[i].ec]].push_back(pm[trs[i].eb]); graph[pm[trs[i].ec]].push_back(pm[trs[i].ea]); graph[pm[trs[i].eb]].push_back(pm[trs[i].ea]); graph[pm[trs[i].eb]].push_back(pm[trs[i].ec]); } for (uint32_t i = 0; i < chunk->getEdgesCount(); ++i) { int v1 = chunk->getEdges()[i].s; int v2 = chunk->getEdges()[i].e; v1 = pm[mapping[v1]]; v2 = pm[mapping[v2]]; graph[v1].push_back(v2); graph[v2].push_back(v1); } /** Walk graph, mark components */ std::vector<int32_t> comps(prc.getWeldedVerticesCount(), -1); std::queue<uint32_t> que; int32_t cComp = 0; for (uint32_t i = 0; i < prc.getWeldedVerticesCount(); ++i) { int32_t to = pm[i]; if (comps[to] != -1) continue; que.push(to); comps[to] = cComp; while (!que.empty()) { int32_t c = que.front(); que.pop(); for (uint32_t j = 0; j < graph[c].size(); ++j) { if (comps[graph[c][j]] == -1) { que.push(graph[c][j]); comps[graph[c][j]] = cComp; } } } cComp++; } for (uint32_t i = 0; i < prc.getWeldedVerticesCount(); ++i) { int32_t to = pm[i]; comps[i] = comps[to]; } std::vector<uint32_t> longComps(chunk->getVerticesCount()); for (uint32_t i = 0; i < chunk->getVerticesCount(); ++i) { int32_t to = mapping[i]; longComps[i] = comps[to]; } if (cComp > 1) { std::vector<std::vector<Vertex> > compVertices(cComp); std::vector<std::vector<Facet> > compFacets(cComp); std::vector<std::vector<Edge> > compEdges(cComp); std::vector<uint32_t> compVertexMapping(chunk->getVerticesCount(), 0); const Vertex* vrts = chunk->getVertices(); for (uint32_t v = 0; v < chunk->getVerticesCount(); ++v) { int32_t vComp = comps[mapping[v]]; compVertexMapping[v] = static_cast<uint32_t>(compVertices[vComp].size()); compVertices[vComp].push_back(vrts[v]); } const Facet* fcb = chunk->getFacetsBuffer(); const Edge* edb = chunk->getEdges(); for (uint32_t fc = 0; fc < chunk->getFacetCount(); ++fc) { std::vector<uint32_t> edgesPerComp(cComp, 0); for (uint32_t ep = fcb[fc].firstEdgeNumber; ep < fcb[fc].firstEdgeNumber + fcb[fc].edgesCount; ++ep) { int32_t vComp = comps[mapping[edb[ep].s]]; edgesPerComp[vComp]++; compEdges[vComp].push_back({compVertexMapping[edb[ep].s], compVertexMapping[edb[ep].e]}); } for (int32_t c = 0; c < cComp; ++c) { if (edgesPerComp[c] == 0) { continue; } compFacets[c].push_back(*chunk->getFacet(fc)); compFacets[c].back().edgesCount = edgesPerComp[c]; compFacets[c].back().firstEdgeNumber = static_cast<int32_t>(compEdges[c].size()) - edgesPerComp[c]; } } if (createAtNewDepth == false) { // We need to flag the chunk as changed, in case someone is calling this function directly // Otherwise when called as part of automatic island removal, chunks are already flagged as changed mChunkData[chunkInfoIndex].isChanged = true; delete mChunkData[chunkInfoIndex].getMesh(); Mesh* newMesh0 = new MeshImpl(compVertices[0].data(), compEdges[0].data(), compFacets[0].data(), static_cast<uint32_t>(compVertices[0].size()), static_cast<uint32_t>(compEdges[0].size()), static_cast<uint32_t>(compFacets[0].size())); setChunkInfoMesh(mChunkData[chunkInfoIndex], newMesh0); for (int32_t i = 1; i < cComp; ++i) { mChunkData.push_back(ChunkInfo(mChunkData[chunkInfoIndex])); mChunkData.back().chunkId = createId(); Mesh* newMesh_i = new MeshImpl(compVertices[i].data(), compEdges[i].data(), compFacets[i].data(), static_cast<uint32_t>(compVertices[i].size()), static_cast<uint32_t>(compEdges[i].size()), 
static_cast<uint32_t>(compFacets[i].size())); setChunkInfoMesh(mChunkData.back(), newMesh_i); } } else { deleteChunkSubhierarchy(chunkId); for (int32_t i = 0; i < cComp; ++i) { uint32_t nc = createNewChunk(chunkId); mChunkData[nc].isLeaf = true; mChunkData[nc].flags = ChunkInfo::APPROXIMATE_BONDING; Mesh* newMesh = new MeshImpl(compVertices[i].data(), compEdges[i].data(), compFacets[i].data(), static_cast<uint32_t>(compVertices[i].size()), static_cast<uint32_t>(compEdges[i].size()), static_cast<uint32_t>(compFacets[i].size())); setChunkInfoMesh(mChunkData[nc], newMesh); } mChunkData[chunkInfoIndex].isLeaf = false; } return cComp; } return 0; } uint32_t FractureToolImpl::getBufferedBaseMeshes(Vertex*& vertexBuffer, uint32_t*& indexBuffer, uint32_t*& indexBufferOffsets) { std::map<Vertex, uint32_t, VrtComp> vertexMapping; std::vector<Vertex> _vertexBuffer; std::vector<uint32_t> _indexBuffer; indexBufferOffsets = reinterpret_cast<uint32_t*>(NVBLAST_ALLOC((mChunkPostprocessors.size() + 1) * sizeof(uint32_t))); for (uint32_t ch = 0; ch < mChunkPostprocessors.size(); ++ch) { const TransformST& tm = mChunkData[ch].getTmToWorld(); std::vector<Triangle> trb = mChunkPostprocessors[ch]->getBaseMesh(); for (uint32_t i = 0; i < trb.size(); ++i) { Triangle& tri = trb[i]; tri.a.p = tm.transformPos(tri.a.p); tri.b.p = tm.transformPos(tri.b.p); tri.c.p = tm.transformPos(tri.c.p); } indexBufferOffsets[ch] = _indexBuffer.size(); weldVertices(vertexMapping, _vertexBuffer, _indexBuffer, trb); } indexBufferOffsets[mChunkPostprocessors.size()] = _indexBuffer.size(); vertexBuffer = reinterpret_cast<Vertex*>(NVBLAST_ALLOC(_vertexBuffer.size() * sizeof(Vertex))); indexBuffer = reinterpret_cast<uint32_t*>(NVBLAST_ALLOC(_indexBuffer.size() * sizeof(uint32_t))); memcpy(vertexBuffer, _vertexBuffer.data(), _vertexBuffer.size() * sizeof(Vertex)); memcpy(indexBuffer, _indexBuffer.data(), _indexBuffer.size() * sizeof(uint32_t)); return _vertexBuffer.size(); } int32_t FractureToolImpl::getChunkId(int32_t chunkInfoIndex) const { if (chunkInfoIndex < 0 || static_cast<uint32_t>(chunkInfoIndex) >= mChunkData.size()) { return -1; } return mChunkData[chunkInfoIndex].chunkId; } int32_t FractureToolImpl::getInteriorMaterialId() const { return mInteriorMaterialId; } void FractureToolImpl::replaceMaterialId(int32_t oldMaterialId, int32_t newMaterialId) { for (auto& chunkData : mChunkData) { if (chunkData.getMesh()) { chunkData.getMesh()->replaceMaterialId(oldMaterialId, newMaterialId); } } } uint32_t FractureToolImpl::stretchGroup(const std::vector<uint32_t>& grp, std::vector<std::vector<uint32_t> >& graph) { uint32_t parentChunkId = mChunkData[grp[0]].parentChunkId; uint32_t newChunkIndex = createNewChunk(parentChunkId); graph.push_back(std::vector<uint32_t>()); std::vector<Vertex> nVertices; std::vector<Edge> nEdges; std::vector<Facet> nFacets; uint32_t offsetVertices = 0; uint32_t offsetEdges = 0; for (uint32_t i = 0; i < grp.size(); ++i) { mChunkData[grp[i]].parentChunkId = mChunkData[newChunkIndex].chunkId; auto vr = mChunkData[grp[i]].getMesh()->getVertices(); auto ed = mChunkData[grp[i]].getMesh()->getEdges(); auto fc = mChunkData[grp[i]].getMesh()->getFacetsBuffer(); for (uint32_t v = 0; v < mChunkData[grp[i]].getMesh()->getVerticesCount(); ++v) { nVertices.push_back(vr[v]); } for (uint32_t v = 0; v < mChunkData[grp[i]].getMesh()->getEdgesCount(); ++v) { nEdges.push_back(ed[v]); nEdges.back().s += offsetVertices; nEdges.back().e += offsetVertices; } for (uint32_t v = 0; v < mChunkData[grp[i]].getMesh()->getFacetCount(); ++v) 
{ nFacets.push_back(fc[v]); nFacets.back().firstEdgeNumber += offsetEdges; } offsetEdges = nEdges.size(); offsetVertices = nVertices.size(); if (mChunkData[grp[i]].flags & ChunkInfo::APPROXIMATE_BONDING) { mChunkData[newChunkIndex].flags |= ChunkInfo::APPROXIMATE_BONDING; } } std::vector<Facet> finalFacets; std::set<int64_t> hasCutting; for (uint32_t i = 0; i < nFacets.size(); ++i) { if (nFacets[i].userData != 0) hasCutting.insert(nFacets[i].userData); } for (uint32_t i = 0; i < nFacets.size(); ++i) { // N.B. This can lead to open meshes for non-voronoi fracturing. // We need to check if the opposing faces match exactly, or even better reconstruct parts that stick out. if (nFacets[i].userData == 0 || (hasCutting.find(-nFacets[i].userData) == hasCutting.end())) { finalFacets.push_back(nFacets[i]); } } Mesh* newMesh = new MeshImpl(nVertices.data(), nEdges.data(), finalFacets.data(), static_cast<uint32_t>(nVertices.size()), static_cast<uint32_t>(nEdges.size()), static_cast<uint32_t>(finalFacets.size())); setChunkInfoMesh(mChunkData[newChunkIndex], newMesh); return newChunkIndex; } uint32_t FractureToolImpl::createNewChunk(uint32_t parentChunkId) { const uint32_t index = static_cast<uint32_t>(mChunkData.size()); mChunkData.push_back(ChunkInfo()); mChunkData.back().parentChunkId = parentChunkId; mChunkData.back().chunkId = createId(); return index; } void FractureToolImpl::fitUvToRect(float side, uint32_t chunk) { int32_t infoIndex = getChunkInfoIndex(chunk); if (mChunkPostprocessors.empty()) // It seems finalizeFracturing() has not been called, call it here. { finalizeFracturing(); } if (infoIndex == -1 || (int32_t)mChunkPostprocessors.size() <= infoIndex) { return; // We don't have such a chunk triangulated. } nvidia::NvBounds3 bnd; bnd.setEmpty(); std::vector<Triangle>& ctrs = mChunkPostprocessors[infoIndex]->getBaseMesh(); std::vector<Triangle>& output = mChunkPostprocessors[infoIndex]->getBaseMesh(); for (uint32_t trn = 0; trn < ctrs.size(); ++trn) { if (ctrs[trn].userData == 0) continue; bnd.include(NvVec3(ctrs[trn].a.uv[0].x, ctrs[trn].a.uv[0].y, 0.0f)); bnd.include(NvVec3(ctrs[trn].b.uv[0].x, ctrs[trn].b.uv[0].y, 0.0f)); bnd.include(NvVec3(ctrs[trn].c.uv[0].x, ctrs[trn].c.uv[0].y, 0.0f)); } float xscale = side / (bnd.maximum.x - bnd.minimum.x); float yscale = side / (bnd.maximum.y - bnd.minimum.y); xscale = std::min(xscale, yscale); // To have uniform scaling for (uint32_t trn = 0; trn < ctrs.size(); ++trn) { if (ctrs[trn].userData == 0) continue; output[trn].a.uv[0].x = (ctrs[trn].a.uv[0].x - bnd.minimum.x) * xscale; output[trn].b.uv[0].x = (ctrs[trn].b.uv[0].x - bnd.minimum.x) * xscale; output[trn].c.uv[0].x = (ctrs[trn].c.uv[0].x - bnd.minimum.x) * xscale; output[trn].a.uv[0].y = (ctrs[trn].a.uv[0].y - bnd.minimum.y) * xscale; output[trn].b.uv[0].y = (ctrs[trn].b.uv[0].y - bnd.minimum.y) * xscale; output[trn].c.uv[0].y = (ctrs[trn].c.uv[0].y - bnd.minimum.y) * xscale; } } void FractureToolImpl::fitAllUvToRect(float side) { std::set<uint32_t> mask; fitAllUvToRect(side, mask); } void FractureToolImpl::fitAllUvToRect(float side, std::set<uint32_t>& mask) { if (mChunkPostprocessors.empty()) // It seems finalizeFracturing() has not been called, call it here. { finalizeFracturing(); } if (mChunkPostprocessors.empty()) { return; // We don't have any triangulated chunks. 
} nvidia::NvBounds3 bnd; bnd.setEmpty(); for (uint32_t chunk = 0; chunk < mChunkData.size(); ++chunk) { Mesh* m = mChunkData[chunk].getMesh(); const Edge* edges = m->getEdges(); const Vertex* vertices = m->getVertices(); for (uint32_t trn = 0; trn < m->getFacetCount(); ++trn) { if (m->getFacet(trn)->userData == 0) continue; for (uint32_t ei = 0; ei < m->getFacet(trn)->edgesCount; ++ei) { int32_t v1 = edges[m->getFacet(trn)->firstEdgeNumber + ei].s; int32_t v2 = edges[m->getFacet(trn)->firstEdgeNumber + ei].e; bnd.include(NvVec3(vertices[v1].uv[0].x, vertices[v1].uv[0].y, 0.0f)); bnd.include(NvVec3(vertices[v2].uv[0].x, vertices[v2].uv[0].y, 0.0f)); } } } float xscale = side / (bnd.maximum.x - bnd.minimum.x); float yscale = side / (bnd.maximum.y - bnd.minimum.y); xscale = std::min(xscale, yscale); // To have uniform scaling for (uint32_t chunk = 0; chunk < mChunkPostprocessors.size(); ++chunk) { if (!mask.empty() && mask.find(mChunkPostprocessors[chunk]->getParentChunkId()) == mask.end()) continue; std::vector<Triangle>& ctrs = mChunkPostprocessors[chunk]->getBaseMeshNotFitted(); std::vector<Triangle>& output = mChunkPostprocessors[chunk]->getBaseMesh(); for (uint32_t trn = 0; trn < ctrs.size(); ++trn) { if (ctrs[trn].userData == 0) continue; output[trn].a.uv[0].x = (ctrs[trn].a.uv[0].x - bnd.minimum.x) * xscale; output[trn].b.uv[0].x = (ctrs[trn].b.uv[0].x - bnd.minimum.x) * xscale; output[trn].c.uv[0].x = (ctrs[trn].c.uv[0].x - bnd.minimum.x) * xscale; output[trn].a.uv[0].y = (ctrs[trn].a.uv[0].y - bnd.minimum.y) * xscale; output[trn].b.uv[0].y = (ctrs[trn].b.uv[0].y - bnd.minimum.y) * xscale; output[trn].c.uv[0].y = (ctrs[trn].c.uv[0].y - bnd.minimum.y) * xscale; } } } void FractureToolImpl::markLeaves() { for (ChunkInfo& info : mChunkData) { info.isLeaf = true; } for (ChunkInfo& info : mChunkData) { const int32_t infoIndex = getChunkInfoIndex(info.parentChunkId); if (infoIndex >= 0) { mChunkData[infoIndex].isLeaf = false; } } } bool FractureToolImpl::setChunkInfoMesh(ChunkInfo& chunkInfo, Mesh* mesh, bool fromTransformed /*= true*/) { // Class to access protected ChunkInfo members struct ChunkInfoAuth : public ChunkInfo { void setMesh(Mesh* mesh, const TransformST& parentTM) { meshData = mesh; if (meshData != nullptr) { // Calculate the world transform meshData->recalculateBoundingBox(); const TransformST localTM = createCubeTMFromBounds(meshData->getBoundingBox()); tmToWorld.s = parentTM.s * localTM.s; tmToWorld.t = parentTM.s * localTM.t + parentTM.t; // Transform vertex buffer to fit in unit cube Vertex* verticesBuffer = meshData->getVerticesWritable(); for (uint32_t i = 0; i < meshData->getVerticesCount(); ++i) { Nv::Blast::Vertex& v = verticesBuffer[i]; v.p = localTM.invTransformPos(v.p); } // If none of chunk.tmToWorld scales are zero (or less than epsilon), then the bounds // will be { {-1.0f, -1.0f, -1.0f}, {1.0f, 1.0f, 1.0f} }. Just in case, we properly // calculate the bounds here. meshData->recalculateBoundingBox(); } else { tmToWorld = TransformST::identity(); } } bool isInitialized() const { return parentChunkId != ChunkInfo::UninitializedID; } }; ChunkInfoAuth* auth = static_cast<ChunkInfoAuth*>(&chunkInfo); if (!auth->isInitialized()) { return false; } const TransformST parentTM = fromTransformed && chunkInfo.parentChunkId >= 0 ? 
mChunkData[getChunkInfoIndex(chunkInfo.parentChunkId)].getTmToWorld() : TransformST::identity(); auth->setMesh(mesh, parentTM); return true; } void FractureToolImpl::rebuildAdjGraph(const std::vector<uint32_t>& chunks, const NvcVec2i* adjChunks, uint32_t adjChunksSize, std::vector<std::vector<uint32_t> >& chunkGraph) { std::vector<std::pair<uint64_t, uint32_t> > planeChunkIndex; for (uint32_t i = 0; i < chunks.size(); ++i) { for (uint32_t fc = 0; fc < mChunkData[chunks[i]].getMesh()->getFacetCount(); ++fc) { if (mChunkData[chunks[i]].getMesh()->getFacet(fc)->userData != 0) { planeChunkIndex.push_back( std::make_pair(std::abs(mChunkData[chunks[i]].getMesh()->getFacet(fc)->userData), chunks[i])); } } } { std::sort(planeChunkIndex.begin(), planeChunkIndex.end()); auto it = std::unique(planeChunkIndex.begin(), planeChunkIndex.end()); planeChunkIndex.resize(it - planeChunkIndex.begin()); } uint32_t a = 0; for (uint32_t i = 1; i < planeChunkIndex.size(); ++i) { if (planeChunkIndex[a].first != planeChunkIndex[i].first) { uint32_t b = i; for (uint32_t p1 = a; p1 < b; ++p1) { for (uint32_t p2 = p1 + 1; p2 < b; ++p2) { if (planeChunkIndex[p1].second == planeChunkIndex[p2].second || mChunkData[planeChunkIndex[p1].second].parentChunkId != mChunkData[planeChunkIndex[p2].second].parentChunkId) { continue; } bool has = false; for (uint32_t k = 0; k < chunkGraph[planeChunkIndex[p1].second].size(); ++k) { if (chunkGraph[planeChunkIndex[p1].second][k] == planeChunkIndex[p2].second) { has = true; break; } } if (!has) { chunkGraph[planeChunkIndex[p1].second].push_back(planeChunkIndex[p2].second); } has = false; for (uint32_t k = 0; k < chunkGraph[planeChunkIndex[p2].second].size(); ++k) { if (chunkGraph[planeChunkIndex[p2].second][k] == planeChunkIndex[p1].second) { has = true; break; } } if (!has) { chunkGraph[planeChunkIndex[p2].second].push_back(planeChunkIndex[p1].second); } } } a = b; } } // Add in extra adjacency info, if we have it if (adjChunks && adjChunksSize) { std::set<uint32_t> chunkSet(chunks.begin(), chunks.end()); #if NV_DEBUG || NV_CHECKED // Make sure these arrays are sorted for (std::vector<uint32_t>& adj : chunkGraph) { const bool isSorted = std::is_sorted(adj.begin(), adj.end()); if (!isSorted) { NVBLAST_ASSERT(0); NvBlastGlobalGetErrorCallback()->reportError(nvidia::NvErrorCode::eDEBUG_WARNING, "Adjacency array not sorted; subsequent code assumes it is.", __FILE__, __LINE__); } } #endif for (uint32_t i = 0; i < adjChunksSize; ++i) { const NvcVec2i& pair = adjChunks[i]; if (chunkSet.find((uint32_t)pair.x) == chunkSet.end() || chunkSet.find((uint32_t)pair.y) == chunkSet.end()) { continue; } { std::vector<uint32_t>& adj0 = chunkGraph[pair.x]; std::vector<uint32_t>::iterator it0 = std::lower_bound(adj0.begin(), adj0.end(), (uint32_t)pair.y); if (it0 == adj0.end() || *it0 != (uint32_t)pair.y) { adj0.insert(it0, (uint32_t)pair.y); } } { std::vector<uint32_t>& adj1 = chunkGraph[pair.y]; std::vector<uint32_t>::iterator it1 = std::lower_bound(adj1.begin(), adj1.end(), (uint32_t)pair.x); if (it1 == adj1.end() || *it1 != (uint32_t)pair.x) { adj1.insert(it1, (uint32_t)pair.x); } } } } } bool VecIntComp(const std::pair<NvcVec3, uint32_t>& a, const std::pair<NvcVec3, uint32_t>& b) { if (a.first.x < b.first.x) return true; if (a.first.x > b.first.x) return false; if (a.first.y < b.first.y) return true; if (a.first.y > b.first.y) return false; if (a.first.z < b.first.z) return true; if (a.first.z > b.first.z) return false; return a.second < b.second; } void FractureToolImpl::uniteChunks(uint32_t 
threshold, uint32_t targetClusterSize, const uint32_t* chunksToMerge, uint32_t mergeChunkCount, const NvcVec2i* adjChunks, uint32_t adjChunksSize, bool removeOriginalChunks /*= false*/) { std::vector<int32_t> depth(mChunkData.size(), 0); std::vector<std::vector<uint32_t> > chunkGraph(mChunkData.size()); std::vector<uint32_t> atEachDepth; std::vector<uint32_t> childNumber(mChunkData.size(), 0); std::vector<uint32_t> chunksToRemove; enum ChunkFlags { Mergeable = (1 << 0), Merged = (1 << 1) }; std::vector<uint32_t> chunkFlags(mChunkData.size()); if (chunksToMerge == nullptr) { std::fill(chunkFlags.begin(), chunkFlags.end(), Mergeable); } else { // Seed all mergeable chunks with Mergeable flag for (uint32_t chunkN = 0; chunkN < mergeChunkCount; ++chunkN) { const uint32_t chunkIndex = chunksToMerge[chunkN]; chunkFlags[chunkIndex] |= Mergeable; } // Make all descendants mergable too std::vector<int32_t> treeWalk; for (uint32_t chunkInfoIndex = 0; chunkInfoIndex < mChunkData.size(); ++chunkInfoIndex) { treeWalk.clear(); int32_t walkInfoIndex = (int32_t)chunkInfoIndex; do { if (chunkFlags[walkInfoIndex] & Mergeable) { std::for_each(treeWalk.begin(), treeWalk.end(), [&chunkFlags](int32_t index) {chunkFlags[index] |= Mergeable; }); break; } treeWalk.push_back(walkInfoIndex); } while ((walkInfoIndex = getChunkInfoIndex(mChunkData[walkInfoIndex].parentChunkId)) >= 0); } } int32_t maxDepth = 0; for (uint32_t i = 0; i < mChunkData.size(); ++i) { if (mChunkData[i].parentChunkId != -1) childNumber[getChunkInfoIndex(mChunkData[i].parentChunkId)]++; depth[i] = getChunkDepth(mChunkData[i].chunkId); NVBLAST_ASSERT(depth[i] >= 0); maxDepth = std::max(maxDepth, depth[i]); } for (int32_t level = maxDepth; level > 0; --level) // go from leaves to trunk and rebuild hierarchy { std::vector<uint32_t> cGroup; std::vector<uint32_t> chunksToUnify; NvcVec3 minPoint = {MAXIMUM_EXTENT, MAXIMUM_EXTENT, MAXIMUM_EXTENT}; VrtPositionComparator posc; for (uint32_t ch = 0; ch < depth.size(); ++ch) { if (depth[ch] == level && childNumber[getChunkInfoIndex(mChunkData[ch].parentChunkId)] > threshold && (chunkFlags[ch] & Mergeable) != 0) { chunksToUnify.push_back(ch); NvcVec3 cp = fromNvShared(toNvShared(mChunkData[ch].getMesh()->getBoundingBox()).getCenter()); if (posc(cp, minPoint)) { minPoint = cp; } } } std::vector<std::pair<float, uint32_t> > distances; for (uint32_t i = 0; i < chunksToUnify.size(); ++i) { float d = (toNvShared(minPoint) - toNvShared(mChunkData[chunksToUnify[i]].getMesh()->getBoundingBox()).getCenter()).magnitude(); distances.push_back(std::make_pair(d, chunksToUnify[i])); } std::sort(distances.begin(), distances.end()); for (uint32_t i = 0; i < chunksToUnify.size(); ++i) { chunksToUnify[i] = distances[i].second; } rebuildAdjGraph(chunksToUnify, adjChunks, adjChunksSize, chunkGraph); for (uint32_t iter = 0; iter < 32 && chunksToUnify.size() > threshold; ++iter) { std::vector<uint32_t> newChunksToUnify; for (uint32_t c = 0; c < chunksToUnify.size(); ++c) { if ((chunkFlags[chunksToUnify[c]] & Mergeable) == 0) continue; chunkFlags[chunksToUnify[c]] &= ~Mergeable; cGroup.push_back(chunksToUnify[c]); for (uint32_t sc = 0; sc < cGroup.size() && cGroup.size() < targetClusterSize; ++sc) { uint32_t sid = cGroup[sc]; for (uint32_t neighbN = 0; neighbN < chunkGraph[sid].size() && cGroup.size() < targetClusterSize; ++neighbN) { const uint32_t chunkNeighb = chunkGraph[sid][neighbN]; if (mChunkData[chunkNeighb].parentChunkId != mChunkData[sid].parentChunkId) continue; if ((chunkFlags[chunkNeighb] & Mergeable) == 0) 
continue; chunkFlags[chunkNeighb] &= ~Mergeable; cGroup.push_back(chunkNeighb); } } if (cGroup.size() > 1) { uint32_t newChunk = stretchGroup(cGroup, chunkGraph); for (uint32_t chunk : cGroup) { if (removeOriginalChunks && !(chunkFlags[chunk] & Merged)) { chunksToRemove.push_back(chunk); } } cGroup.clear(); newChunksToUnify.push_back(newChunk); chunkFlags.push_back(Merged); } else { cGroup.clear(); } } chunksToUnify = newChunksToUnify; rebuildAdjGraph(chunksToUnify, adjChunks, adjChunksSize, chunkGraph); } } // Remove chunks std::vector<uint32_t> remap(mChunkData.size(), 0xFFFFFFFF); std::sort(chunksToRemove.begin(), chunksToRemove.end()); std::vector<uint32_t>::iterator removeIt = chunksToRemove.begin(); size_t chunkWriteIndex = 0; for (size_t chunkReadIndex = 0; chunkReadIndex < mChunkData.size(); ++chunkReadIndex) { if (removeIt < chunksToRemove.end()) { if (*removeIt == chunkReadIndex) { ++removeIt; continue; } } if (chunkReadIndex != chunkWriteIndex) { mChunkData[chunkWriteIndex] = mChunkData[chunkReadIndex]; } remap[chunkReadIndex] = chunkWriteIndex++; } mChunkData.resize(chunkWriteIndex); for (ChunkInfo& chunkInfo : mChunkData) { if (chunkInfo.parentChunkId >= 0) { const uint32_t mappedParentIndex = remap[getChunkInfoIndex(chunkInfo.parentChunkId)]; NVBLAST_ASSERT(mappedParentIndex < mChunkData.size()); if (mappedParentIndex < mChunkData.size()) { chunkInfo.parentChunkId = mChunkData[mappedParentIndex].chunkId; } } } } bool FractureToolImpl::setApproximateBonding(uint32_t chunkIndex, bool useApproximateBonding) { if ((size_t)chunkIndex >= mChunkData.size()) { return false; } if (useApproximateBonding) { mChunkData[chunkIndex].flags |= (uint32_t)ChunkInfo::APPROXIMATE_BONDING; } else { mChunkData[chunkIndex].flags &= ~(uint32_t)ChunkInfo::APPROXIMATE_BONDING; } return true; } int32_t FractureToolImpl::createId() { // make sure there is a free ID to be returned if (mChunkIdsUsed.size() >= (size_t)INT32_MAX + 1) { NvBlastGlobalGetErrorCallback()->reportError(nvidia::NvErrorCode::eINTERNAL_ERROR, "Chunk IDs exhausted.", __FILE__, __LINE__); return -1; } // find the next free ID while (mChunkIdsUsed.count(mNextChunkId)) { // handle wrapping if (++mNextChunkId < 0) mNextChunkId = 0; } // step the counter and handle wrapping const int32_t id = mNextChunkId++; if (mNextChunkId < 0) mNextChunkId = 0; return (reserveId(id) ? id : -1); } bool FractureToolImpl::reserveId(int32_t id) { // add it to the used set and make sure it wasn't already in there const auto ret = mChunkIdsUsed.insert(id); NVBLAST_ASSERT_WITH_MESSAGE(ret.second, "Request to reserve ID, but it is already in use"); return ret.second; } } // namespace Blast } // namespace Nv
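// ---------------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original source): ties together the FractureTool
// entry points implemented above. How the FractureTool instance is obtained (e.g. an authoring
// factory such as NvBlastExtAuthoringCreateFractureTool) is an assumption and depends on the
// surrounding SDK setup; the member calls themselves match the implementation in this file.
// ---------------------------------------------------------------------------------------------
static void exampleVoronoiFracture(Nv::Blast::FractureTool* tool, const Nv::Blast::Mesh* sourceMesh,
                                   uint32_t siteCount, const NvcVec3* sites)
{
    const Nv::Blast::Mesh* meshes[] = { sourceMesh };
    tool->setSourceMeshes(meshes, 1);                       // the first source mesh becomes chunk 0 (the root)
    tool->voronoiFracturing(0, siteCount, sites, false);    // unscaled Voronoi overload, as implemented above
    tool->finalizeFracturing();                             // triangulates chunks and fits interior-face UVs
    for (uint32_t i = 0; i < tool->getChunkCount(); ++i)
    {
        Nv::Blast::Triangle* tris = nullptr;
        const uint32_t triCount = tool->getBaseMesh(int32_t(i), tris); // allocates the output with new[]
        // ... consume triCount triangles for chunk id tool->getChunkId(int32_t(i)) ...
        delete[] tris;                                      // caller owns the array returned by getBaseMesh()
    }
    tool->release();
}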
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringPerlinNoise.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTEXTAUTHORINGPERLINNOISE_H #define NVBLASTEXTAUTHORINGPERLINNOISE_H #include <NvBlastExtAuthoringFractureTool.h> #include "NvVec4.h" #include "NvVec3.h" #define PERLIN_NOISE_SAMPLE_TABLE 512 using nvidia::NvVec3; namespace Nv { namespace Blast { /*********** Noise generation routines, copied from Apex. 
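  The sampler below is classic gradient ("Perlin") noise: a permutation table p[] of size
  PERLIN_NOISE_SAMPLE_TABLE indexes a table g[] of random, normalized gradient vectors;
  noiseSample() takes the dot product of each of the eight surrounding lattice gradients with
  the local offset and blends them with the quintic fade curve t^3 * (6t^2 - 15t + 10)
  (trilinear interpolation). PerlinNoise::sample() then sums octaves, doubling the frequency
  and halving the amplitude for each successive octave.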
*/ NV_INLINE float at3(const float& rx, const float& ry, const float& rz, const NvVec3 q) { return rx * q[0] + ry * q[1] + rz * q[2]; } NV_INLINE float fade(float t) { return t * t * t * (t * (t * 6.0f - 15.0f) + 10.0f); } NV_INLINE float lerp(float t, float a, float b) { return a + t * (b - a); } NV_INLINE void setup(int i, NvVec3 point, float& t, int& b0, int& b1, float& r0, float& r1) { t = point[i] + (0x1000); b0 = ((int)t) & (PERLIN_NOISE_SAMPLE_TABLE - 1); b1 = (b0 + 1) & (PERLIN_NOISE_SAMPLE_TABLE - 1); r0 = t - (int)t; r1 = r0 - 1.0f; } NV_INLINE float noiseSample(NvVec3 point, int* p, NvVec3* g) { int bx0, bx1, by0, by1, bz0, bz1, b00, b10, b01, b11; float rx0, rx1, ry0, ry1, rz0, rz1, sy, sz, a, b, c, d, t, u, v; NvVec3 q; int i, j; setup(0, point, t, bx0, bx1, rx0, rx1); setup(1, point, t, by0, by1, ry0, ry1); setup(2, point, t, bz0, bz1, rz0, rz1); i = p[bx0]; j = p[bx1]; b00 = p[i + by0]; b10 = p[j + by0]; b01 = p[i + by1]; b11 = p[j + by1]; t = fade(rx0); sy = fade(ry0); sz = fade(rz0); q = g[b00 + bz0]; u = at3(rx0, ry0, rz0, q); q = g[b10 + bz0]; v = at3(rx1, ry0, rz0, q); a = lerp(t, u, v); q = g[b01 + bz0]; u = at3(rx0, ry1, rz0, q); q = g[b11 + bz0]; v = at3(rx1, ry1, rz0, q); b = lerp(t, u, v); c = lerp(sy, a, b); q = g[b00 + bz1]; u = at3(rx0, ry0, rz1, q); q = g[b10 + bz1]; v = at3(rx1, ry0, rz1, q); a = lerp(t, u, v); q = g[b01 + bz1]; u = at3(rx0, ry1, rz1, q); q = g[b11 + bz1]; v = at3(rx1, ry1, rz1, q); b = lerp(t, u, v); d = lerp(sy, a, b); return lerp(sz, c, d); } /** Perlin Noise generation tool */ class PerlinNoise { public: /** \param[in] rnd Random value generator \param[in] octaves Number of noise octaves \param[in] frequency Frequency of noise \param[in] amplitude Amplitude of noise */ PerlinNoise(Nv::Blast::RandomGeneratorBase* rnd, int octaves = 1, float frequency = 1., float amplitude = 1.) 
: mRnd(rnd), mOctaves(octaves), mFrequency(frequency), mAmplitude(amplitude), mbInit(false) { } /* Reset state of noise generator \param[in] octaves Number of noise octaves \param[in] frequency Frequency of noise \param[in] amplitude Amplitude of noise */ void reset(int octaves = 1, float frequency = 1.f, float amplitude = 1.f) { mOctaves = octaves; mFrequency = frequency; mAmplitude = amplitude; init(); } /** Get Perlin Noise value at given point */ float sample(const nvidia::NvVec3& point) { return perlinNoise(point); } private: PerlinNoise& operator=(const PerlinNoise&); float perlinNoise(nvidia::NvVec3 point) { if (!mbInit) init(); const int octaves = mOctaves; const float frequency = mFrequency; float amplitude = mAmplitude; float result = 0.0f; point *= frequency; for (int i = 0; i < octaves; ++i) { NvVec3 lpnt; lpnt[0] = point.x; lpnt[1] = point.y; lpnt[2] = point.z; result += (noiseSample(lpnt, p, g)) * amplitude; point *= 2.0f; amplitude *= 0.5f; } return result; } void init(void) { mbInit = true; unsigned i, j; int k; for (i = 0; i < (unsigned)PERLIN_NOISE_SAMPLE_TABLE; i++) { p[i] = (int)i; for (j = 0; j < 3; ++j) g[i][j] = mRnd->getRandomValue(); g[i].normalize(); } while (--i) { k = p[i]; j = static_cast<uint32_t>(mRnd->getRandomValue() * PERLIN_NOISE_SAMPLE_TABLE); p[i] = p[j]; p[j] = k; } for (i = 0; i < PERLIN_NOISE_SAMPLE_TABLE + 2; ++i) { p[(unsigned)PERLIN_NOISE_SAMPLE_TABLE + i] = p[i]; for (j = 0; j < 3; ++j) g[(unsigned)PERLIN_NOISE_SAMPLE_TABLE + i][j] = g[i][j]; } } Nv::Blast::RandomGeneratorBase* mRnd; int mOctaves; float mFrequency; float mAmplitude; // Permutation vector int p[(unsigned)(PERLIN_NOISE_SAMPLE_TABLE + PERLIN_NOISE_SAMPLE_TABLE + 2)]; // Gradient vector NvVec3 g[(unsigned)(PERLIN_NOISE_SAMPLE_TABLE + PERLIN_NOISE_SAMPLE_TABLE + 2)]; bool mbInit; }; /** Simplex noise generation tool */ class SimplexNoise { int32_t mOctaves; float mAmplitude; float mFrequency; int32_t mSeed; static const int X_NOISE_GEN = 1619; static const int Y_NOISE_GEN = 31337; static const int Z_NOISE_GEN = 6971; static const int W_NOISE_GEN = 1999; static const int SEED_NOISE_GEN = 1013; static const int SHIFT_NOISE_GEN = 8; NV_INLINE int fastfloor(float x) { return (x >= 0) ? 
(int)x : (int)(x - 1); } SimplexNoise& operator=(const SimplexNoise&) { return *this; } public: /** \param[in] ampl Amplitude of noise \param[in] freq Frequency of noise \param[in] octaves Number of noise octaves \param[in] seed Random seed value */ SimplexNoise(float ampl, float freq, int32_t octaves, int32_t seed) : mOctaves(octaves), mAmplitude(ampl), mFrequency(freq), mSeed(seed) {}; // 4D simplex noise // returns: (x,y,z) = noise grad, w = noise value /** Evaluate noise at given 4d-point \param[in] x x coordinate of point \param[in] y y coordinate of point \param[in] z z coordinate of point \param[in] w w coordinate of point \param[in] seed Random seed value \return Noise valued vector (x,y,z) and scalar (w) */ nvidia::NvVec4 eval4D(float x, float y, float z, float w, int seed) { // The skewing and unskewing factors are hairy again for the 4D case const float F4 = (nvidia::NvSqrt(5.0f) - 1.0f) / 4.0f; const float G4 = (5.0f - nvidia::NvSqrt(5.0f)) / 20.0f; // Skew the (x,y,z,w) space to determine which cell of 24 simplices we're in float s = (x + y + z + w) * F4; // Factor for 4D skewing int ix = fastfloor(x + s); int iy = fastfloor(y + s); int iz = fastfloor(z + s); int iw = fastfloor(w + s); float tu = (ix + iy + iz + iw) * G4; // Factor for 4D unskewing // Unskew the cell origin back to (x,y,z,w) space float x0 = x - (ix - tu); // The x,y,z,w distances from the cell origin float y0 = y - (iy - tu); float z0 = z - (iz - tu); float w0 = w - (iw - tu); int c = (x0 > y0) ? (1 << 0) : (1 << 2); c += (x0 > z0) ? (1 << 0) : (1 << 4); c += (x0 > w0) ? (1 << 0) : (1 << 6); c += (y0 > z0) ? (1 << 2) : (1 << 4); c += (y0 > w0) ? (1 << 2) : (1 << 6); c += (z0 > w0) ? (1 << 4) : (1 << 6); nvidia::NvVec4 res; res.setZero(); // Calculate the contribution from the five corners for (int p = 4; p >= 0; --p) { int ixp = ((c >> 0) & 3) >= p ? 1 : 0; int iyp = ((c >> 2) & 3) >= p ? 1 : 0; int izp = ((c >> 4) & 3) >= p ? 1 : 0; int iwp = ((c >> 6) & 3) >= p ? 1 : 0; float xp = x0 - ixp + (4 - p) * G4; float yp = y0 - iyp + (4 - p) * G4; float zp = z0 - izp + (4 - p) * G4; float wp = w0 - iwp + (4 - p) * G4; float t = 0.6f - xp * xp - yp * yp - zp * zp - wp * wp; if (t > 0) { //get index int gradIndex = int(( X_NOISE_GEN * (ix + ixp) + Y_NOISE_GEN * (iy + iyp) + Z_NOISE_GEN * (iz + izp) + W_NOISE_GEN * (iw + iwp) + SEED_NOISE_GEN * seed) & 0xffffffff); gradIndex ^= (gradIndex >> SHIFT_NOISE_GEN); gradIndex &= 31; nvidia::NvVec4 g; { const int h = gradIndex; const int hs = 2 - (h >> 4); const int h1 = (h >> 3); g.x = (h1 == 0) ? 0.0f : ((h & 4) ? -1.0f : 1.0f); g.y = (h1 == 1) ? 0.0f : ((h & (hs << 1)) ? -1.0f : 1.0f); g.z = (h1 == 2) ? 0.0f : ((h & hs) ? -1.0f : 1.0f); g.w = (h1 == 3) ? 0.0f : ((h & 1) ? -1.0f : 1.0f); } float gdot = (g.x * xp + g.y * yp + g.z * zp + g.w * wp); float t2 = t * t; float t3 = t2 * t; float t4 = t3 * t; float dt4gdot = 8 * t3 * gdot; res.x += t4 * g.x - dt4gdot * xp; res.y += t4 * g.y - dt4gdot * yp; res.z += t4 * g.z - dt4gdot * zp; res.w += t4 * gdot; } } // scale the result to cover the range [-1,1] res *= 27; return res; } /** Evaluate noise at given 3d-point \param[in] p Point in which noise will be evaluated \return Noise value at given point */ float sample(nvidia::NvVec3 p) { p *= mFrequency; float result = 0.0f; float alpha = 1; for (int32_t i = 1; i <= mOctaves; ++i) { result += eval4D(p.x * i, p.y * i, p.z * i, i * 5.0f, mSeed).w * alpha; alpha *= 0.45f; } return result * mAmplitude; } }; } // Blast namespace } // Nv namespace #endif
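A small self-contained sketch (editor's illustration, not SDK code) of the fractal octave summation that PerlinNoise::perlinNoise above performs: each octave doubles the sampling frequency and halves the amplitude. The single-octave basis function here is only a stand-in for noiseSample().

#include <cmath>

// Stand-in for a single-octave noise basis in roughly [-1, 1]; any smooth 3D noise works here.
static float baseNoise(float x, float y, float z)
{
    return std::sin(x * 1.7f + y * 2.3f + z * 3.1f);
}

// Fractal sum in the style of PerlinNoise::perlinNoise: frequency doubles and amplitude halves per octave.
static float fractalNoise(float x, float y, float z, int octaves, float frequency, float amplitude)
{
    x *= frequency;  y *= frequency;  z *= frequency;
    float result = 0.0f;
    for (int i = 0; i < octaves; ++i)
    {
        result += baseNoise(x, y, z) * amplitude;
        x *= 2.0f;  y *= 2.0f;  z *= 2.0f;
        amplitude *= 0.5f;
    }
    return result;
}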
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringFractureToolImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTAUTHORINGFRACTURETOOLIMPL_H #define NVBLASTAUTHORINGFRACTURETOOLIMPL_H #include "NvBlastExtAuthoringFractureTool.h" #include "NvBlastExtAuthoringMesh.h" #include <vector> #include <set> namespace Nv { namespace Blast { class SpatialAccelerator; class Triangulator; /** Class for voronoi sites generation inside supplied mesh. */ class VoronoiSitesGeneratorImpl : public VoronoiSitesGenerator { public: /** Voronoi sites should not be generated outside of the fractured mesh, so VoronoiSitesGenerator should be supplied with fracture mesh. \param[in] mesh Fracture mesh \param[in] rnd User supplied random value generator. \return */ VoronoiSitesGeneratorImpl(const Mesh* mesh, RandomGeneratorBase* rnd); ~VoronoiSitesGeneratorImpl(); void release() override; /** Set base fracture mesh */ void setBaseMesh(const Mesh* m) override; /** Access to generated voronoi sites. \note User should call NVBLAST_FREE for hulls and hullsOffset when it not needed anymore \param[out] Pointer to generated voronoi sites \return Count of generated voronoi sites. 
*/ uint32_t getVoronoiSites(const NvcVec3*& sites) override; /** Add site in particular point \param[in] site Site coordinates */ void addSite(const NvcVec3& site) override; /** Uniformly generate sites inside the mesh \param[in] numberOfSites Number of generated sites */ void uniformlyGenerateSitesInMesh(uint32_t numberOfSites) override; /** Generate sites in clustered fashion \param[in] numberOfClusters Number of generated clusters \param[in] sitesPerCluster Number of sites in each cluster \param[in] clusterRadius Voronoi cells cluster radius */ void clusteredSitesGeneration(uint32_t numberOfClusters, uint32_t sitesPerCluster, float clusterRadius) override; /** Radial pattern of sites generation \param[in] center Center of generated pattern \param[in] normal Normal to plane in which sites are generated \param[in] radius Pattern radius \param[in] angularSteps Number of angular steps \param[in] radialSteps Number of radial steps \param[in] angleOffset Angle offset at each radial step \param[in] variability Randomness of sites distribution */ void radialPattern(const NvcVec3& center, const NvcVec3& normal, float radius, int32_t angularSteps, int32_t radialSteps, float angleOffset = 0.0f, float variability = 0.0f) override; /** Generate sites inside sphere \param[in] count Count of generated sites \param[in] radius Radius of sphere \param[in] center Center of sphere */ void generateInSphere(const uint32_t count, const float radius, const NvcVec3& center) override; /** Set stencil mesh. With stencil mesh sites are generated only inside both of fracture and stencil meshes. \param[in] stencil Stencil mesh. */ void setStencil(const Mesh* stencil) override; /** Removes stencil mesh */ void clearStencil() override; /** Deletes sites inside supplied sphere \param[in] radius Radius of sphere \param[in] center Center of sphere \param[in] eraserProbability Probability of removing some particular site */ void deleteInSphere(const float radius, const NvcVec3& center, const float eraserProbability = 1) override; private: std::vector <NvcVec3> mGeneratedSites; const Mesh* mMesh; const Mesh* mStencil; RandomGeneratorBase* mRnd; SpatialAccelerator* mAccelerator; }; /** FractureTool class provides methods to fracture provided mesh and generate Blast asset data */ class FractureToolImpl : public FractureTool { public: /** FractureTool can log asset creation info if logCallback is provided. */ FractureToolImpl() : mRemoveIslands(false) { reset(); } ~FractureToolImpl() { reset(); } void release() override; /** Reset FractureTool state. */ void reset() override; /** Set the material id to use for new interior faces. Defaults to kMaterialInteriorId */ void setInteriorMaterialId(int32_t materialId) override; /** Gets the material id to use for new interior faces */ int32_t getInteriorMaterialId() const override; /** Replaces an material id on faces with a new one */ void replaceMaterialId(int32_t oldMaterialId, int32_t newMaterialId) override; /** Set input meshes which will be fractured, FractureTool will be reset. If ids != nullptr, it must point to an array of length meshSizes. Each mesh will be assigned to a chunk with ID given by the corresponding element in ids. If the corresponding element is negative, or ids is NULL, then the chunk will be assigned an arbitrary (but currently unused) ID. Returns true iff all meshes were assigned chunks with valid IDs. 
*/
    bool setSourceMeshes(Mesh const * const * meshes, uint32_t meshesSize, const int32_t* ids = nullptr) override;

    /**
        Set chunk mesh; parentId should be valid. Returns the ID of the new chunk.
        If chunkId >= 0 and currently unused, then that ID will be used (and returned).
        Otherwise an arbitrary (but currently unused) ID will be used and returned.
    */
    int32_t setChunkMesh(const Mesh* mesh, int32_t parentId, int32_t chunkId = -1) override;

    /**
        Get chunk mesh in polygonal representation
    */
    Mesh* createChunkMesh(int32_t chunkInfoIndex, bool splitUVs = true) override;

    /**
        Fractures specified chunk with voronoi method.
        \param[in] chunkId      Chunk to fracture
        \param[in] cellCount    Number of voronoi sites
        \param[in] cellPoints   Array of voronoi sites
        \param[in] replaceChunk If 'true', newly generated chunks will replace the source chunk; if 'false', newly generated
                                chunks will be at the next depth level and the source chunk will be their parent.
                                The case replaceChunk == true && chunkId == 0 is considered invalid input.
        \return If 0, fracturing is successful.
    */
    int32_t voronoiFracturing(uint32_t chunkId, uint32_t cellCount, const NvcVec3* cellPoints, bool replaceChunk) override;

    /**
        Fractures specified chunk with voronoi method. Cells can be scaled along x,y,z axes.
        \param[in] chunkId      Chunk to fracture
        \param[in] cellCount    Number of voronoi sites
        \param[in] cellPoints   Array of voronoi sites
        \param[in] scale        Voronoi cells scaling factor
        \param[in] rotation     Voronoi cells rotation. Has no effect without cells scale factor
        \param[in] replaceChunk If 'true', newly generated chunks will replace the source chunk; if 'false', newly generated
                                chunks will be at the next depth level and the source chunk will be their parent.
                                The case replaceChunk == true && chunkId == 0 is considered invalid input.
        \return If 0, fracturing is successful.
    */
    int32_t voronoiFracturing(uint32_t chunkId, uint32_t cellCount, const NvcVec3* cellPoints, const NvcVec3& scale, const NvcQuat& rotation, bool replaceChunk) override;

    /**
        Fractures specified chunk with slicing method.
        \param[in] chunkId      Chunk to fracture
        \param[in] conf         Slicing parameters, see SlicingConfiguration.
        \param[in] replaceChunk If 'true', newly generated chunks will replace the source chunk; if 'false', newly generated
                                chunks will be at the next depth level and the source chunk will be their parent.
                                The case replaceChunk == true && chunkId == 0 is considered invalid input.
        \param[in] rnd          User supplied random number generator
        \return If 0, fracturing is successful.
    */
    int32_t slicing(uint32_t chunkId, const SlicingConfiguration& conf, bool replaceChunk, RandomGeneratorBase* rnd) override;

    /**
        Cut chunk with plane.
        \param[in] chunkId      Chunk to fracture
        \param[in] normal       Plane normal
        \param[in] position     Point on plane
        \param[in] noise        Noise configuration for plane-chunk intersection, see NoiseConfiguration.
        \param[in] replaceChunk If 'true', newly generated chunks will replace the source chunk; if 'false', newly generated
                                chunks will be at the next depth level and the source chunk will be their parent.
                                The case replaceChunk == true && chunkId == 0 is considered invalid input.
        \param[in] rnd          User supplied random number generator
        \return If 0, fracturing is successful.
    */
    int32_t cut(uint32_t chunkId, const NvcVec3& normal, const NvcVec3& position, const NoiseConfiguration& noise, bool replaceChunk, RandomGeneratorBase* rnd) override;

    /**
        Cutout fracture for specified chunk.
        \param[in] chunkId      Chunk to fracture
        \param[in] conf         Cutout parameters, see CutoutConfiguration.
        \param[in] replaceChunk If 'true', newly generated chunks will replace the source chunk; if 'false', newly generated
                                chunks will be at the next depth level and the source chunk will be their parent.
                                The case replaceChunk == true && chunkId == 0 is considered invalid input.
        \param[in] rnd          User supplied random number generator
        \return If 0, fracturing is successful.
    */
    int32_t cutout(uint32_t chunkId, CutoutConfiguration conf, bool replaceChunk, RandomGeneratorBase* rnd) override;

    /**
        Creates resulting fractured mesh geometry from intermediate format
    */
    void finalizeFracturing() override;

    uint32_t getChunkCount() const override;

    /**
        Get chunk information
    */
    const ChunkInfo& getChunkInfo(int32_t chunkInfoIndex) override;

    /**
        Get percentage of mesh overlap.
        The percentage is computed as volume(intersection(meshA, meshB)) / volume(meshA).
        \param[in] meshA Mesh A
        \param[in] meshB Mesh B
        \return Mesh overlap percentage
    */
    float getMeshOverlap(const Mesh& meshA, const Mesh& meshB) override;

    /**
        Get chunk base mesh
        \note User should call NVBLAST_FREE for output when it is no longer needed
        \param[in] chunkIndex Chunk index
        \param[out] output Array of triangles to be filled
        \return Number of triangles in base mesh
    */
    uint32_t getBaseMesh(int32_t chunkIndex, Triangle*& output) override;

    /**
        Update chunk base mesh
        \note Doesn't allocate the output array; Triangle* output should be preallocated by the user
        \param[in] chunkIndex Chunk index
        \param[out] output Array of triangles to be filled
        \return Number of triangles in base mesh
    */
    uint32_t updateBaseMesh(int32_t chunkIndex, Triangle* output) override;

    /**
        Return info index of chunk with specified chunkId
        \param[in] chunkId Chunk ID
        \return Chunk index in the internal buffer, or -1 if it does not exist.
    */
    int32_t getChunkInfoIndex(int32_t chunkId) const override;

    /**
        Return id of chunk with specified index.
        \param[in] chunkInfoIndex Chunk info index
        \return Chunk id or -1 if there is no such chunk.
    */
    int32_t getChunkId(int32_t chunkInfoIndex) const override;

    /**
        Return depth level of the given chunk
        \param[in] chunkId Chunk ID
        \return Chunk depth or -1 if there is no such chunk.
    */
    int32_t getChunkDepth(int32_t chunkId) const override;

    /**
        Return array of chunk IDs at the given depth.
        \note User should call NVBLAST_FREE for chunkIds when it is no longer needed
        \param[in] depth Chunk depth
        \param[out] chunkIds Pointer to array of chunk IDs
        \return Number of chunks in array
    */
    uint32_t getChunksIdAtDepth(uint32_t depth, int32_t*& chunkIds) const override;

    /**
        Get result geometry without noise as vertex and index buffers, where index buffers contain series of triplets
        which represent triangles.
        \note User should call NVBLAST_FREE for vertexBuffer, indexBuffer and indexBufferOffsets when they are no longer needed
        \param[out] vertexBuffer Array of vertices to be filled
        \param[out] indexBuffer Array of indices to be filled
        \param[out] indexBufferOffsets Array of offsets in indexBuffer for each base mesh.
                    Contains getChunkCount() + 1 elements. The last one is the indexBuffer size.
        \return Number of vertices in vertexBuffer
    */
    uint32_t getBufferedBaseMeshes(Vertex*& vertexBuffer, uint32_t*& indexBuffer, uint32_t*& indexBufferOffsets) override;

    /**
        Set automatic island removal. May cause instabilities.
        \param[in] isRemoveIslands Flag for whether or not to remove islands.
    */
    void setRemoveIslands(bool isRemoveIslands) override;

    /**
        Try to find islands in a specific chunk and remove them.
        If the chunk has children, island removal can lead to incorrect results! Apply it before further chunk splitting.
\param[in] chunkId Chunk ID which should be checked for islands \return Number of found islands is returned */ int32_t islandDetectionAndRemoving(int32_t chunkId, bool createAtNewDepth = false) override; /** Check if input mesh contains open edges. Open edges can lead to wrong fracturing results. \return true if mesh contains open edges */ bool isMeshContainOpenEdges(const Mesh* input) override; bool deleteChunkSubhierarchy(int32_t chunkId, bool deleteRoot = false) override; void uniteChunks(uint32_t threshold, uint32_t targetClusterSize, const uint32_t* chunksToMerge, uint32_t mergeChunkCount, const NvcVec2i* adjChunks, uint32_t adjChunksSize, bool removeOriginalChunks = false) override; bool setApproximateBonding(uint32_t chunkId, bool useApproximateBonding) override; /** Rescale interior uv coordinates of given chunk to fit square of given size. \param[in] side Size of square side \param[in] chunkId Chunk ID for which UVs should be scaled. */ void fitUvToRect(float side, uint32_t chunkId) override; /** Rescale interior uv coordinates of all existing chunks to fit square of given size, relative sizes will be preserved. \param[in] side Size of square side */ void fitAllUvToRect(float side) override; private: bool isAncestorForChunk(int32_t ancestorId, int32_t chunkId); int32_t slicingNoisy(uint32_t chunkId, const SlicingConfiguration& conf, bool replaceChunk, RandomGeneratorBase* rnd); uint32_t stretchGroup(const std::vector<uint32_t>& group, std::vector<std::vector<uint32_t>>& graph); void rebuildAdjGraph(const std::vector<uint32_t>& chunksToRebuild, const NvcVec2i* adjChunks, uint32_t adjChunksSize, std::vector<std::vector<uint32_t> >& chunkGraph); void fitAllUvToRect(float side, std::set<uint32_t>& mask); void markLeaves(); /* * Meshes are transformed to fit a unit cube, for algorithmic stability. This transform is stored * in the ChunkInfo. Some meshes are created from already-transformed chunks. If so, set * fromTransformed = true, so that the transform-to-world can be concatenated with the source mesh's. * * chunkInfo.parentChunkId must be valid if fromTransformed == true. * * Returns true iff successful. */ bool setChunkInfoMesh(ChunkInfo& chunkInfo, Mesh* mesh, bool fromTransformed = true); /** Returns newly created chunk index in mChunkData. */ uint32_t createNewChunk(uint32_t parentChunkId); /** * Returns a previously unused ID. */ int32_t createId(); /** * Mark the given ID as being used. Returns false if that ID was already marked as in use, true otherwise */ bool reserveId(int32_t id); protected: /* Chunk mesh wrappers */ std::vector<Triangulator*> mChunkPostprocessors; int64_t mPlaneIndexerOffset; int32_t mNextChunkId; std::set<int32_t> mChunkIdsUsed; std::vector<ChunkInfo> mChunkData; bool mRemoveIslands; int32_t mInteriorMaterialId; }; int32_t findCellBasePlanes(const std::vector<NvcVec3>& sites, std::vector<std::vector<std::pair<int32_t, int32_t>>>& neighbors); Mesh* getCellMesh(class BooleanEvaluator& eval, int32_t planeIndexerOffset, int32_t cellId, const std::vector<NvcVec3>& sites, const std::vector<std::vector<std::pair<int32_t, int32_t>>>& neighbors, int32_t interiorMaterialId, NvcVec3 origin); } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTAUTHORINGFRACTURETOOLIMPL_H
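A hypothetical usage sketch of the interfaces declared above (editor's illustration, not SDK code). It assumes the factory functions NvBlastExtAuthoringCreateFractureTool and NvBlastExtAuthoringCreateVoronoiSitesGenerator from NvBlastExtAuthoring.h, a user-provided RandomGeneratorBase implementation, and that the first source mesh becomes chunk ID 0; error handling is omitted.

#include "NvBlastExtAuthoring.h"
#include "NvBlastExtAuthoringFractureTool.h"

void fractureOnce(Nv::Blast::Mesh* sourceMesh, Nv::Blast::RandomGeneratorBase* rng)
{
    Nv::Blast::FractureTool* tool = NvBlastExtAuthoringCreateFractureTool();
    tool->setSourceMeshes(&sourceMesh, 1);                      // first mesh becomes the root chunk

    Nv::Blast::VoronoiSitesGenerator* sites =
        NvBlastExtAuthoringCreateVoronoiSitesGenerator(sourceMesh, rng);
    sites->uniformlyGenerateSitesInMesh(20);                    // 20 voronoi cells inside the mesh

    const NvcVec3* points = nullptr;
    const uint32_t count = sites->getVoronoiSites(points);

    tool->voronoiFracturing(0, count, points, false);           // children are added below chunk 0
    tool->finalizeFracturing();
    // tool->getChunkCount(), getBaseMesh(), etc. can now be queried.

    sites->release();
    tool->release();
}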
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringBondGeneratorImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. // This warning arises when using some stl containers with older versions of VC // c:\program files (x86)\microsoft visual studio 12.0\vc\include\xtree(1826): warning C4702: unreachable code #include "NvPreprocessor.h" #if NV_VC && NV_VC < 14 #pragma warning(disable : 4702) #endif #include <NvBlastExtAuthoringBondGeneratorImpl.h> #include <NvBlast.h> #include <NvBlastGlobals.h> #include <NvBlastNvSharedHelpers.h> #include "NvBlastExtTriangleProcessor.h" #include "NvBlastExtApexSharedParts.h" #include "NvBlastExtAuthoringInternalCommon.h" #include "NvBlastExtAuthoringTypes.h" #include <vector> #include <map> #include "NvPlane.h" #include <algorithm> #include <cmath> #include <memory> #include <set> #define SAFE_ARRAY_NEW(T, x) ((x) > 0) ? 
reinterpret_cast<T*>(NVBLAST_ALLOC(sizeof(T) * (x))) : nullptr; //#define DEBUG_OUTPUT #ifdef DEBUG_OUTPUT void saveGeometryToObj(std::vector<NvVec3>& triangles, const char* filepath) { FILE* outStream = fopen(filepath, "w"); for (uint32_t i = 0; i < triangles.size(); ++i) { fprintf(outStream, "v %lf %lf %lf\n", triangles[i].x, triangles[i].y, triangles[i].z); ++i; fprintf(outStream, "v %lf %lf %lf\n", triangles[i].x, triangles[i].y, triangles[i].z); ++i; fprintf(outStream, "v %lf %lf %lf\n", triangles[i].x, triangles[i].y, triangles[i].z); } for (uint32_t i = 0; i < triangles.size() / 3; ++i) { NvVec3 normal = (triangles[3 * i + 2] - triangles[3 * i]).cross((triangles[3 * i + 1] - triangles[3 * i])).getNormalized(); fprintf(outStream, "vn %lf %lf %lf\n", normal.x, normal.y, normal.z); fprintf(outStream, "vn %lf %lf %lf\n", normal.x, normal.y, normal.z); fprintf(outStream, "vn %lf %lf %lf\n", normal.x, normal.y, normal.z); } int indx = 1; for (uint32_t i = 0; i < triangles.size() / 3; ++i) { fprintf(outStream, "f %d//%d ", indx, indx); indx++; fprintf(outStream, "%d//%d ", indx, indx); indx++; fprintf(outStream, "%d//%d \n", indx, indx); indx++; } fclose(outStream); } std::vector<NvVec3> intersectionBuffer; std::vector<NvVec3> meshBuffer; #endif namespace Nv { namespace Blast { #define EPS_PLANE 0.0001f nvidia::NvVec3 getNormal(const Triangle& t) { return toNvShared(t.b.p - t.a.p).cross(toNvShared(t.c.p - t.a.p)); } bool planeComparer(const PlaneChunkIndexer& as, const PlaneChunkIndexer& bs) { const NvcPlane& a = as.plane; const NvcPlane& b = bs.plane; if (a.d + EPS_PLANE < b.d) return true; if (a.d - EPS_PLANE > b.d) return false; if (a.n.x + EPS_PLANE < b.n.x) return true; if (a.n.x - EPS_PLANE > b.n.x) return false; if (a.n.y + EPS_PLANE < b.n.y) return true; if (a.n.y - EPS_PLANE > b.n.y) return false; return a.n.z + EPS_PLANE < b.n.z; } struct Bond { int32_t m_chunkId; int32_t m_planeIndex; int32_t triangleIndex; bool operator<(const Bond& inp) const { if (abs(m_planeIndex) == abs(inp.m_planeIndex)) { return m_chunkId < inp.m_chunkId; } else { return abs(m_planeIndex) < abs(inp.m_planeIndex); } } }; struct BondInfo { float area; nvidia::NvBounds3 m_bb; nvidia::NvVec3 centroid; nvidia::NvVec3 normal; int32_t m_chunkId; }; inline nvidia::NvVec3 getVertex(const Triangle& t, uint32_t i) { return toNvShared((&t.a)[i].p); } void AddTtAnchorPoints(const Triangle* a, const Triangle* b, std::vector<NvVec3>& points) { nvidia::NvVec3 na = getNormal(*a).getNormalized(); nvidia::NvVec3 nb = getNormal(*b).getNormalized(); nvidia::NvPlane pla(toNvShared(a->a.p), na); nvidia::NvPlane plb(toNvShared(b->a.p), nb); ProjectionDirections da = getProjectionDirection(na); ProjectionDirections db = getProjectionDirection(nb); TriangleProcessor prc; TrPrcTriangle2d ta(getProjectedPoint(toNvShared(a->a.p), da), getProjectedPoint(toNvShared(a->b.p), da), getProjectedPoint(toNvShared(a->c.p), da)); TrPrcTriangle2d tb(getProjectedPoint(toNvShared(b->a.p), db), getProjectedPoint(toNvShared(b->b.p), db), getProjectedPoint(toNvShared(b->c.p), db)); /** Compute */ for (uint32_t i = 0; i < 3; ++i) { nvidia::NvVec3 pt; if (getPlaneSegmentIntersection(pla, getVertex(*b, i), getVertex(*b, (i + 1) % 3), pt)) { nvidia::NvVec2 pt2 = getProjectedPoint(pt, da); if (prc.isPointInside(pt2, ta)) { points.push_back(pt); } } if (getPlaneSegmentIntersection(plb, getVertex(*a, i), getVertex(*a, (i + 1) % 3), pt)) { NvVec2 pt2 = getProjectedPoint(pt, db); if (prc.isPointInside(pt2, tb)) { points.push_back(pt); } } } } inline bool 
pointInsidePoly(const NvVec3& pt, const uint8_t* indices, uint16_t indexCount, const NvVec3* verts, const NvVec3& n)
{
    int s = 0;
    for (uint16_t i = 0; i < indexCount; ++i)
    {
        const NvVec3 r0 = verts[indices[i]] - pt;
        const NvVec3 r1 = verts[indices[(i + 1) % indexCount]] - pt;
        const float cn = r0.cross(r1).dot(n);
        const int cns = cn >= 0 ? 1 : -1;
        if (!s)
        {
            s = cns;
        }
        if (cns * s < 0)
        {
            return false;
        }
    }
    return true;
}

void AddPpAnchorPoints(const uint8_t* indicesA, uint16_t indexCountA, const NvVec3* vertsA, const float planeA[4],
                       const uint8_t* indicesB, uint16_t indexCountB, const NvVec3* vertsB, const float planeB[4],
                       std::vector<NvVec3>& points)
{
    NvPlane pla(planeA[0], planeA[1], planeA[2], planeA[3]);
    NvPlane plb(planeB[0], planeB[1], planeB[2], planeB[3]);
    // Intersect polygon A's edges with polygon B's plane; keep points that land inside polygon B
    for (uint16_t iA = 0; iA < indexCountA; ++iA)
    {
        NvVec3 pt;
        if (getPlaneSegmentIntersection(plb, vertsA[indicesA[iA]], vertsA[indicesA[(iA + 1) % indexCountA]], pt))
        {
            if (pointInsidePoly(pt, indicesB, indexCountB, vertsB, plb.n))
            {
                points.push_back(pt);
            }
        }
    }
    // Symmetric pass: intersect polygon B's edges with polygon A's plane
    for (uint16_t iB = 0; iB < indexCountB; ++iB)
    {
        NvVec3 pt;
        if (getPlaneSegmentIntersection(pla, vertsB[indicesB[iB]], vertsB[indicesB[(iB + 1) % indexCountB]], pt))
        {
            if (pointInsidePoly(pt, indicesA, indexCountA, vertsA, pla.n))
            {
                points.push_back(pt);
            }
        }
    }
}

float BlastBondGeneratorImpl::processWithMidplanes(TriangleProcessor* trProcessor, const Triangle* mA, uint32_t mavc,
                                                   const Triangle* mB, uint32_t mbvc,
                                                   const CollisionHull* hull1, const CollisionHull* hull2,
                                                   const std::vector<NvVec3>& hull1p, const std::vector<NvVec3>& hull2p,
                                                   NvVec3& normal, NvVec3& centroid, float maxRelSeparation)
{
    NvBounds3 bounds;
    NvBounds3 aBounds;
    NvBounds3 bBounds;
    bounds.setEmpty();
    aBounds.setEmpty();
    bBounds.setEmpty();
    NvVec3 chunk1Centroid(0, 0, 0);
    NvVec3 chunk2Centroid(0, 0, 0);

    ///////////////////////////////////////////////////////////////////////////////////

    if (hull1p.size() < 4 || hull2p.size() < 4)
    {
        return 0.0f;
    }
    for (uint32_t i = 0; i < hull1p.size(); ++i)
    {
        chunk1Centroid += hull1p[i];
        bounds.include(hull1p[i]);
        aBounds.include(hull1p[i]);
    }
    for (uint32_t i = 0; i < hull2p.size(); ++i)
    {
        chunk2Centroid += hull2p[i];
        bounds.include(hull2p[i]);
        bBounds.include(hull2p[i]);
    }
    chunk1Centroid *= (1.0f / hull1p.size());
    chunk2Centroid *= (1.0f / hull2p.size());

    const float maxSeparation =
        maxRelSeparation * std::sqrt(std::max(aBounds.getExtents().magnitudeSquared(), bBounds.getExtents().magnitudeSquared()));

    Separation separation;
    if (!importerHullsInProximityApexFree(hull1p.size(), hull1p.data(), aBounds, NvTransform(NvIdentity), NvVec3(1, 1, 1),
                                          hull2p.size(), hull2p.data(), bBounds, NvTransform(NvIdentity), NvVec3(1, 1, 1),
                                          2.0f * maxSeparation, &separation))
    {
        return 0.0f;
    }

    const bool have_geometry = (mA != nullptr && mB != nullptr) || (hull1 != nullptr && hull2 != nullptr);

    if (separation.getDistance() > 0 || !have_geometry)  // If chunks don't intersect then use midplane to produce bond,
                                                         // otherwise midplane can be wrong (only if we have geometry)
    {
        // Build first plane interface
        NvPlane midplane = separation.plane;
        if (!midplane.n.isFinite())
        {
            return 0.0f;
        }
        std::vector<NvVec3> interfacePoints;
        float firstCentroidSide = (midplane.distance(chunk1Centroid) > 0) ? 1 : -1;
        float secondCentroidSide = (midplane.distance(chunk2Centroid) > 0) ? 1 : -1;
        for (uint32_t i = 0; i < hull1p.size(); ++i)
        {
            float dst = midplane.distance(hull1p[i]);
            if (dst * firstCentroidSide < maxSeparation)
            {
                interfacePoints.push_back(hull1p[i]);
            }
        }
        for (uint32_t i = 0; i < hull2p.size(); ++i)
        {
            float dst = midplane.distance(hull2p[i]);
            if (dst * secondCentroidSide < maxSeparation)
            {
                interfacePoints.push_back(hull2p[i]);
            }
        }
        std::vector<NvVec3> convexHull;
        trProcessor->buildConvexHull(interfacePoints, convexHull, midplane.n);
        float area = 0;
        NvVec3 centroidLocal(0, 0, 0);
        if (convexHull.size() < 3)
        {
            return 0.0f;
        }
        for (uint32_t i = 0; i < convexHull.size() - 1; ++i)
        {
            centroidLocal += convexHull[i];
            area += (convexHull[i] - convexHull[0]).cross((convexHull[i + 1] - convexHull[0])).magnitude();
        }
        centroidLocal += convexHull.back();
        centroidLocal *= (1.0f / convexHull.size());

        // Orient the bond normal from chunk 1 towards chunk 2
        float direction = midplane.n.dot(chunk2Centroid - chunk1Centroid);
        normal = midplane.n;
        if (direction < 0)
        {
            normal = -normal;
        }
        centroid = centroidLocal;
        return area * 0.5f;
    }
    else
    {
        float area = 0.0f;
        std::vector<NvVec3> intersectionAnchors;
        if (hull1 != nullptr && hull2 != nullptr)  // Use hulls
        {
            for (uint32_t i1 = 0; i1 < hull1->polygonDataCount; ++i1)
            {
                HullPolygon& poly1 = hull1->polygonData[i1];
                for (uint32_t i2 = 0; i2 < hull2->polygonDataCount; ++i2)
                {
                    HullPolygon& poly2 = hull2->polygonData[i2];
                    AddPpAnchorPoints(reinterpret_cast<uint8_t*>(hull1->indices) + poly1.indexBase, poly1.vertexCount,
                                      toNvShared(hull1->points), poly1.plane,
                                      reinterpret_cast<uint8_t*>(hull2->indices) + poly2.indexBase, poly2.vertexCount,
                                      toNvShared(hull2->points), poly2.plane, intersectionAnchors);
                }
            }
        }
        else if (mA != nullptr && mB != nullptr)  // Use triangles
        {
            for (uint32_t i = 0; i < mavc; ++i)
            {
                for (uint32_t j = 0; j < mbvc; ++j)
                {
                    AddTtAnchorPoints(mA + i, mB + j, intersectionAnchors);
                }
            }
        }
        else
        {
            NVBLAST_ASSERT_WITH_MESSAGE(false, "collision hulls and triangle data are both invalid, this shouldn't happen");
            return 0.0f;
        }

        if (intersectionAnchors.size() < 2)
        {
            return 0.0f;
        }
        NvVec3 lcoid(0, 0, 0);
        for (uint32_t i = 0; i < intersectionAnchors.size(); ++i)
        {
            lcoid += intersectionAnchors[i];
        }
        lcoid *= (1.0f / intersectionAnchors.size());
        centroid = lcoid;

        NvVec3 dir1 = intersectionAnchors[0] - lcoid;
        NvVec3 dir2 = chunk2Centroid - chunk1Centroid;  // A more reasonable fallback than (0,0,0)
        float maxMagn = 0.0f;
        float maxDist = 0.0f;
        for (uint32_t j = 0; j < intersectionAnchors.size(); ++j)
        {
            float d = (intersectionAnchors[j] - lcoid).magnitude();
            NvVec3 tempNormal = (intersectionAnchors[j] - lcoid).cross(dir1);
            maxDist = std::max(d, maxDist);
            if (tempNormal.magnitude() > maxMagn)
            {
                maxMagn = tempNormal.magnitude();  // track the largest cross product as the normal candidate
                dir2 = tempNormal;
            }
        }
        normal = dir2.getNormalized();
        area = (maxDist * maxDist) * 3.14f;  // Compute area like circle area;
        return area;
    }
}

struct BondGenerationCandidate
{
    NvVec3 point;
    bool end;
    uint32_t parentChunk;
    uint32_t parentComponent;
    BondGenerationCandidate();
    BondGenerationCandidate(const NvVec3& p, bool isEnd, uint32_t pr, uint32_t c)
        : point(p), end(isEnd), parentChunk(pr), parentComponent(c){};
    bool operator<(const BondGenerationCandidate& in) const
    {
        if (point.x < in.point.x) return true;
        if (point.x > in.point.x) return false;
        if (point.y < in.point.y) return true;
        if (point.y > in.point.y) return false;
        if (point.z < in.point.z) return true;
        if (point.z > in.point.z) return false;
        return end < in.end;
    };
};

int32_t BlastBondGeneratorImpl::createFullBondListAveraged(uint32_t meshCount, const uint32_t* geometryOffset,
                                                           const Triangle* geometry, const CollisionHull** chunkHulls, const bool*
supportFlags, const uint32_t* meshGroups, NvBlastBondDesc*& resultBondDescs, BondGenerationConfig conf, std::set<std::pair<uint32_t, uint32_t> >* pairNotToTest) { std::vector<std::vector<NvcVec3> > chunksPoints(meshCount); std::vector<NvBounds3> bounds(meshCount); if (!chunkHulls) { for (uint32_t i = 0; i < meshCount; ++i) { bounds[i].setEmpty(); if (!supportFlags[i]) { continue; } uint32_t count = geometryOffset[i + 1] - geometryOffset[i]; for (uint32_t j = 0; j < count; ++j) { chunksPoints[i].push_back(geometry[geometryOffset[i] + j].a.p); chunksPoints[i].push_back(geometry[geometryOffset[i] + j].b.p); chunksPoints[i].push_back(geometry[geometryOffset[i] + j].c.p); bounds[i].include(toNvShared(geometry[geometryOffset[i] + j].a.p)); bounds[i].include(toNvShared(geometry[geometryOffset[i] + j].b.p)); bounds[i].include(toNvShared(geometry[geometryOffset[i] + j].c.p)); } } } std::vector<std::vector<std::vector<NvVec3> > > hullPoints(meshCount); std::vector<BondGenerationCandidate> candidates; std::vector<CollisionHull*> tempChunkHulls(meshCount, nullptr); for (uint32_t chunk = 0; chunk < meshCount; ++chunk) { if (!supportFlags[chunk]) { continue; } NvBounds3 bnd(NvBounds3::empty()); uint32_t hullCountForMesh = 0; const CollisionHull** beginChunkHulls = nullptr; if (chunkHulls) { hullCountForMesh = geometryOffset[chunk + 1] - geometryOffset[chunk]; beginChunkHulls = chunkHulls + geometryOffset[chunk]; } else { // build a convex hull and store it in the temp slot tempChunkHulls[chunk] = mConvexMeshBuilder->buildCollisionGeometry(chunksPoints[chunk].size(), chunksPoints[chunk].data()); hullCountForMesh = 1; beginChunkHulls = const_cast<const CollisionHull**>(&tempChunkHulls[chunk]); } hullPoints[chunk].resize(hullCountForMesh); for (uint32_t hull = 0; hull < hullCountForMesh; ++hull) { auto& curHull = hullPoints[chunk][hull]; const uint32_t pointCount = beginChunkHulls[hull]->pointsCount; curHull.resize(pointCount); for (uint32_t i = 0; i < pointCount; ++i) { curHull[i] = toNvShared(beginChunkHulls[hull]->points[i]); bnd.include(curHull[i]); } } float minSide = bnd.getDimensions().abs().minElement(); if (minSide > 0.f) { float scaling = std::max(1.1f, conf.maxSeparation / (minSide)); bnd.scaleFast(scaling); } candidates.push_back( BondGenerationCandidate(bnd.minimum, false, chunk, meshGroups != nullptr ? meshGroups[chunk] : 0)); candidates.push_back( BondGenerationCandidate(bnd.maximum, true, chunk, meshGroups != nullptr ? meshGroups[chunk] : 0)); } std::sort(candidates.begin(), candidates.end()); std::set<uint32_t> listOfActiveChunks; std::vector<std::vector<uint32_t> > possibleBondGraph(meshCount); for (uint32_t idx = 0; idx < candidates.size(); ++idx) { if (!candidates[idx].end) // If new candidate { for (uint32_t activeChunk : listOfActiveChunks) { if (meshGroups != nullptr && (meshGroups[activeChunk] == candidates[idx].parentComponent)) continue; // Don't connect components with itself. possibleBondGraph[activeChunk].push_back(candidates[idx].parentChunk); } listOfActiveChunks.insert(candidates[idx].parentChunk); } else { listOfActiveChunks.erase(candidates[idx].parentChunk); } } TriangleProcessor trProcessor; std::vector<NvBlastBondDesc> mResultBondDescs; for (uint32_t i = 0; i < meshCount; ++i) { const uint32_t ihullCount = hullPoints[i].size(); for (uint32_t tj = 0; tj < possibleBondGraph[i].size(); ++tj) { uint32_t j = possibleBondGraph[i][tj]; auto pr = (i < j) ? 
std::make_pair(i, j) : std::make_pair(j, i); if (pairNotToTest != nullptr && pairNotToTest->find(pr) != pairNotToTest->end()) { continue; // This chunks should not generate bonds. This is used for mixed generation with bondFrom } const uint32_t jhullCount = hullPoints[j].size(); for (uint32_t ihull = 0; ihull < ihullCount; ++ihull) { for (uint32_t jhull = 0; jhull < jhullCount; ++jhull) { NvVec3 normal; NvVec3 centroid; float area = processWithMidplanes( &trProcessor, geometry ? geometry + geometryOffset[i] : nullptr, geometryOffset[i + 1] - geometryOffset[i], geometry ? geometry + geometryOffset[j] : nullptr, geometryOffset[j + 1] - geometryOffset[j], chunkHulls ? chunkHulls[geometryOffset[i] + ihull] : tempChunkHulls[i], chunkHulls ? chunkHulls[geometryOffset[j] + jhull] : tempChunkHulls[j], hullPoints[i][ihull], hullPoints[j][jhull], normal, centroid, conf.maxSeparation); if (area > 0) { NvBlastBondDesc bDesc = NvBlastBondDesc(); bDesc.chunkIndices[0] = i; bDesc.chunkIndices[1] = j; bDesc.bond.area = area; bDesc.bond.centroid[0] = centroid.x; bDesc.bond.centroid[1] = centroid.y; bDesc.bond.centroid[2] = centroid.z; uint32_t maxIndex = std::max(i, j); if ((bounds[maxIndex].getCenter() - centroid).dot(normal) < 0) { normal = -normal; } bDesc.bond.normal[0] = normal.x; bDesc.bond.normal[1] = normal.y; bDesc.bond.normal[2] = normal.z; mResultBondDescs.push_back(bDesc); } } } } } // release any temp hulls allocated for (CollisionHull* tempHullPtr : tempChunkHulls) { if (tempHullPtr) { mConvexMeshBuilder->releaseCollisionHull(tempHullPtr); } } resultBondDescs = SAFE_ARRAY_NEW(NvBlastBondDesc, mResultBondDescs.size()); memcpy(resultBondDescs, mResultBondDescs.data(), sizeof(NvBlastBondDesc) * mResultBondDescs.size()); return mResultBondDescs.size(); } uint32_t isSamePlane(NvcPlane& a, NvcPlane& b) { if (NvAbs(a.d - b.d) > EPS_PLANE) return 0; if (NvAbs(a.n.x - b.n.x) > EPS_PLANE) return 0; if (NvAbs(a.n.y - b.n.y) > EPS_PLANE) return 0; if (NvAbs(a.n.z - b.n.z) > EPS_PLANE) return 0; return 1; } int32_t BlastBondGeneratorImpl::createFullBondListExact(uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry, const bool* supportFlags, NvBlastBondDesc*& resultBondDescs, BondGenerationConfig conf) { std::vector<PlaneChunkIndexer> planeTriangleMapping; NV_UNUSED(conf); for (uint32_t i = 0; i < meshCount; ++i) { if (!supportFlags[i]) { continue; } uint32_t count = geometryOffset[i + 1] - geometryOffset[i]; for (uint32_t j = 0; j < count; ++j) { #ifdef DEBUG_OUTPUT meshBuffer.push_back(geometry[geometryOffset[i] + j].a.p); meshBuffer.push_back(geometry[geometryOffset[i] + j].b.p); meshBuffer.push_back(geometry[geometryOffset[i] + j].c.p); #endif NvcPlane nPlane = fromNvShared(nvidia::NvPlane(toNvShared(geometry[geometryOffset[i] + j].a.p), toNvShared(geometry[geometryOffset[i] + j].b.p), toNvShared(geometry[geometryOffset[i] + j].c.p))); planeTriangleMapping.push_back({ (int32_t)i, (int32_t)j, nPlane }); } } std::sort(planeTriangleMapping.begin(), planeTriangleMapping.end(), planeComparer); return createFullBondListExactInternal(meshCount, geometryOffset, geometry, planeTriangleMapping, resultBondDescs); } void BlastBondGeneratorImpl::buildGeometryCache(uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry) { uint32_t geometryCount = geometryOffset[meshCount]; for (uint32_t i = 0; i < meshCount; i++) { mGeometryCache.push_back(std::vector<Triangle>()); uint32_t count = geometryOffset[i + 1] - geometryOffset[i]; mGeometryCache.back().resize(count); 
memcpy(mGeometryCache.back().data(), geometry + geometryOffset[i], sizeof(Triangle) * count); } mHullsPointsCache.resize(geometryCount); mBoundsCache.resize(geometryCount); mCHullCache.resize(geometryCount); for (uint32_t i = 0; i < mGeometryCache.size(); ++i) { for (uint32_t j = 0; j < mGeometryCache[i].size(); ++j) { NvcPlane nPlane = fromNvShared(nvidia::NvPlane(toNvShared(mGeometryCache[i][j].a.p), toNvShared(mGeometryCache[i][j].b.p), toNvShared(mGeometryCache[i][j].c.p))); mPlaneCache.push_back({ (int32_t)i, (int32_t)j, nPlane }); } } for (uint32_t ch = 0; ch < mGeometryCache.size(); ++ch) { std::vector<NvcVec3> chunksPoints(mGeometryCache[ch].size() * 3); int32_t sp = 0; for (uint32_t i = 0; i < mGeometryCache[ch].size(); ++i) { chunksPoints[sp++] = mGeometryCache[ch][i].a.p; chunksPoints[sp++] = mGeometryCache[ch][i].b.p; chunksPoints[sp++] = mGeometryCache[ch][i].c.p; } mCHullCache[ch] = mConvexMeshBuilder->buildCollisionGeometry(chunksPoints.size(), chunksPoints.data()); mHullsPointsCache[ch].resize(mCHullCache[ch]->pointsCount); mBoundsCache[ch].setEmpty(); for (uint32_t i = 0; i < mCHullCache[ch]->pointsCount; ++i) { mHullsPointsCache[ch][i] = toNvShared(mCHullCache[ch]->points[i]); mBoundsCache[ch].include(mHullsPointsCache[ch][i]); } } } void BlastBondGeneratorImpl::resetGeometryCache() { mGeometryCache.clear(); mPlaneCache.clear(); mHullsPointsCache.clear(); for (auto h : mCHullCache) { mConvexMeshBuilder->releaseCollisionHull(h); } mCHullCache.clear(); mBoundsCache.clear(); } int32_t BlastBondGeneratorImpl::createFullBondListExactInternal(uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry, std::vector<PlaneChunkIndexer>& planeTriangleMapping, NvBlastBondDesc*& resultBondDescs) { NV_UNUSED(meshCount); std::map<std::pair<int32_t, int32_t>, std::pair<NvBlastBondDesc, int32_t> > bonds; TriangleProcessor trPrc; std::vector<NvVec3> intersectionBufferLocal; NvBlastBondDesc cleanBond = NvBlastBondDesc(); memset(&cleanBond, 0, sizeof(NvBlastBondDesc)); for (uint32_t tIndex = 0; tIndex < planeTriangleMapping.size(); ++tIndex) { PlaneChunkIndexer opp = planeTriangleMapping[tIndex]; opp.plane.d *= -1; opp.plane.n = opp.plane.n * - 1; uint32_t startIndex = (uint32_t)(std::lower_bound(planeTriangleMapping.begin(), planeTriangleMapping.end(), opp, planeComparer) - planeTriangleMapping.begin()); uint32_t endIndex = (uint32_t)(std::upper_bound(planeTriangleMapping.begin(), planeTriangleMapping.end(), opp, planeComparer) - planeTriangleMapping.begin()); // uint32_t startIndex = 0; // uint32_t endIndex = (uint32_t)planeTriangleMapping.size(); PlaneChunkIndexer& mappedTr = planeTriangleMapping[tIndex]; const Triangle& trl = geometry[geometryOffset[mappedTr.chunkId] + mappedTr.trId]; NvPlane pln = toNvShared(mappedTr.plane); TrPrcTriangle trp(toNvShared(trl.a.p), toNvShared(trl.b.p), toNvShared(trl.c.p)); NvVec3 trCentroid = toNvShared(trl.a.p + trl.b.p + trl.c.p) * (1.0f / 3.0f); trp.points[0] -= trCentroid; trp.points[1] -= trCentroid; trp.points[2] -= trCentroid; ProjectionDirections pDir = getProjectionDirection(pln.n); TrPrcTriangle2d trp2d; trp2d.points[0] = getProjectedPointWithWinding(trp.points[0], pDir); trp2d.points[1] = getProjectedPointWithWinding(trp.points[1], pDir); trp2d.points[2] = getProjectedPointWithWinding(trp.points[2], pDir); for (uint32_t i = startIndex; i <= endIndex && i < planeTriangleMapping.size(); ++i) { PlaneChunkIndexer& mappedTr2 = planeTriangleMapping[i]; if (mappedTr2.trId == opp.chunkId) { continue; } if (!isSamePlane(opp.plane, 
mappedTr2.plane)) { continue; } if (mappedTr.chunkId == mappedTr2.chunkId) { continue; } std::pair<int32_t, int32_t> bondEndPoints = std::make_pair(mappedTr.chunkId, mappedTr2.chunkId); if (bondEndPoints.second < bondEndPoints.first) continue; std::pair<int32_t, int32_t> bondEndPointsSwapped = std::make_pair(mappedTr2.chunkId, mappedTr.chunkId); if (bonds.find(bondEndPoints) == bonds.end() && bonds.find(bondEndPointsSwapped) != bonds.end()) { continue; // We do not need account interface surface twice } if (bonds.find(bondEndPoints) == bonds.end()) { bonds[bondEndPoints].second = 0; bonds[bondEndPoints].first = cleanBond; bonds[bondEndPoints].first.chunkIndices[0] = bondEndPoints.first; bonds[bondEndPoints].first.chunkIndices[1] = bondEndPoints.second; bonds[bondEndPoints].first.bond.normal[0] = pln.n[0]; bonds[bondEndPoints].first.bond.normal[1] = pln.n[1]; bonds[bondEndPoints].first.bond.normal[2] = pln.n[2]; } const Triangle& trl2 = geometry[geometryOffset[mappedTr2.chunkId] + mappedTr2.trId]; TrPrcTriangle trp2(toNvShared(trl2.a.p), toNvShared(trl2.b.p), toNvShared(trl2.c.p)); intersectionBufferLocal.clear(); intersectionBufferLocal.reserve(32); trPrc.getTriangleIntersection(trp, trp2d, trp2, trCentroid, intersectionBufferLocal, pln.n); NvVec3 centroidPoint(0, 0, 0); int32_t collectedVerticesCount = 0; float area = 0; if (intersectionBufferLocal.size() >= 3) { #ifdef DEBUG_OUTPUT for (uint32_t p = 1; p < intersectionBufferLocal.size() - 1; ++p) { intersectionBuffer.push_back(intersectionBufferLocal[0]); intersectionBuffer.push_back(intersectionBufferLocal[p]); intersectionBuffer.push_back(intersectionBufferLocal[p + 1]); } #endif centroidPoint = intersectionBufferLocal[0] + intersectionBufferLocal.back(); collectedVerticesCount = 2; for (uint32_t j = 1; j < intersectionBufferLocal.size() - 1; ++j) { ++collectedVerticesCount; centroidPoint += intersectionBufferLocal[j]; area += (intersectionBufferLocal[j + 1] - intersectionBufferLocal[0]) .cross(intersectionBufferLocal[j] - intersectionBufferLocal[0]) .magnitude(); } } if (area > 0.00001f) { bonds[bondEndPoints].second += collectedVerticesCount; bonds[bondEndPoints].first.bond.area += area * 0.5f; bonds[bondEndPoints].first.bond.centroid[0] += (centroidPoint.x); bonds[bondEndPoints].first.bond.centroid[1] += (centroidPoint.y); bonds[bondEndPoints].first.bond.centroid[2] += (centroidPoint.z); } } } std::vector<NvBlastBondDesc> mResultBondDescs; for (auto it : bonds) { if (it.second.first.bond.area > 0) { float mlt = 1.0f / (it.second.second); it.second.first.bond.centroid[0] *= mlt; it.second.first.bond.centroid[1] *= mlt; it.second.first.bond.centroid[2] *= mlt; mResultBondDescs.push_back(it.second.first); } } #ifdef DEBUG_OUTPUT saveGeometryToObj(meshBuffer, "Mesh.obj"); saveGeometryToObj(intersectionBuffer, "inter.obj"); #endif resultBondDescs = SAFE_ARRAY_NEW(NvBlastBondDesc, mResultBondDescs.size()); memcpy(resultBondDescs, mResultBondDescs.data(), sizeof(NvBlastBondDesc) * mResultBondDescs.size()); return mResultBondDescs.size(); } int32_t BlastBondGeneratorImpl::createBondForcedInternal(const std::vector<NvVec3>& hull0, const std::vector<NvVec3>& hull1, const CollisionHull& cHull0, const CollisionHull& cHull1, NvBounds3 bound0, NvBounds3 bound1, NvBlastBond& resultBond, float overlapping) { TriangleProcessor trProcessor; Separation separation; importerHullsInProximityApexFree(hull0.size(), hull0.data(), bound0, NvTransform(NvIdentity), NvVec3(1, 1, 1), hull1.size(), hull1.data(), bound1, NvTransform(NvIdentity), NvVec3(1, 1, 1), 
0.000, &separation);
    if (std::isnan(separation.plane.d))
    {
        importerHullsInProximityApexFree(
            hull0.size(), hull0.data(), bound0, NvTransform(NvVec3(0.000001f, 0.000001f, 0.000001f)), NvVec3(1, 1, 1),
            hull1.size(), hull1.data(), bound1, NvTransform(NvIdentity), NvVec3(1, 1, 1), 0.000, &separation);
        if (std::isnan(separation.plane.d))
        {
            return 1;
        }
    }
    NvPlane pl = separation.plane;
    std::vector<NvVec3> ifsPoints[2];

    // Signed distance range [farthest, nearest] of each hull's points from the separating plane
    float dst[2][2];
    dst[0][0] = 0;
    dst[0][1] = MAXIMUM_EXTENT;
    for (uint32_t p = 0; p < cHull0.pointsCount; ++p)
    {
        float d = pl.distance(toNvShared(cHull0.points[p]));
        if (NvAbs(d) > NvAbs(dst[0][0]))
        {
            dst[0][0] = d;
        }
        if (NvAbs(d) < NvAbs(dst[0][1]))
        {
            dst[0][1] = d;
        }
    }
    dst[1][0] = 0;
    dst[1][1] = MAXIMUM_EXTENT;
    for (uint32_t p = 0; p < cHull1.pointsCount; ++p)
    {
        float d = pl.distance(toNvShared(cHull1.points[p]));
        if (NvAbs(d) > NvAbs(dst[1][0]))
        {
            dst[1][0] = d;
        }
        if (NvAbs(d) < NvAbs(dst[1][1]))
        {
            dst[1][1] = d;
        }
    }

    float cvOffset[2] = { dst[0][1] + (dst[0][0] - dst[0][1]) * overlapping,
                          dst[1][1] + (dst[1][0] - dst[1][1]) * overlapping };

    for (uint32_t i = 0; i < cHull0.polygonDataCount; ++i)
    {
        auto& pd = cHull0.polygonData[i];
        NvVec3 result;
        for (uint32_t j = 0; j < pd.vertexCount; ++j)
        {
            uint32_t nxj = (j + 1) % pd.vertexCount;
            const uint32_t* ind = cHull0.indices;
            NvVec3 a = hull0[ind[j + pd.indexBase]] - pl.n * cvOffset[0];
            NvVec3 b = hull0[ind[nxj + pd.indexBase]] - pl.n * cvOffset[0];
            if (getPlaneSegmentIntersection(pl, a, b, result))
            {
                ifsPoints[0].push_back(result);
            }
        }
    }
    for (uint32_t i = 0; i < cHull1.polygonDataCount; ++i)
    {
        auto& pd = cHull1.polygonData[i];
        NvVec3 result;
        for (uint32_t j = 0; j < pd.vertexCount; ++j)
        {
            uint32_t nxj = (j + 1) % pd.vertexCount;
            const uint32_t* ind = cHull1.indices;
            NvVec3 a = hull1[ind[j + pd.indexBase]] - pl.n * cvOffset[1];
            NvVec3 b = hull1[ind[nxj + pd.indexBase]] - pl.n * cvOffset[1];
            if (getPlaneSegmentIntersection(pl, a, b, result))
            {
                ifsPoints[1].push_back(result);
            }
        }
    }

    std::vector<NvVec3> convexes[2];
    trProcessor.buildConvexHull(ifsPoints[0], convexes[0], pl.n);
    trProcessor.buildConvexHull(ifsPoints[1], convexes[1], pl.n);

    float areas[2] = { 0, 0 };
    NvVec3 centroids[2] = { NvVec3(0, 0, 0), NvVec3(0, 0, 0) };
    for (uint32_t cv = 0; cv < 2; ++cv)
    {
        if (convexes[cv].size() == 0)
        {
            continue;
        }
        centroids[cv] = convexes[cv][0] + convexes[cv].back();
        for (uint32_t i = 1; i < convexes[cv].size() - 1; ++i)
        {
            centroids[cv] += convexes[cv][i];
            areas[cv] += (convexes[cv][i + 1] - convexes[cv][0]).cross(convexes[cv][i] - convexes[cv][0]).magnitude();
#ifdef DEBUG_OUTPUT
            intersectionBuffer.push_back(convexes[cv][0]);
            intersectionBuffer.push_back(convexes[cv][i]);
            intersectionBuffer.push_back(convexes[cv][i + 1]);
#endif
        }
        centroids[cv] *= (1.0f / convexes[cv].size());
        areas[cv] = NvAbs(areas[cv]);
    }

    resultBond.area = (areas[0] + areas[1]) * 0.5f;
    resultBond.centroid[0] = (centroids[0][0] + centroids[1][0]) * 0.5f;
    resultBond.centroid[1] = (centroids[0][1] + centroids[1][1]) * 0.5f;
    resultBond.centroid[2] = (centroids[0][2] + centroids[1][2]) * 0.5f;
    resultBond.normal[0] = pl.n[0];
    resultBond.normal[1] = pl.n[1];
    resultBond.normal[2] = pl.n[2];
    resultBond.userData = 0;
#ifdef DEBUG_OUTPUT
    saveGeometryToObj(meshBuffer, "ArbitMeshes.obj");
    saveGeometryToObj(intersectionBuffer, "inter.obj");
#endif
    return 0;
}

int32_t BlastBondGeneratorImpl::buildDescFromInternalFracture(FractureTool* tool, const bool* chunkIsSupport,
                                                              NvBlastBondDesc*& resultBondDescs,
                                                              NvBlastChunkDesc*& resultChunkDescriptors)
{
    uint32_t chunkCount = tool->getChunkCount();
std::vector<uint32_t> trianglesCount(chunkCount); std::vector<std::shared_ptr<Triangle> > trianglesBuffer; for (uint32_t i = 0; i < chunkCount; ++i) { Triangle* t; trianglesCount[i] = tool->getBaseMesh(i, t); trianglesBuffer.push_back(std::shared_ptr<Triangle>(t, [](Triangle* t) { delete[] t; })); } if (chunkCount == 0) { return 0; } resultChunkDescriptors = SAFE_ARRAY_NEW(NvBlastChunkDesc, trianglesBuffer.size()); std::vector<Bond> bondDescriptors; bool hasApproximateBonding = false; for (uint32_t i = 0; i < chunkCount; ++i) { NvBlastChunkDesc& desc = resultChunkDescriptors[i]; desc.userData = tool->getChunkId(i); desc.parentChunkDescIndex = tool->getChunkInfoIndex(tool->getChunkInfo(i).parentChunkId); desc.flags = NvBlastChunkDesc::NoFlags; hasApproximateBonding |= !!(tool->getChunkInfo(i).flags & ChunkInfo::APPROXIMATE_BONDING); if (chunkIsSupport[i]) { desc.flags = NvBlastChunkDesc::SupportFlag; } NvVec3 chunkCentroid(0, 0, 0); for (uint32_t tr = 0; tr < trianglesCount[i]; ++tr) { auto& trRef = trianglesBuffer[i].get()[tr]; chunkCentroid += toNvShared(trRef.a.p); chunkCentroid += toNvShared(trRef.b.p); chunkCentroid += toNvShared(trRef.c.p); int32_t id = trRef.userData; if (id == 0) continue; bondDescriptors.push_back(Bond()); Bond& bond = bondDescriptors.back(); bond.m_chunkId = i; bond.m_planeIndex = id; bond.triangleIndex = tr; } chunkCentroid *= (1.0f / (3 * trianglesCount[i])); desc.centroid[0] = chunkCentroid[0]; desc.centroid[1] = chunkCentroid[1]; desc.centroid[2] = chunkCentroid[2]; } std::sort(bondDescriptors.begin(), bondDescriptors.end()); std::vector<NvBlastBondDesc> mResultBondDescs; if (!bondDescriptors.empty()) { int32_t chunkId, planeId; chunkId = bondDescriptors[0].m_chunkId; planeId = bondDescriptors[0].m_planeIndex; std::vector<BondInfo> forwardChunks; std::vector<BondInfo> backwardChunks; float area = 0; NvVec3 normal(0, 0, 0); NvVec3 centroid(0, 0, 0); int32_t collected = 0; NvBounds3 bb = NvBounds3::empty(); chunkId = -1; planeId = bondDescriptors[0].m_planeIndex; for (uint32_t i = 0; i <= bondDescriptors.size(); ++i) { if (i == bondDescriptors.size() || (chunkId != bondDescriptors[i].m_chunkId || abs(planeId) != abs(bondDescriptors[i].m_planeIndex))) { if (chunkId != -1) { area = 0.5f * normal.normalize(); centroid /= 3.0f * collected; if (bondDescriptors[i - 1].m_planeIndex > 0) { forwardChunks.push_back(BondInfo()); forwardChunks.back().area = area; forwardChunks.back().normal = normal; forwardChunks.back().centroid = centroid; forwardChunks.back().m_chunkId = chunkId; forwardChunks.back().m_bb = bb; } else { backwardChunks.push_back(BondInfo()); backwardChunks.back().area = area; backwardChunks.back().normal = normal; backwardChunks.back().centroid = centroid; backwardChunks.back().m_chunkId = chunkId; backwardChunks.back().m_bb = bb; } } bb.setEmpty(); collected = 0; area = 0; normal = NvVec3(0, 0, 0); centroid = NvVec3(0, 0, 0); if (i != bondDescriptors.size()) chunkId = bondDescriptors[i].m_chunkId; } if (i == bondDescriptors.size() || abs(planeId) != abs(bondDescriptors[i].m_planeIndex)) { for (uint32_t fchunk = 0; fchunk < forwardChunks.size(); ++fchunk) { const BondInfo& fInfo = forwardChunks[fchunk]; if (chunkIsSupport[fInfo.m_chunkId] == false) { continue; } for (uint32_t bchunk = 0; bchunk < backwardChunks.size(); ++bchunk) { const BondInfo& bInfo = backwardChunks[bchunk]; if (weakBoundingBoxIntersection(fInfo.m_bb, bInfo.m_bb) == 0) { continue; } if (chunkIsSupport[bInfo.m_chunkId] == false) { continue; } 
mResultBondDescs.push_back(NvBlastBondDesc()); NvBlastBondDesc& bondDesc = mResultBondDescs.back(); // Use the minimum-area patch for the bond area and centroid if (fInfo.area < bInfo.area) { bondDesc.bond.area = fInfo.area; bondDesc.bond.centroid[0] = fInfo.centroid.x; bondDesc.bond.centroid[1] = fInfo.centroid.y; bondDesc.bond.centroid[2] = fInfo.centroid.z; bondDesc.bond.normal[0] = fInfo.normal.x; bondDesc.bond.normal[1] = fInfo.normal.y; bondDesc.bond.normal[2] = fInfo.normal.z; } else { bondDesc.bond.area = bInfo.area; bondDesc.bond.centroid[0] = bInfo.centroid.x; bondDesc.bond.centroid[1] = bInfo.centroid.y; bondDesc.bond.centroid[2] = bInfo.centroid.z; bondDesc.bond.normal[0] = -bInfo.normal.x; bondDesc.bond.normal[1] = -bInfo.normal.y; bondDesc.bond.normal[2] = -bInfo.normal.z; } bondDesc.chunkIndices[0] = fInfo.m_chunkId; bondDesc.chunkIndices[1] = bInfo.m_chunkId; } } forwardChunks.clear(); backwardChunks.clear(); if (i != bondDescriptors.size()) { planeId = bondDescriptors[i].m_planeIndex; } else { break; } } collected++; auto& trRef = trianglesBuffer[chunkId].get()[bondDescriptors[i].triangleIndex]; normal += getNormal(trRef); centroid += toNvShared(trRef.a.p); centroid += toNvShared(trRef.b.p); centroid += toNvShared(trRef.c.p); bb.include(toNvShared(trRef.a.p)); bb.include(toNvShared(trRef.b.p)); bb.include(toNvShared(trRef.c.p)); } } if (hasApproximateBonding) { std::vector<Triangle> chunkTriangles; std::vector<uint32_t> chunkTrianglesOffsets; std::set<std::pair<uint32_t, uint32_t> > pairsAlreadyCreated; for (uint32_t i = 0; i < mResultBondDescs.size(); ++i) { auto pr = (mResultBondDescs[i].chunkIndices[0] < mResultBondDescs[i].chunkIndices[1]) ? std::make_pair(mResultBondDescs[i].chunkIndices[0], mResultBondDescs[i].chunkIndices[1]) : std::make_pair(mResultBondDescs[i].chunkIndices[1], mResultBondDescs[i].chunkIndices[0]); pairsAlreadyCreated.insert(pr); } const float EXPANSION = 0.01f; chunkTrianglesOffsets.push_back(0); for (uint32_t i = 0; i < chunkCount; ++i) { const float SCALE_FACTOR = 1.001f; NvcVec3 centroid = {resultChunkDescriptors[i].centroid[0], resultChunkDescriptors[i].centroid[1], resultChunkDescriptors[i].centroid[2]}; for (uint32_t k = 0; k < trianglesCount[i]; ++k) { chunkTriangles.push_back(trianglesBuffer[i].get()[k]); // inflate mesh a bit chunkTriangles.back().a.p = chunkTriangles.back().a.p + (chunkTriangles.back().a.p - centroid) * EXPANSION; chunkTriangles.back().b.p = chunkTriangles.back().b.p + (chunkTriangles.back().b.p - centroid) * EXPANSION; chunkTriangles.back().c.p = chunkTriangles.back().c.p + (chunkTriangles.back().c.p - centroid) * EXPANSION; } chunkTrianglesOffsets.push_back(chunkTriangles.size()); } NvBlastBondDesc* adsc; BondGenerationConfig cfg; cfg.bondMode = BondGenerationConfig::AVERAGE; cfg.maxSeparation = EXPANSION; uint32_t nbListSize = createFullBondListAveraged(chunkCount, chunkTrianglesOffsets.data(), chunkTriangles.data(), nullptr, chunkIsSupport, nullptr, adsc, cfg, &pairsAlreadyCreated); for (uint32_t i = 0; i < nbListSize; ++i) { mResultBondDescs.push_back(adsc[i]); } NVBLAST_FREE(adsc); } resultBondDescs = SAFE_ARRAY_NEW(NvBlastBondDesc, mResultBondDescs.size()); memcpy(resultBondDescs, mResultBondDescs.data(), sizeof(NvBlastBondDesc) * mResultBondDescs.size()); return mResultBondDescs.size(); } int32_t BlastBondGeneratorImpl::createBondBetweenMeshes(uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry, uint32_t overlapsCount, const uint32_t* overlapsA, const uint32_t* overlapsB, 
NvBlastBondDesc*& resultBond, BondGenerationConfig cfg) { if (cfg.bondMode == BondGenerationConfig::AVERAGE) { resetGeometryCache(); buildGeometryCache(meshCount, geometryOffset, geometry); } resultBond = SAFE_ARRAY_NEW(NvBlastBondDesc, overlapsCount); if (cfg.bondMode == BondGenerationConfig::EXACT) { for (uint32_t i = 0; i < overlapsCount; ++i) { NvBlastBondDesc& desc = resultBond[i]; desc.chunkIndices[0] = overlapsA[i]; desc.chunkIndices[1] = overlapsB[i]; uint32_t meshACount = geometryOffset[overlapsA[i] + 1] - geometryOffset[overlapsA[i]]; uint32_t meshBCount = geometryOffset[overlapsB[i] + 1] - geometryOffset[overlapsB[i]]; createBondBetweenMeshes(meshACount, geometry + geometryOffset[overlapsA[i]], meshBCount, geometry + geometryOffset[overlapsB[i]], desc.bond, cfg); } } else { for (uint32_t i = 0; i < overlapsCount; ++i) { NvBlastBondDesc& desc = resultBond[i]; desc.chunkIndices[0] = overlapsA[i]; desc.chunkIndices[1] = overlapsB[i]; createBondForcedInternal(mHullsPointsCache[overlapsA[i]], mHullsPointsCache[overlapsB[i]], *mCHullCache[overlapsA[i]], *mCHullCache[overlapsB[i]], mBoundsCache[overlapsA[i]], mBoundsCache[overlapsB[i]], desc.bond, 0.3f); } } return overlapsCount; } int32_t BlastBondGeneratorImpl::createBondBetweenMeshes(uint32_t meshACount, const Triangle* meshA, uint32_t meshBCount, const Triangle* meshB, NvBlastBond& resultBond, BondGenerationConfig conf) { float overlapping = 0.3f; if (conf.bondMode == BondGenerationConfig::EXACT) { std::vector<uint32_t> chunksOffsets = { 0, meshACount, meshACount + meshBCount }; std::vector<Triangle> chunks; chunks.resize(meshACount + meshBCount); memcpy(chunks.data(), meshA, sizeof(Triangle) * meshACount); memcpy(chunks.data() + meshACount, meshB, sizeof(Triangle) * meshBCount); std::shared_ptr<bool> isSupport(new bool[2]{ true, true }, [](bool* b) { delete[] b; }); NvBlastBondDesc* desc; uint32_t descSize = createFullBondListExact(2, chunksOffsets.data(), chunks.data(), isSupport.get(), desc, conf); if (descSize > 0) { resultBond = desc->bond; } else { memset(&resultBond, 0, sizeof(NvBlastBond)); return 1; } return 0; } std::vector<NvcVec3> chunksPoints1(meshACount * 3); std::vector<NvcVec3> chunksPoints2(meshBCount * 3); int32_t sp = 0; for (uint32_t i = 0; i < meshACount; ++i) { chunksPoints1[sp++] = meshA[i].a.p; chunksPoints1[sp++] = meshA[i].b.p; chunksPoints1[sp++] = meshA[i].c.p; #ifdef DEBUG_OUTPUT meshBuffer.push_back(meshA[i].a.p); meshBuffer.push_back(meshA[i].b.p); meshBuffer.push_back(meshA[i].c.p); #endif } sp = 0; for (uint32_t i = 0; i < meshBCount; ++i) { chunksPoints2[sp++] = meshB[i].a.p; chunksPoints2[sp++] = meshB[i].b.p; chunksPoints2[sp++] = meshB[i].c.p; #ifdef DEBUG_OUTPUT meshBuffer.push_back(meshB[i].a.p); meshBuffer.push_back(meshB[i].b.p); meshBuffer.push_back(meshB[i].c.p); #endif } CollisionHull* cHull[2]; cHull[0] = mConvexMeshBuilder->buildCollisionGeometry(chunksPoints1.size(), chunksPoints1.data()); cHull[1] = mConvexMeshBuilder->buildCollisionGeometry(chunksPoints2.size(), chunksPoints2.data()); std::vector<NvVec3> hullPoints[2]; hullPoints[0].resize(cHull[0]->pointsCount); hullPoints[1].resize(cHull[1]->pointsCount); NvBounds3 bb[2]; bb[0].setEmpty(); bb[1].setEmpty(); for (uint32_t cv = 0; cv < 2; ++cv) { for (uint32_t i = 0; i < cHull[cv]->pointsCount; ++i) { hullPoints[cv][i] = toNvShared(cHull[cv]->points[i]); bb[cv].include(hullPoints[cv][i]); } } auto ret = createBondForcedInternal(hullPoints[0], hullPoints[1], *cHull[0], *cHull[1], bb[0], bb[1], resultBond, overlapping); 
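// Release the temporary convex hulls built for the proximity-based (non-EXACT) bond computation.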
mConvexMeshBuilder->releaseCollisionHull(cHull[0]); mConvexMeshBuilder->releaseCollisionHull(cHull[1]); return ret; } int32_t BlastBondGeneratorImpl::bondsFromPrefractured(uint32_t meshCount, const uint32_t* geometryCount, const Triangle* geometry, const bool* chunkIsSupport, NvBlastBondDesc*& resultBondDescs, BondGenerationConfig conf) { int32_t ret_val = 0; switch (conf.bondMode) { case BondGenerationConfig::AVERAGE: ret_val = createFullBondListAveraged(meshCount, geometryCount, geometry, nullptr, chunkIsSupport, nullptr, resultBondDescs, conf); break; case BondGenerationConfig::EXACT: ret_val = createFullBondListExact(meshCount, geometryCount, geometry, chunkIsSupport, resultBondDescs, conf); break; } return ret_val; } int32_t BlastBondGeneratorImpl::bondsFromPrefractured(uint32_t meshCount, const uint32_t* convexHullOffset, const CollisionHull** chunkHulls, const bool* chunkIsSupport, const uint32_t* meshGroups, NvBlastBondDesc*& resultBondDescs, float maxSeparation) { BondGenerationConfig conf; conf.maxSeparation = maxSeparation; conf.bondMode = BondGenerationConfig::AVERAGE; return createFullBondListAveraged(meshCount, convexHullOffset, nullptr, chunkHulls, chunkIsSupport, meshGroups, resultBondDescs, conf); } void BlastBondGeneratorImpl::release() { delete this; } } // namespace Blast } // namespace Nv
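// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the SDK sources): one plausible way
// to generate bonds for a prefractured chunk set with the generator above.
// The bondGenerator pointer and the chunkCount, triangleOffsets, triangles and
// chunkIsSupport arrays are assumed to be supplied by the caller;
// triangleOffsets is assumed to follow the prefix-offset layout used by the
// overlap-based paths in this file.
//
//   Nv::Blast::BondGenerationConfig cfg;
//   cfg.bondMode      = Nv::Blast::BondGenerationConfig::AVERAGE;
//   cfg.maxSeparation = 0.01f;
//
//   NvBlastBondDesc* bonds = nullptr;
//   const int32_t bondCount = bondGenerator->bondsFromPrefractured(
//       chunkCount, triangleOffsets, triangles, chunkIsSupport, bonds, cfg);
//
//   // ... consume 'bonds' (e.g. when filling an NvBlastAssetDesc), then free
//   // the array; NVBLAST_FREE is what the averaged path above uses internally.
//   NVBLAST_FREE(bonds);
// ---------------------------------------------------------------------------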
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringCutoutImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastGlobals.h" #include <NvBlastAssert.h> #include "NvBounds3.h" #include "NvMath.h" #include "NvAssert.h" #include <NvBlastNvSharedHelpers.h> #include "NvBlastExtAuthoringCutoutImpl.h" #include <algorithm> #include <set> #include <map> #include <stack> #define CUTOUT_DISTANCE_THRESHOLD (0.7f) #define CUTOUT_DISTANCE_EPS (0.01f) using namespace Nv::Blast; // Unsigned modulus uint32_t mod(int32_t n, uint32_t modulus) { const int32_t d = n/(int32_t)modulus; const int32_t m = n - d*(int32_t)modulus; return m >= 0 ? (uint32_t)m : (uint32_t)m + modulus; } float square(float x) { return x * x; } // 2D cross product float dotXY(const nvidia::NvVec3& v, const nvidia::NvVec3& w) { return v.x * w.x + v.y * w.y; } // Z-component of cross product float crossZ(const nvidia::NvVec3& v, const nvidia::NvVec3& w) { return v.x * w.y - v.y * w.x; } // z coordinates may be used to store extra info - only deal with x and y float perpendicularDistanceSquared(const nvidia::NvVec3& v0, const nvidia::NvVec3& v1, const nvidia::NvVec3& v2) { const nvidia::NvVec3 base = v2 - v0; const nvidia::NvVec3 leg = v1 - v0; const float baseLen2 = dotXY(base, base); return baseLen2 > NV_EPS_F32 * dotXY(leg, leg) ? square(crossZ(base, leg)) / baseLen2 : 0.0f; } // z coordinates may be used to store extra info - only deal with x and y float perpendicularDistanceSquared(const std::vector< nvidia::NvVec3 >& cutout, uint32_t index) { const uint32_t size = cutout.size(); return perpendicularDistanceSquared(cutout[(index + size - 1) % size], cutout[index], cutout[(index + 1) % size]); } //////////////////////////////////////////////// // ApexShareUtils - Begin //////////////////////////////////////////////// struct BoundsRep { BoundsRep() : type(0) { aabb.setEmpty(); } nvidia::NvBounds3 aabb; uint32_t type; // By default only reports if subtypes are the same, configurable. 
Valid range {0...7} }; struct IntPair { void set(int32_t _i0, int32_t _i1) { i0 = _i0; i1 = _i1; } int32_t i0, i1; static int compare(const void* a, const void* b) { const int32_t diff0 = ((IntPair*)a)->i0 - ((IntPair*)b)->i0; return diff0 ? diff0 : (((IntPair*)a)->i1 - ((IntPair*)b)->i1); } }; struct BoundsInteractions { BoundsInteractions() : bits(0x8040201008040201ULL) {} BoundsInteractions(bool setAll) : bits(setAll ? 0xFFFFFFFFFFFFFFFFULL : 0x0000000000000000ULL) {} bool set(unsigned group1, unsigned group2, bool interacts) { if (group1 >= 8 || group2 >= 8) { return false; } const uint64_t mask = (uint64_t)1 << ((group1 << 3) + group2) | (uint64_t)1 << ((group2 << 3) + group1); if (interacts) { bits |= mask; } else { bits &= ~mask; } return true; } uint64_t bits; }; enum Bounds3Axes { Bounds3X = 1, Bounds3Y = 2, Bounds3Z = 4, Bounds3XY = Bounds3X | Bounds3Y, Bounds3YZ = Bounds3Y | Bounds3Z, Bounds3ZX = Bounds3Z | Bounds3X, Bounds3XYZ = Bounds3X | Bounds3Y | Bounds3Z }; void boundsCalculateOverlaps(std::vector<IntPair>& overlaps, Bounds3Axes axesToUse, const BoundsRep* bounds, uint32_t boundsCount, uint32_t boundsByteStride, const BoundsInteractions& interactions = BoundsInteractions(), bool append = false); void createIndexStartLookup(std::vector<uint32_t>& lookup, int32_t indexBase, uint32_t indexRange, int32_t* indexSource, uint32_t indexCount, uint32_t indexByteStride); /* Index bank - double-sided free list for O(1) borrow/return of unique IDs Type IndexType should be an unsigned integer type or something that can be cast to and from an integer */ template <class IndexType> class IndexBank { public: IndexBank<IndexType>(uint32_t capacity = 0) : indexCount(0), capacityLocked(false) { maxCapacity = calculateMaxCapacity(); reserve_internal(capacity); } // Copy constructor IndexBank<IndexType>(const IndexBank<IndexType>& other) { *this = other; } virtual ~IndexBank<IndexType>() {} // Assignment operator IndexBank<IndexType>& operator = (const IndexBank<IndexType>& other) { indices = other.indices; ranks = other.ranks; maxCapacity = other.maxCapacity; indexCount = other.indexCount; capacityLocked = other.capacityLocked; return *this; } void setIndicesAndRanks(uint16_t* indicesIn, uint16_t* ranksIn, uint32_t capacityIn, uint32_t usedCountIn) { indexCount = usedCountIn; reserve_internal(capacityIn); for (uint32_t i = 0; i < capacityIn; ++i) { indices[i] = indicesIn[i]; ranks[i] = ranksIn[i]; } } void clear(uint32_t capacity = 0, bool used = false) { capacityLocked = false; indices.reset(); ranks.reset(); reserve_internal(capacity); if (used) { indexCount = capacity; indices.resize(capacity); for (IndexType i = (IndexType)0; i < (IndexType)capacity; ++i) { indices[i] = i; } } else { indexCount = 0; } } // Equivalent to calling freeLastUsed() until the used list is empty. void clearFast() { indexCount = 0; } // This is the reserve size. 
The bank can only grow, due to shuffling of indices virtual void reserve(uint32_t capacity) { reserve_internal(capacity); } // If lock = true, keeps bank from automatically resizing void lockCapacity(bool lock) { capacityLocked = lock; } bool isCapacityLocked() const { return capacityLocked; } void setMaxCapacity(uint32_t inMaxCapacity) { // Cannot drop below current capacity, nor above max set by data types maxCapacity = nvidia::NvClamp(inMaxCapacity, capacity(), calculateMaxCapacity()); } uint32_t capacity() const { return indices.size(); } uint32_t usedCount() const { return indexCount; } uint32_t freeCount() const { return capacity() - usedCount(); } // valid from [0] to [size()-1] const IndexType* usedIndices() const { return indices.data(); } // valid from [0] to [free()-1] const IndexType* freeIndices() const { return indices.begin() + usedCount(); } bool isValid(IndexType index) const { return index < (IndexType)capacity(); } bool isUsed(IndexType index) const { return isValid(index) && (ranks[index] < (IndexType)usedCount()); } bool isFree(IndexType index) const { return isValid(index) && !isUsed(); } IndexType getRank(IndexType index) const { return ranks[index]; } // Gets the next available index, if any bool useNextFree(IndexType& index) { if (freeCount() == 0) { if (capacityLocked) { return false; } if (capacity() >= maxCapacity) { return false; } reserve(nvidia::NvClamp(capacity() * 2, (uint32_t)1, maxCapacity)); NVBLAST_ASSERT(freeCount() > 0); } index = indices[indexCount++]; return true; } // Frees the last used index, if any bool freeLastUsed(IndexType& index) { if (usedCount() == 0) { return false; } index = indices[--indexCount]; return true; } // Requests a particular index. If that index is available, it is borrowed and the function // returns true. Otherwise nothing happens and the function returns false. bool use(IndexType index) { if (!indexIsValidForUse(index)) { return false; } IndexType oldRank; placeIndexAtRank(index, (IndexType)indexCount++, oldRank); return true; } bool free(IndexType index) { if (!indexIsValidForFreeing(index)) { return false; } IndexType oldRank; placeIndexAtRank(index, (IndexType)--indexCount, oldRank); return true; } bool useAndReturnRanks(IndexType index, IndexType& newRank, IndexType& oldRank) { if (!indexIsValidForUse(index)) { return false; } newRank = (IndexType)indexCount++; placeIndexAtRank(index, newRank, oldRank); return true; } bool freeAndReturnRanks(IndexType index, IndexType& newRank, IndexType& oldRank) { if (!indexIsValidForFreeing(index)) { return false; } newRank = (IndexType)--indexCount; placeIndexAtRank(index, newRank, oldRank); return true; } protected: bool indexIsValidForUse(IndexType index) { if (!isValid(index)) { if (capacityLocked) { return false; } if (capacity() >= maxCapacity) { return false; } reserve(nvidia::NvClamp(2 * (uint32_t)index, (uint32_t)1, maxCapacity)); NVBLAST_ASSERT(isValid(index)); } return !isUsed(index); } bool indexIsValidForFreeing(IndexType index) { if (!isValid(index)) { // Invalid index return false; } return isUsed(index); } // This is the reserve size. 
The bank can only grow, due to shuffling of indices void reserve_internal(uint32_t capacity) { capacity = std::min(capacity, maxCapacity); const uint32_t oldCapacity = indices.size(); if (capacity > oldCapacity) { indices.resize(capacity); ranks.resize(capacity); for (IndexType i = (IndexType)oldCapacity; i < (IndexType)capacity; ++i) { indices[i] = i; ranks[i] = i; } } } private: void placeIndexAtRank(IndexType index, IndexType newRank, IndexType& oldRank) // returns old rank { const IndexType replacementIndex = indices[newRank]; oldRank = ranks[index]; indices[oldRank] = replacementIndex; indices[newRank] = index; ranks[replacementIndex] = oldRank; ranks[index] = newRank; } uint32_t calculateMaxCapacity() { #pragma warning(push) #pragma warning(disable: 4127) // conditional expression is constant if (sizeof(IndexType) >= sizeof(uint32_t)) { return 0xFFFFFFFF; // Limited by data type we use to report capacity } else { return (1u << (8 * std::min((uint32_t)sizeof(IndexType), 3u))) - 1; // Limited by data type we use for indices } #pragma warning(pop) } protected: std::vector<IndexType> indices; std::vector<IndexType> ranks; uint32_t maxCapacity; uint32_t indexCount; bool capacityLocked; }; struct Marker { float pos; uint32_t id; // lsb = type (0 = max, 1 = min), other bits used for object index void set(float _pos, int32_t _id) { pos = _pos; id = (uint32_t)_id; } }; static int compareMarkers(const void* A, const void* B) { // Sorts by value. If values equal, sorts min types greater than max types, to reduce the # of overlaps const float delta = ((Marker*)A)->pos - ((Marker*)B)->pos; return delta != 0 ? (delta < 0 ? -1 : 1) : ((int)(((Marker*)A)->id & 1) - (int)(((Marker*)B)->id & 1)); } void boundsCalculateOverlaps(std::vector<IntPair>& overlaps, Bounds3Axes axesToUse, const BoundsRep* bounds, uint32_t boundsCount, uint32_t boundsByteStride, const BoundsInteractions& interactions, bool append) { if (!append) { overlaps.clear(); } uint32_t D = 0; uint32_t axisNums[3]; for (unsigned i = 0; i < 3; ++i) { if ((axesToUse >> i) & 1) { axisNums[D++] = i; } } if (D == 0 || D > 3) { return; } std::vector< std::vector<Marker> > axes; axes.resize(D); uint32_t overlapCount[3]; for (uint32_t n = 0; n < D; ++n) { const uint32_t axisNum = axisNums[n]; std::vector<Marker>& axis = axes[n]; overlapCount[n] = 0; axis.resize(2 * boundsCount); uint8_t* boundsPtr = (uint8_t*)bounds; for (uint32_t i = 0; i < boundsCount; ++i, boundsPtr += boundsByteStride) { const BoundsRep& boundsRep = *(const BoundsRep*)boundsPtr; const nvidia::NvBounds3& box = boundsRep.aabb; float min = box.minimum[axisNum]; float max = box.maximum[axisNum]; if (min >= max) { const float mid = 0.5f * (min + max); float pad = 0.000001f * fabsf(mid); min = mid - pad; max = mid + pad; } axis[i << 1].set(min, (int32_t)i << 1 | 1); axis[i << 1 | 1].set(max, (int32_t)i << 1); } qsort(axis.data(), axis.size(), sizeof(Marker), compareMarkers); uint32_t localOverlapCount = 0; for (uint32_t i = 0; i < axis.size(); ++i) { Marker& marker = axis[i]; if (marker.id & 1) { overlapCount[n] += localOverlapCount; ++localOverlapCount; } else { --localOverlapCount; } } } unsigned int axis0; unsigned int axis1; unsigned int axis2; unsigned int maxBin; if (D == 1) { maxBin = 0; axis0 = axisNums[0]; axis1 = axis0; axis2 = axis0; } else if (D == 2) { if (overlapCount[0] < overlapCount[1]) { maxBin = 0; axis0 = axisNums[0]; axis1 = axisNums[1]; axis2 = axis0; } else { maxBin = 1; axis0 = axisNums[1]; axis1 = axisNums[0]; axis2 = axis0; } } else { maxBin = 
overlapCount[0] < overlapCount[1] ? (overlapCount[0] < overlapCount[2] ? 0U : 2U) : (overlapCount[1] < overlapCount[2] ? 1U : 2U); axis0 = axisNums[maxBin]; axis1 = (axis0 + 1) % 3; axis2 = (axis0 + 2) % 3; } const uint64_t interactionBits = interactions.bits; IndexBank<uint32_t> localOverlaps(boundsCount); std::vector<Marker>& axis = axes[maxBin]; float boxMin1 = 0.0f; float boxMax1 = 0.0f; float boxMin2 = 0.0f; float boxMax2 = 0.0f; for (uint32_t i = 0; i < axis.size(); ++i) { Marker& marker = axis[i]; const uint32_t index = marker.id >> 1; if (marker.id & 1) { const BoundsRep& boundsRep = *(const BoundsRep*)((uint8_t*)bounds + index*boundsByteStride); const uint8_t interaction = (uint8_t)((interactionBits >> (boundsRep.type << 3)) & 0xFF); const nvidia::NvBounds3& box = boundsRep.aabb; // These conditionals compile out with optimization: if (D > 1) { boxMin1 = box.minimum[axis1]; boxMax1 = box.maximum[axis1]; if (D == 3) { boxMin2 = box.minimum[axis2]; boxMax2 = box.maximum[axis2]; } } const uint32_t localOverlapCount = localOverlaps.usedCount(); const uint32_t* localOverlapIndices = localOverlaps.usedIndices(); for (uint32_t j = 0; j < localOverlapCount; ++j) { const uint32_t overlapIndex = localOverlapIndices[j]; const BoundsRep& overlapBoundsRep = *(const BoundsRep*)((uint8_t*)bounds + overlapIndex*boundsByteStride); if ((interaction >> overlapBoundsRep.type) & 1) { const nvidia::NvBounds3& overlapBox = overlapBoundsRep.aabb; // These conditionals compile out with optimization: if (D > 1) { if (boxMin1 >= overlapBox.maximum[axis1] || boxMax1 <= overlapBox.minimum[axis1]) { continue; } if (D == 3) { if (boxMin2 >= overlapBox.maximum[axis2] || boxMax2 <= overlapBox.minimum[axis2]) { continue; } } } // Add overlap IntPair pair; pair.i0 = (int32_t)index; pair.i1 = (int32_t)overlapIndex; overlaps.push_back(pair); } } NVBLAST_ASSERT(localOverlaps.isValid(index)); NVBLAST_ASSERT(!localOverlaps.isUsed(index)); localOverlaps.use(index); } else { // Remove local overlap NVBLAST_ASSERT(localOverlaps.isValid(index)); localOverlaps.free(index); } } } void createIndexStartLookup(std::vector<uint32_t>& lookup, int32_t indexBase, uint32_t indexRange, int32_t* indexSource, uint32_t indexCount, uint32_t indexByteStride) { if (indexRange == 0) { lookup.resize(std::max(indexRange + 1, 2u)); lookup[0] = 0; lookup[1] = indexCount; } else { lookup.resize(indexRange + 1); uint32_t indexPos = 0; for (uint32_t i = 0; i < indexRange; ++i) { for (; indexPos < indexCount; ++indexPos, indexSource = (int32_t*)((uintptr_t)indexSource + indexByteStride)) { if (*indexSource >= (int32_t)i + indexBase) { lookup[i] = indexPos; break; } } if (indexPos == indexCount) { lookup[i] = indexPos; } } lookup[indexRange] = indexCount; } } //////////////////////////////////////////////// // ApexShareUtils - End //////////////////////////////////////////////// struct CutoutVert { int32_t cutoutIndex; int32_t vertIndex; void set(int32_t _cutoutIndex, int32_t _vertIndex) { cutoutIndex = _cutoutIndex; vertIndex = _vertIndex; } }; struct NewVertex { CutoutVert vertex; float edgeProj; }; static int compareNewVertices(const void* a, const void* b) { const int32_t cutoutDiff = ((NewVertex*)a)->vertex.cutoutIndex - ((NewVertex*)b)->vertex.cutoutIndex; if (cutoutDiff) { return cutoutDiff; } const int32_t vertDiff = ((NewVertex*)a)->vertex.vertIndex - ((NewVertex*)b)->vertex.vertIndex; if (vertDiff) { return vertDiff; } const float projDiff = ((NewVertex*)a)->edgeProj - ((NewVertex*)b)->edgeProj; return projDiff ? (projDiff < 0.0f ? 
-1 : 1) : 0; } template<typename T> class Map2d { public: Map2d(uint32_t width, uint32_t height) { create_internal(width, height, NULL); } Map2d(uint32_t width, uint32_t height, T fillValue) { create_internal(width, height, &fillValue); } Map2d(const Map2d& map) { *this = map; } Map2d& operator = (const Map2d& map) { mMem.clear(); create_internal(map.mWidth, map.mHeight, NULL); return *this; } void create(uint32_t width, uint32_t height) { return create_internal(width, height, NULL); } void create(uint32_t width, uint32_t height, T fillValue) { create_internal(width, height, &fillValue); } //void clear(const T value) //{ // for (auto it = mMem.begin(); it != mMem.end(); it++) // { // for (auto it2 = it->begin(); it2 != it->end(); it2++) // { // *it2 = value; // } // } //} void setOrigin(uint32_t x, uint32_t y) { mOriginX = x; mOriginY = y; } const T& operator()(int32_t x, int32_t y) const { x = (int32_t)mod(x+(int32_t)mOriginX, mWidth); y = (int32_t)mod(y+(int32_t)mOriginY, mHeight); return mMem[y][x]; } T& operator()(int32_t x, int32_t y) { x = (int32_t)mod(x+(int32_t)mOriginX, mWidth); y = (int32_t)mod(y+(int32_t)mOriginY, mHeight); return mMem[y][x]; } private: void create_internal(uint32_t width, uint32_t height, T* val) { mMem.clear(); mWidth = width; mHeight = height; mMem.resize(mHeight); for (auto it = mMem.begin(); it != mMem.end(); it++) { it->resize(mWidth, val ? *val : 0); } mOriginX = 0; mOriginY = 0; } std::vector<std::vector<T>> mMem; uint32_t mWidth; uint32_t mHeight; uint32_t mOriginX; uint32_t mOriginY; }; class BitMap { public: BitMap() : mMem(NULL) {} BitMap(uint32_t width, uint32_t height) : mMem(NULL) { create_internal(width, height, NULL); } BitMap(uint32_t width, uint32_t height, bool fillValue) : mMem(NULL) { create_internal(width, height, &fillValue); } BitMap(const BitMap& map) { *this = map; } ~BitMap() { delete [] mMem; } BitMap& operator = (const BitMap& map) { delete [] mMem; mMem = NULL; if (map.mMem) { create_internal(map.mWidth, map.mHeight, NULL); memcpy(mMem, map.mMem, mHeight * mRowBytes); } return *this; } void create(uint32_t width, uint32_t height) { return create_internal(width, height, NULL); } void create(uint32_t width, uint32_t height, bool fillValue) { create_internal(width, height, &fillValue); } void clear(bool value) { memset(mMem, value ? 
0xFF : 0x00, mRowBytes * mHeight); } void setOrigin(uint32_t x, uint32_t y) { mOriginX = x; mOriginY = y; } bool read(int32_t x, int32_t y) const { x = (int32_t)mod(x+(int32_t)mOriginX, mWidth); y = (int32_t)mod(y+(int32_t)mOriginY, mHeight); return ((mMem[(x >> 3) + y * mRowBytes] >> (x & 7)) & 1) != 0; } void set(int32_t x, int32_t y) { x = (int32_t)mod(x+(int32_t)mOriginX, mWidth); y = (int32_t)mod(y+(int32_t)mOriginY, mHeight); mMem[(x >> 3) + y * mRowBytes] |= 1 << (x & 7); } void reset(int32_t x, int32_t y) { x = (int32_t)mod(x+(int32_t)mOriginX, mWidth); y = (int32_t)mod(y+(int32_t)mOriginY, mHeight); mMem[(x >> 3) + y * mRowBytes] &= ~(1 << (x & 7)); } private: void create_internal(uint32_t width, uint32_t height, bool* val) { delete [] mMem; mRowBytes = (width + 7) >> 3; const uint32_t bytes = mRowBytes * height; if (bytes == 0) { mWidth = mHeight = 0; mMem = NULL; return; } mWidth = width; mHeight = height; mMem = new uint8_t[bytes]; mOriginX = 0; mOriginY = 0; if (val) { clear(*val); } } uint8_t* mMem; uint32_t mWidth; uint32_t mHeight; uint32_t mRowBytes; uint32_t mOriginX; uint32_t mOriginY; }; int32_t taxicabSine(int32_t i) { // 0 1 1 1 0 -1 -1 -1 return (int32_t)((0x01A9 >> ((i & 7) << 1)) & 3) - 1; } // Only looks at x and y components bool directionsXYOrderedCCW(const nvidia::NvVec3& d0, const nvidia::NvVec3& d1, const nvidia::NvVec3& d2) { const bool ccw02 = crossZ(d0, d2) > 0.0f; const bool ccw01 = crossZ(d0, d1) > 0.0f; const bool ccw21 = crossZ(d2, d1) > 0.0f; return ccw02 ? ccw01 && ccw21 : ccw01 || ccw21; } std::pair<float, float> compareTraceSegmentToLineSegment(const std::vector<POINT2D>& trace, int _start, int delta, float distThreshold, uint32_t width, uint32_t height, bool hasBorder) { if (delta < 2) { return std::make_pair(0.0f, 0.0f); } const uint32_t size = trace.size(); uint32_t start = (uint32_t)_start, end = (uint32_t)(_start + delta) % size; const bool startIsOnBorder = hasBorder && (trace[start].x == -1 || trace[start].x == (int)width || trace[start].y == -1 || trace[start].y == (int)height); const bool endIsOnBorder = hasBorder && (trace[end].x == -1 || trace[end].x == (int)width || trace[end].y == -1 || trace[end].y == (int)height); if (startIsOnBorder || endIsOnBorder) { if ((trace[start].x == -1 && trace[end].x == -1) || (trace[start].y == -1 && trace[end].y == -1) || (trace[start].x == (int)width && trace[end].x == (int)width) || (trace[start].y == (int)height && trace[end].y == (int)height)) { return std::make_pair(0.0f, 0.0f); } return std::make_pair(NV_MAX_F32, NV_MAX_F32); } nvidia::NvVec3 orig((float)trace[start].x, (float)trace[start].y, 0); nvidia::NvVec3 dest((float)trace[end].x, (float)trace[end].y, 0); nvidia::NvVec3 dir = dest - orig; dir.normalize(); float aveError = 0.0f; float aveError2 = 0.0f; for (;;) { if (++start >= size) { start = 0; } if (start == end) { break; } nvidia::NvVec3 testDisp((float)trace[start].x, (float)trace[start].y, 0); testDisp -= orig; aveError += (float)(nvidia::NvAbs(testDisp.x * dir.y - testDisp.y * dir.x) >= distThreshold); aveError2 += nvidia::NvAbs(testDisp.x * dir.y - testDisp.y * dir.x); } aveError /= delta - 1; aveError2 /= delta - 1; return std::make_pair(aveError, aveError2); } // Segment i starts at vi and ends at vi+ei // Tests for overlap in segments' projection onto xy plane // Returns distance between line segments. (Negative value indicates overlap.) 
float segmentsIntersectXY(const nvidia::NvVec3& v0, const nvidia::NvVec3& e0, const nvidia::NvVec3& v1, const nvidia::NvVec3& e1) { const nvidia::NvVec3 dv = v1 - v0; nvidia::NvVec3 d0 = e0; d0.normalize(); nvidia::NvVec3 d1 = e1; d1.normalize(); const float c10 = crossZ(dv, d0); const float d10 = crossZ(e1, d0); float a1 = nvidia::NvAbs(c10); float b1 = nvidia::NvAbs(c10 + d10); if (c10 * (c10 + d10) < 0.0f) { if (a1 < b1) { a1 = -a1; } else { b1 = -b1; } } const float c01 = crossZ(d1, dv); const float d01 = crossZ(e0, d1); float a2 = nvidia::NvAbs(c01); float b2 = nvidia::NvAbs(c01 + d01); if (c01 * (c01 + d01) < 0.0f) { if (a2 < b2) { a2 = -a2; } else { b2 = -b2; } } return nvidia::NvMax(nvidia::NvMin(a1, b1), nvidia::NvMin(a2, b2)); } // If point projects onto segment, returns true and proj is set to a // value in the range [0,1], indicating where along the segment (from v0 to v1) // the projection lies, and dist2 is set to the distance squared from point to // the line segment. Otherwise, returns false. // Note, if v1 = v0, then the function returns true with proj = 0. bool projectOntoSegmentXY(float& proj, float& dist2, const nvidia::NvVec3& point, const nvidia::NvVec3& v0, const nvidia::NvVec3& v1, float margin) { const nvidia::NvVec3 seg = v1 - v0; const nvidia::NvVec3 x = point - v0; const float seg2 = dotXY(seg, seg); const float d = dotXY(x, seg); if (d < 0.0f || d > seg2) { return false; } const float margin2 = margin * margin; const float p = seg2 > 0.0f ? d / seg2 : 0.0f; const float lineDist2 = d * p; if (lineDist2 < margin2) { return false; } const float pPrime = 1.0f - p; const float dPrime = seg2 - d; const float lineDistPrime2 = dPrime * pPrime; if (lineDistPrime2 < margin2) { return false; } proj = p; dist2 = dotXY(x, x) - lineDist2; return true; } bool isOnBorder(const nvidia::NvVec3& v, uint32_t width, uint32_t height) { return v.x < -0.5f || v.x >= width - 0.5f || v.y < -0.5f || v.y >= height - 0.5f; } static void createCutout(Nv::Blast::Cutout& cutout, const std::vector<POINT2D>& trace, float segmentationErrorThreshold, float snapThreshold, uint32_t width, uint32_t height, bool hasBorder) { cutout.vertices.clear(); cutout.smoothingGroups.clear(); std::vector<int> smoothingGroups; const uint32_t traceSize = trace.size(); if (traceSize == 0) { return; // Nothing to do } uint32_t size = traceSize; std::vector<int> vertexIndices; const float pixelCenterOffset = hasBorder ? 
0.5f : 0.0f; // Find best segment uint32_t start = 0; uint32_t delta = 0; float err2 = 0.f; for (uint32_t iStart = 0; iStart < size; ++iStart) { uint32_t iDelta = (size >> 1) + (size & 1); for (; iDelta > 1; --iDelta) { auto fit = compareTraceSegmentToLineSegment(trace, (int32_t)iStart, (int32_t)iDelta, CUTOUT_DISTANCE_THRESHOLD, width, height, hasBorder); if (fit.first < segmentationErrorThreshold) { err2 = fit.second; break; } } if (iDelta > delta) { start = iStart; delta = iDelta; } } if (err2 < segmentationErrorThreshold) { smoothingGroups.push_back(cutout.vertices.size()); } cutout.vertices.push_back(nvidia::NvVec3((float)trace[start].x + pixelCenterOffset, (float)trace[start].y + pixelCenterOffset, 0)); // Now complete the loop while ((size -= delta) > 0) { start = (start + delta) % traceSize; cutout.vertices.push_back(nvidia::NvVec3((float)trace[start].x + pixelCenterOffset, (float)trace[start].y + pixelCenterOffset, 0)); if (size == 1) { delta = 1; break; } bool sg = true; for (delta = size - 1; delta > 1; --delta) { auto fit = compareTraceSegmentToLineSegment(trace, (int32_t)start, (int32_t)delta, CUTOUT_DISTANCE_THRESHOLD, width, height, hasBorder); if (fit.first < segmentationErrorThreshold) { if (fit.second > segmentationErrorThreshold) { sg = false; } break; } } if (sg) { smoothingGroups.push_back(cutout.vertices.size()); } } const float snapThresh2 = square(snapThreshold); // Use the snapThreshold to clean up while ((size = cutout.vertices.size()) >= 4) { bool reduced = false; for (uint32_t i = 0; i < size; ++i) { const uint32_t i1 = (i + 1) % size; const uint32_t i2 = (i + 2) % size; const uint32_t i3 = (i + 3) % size; nvidia::NvVec3& v0 = cutout.vertices[i]; nvidia::NvVec3& v1 = cutout.vertices[i1]; nvidia::NvVec3& v2 = cutout.vertices[i2]; nvidia::NvVec3& v3 = cutout.vertices[i3]; const nvidia::NvVec3 d0 = v1 - v0; const nvidia::NvVec3 d1 = v2 - v1; const nvidia::NvVec3 d2 = v3 - v2; const float den = crossZ(d0, d2); if (den != 0) { const float recipDen = 1.0f / den; const float s0 = crossZ(d1, d2) * recipDen; const float s2 = crossZ(d0, d1) * recipDen; if (s0 >= 0 || s2 >= 0) { if (d0.magnitudeSquared()*s0* s0 <= snapThresh2 && d2.magnitudeSquared()*s2* s2 <= snapThresh2) { v1 += d0 * s0; //uint32_t index = (uint32_t)(&v2 - cutout.vertices.begin()); int dist = std::distance(cutout.vertices.data(), &v2); cutout.vertices.erase(cutout.vertices.begin() + dist); for (auto& idx : smoothingGroups) { if (idx > dist) { idx--; } } reduced = true; break; } } } } if (!reduced) { break; } } for (size_t i = 0; i < smoothingGroups.size(); i++) { if (i > 0 && smoothingGroups[i] == smoothingGroups[i - 1]) { continue; } if (smoothingGroups[i] < static_cast<int>(cutout.vertices.size())) { cutout.smoothingGroups.push_back(cutout.vertices[smoothingGroups[i]]); } } } static void splitTJunctions(Nv::Blast::CutoutSetImpl& cutoutSet, float threshold) { // Set bounds reps std::vector<BoundsRep> bounds; std::vector<CutoutVert> cutoutMap; // maps bounds # -> ( cutout #, vertex # ). 
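// One threshold-fattened AABB is built per cutout edge below; the overlap sweep then yields candidate vertex/segment pairs to test for T-junctions.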
std::vector<IntPair> overlaps; const float distThreshold2 = threshold * threshold; // Split T-junctions uint32_t edgeCount = 0; for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i) { edgeCount += cutoutSet.cutoutLoops[i].vertices.size(); } bounds.resize(edgeCount); cutoutMap.resize(edgeCount); edgeCount = 0; for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i) { Nv::Blast::Cutout& cutout = cutoutSet.cutoutLoops[i]; const uint32_t cutoutSize = cutout.vertices.size(); for (uint32_t j = 0; j < cutoutSize; ++j) { bounds[edgeCount].aabb.include(cutout.vertices[j]); bounds[edgeCount].aabb.include(cutout.vertices[(j + 1) % cutoutSize]); NVBLAST_ASSERT(!bounds[edgeCount].aabb.isEmpty()); bounds[edgeCount].aabb.fattenFast(threshold); cutoutMap[edgeCount].set((int32_t)i, (int32_t)j); ++edgeCount; } } // Find bounds overlaps if (bounds.size() > 0) { boundsCalculateOverlaps(overlaps, Bounds3XY, &bounds[0], bounds.size(), sizeof(bounds[0])); } std::vector<NewVertex> newVertices; for (uint32_t overlapIndex = 0; overlapIndex < overlaps.size(); ++overlapIndex) { const IntPair& mapPair = overlaps[overlapIndex]; const CutoutVert& seg0Map = cutoutMap[(uint32_t)mapPair.i0]; const CutoutVert& seg1Map = cutoutMap[(uint32_t)mapPair.i1]; if (seg0Map.cutoutIndex == seg1Map.cutoutIndex) { // Only split based on vertex/segment junctions from different cutouts continue; } NewVertex newVertex; float dist2 = 0; const Nv::Blast::Cutout& cutout0 = cutoutSet.cutoutLoops[(uint32_t)seg0Map.cutoutIndex]; const uint32_t cutoutSize0 = cutout0.vertices.size(); const Nv::Blast::Cutout& cutout1 = cutoutSet.cutoutLoops[(uint32_t)seg1Map.cutoutIndex]; const uint32_t cutoutSize1 = cutout1.vertices.size(); if (projectOntoSegmentXY(newVertex.edgeProj, dist2, cutout0.vertices[(uint32_t)seg0Map.vertIndex], cutout1.vertices[(uint32_t)seg1Map.vertIndex], cutout1.vertices[(uint32_t)(seg1Map.vertIndex + 1) % cutoutSize1], 0.25f)) { if (dist2 <= distThreshold2) { newVertex.vertex = seg1Map; newVertices.push_back(newVertex); } } if (projectOntoSegmentXY(newVertex.edgeProj, dist2, cutout1.vertices[(uint32_t)seg1Map.vertIndex], cutout0.vertices[(uint32_t)seg0Map.vertIndex], cutout0.vertices[(uint32_t)(seg0Map.vertIndex + 1) % cutoutSize0], 0.25f)) { if (dist2 <= distThreshold2) { newVertex.vertex = seg0Map; newVertices.push_back(newVertex); } } } if (newVertices.size()) { // Sort new vertices qsort(newVertices.data(), newVertices.size(), sizeof(NewVertex), compareNewVertices); // Insert new vertices uint32_t lastCutoutIndex = 0xFFFFFFFF; uint32_t lastVertexIndex = 0xFFFFFFFF; float lastProj = 1.0f; for (uint32_t newVertexIndex = newVertices.size(); newVertexIndex--;) { const NewVertex& newVertex = newVertices[newVertexIndex]; if (newVertex.vertex.cutoutIndex != (int32_t)lastCutoutIndex) { lastCutoutIndex = (uint32_t)newVertex.vertex.cutoutIndex; lastVertexIndex = 0xFFFFFFFF; } if (newVertex.vertex.vertIndex != (int32_t)lastVertexIndex) { lastVertexIndex = (uint32_t)newVertex.vertex.vertIndex; lastProj = 1.0f; } Nv::Blast::Cutout& cutout = cutoutSet.cutoutLoops[(uint32_t)newVertex.vertex.cutoutIndex]; const float proj = lastProj > 0.0f ? 
newVertex.edgeProj / lastProj : 0.0f; const nvidia::NvVec3 pos = (1.0f - proj) * cutout.vertices[(uint32_t)newVertex.vertex.vertIndex] + proj * cutout.vertices[(uint32_t)(newVertex.vertex.vertIndex + 1) % cutout.vertices.size()]; cutout.vertices.push_back(nvidia::NvVec3()); for (uint32_t n = cutout.vertices.size(); --n > (uint32_t)newVertex.vertex.vertIndex + 1;) { cutout.vertices[n] = cutout.vertices[n - 1]; } cutout.vertices[(uint32_t)newVertex.vertex.vertIndex + 1] = pos; lastProj = newVertex.edgeProj; } } } static void mergeVertices(Nv::Blast::CutoutSetImpl& cutoutSet, float threshold, uint32_t width, uint32_t height) { // Set bounds reps uint32_t vertexCount = 0; for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i) { vertexCount += cutoutSet.cutoutLoops[i].vertices.size(); } std::vector<BoundsRep> bounds; std::vector<CutoutVert> cutoutMap; // maps bounds # -> ( cutout #, vertex # ). bounds.resize(vertexCount); cutoutMap.resize(vertexCount); vertexCount = 0; for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i) { Nv::Blast::Cutout& cutout = cutoutSet.cutoutLoops[i]; for (uint32_t j = 0; j < cutout.vertices.size(); ++j) { nvidia::NvVec3& vertex = cutout.vertices[j]; nvidia::NvVec3 min(vertex.x - threshold, vertex.y - threshold, 0.0f); nvidia::NvVec3 max(vertex.x + threshold, vertex.y + threshold, 0.0f); bounds[vertexCount].aabb = nvidia::NvBounds3(min, max); cutoutMap[vertexCount].set((int32_t)i, (int32_t)j); ++vertexCount; } } // Find bounds overlaps std::vector<IntPair> overlaps; if (bounds.size() > 0) { boundsCalculateOverlaps(overlaps, Bounds3XY, &bounds[0], bounds.size(), sizeof(bounds[0])); } uint32_t overlapCount = overlaps.size(); if (overlapCount == 0) { return; } // Sort by first index qsort(overlaps.data(), overlapCount, sizeof(IntPair), IntPair::compare); const float threshold2 = threshold * threshold; std::vector<IntPair> pairs; // Group by first index std::vector<uint32_t> lookup; createIndexStartLookup(lookup, 0, vertexCount, &overlaps.begin()->i0, overlapCount, sizeof(IntPair)); for (uint32_t i = 0; i < vertexCount; ++i) { const uint32_t start = lookup[i]; const uint32_t stop = lookup[i + 1]; if (start == stop) { continue; } const CutoutVert& cutoutVert0 = cutoutMap[(uint32_t)overlaps[start].i0]; const nvidia::NvVec3& vert0 = cutoutSet.cutoutLoops[(uint32_t)cutoutVert0.cutoutIndex].vertices[(uint32_t)cutoutVert0.vertIndex]; const bool isOnBorder0 = !cutoutSet.periodic && isOnBorder(vert0, width, height); for (uint32_t j = start; j < stop; ++j) { const CutoutVert& cutoutVert1 = cutoutMap[(uint32_t)overlaps[j].i1]; if (cutoutVert0.cutoutIndex == cutoutVert1.cutoutIndex) { // No pairs from the same cutout continue; } const nvidia::NvVec3& vert1 = cutoutSet.cutoutLoops[(uint32_t)cutoutVert1.cutoutIndex].vertices[(uint32_t)cutoutVert1.vertIndex]; const bool isOnBorder1 = !cutoutSet.periodic && isOnBorder(vert1, width, height); if (isOnBorder0 != isOnBorder1) { // No border/non-border pairs continue; } if ((vert0 - vert1).magnitudeSquared() > threshold2) { // Distance outside threshold continue; } // A keeper. 
Keep a symmetric list IntPair overlap = overlaps[j]; pairs.push_back(overlap); const int32_t i0 = overlap.i0; overlap.i0 = overlap.i1; overlap.i1 = i0; pairs.push_back(overlap); } } if (pairs.size() == 0) { return; } // Sort by first index qsort(pairs.data(), pairs.size(), sizeof(IntPair), IntPair::compare); // For every vertex, only keep closest neighbor from each cutout createIndexStartLookup(lookup, 0, vertexCount, &pairs.begin()->i0, pairs.size(), sizeof(IntPair)); for (uint32_t i = 0; i < vertexCount; ++i) { const uint32_t start = lookup[i]; const uint32_t stop = lookup[i + 1]; if (start == stop) { continue; } const CutoutVert& cutoutVert0 = cutoutMap[(uint32_t)pairs[start].i0]; const nvidia::NvVec3& vert0 = cutoutSet.cutoutLoops[(uint32_t)cutoutVert0.cutoutIndex].vertices[(uint32_t)cutoutVert0.vertIndex]; uint32_t groupStart = start; while (groupStart < stop) { uint32_t next = groupStart; const CutoutVert& cutoutVert1 = cutoutMap[(uint32_t)pairs[next].i1]; int32_t currentOtherCutoutIndex = cutoutVert1.cutoutIndex; const nvidia::NvVec3& vert1 = cutoutSet.cutoutLoops[(uint32_t)currentOtherCutoutIndex].vertices[(uint32_t)cutoutVert1.vertIndex]; uint32_t keep = groupStart; float minDist2 = (vert0 - vert1).magnitudeSquared(); while (++next < stop) { const CutoutVert& cutoutVertNext = cutoutMap[(uint32_t)pairs[next].i1]; if (currentOtherCutoutIndex != cutoutVertNext.cutoutIndex) { break; } const nvidia::NvVec3& vertNext = cutoutSet.cutoutLoops[(uint32_t)cutoutVertNext.cutoutIndex].vertices[(uint32_t)cutoutVertNext.vertIndex]; const float dist2 = (vert0 - vertNext).magnitudeSquared(); if (dist2 < minDist2) { pairs[keep].set(-1, -1); // Invalidate keep = next; minDist2 = dist2; } else { pairs[next].set(-1, -1); // Invalidate } } groupStart = next; } } // Eliminate invalid pairs (compactify) uint32_t pairCount = 0; for (uint32_t i = 0; i < pairs.size(); ++i) { if (pairs[i].i0 >= 0 && pairs[i].i1 >= 0) { pairs[pairCount++] = pairs[i]; } } pairs.resize(pairCount); // Snap points together std::vector<bool> pinned(vertexCount, false); for (uint32_t i = 0; i < pairCount; ++i) { const uint32_t i0 = (uint32_t)pairs[i].i0; if (pinned[i0]) { continue; } const CutoutVert& cutoutVert0 = cutoutMap[i0]; nvidia::NvVec3& vert0 = cutoutSet.cutoutLoops[(uint32_t)cutoutVert0.cutoutIndex].vertices[(uint32_t)cutoutVert0.vertIndex]; const uint32_t i1 = (uint32_t)pairs[i].i1; const CutoutVert& cutoutVert1 = cutoutMap[i1]; nvidia::NvVec3& vert1 = cutoutSet.cutoutLoops[(uint32_t)cutoutVert1.cutoutIndex].vertices[(uint32_t)cutoutVert1.vertIndex]; const nvidia::NvVec3 disp = vert1 - vert0; // Move and pin pinned[i0] = true; if (pinned[i1]) { vert0 = vert1; } else { vert0 += 0.5f * disp; vert1 = vert0; pinned[i1] = true; } } } static void eliminateStraightAngles(Nv::Blast::CutoutSetImpl& cutoutSet) { // Eliminate straight angles for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i) { Nv::Blast::Cutout& cutout = cutoutSet.cutoutLoops[i]; uint32_t oldSize; do { oldSize = cutout.vertices.size(); for (uint32_t j = 0; j < cutout.vertices.size();) { // if( isOnBorder( cutout.vertices[j], width, height ) ) // { // Don't eliminate border vertices // ++j; // continue; // } if (perpendicularDistanceSquared(cutout.vertices, j) < CUTOUT_DISTANCE_EPS * CUTOUT_DISTANCE_EPS) { cutout.vertices.erase(cutout.vertices.begin() + j); } else { ++j; } } } while (cutout.vertices.size() != oldSize); } } static void removeTheSamePoints(Nv::Blast::CutoutSetImpl& cutoutSet) { for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i) { 
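// Drop any vertex that coincides with its predecessor (within CUTOUT_DISTANCE_EPS), repeating until the loop is stable.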
Nv::Blast::Cutout& cutout = cutoutSet.cutoutLoops[i]; uint32_t oldSize; do { oldSize = cutout.vertices.size(); for (uint32_t j = 0; j < cutout.vertices.size();) { if ((cutout.vertices[(j + cutout.vertices.size() - 1) % cutout.vertices.size()] - cutout.vertices[j]).magnitudeSquared() < CUTOUT_DISTANCE_EPS * CUTOUT_DISTANCE_EPS) { cutout.vertices.erase(cutout.vertices.begin() + j); } else { ++j; } } } while (cutout.vertices.size() != oldSize); } } static void simplifyCutoutSetImpl(Nv::Blast::CutoutSetImpl& cutoutSet, float threshold, uint32_t width, uint32_t height) { splitTJunctions(cutoutSet, 1.0f); mergeVertices(cutoutSet, threshold, width, height); eliminateStraightAngles(cutoutSet); splitTJunctions(cutoutSet, 1.0f); removeTheSamePoints(cutoutSet); } //static void cleanCutout(Nv::Blast::Cutout& cutout, uint32_t loopIndex, float tolerance) //{ // Nv::Blast::ConvexLoop& loop = cutout.convexLoops[loopIndex]; // const float tolerance2 = tolerance * tolerance; // uint32_t oldSize; // do // { // oldSize = loop.polyVerts.size(); // uint32_t size = oldSize; // for (uint32_t i = 0; i < size; ++i) // { // Nv::Blast::PolyVert& v0 = loop.polyVerts[(i + size - 1) % size]; // Nv::Blast::PolyVert& v1 = loop.polyVerts[i]; // Nv::Blast::PolyVert& v2 = loop.polyVerts[(i + 1) % size]; // if (perpendicularDistanceSquared(cutout.vertices[v0.index], cutout.vertices[v1.index], cutout.vertices[v2.index]) <= tolerance2) // { // loop.polyVerts.erase(loop.polyVerts.begin() + i); // --size; // --i; // } // } // } // while (loop.polyVerts.size() != oldSize); //} //static bool decomposeCutoutIntoConvexLoops(Nv::Blast::Cutout& cutout, float cleanupTolerance = 0.0f) //{ // const uint32_t size = cutout.vertices.size(); // // if (size < 3) // { // return false; // } // // // Initialize to one loop, which may not be convex // cutout.convexLoops.resize(1); // cutout.convexLoops[0].polyVerts.resize(size); // // // See if the winding is ccw: // // // Scale to normalized size to avoid overflows // nvidia::NvBounds3 bounds; // bounds.setEmpty(); // for (uint32_t i = 0; i < size; ++i) // { // bounds.include(cutout.vertices[i]); // } // nvidia::NvVec3 center = bounds.getCenter(); // nvidia::NvVec3 extent = bounds.getExtents(); // if (extent[0] < NV_EPS_F32 || extent[1] < NV_EPS_F32) // { // return false; // } // const nvidia::NvVec3 scale(1.0f / extent[0], 1.0f / extent[1], 0.0f); // // // Find "area" (it will only be correct in sign!) // nvidia::NvVec3 prevV = (cutout.vertices[size - 1] - center).multiply(scale); // float area = 0.0f; // for (uint32_t i = 0; i < size; ++i) // { // const nvidia::NvVec3 v = (cutout.vertices[i] - center).multiply(scale); // area += crossZ(prevV, v); // prevV = v; // } // // if (nvidia::NvAbs(area) < NV_EPS_F32 * NV_EPS_F32) // { // return false; // } // // const bool ccw = area > 0.0f; // // for (uint32_t i = 0; i < size; ++i) // { // Nv::Blast::PolyVert& vert = cutout.convexLoops[0].polyVerts[i]; // vert.index = (uint16_t)(ccw ? 
i : size - i - 1); // vert.flags = 0; // } // // const float cleanupTolerance2 = square(cleanupTolerance); // // // Find reflex vertices // for (uint32_t i = 0; i < cutout.convexLoops.size();) // { // Nv::Blast::ConvexLoop& loop = cutout.convexLoops[i]; // const uint32_t loopSize = loop.polyVerts.size(); // if (loopSize <= 3) // { // ++i; // continue; // } // uint32_t j = 0; // for (; j < loopSize; ++j) // { // const nvidia::NvVec3& v0 = cutout.vertices[loop.polyVerts[(j + loopSize - 1) % loopSize].index]; // const nvidia::NvVec3& v1 = cutout.vertices[loop.polyVerts[j].index]; // const nvidia::NvVec3& v2 = cutout.vertices[loop.polyVerts[(j + 1) % loopSize].index]; // const nvidia::NvVec3 e0 = v1 - v0; // if (crossZ(e0, v2 - v1) < 0.0f) // { // // reflex // break; // } // } // if (j < loopSize) // { // // Find a vertex // float minLen2 = NV_MAX_F32; // float maxMinDist = -NV_MAX_F32; // uint32_t kToUse = 0; // uint32_t mToUse = 2; // bool cleanSliceFound = false; // A transversal is parallel with an edge // for (uint32_t k = 0; k < loopSize; ++k) // { // const nvidia::NvVec3& vkPrev = cutout.vertices[loop.polyVerts[(k + loopSize - 1) % loopSize].index]; // const nvidia::NvVec3& vk = cutout.vertices[loop.polyVerts[k].index]; // const nvidia::NvVec3& vkNext = cutout.vertices[loop.polyVerts[(k + 1) % loopSize].index]; // const uint32_t mStop = k ? loopSize : loopSize - 1; // for (uint32_t m = k + 2; m < mStop; ++m) // { // const nvidia::NvVec3& vmPrev = cutout.vertices[loop.polyVerts[(m + loopSize - 1) % loopSize].index]; // const nvidia::NvVec3& vm = cutout.vertices[loop.polyVerts[m].index]; // const nvidia::NvVec3& vmNext = cutout.vertices[loop.polyVerts[(m + 1) % loopSize].index]; // const nvidia::NvVec3 newEdge = vm - vk; // if (!directionsXYOrderedCCW(vk - vkPrev, newEdge, vkNext - vk) || // !directionsXYOrderedCCW(vm - vmPrev, -newEdge, vmNext - vm)) // { // continue; // } // const float len2 = newEdge.magnitudeSquared(); // float minDist = NV_MAX_F32; // for (uint32_t l = 0; l < loopSize; ++l) // { // const uint32_t l1 = (l + 1) % loopSize; // if (l == k || l1 == k || l == m || l1 == m) // { // continue; // } // const nvidia::NvVec3& vl = cutout.vertices[loop.polyVerts[l].index]; // const nvidia::NvVec3& vl1 = cutout.vertices[loop.polyVerts[l1].index]; // const float dist = segmentsIntersectXY(vl, vl1 - vl, vk, newEdge); // if (dist < minDist) // { // minDist = dist; // } // } // if (minDist <= 0.0f) // { // if (minDist > maxMinDist) // { // maxMinDist = minDist; // kToUse = k; // mToUse = m; // } // } // else // { // if (perpendicularDistanceSquared(vkPrev, vk, vm) <= cleanupTolerance2 || // perpendicularDistanceSquared(vk, vm, vmNext) <= cleanupTolerance2) // { // if (!cleanSliceFound) // { // minLen2 = len2; // kToUse = k; // mToUse = m; // } // else // { // if (len2 < minLen2) // { // minLen2 = len2; // kToUse = k; // mToUse = m; // } // } // cleanSliceFound = true; // } // else if (!cleanSliceFound && len2 < minLen2) // { // minLen2 = len2; // kToUse = k; // mToUse = m; // } // } // } // } // cutout.convexLoops.push_back(Nv::Blast::ConvexLoop()); // Nv::Blast::ConvexLoop& newLoop = cutout.convexLoops.back(); // Nv::Blast::ConvexLoop& oldLoop = cutout.convexLoops[i]; // newLoop.polyVerts.resize(mToUse - kToUse + 1); // for (uint32_t n = 0; n <= mToUse - kToUse; ++n) // { // newLoop.polyVerts[n] = oldLoop.polyVerts[kToUse + n]; // } // newLoop.polyVerts[mToUse - kToUse].flags = 1; // Mark this vertex (and edge that follows) as a split edge // oldLoop.polyVerts[kToUse].flags = 1; // 
Mark this vertex (and edge that follows) as a split edge // oldLoop.polyVerts.erase(oldLoop.polyVerts.begin() + kToUse + 1, oldLoop.polyVerts.begin() + (mToUse - (kToUse + 1))); // if (cleanupTolerance > 0.0f) // { // cleanCutout(cutout, i, cleanupTolerance); // cleanCutout(cutout, cutout.convexLoops.size() - 1, cleanupTolerance); // } // } // else // { // if (cleanupTolerance > 0.0f) // { // cleanCutout(cutout, i, cleanupTolerance); // } // ++i; // } // } // // return true; //} static void traceRegion(std::vector<POINT2D>& trace, Map2d<uint32_t>& regions, Map2d<uint8_t>& pathCounts, uint32_t regionIndex, const POINT2D& startPoint) { POINT2D t = startPoint; trace.clear(); trace.push_back(t); ++pathCounts(t.x, t.y); // Increment path count // Find initial path direction int32_t dirN; uint32_t previousRegion = 0xFFFFFFFF; for (dirN = 0; dirN < 8; ++dirN) //TODO Should we start from dirN = 0? { const POINT2D t1 = POINT2D(t.x + taxicabSine(dirN + 2), t.y + taxicabSine(dirN)); if (regions(t1.x, t1.y) != regionIndex && previousRegion == regionIndex) { break; } previousRegion = regions(t1.x, t1.y); } bool done = false; do { for (int32_t i = 1; i < 8; ++i) // Skip direction we just came from { --dirN; const POINT2D t1 = POINT2D(t.x + taxicabSine(dirN + 2), t.y + taxicabSine(dirN)); if (regions(t1.x, t1.y) != regionIndex) { if (t1.x == trace[0].x && t1.y == trace[0].y) { done = true; break; } trace.push_back(t1); t = t1; ++pathCounts(t.x, t.y); // Increment path count dirN += 4; break; } } } while (!done && dirN >= 0); //NvBlast GWD-399: Try to fix bad corners int32_t sz = (int32_t)trace.size(); if (sz > 4) { struct CornerPixel { int32_t id; POINT2D p; CornerPixel(int32_t id, int32_t x, int32_t y) : id(id), p(x, y) { } }; std::vector <CornerPixel> cp; int32_t xb = 0, yb = 0; //bit buffer stores 1 if value do not changed from preview point and 0 otherwise (5 bits is used) for (int32_t i = -4; i < sz; i++) //fill buffer with 4 elements from the end of trace { //idx, idx - 1, idx - 2, idx - 3 values with correct indexing to trace int32_t idx = (sz + i) % sz, idx_ = (sz + i - 1) % sz, idx__ = (sz + i - 2) % sz, idx___ = (sz + i - 3) % sz; //update buffer xb <<= 1; yb <<= 1; xb += (trace[idx].x - trace[idx_].x) == 0; yb += (trace[idx].y - trace[idx_].y) == 0; //filter buffer for 11100-00111 or 00111-11100 corner patterns if (i >= 0 && ((xb & 0x1F) ^ (yb & 0x1F)) == 0x1B) { if ((xb & 3) == 3) { if (((yb >> 3) & 3) == 3) { cp.push_back(CornerPixel(idx__, trace[idx].x, trace[idx___].y)); } } else if ((yb & 3) == 3) { if (((xb >> 3) & 3) == 3) { cp.push_back(CornerPixel(idx__, trace[idx___].x, trace[idx].y)); } } } } std::sort(cp.begin(), cp.end(), [](const CornerPixel& cp1, const CornerPixel& cp2) -> bool { return cp1.id > cp2.id; }); for (auto it = cp.begin(); it != cp.end(); it++) { trace.insert(trace.begin() + it->id, it->p); ++pathCounts(it->p.x, it->p.y); } } } void Nv::Blast::createCutoutSet(Nv::Blast::CutoutSetImpl& cutoutSet, const uint8_t* pixelBuffer, uint32_t bufferWidth, uint32_t bufferHeight, float segmentationErrorThreshold, float snapThreshold, bool periodic, bool expandGaps) { cutoutSet.cutouts.clear(); cutoutSet.cutoutLoops.clear(); cutoutSet.periodic = periodic; cutoutSet.dimensions = nvidia::NvVec2((float)bufferWidth, (float)bufferHeight); if (!periodic) { cutoutSet.dimensions[0] += 1.0f; cutoutSet.dimensions[1] += 1.0f; } if (pixelBuffer == NULL || bufferWidth == 0 || bufferHeight == 0) { return; } const int borderPad = periodic ? 
0 : 2; // Padded for borders if not periodic const int originCoord = periodic ? 0 : 1; BitMap map(bufferWidth + borderPad, bufferHeight + borderPad, 0); map.setOrigin((uint32_t)originCoord, (uint32_t)originCoord); bool hasBorder = false; for (uint32_t y = 0; y < bufferHeight; ++y) { for (uint32_t x = 0; x < bufferWidth; ++x) { const uint32_t pix = 5033165 * (uint32_t)pixelBuffer[0] + 9898557 * (uint32_t)pixelBuffer[1] + 1845494 * (uint32_t)pixelBuffer[2]; pixelBuffer += 3; if ((pix >> 28) != 0) { map.set((int32_t)x, (int32_t)y); hasBorder = true; } } } // Add borders if not tiling if (!periodic) { for (int32_t x = -1; x <= (int32_t)bufferWidth; ++x) { map.set(x, -1); map.set(x, (int32_t)bufferHeight); } for (int32_t y = -1; y <= (int32_t)bufferHeight; ++y) { map.set(-1, y); map.set((int32_t)bufferWidth, y); } } // Now search for regions // Create a region map Map2d<uint32_t> regions(bufferWidth + borderPad, bufferHeight + borderPad, 0xFFFFFFFF); // Initially an invalid value regions.setOrigin((uint32_t)originCoord, (uint32_t)originCoord); // Create a path counting map Map2d<uint8_t> pathCounts(bufferWidth + borderPad, bufferHeight + borderPad, 0); pathCounts.setOrigin((uint32_t)originCoord, (uint32_t)originCoord); // Bump path counts on borders if (!periodic) { for (int32_t x = -1; x <= (int32_t)bufferWidth; ++x) { pathCounts(x, -1) = 1; pathCounts(x, (int32_t)bufferHeight) = 1; } for (int32_t y = -1; y <= (int32_t)bufferHeight; ++y) { pathCounts(-1, y) = 1; pathCounts((int32_t)bufferWidth, y) = 1; } } std::vector<POINT2D> stack; std::vector<uint32_t> newCutout; std::vector<POINT2D> traceStarts; std::vector<std::vector<POINT2D>* > traces; std::set<uint64_t> regionBoundary; // Initial fill of region maps and path maps for (int32_t y = 0; y < (int32_t)bufferHeight; ++y) { for (int32_t x = 0; x < (int32_t)bufferWidth; ++x) { if (map.read(x - 1, y) && !map.read(x, y)) { // Found an empty spot next to a filled spot POINT2D t(x - 1, y); const uint32_t regionIndex = traceStarts.size(); newCutout.push_back(traces.size()); traceStarts.push_back(t); // Save off initial point traces.push_back(new std::vector<POINT2D>()); NVBLAST_ASSERT(traces.size() == traceStarts.size()); // This must be the same size as traceStarts //traces.back() = (std::vector<POINT2D>*)NVBLAST_ALLOC(sizeof(std::vector<POINT2D>), NV_DEBUG_EXP("CutoutPoint2DSet")); //new(traces.back()) std::vector<POINT2D>; // Flood fill region map std::set<uint64_t> visited; stack.push_back(POINT2D(x, y)); #define COMPRESS(x, y) (((uint64_t)(x) << 32) + (y)) visited.insert(COMPRESS(x, y)); do { const POINT2D s = stack.back(); stack.pop_back(); map.set(s.x, s.y); regions(s.x, s.y) = regionIndex; POINT2D n; for (int32_t i = 0; i < 4; ++i) { const int32_t i0 = i & 1; const int32_t i1 = (i >> 1) & 1; n.x = s.x + i0 - i1; n.y = s.y + i0 + i1 - 1; if (visited.find(COMPRESS(n.x, n.y)) == visited.end()) { if (!map.read(n.x, n.y)) { stack.push_back(n); visited.insert(COMPRESS(n.x, n.y)); } else { regionBoundary.insert(COMPRESS(n.x, n.y)); } } } } while (stack.size()); // Trace region NVBLAST_ASSERT(map.read(t.x, t.y)); std::vector<POINT2D>* trace = traces.back(); traceRegion(*trace, regions, pathCounts, regionIndex, t); //Find innner traces while(true) { for (auto& point : *trace) { regionBoundary.erase(COMPRESS(point.x, point.y)); } if (trace->size() < 4) { trace->~vector<POINT2D>(); delete trace; traces.pop_back(); traceStarts.pop_back(); } if (!regionBoundary.empty()) { auto it = regionBoundary.begin(); t.x = *it >> 32; t.y = *it & 0xFFFFFFFF; 
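// Some boundary pixels of this region were not covered by the outer trace: trace the inner (hole) boundary they belong to as an additional loop.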
traces.push_back(new std::vector<POINT2D>()); traceStarts.push_back(t); trace = traces.back(); traceRegion(*trace, regions, pathCounts, regionIndex, t); continue; } break; } #undef COMPRESS } } } uint32_t cutoutCount = traces.size(); //find internal traces // Now expand regions until the paths completely overlap if (expandGaps) { bool somePathChanged; int sanityCounter = 1000; bool abort = false; do { somePathChanged = false; for (uint32_t i = 0; i < cutoutCount; ++i) { if (traces[i] == nullptr) { continue; } uint32_t regionIndex = 0; for (uint32_t c : newCutout) { if (i >= c) { regionIndex = c; } else { break; } } bool pathChanged = false; std::vector<POINT2D>& trace = *traces[i]; for (size_t j = 0; j < trace.size(); ++j) { const POINT2D& t = trace[j]; if (pathCounts(t.x, t.y) == 1) { if (regions(t.x, t.y) == 0xFFFFFFFF) { regions(t.x, t.y) = regionIndex; pathChanged = true; } else { trace.erase(trace.begin() + j--); } } } if (pathChanged) { // Recalculate cutout // Decrement pathCounts for (uint32_t j = 0; j < trace.size(); ++j) { const POINT2D& t = trace[j]; --pathCounts(t.x, t.y); } // Erase trace // Calculate new start point POINT2D& t = traceStarts[i]; POINT2D t1 = t; abort = true; for (int32_t dirN = 0; dirN < 8; ++dirN) { t1 = POINT2D(t.x + taxicabSine(dirN + 2), t.y + taxicabSine(dirN)); if (regions(t1.x, t1.y) != regionIndex) { t = t1; abort = false; break; } } if (abort) { break; } traceRegion(trace, regions, pathCounts, regionIndex, t); somePathChanged = true; } } if (--sanityCounter <= 0) { abort = true; break; } } while (somePathChanged); if (abort) { for (uint32_t i = 0; i < cutoutCount; ++i) { traces[i]->~vector<POINT2D>(); delete traces[i]; } cutoutCount = 0; } } // Create cutouts cutoutSet.cutouts = newCutout; cutoutSet.cutouts.push_back(cutoutCount); cutoutSet.cutoutLoops.resize(cutoutCount); for (uint32_t i = 0; i < cutoutCount; ++i) { createCutout(cutoutSet.cutoutLoops[i], *traces[i], segmentationErrorThreshold, snapThreshold, bufferWidth, bufferHeight, !cutoutSet.periodic); } if (expandGaps) { simplifyCutoutSetImpl(cutoutSet, snapThreshold, bufferWidth, bufferHeight); } // Release traces for (uint32_t i = 0; i < cutoutCount; ++i) { if (traces[i] != nullptr) { traces[i]->~vector<POINT2D>(); delete traces[i]; } } // Decompose each cutout in the set into convex loops //uint32_t cutoutSetSize = 0; //for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i) //{ // bool success = decomposeCutoutIntoConvexLoops(cutoutSet.cutoutLoops[i]); // if (success) // { // if (cutoutSetSize != i) // { // cutoutSet.cutouts[cutoutSetSize] = cutoutSet.cutoutLoops[i]; // } // ++cutoutSetSize; // } //} //cutoutSet.cutoutLoops.resize(cutoutSetSize); //Check if single cutout spread to the whole area for non periodic (no need to cutout then) if (!periodic && cutoutSet.cutoutLoops.size() == 1 && (expandGaps || !hasBorder)) { cutoutSet.cutoutLoops.clear(); } } class Matrix22 { public: //! Default constructor Matrix22() {} //! Construct from two base vectors Matrix22(const nvidia::NvVec2& col0, const nvidia::NvVec2& col1) : column0(col0), column1(col1) {} //! Construct from float[4] explicit Matrix22(float values[]): column0(values[0],values[1]), column1(values[2],values[3]) { } //! Copy constructor Matrix22(const Matrix22& other) : column0(other.column0), column1(other.column1) {} //! Assignment operator Matrix22& operator=(const Matrix22& other) { column0 = other.column0; column1 = other.column1; return *this; } //! 
Set to identity matrix static Matrix22 createIdentity() { return Matrix22(nvidia::NvVec2(1,0), nvidia::NvVec2(0,1)); } //! Set to zero matrix static Matrix22 createZero() { return Matrix22(nvidia::NvVec2(0.0f), nvidia::NvVec2(0.0f)); } //! Construct from diagonal, off-diagonals are zero. static Matrix22 createDiagonal(const nvidia::NvVec2& d) { return Matrix22(nvidia::NvVec2(d.x,0.0f), nvidia::NvVec2(0.0f,d.y)); } //! Get transposed matrix Matrix22 getTranspose() const { const nvidia::NvVec2 v0(column0.x, column1.x); const nvidia::NvVec2 v1(column0.y, column1.y); return Matrix22(v0,v1); } //! Get the real inverse Matrix22 getInverse() const { const float det = getDeterminant(); Matrix22 inverse; if(det != 0) { const float invDet = 1.0f/det; inverse.column0[0] = invDet * column1[1]; inverse.column0[1] = invDet * (-column0[1]); inverse.column1[0] = invDet * (-column1[0]); inverse.column1[1] = invDet * column0[0]; return inverse; } else { return createIdentity(); } } //! Get determinant float getDeterminant() const { return column0[0] * column1[1] - column0[1] * column1[0]; } //! Unary minus Matrix22 operator-() const { return Matrix22(-column0, -column1); } //! Add Matrix22 operator+(const Matrix22& other) const { return Matrix22( column0+other.column0, column1+other.column1); } //! Subtract Matrix22 operator-(const Matrix22& other) const { return Matrix22( column0-other.column0, column1-other.column1); } //! Scalar multiplication Matrix22 operator*(float scalar) const { return Matrix22(column0*scalar, column1*scalar); } //! Matrix vector multiplication (returns 'this->transform(vec)') nvidia::NvVec2 operator*(const nvidia::NvVec2& vec) const { return transform(vec); } //! Matrix multiplication Matrix22 operator*(const Matrix22& other) const { //Rows from this <dot> columns from other //column0 = transform(other.column0) etc return Matrix22(transform(other.column0), transform(other.column1)); } // a <op>= b operators //! Equals-add Matrix22& operator+=(const Matrix22& other) { column0 += other.column0; column1 += other.column1; return *this; } //! Equals-sub Matrix22& operator-=(const Matrix22& other) { column0 -= other.column0; column1 -= other.column1; return *this; } //! Equals scalar multiplication Matrix22& operator*=(float scalar) { column0 *= scalar; column1 *= scalar; return *this; } //! Element access, mathematical way! float operator()(unsigned int row, unsigned int col) const { return (*this)[col][(int)row]; } //! Element access, mathematical way! float& operator()(unsigned int row, unsigned int col) { return (*this)[col][(int)row]; } // Transform etc //! Transform vector by matrix, equal to v' = M*v nvidia::NvVec2 transform(const nvidia::NvVec2& other) const { return column0*other.x + column1*other.y; } nvidia::NvVec2& operator[](unsigned int num) {return (&column0)[num];} const nvidia::NvVec2& operator[](unsigned int num) const {return (&column0)[num];} //Data, see above for format! 
nvidia::NvVec2 column0, column1; //the two base vectors }; bool calculateUVMapping(const Nv::Blast::Triangle& triangle, nvidia::NvMat33& theResultMapping) { nvidia::NvMat33 rMat; nvidia::NvMat33 uvMat; for (unsigned col = 0; col < 3; ++col) { auto v = (&triangle.a)[col]; rMat[col] = toNvShared(v.p); uvMat[col] = nvidia::NvVec3(v.uv[0].x, v.uv[0].y, 1.0f); } if (uvMat.getDeterminant() == 0.0f) { return false; } theResultMapping = rMat*uvMat.getInverse(); return true; } //static bool calculateUVMapping(ExplicitHierarchicalMesh& theHMesh, const nvidia::NvVec3& theDir, nvidia::NvMat33& theResultMapping) //{ // nvidia::NvVec3 cutoutDir( theDir ); // cutoutDir.normalize( ); // // const float cosineThreshold = nvidia::NvCos(3.141593f / 180); // 1 degree // // ExplicitRenderTriangle* triangleToUse = NULL; // float greatestCosine = -NV_MAX_F32; // float greatestArea = 0.0f; // for normals within the threshold // for ( uint32_t partIndex = 0; partIndex < theHMesh.partCount(); ++partIndex ) // { // ExplicitRenderTriangle* theTriangles = theHMesh.meshTriangles( partIndex ); // uint32_t triangleCount = theHMesh.meshTriangleCount( partIndex ); // for ( uint32_t tIndex = 0; tIndex < triangleCount; ++tIndex ) // { // ExplicitRenderTriangle& theTriangle = theTriangles[tIndex]; // nvidia::NvVec3 theEdge1 = theTriangle.vertices[1].position - theTriangle.vertices[0].position; // nvidia::NvVec3 theEdge2 = theTriangle.vertices[2].position - theTriangle.vertices[0].position; // nvidia::NvVec3 theNormal = theEdge1.cross( theEdge2 ); // float theArea = theNormal.normalize(); // twice the area, but that's ok // // if (theArea == 0.0f) // { // continue; // } // // const float cosine = cutoutDir.dot(theNormal); // // if (cosine < cosineThreshold) // { // if (cosine > greatestCosine && greatestArea == 0.0f) // { // greatestCosine = cosine; // triangleToUse = &theTriangle; // } // } // else // { // if (theArea > greatestArea) // { // greatestArea = theArea; // triangleToUse = &theTriangle; // } // } // } // } // // if (triangleToUse == NULL) // { // return false; // } // // return calculateUVMapping(*triangleToUse, theResultMapping); //} //bool calculateCutoutUVMapping(ExplicitHierarchicalMesh& hMesh, const nvidia::NvVec3& targetDirection, nvidia::NvMat33& theMapping) //{ // return ::calculateUVMapping(hMesh, targetDirection, theMapping); //} //bool calculateCutoutUVMapping(const Nv::Blast::Triangle& targetDirection, nvidia::NvMat33& theMapping) //{ // return ::calculateUVMapping(targetDirection, theMapping); //} const NvcVec3& CutoutSetImpl::getCutoutVertex(uint32_t cutoutIndex, uint32_t loopIndex, uint32_t vertexIndex) const { return fromNvShared(cutoutLoops[cutouts[cutoutIndex] + loopIndex].vertices[vertexIndex]); } const NvcVec2& CutoutSetImpl::getDimensions() const { return fromNvShared(dimensions); }
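// Illustrative note (added commentary, not part of the original source): calculateUVMapping()
// above returns M = rMat * uvMat^-1, where rMat's columns are the triangle's vertex positions
// and uvMat's columns are the homogeneous UVs (u, v, 1). By construction M * (u_i, v_i, 1) = p_i
// for each vertex, so M maps a point expressed in UV space onto the triangle's plane. A
// hypothetical caller (local names are assumptions, not SDK API) could use it like this:
//
//     nvidia::NvMat33 uvToWorld;
//     if (calculateUVMapping(tri, uvToWorld))
//     {
//         const nvidia::NvVec3 onPlane = uvToWorld * nvidia::NvVec3(u, v, 1.0f);
//     }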
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringCutoutImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTAUTHORINGFCUTOUTIMPL_H #define NVBLASTAUTHORINGFCUTOUTIMPL_H #include "NvBlastExtAuthoringCutout.h" #include <vector> #include "NvVec2.h" #include "NvVec3.h" #include "NvMat44.h" namespace Nv { namespace Blast { struct PolyVert { uint16_t index; uint16_t flags; }; struct ConvexLoop { std::vector<PolyVert> polyVerts; }; struct Cutout { std::vector<nvidia::NvVec3> vertices; //std::vector<ConvexLoop> convexLoops; std::vector<nvidia::NvVec3> smoothingGroups; }; struct POINT2D { POINT2D() {} POINT2D(int32_t _x, int32_t _y) : x(_x), y(_y) {} int32_t x; int32_t y; bool operator==(const POINT2D& other) const { return x == other.x && y == other.y; } bool operator<(const POINT2D& other) const { if (x == other.x) return y < other.y; return x < other.x; } }; struct CutoutSetImpl : public CutoutSet { CutoutSetImpl() : periodic(false), dimensions(0.0f) { } uint32_t getCutoutCount() const { return (uint32_t)cutouts.size() - 1; } uint32_t getCutoutVertexCount(uint32_t cutoutIndex, uint32_t loopIndex) const { return (uint32_t)cutoutLoops[cutouts[cutoutIndex] + loopIndex].vertices.size(); } uint32_t getCutoutLoopCount(uint32_t cutoutIndex) const { return (uint32_t)cutouts[cutoutIndex + 1] - cutouts[cutoutIndex]; } const NvcVec3& getCutoutVertex(uint32_t cutoutIndex, uint32_t loopIndex, uint32_t vertexIndex) const; bool isCutoutVertexToggleSmoothingGroup(uint32_t cutoutIndex, uint32_t loopIndex, uint32_t vertexIndex) const { auto& vRef = cutoutLoops[cutouts[cutoutIndex] + loopIndex].vertices[vertexIndex]; for (auto& v : cutoutLoops[cutouts[cutoutIndex] + loopIndex].smoothingGroups) { if ((vRef - v).magnitudeSquared() < 1e-5) { return true; } } return false; } bool isPeriodic() const { return periodic; } const NvcVec2& getDimensions() const; //void serialize(nvidia::NvFileBuf& stream) const; //void deserialize(nvidia::NvFileBuf& stream); void release() { delete this; } std::vector<Cutout> cutoutLoops; std::vector<uint32_t> cutouts; bool periodic; nvidia::NvVec2 dimensions; }; void 
createCutoutSet(Nv::Blast::CutoutSetImpl& cutoutSet, const uint8_t* pixelBuffer, uint32_t bufferWidth, uint32_t bufferHeight, float segmentationErrorThreshold, float snapThreshold, bool periodic, bool expandGaps); } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTAUTHORINGFCUTOUTIMPL_H
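// Illustrative note (added commentary, not part of the original header): `cutouts` stores the
// first loop index of each cutout plus one trailing entry holding the total loop count, so
// cutout i owns loops [cutouts[i], cutouts[i + 1]) inside `cutoutLoops`. A hypothetical
// traversal through the public accessors, given a CutoutSetImpl `set` (variable names are
// assumptions):
//
//     for (uint32_t c = 0; c < set.getCutoutCount(); ++c)
//         for (uint32_t l = 0; l < set.getCutoutLoopCount(c); ++l)
//             for (uint32_t v = 0; v < set.getCutoutVertexCount(c, l); ++v)
//             {
//                 const NvcVec3& vert = set.getCutoutVertex(c, l, v);
//             }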
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringCollisionBuilderImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include <NvBlastGlobals.h> #include "NvBlastExtAuthoringCollisionBuilderImpl.h" #include <NvBlastExtApexSharedParts.h> #include <NvBlastExtAuthoringInternalCommon.h> #include <NvBlastExtAuthoringBooleanToolImpl.h> #include <NvBlastExtAuthoringMeshImpl.h> #include <NvBlastExtAuthoringMeshUtils.h> #include <NvBlastNvSharedHelpers.h> #include <VHACD.h> #include <vector> using namespace nvidia; namespace Nv { namespace Blast { #define SAFE_ARRAY_NEW(T, x) ((x) > 0) ? 
reinterpret_cast<T*>(NVBLAST_ALLOC(sizeof(T) * (x))) : nullptr; #define SAFE_ARRAY_DELETE(x) if (x != nullptr) {NVBLAST_FREE(x); x = nullptr;} void trimCollisionGeometry(ConvexMeshBuilder& cmb, uint32_t chunksCount, CollisionHull** in, const uint32_t* chunkDepth) { std::vector<std::vector<NvPlane> > chunkMidplanes(chunksCount); std::vector<NvVec3> centers(chunksCount); std::vector<NvBounds3> hullsBounds(chunksCount); for (uint32_t i = 0; i < chunksCount; ++i) { hullsBounds[i].setEmpty(); centers[i] = NvVec3(0, 0, 0); for (uint32_t p = 0; p < in[i]->pointsCount; ++p) { centers[i] += toNvShared(in[i]->points[p]); hullsBounds[i].include(toNvShared(in[i]->points[p])); } centers[i] = hullsBounds[i].getCenter(); } Separation params; for (uint32_t hull = 0; hull < chunksCount; ++hull) { for (uint32_t hull2 = hull + 1; hull2 < chunksCount; ++hull2) { if (chunkDepth[hull] != chunkDepth[hull2]) { continue; } if (importerHullsInProximityApexFree(in[hull]->pointsCount, toNvShared(in[hull]->points), hullsBounds[hull], NvTransform(NvIdentity), NvVec3(1, 1, 1), in[hull2]->pointsCount, toNvShared(in[hull2]->points), hullsBounds[hull2], NvTransform(NvIdentity), NvVec3(1, 1, 1), 0.0, &params) == false) { continue; } NvVec3 c1 = centers[hull]; NvVec3 c2 = centers[hull2]; float d = FLT_MAX; NvVec3 n1; NvVec3 n2; for (uint32_t p = 0; p < in[hull]->pointsCount; ++p) { float ld = (toNvShared(in[hull]->points[p]) - c2).magnitude(); if (ld < d) { n1 = toNvShared(in[hull]->points[p]); d = ld; } } d = FLT_MAX; for (uint32_t p = 0; p < in[hull2]->pointsCount; ++p) { float ld = (toNvShared(in[hull2]->points[p]) - c1).magnitude(); if (ld < d) { n2 = toNvShared(in[hull2]->points[p]); d = ld; } } NvVec3 dir = c2 - c1; NvPlane pl = NvPlane((n1 + n2) * 0.5, dir.getNormalized()); chunkMidplanes[hull].push_back(pl); NvPlane pl2 = NvPlane((n1 + n2) * 0.5, -dir.getNormalized()); chunkMidplanes[hull2].push_back(pl2); } } std::vector<NvVec3> hPoints; for (uint32_t i = 0; i < chunksCount; ++i) { std::vector<Facet> facets; std::vector<Vertex> vertices; std::vector<Edge> edges; for (uint32_t fc = 0; fc < in[i]->polygonDataCount; ++fc) { Facet nFc; nFc.firstEdgeNumber = edges.size(); auto& pd = in[i]->polygonData[fc]; uint32_t n = pd.vertexCount; for (uint32_t ed = 0; ed < n; ++ed) { uint32_t vr1 = in[i]->indices[(ed) + pd.indexBase]; uint32_t vr2 = in[i]->indices[(ed + 1) % n + pd.indexBase]; edges.push_back({vr1, vr2}); } nFc.edgesCount = n; facets.push_back(nFc); } vertices.resize(in[i]->pointsCount); for (uint32_t vr = 0; vr < in[i]->pointsCount; ++vr) { vertices[vr].p = in[i]->points[vr]; } Mesh* hullMesh = new MeshImpl(vertices.data(), edges.data(), facets.data(), vertices.size(), edges.size(), facets.size()); BooleanEvaluator evl; //I think the material ID is unused for collision meshes so harcoding MATERIAL_INTERIOR is ok Mesh* cuttingMesh = getCuttingBox(NvVec3(0, 0, 0), NvVec3(0, 0, 1), 40, 0, kMaterialInteriorId); for (uint32_t p = 0; p < chunkMidplanes[i].size(); ++p) { NvPlane& pl = chunkMidplanes[i][p]; setCuttingBox(pl.pointInPlane(), pl.n.getNormalized(), cuttingMesh, 60, 0); evl.performFastCutting(hullMesh, cuttingMesh, BooleanConfigurations::BOOLEAN_DIFFERENCE()); Mesh* result = evl.createNewMesh(); if (result == nullptr) { break; } delete hullMesh; hullMesh = result; } delete cuttingMesh; if (hullMesh == nullptr) { continue; } hPoints.clear(); hPoints.resize(hullMesh->getVerticesCount()); for (uint32_t v = 0; v < hullMesh->getVerticesCount(); ++v) { hPoints[v] = toNvShared(hullMesh->getVertices()[v].p); } delete 
hullMesh; if (in[i] != nullptr) { delete in[i]; } in[i] = cmb.buildCollisionGeometry(hPoints.size(), fromNvShared(hPoints.data())); } } int32_t buildMeshConvexDecomposition(ConvexMeshBuilder& cmb, const Triangle* mesh, uint32_t triangleCount, const ConvexDecompositionParams& iparams, CollisionHull**& convexes) { std::vector<float> coords(triangleCount * 9); std::vector<uint32_t> indices(triangleCount * 3); uint32_t indx = 0; uint32_t indxCoord = 0; NvBounds3 chunkBound = NvBounds3::empty(); for (uint32_t i = 0; i < triangleCount; ++i) { for (auto& t : { mesh[i].a.p , mesh[i].b.p , mesh[i].c.p }) { chunkBound.include(toNvShared(t)); coords[indxCoord] = t.x; coords[indxCoord + 1] = t.y; coords[indxCoord + 2] = t.z; indxCoord += 3; } indices[indx] = indx; indices[indx + 1] = indx + 1; indices[indx + 2] = indx + 2; indx += 3; } NvVec3 rsc = chunkBound.getDimensions(); for (uint32_t i = 0; i < coords.size(); i += 3) { coords[i] = (coords[i] - chunkBound.minimum.x) / rsc.x; coords[i + 1] = (coords[i + 1] - chunkBound.minimum.y) / rsc.y; coords[i + 2] = (coords[i + 2] - chunkBound.minimum.z) / rsc.z; } VHACD::IVHACD* decomposer = VHACD::CreateVHACD(); VHACD::IVHACD::Parameters vhacdParam; vhacdParam.m_maxConvexHulls = iparams.maximumNumberOfHulls; vhacdParam.m_resolution = iparams.voxelGridResolution; vhacdParam.m_concavity = iparams.concavity; vhacdParam.m_oclAcceleration = false; //TODO vhacdParam.m_callback vhacdParam.m_minVolumePerCH = 0.003f; // 1.f / (3 * vhacdParam.m_resolution ^ (1 / 3)); decomposer->Compute(coords.data(), triangleCount * 3, indices.data(), triangleCount, vhacdParam); const uint32_t nConvexHulls = decomposer->GetNConvexHulls(); convexes = SAFE_ARRAY_NEW(CollisionHull*, nConvexHulls); for (uint32_t i = 0; i < nConvexHulls; ++i) { VHACD::IVHACD::ConvexHull hl; decomposer->GetConvexHull(i, hl); std::vector<NvVec3> vertices; for (uint32_t v = 0; v < hl.m_nPoints; ++v) { vertices.push_back(NvVec3(hl.m_points[v * 3], hl.m_points[v * 3 + 1], hl.m_points[v * 3 + 2])); vertices.back().x = vertices.back().x * rsc.x + chunkBound.minimum.x; vertices.back().y = vertices.back().y * rsc.y + chunkBound.minimum.y; vertices.back().z = vertices.back().z * rsc.z + chunkBound.minimum.z; } convexes[i] = cmb.buildCollisionGeometry(vertices.size(), fromNvShared(vertices.data())); } //VHACD::~VHACD called from release does nothign and does not call Clean() decomposer->Clean(); decomposer->Release(); return nConvexHulls; } } // namespace Blast } // namespace Nv
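// Illustrative note (added commentary, not part of the original source):
// buildMeshConvexDecomposition() above normalizes the input triangles into the unit cube
// before invoking V-HACD and scales the resulting hull vertices back into the original
// space, so callers pass geometry in its original coordinates unchanged. A hypothetical
// call site (variable names are assumptions):
//
//     CollisionHull** hulls = nullptr;
//     const int32_t hullCount = buildMeshConvexDecomposition(builder, triangles, triangleCount, params, hulls);
//     // ... consume hulls[0 .. hullCount), then release the individual hulls through the
//     // collision builder and free the pointer array that was allocated for them.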
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringMeshUtils.h
#include <cinttypes> #include <map> #include <set> #include <vector> #include "NvBlastExtAuthoringTypes.h" namespace nvidia { class NvVec3; }; namespace Nv { namespace Blast { class Mesh; /** Helper functions */ /** Set cutting box at some particular position. \param[in] point Cutting face center \param[in] normal Cutting face normal \param[in] mesh Cutting box mesh \param[in] size Cutting box size \param[in] id Cutting box ID */ void setCuttingBox(const nvidia::NvVec3& point, const nvidia::NvVec3& normal, Mesh* mesh, float size, int64_t id); /** Create cutting box at some particular position. \param[in] point Cutting face center \param[in] normal Cutting face normal \param[in] size Cutting box size \param[in] id Cutting box ID */ Mesh* getCuttingBox(const nvidia::NvVec3& point, const nvidia::NvVec3& normal, float size, int64_t id, int32_t interiorMaterialId); /** Create box at some particular position. \param[in] point Cutting face center \param[in] size Cutting box size */ Mesh* getBigBox(const nvidia::NvVec3& point, float size, int32_t interiorMaterialId); /** Create slicing box with noisy cutting surface. \param[in] point Cutting face center \param[in] normal Cutting face normal \param[in] size Cutting box size \param[in] jaggedPlaneSize Noisy surface size \param[in] resolution Noisy surface resolution \param[in] id Cutting box ID \param[in] amplitude Noise amplitude \param[in] frequency Noise frequency \param[in] octaves Noise octaves \param[in] seed Random generator seed, used for noise generation. */ Mesh* getNoisyCuttingBoxPair(const nvidia::NvVec3& point, const nvidia::NvVec3& normal, float size, float jaggedPlaneSize, nvidia::NvVec3 resolution, int64_t id, float amplitude, float frequency, int32_t octaves, int32_t seed, int32_t interiorMaterialId); /** Inverses normals of cutting box and sets indices. \param[in] mesh Cutting box mesh */ void inverseNormalAndIndices(Mesh* mesh); struct CmpVec { bool operator()(const nvidia::NvVec3& v1, const nvidia::NvVec3& v2) const; }; typedef std::map<nvidia::NvVec3, std::map<uint32_t, uint32_t>, CmpVec> PointMap; struct SharedFace { SharedFace() {} SharedFace(uint32_t inW, uint32_t inH, int64_t inUD, int32_t inMatId) : w(inW), h(inH), f(Facet( 0, 3, inUD, inMatId )) { vertices.reserve((w + 1) * (h + 1)); } uint32_t w, h; Facet f; std::vector<Nv::Blast::Vertex> vertices; std::vector<Nv::Blast::Edge> edges; std::vector<Nv::Blast::Facet> facets; }; struct CmpSharedFace { bool operator()(const std::pair<nvidia::NvVec3, nvidia::NvVec3>& pv1, const std::pair<nvidia::NvVec3, nvidia::NvVec3>& pv2) const; }; typedef std::map<std::pair<nvidia::NvVec3, nvidia::NvVec3>, SharedFace, CmpSharedFace> SharedFacesMap; struct CutoutConfiguration; void buildCuttingConeFaces(const CutoutConfiguration& conf, const std::vector<std::vector<nvidia::NvVec3>>& points, float heightBot, float heightTop, float conicityBot, float conicityTop, int64_t& id, int32_t seed, int32_t interiorMaterialId, SharedFacesMap& sharedFacesMap); /** Create cutting cone at some particular position. 
\param[in] conf Cutout configuration parameters and data \param[in] meshId Cutout index \param[in] points Array of points for loop \param[in] smoothingGroups Array of point indices at which smoothing group should be toggled \param[in] heightBot Cutting cone bottom height (below z = 0) \param[in] heightTop Cutting cone top height (above z = 0) \param[in] conicityBot Cutting cone bottom points multiplier \param[in] conicityTop Cutting cone top points multiplier \param[in] id Cutting cylinder ID \param[in] seed Seed for RNG \param[in] interiorMaterialId Interior material index \param[in] sharedFacesMap Shared faces for noisy fracture */ Mesh* getCuttingCone(const CutoutConfiguration& conf, const std::vector<nvidia::NvVec3>& points, const std::set<int32_t>& smoothingGroups, float heightBot, float heightTop, float conicityBot, float conicityTop, int64_t& id, int32_t seed, int32_t interiorMaterialId, const SharedFacesMap& sharedFacesMap, bool inverseNormals = false); }; };
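// Illustrative note (added commentary, not part of the original header): a typical pattern in
// this extension (see trimCollisionGeometry in NvBlastExtAuthoringCollisionBuilderImpl.cpp) is
// to create one cutting box with getCuttingBox() and then reposition it with setCuttingBox()
// between successive boolean cuts:
//
//     Mesh* cutter = getCuttingBox(nvidia::NvVec3(0, 0, 0), nvidia::NvVec3(0, 0, 1), 40, 0, interiorMaterialId);
//     setCuttingBox(plane.pointInPlane(), plane.n.getNormalized(), cutter, 60, 0);
//     evaluator.performFastCutting(mesh, cutter, BooleanConfigurations::BOOLEAN_DIFFERENCE());
//     delete cutter;  // the cutting mesh is owned by the caller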
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/vhacdVolume.cpp
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define _CRT_SECURE_NO_WARNINGS #include "btConvexHullComputer.h" #include "vhacdVolume.h" #include <algorithm> #include <float.h> #include <math.h> #include <queue> #include <string.h> #ifdef _MSC_VER #pragma warning(disable:4458 4100) #endif namespace VHACD { /********************************************************/ /* AABB-triangle overlap test code */ /* by Tomas Akenine-Meuller */ /* Function: int32_t triBoxOverlap(float boxcenter[3], */ /* float boxhalfsize[3],float triverts[3][3]); */ /* History: */ /* 2001-03-05: released the code in its first version */ /* 2001-06-18: changed the order of the tests, faster */ /* */ /* Acknowledgement: Many thanks to Pierre Terdiman for */ /* suggestions and discussions on how to optimize code. */ /* Thanks to David Hunt for finding a ">="-bug! 
*/ /********************************************************/ #define X 0 #define Y 1 #define Z 2 #define FINDMINMAX(x0, x1, x2, min, max) \ min = max = x0; \ if (x1 < min) \ min = x1; \ if (x1 > max) \ max = x1; \ if (x2 < min) \ min = x2; \ if (x2 > max) \ max = x2; #define AXISTEST_X01(a, b, fa, fb) \ p0 = a * v0[Y] - b * v0[Z]; \ p2 = a * v2[Y] - b * v2[Z]; \ if (p0 < p2) { \ min = p0; \ max = p2; \ } \ else { \ min = p2; \ max = p0; \ } \ rad = fa * boxhalfsize[Y] + fb * boxhalfsize[Z]; \ if (min > rad || max < -rad) \ return 0; #define AXISTEST_X2(a, b, fa, fb) \ p0 = a * v0[Y] - b * v0[Z]; \ p1 = a * v1[Y] - b * v1[Z]; \ if (p0 < p1) { \ min = p0; \ max = p1; \ } \ else { \ min = p1; \ max = p0; \ } \ rad = fa * boxhalfsize[Y] + fb * boxhalfsize[Z]; \ if (min > rad || max < -rad) \ return 0; #define AXISTEST_Y02(a, b, fa, fb) \ p0 = -a * v0[X] + b * v0[Z]; \ p2 = -a * v2[X] + b * v2[Z]; \ if (p0 < p2) { \ min = p0; \ max = p2; \ } \ else { \ min = p2; \ max = p0; \ } \ rad = fa * boxhalfsize[X] + fb * boxhalfsize[Z]; \ if (min > rad || max < -rad) \ return 0; #define AXISTEST_Y1(a, b, fa, fb) \ p0 = -a * v0[X] + b * v0[Z]; \ p1 = -a * v1[X] + b * v1[Z]; \ if (p0 < p1) { \ min = p0; \ max = p1; \ } \ else { \ min = p1; \ max = p0; \ } \ rad = fa * boxhalfsize[X] + fb * boxhalfsize[Z]; \ if (min > rad || max < -rad) \ return 0; #define AXISTEST_Z12(a, b, fa, fb) \ p1 = a * v1[X] - b * v1[Y]; \ p2 = a * v2[X] - b * v2[Y]; \ if (p2 < p1) { \ min = p2; \ max = p1; \ } \ else { \ min = p1; \ max = p2; \ } \ rad = fa * boxhalfsize[X] + fb * boxhalfsize[Y]; \ if (min > rad || max < -rad) \ return 0; #define AXISTEST_Z0(a, b, fa, fb) \ p0 = a * v0[X] - b * v0[Y]; \ p1 = a * v1[X] - b * v1[Y]; \ if (p0 < p1) { \ min = p0; \ max = p1; \ } \ else { \ min = p1; \ max = p0; \ } \ rad = fa * boxhalfsize[X] + fb * boxhalfsize[Y]; \ if (min > rad || max < -rad) \ return 0; int32_t PlaneBoxOverlap(const Vec3<double>& normal, const Vec3<double>& vert, const Vec3<double>& maxbox) { int32_t q; Vec3<double> vmin, vmax; double v; for (q = X; q <= Z; q++) { v = vert[q]; if (normal[q] > 0.0) { vmin[q] = -maxbox[q] - v; vmax[q] = maxbox[q] - v; } else { vmin[q] = maxbox[q] - v; vmax[q] = -maxbox[q] - v; } } if (normal * vmin > 0.0) return 0; if (normal * vmax >= 0.0) return 1; return 0; } int32_t TriBoxOverlap(const Vec3<double>& boxcenter, const Vec3<double>& boxhalfsize, const Vec3<double>& triver0, const Vec3<double>& triver1, const Vec3<double>& triver2) { /* use separating axis theorem to test overlap between triangle and box */ /* need to test for overlap in these directions: */ /* 1) the {x,y,z}-directions (actually, since we use the AABB of the triangle */ /* we do not even need to test these) */ /* 2) normal of the triangle */ /* 3) crossproduct(edge from tri, {x,y,z}-directin) */ /* this gives 3x3=9 more tests */ Vec3<double> v0, v1, v2; double min, max, p0, p1, p2, rad, fex, fey, fez; // -NJMP- "d" local variable removed Vec3<double> normal, e0, e1, e2; /* This is the fastest branch on Sun */ /* move everything so that the boxcenter is in (0,0,0) */ v0 = triver0 - boxcenter; v1 = triver1 - boxcenter; v2 = triver2 - boxcenter; /* compute triangle edges */ e0 = v1 - v0; /* tri edge 0 */ e1 = v2 - v1; /* tri edge 1 */ e2 = v0 - v2; /* tri edge 2 */ /* Bullet 3: */ /* test the 9 tests first (this was faster) */ fex = fabs(e0[X]); fey = fabs(e0[Y]); fez = fabs(e0[Z]); AXISTEST_X01(e0[Z], e0[Y], fez, fey); AXISTEST_Y02(e0[Z], e0[X], fez, fex); AXISTEST_Z12(e0[Y], e0[X], fey, fex); fex = fabs(e1[X]); fey 
= fabs(e1[Y]); fez = fabs(e1[Z]); AXISTEST_X01(e1[Z], e1[Y], fez, fey); AXISTEST_Y02(e1[Z], e1[X], fez, fex); AXISTEST_Z0(e1[Y], e1[X], fey, fex); fex = fabs(e2[X]); fey = fabs(e2[Y]); fez = fabs(e2[Z]); AXISTEST_X2(e2[Z], e2[Y], fez, fey); AXISTEST_Y1(e2[Z], e2[X], fez, fex); AXISTEST_Z12(e2[Y], e2[X], fey, fex); /* Bullet 1: */ /* first test overlap in the {x,y,z}-directions */ /* find min, max of the triangle each direction, and test for overlap in */ /* that direction -- this is equivalent to testing a minimal AABB around */ /* the triangle against the AABB */ /* test in X-direction */ FINDMINMAX(v0[X], v1[X], v2[X], min, max); if (min > boxhalfsize[X] || max < -boxhalfsize[X]) return 0; /* test in Y-direction */ FINDMINMAX(v0[Y], v1[Y], v2[Y], min, max); if (min > boxhalfsize[Y] || max < -boxhalfsize[Y]) return 0; /* test in Z-direction */ FINDMINMAX(v0[Z], v1[Z], v2[Z], min, max); if (min > boxhalfsize[Z] || max < -boxhalfsize[Z]) return 0; /* Bullet 2: */ /* test if the box intersects the plane of the triangle */ /* compute plane equation of triangle: normal*x+d=0 */ normal = e0 ^ e1; if (!PlaneBoxOverlap(normal, v0, boxhalfsize)) return 0; return 1; /* box and triangle overlaps */ } // Slightly modified version of Stan Melax's code for 3x3 matrix diagonalization (Thanks Stan!) // source: http://www.melax.com/diag.html?attredirects=0 void Diagonalize(const double (&A)[3][3], double (&Q)[3][3], double (&D)[3][3]) { // A must be a symmetric matrix. // returns Q and D such that // Diagonal matrix D = QT * A * Q; and A = Q*D*QT const int32_t maxsteps = 24; // certainly wont need that many. int32_t k0, k1, k2; double o[3], m[3]; double q[4] = { 0.0, 0.0, 0.0, 1.0 }; double jr[4]; double sqw, sqx, sqy, sqz; double tmp1, tmp2, mq; double AQ[3][3]; double thet, sgn, t, c; for (int32_t i = 0; i < maxsteps; ++i) { // quat to matrix sqx = q[0] * q[0]; sqy = q[1] * q[1]; sqz = q[2] * q[2]; sqw = q[3] * q[3]; Q[0][0] = (sqx - sqy - sqz + sqw); Q[1][1] = (-sqx + sqy - sqz + sqw); Q[2][2] = (-sqx - sqy + sqz + sqw); tmp1 = q[0] * q[1]; tmp2 = q[2] * q[3]; Q[1][0] = 2.0 * (tmp1 + tmp2); Q[0][1] = 2.0 * (tmp1 - tmp2); tmp1 = q[0] * q[2]; tmp2 = q[1] * q[3]; Q[2][0] = 2.0 * (tmp1 - tmp2); Q[0][2] = 2.0 * (tmp1 + tmp2); tmp1 = q[1] * q[2]; tmp2 = q[0] * q[3]; Q[2][1] = 2.0 * (tmp1 + tmp2); Q[1][2] = 2.0 * (tmp1 - tmp2); // AQ = A * Q AQ[0][0] = Q[0][0] * A[0][0] + Q[1][0] * A[0][1] + Q[2][0] * A[0][2]; AQ[0][1] = Q[0][1] * A[0][0] + Q[1][1] * A[0][1] + Q[2][1] * A[0][2]; AQ[0][2] = Q[0][2] * A[0][0] + Q[1][2] * A[0][1] + Q[2][2] * A[0][2]; AQ[1][0] = Q[0][0] * A[0][1] + Q[1][0] * A[1][1] + Q[2][0] * A[1][2]; AQ[1][1] = Q[0][1] * A[0][1] + Q[1][1] * A[1][1] + Q[2][1] * A[1][2]; AQ[1][2] = Q[0][2] * A[0][1] + Q[1][2] * A[1][1] + Q[2][2] * A[1][2]; AQ[2][0] = Q[0][0] * A[0][2] + Q[1][0] * A[1][2] + Q[2][0] * A[2][2]; AQ[2][1] = Q[0][1] * A[0][2] + Q[1][1] * A[1][2] + Q[2][1] * A[2][2]; AQ[2][2] = Q[0][2] * A[0][2] + Q[1][2] * A[1][2] + Q[2][2] * A[2][2]; // D = Qt * AQ D[0][0] = AQ[0][0] * Q[0][0] + AQ[1][0] * Q[1][0] + AQ[2][0] * Q[2][0]; D[0][1] = AQ[0][0] * Q[0][1] + AQ[1][0] * Q[1][1] + AQ[2][0] * Q[2][1]; D[0][2] = AQ[0][0] * Q[0][2] + AQ[1][0] * Q[1][2] + AQ[2][0] * Q[2][2]; D[1][0] = AQ[0][1] * Q[0][0] + AQ[1][1] * Q[1][0] + AQ[2][1] * Q[2][0]; D[1][1] = AQ[0][1] * Q[0][1] + AQ[1][1] * Q[1][1] + AQ[2][1] * Q[2][1]; D[1][2] = AQ[0][1] * Q[0][2] + AQ[1][1] * Q[1][2] + AQ[2][1] * Q[2][2]; D[2][0] = AQ[0][2] * Q[0][0] + AQ[1][2] * Q[1][0] + AQ[2][2] * Q[2][0]; D[2][1] = AQ[0][2] * Q[0][1] + AQ[1][2] * 
Q[1][1] + AQ[2][2] * Q[2][1]; D[2][2] = AQ[0][2] * Q[0][2] + AQ[1][2] * Q[1][2] + AQ[2][2] * Q[2][2]; o[0] = D[1][2]; o[1] = D[0][2]; o[2] = D[0][1]; m[0] = fabs(o[0]); m[1] = fabs(o[1]); m[2] = fabs(o[2]); k0 = (m[0] > m[1] && m[0] > m[2]) ? 0 : (m[1] > m[2]) ? 1 : 2; // index of largest element of offdiag k1 = (k0 + 1) % 3; k2 = (k0 + 2) % 3; if (o[k0] == 0.0) { break; // diagonal already } thet = (D[k2][k2] - D[k1][k1]) / (2.0 * o[k0]); sgn = (thet > 0.0) ? 1.0 : -1.0; thet *= sgn; // make it positive t = sgn / (thet + ((thet < 1.E6) ? sqrt(thet * thet + 1.0) : thet)); // sign(T)/(|T|+sqrt(T^2+1)) c = 1.0 / sqrt(t * t + 1.0); // c= 1/(t^2+1) , t=s/c if (c == 1.0) { break; // no room for improvement - reached machine precision. } jr[0] = jr[1] = jr[2] = jr[3] = 0.0; jr[k0] = sgn * sqrt((1.0 - c) / 2.0); // using 1/2 angle identity sin(a/2) = sqrt((1-cos(a))/2) jr[k0] *= -1.0; // since our quat-to-matrix convention was for v*M instead of M*v jr[3] = sqrt(1.0 - jr[k0] * jr[k0]); if (jr[3] == 1.0) { break; // reached limits of floating point precision } q[0] = (q[3] * jr[0] + q[0] * jr[3] + q[1] * jr[2] - q[2] * jr[1]); q[1] = (q[3] * jr[1] - q[0] * jr[2] + q[1] * jr[3] + q[2] * jr[0]); q[2] = (q[3] * jr[2] + q[0] * jr[1] - q[1] * jr[0] + q[2] * jr[3]); q[3] = (q[3] * jr[3] - q[0] * jr[0] - q[1] * jr[1] - q[2] * jr[2]); mq = sqrt(q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3]); q[0] /= mq; q[1] /= mq; q[2] /= mq; q[3] /= mq; } } const double TetrahedronSet::EPS = 0.0000000000001; VoxelSet::VoxelSet() { m_minBB[0] = m_minBB[1] = m_minBB[2] = 0.0; m_minBBVoxels[0] = m_minBBVoxels[1] = m_minBBVoxels[2] = 0; m_maxBBVoxels[0] = m_maxBBVoxels[1] = m_maxBBVoxels[2] = 1; m_minBBPts[0] = m_minBBPts[1] = m_minBBPts[2] = 0; m_maxBBPts[0] = m_maxBBPts[1] = m_maxBBPts[2] = 1; m_barycenter[0] = m_barycenter[1] = m_barycenter[2] = 0; m_barycenterPCA[0] = m_barycenterPCA[1] = m_barycenterPCA[2] = 0.0; m_scale = 1.0; m_unitVolume = 1.0; m_numVoxelsOnSurface = 0; m_numVoxelsInsideSurface = 0; memset(m_Q, 0, sizeof(double) * 9); memset(m_D, 0, sizeof(double) * 9); } VoxelSet::~VoxelSet(void) { } void VoxelSet::ComputeBB() { const size_t nVoxels = m_voxels.Size(); if (nVoxels == 0) return; for (int32_t h = 0; h < 3; ++h) { m_minBBVoxels[h] = m_voxels[0].m_coord[h]; m_maxBBVoxels[h] = m_voxels[0].m_coord[h]; } Vec3<double> bary(0.0); for (size_t p = 0; p < nVoxels; ++p) { for (int32_t h = 0; h < 3; ++h) { bary[h] += m_voxels[p].m_coord[h]; if (m_minBBVoxels[h] > m_voxels[p].m_coord[h]) m_minBBVoxels[h] = m_voxels[p].m_coord[h]; if (m_maxBBVoxels[h] < m_voxels[p].m_coord[h]) m_maxBBVoxels[h] = m_voxels[p].m_coord[h]; } } bary /= (double)nVoxels; for (int32_t h = 0; h < 3; ++h) { m_minBBPts[h] = m_minBBVoxels[h] * m_scale + m_minBB[h]; m_maxBBPts[h] = m_maxBBVoxels[h] * m_scale + m_minBB[h]; m_barycenter[h] = (short)(bary[h] + 0.5); } } void VoxelSet::ComputeConvexHull(Mesh& meshCH, const size_t sampling) const { const size_t CLUSTER_SIZE = 65536; const size_t nVoxels = m_voxels.Size(); if (nVoxels == 0) return; SArray<Vec3<double> > cpoints; Vec3<double>* points = new Vec3<double>[CLUSTER_SIZE]; size_t p = 0; size_t s = 0; short i, j, k; while (p < nVoxels) { size_t q = 0; while (q < CLUSTER_SIZE && p < nVoxels) { if (m_voxels[p].m_data == PRIMITIVE_ON_SURFACE) { ++s; if (s == sampling) { s = 0; i = m_voxels[p].m_coord[0]; j = m_voxels[p].m_coord[1]; k = m_voxels[p].m_coord[2]; Vec3<double> p0((i - 0.5) * m_scale, (j - 0.5) * m_scale, (k - 0.5) * m_scale); Vec3<double> p1((i + 0.5) * m_scale, (j - 0.5) * 
m_scale, (k - 0.5) * m_scale); Vec3<double> p2((i + 0.5) * m_scale, (j + 0.5) * m_scale, (k - 0.5) * m_scale); Vec3<double> p3((i - 0.5) * m_scale, (j + 0.5) * m_scale, (k - 0.5) * m_scale); Vec3<double> p4((i - 0.5) * m_scale, (j - 0.5) * m_scale, (k + 0.5) * m_scale); Vec3<double> p5((i + 0.5) * m_scale, (j - 0.5) * m_scale, (k + 0.5) * m_scale); Vec3<double> p6((i + 0.5) * m_scale, (j + 0.5) * m_scale, (k + 0.5) * m_scale); Vec3<double> p7((i - 0.5) * m_scale, (j + 0.5) * m_scale, (k + 0.5) * m_scale); points[q++] = p0 + m_minBB; points[q++] = p1 + m_minBB; points[q++] = p2 + m_minBB; points[q++] = p3 + m_minBB; points[q++] = p4 + m_minBB; points[q++] = p5 + m_minBB; points[q++] = p6 + m_minBB; points[q++] = p7 + m_minBB; } } ++p; } btConvexHullComputer ch; ch.compute((double*)points, 3 * sizeof(double), (int32_t)q, -1.0, -1.0); for (int32_t v = 0; v < ch.vertices.size(); v++) { cpoints.PushBack(Vec3<double>(ch.vertices[v].getX(), ch.vertices[v].getY(), ch.vertices[v].getZ())); } } delete[] points; points = cpoints.Data(); btConvexHullComputer ch; ch.compute((double*)points, 3 * sizeof(double), (int32_t)cpoints.Size(), -1.0, -1.0); meshCH.ResizePoints(0); meshCH.ResizeTriangles(0); for (int32_t v = 0; v < ch.vertices.size(); v++) { meshCH.AddPoint(Vec3<double>(ch.vertices[v].getX(), ch.vertices[v].getY(), ch.vertices[v].getZ())); } const int32_t nt = ch.faces.size(); for (int32_t t = 0; t < nt; ++t) { const btConvexHullComputer::Edge* sourceEdge = &(ch.edges[ch.faces[t]]); int32_t a = sourceEdge->getSourceVertex(); int32_t b = sourceEdge->getTargetVertex(); const btConvexHullComputer::Edge* edge = sourceEdge->getNextEdgeOfFace(); int32_t c = edge->getTargetVertex(); while (c != a) { meshCH.AddTriangle(Vec3<int32_t>(a, b, c)); edge = edge->getNextEdgeOfFace(); b = c; c = edge->getTargetVertex(); } } } void VoxelSet::GetPoints(const Voxel& voxel, Vec3<double>* const pts) const { short i = voxel.m_coord[0]; short j = voxel.m_coord[1]; short k = voxel.m_coord[2]; pts[0][0] = (i - 0.5) * m_scale + m_minBB[0]; pts[1][0] = (i + 0.5) * m_scale + m_minBB[0]; pts[2][0] = (i + 0.5) * m_scale + m_minBB[0]; pts[3][0] = (i - 0.5) * m_scale + m_minBB[0]; pts[4][0] = (i - 0.5) * m_scale + m_minBB[0]; pts[5][0] = (i + 0.5) * m_scale + m_minBB[0]; pts[6][0] = (i + 0.5) * m_scale + m_minBB[0]; pts[7][0] = (i - 0.5) * m_scale + m_minBB[0]; pts[0][1] = (j - 0.5) * m_scale + m_minBB[1]; pts[1][1] = (j - 0.5) * m_scale + m_minBB[1]; pts[2][1] = (j + 0.5) * m_scale + m_minBB[1]; pts[3][1] = (j + 0.5) * m_scale + m_minBB[1]; pts[4][1] = (j - 0.5) * m_scale + m_minBB[1]; pts[5][1] = (j - 0.5) * m_scale + m_minBB[1]; pts[6][1] = (j + 0.5) * m_scale + m_minBB[1]; pts[7][1] = (j + 0.5) * m_scale + m_minBB[1]; pts[0][2] = (k - 0.5) * m_scale + m_minBB[2]; pts[1][2] = (k - 0.5) * m_scale + m_minBB[2]; pts[2][2] = (k - 0.5) * m_scale + m_minBB[2]; pts[3][2] = (k - 0.5) * m_scale + m_minBB[2]; pts[4][2] = (k + 0.5) * m_scale + m_minBB[2]; pts[5][2] = (k + 0.5) * m_scale + m_minBB[2]; pts[6][2] = (k + 0.5) * m_scale + m_minBB[2]; pts[7][2] = (k + 0.5) * m_scale + m_minBB[2]; } void VoxelSet::Intersect(const Plane& plane, SArray<Vec3<double> >* const positivePts, SArray<Vec3<double> >* const negativePts, const size_t sampling) const { const size_t nVoxels = m_voxels.Size(); if (nVoxels == 0) return; const double d0 = m_scale; double d; Vec3<double> pts[8]; Vec3<double> pt; Voxel voxel; size_t sp = 0; size_t sn = 0; for (size_t v = 0; v < nVoxels; ++v) { voxel = m_voxels[v]; pt = GetPoint(voxel); d = plane.m_a * pt[0] + 
plane.m_b * pt[1] + plane.m_c * pt[2] + plane.m_d; // if (d >= 0.0 && d <= d0) positivePts->PushBack(pt); // else if (d < 0.0 && -d <= d0) negativePts->PushBack(pt); if (d >= 0.0) { if (d <= d0) { GetPoints(voxel, pts); for (int32_t k = 0; k < 8; ++k) { positivePts->PushBack(pts[k]); } } else { if (++sp == sampling) { // positivePts->PushBack(pt); GetPoints(voxel, pts); for (int32_t k = 0; k < 8; ++k) { positivePts->PushBack(pts[k]); } sp = 0; } } } else { if (-d <= d0) { GetPoints(voxel, pts); for (int32_t k = 0; k < 8; ++k) { negativePts->PushBack(pts[k]); } } else { if (++sn == sampling) { // negativePts->PushBack(pt); GetPoints(voxel, pts); for (int32_t k = 0; k < 8; ++k) { negativePts->PushBack(pts[k]); } sn = 0; } } } } } void VoxelSet::ComputeExteriorPoints(const Plane& plane, const Mesh& mesh, SArray<Vec3<double> >* const exteriorPts) const { const size_t nVoxels = m_voxels.Size(); if (nVoxels == 0) return; double d; Vec3<double> pt; Vec3<double> pts[8]; Voxel voxel; for (size_t v = 0; v < nVoxels; ++v) { voxel = m_voxels[v]; pt = GetPoint(voxel); d = plane.m_a * pt[0] + plane.m_b * pt[1] + plane.m_c * pt[2] + plane.m_d; if (d >= 0.0) { if (!mesh.IsInside(pt)) { GetPoints(voxel, pts); for (int32_t k = 0; k < 8; ++k) { exteriorPts->PushBack(pts[k]); } } } } } void VoxelSet::ComputeClippedVolumes(const Plane& plane, double& positiveVolume, double& negativeVolume) const { negativeVolume = 0.0; positiveVolume = 0.0; const size_t nVoxels = m_voxels.Size(); if (nVoxels == 0) return; double d; Vec3<double> pt; size_t nPositiveVoxels = 0; for (size_t v = 0; v < nVoxels; ++v) { pt = GetPoint(m_voxels[v]); d = plane.m_a * pt[0] + plane.m_b * pt[1] + plane.m_c * pt[2] + plane.m_d; nPositiveVoxels += (d >= 0.0); } size_t nNegativeVoxels = nVoxels - nPositiveVoxels; positiveVolume = m_unitVolume * nPositiveVoxels; negativeVolume = m_unitVolume * nNegativeVoxels; } void VoxelSet::SelectOnSurface(PrimitiveSet* const onSurfP) const { VoxelSet* const onSurf = (VoxelSet*)onSurfP; const size_t nVoxels = m_voxels.Size(); if (nVoxels == 0) return; for (int32_t h = 0; h < 3; ++h) { onSurf->m_minBB[h] = m_minBB[h]; } onSurf->m_voxels.Resize(0); onSurf->m_scale = m_scale; onSurf->m_unitVolume = m_unitVolume; onSurf->m_numVoxelsOnSurface = 0; onSurf->m_numVoxelsInsideSurface = 0; Voxel voxel; for (size_t v = 0; v < nVoxels; ++v) { voxel = m_voxels[v]; if (voxel.m_data == PRIMITIVE_ON_SURFACE) { onSurf->m_voxels.PushBack(voxel); ++onSurf->m_numVoxelsOnSurface; } } } void VoxelSet::Clip(const Plane& plane, PrimitiveSet* const positivePartP, PrimitiveSet* const negativePartP) const { VoxelSet* const positivePart = (VoxelSet*)positivePartP; VoxelSet* const negativePart = (VoxelSet*)negativePartP; const size_t nVoxels = m_voxels.Size(); if (nVoxels == 0) return; for (int32_t h = 0; h < 3; ++h) { negativePart->m_minBB[h] = positivePart->m_minBB[h] = m_minBB[h]; } positivePart->m_voxels.Resize(0); negativePart->m_voxels.Resize(0); positivePart->m_voxels.Allocate(nVoxels); negativePart->m_voxels.Allocate(nVoxels); negativePart->m_scale = positivePart->m_scale = m_scale; negativePart->m_unitVolume = positivePart->m_unitVolume = m_unitVolume; negativePart->m_numVoxelsOnSurface = positivePart->m_numVoxelsOnSurface = 0; negativePart->m_numVoxelsInsideSurface = positivePart->m_numVoxelsInsideSurface = 0; double d; Vec3<double> pt; Voxel voxel; const double d0 = m_scale; for (size_t v = 0; v < nVoxels; ++v) { voxel = m_voxels[v]; pt = GetPoint(voxel); d = plane.m_a * pt[0] + plane.m_b * pt[1] + plane.m_c * pt[2] + 
plane.m_d; if (d >= 0.0) { if (voxel.m_data == PRIMITIVE_ON_SURFACE || d <= d0) { voxel.m_data = PRIMITIVE_ON_SURFACE; positivePart->m_voxels.PushBack(voxel); ++positivePart->m_numVoxelsOnSurface; } else { positivePart->m_voxels.PushBack(voxel); ++positivePart->m_numVoxelsInsideSurface; } } else { if (voxel.m_data == PRIMITIVE_ON_SURFACE || -d <= d0) { voxel.m_data = PRIMITIVE_ON_SURFACE; negativePart->m_voxels.PushBack(voxel); ++negativePart->m_numVoxelsOnSurface; } else { negativePart->m_voxels.PushBack(voxel); ++negativePart->m_numVoxelsInsideSurface; } } } } void VoxelSet::Convert(Mesh& mesh, const VOXEL_VALUE value) const { const size_t nVoxels = m_voxels.Size(); if (nVoxels == 0) return; Voxel voxel; Vec3<double> pts[8]; for (size_t v = 0; v < nVoxels; ++v) { voxel = m_voxels[v]; if (voxel.m_data == value) { GetPoints(voxel, pts); int32_t s = (int32_t)mesh.GetNPoints(); for (int32_t k = 0; k < 8; ++k) { mesh.AddPoint(pts[k]); } mesh.AddTriangle(Vec3<int32_t>(s + 0, s + 2, s + 1)); mesh.AddTriangle(Vec3<int32_t>(s + 0, s + 3, s + 2)); mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 5, s + 6)); mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 6, s + 7)); mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 6, s + 2)); mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 2, s + 3)); mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 1, s + 5)); mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 0, s + 1)); mesh.AddTriangle(Vec3<int32_t>(s + 6, s + 5, s + 1)); mesh.AddTriangle(Vec3<int32_t>(s + 6, s + 1, s + 2)); mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 0, s + 4)); mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 3, s + 0)); } } } void VoxelSet::ComputePrincipalAxes() { const size_t nVoxels = m_voxels.Size(); if (nVoxels == 0) return; m_barycenterPCA[0] = m_barycenterPCA[1] = m_barycenterPCA[2] = 0.0; for (size_t v = 0; v < nVoxels; ++v) { Voxel& voxel = m_voxels[v]; m_barycenterPCA[0] += voxel.m_coord[0]; m_barycenterPCA[1] += voxel.m_coord[1]; m_barycenterPCA[2] += voxel.m_coord[2]; } m_barycenterPCA /= (double)nVoxels; double covMat[3][3] = { { 0.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0 } }; double x, y, z; for (size_t v = 0; v < nVoxels; ++v) { Voxel& voxel = m_voxels[v]; x = voxel.m_coord[0] - m_barycenter[0]; y = voxel.m_coord[1] - m_barycenter[1]; z = voxel.m_coord[2] - m_barycenter[2]; covMat[0][0] += x * x; covMat[1][1] += y * y; covMat[2][2] += z * z; covMat[0][1] += x * y; covMat[0][2] += x * z; covMat[1][2] += y * z; } covMat[0][0] /= nVoxels; covMat[1][1] /= nVoxels; covMat[2][2] /= nVoxels; covMat[0][1] /= nVoxels; covMat[0][2] /= nVoxels; covMat[1][2] /= nVoxels; covMat[1][0] = covMat[0][1]; covMat[2][0] = covMat[0][2]; covMat[2][1] = covMat[1][2]; Diagonalize(covMat, m_Q, m_D); } Volume::Volume() { m_dim[0] = m_dim[1] = m_dim[2] = 0; m_minBB[0] = m_minBB[1] = m_minBB[2] = 0.0; m_maxBB[0] = m_maxBB[1] = m_maxBB[2] = 1.0; m_numVoxelsOnSurface = 0; m_numVoxelsInsideSurface = 0; m_numVoxelsOutsideSurface = 0; m_scale = 1.0; m_data = 0; } Volume::~Volume(void) { delete[] m_data; } void Volume::Allocate() { delete[] m_data; size_t size = m_dim[0] * m_dim[1] * m_dim[2]; m_data = new unsigned char[size]; memset(m_data, PRIMITIVE_UNDEFINED, sizeof(unsigned char) * size); } void Volume::Free() { delete[] m_data; m_data = 0; } void Volume::FillOutsideSurface(const size_t i0, const size_t j0, const size_t k0, const size_t i1, const size_t j1, const size_t k1) { const short neighbours[6][3] = { { 1, 0, 0 }, { 0, 1, 0 }, { 0, 0, 1 }, { -1, 0, 0 }, { 0, -1, 0 }, { 0, 0, -1 } }; std::queue<Vec3<short> > fifo; Vec3<short> current; short 
a, b, c; for (size_t i = i0; i < i1; ++i) { for (size_t j = j0; j < j1; ++j) { for (size_t k = k0; k < k1; ++k) { if (GetVoxel(i, j, k) == PRIMITIVE_UNDEFINED) { current[0] = (short)i; current[1] = (short)j; current[2] = (short)k; fifo.push(current); GetVoxel(current[0], current[1], current[2]) = PRIMITIVE_OUTSIDE_SURFACE; ++m_numVoxelsOutsideSurface; while (fifo.size() > 0) { current = fifo.front(); fifo.pop(); for (int32_t h = 0; h < 6; ++h) { a = current[0] + neighbours[h][0]; b = current[1] + neighbours[h][1]; c = current[2] + neighbours[h][2]; if (a < 0 || a >= (int32_t)m_dim[0] || b < 0 || b >= (int32_t)m_dim[1] || c < 0 || c >= (int32_t)m_dim[2]) { continue; } unsigned char& v = GetVoxel(a, b, c); if (v == PRIMITIVE_UNDEFINED) { v = PRIMITIVE_OUTSIDE_SURFACE; ++m_numVoxelsOutsideSurface; fifo.push(Vec3<short>(a, b, c)); } } } } } } } } void Volume::FillInsideSurface() { const size_t i0 = m_dim[0]; const size_t j0 = m_dim[1]; const size_t k0 = m_dim[2]; for (size_t i = 0; i < i0; ++i) { for (size_t j = 0; j < j0; ++j) { for (size_t k = 0; k < k0; ++k) { unsigned char& v = GetVoxel(i, j, k); if (v == PRIMITIVE_UNDEFINED) { v = PRIMITIVE_INSIDE_SURFACE; ++m_numVoxelsInsideSurface; } } } } } void Volume::Convert(Mesh& mesh, const VOXEL_VALUE value) const { const size_t i0 = m_dim[0]; const size_t j0 = m_dim[1]; const size_t k0 = m_dim[2]; for (size_t i = 0; i < i0; ++i) { for (size_t j = 0; j < j0; ++j) { for (size_t k = 0; k < k0; ++k) { const unsigned char& voxel = GetVoxel(i, j, k); if (voxel == value) { Vec3<double> p0((i - 0.5) * m_scale, (j - 0.5) * m_scale, (k - 0.5) * m_scale); Vec3<double> p1((i + 0.5) * m_scale, (j - 0.5) * m_scale, (k - 0.5) * m_scale); Vec3<double> p2((i + 0.5) * m_scale, (j + 0.5) * m_scale, (k - 0.5) * m_scale); Vec3<double> p3((i - 0.5) * m_scale, (j + 0.5) * m_scale, (k - 0.5) * m_scale); Vec3<double> p4((i - 0.5) * m_scale, (j - 0.5) * m_scale, (k + 0.5) * m_scale); Vec3<double> p5((i + 0.5) * m_scale, (j - 0.5) * m_scale, (k + 0.5) * m_scale); Vec3<double> p6((i + 0.5) * m_scale, (j + 0.5) * m_scale, (k + 0.5) * m_scale); Vec3<double> p7((i - 0.5) * m_scale, (j + 0.5) * m_scale, (k + 0.5) * m_scale); int32_t s = (int32_t)mesh.GetNPoints(); mesh.AddPoint(p0 + m_minBB); mesh.AddPoint(p1 + m_minBB); mesh.AddPoint(p2 + m_minBB); mesh.AddPoint(p3 + m_minBB); mesh.AddPoint(p4 + m_minBB); mesh.AddPoint(p5 + m_minBB); mesh.AddPoint(p6 + m_minBB); mesh.AddPoint(p7 + m_minBB); mesh.AddTriangle(Vec3<int32_t>(s + 0, s + 2, s + 1)); mesh.AddTriangle(Vec3<int32_t>(s + 0, s + 3, s + 2)); mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 5, s + 6)); mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 6, s + 7)); mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 6, s + 2)); mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 2, s + 3)); mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 1, s + 5)); mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 0, s + 1)); mesh.AddTriangle(Vec3<int32_t>(s + 6, s + 5, s + 1)); mesh.AddTriangle(Vec3<int32_t>(s + 6, s + 1, s + 2)); mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 0, s + 4)); mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 3, s + 0)); } } } } } void Volume::Convert(VoxelSet& vset) const { for (int32_t h = 0; h < 3; ++h) { vset.m_minBB[h] = m_minBB[h]; } vset.m_voxels.Allocate(m_numVoxelsInsideSurface + m_numVoxelsOnSurface); vset.m_scale = m_scale; vset.m_unitVolume = m_scale * m_scale * m_scale; const short i0 = (short)m_dim[0]; const short j0 = (short)m_dim[1]; const short k0 = (short)m_dim[2]; Voxel voxel; vset.m_numVoxelsOnSurface = 0; vset.m_numVoxelsInsideSurface = 0; for (short 
i = 0; i < i0; ++i) { for (short j = 0; j < j0; ++j) { for (short k = 0; k < k0; ++k) { const unsigned char& value = GetVoxel(i, j, k); if (value == PRIMITIVE_INSIDE_SURFACE) { voxel.m_coord[0] = i; voxel.m_coord[1] = j; voxel.m_coord[2] = k; voxel.m_data = PRIMITIVE_INSIDE_SURFACE; vset.m_voxels.PushBack(voxel); ++vset.m_numVoxelsInsideSurface; } else if (value == PRIMITIVE_ON_SURFACE) { voxel.m_coord[0] = i; voxel.m_coord[1] = j; voxel.m_coord[2] = k; voxel.m_data = PRIMITIVE_ON_SURFACE; vset.m_voxels.PushBack(voxel); ++vset.m_numVoxelsOnSurface; } } } } } void Volume::Convert(TetrahedronSet& tset) const { tset.m_tetrahedra.Allocate(5 * (m_numVoxelsInsideSurface + m_numVoxelsOnSurface)); tset.m_scale = m_scale; const short i0 = (short)m_dim[0]; const short j0 = (short)m_dim[1]; const short k0 = (short)m_dim[2]; tset.m_numTetrahedraOnSurface = 0; tset.m_numTetrahedraInsideSurface = 0; Tetrahedron tetrahedron; for (short i = 0; i < i0; ++i) { for (short j = 0; j < j0; ++j) { for (short k = 0; k < k0; ++k) { const unsigned char& value = GetVoxel(i, j, k); if (value == PRIMITIVE_INSIDE_SURFACE || value == PRIMITIVE_ON_SURFACE) { tetrahedron.m_data = value; Vec3<double> p1((i - 0.5) * m_scale + m_minBB[0], (j - 0.5) * m_scale + m_minBB[1], (k - 0.5) * m_scale + m_minBB[2]); Vec3<double> p2((i + 0.5) * m_scale + m_minBB[0], (j - 0.5) * m_scale + m_minBB[1], (k - 0.5) * m_scale + m_minBB[2]); Vec3<double> p3((i + 0.5) * m_scale + m_minBB[0], (j + 0.5) * m_scale + m_minBB[1], (k - 0.5) * m_scale + m_minBB[2]); Vec3<double> p4((i - 0.5) * m_scale + m_minBB[0], (j + 0.5) * m_scale + m_minBB[1], (k - 0.5) * m_scale + m_minBB[2]); Vec3<double> p5((i - 0.5) * m_scale + m_minBB[0], (j - 0.5) * m_scale + m_minBB[1], (k + 0.5) * m_scale + m_minBB[2]); Vec3<double> p6((i + 0.5) * m_scale + m_minBB[0], (j - 0.5) * m_scale + m_minBB[1], (k + 0.5) * m_scale + m_minBB[2]); Vec3<double> p7((i + 0.5) * m_scale + m_minBB[0], (j + 0.5) * m_scale + m_minBB[1], (k + 0.5) * m_scale + m_minBB[2]); Vec3<double> p8((i - 0.5) * m_scale + m_minBB[0], (j + 0.5) * m_scale + m_minBB[1], (k + 0.5) * m_scale + m_minBB[2]); tetrahedron.m_pts[0] = p2; tetrahedron.m_pts[1] = p4; tetrahedron.m_pts[2] = p7; tetrahedron.m_pts[3] = p5; tset.m_tetrahedra.PushBack(tetrahedron); tetrahedron.m_pts[0] = p6; tetrahedron.m_pts[1] = p2; tetrahedron.m_pts[2] = p7; tetrahedron.m_pts[3] = p5; tset.m_tetrahedra.PushBack(tetrahedron); tetrahedron.m_pts[0] = p3; tetrahedron.m_pts[1] = p4; tetrahedron.m_pts[2] = p7; tetrahedron.m_pts[3] = p2; tset.m_tetrahedra.PushBack(tetrahedron); tetrahedron.m_pts[0] = p1; tetrahedron.m_pts[1] = p4; tetrahedron.m_pts[2] = p2; tetrahedron.m_pts[3] = p5; tset.m_tetrahedra.PushBack(tetrahedron); tetrahedron.m_pts[0] = p8; tetrahedron.m_pts[1] = p5; tetrahedron.m_pts[2] = p7; tetrahedron.m_pts[3] = p4; tset.m_tetrahedra.PushBack(tetrahedron); if (value == PRIMITIVE_INSIDE_SURFACE) { tset.m_numTetrahedraInsideSurface += 5; } else { tset.m_numTetrahedraOnSurface += 5; } } } } } } void Volume::AlignToPrincipalAxes(double (&rot)[3][3]) const { const short i0 = (short)m_dim[0]; const short j0 = (short)m_dim[1]; const short k0 = (short)m_dim[2]; Vec3<double> barycenter(0.0); size_t nVoxels = 0; for (short i = 0; i < i0; ++i) { for (short j = 0; j < j0; ++j) { for (short k = 0; k < k0; ++k) { const unsigned char& value = GetVoxel(i, j, k); if (value == PRIMITIVE_INSIDE_SURFACE || value == PRIMITIVE_ON_SURFACE) { barycenter[0] += i; barycenter[1] += j; barycenter[2] += k; ++nVoxels; } } } } barycenter /= (double)nVoxels; 
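// Added descriptive comment: accumulate the 3x3 covariance matrix of the occupied voxel
// coordinates about the barycenter computed above; Diagonalize() then yields its eigenbasis,
// which becomes the principal-axes rotation returned in `rot`.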
double covMat[3][3] = { { 0.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0 } }; double x, y, z; for (short i = 0; i < i0; ++i) { for (short j = 0; j < j0; ++j) { for (short k = 0; k < k0; ++k) { const unsigned char& value = GetVoxel(i, j, k); if (value == PRIMITIVE_INSIDE_SURFACE || value == PRIMITIVE_ON_SURFACE) { x = i - barycenter[0]; y = j - barycenter[1]; z = k - barycenter[2]; covMat[0][0] += x * x; covMat[1][1] += y * y; covMat[2][2] += z * z; covMat[0][1] += x * y; covMat[0][2] += x * z; covMat[1][2] += y * z; } } } } covMat[1][0] = covMat[0][1]; covMat[2][0] = covMat[0][2]; covMat[2][1] = covMat[1][2]; double D[3][3]; Diagonalize(covMat, rot, D); } TetrahedronSet::TetrahedronSet() { m_minBB[0] = m_minBB[1] = m_minBB[2] = 0.0; m_maxBB[0] = m_maxBB[1] = m_maxBB[2] = 1.0; m_barycenter[0] = m_barycenter[1] = m_barycenter[2] = 0.0; m_scale = 1.0; m_numTetrahedraOnSurface = 0; m_numTetrahedraInsideSurface = 0; memset(m_Q, 0, sizeof(double) * 9); memset(m_D, 0, sizeof(double) * 9); } TetrahedronSet::~TetrahedronSet(void) { } void TetrahedronSet::ComputeBB() { const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; for (int32_t h = 0; h < 3; ++h) { m_minBB[h] = m_maxBB[h] = m_tetrahedra[0].m_pts[0][h]; m_barycenter[h] = 0.0; } for (size_t p = 0; p < nTetrahedra; ++p) { for (int32_t i = 0; i < 4; ++i) { for (int32_t h = 0; h < 3; ++h) { if (m_minBB[h] > m_tetrahedra[p].m_pts[i][h]) m_minBB[h] = m_tetrahedra[p].m_pts[i][h]; if (m_maxBB[h] < m_tetrahedra[p].m_pts[i][h]) m_maxBB[h] = m_tetrahedra[p].m_pts[i][h]; m_barycenter[h] += m_tetrahedra[p].m_pts[i][h]; } } } m_barycenter /= (double)(4 * nTetrahedra); } void TetrahedronSet::ComputeConvexHull(Mesh& meshCH, const size_t sampling) const { const size_t CLUSTER_SIZE = 65536; const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; SArray<Vec3<double> > cpoints; Vec3<double>* points = new Vec3<double>[CLUSTER_SIZE]; size_t p = 0; while (p < nTetrahedra) { size_t q = 0; size_t s = 0; while (q < CLUSTER_SIZE && p < nTetrahedra) { if (m_tetrahedra[p].m_data == PRIMITIVE_ON_SURFACE) { ++s; if (s == sampling) { s = 0; for (int32_t a = 0; a < 4; ++a) { points[q++] = m_tetrahedra[p].m_pts[a]; for (int32_t xx = 0; xx < 3; ++xx) { assert(m_tetrahedra[p].m_pts[a][xx] + EPS >= m_minBB[xx]); assert(m_tetrahedra[p].m_pts[a][xx] <= m_maxBB[xx] + EPS); } } } } ++p; } btConvexHullComputer ch; ch.compute((double*)points, 3 * sizeof(double), (int32_t)q, -1.0, -1.0); for (int32_t v = 0; v < ch.vertices.size(); v++) { cpoints.PushBack(Vec3<double>(ch.vertices[v].getX(), ch.vertices[v].getY(), ch.vertices[v].getZ())); } } delete[] points; points = cpoints.Data(); btConvexHullComputer ch; ch.compute((double*)points, 3 * sizeof(double), (int32_t)cpoints.Size(), -1.0, -1.0); meshCH.ResizePoints(0); meshCH.ResizeTriangles(0); for (int32_t v = 0; v < ch.vertices.size(); v++) { meshCH.AddPoint(Vec3<double>(ch.vertices[v].getX(), ch.vertices[v].getY(), ch.vertices[v].getZ())); } const int32_t nt = ch.faces.size(); for (int32_t t = 0; t < nt; ++t) { const btConvexHullComputer::Edge* sourceEdge = &(ch.edges[ch.faces[t]]); int32_t a = sourceEdge->getSourceVertex(); int32_t b = sourceEdge->getTargetVertex(); const btConvexHullComputer::Edge* edge = sourceEdge->getNextEdgeOfFace(); int32_t c = edge->getTargetVertex(); while (c != a) { meshCH.AddTriangle(Vec3<int32_t>(a, b, c)); edge = edge->getNextEdgeOfFace(); b = c; c = edge->getTargetVertex(); } } } inline bool TetrahedronSet::Add(Tetrahedron& tetrahedron) { double v = 
ComputeVolume4(tetrahedron.m_pts[0], tetrahedron.m_pts[1], tetrahedron.m_pts[2], tetrahedron.m_pts[3]); const double EPS = 0.0000000001; if (fabs(v) < EPS) { return false; } else if (v < 0.0) { Vec3<double> tmp = tetrahedron.m_pts[0]; tetrahedron.m_pts[0] = tetrahedron.m_pts[1]; tetrahedron.m_pts[1] = tmp; } for (int32_t a = 0; a < 4; ++a) { for (int32_t xx = 0; xx < 3; ++xx) { assert(tetrahedron.m_pts[a][xx] + EPS >= m_minBB[xx]); assert(tetrahedron.m_pts[a][xx] <= m_maxBB[xx] + EPS); } } m_tetrahedra.PushBack(tetrahedron); return true; } void TetrahedronSet::AddClippedTetrahedra(const Vec3<double> (&pts)[10], const int32_t nPts) { const int32_t tetF[4][3] = { { 0, 1, 2 }, { 2, 1, 3 }, { 3, 1, 0 }, { 3, 0, 2 } }; if (nPts < 4) { return; } else if (nPts == 4) { Tetrahedron tetrahedron; tetrahedron.m_data = PRIMITIVE_ON_SURFACE; tetrahedron.m_pts[0] = pts[0]; tetrahedron.m_pts[1] = pts[1]; tetrahedron.m_pts[2] = pts[2]; tetrahedron.m_pts[3] = pts[3]; if (Add(tetrahedron)) { ++m_numTetrahedraOnSurface; } } else if (nPts == 5) { const int32_t tet[15][4] = { { 0, 1, 2, 3 }, { 1, 2, 3, 4 }, { 0, 2, 3, 4 }, { 0, 1, 3, 4 }, { 0, 1, 2, 4 }, }; const int32_t rem[5] = { 4, 0, 1, 2, 3 }; double maxVol = 0.0; int32_t h0 = -1; Tetrahedron tetrahedron0; tetrahedron0.m_data = PRIMITIVE_ON_SURFACE; for (int32_t h = 0; h < 5; ++h) { double v = ComputeVolume4(pts[tet[h][0]], pts[tet[h][1]], pts[tet[h][2]], pts[tet[h][3]]); if (v > maxVol) { h0 = h; tetrahedron0.m_pts[0] = pts[tet[h][0]]; tetrahedron0.m_pts[1] = pts[tet[h][1]]; tetrahedron0.m_pts[2] = pts[tet[h][2]]; tetrahedron0.m_pts[3] = pts[tet[h][3]]; maxVol = v; } else if (-v > maxVol) { h0 = h; tetrahedron0.m_pts[0] = pts[tet[h][1]]; tetrahedron0.m_pts[1] = pts[tet[h][0]]; tetrahedron0.m_pts[2] = pts[tet[h][2]]; tetrahedron0.m_pts[3] = pts[tet[h][3]]; maxVol = -v; } } if (h0 == -1) return; if (Add(tetrahedron0)) { ++m_numTetrahedraOnSurface; } else { return; } int32_t a = rem[h0]; maxVol = 0.0; int32_t h1 = -1; Tetrahedron tetrahedron1; tetrahedron1.m_data = PRIMITIVE_ON_SURFACE; for (int32_t h = 0; h < 4; ++h) { double v = ComputeVolume4(pts[a], tetrahedron0.m_pts[tetF[h][0]], tetrahedron0.m_pts[tetF[h][1]], tetrahedron0.m_pts[tetF[h][2]]); if (v > maxVol) { h1 = h; tetrahedron1.m_pts[0] = pts[a]; tetrahedron1.m_pts[1] = tetrahedron0.m_pts[tetF[h][0]]; tetrahedron1.m_pts[2] = tetrahedron0.m_pts[tetF[h][1]]; tetrahedron1.m_pts[3] = tetrahedron0.m_pts[tetF[h][2]]; maxVol = v; } } if (h1 == -1 && Add(tetrahedron1)) { ++m_numTetrahedraOnSurface; } } else if (nPts == 6) { const int32_t tet[15][4] = { { 2, 3, 4, 5 }, { 1, 3, 4, 5 }, { 1, 2, 4, 5 }, { 1, 2, 3, 5 }, { 1, 2, 3, 4 }, { 0, 3, 4, 5 }, { 0, 2, 4, 5 }, { 0, 2, 3, 5 }, { 0, 2, 3, 4 }, { 0, 1, 4, 5 }, { 0, 1, 3, 5 }, { 0, 1, 3, 4 }, { 0, 1, 2, 5 }, { 0, 1, 2, 4 }, { 0, 1, 2, 3 } }; const int32_t rem[15][2] = { { 0, 1 }, { 0, 2 }, { 0, 3 }, { 0, 4 }, { 0, 5 }, { 1, 2 }, { 1, 3 }, { 1, 4 }, { 1, 5 }, { 2, 3 }, { 2, 4 }, { 2, 5 }, { 3, 4 }, { 3, 5 }, { 4, 5 } }; double maxVol = 0.0; int32_t h0 = -1; Tetrahedron tetrahedron0; tetrahedron0.m_data = PRIMITIVE_ON_SURFACE; for (int32_t h = 0; h < 15; ++h) { double v = ComputeVolume4(pts[tet[h][0]], pts[tet[h][1]], pts[tet[h][2]], pts[tet[h][3]]); if (v > maxVol) { h0 = h; tetrahedron0.m_pts[0] = pts[tet[h][0]]; tetrahedron0.m_pts[1] = pts[tet[h][1]]; tetrahedron0.m_pts[2] = pts[tet[h][2]]; tetrahedron0.m_pts[3] = pts[tet[h][3]]; maxVol = v; } else if (-v > maxVol) { h0 = h; tetrahedron0.m_pts[0] = pts[tet[h][1]]; tetrahedron0.m_pts[1] = pts[tet[h][0]]; 
tetrahedron0.m_pts[2] = pts[tet[h][2]]; tetrahedron0.m_pts[3] = pts[tet[h][3]]; maxVol = -v; } } if (h0 == -1) return; if (Add(tetrahedron0)) { ++m_numTetrahedraOnSurface; } else { return; } int32_t a0 = rem[h0][0]; int32_t a1 = rem[h0][1]; int32_t h1 = -1; Tetrahedron tetrahedron1; tetrahedron1.m_data = PRIMITIVE_ON_SURFACE; maxVol = 0.0; for (int32_t h = 0; h < 4; ++h) { double v = ComputeVolume4(pts[a0], tetrahedron0.m_pts[tetF[h][0]], tetrahedron0.m_pts[tetF[h][1]], tetrahedron0.m_pts[tetF[h][2]]); if (v > maxVol) { h1 = h; tetrahedron1.m_pts[0] = pts[a0]; tetrahedron1.m_pts[1] = tetrahedron0.m_pts[tetF[h][0]]; tetrahedron1.m_pts[2] = tetrahedron0.m_pts[tetF[h][1]]; tetrahedron1.m_pts[3] = tetrahedron0.m_pts[tetF[h][2]]; maxVol = v; } } if (h1 != -1 && Add(tetrahedron1)) { ++m_numTetrahedraOnSurface; } else { h1 = -1; } maxVol = 0.0; int32_t h2 = -1; Tetrahedron tetrahedron2; tetrahedron2.m_data = PRIMITIVE_ON_SURFACE; for (int32_t h = 0; h < 4; ++h) { double v = ComputeVolume4(pts[a0], tetrahedron0.m_pts[tetF[h][0]], tetrahedron0.m_pts[tetF[h][1]], tetrahedron0.m_pts[tetF[h][2]]); if (h == h1) continue; if (v > maxVol) { h2 = h; tetrahedron2.m_pts[0] = pts[a1]; tetrahedron2.m_pts[1] = tetrahedron0.m_pts[tetF[h][0]]; tetrahedron2.m_pts[2] = tetrahedron0.m_pts[tetF[h][1]]; tetrahedron2.m_pts[3] = tetrahedron0.m_pts[tetF[h][2]]; maxVol = v; } } if (h1 != -1) { for (int32_t h = 0; h < 4; ++h) { double v = ComputeVolume4(pts[a1], tetrahedron1.m_pts[tetF[h][0]], tetrahedron1.m_pts[tetF[h][1]], tetrahedron1.m_pts[tetF[h][2]]); if (h == 1) continue; if (v > maxVol) { h2 = h; tetrahedron2.m_pts[0] = pts[a1]; tetrahedron2.m_pts[1] = tetrahedron1.m_pts[tetF[h][0]]; tetrahedron2.m_pts[2] = tetrahedron1.m_pts[tetF[h][1]]; tetrahedron2.m_pts[3] = tetrahedron1.m_pts[tetF[h][2]]; maxVol = v; } } } if (h2 != -1 && Add(tetrahedron2)) { ++m_numTetrahedraOnSurface; } } else { assert(0); } } void TetrahedronSet::Intersect(const Plane& plane, SArray<Vec3<double> >* const positivePts, SArray<Vec3<double> >* const negativePts, const size_t sampling) const { const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; } void TetrahedronSet::ComputeExteriorPoints(const Plane& plane, const Mesh& mesh, SArray<Vec3<double> >* const exteriorPts) const { } void TetrahedronSet::ComputeClippedVolumes(const Plane& plane, double& positiveVolume, double& negativeVolume) const { const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; } void TetrahedronSet::SelectOnSurface(PrimitiveSet* const onSurfP) const { TetrahedronSet* const onSurf = (TetrahedronSet*)onSurfP; const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; onSurf->m_tetrahedra.Resize(0); onSurf->m_scale = m_scale; onSurf->m_numTetrahedraOnSurface = 0; onSurf->m_numTetrahedraInsideSurface = 0; onSurf->m_barycenter = m_barycenter; onSurf->m_minBB = m_minBB; onSurf->m_maxBB = m_maxBB; for (int32_t i = 0; i < 3; ++i) { for (int32_t j = 0; j < 3; ++j) { onSurf->m_Q[i][j] = m_Q[i][j]; onSurf->m_D[i][j] = m_D[i][j]; } } Tetrahedron tetrahedron; for (size_t v = 0; v < nTetrahedra; ++v) { tetrahedron = m_tetrahedra[v]; if (tetrahedron.m_data == PRIMITIVE_ON_SURFACE) { onSurf->m_tetrahedra.PushBack(tetrahedron); ++onSurf->m_numTetrahedraOnSurface; } } } void TetrahedronSet::Clip(const Plane& plane, PrimitiveSet* const positivePartP, PrimitiveSet* const negativePartP) const { TetrahedronSet* const positivePart = (TetrahedronSet*)positivePartP; TetrahedronSet* const negativePart = 
(TetrahedronSet*)negativePartP; const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; positivePart->m_tetrahedra.Resize(0); negativePart->m_tetrahedra.Resize(0); positivePart->m_tetrahedra.Allocate(nTetrahedra); negativePart->m_tetrahedra.Allocate(nTetrahedra); negativePart->m_scale = positivePart->m_scale = m_scale; negativePart->m_numTetrahedraOnSurface = positivePart->m_numTetrahedraOnSurface = 0; negativePart->m_numTetrahedraInsideSurface = positivePart->m_numTetrahedraInsideSurface = 0; negativePart->m_barycenter = m_barycenter; positivePart->m_barycenter = m_barycenter; negativePart->m_minBB = m_minBB; positivePart->m_minBB = m_minBB; negativePart->m_maxBB = m_maxBB; positivePart->m_maxBB = m_maxBB; for (int32_t i = 0; i < 3; ++i) { for (int32_t j = 0; j < 3; ++j) { negativePart->m_Q[i][j] = positivePart->m_Q[i][j] = m_Q[i][j]; negativePart->m_D[i][j] = positivePart->m_D[i][j] = m_D[i][j]; } } Tetrahedron tetrahedron; double delta, alpha; int32_t sign[4]; int32_t npos, nneg; Vec3<double> posPts[10]; Vec3<double> negPts[10]; Vec3<double> P0, P1, M; const Vec3<double> n(plane.m_a, plane.m_b, plane.m_c); const int32_t edges[6][2] = { { 0, 1 }, { 0, 2 }, { 0, 3 }, { 1, 2 }, { 1, 3 }, { 2, 3 } }; double dist; for (size_t v = 0; v < nTetrahedra; ++v) { tetrahedron = m_tetrahedra[v]; npos = nneg = 0; for (int32_t i = 0; i < 4; ++i) { dist = plane.m_a * tetrahedron.m_pts[i][0] + plane.m_b * tetrahedron.m_pts[i][1] + plane.m_c * tetrahedron.m_pts[i][2] + plane.m_d; if (dist > 0.0) { sign[i] = 1; posPts[npos] = tetrahedron.m_pts[i]; ++npos; } else { sign[i] = -1; negPts[nneg] = tetrahedron.m_pts[i]; ++nneg; } } if (npos == 4) { positivePart->Add(tetrahedron); if (tetrahedron.m_data == PRIMITIVE_ON_SURFACE) { ++positivePart->m_numTetrahedraOnSurface; } else { ++positivePart->m_numTetrahedraInsideSurface; } } else if (nneg == 4) { negativePart->Add(tetrahedron); if (tetrahedron.m_data == PRIMITIVE_ON_SURFACE) { ++negativePart->m_numTetrahedraOnSurface; } else { ++negativePart->m_numTetrahedraInsideSurface; } } else { int32_t nnew = 0; for (int32_t j = 0; j < 6; ++j) { if (sign[edges[j][0]] * sign[edges[j][1]] == -1) { P0 = tetrahedron.m_pts[edges[j][0]]; P1 = tetrahedron.m_pts[edges[j][1]]; delta = (P0 - P1) * n; alpha = -(plane.m_d + (n * P1)) / delta; assert(alpha >= 0.0 && alpha <= 1.0); M = alpha * P0 + (1 - alpha) * P1; for (int32_t xx = 0; xx < 3; ++xx) { assert(M[xx] + EPS >= m_minBB[xx]); assert(M[xx] <= m_maxBB[xx] + EPS); } posPts[npos++] = M; negPts[nneg++] = M; ++nnew; } } negativePart->AddClippedTetrahedra(negPts, nneg); positivePart->AddClippedTetrahedra(posPts, npos); } } } void TetrahedronSet::Convert(Mesh& mesh, const VOXEL_VALUE value) const { const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; for (size_t v = 0; v < nTetrahedra; ++v) { const Tetrahedron& tetrahedron = m_tetrahedra[v]; if (tetrahedron.m_data == value) { int32_t s = (int32_t)mesh.GetNPoints(); mesh.AddPoint(tetrahedron.m_pts[0]); mesh.AddPoint(tetrahedron.m_pts[1]); mesh.AddPoint(tetrahedron.m_pts[2]); mesh.AddPoint(tetrahedron.m_pts[3]); mesh.AddTriangle(Vec3<int32_t>(s + 0, s + 1, s + 2)); mesh.AddTriangle(Vec3<int32_t>(s + 2, s + 1, s + 3)); mesh.AddTriangle(Vec3<int32_t>(s + 3, s + 1, s + 0)); mesh.AddTriangle(Vec3<int32_t>(s + 3, s + 0, s + 2)); } } } const double TetrahedronSet::ComputeVolume() const { const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return 0.0; double volume = 0.0; for (size_t v = 0; v < nTetrahedra; ++v) { const 
Tetrahedron& tetrahedron = m_tetrahedra[v]; volume += fabs(ComputeVolume4(tetrahedron.m_pts[0], tetrahedron.m_pts[1], tetrahedron.m_pts[2], tetrahedron.m_pts[3])); } return volume / 6.0; } const double TetrahedronSet::ComputeMaxVolumeError() const { const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return 0.0; double volume = 0.0; for (size_t v = 0; v < nTetrahedra; ++v) { const Tetrahedron& tetrahedron = m_tetrahedra[v]; if (tetrahedron.m_data == PRIMITIVE_ON_SURFACE) { volume += fabs(ComputeVolume4(tetrahedron.m_pts[0], tetrahedron.m_pts[1], tetrahedron.m_pts[2], tetrahedron.m_pts[3])); } } return volume / 6.0; } void TetrahedronSet::RevertAlignToPrincipalAxes() { const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; double x, y, z; for (size_t v = 0; v < nTetrahedra; ++v) { Tetrahedron& tetrahedron = m_tetrahedra[v]; for (int32_t i = 0; i < 4; ++i) { x = tetrahedron.m_pts[i][0] - m_barycenter[0]; y = tetrahedron.m_pts[i][1] - m_barycenter[1]; z = tetrahedron.m_pts[i][2] - m_barycenter[2]; tetrahedron.m_pts[i][0] = m_Q[0][0] * x + m_Q[0][1] * y + m_Q[0][2] * z + m_barycenter[0]; tetrahedron.m_pts[i][1] = m_Q[1][0] * x + m_Q[1][1] * y + m_Q[1][2] * z + m_barycenter[1]; tetrahedron.m_pts[i][2] = m_Q[2][0] * x + m_Q[2][1] * y + m_Q[2][2] * z + m_barycenter[2]; } } ComputeBB(); } void TetrahedronSet::ComputePrincipalAxes() { const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; double covMat[3][3] = { { 0.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0 } }; double x, y, z; for (size_t v = 0; v < nTetrahedra; ++v) { Tetrahedron& tetrahedron = m_tetrahedra[v]; for (int32_t i = 0; i < 4; ++i) { x = tetrahedron.m_pts[i][0] - m_barycenter[0]; y = tetrahedron.m_pts[i][1] - m_barycenter[1]; z = tetrahedron.m_pts[i][2] - m_barycenter[2]; covMat[0][0] += x * x; covMat[1][1] += y * y; covMat[2][2] += z * z; covMat[0][1] += x * y; covMat[0][2] += x * z; covMat[1][2] += y * z; } } double n = nTetrahedra * 4.0; covMat[0][0] /= n; covMat[1][1] /= n; covMat[2][2] /= n; covMat[0][1] /= n; covMat[0][2] /= n; covMat[1][2] /= n; covMat[1][0] = covMat[0][1]; covMat[2][0] = covMat[0][2]; covMat[2][1] = covMat[1][2]; Diagonalize(covMat, m_Q, m_D); } void TetrahedronSet::AlignToPrincipalAxes() { const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; double x, y, z; for (size_t v = 0; v < nTetrahedra; ++v) { Tetrahedron& tetrahedron = m_tetrahedra[v]; for (int32_t i = 0; i < 4; ++i) { x = tetrahedron.m_pts[i][0] - m_barycenter[0]; y = tetrahedron.m_pts[i][1] - m_barycenter[1]; z = tetrahedron.m_pts[i][2] - m_barycenter[2]; tetrahedron.m_pts[i][0] = m_Q[0][0] * x + m_Q[1][0] * y + m_Q[2][0] * z + m_barycenter[0]; tetrahedron.m_pts[i][1] = m_Q[0][1] * x + m_Q[1][1] * y + m_Q[2][1] * z + m_barycenter[1]; tetrahedron.m_pts[i][2] = m_Q[0][2] * x + m_Q[1][2] * y + m_Q[2][2] * z + m_barycenter[2]; } } ComputeBB(); } }
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/VHACD.cpp
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define _CRT_SECURE_NO_WARNINGS #include <algorithm> #include <fstream> #include <iomanip> #include <limits> #include <sstream> #if _OPENMP #include <omp.h> #endif // _OPENMP #include "../public/VHACD.h" #include "btConvexHullComputer.h" #include "vhacdICHull.h" #include "vhacdMesh.h" #include "vhacdSArray.h" #include "vhacdTimer.h" #include "vhacdVHACD.h" #include "vhacdVector.h" #include "vhacdVolume.h" #include "FloatMath.h" // Internal debugging feature only #define DEBUG_VISUALIZE_CONSTRAINTS 0 #if DEBUG_VISUALIZE_CONSTRAINTS #include "NvRenderDebug.h" extern RENDER_DEBUG::RenderDebug *gRenderDebug; #pragma warning(disable:4702) #endif #define MAX(a, b) (((a) > (b)) ? (a) : (b)) #define MIN(a, b) (((a) < (b)) ? (a) : (b)) #define ABS(a) (((a) < 0) ? -(a) : (a)) #define ZSGN(a) (((a) < 0) ? -1 : (a) > 0 ? 
1 : 0) #define MAX_DOUBLE (1.79769e+308) #ifdef _MSC_VER #pragma warning(disable:4267 4100 4244 4456) #endif #ifdef USE_SSE #include <immintrin.h> const int32_t SIMD_WIDTH = 4; inline int32_t FindMinimumElement(const float* const d, float* const _, const int32_t n) { // Min within vectors __m128 min_i = _mm_set1_ps(-1.0f); __m128 min_v = _mm_set1_ps(std::numeric_limits<float>::max()); for (int32_t i = 0; i <= n - SIMD_WIDTH; i += SIMD_WIDTH) { const __m128 data = _mm_load_ps(&d[i]); const __m128 pred = _mm_cmplt_ps(data, min_v); min_i = _mm_blendv_ps(min_i, _mm_set1_ps(i), pred); min_v = _mm_min_ps(data, min_v); } /* Min within vector */ const __m128 min1 = _mm_shuffle_ps(min_v, min_v, _MM_SHUFFLE(1, 0, 3, 2)); const __m128 min2 = _mm_min_ps(min_v, min1); const __m128 min3 = _mm_shuffle_ps(min2, min2, _MM_SHUFFLE(0, 1, 0, 1)); const __m128 min4 = _mm_min_ps(min2, min3); float min_d = _mm_cvtss_f32(min4); // Min index const int32_t min_idx = __builtin_ctz(_mm_movemask_ps(_mm_cmpeq_ps(min_v, min4))); int32_t ret = min_i[min_idx] + min_idx; // Trailing elements for (int32_t i = (n & ~(SIMD_WIDTH - 1)); i < n; ++i) { if (d[i] < min_d) { min_d = d[i]; ret = i; } } *m = min_d; return ret; } inline int32_t FindMinimumElement(const float* const d, float* const m, const int32_t begin, const int32_t end) { // Leading elements int32_t min_i = -1; float min_d = std::numeric_limits<float>::max(); const int32_t aligned = (begin & ~(SIMD_WIDTH - 1)) + ((begin & (SIMD_WIDTH - 1)) ? SIMD_WIDTH : 0); for (int32_t i = begin; i < std::min(end, aligned); ++i) { if (d[i] < min_d) { min_d = d[i]; min_i = i; } } // Middle and trailing elements float r_m = std::numeric_limits<float>::max(); const int32_t n = end - aligned; const int32_t r_i = (n > 0) ? FindMinimumElement(&d[aligned], &r_m, n) : 0; // Pick the lowest if (r_m < min_d) { *m = r_m; return r_i + aligned; } else { *m = min_d; return min_i; } } #else inline int32_t FindMinimumElement(const float* const d, float* const m, const int32_t begin, const int32_t end) { int32_t idx = -1; float min = (std::numeric_limits<float>::max)(); for (size_t i = begin; i < size_t(end); ++i) { if (d[i] < min) { idx = i; min = d[i]; } } *m = min; return idx; } #endif //#define OCL_SOURCE_FROM_FILE #ifndef OCL_SOURCE_FROM_FILE const char* oclProgramSource = "\ __kernel void ComputePartialVolumes(__global short4 * voxels, \ const int32_t numVoxels, \ const float4 plane, \ const float4 minBB, \ const float4 scale, \ __local uint4 * localPartialVolumes, \ __global uint4 * partialVolumes) \ { \ int32_t localId = get_local_id(0); \ int32_t groupSize = get_local_size(0); \ int32_t i0 = get_global_id(0) << 2; \ float4 voxel; \ uint4 v; \ voxel = convert_float4(voxels[i0]); \ v.s0 = (dot(plane, mad(scale, voxel, minBB)) >= 0.0f) * (i0 < numVoxels);\ voxel = convert_float4(voxels[i0 + 1]); \ v.s1 = (dot(plane, mad(scale, voxel, minBB)) >= 0.0f) * (i0 + 1 < numVoxels);\ voxel = convert_float4(voxels[i0 + 2]); \ v.s2 = (dot(plane, mad(scale, voxel, minBB)) >= 0.0f) * (i0 + 2 < numVoxels);\ voxel = convert_float4(voxels[i0 + 3]); \ v.s3 = (dot(plane, mad(scale, voxel, minBB)) >= 0.0f) * (i0 + 3 < numVoxels);\ localPartialVolumes[localId] = v; \ barrier(CLK_LOCAL_MEM_FENCE); \ for (int32_t i = groupSize >> 1; i > 0; i >>= 1) \ { \ if (localId < i) \ { \ localPartialVolumes[localId] += localPartialVolumes[localId + i]; \ } \ barrier(CLK_LOCAL_MEM_FENCE); \ } \ if (localId == 0) \ { \ partialVolumes[get_group_id(0)] = localPartialVolumes[0]; \ } \ } \ __kernel void 
ComputePartialSums(__global uint4 * data, \ const int32_t dataSize, \ __local uint4 * partialSums) \ { \ int32_t globalId = get_global_id(0); \ int32_t localId = get_local_id(0); \ int32_t groupSize = get_local_size(0); \ int32_t i; \ if (globalId < dataSize) \ { \ partialSums[localId] = data[globalId]; \ } \ else \ { \ partialSums[localId] = (0, 0, 0, 0); \ } \ barrier(CLK_LOCAL_MEM_FENCE); \ for (i = groupSize >> 1; i > 0; i >>= 1) \ { \ if (localId < i) \ { \ partialSums[localId] += partialSums[localId + i]; \ } \ barrier(CLK_LOCAL_MEM_FENCE); \ } \ if (localId == 0) \ { \ data[get_group_id(0)] = partialSums[0]; \ } \ }"; #endif //OCL_SOURCE_FROM_FILE namespace VHACD { IVHACD* CreateVHACD(void) { return new VHACD(); } bool VHACD::OCLInit(void* const oclDevice, IUserLogger* const logger) { #ifdef CL_VERSION_1_1 m_oclDevice = (cl_device_id*)oclDevice; cl_int error; m_oclContext = clCreateContext(NULL, 1, m_oclDevice, NULL, NULL, &error); if (error != CL_SUCCESS) { if (logger) { logger->Log("Couldn't create context\n"); } return false; } #ifdef OCL_SOURCE_FROM_FILE std::string cl_files = OPENCL_CL_FILES; // read kernal from file #ifdef _WIN32 std::replace(cl_files.begin(), cl_files.end(), '/', '\\'); #endif // _WIN32 FILE* program_handle = fopen(cl_files.c_str(), "rb"); fseek(program_handle, 0, SEEK_END); size_t program_size = ftell(program_handle); rewind(program_handle); char* program_buffer = new char[program_size + 1]; program_buffer[program_size] = '\0'; fread(program_buffer, sizeof(char), program_size, program_handle); fclose(program_handle); // create program m_oclProgram = clCreateProgramWithSource(m_oclContext, 1, (const char**)&program_buffer, &program_size, &error); delete[] program_buffer; #else size_t program_size = strlen(oclProgramSource); m_oclProgram = clCreateProgramWithSource(m_oclContext, 1, (const char**)&oclProgramSource, &program_size, &error); #endif if (error != CL_SUCCESS) { if (logger) { logger->Log("Couldn't create program\n"); } return false; } /* Build program */ error = clBuildProgram(m_oclProgram, 1, m_oclDevice, "-cl-denorms-are-zero", NULL, NULL); if (error != CL_SUCCESS) { size_t log_size; /* Find Size of log and print to std output */ clGetProgramBuildInfo(m_oclProgram, *m_oclDevice, CL_PROGRAM_BUILD_LOG, 0, NULL, &log_size); char* program_log = new char[log_size + 2]; program_log[log_size] = '\n'; program_log[log_size + 1] = '\0'; clGetProgramBuildInfo(m_oclProgram, *m_oclDevice, CL_PROGRAM_BUILD_LOG, log_size + 1, program_log, NULL); if (logger) { logger->Log("Couldn't build program\n"); logger->Log(program_log); } delete[] program_log; return false; } delete[] m_oclQueue; delete[] m_oclKernelComputePartialVolumes; delete[] m_oclKernelComputeSum; m_oclQueue = new cl_command_queue[m_ompNumProcessors]; m_oclKernelComputePartialVolumes = new cl_kernel[m_ompNumProcessors]; m_oclKernelComputeSum = new cl_kernel[m_ompNumProcessors]; const char nameKernelComputePartialVolumes[] = "ComputePartialVolumes"; const char nameKernelComputeSum[] = "ComputePartialSums"; for (int32_t k = 0; k < m_ompNumProcessors; ++k) { m_oclKernelComputePartialVolumes[k] = clCreateKernel(m_oclProgram, nameKernelComputePartialVolumes, &error); if (error != CL_SUCCESS) { if (logger) { logger->Log("Couldn't create kernel\n"); } return false; } m_oclKernelComputeSum[k] = clCreateKernel(m_oclProgram, nameKernelComputeSum, &error); if (error != CL_SUCCESS) { if (logger) { logger->Log("Couldn't create kernel\n"); } return false; } } error = 
clGetKernelWorkGroupInfo(m_oclKernelComputePartialVolumes[0], *m_oclDevice, CL_KERNEL_WORK_GROUP_SIZE, sizeof(size_t), &m_oclWorkGroupSize, NULL); size_t workGroupSize = 0; error = clGetKernelWorkGroupInfo(m_oclKernelComputeSum[0], *m_oclDevice, CL_KERNEL_WORK_GROUP_SIZE, sizeof(size_t), &workGroupSize, NULL); if (error != CL_SUCCESS) { if (logger) { logger->Log("Couldn't query work group info\n"); } return false; } if (workGroupSize < m_oclWorkGroupSize) { m_oclWorkGroupSize = workGroupSize; } for (int32_t k = 0; k < m_ompNumProcessors; ++k) { m_oclQueue[k] = clCreateCommandQueue(m_oclContext, *m_oclDevice, 0 /*CL_QUEUE_PROFILING_ENABLE*/, &error); if (error != CL_SUCCESS) { if (logger) { logger->Log("Couldn't create queue\n"); } return false; } } return true; #else //CL_VERSION_1_1 return false; #endif //CL_VERSION_1_1 } bool VHACD::OCLRelease(IUserLogger* const logger) { #ifdef CL_VERSION_1_1 cl_int error; if (m_oclKernelComputePartialVolumes) { for (int32_t k = 0; k < m_ompNumProcessors; ++k) { error = clReleaseKernel(m_oclKernelComputePartialVolumes[k]); if (error != CL_SUCCESS) { if (logger) { logger->Log("Couldn't release kernal\n"); } return false; } } delete[] m_oclKernelComputePartialVolumes; } if (m_oclKernelComputeSum) { for (int32_t k = 0; k < m_ompNumProcessors; ++k) { error = clReleaseKernel(m_oclKernelComputeSum[k]); if (error != CL_SUCCESS) { if (logger) { logger->Log("Couldn't release kernal\n"); } return false; } } delete[] m_oclKernelComputeSum; } if (m_oclQueue) { for (int32_t k = 0; k < m_ompNumProcessors; ++k) { error = clReleaseCommandQueue(m_oclQueue[k]); if (error != CL_SUCCESS) { if (logger) { logger->Log("Couldn't release queue\n"); } return false; } } delete[] m_oclQueue; } error = clReleaseProgram(m_oclProgram); if (error != CL_SUCCESS) { if (logger) { logger->Log("Couldn't release program\n"); } return false; } error = clReleaseContext(m_oclContext); if (error != CL_SUCCESS) { if (logger) { logger->Log("Couldn't release context\n"); } return false; } return true; #else //CL_VERSION_1_1 return false; #endif //CL_VERSION_1_1 } void VHACD::ComputePrimitiveSet(const Parameters& params) { if (GetCancel()) { return; } m_timer.Tic(); m_stage = "Compute primitive set"; m_operation = "Convert volume to pset"; std::ostringstream msg; if (params.m_logger) { msg << "+ " << m_stage << std::endl; params.m_logger->Log(msg.str().c_str()); } Update(0.0, 0.0, params); if (params.m_mode == 0) { VoxelSet* vset = new VoxelSet; m_volume->Convert(*vset); m_pset = vset; } else { TetrahedronSet* tset = new TetrahedronSet; m_volume->Convert(*tset); m_pset = tset; } delete m_volume; m_volume = 0; if (params.m_logger) { msg.str(""); msg << "\t # primitives " << m_pset->GetNPrimitives() << std::endl; msg << "\t # inside surface " << m_pset->GetNPrimitivesInsideSurf() << std::endl; msg << "\t # on surface " << m_pset->GetNPrimitivesOnSurf() << std::endl; params.m_logger->Log(msg.str().c_str()); } m_overallProgress = 15.0; Update(100.0, 100.0, params); m_timer.Toc(); if (params.m_logger) { msg.str(""); msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl; params.m_logger->Log(msg.str().c_str()); } } bool VHACD::Compute(const double* const points, const uint32_t nPoints, const uint32_t* const triangles,const uint32_t nTriangles, const Parameters& params) { return ComputeACD(points, nPoints, triangles, nTriangles, params); } bool VHACD::Compute(const float* const points,const uint32_t nPoints, const uint32_t* const triangles,const uint32_t nTriangles, const Parameters& 
params) { return ComputeACD(points, nPoints, triangles, nTriangles, params); } double ComputePreferredCuttingDirection(const PrimitiveSet* const tset, Vec3<double>& dir) { double ex = tset->GetEigenValue(AXIS_X); double ey = tset->GetEigenValue(AXIS_Y); double ez = tset->GetEigenValue(AXIS_Z); double vx = (ey - ez) * (ey - ez); double vy = (ex - ez) * (ex - ez); double vz = (ex - ey) * (ex - ey); if (vx < vy && vx < vz) { double e = ey * ey + ez * ez; dir[0] = 1.0; dir[1] = 0.0; dir[2] = 0.0; return (e == 0.0) ? 0.0 : 1.0 - vx / e; } else if (vy < vx && vy < vz) { double e = ex * ex + ez * ez; dir[0] = 0.0; dir[1] = 1.0; dir[2] = 0.0; return (e == 0.0) ? 0.0 : 1.0 - vy / e; } else { double e = ex * ex + ey * ey; dir[0] = 0.0; dir[1] = 0.0; dir[2] = 1.0; return (e == 0.0) ? 0.0 : 1.0 - vz / e; } } void ComputeAxesAlignedClippingPlanes(const VoxelSet& vset, const short downsampling, SArray<Plane>& planes) { const Vec3<short> minV = vset.GetMinBBVoxels(); const Vec3<short> maxV = vset.GetMaxBBVoxels(); Vec3<double> pt; Plane plane; const short i0 = minV[0]; const short i1 = maxV[0]; plane.m_a = 1.0; plane.m_b = 0.0; plane.m_c = 0.0; plane.m_axis = AXIS_X; for (short i = i0; i <= i1; i += downsampling) { pt = vset.GetPoint(Vec3<double>(i + 0.5, 0.0, 0.0)); plane.m_d = -pt[0]; plane.m_index = i; planes.PushBack(plane); } const short j0 = minV[1]; const short j1 = maxV[1]; plane.m_a = 0.0; plane.m_b = 1.0; plane.m_c = 0.0; plane.m_axis = AXIS_Y; for (short j = j0; j <= j1; j += downsampling) { pt = vset.GetPoint(Vec3<double>(0.0, j + 0.5, 0.0)); plane.m_d = -pt[1]; plane.m_index = j; planes.PushBack(plane); } const short k0 = minV[2]; const short k1 = maxV[2]; plane.m_a = 0.0; plane.m_b = 0.0; plane.m_c = 1.0; plane.m_axis = AXIS_Z; for (short k = k0; k <= k1; k += downsampling) { pt = vset.GetPoint(Vec3<double>(0.0, 0.0, k + 0.5)); plane.m_d = -pt[2]; plane.m_index = k; planes.PushBack(plane); } } void ComputeAxesAlignedClippingPlanes(const TetrahedronSet& tset, const short downsampling, SArray<Plane>& planes) { const Vec3<double> minV = tset.GetMinBB(); const Vec3<double> maxV = tset.GetMaxBB(); const double scale = tset.GetSacle(); const short i0 = 0; const short j0 = 0; const short k0 = 0; const short i1 = static_cast<short>((maxV[0] - minV[0]) / scale + 0.5); const short j1 = static_cast<short>((maxV[1] - minV[1]) / scale + 0.5); const short k1 = static_cast<short>((maxV[2] - minV[2]) / scale + 0.5); Plane plane; plane.m_a = 1.0; plane.m_b = 0.0; plane.m_c = 0.0; plane.m_axis = AXIS_X; for (short i = i0; i <= i1; i += downsampling) { double x = minV[0] + scale * i; plane.m_d = -x; plane.m_index = i; planes.PushBack(plane); } plane.m_a = 0.0; plane.m_b = 1.0; plane.m_c = 0.0; plane.m_axis = AXIS_Y; for (short j = j0; j <= j1; j += downsampling) { double y = minV[1] + scale * j; plane.m_d = -y; plane.m_index = j; planes.PushBack(plane); } plane.m_a = 0.0; plane.m_b = 0.0; plane.m_c = 1.0; plane.m_axis = AXIS_Z; for (short k = k0; k <= k1; k += downsampling) { double z = minV[2] + scale * k; plane.m_d = -z; plane.m_index = k; planes.PushBack(plane); } } void RefineAxesAlignedClippingPlanes(const VoxelSet& vset, const Plane& bestPlane, const short downsampling, SArray<Plane>& planes) { const Vec3<short> minV = vset.GetMinBBVoxels(); const Vec3<short> maxV = vset.GetMaxBBVoxels(); Vec3<double> pt; Plane plane; if (bestPlane.m_axis == AXIS_X) { const short i0 = MAX(minV[0], bestPlane.m_index - downsampling); const short i1 = MIN(maxV[0], bestPlane.m_index + downsampling); plane.m_a = 1.0; 
plane.m_b = 0.0; plane.m_c = 0.0; plane.m_axis = AXIS_X; for (short i = i0; i <= i1; ++i) { pt = vset.GetPoint(Vec3<double>(i + 0.5, 0.0, 0.0)); plane.m_d = -pt[0]; plane.m_index = i; planes.PushBack(plane); } } else if (bestPlane.m_axis == AXIS_Y) { const short j0 = MAX(minV[1], bestPlane.m_index - downsampling); const short j1 = MIN(maxV[1], bestPlane.m_index + downsampling); plane.m_a = 0.0; plane.m_b = 1.0; plane.m_c = 0.0; plane.m_axis = AXIS_Y; for (short j = j0; j <= j1; ++j) { pt = vset.GetPoint(Vec3<double>(0.0, j + 0.5, 0.0)); plane.m_d = -pt[1]; plane.m_index = j; planes.PushBack(plane); } } else { const short k0 = MAX(minV[2], bestPlane.m_index - downsampling); const short k1 = MIN(maxV[2], bestPlane.m_index + downsampling); plane.m_a = 0.0; plane.m_b = 0.0; plane.m_c = 1.0; plane.m_axis = AXIS_Z; for (short k = k0; k <= k1; ++k) { pt = vset.GetPoint(Vec3<double>(0.0, 0.0, k + 0.5)); plane.m_d = -pt[2]; plane.m_index = k; planes.PushBack(plane); } } } void RefineAxesAlignedClippingPlanes(const TetrahedronSet& tset, const Plane& bestPlane, const short downsampling, SArray<Plane>& planes) { const Vec3<double> minV = tset.GetMinBB(); const Vec3<double> maxV = tset.GetMaxBB(); const double scale = tset.GetSacle(); Plane plane; if (bestPlane.m_axis == AXIS_X) { const short i0 = MAX(0, bestPlane.m_index - downsampling); const short i1 = static_cast<short>(MIN((maxV[0] - minV[0]) / scale + 0.5, bestPlane.m_index + downsampling)); plane.m_a = 1.0; plane.m_b = 0.0; plane.m_c = 0.0; plane.m_axis = AXIS_X; for (short i = i0; i <= i1; ++i) { double x = minV[0] + scale * i; plane.m_d = -x; plane.m_index = i; planes.PushBack(plane); } } else if (bestPlane.m_axis == AXIS_Y) { const short j0 = MAX(0, bestPlane.m_index - downsampling); const short j1 = static_cast<short>(MIN((maxV[1] - minV[1]) / scale + 0.5, bestPlane.m_index + downsampling)); plane.m_a = 0.0; plane.m_b = 1.0; plane.m_c = 0.0; plane.m_axis = AXIS_Y; for (short j = j0; j <= j1; ++j) { double y = minV[1] + scale * j; plane.m_d = -y; plane.m_index = j; planes.PushBack(plane); } } else { const short k0 = MAX(0, bestPlane.m_index - downsampling); const short k1 = static_cast<short>(MIN((maxV[2] - minV[2]) / scale + 0.5, bestPlane.m_index + downsampling)); plane.m_a = 0.0; plane.m_b = 0.0; plane.m_c = 1.0; plane.m_axis = AXIS_Z; for (short k = k0; k <= k1; ++k) { double z = minV[2] + scale * k; plane.m_d = -z; plane.m_index = k; planes.PushBack(plane); } } } inline double ComputeLocalConcavity(const double volume, const double volumeCH) { return fabs(volumeCH - volume) / volumeCH; } inline double ComputeConcavity(const double volume, const double volumeCH, const double volume0) { return fabs(volumeCH - volume) / volume0; } //#define DEBUG_TEMP void VHACD::ComputeBestClippingPlane(const PrimitiveSet* inputPSet, const double volume, const SArray<Plane>& planes, const Vec3<double>& preferredCuttingDirection, const double w, const double alpha, const double beta, const int32_t convexhullDownsampling, const double progress0, const double progress1, Plane& bestPlane, double& minConcavity, const Parameters& params) { if (GetCancel()) { return; } char msg[256]; size_t nPrimitives = inputPSet->GetNPrimitives(); bool oclAcceleration = (nPrimitives > OCL_MIN_NUM_PRIMITIVES && params.m_oclAcceleration && params.m_mode == 0) ? 
true : false; int32_t iBest = -1; int32_t nPlanes = static_cast<int32_t>(planes.Size()); bool cancel = false; int32_t done = 0; double minTotal = MAX_DOUBLE; double minBalance = MAX_DOUBLE; double minSymmetry = MAX_DOUBLE; minConcavity = MAX_DOUBLE; SArray<Vec3<double> >* chPts = new SArray<Vec3<double> >[2 * m_ompNumProcessors]; Mesh* chs = new Mesh[2 * m_ompNumProcessors]; PrimitiveSet* onSurfacePSet = inputPSet->Create(); inputPSet->SelectOnSurface(onSurfacePSet); PrimitiveSet** psets = 0; if (!params.m_convexhullApproximation) { psets = new PrimitiveSet*[2 * m_ompNumProcessors]; for (int32_t i = 0; i < 2 * m_ompNumProcessors; ++i) { psets[i] = inputPSet->Create(); } } #ifdef CL_VERSION_1_1 // allocate OpenCL data structures cl_mem voxels; cl_mem* partialVolumes = 0; size_t globalSize = 0; size_t nWorkGroups = 0; double unitVolume = 0.0; if (oclAcceleration) { VoxelSet* vset = (VoxelSet*)inputPSet; const Vec3<double> minBB = vset->GetMinBB(); const float fMinBB[4] = { (float)minBB[0], (float)minBB[1], (float)minBB[2], 1.0f }; const float fSclae[4] = { (float)vset->GetScale(), (float)vset->GetScale(), (float)vset->GetScale(), 0.0f }; const int32_t nVoxels = (int32_t)nPrimitives; unitVolume = vset->GetUnitVolume(); nWorkGroups = (nPrimitives + 4 * m_oclWorkGroupSize - 1) / (4 * m_oclWorkGroupSize); globalSize = nWorkGroups * m_oclWorkGroupSize; cl_int error; voxels = clCreateBuffer(m_oclContext, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(Voxel) * nPrimitives, vset->GetVoxels(), &error); if (error != CL_SUCCESS) { if (params.m_logger) { params.m_logger->Log("Couldn't create buffer\n"); } SetCancel(true); } partialVolumes = new cl_mem[m_ompNumProcessors]; for (int32_t i = 0; i < m_ompNumProcessors; ++i) { partialVolumes[i] = clCreateBuffer(m_oclContext, CL_MEM_WRITE_ONLY, sizeof(uint32_t) * 4 * nWorkGroups, NULL, &error); if (error != CL_SUCCESS) { if (params.m_logger) { params.m_logger->Log("Couldn't create buffer\n"); } SetCancel(true); break; } error = clSetKernelArg(m_oclKernelComputePartialVolumes[i], 0, sizeof(cl_mem), &voxels); error |= clSetKernelArg(m_oclKernelComputePartialVolumes[i], 1, sizeof(uint32_t), &nVoxels); error |= clSetKernelArg(m_oclKernelComputePartialVolumes[i], 3, sizeof(float) * 4, fMinBB); error |= clSetKernelArg(m_oclKernelComputePartialVolumes[i], 4, sizeof(float) * 4, &fSclae); error |= clSetKernelArg(m_oclKernelComputePartialVolumes[i], 5, sizeof(uint32_t) * 4 * m_oclWorkGroupSize, NULL); error |= clSetKernelArg(m_oclKernelComputePartialVolumes[i], 6, sizeof(cl_mem), &(partialVolumes[i])); error |= clSetKernelArg(m_oclKernelComputeSum[i], 0, sizeof(cl_mem), &(partialVolumes[i])); error |= clSetKernelArg(m_oclKernelComputeSum[i], 2, sizeof(uint32_t) * 4 * m_oclWorkGroupSize, NULL); if (error != CL_SUCCESS) { if (params.m_logger) { params.m_logger->Log("Couldn't kernel atguments \n"); } SetCancel(true); } } } #else // CL_VERSION_1_1 oclAcceleration = false; #endif // CL_VERSION_1_1 #ifdef DEBUG_TEMP Timer timerComputeCost; timerComputeCost.Tic(); #endif // DEBUG_TEMP #if USE_THREAD == 1 && _OPENMP #pragma omp parallel for #endif for (int32_t x = 0; x < nPlanes; ++x) { int32_t threadID = 0; #if USE_THREAD == 1 && _OPENMP threadID = omp_get_thread_num(); #pragma omp flush(cancel) #endif if (!cancel) { //Update progress if (GetCancel()) { cancel = true; #if USE_THREAD == 1 && _OPENMP #pragma omp flush(cancel) #endif } Plane plane = planes[x]; if (oclAcceleration) { #ifdef CL_VERSION_1_1 const float fPlane[4] = { (float)plane.m_a, (float)plane.m_b, 
(float)plane.m_c, (float)plane.m_d }; cl_int error = clSetKernelArg(m_oclKernelComputePartialVolumes[threadID], 2, sizeof(float) * 4, fPlane); if (error != CL_SUCCESS) { if (params.m_logger) { params.m_logger->Log("Couldn't kernel atguments \n"); } SetCancel(true); } error = clEnqueueNDRangeKernel(m_oclQueue[threadID], m_oclKernelComputePartialVolumes[threadID], 1, NULL, &globalSize, &m_oclWorkGroupSize, 0, NULL, NULL); if (error != CL_SUCCESS) { if (params.m_logger) { params.m_logger->Log("Couldn't run kernel \n"); } SetCancel(true); } int32_t nValues = (int32_t)nWorkGroups; while (nValues > 1) { error = clSetKernelArg(m_oclKernelComputeSum[threadID], 1, sizeof(int32_t), &nValues); if (error != CL_SUCCESS) { if (params.m_logger) { params.m_logger->Log("Couldn't kernel atguments \n"); } SetCancel(true); } size_t nWorkGroups = (nValues + m_oclWorkGroupSize - 1) / m_oclWorkGroupSize; size_t globalSize = nWorkGroups * m_oclWorkGroupSize; error = clEnqueueNDRangeKernel(m_oclQueue[threadID], m_oclKernelComputeSum[threadID], 1, NULL, &globalSize, &m_oclWorkGroupSize, 0, NULL, NULL); if (error != CL_SUCCESS) { if (params.m_logger) { params.m_logger->Log("Couldn't run kernel \n"); } SetCancel(true); } nValues = (int32_t)nWorkGroups; } #endif // CL_VERSION_1_1 } Mesh& leftCH = chs[threadID]; Mesh& rightCH = chs[threadID + m_ompNumProcessors]; rightCH.ResizePoints(0); leftCH.ResizePoints(0); rightCH.ResizeTriangles(0); leftCH.ResizeTriangles(0); // compute convex-hulls #ifdef TEST_APPROX_CH double volumeLeftCH1; double volumeRightCH1; #endif //TEST_APPROX_CH if (params.m_convexhullApproximation) { SArray<Vec3<double> >& leftCHPts = chPts[threadID]; SArray<Vec3<double> >& rightCHPts = chPts[threadID + m_ompNumProcessors]; rightCHPts.Resize(0); leftCHPts.Resize(0); onSurfacePSet->Intersect(plane, &rightCHPts, &leftCHPts, convexhullDownsampling * 32); inputPSet->GetConvexHull().Clip(plane, rightCHPts, leftCHPts); rightCH.ComputeConvexHull((double*)rightCHPts.Data(), rightCHPts.Size()); leftCH.ComputeConvexHull((double*)leftCHPts.Data(), leftCHPts.Size()); #ifdef TEST_APPROX_CH Mesh leftCH1; Mesh rightCH1; VoxelSet right; VoxelSet left; onSurfacePSet->Clip(plane, &right, &left); right.ComputeConvexHull(rightCH1, convexhullDownsampling); left.ComputeConvexHull(leftCH1, convexhullDownsampling); volumeLeftCH1 = leftCH1.ComputeVolume(); volumeRightCH1 = rightCH1.ComputeVolume(); #endif //TEST_APPROX_CH } else { PrimitiveSet* const right = psets[threadID]; PrimitiveSet* const left = psets[threadID + m_ompNumProcessors]; onSurfacePSet->Clip(plane, right, left); right->ComputeConvexHull(rightCH, convexhullDownsampling); left->ComputeConvexHull(leftCH, convexhullDownsampling); } double volumeLeftCH = leftCH.ComputeVolume(); double volumeRightCH = rightCH.ComputeVolume(); // compute clipped volumes double volumeLeft = 0.0; double volumeRight = 0.0; if (oclAcceleration) { #ifdef CL_VERSION_1_1 uint32_t volumes[4]; cl_int error = clEnqueueReadBuffer(m_oclQueue[threadID], partialVolumes[threadID], CL_TRUE, 0, sizeof(uint32_t) * 4, volumes, 0, NULL, NULL); size_t nPrimitivesRight = volumes[0] + volumes[1] + volumes[2] + volumes[3]; size_t nPrimitivesLeft = nPrimitives - nPrimitivesRight; volumeRight = nPrimitivesRight * unitVolume; volumeLeft = nPrimitivesLeft * unitVolume; if (error != CL_SUCCESS) { if (params.m_logger) { params.m_logger->Log("Couldn't read buffer \n"); } SetCancel(true); } #endif // CL_VERSION_1_1 } else { inputPSet->ComputeClippedVolumes(plane, volumeRight, volumeLeft); } double concavityLeft = 
ComputeConcavity(volumeLeft, volumeLeftCH, m_volumeCH0); double concavityRight = ComputeConcavity(volumeRight, volumeRightCH, m_volumeCH0); double concavity = (concavityLeft + concavityRight); // compute cost double balance = alpha * fabs(volumeLeft - volumeRight) / m_volumeCH0; double d = w * (preferredCuttingDirection[0] * plane.m_a + preferredCuttingDirection[1] * plane.m_b + preferredCuttingDirection[2] * plane.m_c); double symmetry = beta * d; double total = concavity + balance + symmetry; #if USE_THREAD == 1 && _OPENMP #pragma omp critical #endif { if (total < minTotal || (total == minTotal && x < iBest)) { minConcavity = concavity; minBalance = balance; minSymmetry = symmetry; bestPlane = plane; minTotal = total; iBest = x; } ++done; if (!(done & 127)) // reduce update frequency { double progress = done * (progress1 - progress0) / nPlanes + progress0; Update(m_stageProgress, progress, params); } } } } #ifdef DEBUG_TEMP timerComputeCost.Toc(); printf_s("Cost[%i] = %f\n", nPlanes, timerComputeCost.GetElapsedTime()); #endif // DEBUG_TEMP #ifdef CL_VERSION_1_1 if (oclAcceleration) { clReleaseMemObject(voxels); for (int32_t i = 0; i < m_ompNumProcessors; ++i) { clReleaseMemObject(partialVolumes[i]); } delete[] partialVolumes; } #endif // CL_VERSION_1_1 if (psets) { for (int32_t i = 0; i < 2 * m_ompNumProcessors; ++i) { delete psets[i]; } delete[] psets; } delete onSurfacePSet; delete[] chPts; delete[] chs; if (params.m_logger) { sprintf(msg, "\n\t\t\t Best %04i T=%2.6f C=%2.6f B=%2.6f S=%2.6f (%1.1f, %1.1f, %1.1f, %3.3f)\n\n", iBest, minTotal, minConcavity, minBalance, minSymmetry, bestPlane.m_a, bestPlane.m_b, bestPlane.m_c, bestPlane.m_d); params.m_logger->Log(msg); } } void VHACD::ComputeACD(const Parameters& params) { if (GetCancel()) { return; } m_timer.Tic(); m_stage = "Approximate Convex Decomposition"; m_stageProgress = 0.0; std::ostringstream msg; if (params.m_logger) { msg << "+ " << m_stage << std::endl; params.m_logger->Log(msg.str().c_str()); } SArray<PrimitiveSet*> parts; SArray<PrimitiveSet*> inputParts; SArray<PrimitiveSet*> temp; inputParts.PushBack(m_pset); m_pset = 0; SArray<Plane> planes; SArray<Plane> planesRef; uint32_t sub = 0; bool firstIteration = true; m_volumeCH0 = 1.0; // Compute the decomposition depth based on the number of convex hulls being requested.. uint32_t hullCount = 2; uint32_t depth = 1; while (params.m_maxConvexHulls > hullCount) { depth++; hullCount *= 2; } // We must always increment the decomposition depth one higher than the maximum number of hulls requested. // The reason for this is as follows. // Say, for example, the user requests 32 convex hulls exactly. This would be a decomposition depth of 5. // However, when we do that, we do *not* necessarily get 32 hulls as a result. This is because, during // the recursive descent of the binary tree, one or more of the leaf nodes may have no concavity and // will not be split. So, in this way, even with a decomposition depth of 5, you can produce fewer than // 32 hulls. So, in this case, we would set the decomposition depth to 6 (producing up to as high as 64 convex hulls). // Then, the merge step which combines over-described hulls down to the user requested amount, we will end up // getting exactly 32 convex hulls as a result. // We could just allow the artist to directly control the decomposition depth directly, but this would be a bit // too complex and the preference is simply to let them specify how many hulls they want and derive the solution // from that. 
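// For example (tracing the loop above): requesting 32 hulls gives hullCount = 2, 4, 8, 16, 32
// and depth = 5; the increment below raises depth to 6, allowing up to 2^6 = 64 leaf hulls,
// which the merge stage later combines back down to the requested 32.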
depth++; while (sub++ < depth && inputParts.Size() > 0 && !m_cancel) { msg.str(""); msg << "Subdivision level " << sub; m_operation = msg.str(); if (params.m_logger) { msg.str(""); msg << "\t Subdivision level " << sub << std::endl; params.m_logger->Log(msg.str().c_str()); } double maxConcavity = 0.0; const size_t nInputParts = inputParts.Size(); Update(m_stageProgress, 0.0, params); for (size_t p = 0; p < nInputParts && !m_cancel; ++p) { const double progress0 = p * 100.0 / nInputParts; const double progress1 = (p + 0.75) * 100.0 / nInputParts; const double progress2 = (p + 1.00) * 100.0 / nInputParts; Update(m_stageProgress, progress0, params); PrimitiveSet* pset = inputParts[p]; inputParts[p] = 0; double volume = pset->ComputeVolume(); pset->ComputeBB(); pset->ComputePrincipalAxes(); if (params.m_pca) { pset->AlignToPrincipalAxes(); } pset->ComputeConvexHull(pset->GetConvexHull()); double volumeCH = fabs(pset->GetConvexHull().ComputeVolume()); if (firstIteration) { m_volumeCH0 = volumeCH; } double concavity = ComputeConcavity(volume, volumeCH, m_volumeCH0); double error = 1.01 * pset->ComputeMaxVolumeError() / m_volumeCH0; if (firstIteration) { firstIteration = false; } if (params.m_logger) { msg.str(""); msg << "\t -> Part[" << p << "] C = " << concavity << ", E = " << error << ", VS = " << pset->GetNPrimitivesOnSurf() << ", VI = " << pset->GetNPrimitivesInsideSurf() << std::endl; params.m_logger->Log(msg.str().c_str()); } if (concavity > params.m_concavity && concavity > error) { Vec3<double> preferredCuttingDirection; double w = ComputePreferredCuttingDirection(pset, preferredCuttingDirection); planes.Resize(0); if (params.m_mode == 0) { VoxelSet* vset = (VoxelSet*)pset; ComputeAxesAlignedClippingPlanes(*vset, params.m_planeDownsampling, planes); } else { TetrahedronSet* tset = (TetrahedronSet*)pset; ComputeAxesAlignedClippingPlanes(*tset, params.m_planeDownsampling, planes); } if (params.m_logger) { msg.str(""); msg << "\t\t [Regular sampling] Number of clipping planes " << planes.Size() << std::endl; params.m_logger->Log(msg.str().c_str()); } Plane bestPlane; double minConcavity = MAX_DOUBLE; ComputeBestClippingPlane(pset, volume, planes, preferredCuttingDirection, w, concavity * params.m_alpha, concavity * params.m_beta, params.m_convexhullDownsampling, progress0, progress1, bestPlane, minConcavity, params); if (!m_cancel && (params.m_planeDownsampling > 1 || params.m_convexhullDownsampling > 1)) { planesRef.Resize(0); if (params.m_mode == 0) { VoxelSet* vset = (VoxelSet*)pset; RefineAxesAlignedClippingPlanes(*vset, bestPlane, params.m_planeDownsampling, planesRef); } else { TetrahedronSet* tset = (TetrahedronSet*)pset; RefineAxesAlignedClippingPlanes(*tset, bestPlane, params.m_planeDownsampling, planesRef); } if (params.m_logger) { msg.str(""); msg << "\t\t [Refining] Number of clipping planes " << planesRef.Size() << std::endl; params.m_logger->Log(msg.str().c_str()); } ComputeBestClippingPlane(pset, volume, planesRef, preferredCuttingDirection, w, concavity * params.m_alpha, concavity * params.m_beta, 1, // convexhullDownsampling = 1 progress1, progress2, bestPlane, minConcavity, params); } if (GetCancel()) { delete pset; // clean up break; } else { if (maxConcavity < minConcavity) { maxConcavity = minConcavity; } PrimitiveSet* bestLeft = pset->Create(); PrimitiveSet* bestRight = pset->Create(); temp.PushBack(bestLeft); temp.PushBack(bestRight); pset->Clip(bestPlane, bestRight, bestLeft); if (params.m_pca) { bestRight->RevertAlignToPrincipalAxes(); 
bestLeft->RevertAlignToPrincipalAxes(); } delete pset; } } else { if (params.m_pca) { pset->RevertAlignToPrincipalAxes(); } parts.PushBack(pset); } } Update(95.0 * (1.0 - maxConcavity) / (1.0 - params.m_concavity), 100.0, params); if (GetCancel()) { const size_t nTempParts = temp.Size(); for (size_t p = 0; p < nTempParts; ++p) { delete temp[p]; } temp.Resize(0); } else { inputParts = temp; temp.Resize(0); } } const size_t nInputParts = inputParts.Size(); for (size_t p = 0; p < nInputParts; ++p) { parts.PushBack(inputParts[p]); } if (GetCancel()) { const size_t nParts = parts.Size(); for (size_t p = 0; p < nParts; ++p) { delete parts[p]; } return; } m_overallProgress = 90.0; Update(m_stageProgress, 100.0, params); msg.str(""); msg << "Generate convex-hulls"; m_operation = msg.str(); size_t nConvexHulls = parts.Size(); if (params.m_logger) { msg.str(""); msg << "+ Generate " << nConvexHulls << " convex-hulls " << std::endl; params.m_logger->Log(msg.str().c_str()); } Update(m_stageProgress, 0.0, params); m_convexHulls.Resize(0); for (size_t p = 0; p < nConvexHulls && !m_cancel; ++p) { Update(m_stageProgress, p * 100.0 / nConvexHulls, params); m_convexHulls.PushBack(new Mesh); parts[p]->ComputeConvexHull(*m_convexHulls[p]); size_t nv = m_convexHulls[p]->GetNPoints(); double x, y, z; for (size_t i = 0; i < nv; ++i) { Vec3<double>& pt = m_convexHulls[p]->GetPoint(i); x = pt[0]; y = pt[1]; z = pt[2]; pt[0] = m_rot[0][0] * x + m_rot[0][1] * y + m_rot[0][2] * z + m_barycenter[0]; pt[1] = m_rot[1][0] * x + m_rot[1][1] * y + m_rot[1][2] * z + m_barycenter[1]; pt[2] = m_rot[2][0] * x + m_rot[2][1] * y + m_rot[2][2] * z + m_barycenter[2]; } } const size_t nParts = parts.Size(); for (size_t p = 0; p < nParts; ++p) { delete parts[p]; parts[p] = 0; } parts.Resize(0); if (GetCancel()) { const size_t nConvexHulls = m_convexHulls.Size(); for (size_t p = 0; p < nConvexHulls; ++p) { delete m_convexHulls[p]; } m_convexHulls.Clear(); return; } m_overallProgress = 95.0; Update(100.0, 100.0, params); m_timer.Toc(); if (params.m_logger) { msg.str(""); msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl; params.m_logger->Log(msg.str().c_str()); } } void AddPoints(const Mesh* const mesh, SArray<Vec3<double> >& pts) { const int32_t n = (int32_t)mesh->GetNPoints(); for (int32_t i = 0; i < n; ++i) { pts.PushBack(mesh->GetPoint(i)); } } void ComputeConvexHull(const Mesh* const ch1, const Mesh* const ch2, SArray<Vec3<double> >& pts, Mesh* const combinedCH) { pts.Resize(0); AddPoints(ch1, pts); AddPoints(ch2, pts); btConvexHullComputer ch; ch.compute((double*)pts.Data(), 3 * sizeof(double), (int32_t)pts.Size(), -1.0, -1.0); combinedCH->ResizePoints(0); combinedCH->ResizeTriangles(0); for (int32_t v = 0; v < ch.vertices.size(); v++) { combinedCH->AddPoint(Vec3<double>(ch.vertices[v].getX(), ch.vertices[v].getY(), ch.vertices[v].getZ())); } const int32_t nt = ch.faces.size(); for (int32_t t = 0; t < nt; ++t) { const btConvexHullComputer::Edge* sourceEdge = &(ch.edges[ch.faces[t]]); int32_t a = sourceEdge->getSourceVertex(); int32_t b = sourceEdge->getTargetVertex(); const btConvexHullComputer::Edge* edge = sourceEdge->getNextEdgeOfFace(); int32_t c = edge->getTargetVertex(); while (c != a) { combinedCH->AddTriangle(Vec3<int32_t>(a, b, c)); edge = edge->getNextEdgeOfFace(); b = c; c = edge->getTargetVertex(); } } } void VHACD::MergeConvexHulls(const Parameters& params) { if (GetCancel()) { return; } m_timer.Tic(); m_stage = "Merge Convex Hulls"; std::ostringstream msg; if (params.m_logger) { msg << "+ " 
<< m_stage << std::endl; params.m_logger->Log(msg.str().c_str()); } // Get the current number of convex hulls size_t nConvexHulls = m_convexHulls.Size(); // Iteration counter int32_t iteration = 0; // While we have more than at least one convex hull and the user has not asked us to cancel the operation if (nConvexHulls > 1 && !m_cancel) { // Get the gamma error threshold for when to exit SArray<Vec3<double> > pts; Mesh combinedCH; // Populate the cost matrix size_t idx = 0; SArray<float> costMatrix; costMatrix.Resize(((nConvexHulls * nConvexHulls) - nConvexHulls) >> 1); for (size_t p1 = 1; p1 < nConvexHulls; ++p1) { const float volume1 = m_convexHulls[p1]->ComputeVolume(); for (size_t p2 = 0; p2 < p1; ++p2) { ComputeConvexHull(m_convexHulls[p1], m_convexHulls[p2], pts, &combinedCH); costMatrix[idx++] = ComputeConcavity(volume1 + m_convexHulls[p2]->ComputeVolume(), combinedCH.ComputeVolume(), m_volumeCH0); } } // Until we cant merge below the maximum cost size_t costSize = m_convexHulls.Size(); while (!m_cancel) { msg.str(""); msg << "Iteration " << iteration++; m_operation = msg.str(); // Search for lowest cost float bestCost = (std::numeric_limits<float>::max)(); const size_t addr = FindMinimumElement(costMatrix.Data(), &bestCost, 0, costMatrix.Size()); if ( (costSize-1) < params.m_maxConvexHulls) { break; } const size_t addrI = (static_cast<int32_t>(sqrt(1 + (8 * addr))) - 1) >> 1; const size_t p1 = addrI + 1; const size_t p2 = addr - ((addrI * (addrI + 1)) >> 1); assert(p1 >= 0); assert(p2 >= 0); assert(p1 < costSize); assert(p2 < costSize); if (params.m_logger) { msg.str(""); msg << "\t\t Merging (" << p1 << ", " << p2 << ") " << bestCost << std::endl << std::endl; params.m_logger->Log(msg.str().c_str()); } // Make the lowest cost row and column into a new hull Mesh* cch = new Mesh; ComputeConvexHull(m_convexHulls[p1], m_convexHulls[p2], pts, cch); delete m_convexHulls[p2]; m_convexHulls[p2] = cch; delete m_convexHulls[p1]; std::swap(m_convexHulls[p1], m_convexHulls[m_convexHulls.Size() - 1]); m_convexHulls.PopBack(); costSize = costSize - 1; // Calculate costs versus the new hull size_t rowIdx = ((p2 - 1) * p2) >> 1; const float volume1 = m_convexHulls[p2]->ComputeVolume(); for (size_t i = 0; (i < p2) && (!m_cancel); ++i) { ComputeConvexHull(m_convexHulls[p2], m_convexHulls[i], pts, &combinedCH); costMatrix[rowIdx++] = ComputeConcavity(volume1 + m_convexHulls[i]->ComputeVolume(), combinedCH.ComputeVolume(), m_volumeCH0); } rowIdx += p2; for (size_t i = p2 + 1; (i < costSize) && (!m_cancel); ++i) { ComputeConvexHull(m_convexHulls[p2], m_convexHulls[i], pts, &combinedCH); costMatrix[rowIdx] = ComputeConcavity(volume1 + m_convexHulls[i]->ComputeVolume(), combinedCH.ComputeVolume(), m_volumeCH0); rowIdx += i; assert(rowIdx >= 0); } // Move the top column in to replace its space const size_t erase_idx = ((costSize - 1) * costSize) >> 1; if (p1 < costSize) { rowIdx = (addrI * p1) >> 1; size_t top_row = erase_idx; for (size_t i = 0; i < p1; ++i) { if (i != p2) { costMatrix[rowIdx] = costMatrix[top_row]; } ++rowIdx; ++top_row; } ++top_row; rowIdx += p1; for (size_t i = p1 + 1; i < (costSize + 1); ++i) { costMatrix[rowIdx] = costMatrix[top_row++]; rowIdx += i; assert(rowIdx >= 0); } } costMatrix.Resize(erase_idx); } } m_overallProgress = 99.0; Update(100.0, 100.0, params); m_timer.Toc(); if (params.m_logger) { msg.str(""); msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl; params.m_logger->Log(msg.str().c_str()); } } void VHACD::SimplifyConvexHull(Mesh* const ch, const 
size_t nvertices, const double minVolume) { if (nvertices <= 4) { return; } ICHull icHull; if (mRaycastMesh) { // We project these points onto the original source mesh to increase precision // The voxelization process drops floating point precision so returned data points are not exactly lying on the // surface of the original source mesh. // The first step is we need to compute the bounding box of the mesh we are trying to build a convex hull for. // From this bounding box, we compute the length of the diagonal to get a relative size and center for point projection uint32_t nPoints = ch->GetNPoints(); Vec3<double> *inputPoints = ch->GetPointsBuffer(); Vec3<double> bmin(inputPoints[0]); Vec3<double> bmax(inputPoints[1]); for (uint32_t i = 1; i < nPoints; i++) { const Vec3<double> &p = inputPoints[i]; p.UpdateMinMax(bmin, bmax); } Vec3<double> center; double diagonalLength = center.GetCenter(bmin, bmax); // Get the center of the bounding box // This is the error threshold for determining if we should use the raycast result data point vs. the voxelized result. double pointDistanceThreshold = diagonalLength * 0.05; // If a new point is within 1/100th the diagonal length of the bounding volume we do not add it. To do so would create a // thin sliver in the resulting convex hull double snapDistanceThreshold = diagonalLength * 0.01; double snapDistanceThresholdSquared = snapDistanceThreshold*snapDistanceThreshold; // Allocate buffer for projected vertices Vec3<double> *outputPoints = new Vec3<double>[nPoints]; uint32_t outCount = 0; for (uint32_t i = 0; i < nPoints; i++) { Vec3<double> &inputPoint = inputPoints[i]; Vec3<double> &outputPoint = outputPoints[outCount]; // Compute the direction vector from the center of this mesh to the vertex Vec3<double> dir = inputPoint - center; // Normalize the direction vector. dir.Normalize(); // Multiply times the diagonal length of the mesh dir *= diagonalLength; // Add the center back in again to get the destination point dir += center; // By default the output point is equal to the input point outputPoint = inputPoint; double pointDistance; if (mRaycastMesh->raycast(center.GetData(), dir.GetData(), inputPoint.GetData(), outputPoint.GetData(),&pointDistance) ) { // If the nearest intersection point is too far away, we keep the original source data point. // Not all points lie directly on the original mesh surface if (pointDistance > pointDistanceThreshold) { outputPoint = inputPoint; } } // Ok, before we add this point, we do not want to create points which are extremely close to each other. // This will result in tiny sliver triangles which are really bad for collision detection. bool foundNearbyPoint = false; for (uint32_t j = 0; j < outCount; j++) { // If this new point is extremely close to an existing point, we do not add it! 
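// (Squared distances are compared against snapDistanceThresholdSquared below to avoid a sqrt per candidate pair.)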
double squaredDistance = outputPoints[j].GetDistanceSquared(outputPoint); if (squaredDistance < snapDistanceThresholdSquared ) { foundNearbyPoint = true; break; } } if (!foundNearbyPoint) { outCount++; } } icHull.AddPoints(outputPoints, outCount); delete[]outputPoints; } else { icHull.AddPoints(ch->GetPointsBuffer(), ch->GetNPoints()); } icHull.Process((uint32_t)nvertices, minVolume); TMMesh& mesh = icHull.GetMesh(); const size_t nT = mesh.GetNTriangles(); const size_t nV = mesh.GetNVertices(); ch->ResizePoints(nV); ch->ResizeTriangles(nT); mesh.GetIFS(ch->GetPointsBuffer(), ch->GetTrianglesBuffer()); } void VHACD::SimplifyConvexHulls(const Parameters& params) { if (m_cancel || params.m_maxNumVerticesPerCH < 4) { return; } m_timer.Tic(); m_stage = "Simplify convex-hulls"; m_operation = "Simplify convex-hulls"; std::ostringstream msg; const size_t nConvexHulls = m_convexHulls.Size(); if (params.m_logger) { msg << "+ Simplify " << nConvexHulls << " convex-hulls " << std::endl; params.m_logger->Log(msg.str().c_str()); } Update(0.0, 0.0, params); for (size_t i = 0; i < nConvexHulls && !m_cancel; ++i) { if (params.m_logger) { msg.str(""); msg << "\t\t Simplify CH[" << std::setfill('0') << std::setw(5) << i << "] " << m_convexHulls[i]->GetNPoints() << " V, " << m_convexHulls[i]->GetNTriangles() << " T" << std::endl; params.m_logger->Log(msg.str().c_str()); } SimplifyConvexHull(m_convexHulls[i], params.m_maxNumVerticesPerCH, m_volumeCH0 * params.m_minVolumePerCH); } m_overallProgress = 100.0; Update(100.0, 100.0, params); m_timer.Toc(); if (params.m_logger) { msg.str(""); msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl; params.m_logger->Log(msg.str().c_str()); } } bool VHACD::ComputeCenterOfMass(double centerOfMass[3]) const { bool ret = false; centerOfMass[0] = 0; centerOfMass[1] = 0; centerOfMass[2] = 0; // Get number of convex hulls in the result uint32_t hullCount = GetNConvexHulls(); if (hullCount) // if we have results { ret = true; double totalVolume = 0; // Initialize the center of mass to zero centerOfMass[0] = 0; centerOfMass[1] = 0; centerOfMass[2] = 0; // Compute the total volume of all convex hulls for (uint32_t i = 0; i < hullCount; i++) { ConvexHull ch; GetConvexHull(i, ch); totalVolume += ch.m_volume; } // compute the reciprocal of the total volume double recipVolume = 1.0 / totalVolume; // Add in the weighted by volume average of the center point of each convex hull for (uint32_t i = 0; i < hullCount; i++) { ConvexHull ch; GetConvexHull(i, ch); double ratio = ch.m_volume*recipVolume; centerOfMass[0] += ch.m_center[0] * ratio; centerOfMass[1] += ch.m_center[1] * ratio; centerOfMass[2] += ch.m_center[2] * ratio; } } return ret; } #pragma warning(disable:4189 4101) // Will analyze the HACD results and compute the constraints solutions. 
// It will analyze the point at which any two convex hulls touch each other and // return the total number of constraint pairs found uint32_t VHACD::ComputeConstraints(void) { mConstraints.clear(); // erase any previous constraint results uint32_t hullCount = GetNConvexHulls(); // get the number of convex hulls in the results if (hullCount == 0) return 0; #if DEBUG_VISUALIZE_CONSTRAINTS gRenderDebug->pushRenderState(); gRenderDebug->setCurrentDisplayTime(10); #endif // We voxelize the convex hull class HullData { public: HullData(void) { FLOAT_MATH::fm_initMinMax(mBmin, mBmax); } ~HullData(void) { FLOAT_MATH::fm_releaseVertexIndex(mVertexIndex); FLOAT_MATH::fm_releaseTesselate(mTesselate); delete[]mIndices; } void computeResolution(void) { mDiagonalDistance = FLOAT_MATH::fm_distance(mBmin, mBmax); mTessellateDistance = mDiagonalDistance / 20; mNearestPointDistance = mDiagonalDistance / 20.0f; mPointResolution = mDiagonalDistance / 100; mVertexIndex = FLOAT_MATH::fm_createVertexIndex(mPointResolution, false); mTesselate = FLOAT_MATH::fm_createTesselate(); } void computeTesselation(void) { mTesselationIndices = mTesselate->tesselate(mVertexIndex, mSourceTriangleCount, mIndices, mTessellateDistance, 6, mTessellateTriangleCount); uint32_t vcount = mVertexIndex->getVcount(); } bool getNearestVert(const double sourcePoint[3], double nearest[3], const HullData &other, double nearestThreshold) { bool ret = false; double nt2 = nearestThreshold*nearestThreshold; uint32_t vcount = other.mVertexIndex->getVcount(); for (uint32_t i = 0; i < vcount; i++) { const double *p = other.mVertexIndex->getVertexDouble(i); double d2 = FLOAT_MATH::fm_distanceSquared(sourcePoint, p); if (d2 < nt2) { nearest[0] = p[0]; nearest[1] = p[1]; nearest[2] = p[2]; nt2 = d2; ret = true; } } return ret; } void findMatchingPoints(const HullData &other) { uint32_t vcount = mVertexIndex->getVcount(); for (uint32_t i = 0; i < vcount; i++) { const double *sourcePoint = mVertexIndex->getVertexDouble(i); double nearestPoint[3]; if (getNearestVert(sourcePoint, nearestPoint, other, mNearestPointDistance)) { #if DEBUG_VISUALIZE_CONSTRAINTS float fp1[3]; float fp2[3]; FLOAT_MATH::fm_doubleToFloat3(sourcePoint, fp1); FLOAT_MATH::fm_doubleToFloat3(nearestPoint, fp2); gRenderDebug->debugRay(fp1, fp2); #endif } } } double mBmin[3]; double mBmax[3]; double mDiagonalDistance; double mTessellateDistance; double mPointResolution; double mNearestPointDistance; uint32_t mSourceTriangleCount{ 0 }; uint32_t mTessellateTriangleCount{ 0 }; uint32_t *mIndices{ nullptr }; FLOAT_MATH::fm_VertexIndex *mVertexIndex{ nullptr }; FLOAT_MATH::fm_Tesselate *mTesselate{ nullptr }; const uint32_t *mTesselationIndices{ nullptr }; }; HullData *hullData = new HullData[hullCount]; for (uint32_t i = 0; i < hullCount; i++) { HullData &hd = hullData[i]; ConvexHull ch; GetConvexHull(i, ch); // Compute the bounding volume of this convex hull for (uint32_t j = 0; j < ch.m_nPoints; j++) { const double *p = &ch.m_points[j * 3]; FLOAT_MATH::fm_minmax(p, hd.mBmin, hd.mBmax); } hd.computeResolution(); // Compute the tessellation resolution uint32_t tcount = ch.m_nTriangles; hd.mSourceTriangleCount = tcount; hd.mIndices = new uint32_t[tcount * 3]; for (uint32_t j = 0; j < tcount; j++) { uint32_t i1 = ch.m_triangles[j * 3 + 0]; uint32_t i2 = ch.m_triangles[j * 3 + 1]; uint32_t i3 = ch.m_triangles[j * 3 + 2]; const double *p1 = &ch.m_points[i1 * 3]; const double *p2 = &ch.m_points[i2 * 3]; const double *p3 = &ch.m_points[i3 * 3]; bool newPos; hd.mIndices[j * 3 + 0] = 
hd.mVertexIndex->getIndex(p1, newPos); hd.mIndices[j * 3 + 1] = hd.mVertexIndex->getIndex(p2, newPos); hd.mIndices[j * 3 + 2] = hd.mVertexIndex->getIndex(p3, newPos); } hd.computeTesselation(); } for (uint32_t i = 0; i < hullCount; i++) { HullData &hd = hullData[i]; // Slightly inflate the bounding box around each convex hull for intersection tests // during the constraint building phase FLOAT_MATH::fm_inflateMinMax(hd.mBmin, hd.mBmax, 0.05f); } // Look for every possible pair of convex hulls as possible constraints for (uint32_t i = 0; i < hullCount; i++) { HullData &hd1 = hullData[i]; for (uint32_t j = i + 1; j < hullCount; j++) { HullData &hd2 = hullData[j]; if (FLOAT_MATH::fm_intersectAABB(hd1.mBmin, hd1.mBmax, hd2.mBmin, hd2.mBmax)) { // ok. if two convex hulls intersect, we are going to find the <n> number of nearest // matching points between them. hd1.findMatchingPoints(hd2); } } } #if DEBUG_VISUALIZE_CONSTRAINTS gRenderDebug->popRenderState(); #endif return uint32_t(mConstraints.size()); } // Returns a pointer to the constraint index; null if the index is not valid or // the user did not previously call 'ComputeConstraints' const VHACD::IVHACD::Constraint *VHACD::GetConstraint(uint32_t index) const { const Constraint *ret = nullptr; if (index < mConstraints.size()) { ret = &mConstraints[index]; } return ret; } } // end of VHACD namespace
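// Illustration only (not part of the original VHACD sources, guarded out of compilation):
// MergeConvexHulls above packs the symmetric pairwise concavity costs into a flat
// lower-triangular array of size (n*n - n)/2, so the cost of pair (p1, p2) with
// p1 > p2 sits at ((p1 - 1) * p1) / 2 + p2, and the flat address of the minimum
// element is mapped back to (p1, p2) via addrI = (sqrt(1 + 8*addr) - 1) / 2.
// A minimal sketch of both directions; packedIndex/unpackIndex are hypothetical
// helper names introduced here for clarity, not VHACD API.
#if 0
#include <cstddef>
#include <cstdint>
#include <cmath>
#include <cassert>

static size_t packedIndex(size_t p1, size_t p2)   // requires p1 > p2
{
    // Row p1 starts after the (p1 - 1) previous rows of lengths 1..(p1 - 1).
    return ((p1 - 1) * p1) / 2 + p2;
}

static void unpackIndex(size_t addr, size_t& p1, size_t& p2)
{
    // Largest addrI with addrI*(addrI+1)/2 <= addr; truncation of sqrt is enough here.
    const size_t addrI = (static_cast<size_t>(std::sqrt(1.0 + 8.0 * static_cast<double>(addr))) - 1) / 2;
    p1 = addrI + 1;
    p2 = addr - (addrI * (addrI + 1)) / 2;
    assert(packedIndex(p1, p2) == addr);
}
#endif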
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/btConvexHullComputer.cpp
/* Copyright (c) 2011 Ole Kniemeyer, MAXON, www.maxon.net This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include <string.h> #include "btAlignedObjectArray.h" #include "btConvexHullComputer.h" #include "btMinMax.h" #include "btVector3.h" #ifdef __GNUC__ #include <stdint.h> #elif defined(_MSC_VER) typedef __int32 int32_t; typedef __int64 int64_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; #else typedef int32_t int32_t; typedef long long int32_t int64_t; typedef uint32_t uint32_t; typedef unsigned long long int32_t uint64_t; #endif #ifdef _MSC_VER #pragma warning(disable:4458) #endif //The definition of USE_X86_64_ASM is moved into the build system. You can enable it manually by commenting out the following lines //#if (defined(__GNUC__) && defined(__x86_64__) && !defined(__ICL)) // || (defined(__ICL) && defined(_M_X64)) bug in Intel compiler, disable inline assembly // #define USE_X86_64_ASM //#endif //#define DEBUG_CONVEX_HULL //#define SHOW_ITERATIONS #if defined(DEBUG_CONVEX_HULL) || defined(SHOW_ITERATIONS) #include <stdio.h> #endif // Convex hull implementation based on Preparata and Hong // Ole Kniemeyer, MAXON Computer GmbH class btConvexHullInternal { public: class Point64 { public: int64_t x; int64_t y; int64_t z; Point64(int64_t x, int64_t y, int64_t z) : x(x) , y(y) , z(z) { } bool isZero() { return (x == 0) && (y == 0) && (z == 0); } int64_t dot(const Point64& b) const { return x * b.x + y * b.y + z * b.z; } }; class Point32 { public: int32_t x; int32_t y; int32_t z; int32_t index; Point32() { } Point32(int32_t x, int32_t y, int32_t z) : x(x) , y(y) , z(z) , index(-1) { } bool operator==(const Point32& b) const { return (x == b.x) && (y == b.y) && (z == b.z); } bool operator!=(const Point32& b) const { return (x != b.x) || (y != b.y) || (z != b.z); } bool isZero() { return (x == 0) && (y == 0) && (z == 0); } Point64 cross(const Point32& b) const { return Point64(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); } Point64 cross(const Point64& b) const { return Point64(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); } int64_t dot(const Point32& b) const { return x * b.x + y * b.y + z * b.z; } int64_t dot(const Point64& b) const { return x * b.x + y * b.y + z * b.z; } Point32 operator+(const Point32& b) const { return Point32(x + b.x, y + b.y, z + b.z); } Point32 operator-(const Point32& b) const { return Point32(x - b.x, y - b.y, z - b.z); } }; class Int128 { public: uint64_t low; uint64_t high; Int128() { } Int128(uint64_t low, uint64_t high) : low(low) , high(high) { } Int128(uint64_t low) : low(low) , high(0) { } Int128(int64_t value) : low(value) , high((value >= 0) ? 
0 : (uint64_t)-1LL) { } static Int128 mul(int64_t a, int64_t b); static Int128 mul(uint64_t a, uint64_t b); Int128 operator-() const { return Int128((uint64_t) - (int64_t)low, ~high + (low == 0)); } Int128 operator+(const Int128& b) const { #ifdef USE_X86_64_ASM Int128 result; __asm__("addq %[bl], %[rl]\n\t" "adcq %[bh], %[rh]\n\t" : [rl] "=r"(result.low), [rh] "=r"(result.high) : "0"(low), "1"(high), [bl] "g"(b.low), [bh] "g"(b.high) : "cc"); return result; #else uint64_t lo = low + b.low; return Int128(lo, high + b.high + (lo < low)); #endif } Int128 operator-(const Int128& b) const { #ifdef USE_X86_64_ASM Int128 result; __asm__("subq %[bl], %[rl]\n\t" "sbbq %[bh], %[rh]\n\t" : [rl] "=r"(result.low), [rh] "=r"(result.high) : "0"(low), "1"(high), [bl] "g"(b.low), [bh] "g"(b.high) : "cc"); return result; #else return *this + -b; #endif } Int128& operator+=(const Int128& b) { #ifdef USE_X86_64_ASM __asm__("addq %[bl], %[rl]\n\t" "adcq %[bh], %[rh]\n\t" : [rl] "=r"(low), [rh] "=r"(high) : "0"(low), "1"(high), [bl] "g"(b.low), [bh] "g"(b.high) : "cc"); #else uint64_t lo = low + b.low; if (lo < low) { ++high; } low = lo; high += b.high; #endif return *this; } Int128& operator++() { if (++low == 0) { ++high; } return *this; } Int128 operator*(int64_t b) const; btScalar toScalar() const { return ((int64_t)high >= 0) ? btScalar(high) * (btScalar(0x100000000LL) * btScalar(0x100000000LL)) + btScalar(low) : -(-*this).toScalar(); } int32_t getSign() const { return ((int64_t)high < 0) ? -1 : (high || low) ? 1 : 0; } bool operator<(const Int128& b) const { return (high < b.high) || ((high == b.high) && (low < b.low)); } int32_t ucmp(const Int128& b) const { if (high < b.high) { return -1; } if (high > b.high) { return 1; } if (low < b.low) { return -1; } if (low > b.low) { return 1; } return 0; } }; class Rational64 { private: uint64_t m_numerator; uint64_t m_denominator; int32_t sign; public: Rational64(int64_t numerator, int64_t denominator) { if (numerator > 0) { sign = 1; m_numerator = (uint64_t)numerator; } else if (numerator < 0) { sign = -1; m_numerator = (uint64_t)-numerator; } else { sign = 0; m_numerator = 0; } if (denominator > 0) { m_denominator = (uint64_t)denominator; } else if (denominator < 0) { sign = -sign; m_denominator = (uint64_t)-denominator; } else { m_denominator = 0; } } bool isNegativeInfinity() const { return (sign < 0) && (m_denominator == 0); } bool isNaN() const { return (sign == 0) && (m_denominator == 0); } int32_t compare(const Rational64& b) const; btScalar toScalar() const { return sign * ((m_denominator == 0) ? SIMD_INFINITY : (btScalar)m_numerator / m_denominator); } }; class Rational128 { private: Int128 numerator; Int128 denominator; int32_t sign; bool isInt64; public: Rational128(int64_t value) { if (value > 0) { sign = 1; this->numerator = value; } else if (value < 0) { sign = -1; this->numerator = -value; } else { sign = 0; this->numerator = (uint64_t)0; } this->denominator = (uint64_t)1; isInt64 = true; } Rational128(const Int128& numerator, const Int128& denominator) { sign = numerator.getSign(); if (sign >= 0) { this->numerator = numerator; } else { this->numerator = -numerator; } int32_t dsign = denominator.getSign(); if (dsign >= 0) { this->denominator = denominator; } else { sign = -sign; this->denominator = -denominator; } isInt64 = false; } int32_t compare(const Rational128& b) const; int32_t compare(int64_t b) const; btScalar toScalar() const { return sign * ((denominator.getSign() == 0) ? 
SIMD_INFINITY : numerator.toScalar() / denominator.toScalar()); } }; class PointR128 { public: Int128 x; Int128 y; Int128 z; Int128 denominator; PointR128() { } PointR128(Int128 x, Int128 y, Int128 z, Int128 denominator) : x(x) , y(y) , z(z) , denominator(denominator) { } btScalar xvalue() const { return x.toScalar() / denominator.toScalar(); } btScalar yvalue() const { return y.toScalar() / denominator.toScalar(); } btScalar zvalue() const { return z.toScalar() / denominator.toScalar(); } }; class Edge; class Face; class Vertex { public: Vertex* next; Vertex* prev; Edge* edges; Face* firstNearbyFace; Face* lastNearbyFace; PointR128 point128; Point32 point; int32_t copy; Vertex() : next(NULL) , prev(NULL) , edges(NULL) , firstNearbyFace(NULL) , lastNearbyFace(NULL) , copy(-1) { } #ifdef DEBUG_CONVEX_HULL void print() { printf("V%d (%d, %d, %d)", point.index, point.x, point.y, point.z); } void printGraph(); #endif Point32 operator-(const Vertex& b) const { return point - b.point; } Rational128 dot(const Point64& b) const { return (point.index >= 0) ? Rational128(point.dot(b)) : Rational128(point128.x * b.x + point128.y * b.y + point128.z * b.z, point128.denominator); } btScalar xvalue() const { return (point.index >= 0) ? btScalar(point.x) : point128.xvalue(); } btScalar yvalue() const { return (point.index >= 0) ? btScalar(point.y) : point128.yvalue(); } btScalar zvalue() const { return (point.index >= 0) ? btScalar(point.z) : point128.zvalue(); } void receiveNearbyFaces(Vertex* src) { if (lastNearbyFace) { lastNearbyFace->nextWithSameNearbyVertex = src->firstNearbyFace; } else { firstNearbyFace = src->firstNearbyFace; } if (src->lastNearbyFace) { lastNearbyFace = src->lastNearbyFace; } for (Face* f = src->firstNearbyFace; f; f = f->nextWithSameNearbyVertex) { btAssert(f->nearbyVertex == src); f->nearbyVertex = this; } src->firstNearbyFace = NULL; src->lastNearbyFace = NULL; } }; class Edge { public: Edge* next; Edge* prev; Edge* reverse; Vertex* target; Face* face; int32_t copy; ~Edge() { next = NULL; prev = NULL; reverse = NULL; target = NULL; face = NULL; } void link(Edge* n) { btAssert(reverse->target == n->reverse->target); next = n; n->prev = this; } #ifdef DEBUG_CONVEX_HULL void print() { printf("E%p : %d -> %d, n=%p p=%p (0 %d\t%d\t%d) -> (%d %d %d)", this, reverse->target->point.index, target->point.index, next, prev, reverse->target->point.x, reverse->target->point.y, reverse->target->point.z, target->point.x, target->point.y, target->point.z); } #endif }; class Face { public: Face* next; Vertex* nearbyVertex; Face* nextWithSameNearbyVertex; Point32 origin; Point32 dir0; Point32 dir1; Face() : next(NULL) , nearbyVertex(NULL) , nextWithSameNearbyVertex(NULL) { } void init(Vertex* a, Vertex* b, Vertex* c) { nearbyVertex = a; origin = a->point; dir0 = *b - *a; dir1 = *c - *a; if (a->lastNearbyFace) { a->lastNearbyFace->nextWithSameNearbyVertex = this; } else { a->firstNearbyFace = this; } a->lastNearbyFace = this; } Point64 getNormal() { return dir0.cross(dir1); } }; template <typename UWord, typename UHWord> class DMul { private: static uint32_t high(uint64_t value) { return (uint32_t)(value >> 32); } static uint32_t low(uint64_t value) { return (uint32_t)value; } static uint64_t mul(uint32_t a, uint32_t b) { return (uint64_t)a * (uint64_t)b; } static void shlHalf(uint64_t& value) { value <<= 32; } static uint64_t high(Int128 value) { return value.high; } static uint64_t low(Int128 value) { return value.low; } static Int128 mul(uint64_t a, uint64_t b) { return Int128::mul(a, b); } 
static void shlHalf(Int128& value) { value.high = value.low; value.low = 0; } public: static void mul(UWord a, UWord b, UWord& resLow, UWord& resHigh) { UWord p00 = mul(low(a), low(b)); UWord p01 = mul(low(a), high(b)); UWord p10 = mul(high(a), low(b)); UWord p11 = mul(high(a), high(b)); UWord p0110 = UWord(low(p01)) + UWord(low(p10)); p11 += high(p01); p11 += high(p10); p11 += high(p0110); shlHalf(p0110); p00 += p0110; if (p00 < p0110) { ++p11; } resLow = p00; resHigh = p11; } }; private: class IntermediateHull { public: Vertex* minXy; Vertex* maxXy; Vertex* minYx; Vertex* maxYx; IntermediateHull() : minXy(NULL) , maxXy(NULL) , minYx(NULL) , maxYx(NULL) { } void print(); }; enum Orientation { NONE, CLOCKWISE, COUNTER_CLOCKWISE }; template <typename T> class PoolArray { private: T* array; int32_t size; public: PoolArray<T>* next; PoolArray(int32_t size) : size(size) , next(NULL) { array = (T*)btAlignedAlloc(sizeof(T) * size, 16); } ~PoolArray() { btAlignedFree(array); } T* init() { T* o = array; for (int32_t i = 0; i < size; i++, o++) { o->next = (i + 1 < size) ? o + 1 : NULL; } return array; } }; template <typename T> class Pool { private: PoolArray<T>* arrays; PoolArray<T>* nextArray; T* freeObjects; int32_t arraySize; public: Pool() : arrays(NULL) , nextArray(NULL) , freeObjects(NULL) , arraySize(256) { } ~Pool() { while (arrays) { PoolArray<T>* p = arrays; arrays = p->next; p->~PoolArray<T>(); btAlignedFree(p); } } void reset() { nextArray = arrays; freeObjects = NULL; } void setArraySize(int32_t arraySize) { this->arraySize = arraySize; } T* newObject() { T* o = freeObjects; if (!o) { PoolArray<T>* p = nextArray; if (p) { nextArray = p->next; } else { p = new (btAlignedAlloc(sizeof(PoolArray<T>), 16)) PoolArray<T>(arraySize); p->next = arrays; arrays = p; } o = p->init(); } freeObjects = o->next; return new (o) T(); }; void freeObject(T* object) { object->~T(); object->next = freeObjects; freeObjects = object; } }; btVector3 scaling; btVector3 center; Pool<Vertex> vertexPool; Pool<Edge> edgePool; Pool<Face> facePool; btAlignedObjectArray<Vertex*> originalVertices; int32_t mergeStamp; int32_t minAxis; int32_t medAxis; int32_t maxAxis; int32_t usedEdgePairs; int32_t maxUsedEdgePairs; static Orientation getOrientation(const Edge* prev, const Edge* next, const Point32& s, const Point32& t); Edge* findMaxAngle(bool ccw, const Vertex* start, const Point32& s, const Point64& rxs, const Point64& sxrxs, Rational64& minCot); void findEdgeForCoplanarFaces(Vertex* c0, Vertex* c1, Edge*& e0, Edge*& e1, Vertex* stop0, Vertex* stop1); Edge* newEdgePair(Vertex* from, Vertex* to); void removeEdgePair(Edge* edge) { Edge* n = edge->next; Edge* r = edge->reverse; btAssert(edge->target && r->target); if (n != edge) { n->prev = edge->prev; edge->prev->next = n; r->target->edges = n; } else { r->target->edges = NULL; } n = r->next; if (n != r) { n->prev = r->prev; r->prev->next = n; edge->target->edges = n; } else { edge->target->edges = NULL; } edgePool.freeObject(edge); edgePool.freeObject(r); usedEdgePairs--; } void computeInternal(int32_t start, int32_t end, IntermediateHull& result); bool mergeProjection(IntermediateHull& h0, IntermediateHull& h1, Vertex*& c0, Vertex*& c1); void merge(IntermediateHull& h0, IntermediateHull& h1); btVector3 toBtVector(const Point32& v); btVector3 getBtNormal(Face* face); bool shiftFace(Face* face, btScalar amount, btAlignedObjectArray<Vertex*> stack); public: Vertex* vertexList; void compute(const void* coords, bool doubleCoords, int32_t stride, int32_t count); btVector3 
getCoordinates(const Vertex* v); btScalar shrink(btScalar amount, btScalar clampAmount); }; btConvexHullInternal::Int128 btConvexHullInternal::Int128::operator*(int64_t b) const { bool negative = (int64_t)high < 0; Int128 a = negative ? -*this : *this; if (b < 0) { negative = !negative; b = -b; } Int128 result = mul(a.low, (uint64_t)b); result.high += a.high * (uint64_t)b; return negative ? -result : result; } btConvexHullInternal::Int128 btConvexHullInternal::Int128::mul(int64_t a, int64_t b) { Int128 result; #ifdef USE_X86_64_ASM __asm__("imulq %[b]" : "=a"(result.low), "=d"(result.high) : "0"(a), [b] "r"(b) : "cc"); return result; #else bool negative = a < 0; if (negative) { a = -a; } if (b < 0) { negative = !negative; b = -b; } DMul<uint64_t, uint32_t>::mul((uint64_t)a, (uint64_t)b, result.low, result.high); return negative ? -result : result; #endif } btConvexHullInternal::Int128 btConvexHullInternal::Int128::mul(uint64_t a, uint64_t b) { Int128 result; #ifdef USE_X86_64_ASM __asm__("mulq %[b]" : "=a"(result.low), "=d"(result.high) : "0"(a), [b] "r"(b) : "cc"); #else DMul<uint64_t, uint32_t>::mul(a, b, result.low, result.high); #endif return result; } int32_t btConvexHullInternal::Rational64::compare(const Rational64& b) const { if (sign != b.sign) { return sign - b.sign; } else if (sign == 0) { return 0; } // return (numerator * b.denominator > b.numerator * denominator) ? sign : (numerator * b.denominator < b.numerator * denominator) ? -sign : 0; #ifdef USE_X86_64_ASM int32_t result; int64_t tmp; int64_t dummy; __asm__("mulq %[bn]\n\t" "movq %%rax, %[tmp]\n\t" "movq %%rdx, %%rbx\n\t" "movq %[tn], %%rax\n\t" "mulq %[bd]\n\t" "subq %[tmp], %%rax\n\t" "sbbq %%rbx, %%rdx\n\t" // rdx:rax contains 128-bit-difference "numerator*b.denominator - b.numerator*denominator" "setnsb %%bh\n\t" // bh=1 if difference is non-negative, bh=0 otherwise "orq %%rdx, %%rax\n\t" "setnzb %%bl\n\t" // bl=1 if difference if non-zero, bl=0 if it is zero "decb %%bh\n\t" // now bx=0x0000 if difference is zero, 0xff01 if it is negative, 0x0001 if it is positive (i.e., same sign as difference) "shll $16, %%ebx\n\t" // ebx has same sign as difference : "=&b"(result), [tmp] "=&r"(tmp), "=a"(dummy) : "a"(denominator), [bn] "g"(b.numerator), [tn] "g"(numerator), [bd] "g"(b.denominator) : "%rdx", "cc"); return result ? result ^ sign // if sign is +1, only bit 0 of result is inverted, which does not change the sign of result (and cannot result in zero) // if sign is -1, all bits of result are inverted, which changes the sign of result (and again cannot result in zero) : 0; #else return sign * Int128::mul(m_numerator, b.m_denominator).ucmp(Int128::mul(m_denominator, b.m_numerator)); #endif } int32_t btConvexHullInternal::Rational128::compare(const Rational128& b) const { if (sign != b.sign) { return sign - b.sign; } else if (sign == 0) { return 0; } if (isInt64) { return -b.compare(sign * (int64_t)numerator.low); } Int128 nbdLow, nbdHigh, dbnLow, dbnHigh; DMul<Int128, uint64_t>::mul(numerator, b.denominator, nbdLow, nbdHigh); DMul<Int128, uint64_t>::mul(denominator, b.numerator, dbnLow, dbnHigh); int32_t cmp = nbdHigh.ucmp(dbnHigh); if (cmp) { return cmp * sign; } return nbdLow.ucmp(dbnLow) * sign; } int32_t btConvexHullInternal::Rational128::compare(int64_t b) const { if (isInt64) { int64_t a = sign * (int64_t)numerator.low; return (a > b) ? 1 : (a < b) ? 
-1 : 0; } if (b > 0) { if (sign <= 0) { return -1; } } else if (b < 0) { if (sign >= 0) { return 1; } b = -b; } else { return sign; } return numerator.ucmp(denominator * b) * sign; } btConvexHullInternal::Edge* btConvexHullInternal::newEdgePair(Vertex* from, Vertex* to) { btAssert(from && to); Edge* e = edgePool.newObject(); Edge* r = edgePool.newObject(); e->reverse = r; r->reverse = e; e->copy = mergeStamp; r->copy = mergeStamp; e->target = to; r->target = from; e->face = NULL; r->face = NULL; usedEdgePairs++; if (usedEdgePairs > maxUsedEdgePairs) { maxUsedEdgePairs = usedEdgePairs; } return e; } bool btConvexHullInternal::mergeProjection(IntermediateHull& h0, IntermediateHull& h1, Vertex*& c0, Vertex*& c1) { Vertex* v0 = h0.maxYx; Vertex* v1 = h1.minYx; if ((v0->point.x == v1->point.x) && (v0->point.y == v1->point.y)) { btAssert(v0->point.z < v1->point.z); Vertex* v1p = v1->prev; if (v1p == v1) { c0 = v0; if (v1->edges) { btAssert(v1->edges->next == v1->edges); v1 = v1->edges->target; btAssert(v1->edges->next == v1->edges); } c1 = v1; return false; } Vertex* v1n = v1->next; v1p->next = v1n; v1n->prev = v1p; if (v1 == h1.minXy) { if ((v1n->point.x < v1p->point.x) || ((v1n->point.x == v1p->point.x) && (v1n->point.y < v1p->point.y))) { h1.minXy = v1n; } else { h1.minXy = v1p; } } if (v1 == h1.maxXy) { if ((v1n->point.x > v1p->point.x) || ((v1n->point.x == v1p->point.x) && (v1n->point.y > v1p->point.y))) { h1.maxXy = v1n; } else { h1.maxXy = v1p; } } } v0 = h0.maxXy; v1 = h1.maxXy; Vertex* v00 = NULL; Vertex* v10 = NULL; int32_t sign = 1; for (int32_t side = 0; side <= 1; side++) { int32_t dx = (v1->point.x - v0->point.x) * sign; if (dx > 0) { while (true) { int32_t dy = v1->point.y - v0->point.y; Vertex* w0 = side ? v0->next : v0->prev; if (w0 != v0) { int32_t dx0 = (w0->point.x - v0->point.x) * sign; int32_t dy0 = w0->point.y - v0->point.y; if ((dy0 <= 0) && ((dx0 == 0) || ((dx0 < 0) && (dy0 * dx <= dy * dx0)))) { v0 = w0; dx = (v1->point.x - v0->point.x) * sign; continue; } } Vertex* w1 = side ? v1->next : v1->prev; if (w1 != v1) { int32_t dx1 = (w1->point.x - v1->point.x) * sign; int32_t dy1 = w1->point.y - v1->point.y; int32_t dxn = (w1->point.x - v0->point.x) * sign; if ((dxn > 0) && (dy1 < 0) && ((dx1 == 0) || ((dx1 < 0) && (dy1 * dx < dy * dx1)))) { v1 = w1; dx = dxn; continue; } } break; } } else if (dx < 0) { while (true) { int32_t dy = v1->point.y - v0->point.y; Vertex* w1 = side ? v1->prev : v1->next; if (w1 != v1) { int32_t dx1 = (w1->point.x - v1->point.x) * sign; int32_t dy1 = w1->point.y - v1->point.y; if ((dy1 >= 0) && ((dx1 == 0) || ((dx1 < 0) && (dy1 * dx <= dy * dx1)))) { v1 = w1; dx = (v1->point.x - v0->point.x) * sign; continue; } } Vertex* w0 = side ? v0->prev : v0->next; if (w0 != v0) { int32_t dx0 = (w0->point.x - v0->point.x) * sign; int32_t dy0 = w0->point.y - v0->point.y; int32_t dxn = (v1->point.x - w0->point.x) * sign; if ((dxn < 0) && (dy0 > 0) && ((dx0 == 0) || ((dx0 < 0) && (dy0 * dx < dy * dx0)))) { v0 = w0; dx = dxn; continue; } } break; } } else { int32_t x = v0->point.x; int32_t y0 = v0->point.y; Vertex* w0 = v0; Vertex* t; while (((t = side ? w0->next : w0->prev) != v0) && (t->point.x == x) && (t->point.y <= y0)) { w0 = t; y0 = t->point.y; } v0 = w0; int32_t y1 = v1->point.y; Vertex* w1 = v1; while (((t = side ? 
w1->prev : w1->next) != v1) && (t->point.x == x) && (t->point.y >= y1)) { w1 = t; y1 = t->point.y; } v1 = w1; } if (side == 0) { v00 = v0; v10 = v1; v0 = h0.minXy; v1 = h1.minXy; sign = -1; } } v0->prev = v1; v1->next = v0; v00->next = v10; v10->prev = v00; if (h1.minXy->point.x < h0.minXy->point.x) { h0.minXy = h1.minXy; } if (h1.maxXy->point.x >= h0.maxXy->point.x) { h0.maxXy = h1.maxXy; } h0.maxYx = h1.maxYx; c0 = v00; c1 = v10; return true; } void btConvexHullInternal::computeInternal(int32_t start, int32_t end, IntermediateHull& result) { int32_t n = end - start; switch (n) { case 0: result.minXy = NULL; result.maxXy = NULL; result.minYx = NULL; result.maxYx = NULL; return; case 2: { Vertex* v = originalVertices[start]; Vertex* w = v + 1; if (v->point != w->point) { int32_t dx = v->point.x - w->point.x; int32_t dy = v->point.y - w->point.y; if ((dx == 0) && (dy == 0)) { if (v->point.z > w->point.z) { Vertex* t = w; w = v; v = t; } btAssert(v->point.z < w->point.z); v->next = v; v->prev = v; result.minXy = v; result.maxXy = v; result.minYx = v; result.maxYx = v; } else { v->next = w; v->prev = w; w->next = v; w->prev = v; if ((dx < 0) || ((dx == 0) && (dy < 0))) { result.minXy = v; result.maxXy = w; } else { result.minXy = w; result.maxXy = v; } if ((dy < 0) || ((dy == 0) && (dx < 0))) { result.minYx = v; result.maxYx = w; } else { result.minYx = w; result.maxYx = v; } } Edge* e = newEdgePair(v, w); e->link(e); v->edges = e; e = e->reverse; e->link(e); w->edges = e; return; } #if defined(__GNUC__) goto fallthrough; // Needed to silence gcc #endif } #if defined(__GNUC__) fallthrough: // Needed to silence gcc #endif // lint -fallthrough case 1: { Vertex* v = originalVertices[start]; v->edges = NULL; v->next = v; v->prev = v; result.minXy = v; result.maxXy = v; result.minYx = v; result.maxYx = v; return; } } int32_t split0 = start + n / 2; Point32 p = originalVertices[split0 - 1]->point; int32_t split1 = split0; while ((split1 < end) && (originalVertices[split1]->point == p)) { split1++; } computeInternal(start, split0, result); IntermediateHull hull1; computeInternal(split1, end, hull1); #ifdef DEBUG_CONVEX_HULL printf("\n\nMerge\n"); result.print(); hull1.print(); #endif merge(result, hull1); #ifdef DEBUG_CONVEX_HULL printf("\n Result\n"); result.print(); #endif } #ifdef DEBUG_CONVEX_HULL void btConvexHullInternal::IntermediateHull::print() { printf(" Hull\n"); for (Vertex* v = minXy; v;) { printf(" "); v->print(); if (v == maxXy) { printf(" maxXy"); } if (v == minYx) { printf(" minYx"); } if (v == maxYx) { printf(" maxYx"); } if (v->next->prev != v) { printf(" Inconsistency"); } printf("\n"); v = v->next; if (v == minXy) { break; } } if (minXy) { minXy->copy = (minXy->copy == -1) ? -2 : -1; minXy->printGraph(); } } void btConvexHullInternal::Vertex::printGraph() { print(); printf("\nEdges\n"); Edge* e = edges; if (e) { do { e->print(); printf("\n"); e = e->next; } while (e != edges); do { Vertex* v = e->target; if (v->copy != copy) { v->copy = copy; v->printGraph(); } e = e->next; } while (e != edges); } } #endif btConvexHullInternal::Orientation btConvexHullInternal::getOrientation(const Edge* prev, const Edge* next, const Point32& s, const Point32& t) { btAssert(prev->reverse->target == next->reverse->target); if (prev->next == next) { if (prev->prev == next) { Point64 n = t.cross(s); Point64 m = (*prev->target - *next->reverse->target).cross(*next->target - *next->reverse->target); btAssert(!m.isZero()); int64_t dot = n.dot(m); btAssert(dot != 0); return (dot > 0) ? 
COUNTER_CLOCKWISE : CLOCKWISE; } return COUNTER_CLOCKWISE; } else if (prev->prev == next) { return CLOCKWISE; } else { return NONE; } } btConvexHullInternal::Edge* btConvexHullInternal::findMaxAngle(bool ccw, const Vertex* start, const Point32& s, const Point64& rxs, const Point64& sxrxs, Rational64& minCot) { Edge* minEdge = NULL; #ifdef DEBUG_CONVEX_HULL printf("find max edge for %d\n", start->point.index); #endif Edge* e = start->edges; if (e) { do { if (e->copy > mergeStamp) { Point32 t = *e->target - *start; Rational64 cot(t.dot(sxrxs), t.dot(rxs)); #ifdef DEBUG_CONVEX_HULL printf(" Angle is %f (%d) for ", (float)btAtan(cot.toScalar()), (int32_t)cot.isNaN()); e->print(); #endif if (cot.isNaN()) { btAssert(ccw ? (t.dot(s) < 0) : (t.dot(s) > 0)); } else { int32_t cmp; if (minEdge == NULL) { minCot = cot; minEdge = e; } else if ((cmp = cot.compare(minCot)) < 0) { minCot = cot; minEdge = e; } else if ((cmp == 0) && (ccw == (getOrientation(minEdge, e, s, t) == COUNTER_CLOCKWISE))) { minEdge = e; } } #ifdef DEBUG_CONVEX_HULL printf("\n"); #endif } e = e->next; } while (e != start->edges); } return minEdge; } void btConvexHullInternal::findEdgeForCoplanarFaces(Vertex* c0, Vertex* c1, Edge*& e0, Edge*& e1, Vertex* stop0, Vertex* stop1) { Edge* start0 = e0; Edge* start1 = e1; Point32 et0 = start0 ? start0->target->point : c0->point; Point32 et1 = start1 ? start1->target->point : c1->point; Point32 s = c1->point - c0->point; Point64 normal = ((start0 ? start0 : start1)->target->point - c0->point).cross(s); int64_t dist = c0->point.dot(normal); btAssert(!start1 || (start1->target->point.dot(normal) == dist)); Point64 perp = s.cross(normal); btAssert(!perp.isZero()); #ifdef DEBUG_CONVEX_HULL printf(" Advancing %d %d (%p %p, %d %d)\n", c0->point.index, c1->point.index, start0, start1, start0 ? start0->target->point.index : -1, start1 ? start1->target->point.index : -1); #endif int64_t maxDot0 = et0.dot(perp); if (e0) { while (e0->target != stop0) { Edge* e = e0->reverse->prev; if (e->target->point.dot(normal) < dist) { break; } btAssert(e->target->point.dot(normal) == dist); if (e->copy == mergeStamp) { break; } int64_t dot = e->target->point.dot(perp); if (dot <= maxDot0) { break; } maxDot0 = dot; e0 = e; et0 = e->target->point; } } int64_t maxDot1 = et1.dot(perp); if (e1) { while (e1->target != stop1) { Edge* e = e1->reverse->next; if (e->target->point.dot(normal) < dist) { break; } btAssert(e->target->point.dot(normal) == dist); if (e->copy == mergeStamp) { break; } int64_t dot = e->target->point.dot(perp); if (dot <= maxDot1) { break; } maxDot1 = dot; e1 = e; et1 = e->target->point; } } #ifdef DEBUG_CONVEX_HULL printf(" Starting at %d %d\n", et0.index, et1.index); #endif int64_t dx = maxDot1 - maxDot0; if (dx > 0) { while (true) { int64_t dy = (et1 - et0).dot(s); if (e0 && (e0->target != stop0)) { Edge* f0 = e0->next->reverse; if (f0->copy > mergeStamp) { int64_t dx0 = (f0->target->point - et0).dot(perp); int64_t dy0 = (f0->target->point - et0).dot(s); if ((dx0 == 0) ? (dy0 < 0) : ((dx0 < 0) && (Rational64(dy0, dx0).compare(Rational64(dy, dx)) >= 0))) { et0 = f0->target->point; dx = (et1 - et0).dot(perp); e0 = (e0 == start0) ? NULL : f0; continue; } } } if (e1 && (e1->target != stop1)) { Edge* f1 = e1->reverse->next; if (f1->copy > mergeStamp) { Point32 d1 = f1->target->point - et1; if (d1.dot(normal) == 0) { int64_t dx1 = d1.dot(perp); int64_t dy1 = d1.dot(s); int64_t dxn = (f1->target->point - et0).dot(perp); if ((dxn > 0) && ((dx1 == 0) ? 
(dy1 < 0) : ((dx1 < 0) && (Rational64(dy1, dx1).compare(Rational64(dy, dx)) > 0)))) { e1 = f1; et1 = e1->target->point; dx = dxn; continue; } } else { btAssert((e1 == start1) && (d1.dot(normal) < 0)); } } } break; } } else if (dx < 0) { while (true) { int64_t dy = (et1 - et0).dot(s); if (e1 && (e1->target != stop1)) { Edge* f1 = e1->prev->reverse; if (f1->copy > mergeStamp) { int64_t dx1 = (f1->target->point - et1).dot(perp); int64_t dy1 = (f1->target->point - et1).dot(s); if ((dx1 == 0) ? (dy1 > 0) : ((dx1 < 0) && (Rational64(dy1, dx1).compare(Rational64(dy, dx)) <= 0))) { et1 = f1->target->point; dx = (et1 - et0).dot(perp); e1 = (e1 == start1) ? NULL : f1; continue; } } } if (e0 && (e0->target != stop0)) { Edge* f0 = e0->reverse->prev; if (f0->copy > mergeStamp) { Point32 d0 = f0->target->point - et0; if (d0.dot(normal) == 0) { int64_t dx0 = d0.dot(perp); int64_t dy0 = d0.dot(s); int64_t dxn = (et1 - f0->target->point).dot(perp); if ((dxn < 0) && ((dx0 == 0) ? (dy0 > 0) : ((dx0 < 0) && (Rational64(dy0, dx0).compare(Rational64(dy, dx)) < 0)))) { e0 = f0; et0 = e0->target->point; dx = dxn; continue; } } else { btAssert((e0 == start0) && (d0.dot(normal) < 0)); } } } break; } } #ifdef DEBUG_CONVEX_HULL printf(" Advanced edges to %d %d\n", et0.index, et1.index); #endif } void btConvexHullInternal::merge(IntermediateHull& h0, IntermediateHull& h1) { if (!h1.maxXy) { return; } if (!h0.maxXy) { h0 = h1; return; } mergeStamp--; Vertex* c0 = NULL; Edge* toPrev0 = NULL; Edge* firstNew0 = NULL; Edge* pendingHead0 = NULL; Edge* pendingTail0 = NULL; Vertex* c1 = NULL; Edge* toPrev1 = NULL; Edge* firstNew1 = NULL; Edge* pendingHead1 = NULL; Edge* pendingTail1 = NULL; Point32 prevPoint; if (mergeProjection(h0, h1, c0, c1)) { Point32 s = *c1 - *c0; Point64 normal = Point32(0, 0, -1).cross(s); Point64 t = s.cross(normal); btAssert(!t.isZero()); Edge* e = c0->edges; Edge* start0 = NULL; if (e) { do { int64_t dot = (*e->target - *c0).dot(normal); btAssert(dot <= 0); if ((dot == 0) && ((*e->target - *c0).dot(t) > 0)) { if (!start0 || (getOrientation(start0, e, s, Point32(0, 0, -1)) == CLOCKWISE)) { start0 = e; } } e = e->next; } while (e != c0->edges); } e = c1->edges; Edge* start1 = NULL; if (e) { do { int64_t dot = (*e->target - *c1).dot(normal); btAssert(dot <= 0); if ((dot == 0) && ((*e->target - *c1).dot(t) > 0)) { if (!start1 || (getOrientation(start1, e, s, Point32(0, 0, -1)) == COUNTER_CLOCKWISE)) { start1 = e; } } e = e->next; } while (e != c1->edges); } if (start0 || start1) { findEdgeForCoplanarFaces(c0, c1, start0, start1, NULL, NULL); if (start0) { c0 = start0->target; } if (start1) { c1 = start1->target; } } prevPoint = c1->point; prevPoint.z++; } else { prevPoint = c1->point; prevPoint.x++; } Vertex* first0 = c0; Vertex* first1 = c1; bool firstRun = true; while (true) { Point32 s = *c1 - *c0; Point32 r = prevPoint - c0->point; Point64 rxs = r.cross(s); Point64 sxrxs = s.cross(rxs); #ifdef DEBUG_CONVEX_HULL printf("\n Checking %d %d\n", c0->point.index, c1->point.index); #endif Rational64 minCot0(0, 0); Edge* min0 = findMaxAngle(false, c0, s, rxs, sxrxs, minCot0); Rational64 minCot1(0, 0); Edge* min1 = findMaxAngle(true, c1, s, rxs, sxrxs, minCot1); if (!min0 && !min1) { Edge* e = newEdgePair(c0, c1); e->link(e); c0->edges = e; e = e->reverse; e->link(e); c1->edges = e; return; } else { int32_t cmp = !min0 ? 1 : !min1 ? -1 : minCot0.compare(minCot1); #ifdef DEBUG_CONVEX_HULL printf(" -> Result %d\n", cmp); #endif if (firstRun || ((cmp >= 0) ? 
!minCot1.isNegativeInfinity() : !minCot0.isNegativeInfinity())) { Edge* e = newEdgePair(c0, c1); if (pendingTail0) { pendingTail0->prev = e; } else { pendingHead0 = e; } e->next = pendingTail0; pendingTail0 = e; e = e->reverse; if (pendingTail1) { pendingTail1->next = e; } else { pendingHead1 = e; } e->prev = pendingTail1; pendingTail1 = e; } Edge* e0 = min0; Edge* e1 = min1; #ifdef DEBUG_CONVEX_HULL printf(" Found min edges to %d %d\n", e0 ? e0->target->point.index : -1, e1 ? e1->target->point.index : -1); #endif if (cmp == 0) { findEdgeForCoplanarFaces(c0, c1, e0, e1, NULL, NULL); } if ((cmp >= 0) && e1) { if (toPrev1) { for (Edge *e = toPrev1->next, *n = NULL; e != min1; e = n) { n = e->next; removeEdgePair(e); } } if (pendingTail1) { if (toPrev1) { toPrev1->link(pendingHead1); } else { min1->prev->link(pendingHead1); firstNew1 = pendingHead1; } pendingTail1->link(min1); pendingHead1 = NULL; pendingTail1 = NULL; } else if (!toPrev1) { firstNew1 = min1; } prevPoint = c1->point; c1 = e1->target; toPrev1 = e1->reverse; } if ((cmp <= 0) && e0) { if (toPrev0) { for (Edge *e = toPrev0->prev, *n = NULL; e != min0; e = n) { n = e->prev; removeEdgePair(e); } } if (pendingTail0) { if (toPrev0) { pendingHead0->link(toPrev0); } else { pendingHead0->link(min0->next); firstNew0 = pendingHead0; } min0->link(pendingTail0); pendingHead0 = NULL; pendingTail0 = NULL; } else if (!toPrev0) { firstNew0 = min0; } prevPoint = c0->point; c0 = e0->target; toPrev0 = e0->reverse; } } if ((c0 == first0) && (c1 == first1)) { if (toPrev0 == NULL) { pendingHead0->link(pendingTail0); c0->edges = pendingTail0; } else { for (Edge *e = toPrev0->prev, *n = NULL; e != firstNew0; e = n) { n = e->prev; removeEdgePair(e); } if (pendingTail0) { pendingHead0->link(toPrev0); firstNew0->link(pendingTail0); } } if (toPrev1 == NULL) { pendingTail1->link(pendingHead1); c1->edges = pendingTail1; } else { for (Edge *e = toPrev1->next, *n = NULL; e != firstNew1; e = n) { n = e->next; removeEdgePair(e); } if (pendingTail1) { toPrev1->link(pendingHead1); pendingTail1->link(firstNew1); } } return; } firstRun = false; } } static bool pointCmp(const btConvexHullInternal::Point32& p, const btConvexHullInternal::Point32& q) { return (p.y < q.y) || ((p.y == q.y) && ((p.x < q.x) || ((p.x == q.x) && (p.z < q.z)))); } void btConvexHullInternal::compute(const void* coords, bool doubleCoords, int32_t stride, int32_t count) { btVector3 min(btScalar(1e30), btScalar(1e30), btScalar(1e30)), max(btScalar(-1e30), btScalar(-1e30), btScalar(-1e30)); const char* ptr = (const char*)coords; if (doubleCoords) { for (int32_t i = 0; i < count; i++) { const double* v = (const double*)ptr; btVector3 p((btScalar)v[0], (btScalar)v[1], (btScalar)v[2]); ptr += stride; min.setMin(p); max.setMax(p); } } else { for (int32_t i = 0; i < count; i++) { const float* v = (const float*)ptr; btVector3 p(v[0], v[1], v[2]); ptr += stride; min.setMin(p); max.setMax(p); } } btVector3 s = max - min; maxAxis = s.maxAxis(); minAxis = s.minAxis(); if (minAxis == maxAxis) { minAxis = (maxAxis + 1) % 3; } medAxis = 3 - maxAxis - minAxis; s /= btScalar(10216); if (((medAxis + 1) % 3) != maxAxis) { s *= -1; } scaling = s; if (s[0] != 0) { s[0] = btScalar(1) / s[0]; } if (s[1] != 0) { s[1] = btScalar(1) / s[1]; } if (s[2] != 0) { s[2] = btScalar(1) / s[2]; } center = (min + max) * btScalar(0.5); btAlignedObjectArray<Point32> points; points.resize(count); ptr = (const char*)coords; if (doubleCoords) { for (int32_t i = 0; i < count; i++) { const double* v = (const double*)ptr; btVector3 
p((btScalar)v[0], (btScalar)v[1], (btScalar)v[2]); ptr += stride; p = (p - center) * s; points[i].x = (int32_t)p[medAxis]; points[i].y = (int32_t)p[maxAxis]; points[i].z = (int32_t)p[minAxis]; points[i].index = i; } } else { for (int32_t i = 0; i < count; i++) { const float* v = (const float*)ptr; btVector3 p(v[0], v[1], v[2]); ptr += stride; p = (p - center) * s; points[i].x = (int32_t)p[medAxis]; points[i].y = (int32_t)p[maxAxis]; points[i].z = (int32_t)p[minAxis]; points[i].index = i; } } points.quickSort(pointCmp); vertexPool.reset(); vertexPool.setArraySize(count); originalVertices.resize(count); for (int32_t i = 0; i < count; i++) { Vertex* v = vertexPool.newObject(); v->edges = NULL; v->point = points[i]; v->copy = -1; originalVertices[i] = v; } points.clear(); edgePool.reset(); edgePool.setArraySize(6 * count); usedEdgePairs = 0; maxUsedEdgePairs = 0; mergeStamp = -3; IntermediateHull hull; computeInternal(0, count, hull); vertexList = hull.minXy; #ifdef DEBUG_CONVEX_HULL printf("max. edges %d (3v = %d)", maxUsedEdgePairs, 3 * count); #endif } btVector3 btConvexHullInternal::toBtVector(const Point32& v) { btVector3 p; p[medAxis] = btScalar(v.x); p[maxAxis] = btScalar(v.y); p[minAxis] = btScalar(v.z); return p * scaling; } btVector3 btConvexHullInternal::getBtNormal(Face* face) { return toBtVector(face->dir0).cross(toBtVector(face->dir1)).normalized(); } btVector3 btConvexHullInternal::getCoordinates(const Vertex* v) { btVector3 p; p[medAxis] = v->xvalue(); p[maxAxis] = v->yvalue(); p[minAxis] = v->zvalue(); return p * scaling + center; } btScalar btConvexHullInternal::shrink(btScalar amount, btScalar clampAmount) { if (!vertexList) { return 0; } int32_t stamp = --mergeStamp; btAlignedObjectArray<Vertex*> stack; vertexList->copy = stamp; stack.push_back(vertexList); btAlignedObjectArray<Face*> faces; Point32 ref = vertexList->point; Int128 hullCenterX(0, 0); Int128 hullCenterY(0, 0); Int128 hullCenterZ(0, 0); Int128 volume(0, 0); while (stack.size() > 0) { Vertex* v = stack[stack.size() - 1]; stack.pop_back(); Edge* e = v->edges; if (e) { do { if (e->target->copy != stamp) { e->target->copy = stamp; stack.push_back(e->target); } if (e->copy != stamp) { Face* face = facePool.newObject(); face->init(e->target, e->reverse->prev->target, v); faces.push_back(face); Edge* f = e; Vertex* a = NULL; Vertex* b = NULL; do { if (a && b) { int64_t vol = (v->point - ref).dot((a->point - ref).cross(b->point - ref)); btAssert(vol >= 0); Point32 c = v->point + a->point + b->point + ref; hullCenterX += vol * c.x; hullCenterY += vol * c.y; hullCenterZ += vol * c.z; volume += vol; } btAssert(f->copy != stamp); f->copy = stamp; f->face = face; a = b; b = f->target; f = f->reverse->prev; } while (f != e); } e = e->next; } while (e != v->edges); } } if (volume.getSign() <= 0) { return 0; } btVector3 hullCenter; hullCenter[medAxis] = hullCenterX.toScalar(); hullCenter[maxAxis] = hullCenterY.toScalar(); hullCenter[minAxis] = hullCenterZ.toScalar(); hullCenter /= 4 * volume.toScalar(); hullCenter *= scaling; int32_t faceCount = faces.size(); if (clampAmount > 0) { btScalar minDist = SIMD_INFINITY; for (int32_t i = 0; i < faceCount; i++) { btVector3 normal = getBtNormal(faces[i]); btScalar dist = normal.dot(toBtVector(faces[i]->origin) - hullCenter); if (dist < minDist) { minDist = dist; } } if (minDist <= 0) { return 0; } amount = btMin(amount, minDist * clampAmount); } uint32_t seed = 243703; for (int32_t i = 0; i < faceCount; i++, seed = 1664525 * seed + 1013904223) { btSwap(faces[i], faces[seed % 
faceCount]); } for (int32_t i = 0; i < faceCount; i++) { if (!shiftFace(faces[i], amount, stack)) { return -amount; } } return amount; } bool btConvexHullInternal::shiftFace(Face* face, btScalar amount, btAlignedObjectArray<Vertex*> stack) { btVector3 origShift = getBtNormal(face) * -amount; if (scaling[0] != 0) { origShift[0] /= scaling[0]; } if (scaling[1] != 0) { origShift[1] /= scaling[1]; } if (scaling[2] != 0) { origShift[2] /= scaling[2]; } Point32 shift((int32_t)origShift[medAxis], (int32_t)origShift[maxAxis], (int32_t)origShift[minAxis]); if (shift.isZero()) { return true; } Point64 normal = face->getNormal(); #ifdef DEBUG_CONVEX_HULL printf("\nShrinking face (%d %d %d) (%d %d %d) (%d %d %d) by (%d %d %d)\n", face->origin.x, face->origin.y, face->origin.z, face->dir0.x, face->dir0.y, face->dir0.z, face->dir1.x, face->dir1.y, face->dir1.z, shift.x, shift.y, shift.z); #endif int64_t origDot = face->origin.dot(normal); Point32 shiftedOrigin = face->origin + shift; int64_t shiftedDot = shiftedOrigin.dot(normal); btAssert(shiftedDot <= origDot); if (shiftedDot >= origDot) { return false; } Edge* intersection = NULL; Edge* startEdge = face->nearbyVertex->edges; #ifdef DEBUG_CONVEX_HULL printf("Start edge is "); startEdge->print(); printf(", normal is (%lld %lld %lld), shifted dot is %lld\n", normal.x, normal.y, normal.z, shiftedDot); #endif Rational128 optDot = face->nearbyVertex->dot(normal); int32_t cmp = optDot.compare(shiftedDot); #ifdef SHOW_ITERATIONS int32_t n = 0; #endif if (cmp >= 0) { Edge* e = startEdge; do { #ifdef SHOW_ITERATIONS n++; #endif Rational128 dot = e->target->dot(normal); btAssert(dot.compare(origDot) <= 0); #ifdef DEBUG_CONVEX_HULL printf("Moving downwards, edge is "); e->print(); printf(", dot is %f (%f %lld)\n", (float)dot.toScalar(), (float)optDot.toScalar(), shiftedDot); #endif if (dot.compare(optDot) < 0) { int32_t c = dot.compare(shiftedDot); optDot = dot; e = e->reverse; startEdge = e; if (c < 0) { intersection = e; break; } cmp = c; } e = e->prev; } while (e != startEdge); if (!intersection) { return false; } } else { Edge* e = startEdge; do { #ifdef SHOW_ITERATIONS n++; #endif Rational128 dot = e->target->dot(normal); btAssert(dot.compare(origDot) <= 0); #ifdef DEBUG_CONVEX_HULL printf("Moving upwards, edge is "); e->print(); printf(", dot is %f (%f %lld)\n", (float)dot.toScalar(), (float)optDot.toScalar(), shiftedDot); #endif if (dot.compare(optDot) > 0) { cmp = dot.compare(shiftedDot); if (cmp >= 0) { intersection = e; break; } optDot = dot; e = e->reverse; startEdge = e; } e = e->prev; } while (e != startEdge); if (!intersection) { return true; } } #ifdef SHOW_ITERATIONS printf("Needed %d iterations to find initial intersection\n", n); #endif if (cmp == 0) { Edge* e = intersection->reverse->next; #ifdef SHOW_ITERATIONS n = 0; #endif while (e->target->dot(normal).compare(shiftedDot) <= 0) { #ifdef SHOW_ITERATIONS n++; #endif e = e->next; if (e == intersection->reverse) { return true; } #ifdef DEBUG_CONVEX_HULL printf("Checking for outwards edge, current edge is "); e->print(); printf("\n"); #endif } #ifdef SHOW_ITERATIONS printf("Needed %d iterations to check for complete containment\n", n); #endif } Edge* firstIntersection = NULL; Edge* faceEdge = NULL; Edge* firstFaceEdge = NULL; #ifdef SHOW_ITERATIONS int32_t m = 0; #endif while (true) { #ifdef SHOW_ITERATIONS m++; #endif #ifdef DEBUG_CONVEX_HULL printf("Intersecting edge is "); intersection->print(); printf("\n"); #endif if (cmp == 0) { Edge* e = intersection->reverse->next; startEdge = e; #ifdef 
SHOW_ITERATIONS n = 0; #endif while (true) { #ifdef SHOW_ITERATIONS n++; #endif if (e->target->dot(normal).compare(shiftedDot) >= 0) { break; } intersection = e->reverse; e = e->next; if (e == startEdge) { return true; } } #ifdef SHOW_ITERATIONS printf("Needed %d iterations to advance intersection\n", n); #endif } #ifdef DEBUG_CONVEX_HULL printf("Advanced intersecting edge to "); intersection->print(); printf(", cmp = %d\n", cmp); #endif if (!firstIntersection) { firstIntersection = intersection; } else if (intersection == firstIntersection) { break; } int32_t prevCmp = cmp; Edge* prevIntersection = intersection; Edge* prevFaceEdge = faceEdge; Edge* e = intersection->reverse; #ifdef SHOW_ITERATIONS n = 0; #endif while (true) { #ifdef SHOW_ITERATIONS n++; #endif e = e->reverse->prev; btAssert(e != intersection->reverse); cmp = e->target->dot(normal).compare(shiftedDot); #ifdef DEBUG_CONVEX_HULL printf("Testing edge "); e->print(); printf(" -> cmp = %d\n", cmp); #endif if (cmp >= 0) { intersection = e; break; } } #ifdef SHOW_ITERATIONS printf("Needed %d iterations to find other intersection of face\n", n); #endif if (cmp > 0) { Vertex* removed = intersection->target; e = intersection->reverse; if (e->prev == e) { removed->edges = NULL; } else { removed->edges = e->prev; e->prev->link(e->next); e->link(e); } #ifdef DEBUG_CONVEX_HULL printf("1: Removed part contains (%d %d %d)\n", removed->point.x, removed->point.y, removed->point.z); #endif Point64 n0 = intersection->face->getNormal(); Point64 n1 = intersection->reverse->face->getNormal(); int64_t m00 = face->dir0.dot(n0); int64_t m01 = face->dir1.dot(n0); int64_t m10 = face->dir0.dot(n1); int64_t m11 = face->dir1.dot(n1); int64_t r0 = (intersection->face->origin - shiftedOrigin).dot(n0); int64_t r1 = (intersection->reverse->face->origin - shiftedOrigin).dot(n1); Int128 det = Int128::mul(m00, m11) - Int128::mul(m01, m10); btAssert(det.getSign() != 0); Vertex* v = vertexPool.newObject(); v->point.index = -1; v->copy = -1; v->point128 = PointR128(Int128::mul(face->dir0.x * r0, m11) - Int128::mul(face->dir0.x * r1, m01) + Int128::mul(face->dir1.x * r1, m00) - Int128::mul(face->dir1.x * r0, m10) + det * shiftedOrigin.x, Int128::mul(face->dir0.y * r0, m11) - Int128::mul(face->dir0.y * r1, m01) + Int128::mul(face->dir1.y * r1, m00) - Int128::mul(face->dir1.y * r0, m10) + det * shiftedOrigin.y, Int128::mul(face->dir0.z * r0, m11) - Int128::mul(face->dir0.z * r1, m01) + Int128::mul(face->dir1.z * r1, m00) - Int128::mul(face->dir1.z * r0, m10) + det * shiftedOrigin.z, det); v->point.x = (int32_t)v->point128.xvalue(); v->point.y = (int32_t)v->point128.yvalue(); v->point.z = (int32_t)v->point128.zvalue(); intersection->target = v; v->edges = e; stack.push_back(v); stack.push_back(removed); stack.push_back(NULL); } if (cmp || prevCmp || (prevIntersection->reverse->next->target != intersection->target)) { faceEdge = newEdgePair(prevIntersection->target, intersection->target); if (prevCmp == 0) { faceEdge->link(prevIntersection->reverse->next); } if ((prevCmp == 0) || prevFaceEdge) { prevIntersection->reverse->link(faceEdge); } if (cmp == 0) { intersection->reverse->prev->link(faceEdge->reverse); } faceEdge->reverse->link(intersection->reverse); } else { faceEdge = prevIntersection->reverse->next; } if (prevFaceEdge) { if (prevCmp > 0) { faceEdge->link(prevFaceEdge->reverse); } else if (faceEdge != prevFaceEdge->reverse) { stack.push_back(prevFaceEdge->target); while (faceEdge->next != prevFaceEdge->reverse) { Vertex* removed = faceEdge->next->target; 
removeEdgePair(faceEdge->next); stack.push_back(removed); #ifdef DEBUG_CONVEX_HULL printf("2: Removed part contains (%d %d %d)\n", removed->point.x, removed->point.y, removed->point.z); #endif } stack.push_back(NULL); } } faceEdge->face = face; faceEdge->reverse->face = intersection->face; if (!firstFaceEdge) { firstFaceEdge = faceEdge; } } #ifdef SHOW_ITERATIONS printf("Needed %d iterations to process all intersections\n", m); #endif if (cmp > 0) { firstFaceEdge->reverse->target = faceEdge->target; firstIntersection->reverse->link(firstFaceEdge); firstFaceEdge->link(faceEdge->reverse); } else if (firstFaceEdge != faceEdge->reverse) { stack.push_back(faceEdge->target); while (firstFaceEdge->next != faceEdge->reverse) { Vertex* removed = firstFaceEdge->next->target; removeEdgePair(firstFaceEdge->next); stack.push_back(removed); #ifdef DEBUG_CONVEX_HULL printf("3: Removed part contains (%d %d %d)\n", removed->point.x, removed->point.y, removed->point.z); #endif } stack.push_back(NULL); } btAssert(stack.size() > 0); vertexList = stack[0]; #ifdef DEBUG_CONVEX_HULL printf("Removing part\n"); #endif #ifdef SHOW_ITERATIONS n = 0; #endif int32_t pos = 0; while (pos < stack.size()) { int32_t end = stack.size(); while (pos < end) { Vertex* kept = stack[pos++]; #ifdef DEBUG_CONVEX_HULL kept->print(); #endif bool deeper = false; Vertex* removed; while ((removed = stack[pos++]) != NULL) { #ifdef SHOW_ITERATIONS n++; #endif kept->receiveNearbyFaces(removed); while (removed->edges) { if (!deeper) { deeper = true; stack.push_back(kept); } stack.push_back(removed->edges->target); removeEdgePair(removed->edges); } } if (deeper) { stack.push_back(NULL); } } } #ifdef SHOW_ITERATIONS printf("Needed %d iterations to remove part\n", n); #endif stack.resize(0); face->origin = shiftedOrigin; return true; } static int32_t getVertexCopy(btConvexHullInternal::Vertex* vertex, btAlignedObjectArray<btConvexHullInternal::Vertex*>& vertices) { int32_t index = vertex->copy; if (index < 0) { index = vertices.size(); vertex->copy = index; vertices.push_back(vertex); #ifdef DEBUG_CONVEX_HULL printf("Vertex %d gets index *%d\n", vertex->point.index, index); #endif } return index; } btScalar btConvexHullComputer::compute(const void* coords, bool doubleCoords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp) { if (count <= 0) { vertices.clear(); edges.clear(); faces.clear(); return 0; } btConvexHullInternal hull; hull.compute(coords, doubleCoords, stride, count); btScalar shift = 0; if ((shrink > 0) && ((shift = hull.shrink(shrink, shrinkClamp)) < 0)) { vertices.clear(); edges.clear(); faces.clear(); return shift; } vertices.resize(0); edges.resize(0); faces.resize(0); btAlignedObjectArray<btConvexHullInternal::Vertex*> oldVertices; getVertexCopy(hull.vertexList, oldVertices); int32_t copied = 0; while (copied < oldVertices.size()) { btConvexHullInternal::Vertex* v = oldVertices[copied]; vertices.push_back(hull.getCoordinates(v)); btConvexHullInternal::Edge* firstEdge = v->edges; if (firstEdge) { int32_t firstCopy = -1; int32_t prevCopy = -1; btConvexHullInternal::Edge* e = firstEdge; do { if (e->copy < 0) { int32_t s = edges.size(); edges.push_back(Edge()); edges.push_back(Edge()); Edge* c = &edges[s]; Edge* r = &edges[s + 1]; e->copy = s; e->reverse->copy = s + 1; c->reverse = 1; r->reverse = -1; c->targetVertex = getVertexCopy(e->target, oldVertices); r->targetVertex = copied; #ifdef DEBUG_CONVEX_HULL printf(" CREATE: Vertex *%d has edge to *%d\n", copied, c->getTargetVertex()); #endif } if (prevCopy >= 
0) { edges[e->copy].next = prevCopy - e->copy; } else { firstCopy = e->copy; } prevCopy = e->copy; e = e->next; } while (e != firstEdge); edges[firstCopy].next = prevCopy - firstCopy; } copied++; } for (int32_t i = 0; i < copied; i++) { btConvexHullInternal::Vertex* v = oldVertices[i]; btConvexHullInternal::Edge* firstEdge = v->edges; if (firstEdge) { btConvexHullInternal::Edge* e = firstEdge; do { if (e->copy >= 0) { #ifdef DEBUG_CONVEX_HULL printf("Vertex *%d has edge to *%d\n", i, edges[e->copy].getTargetVertex()); #endif faces.push_back(e->copy); btConvexHullInternal::Edge* f = e; do { #ifdef DEBUG_CONVEX_HULL printf(" Face *%d\n", edges[f->copy].getTargetVertex()); #endif f->copy = -1; f = f->reverse->prev; } while (f != e); } e = e->next; } while (e != firstEdge); } } return shift; }
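// Illustration only (not part of the original Bullet/VHACD sources, guarded out of compilation):
// Rational64::compare above decides a/b <=> c/d exactly by cross-multiplying the
// 64-bit numerators and denominators into 128-bit products (Int128::mul + ucmp),
// avoiding any floating-point rounding in the hull's slope comparisons. A compact
// equivalent sketch using the GCC/Clang __int128 extension; the original rolls its
// own Int128/DMul for portability, and compareNonNegativeRationals is a hypothetical
// helper name, not part of the library.
#if 0
#include <cstdint>

static int compareNonNegativeRationals(uint64_t aNum, uint64_t aDen,
                                        uint64_t bNum, uint64_t bDen)
{
    // a/b <=> c/d  iff  a*d <=> c*b, assuming both denominators are positive.
    unsigned __int128 lhs = (unsigned __int128)aNum * bDen;
    unsigned __int128 rhs = (unsigned __int128)bNum * aDen;
    return (lhs < rhs) ? -1 : (lhs > rhs) ? 1 : 0;
}
#endif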
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/vhacdManifoldMesh.cpp
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "vhacdManifoldMesh.h" namespace VHACD { TMMVertex::TMMVertex(void) { Initialize(); } void TMMVertex::Initialize() { m_name = 0; m_id = 0; m_duplicate = 0; m_onHull = false; m_tag = false; } TMMVertex::~TMMVertex(void) { } TMMEdge::TMMEdge(void) { Initialize(); } void TMMEdge::Initialize() { m_id = 0; m_triangles[0] = m_triangles[1] = m_newFace = 0; m_vertices[0] = m_vertices[1] = 0; } TMMEdge::~TMMEdge(void) { } void TMMTriangle::Initialize() { m_id = 0; for (int32_t i = 0; i < 3; i++) { m_edges[i] = 0; m_vertices[0] = 0; } m_visible = false; } TMMTriangle::TMMTriangle(void) { Initialize(); } TMMTriangle::~TMMTriangle(void) { } TMMesh::TMMesh() { } TMMesh::~TMMesh(void) { } void TMMesh::GetIFS(Vec3<double>* const points, Vec3<int32_t>* const triangles) { size_t nV = m_vertices.GetSize(); size_t nT = m_triangles.GetSize(); for (size_t v = 0; v < nV; v++) { points[v] = m_vertices.GetData().m_pos; m_vertices.GetData().m_id = v; m_vertices.Next(); } for (size_t f = 0; f < nT; f++) { TMMTriangle& currentTriangle = m_triangles.GetData(); triangles[f].X() = static_cast<int32_t>(currentTriangle.m_vertices[0]->GetData().m_id); triangles[f].Y() = static_cast<int32_t>(currentTriangle.m_vertices[1]->GetData().m_id); triangles[f].Z() = static_cast<int32_t>(currentTriangle.m_vertices[2]->GetData().m_id); m_triangles.Next(); } } void TMMesh::Clear() { m_vertices.Clear(); m_edges.Clear(); m_triangles.Clear(); } void TMMesh::Copy(TMMesh& mesh) { Clear(); // updating the id's size_t nV = mesh.m_vertices.GetSize(); size_t nE = mesh.m_edges.GetSize(); size_t nT = mesh.m_triangles.GetSize(); for (size_t v = 0; v < nV; v++) { mesh.m_vertices.GetData().m_id = v; mesh.m_vertices.Next(); } for (size_t e = 0; e < nE; e++) { mesh.m_edges.GetData().m_id = e; mesh.m_edges.Next(); } for (size_t f = 0; f < nT; f++) { mesh.m_triangles.GetData().m_id = f; mesh.m_triangles.Next(); } // copying data m_vertices = mesh.m_vertices; m_edges = mesh.m_edges; m_triangles = mesh.m_triangles; // generate mapping CircularListElement<TMMVertex>** vertexMap = new 
CircularListElement<TMMVertex>*[nV]; CircularListElement<TMMEdge>** edgeMap = new CircularListElement<TMMEdge>*[nE]; CircularListElement<TMMTriangle>** triangleMap = new CircularListElement<TMMTriangle>*[nT]; for (size_t v = 0; v < nV; v++) { vertexMap[v] = m_vertices.GetHead(); m_vertices.Next(); } for (size_t e = 0; e < nE; e++) { edgeMap[e] = m_edges.GetHead(); m_edges.Next(); } for (size_t f = 0; f < nT; f++) { triangleMap[f] = m_triangles.GetHead(); m_triangles.Next(); } // updating pointers for (size_t v = 0; v < nV; v++) { if (vertexMap[v]->GetData().m_duplicate) { vertexMap[v]->GetData().m_duplicate = edgeMap[vertexMap[v]->GetData().m_duplicate->GetData().m_id]; } } for (size_t e = 0; e < nE; e++) { if (edgeMap[e]->GetData().m_newFace) { edgeMap[e]->GetData().m_newFace = triangleMap[edgeMap[e]->GetData().m_newFace->GetData().m_id]; } if (nT > 0) { for (int32_t f = 0; f < 2; f++) { if (edgeMap[e]->GetData().m_triangles[f]) { edgeMap[e]->GetData().m_triangles[f] = triangleMap[edgeMap[e]->GetData().m_triangles[f]->GetData().m_id]; } } } for (int32_t v = 0; v < 2; v++) { if (edgeMap[e]->GetData().m_vertices[v]) { edgeMap[e]->GetData().m_vertices[v] = vertexMap[edgeMap[e]->GetData().m_vertices[v]->GetData().m_id]; } } } for (size_t f = 0; f < nT; f++) { if (nE > 0) { for (int32_t e = 0; e < 3; e++) { if (triangleMap[f]->GetData().m_edges[e]) { triangleMap[f]->GetData().m_edges[e] = edgeMap[triangleMap[f]->GetData().m_edges[e]->GetData().m_id]; } } } for (int32_t v = 0; v < 3; v++) { if (triangleMap[f]->GetData().m_vertices[v]) { triangleMap[f]->GetData().m_vertices[v] = vertexMap[triangleMap[f]->GetData().m_vertices[v]->GetData().m_id]; } } } delete[] vertexMap; delete[] edgeMap; delete[] triangleMap; } bool TMMesh::CheckConsistancy() { size_t nE = m_edges.GetSize(); size_t nT = m_triangles.GetSize(); for (size_t e = 0; e < nE; e++) { for (int32_t f = 0; f < 2; f++) { if (!m_edges.GetHead()->GetData().m_triangles[f]) { return false; } } m_edges.Next(); } for (size_t f = 0; f < nT; f++) { for (int32_t e = 0; e < 3; e++) { int32_t found = 0; for (int32_t k = 0; k < 2; k++) { if (m_triangles.GetHead()->GetData().m_edges[e]->GetData().m_triangles[k] == m_triangles.GetHead()) { found++; } } if (found != 1) { return false; } } m_triangles.Next(); } return true; } }
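// ---------------------------------------------------------------------------
// Editor's note: a minimal sketch (not part of the original file) of the
// id-based pointer remapping that TMMesh::Copy() performs above for its
// vertex/edge/triangle circular lists.  Node and copyNodes are hypothetical
// stand-ins: ids are stamped into the source elements, the containers are
// copied, and every cross pointer is then rewritten through an id -> new
// element map.
#include <cstddef>
#include <vector>

struct Node
{
    size_t id;    // temporary index assigned before the copy
    Node*  link;  // pointer into the same structure that must be remapped
};

static void copyNodes(std::vector<Node>& src, std::vector<Node>& dst)
{
    for (size_t i = 0; i < src.size(); ++i)
        src[i].id = i;                          // 1. stamp ids into the source
    dst = src;                                  // 2. copy (links still point into src)
    std::vector<Node*> map(dst.size());
    for (size_t i = 0; i < dst.size(); ++i)
        map[i] = &dst[i];                       // 3. id -> new element
    for (size_t i = 0; i < dst.size(); ++i)
        if (dst[i].link)
            dst[i].link = map[dst[i].link->id]; // 4. rewrite pointers via the id
}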
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/vhacdICHull.cpp
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "vhacdICHull.h" #include <limits> #ifdef _MSC_VER #pragma warning(disable:4456 4706) #endif namespace VHACD { const double ICHull::sc_eps = 1.0e-15; const int32_t ICHull::sc_dummyIndex = std::numeric_limits<int32_t>::max(); ICHull::ICHull() { m_isFlat = false; } bool ICHull::AddPoints(const Vec3<double>* points, size_t nPoints) { if (!points) { return false; } CircularListElement<TMMVertex>* vertex = NULL; for (size_t i = 0; i < nPoints; i++) { vertex = m_mesh.AddVertex(); vertex->GetData().m_pos.X() = points[i].X(); vertex->GetData().m_pos.Y() = points[i].Y(); vertex->GetData().m_pos.Z() = points[i].Z(); vertex->GetData().m_name = static_cast<int32_t>(i); } return true; } bool ICHull::AddPoint(const Vec3<double>& point, int32_t id) { if (AddPoints(&point, 1)) { m_mesh.m_vertices.GetData().m_name = id; return true; } return false; } ICHullError ICHull::Process() { uint32_t addedPoints = 0; if (m_mesh.GetNVertices() < 3) { return ICHullErrorNotEnoughPoints; } if (m_mesh.GetNVertices() == 3) { m_isFlat = true; CircularListElement<TMMTriangle>* t1 = m_mesh.AddTriangle(); CircularListElement<TMMTriangle>* t2 = m_mesh.AddTriangle(); CircularListElement<TMMVertex>* v0 = m_mesh.m_vertices.GetHead(); CircularListElement<TMMVertex>* v1 = v0->GetNext(); CircularListElement<TMMVertex>* v2 = v1->GetNext(); // Compute the normal to the plane Vec3<double> p0 = v0->GetData().m_pos; Vec3<double> p1 = v1->GetData().m_pos; Vec3<double> p2 = v2->GetData().m_pos; m_normal = (p1 - p0) ^ (p2 - p0); m_normal.Normalize(); t1->GetData().m_vertices[0] = v0; t1->GetData().m_vertices[1] = v1; t1->GetData().m_vertices[2] = v2; t2->GetData().m_vertices[0] = v1; t2->GetData().m_vertices[1] = v2; t2->GetData().m_vertices[2] = v2; return ICHullErrorOK; } if (m_isFlat) { m_mesh.m_edges.Clear(); m_mesh.m_triangles.Clear(); m_isFlat = false; } if (m_mesh.GetNTriangles() == 0) // we have to create the first polyhedron { ICHullError res = DoubleTriangle(); if (res != ICHullErrorOK) { return res; } else { addedPoints += 3; } } CircularList<TMMVertex>& vertices = 
m_mesh.GetVertices(); // go to the first added and not processed vertex while (!(vertices.GetHead()->GetPrev()->GetData().m_tag)) { vertices.Prev(); } while (!vertices.GetData().m_tag) // not processed { vertices.GetData().m_tag = true; if (ProcessPoint()) { addedPoints++; CleanUp(addedPoints); vertices.Next(); if (!GetMesh().CheckConsistancy()) { size_t nV = m_mesh.GetNVertices(); CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); for (size_t v = 0; v < nV; ++v) { if (vertices.GetData().m_name == sc_dummyIndex) { vertices.Delete(); break; } vertices.Next(); } return ICHullErrorInconsistent; } } } if (m_isFlat) { SArray<CircularListElement<TMMTriangle>*> trianglesToDuplicate; size_t nT = m_mesh.GetNTriangles(); for (size_t f = 0; f < nT; f++) { TMMTriangle& currentTriangle = m_mesh.m_triangles.GetHead()->GetData(); if (currentTriangle.m_vertices[0]->GetData().m_name == sc_dummyIndex || currentTriangle.m_vertices[1]->GetData().m_name == sc_dummyIndex || currentTriangle.m_vertices[2]->GetData().m_name == sc_dummyIndex) { m_trianglesToDelete.PushBack(m_mesh.m_triangles.GetHead()); for (int32_t k = 0; k < 3; k++) { for (int32_t h = 0; h < 2; h++) { if (currentTriangle.m_edges[k]->GetData().m_triangles[h] == m_mesh.m_triangles.GetHead()) { currentTriangle.m_edges[k]->GetData().m_triangles[h] = 0; break; } } } } else { trianglesToDuplicate.PushBack(m_mesh.m_triangles.GetHead()); } m_mesh.m_triangles.Next(); } size_t nE = m_mesh.GetNEdges(); for (size_t e = 0; e < nE; e++) { TMMEdge& currentEdge = m_mesh.m_edges.GetHead()->GetData(); if (currentEdge.m_triangles[0] == 0 && currentEdge.m_triangles[1] == 0) { m_edgesToDelete.PushBack(m_mesh.m_edges.GetHead()); } m_mesh.m_edges.Next(); } size_t nV = m_mesh.GetNVertices(); CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); for (size_t v = 0; v < nV; ++v) { if (vertices.GetData().m_name == sc_dummyIndex) { vertices.Delete(); } else { vertices.GetData().m_tag = false; vertices.Next(); } } CleanEdges(); CleanTriangles(); CircularListElement<TMMTriangle>* newTriangle; for (size_t t = 0; t < trianglesToDuplicate.Size(); t++) { newTriangle = m_mesh.AddTriangle(); newTriangle->GetData().m_vertices[0] = trianglesToDuplicate[t]->GetData().m_vertices[1]; newTriangle->GetData().m_vertices[1] = trianglesToDuplicate[t]->GetData().m_vertices[0]; newTriangle->GetData().m_vertices[2] = trianglesToDuplicate[t]->GetData().m_vertices[2]; } } return ICHullErrorOK; } ICHullError ICHull::Process(const uint32_t nPointsCH, const double minVolume) { uint32_t addedPoints = 0; if (nPointsCH < 3 || m_mesh.GetNVertices() < 3) { return ICHullErrorNotEnoughPoints; } if (m_mesh.GetNVertices() == 3) { m_isFlat = true; CircularListElement<TMMTriangle>* t1 = m_mesh.AddTriangle(); CircularListElement<TMMTriangle>* t2 = m_mesh.AddTriangle(); CircularListElement<TMMVertex>* v0 = m_mesh.m_vertices.GetHead(); CircularListElement<TMMVertex>* v1 = v0->GetNext(); CircularListElement<TMMVertex>* v2 = v1->GetNext(); // Compute the normal to the plane Vec3<double> p0 = v0->GetData().m_pos; Vec3<double> p1 = v1->GetData().m_pos; Vec3<double> p2 = v2->GetData().m_pos; m_normal = (p1 - p0) ^ (p2 - p0); m_normal.Normalize(); t1->GetData().m_vertices[0] = v0; t1->GetData().m_vertices[1] = v1; t1->GetData().m_vertices[2] = v2; t2->GetData().m_vertices[0] = v1; t2->GetData().m_vertices[1] = v0; t2->GetData().m_vertices[2] = v2; return ICHullErrorOK; } if (m_isFlat) { m_mesh.m_triangles.Clear(); m_mesh.m_edges.Clear(); m_isFlat = false; } if (m_mesh.GetNTriangles() == 0) // we have to 
create the first polyhedron { ICHullError res = DoubleTriangle(); if (res != ICHullErrorOK) { return res; } else { addedPoints += 3; } } CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); while (!vertices.GetData().m_tag && addedPoints < nPointsCH) // not processed { if (!FindMaxVolumePoint((addedPoints > 4) ? minVolume : 0.0)) { break; } vertices.GetData().m_tag = true; if (ProcessPoint()) { addedPoints++; CleanUp(addedPoints); if (!GetMesh().CheckConsistancy()) { size_t nV = m_mesh.GetNVertices(); CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); for (size_t v = 0; v < nV; ++v) { if (vertices.GetData().m_name == sc_dummyIndex) { vertices.Delete(); break; } vertices.Next(); } return ICHullErrorInconsistent; } vertices.Next(); } } // delete remaining points while (!vertices.GetData().m_tag) { vertices.Delete(); } if (m_isFlat) { SArray<CircularListElement<TMMTriangle>*> trianglesToDuplicate; size_t nT = m_mesh.GetNTriangles(); for (size_t f = 0; f < nT; f++) { TMMTriangle& currentTriangle = m_mesh.m_triangles.GetHead()->GetData(); if (currentTriangle.m_vertices[0]->GetData().m_name == sc_dummyIndex || currentTriangle.m_vertices[1]->GetData().m_name == sc_dummyIndex || currentTriangle.m_vertices[2]->GetData().m_name == sc_dummyIndex) { m_trianglesToDelete.PushBack(m_mesh.m_triangles.GetHead()); for (int32_t k = 0; k < 3; k++) { for (int32_t h = 0; h < 2; h++) { if (currentTriangle.m_edges[k]->GetData().m_triangles[h] == m_mesh.m_triangles.GetHead()) { currentTriangle.m_edges[k]->GetData().m_triangles[h] = 0; break; } } } } else { trianglesToDuplicate.PushBack(m_mesh.m_triangles.GetHead()); } m_mesh.m_triangles.Next(); } size_t nE = m_mesh.GetNEdges(); for (size_t e = 0; e < nE; e++) { TMMEdge& currentEdge = m_mesh.m_edges.GetHead()->GetData(); if (currentEdge.m_triangles[0] == 0 && currentEdge.m_triangles[1] == 0) { m_edgesToDelete.PushBack(m_mesh.m_edges.GetHead()); } m_mesh.m_edges.Next(); } size_t nV = m_mesh.GetNVertices(); CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); for (size_t v = 0; v < nV; ++v) { if (vertices.GetData().m_name == sc_dummyIndex) { vertices.Delete(); } else { vertices.GetData().m_tag = false; vertices.Next(); } } CleanEdges(); CleanTriangles(); CircularListElement<TMMTriangle>* newTriangle; for (size_t t = 0; t < trianglesToDuplicate.Size(); t++) { newTriangle = m_mesh.AddTriangle(); newTriangle->GetData().m_vertices[0] = trianglesToDuplicate[t]->GetData().m_vertices[1]; newTriangle->GetData().m_vertices[1] = trianglesToDuplicate[t]->GetData().m_vertices[0]; newTriangle->GetData().m_vertices[2] = trianglesToDuplicate[t]->GetData().m_vertices[2]; } } return ICHullErrorOK; } bool ICHull::FindMaxVolumePoint(const double minVolume) { CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); CircularListElement<TMMVertex>* vMaxVolume = 0; CircularListElement<TMMVertex>* vHeadPrev = vertices.GetHead()->GetPrev(); double maxVolume = minVolume; double volume = 0.0; while (!vertices.GetData().m_tag) // not processed { if (ComputePointVolume(volume, false)) { if (maxVolume < volume) { maxVolume = volume; vMaxVolume = vertices.GetHead(); } vertices.Next(); } } CircularListElement<TMMVertex>* vHead = vHeadPrev->GetNext(); vertices.GetHead() = vHead; if (!vMaxVolume) { return false; } if (vMaxVolume != vHead) { Vec3<double> pos = vHead->GetData().m_pos; int32_t id = vHead->GetData().m_name; vHead->GetData().m_pos = vMaxVolume->GetData().m_pos; vHead->GetData().m_name = vMaxVolume->GetData().m_name; vMaxVolume->GetData().m_pos = pos; 
vHead->GetData().m_name = id; } return true; } ICHullError ICHull::DoubleTriangle() { // find three non colinear points m_isFlat = false; CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); CircularListElement<TMMVertex>* v0 = vertices.GetHead(); while (Colinear(v0->GetData().m_pos, v0->GetNext()->GetData().m_pos, v0->GetNext()->GetNext()->GetData().m_pos)) { if ((v0 = v0->GetNext()) == vertices.GetHead()) { return ICHullErrorCoplanarPoints; } } CircularListElement<TMMVertex>* v1 = v0->GetNext(); CircularListElement<TMMVertex>* v2 = v1->GetNext(); // mark points as processed v0->GetData().m_tag = v1->GetData().m_tag = v2->GetData().m_tag = true; // create two triangles CircularListElement<TMMTriangle>* f0 = MakeFace(v0, v1, v2, 0); MakeFace(v2, v1, v0, f0); // find a fourth non-coplanar point to form tetrahedron CircularListElement<TMMVertex>* v3 = v2->GetNext(); vertices.GetHead() = v3; double vol = ComputeVolume4(v0->GetData().m_pos, v1->GetData().m_pos, v2->GetData().m_pos, v3->GetData().m_pos); while (fabs(vol) < sc_eps && !v3->GetNext()->GetData().m_tag) { v3 = v3->GetNext(); vol = ComputeVolume4(v0->GetData().m_pos, v1->GetData().m_pos, v2->GetData().m_pos, v3->GetData().m_pos); } if (fabs(vol) < sc_eps) { // compute the barycenter Vec3<double> bary(0.0, 0.0, 0.0); CircularListElement<TMMVertex>* vBary = v0; do { bary += vBary->GetData().m_pos; } while ((vBary = vBary->GetNext()) != v0); bary /= static_cast<double>(vertices.GetSize()); // Compute the normal to the plane Vec3<double> p0 = v0->GetData().m_pos; Vec3<double> p1 = v1->GetData().m_pos; Vec3<double> p2 = v2->GetData().m_pos; m_normal = (p1 - p0) ^ (p2 - p0); m_normal.Normalize(); // add dummy vertex placed at (bary + normal) vertices.GetHead() = v2; Vec3<double> newPt = bary + m_normal; AddPoint(newPt, sc_dummyIndex); m_isFlat = true; return ICHullErrorOK; } else if (v3 != vertices.GetHead()) { TMMVertex temp; temp.m_name = v3->GetData().m_name; temp.m_pos = v3->GetData().m_pos; v3->GetData().m_name = vertices.GetHead()->GetData().m_name; v3->GetData().m_pos = vertices.GetHead()->GetData().m_pos; vertices.GetHead()->GetData().m_name = temp.m_name; vertices.GetHead()->GetData().m_pos = temp.m_pos; } return ICHullErrorOK; } CircularListElement<TMMTriangle>* ICHull::MakeFace(CircularListElement<TMMVertex>* v0, CircularListElement<TMMVertex>* v1, CircularListElement<TMMVertex>* v2, CircularListElement<TMMTriangle>* fold) { CircularListElement<TMMEdge>* e0; CircularListElement<TMMEdge>* e1; CircularListElement<TMMEdge>* e2; int32_t index = 0; if (!fold) // if first face to be created { e0 = m_mesh.AddEdge(); // create the three edges e1 = m_mesh.AddEdge(); e2 = m_mesh.AddEdge(); } else // otherwise re-use existing edges (in reverse order) { e0 = fold->GetData().m_edges[2]; e1 = fold->GetData().m_edges[1]; e2 = fold->GetData().m_edges[0]; index = 1; } e0->GetData().m_vertices[0] = v0; e0->GetData().m_vertices[1] = v1; e1->GetData().m_vertices[0] = v1; e1->GetData().m_vertices[1] = v2; e2->GetData().m_vertices[0] = v2; e2->GetData().m_vertices[1] = v0; // create the new face CircularListElement<TMMTriangle>* f = m_mesh.AddTriangle(); f->GetData().m_edges[0] = e0; f->GetData().m_edges[1] = e1; f->GetData().m_edges[2] = e2; f->GetData().m_vertices[0] = v0; f->GetData().m_vertices[1] = v1; f->GetData().m_vertices[2] = v2; // link edges to face f e0->GetData().m_triangles[index] = e1->GetData().m_triangles[index] = e2->GetData().m_triangles[index] = f; return f; } CircularListElement<TMMTriangle>* 
ICHull::MakeConeFace(CircularListElement<TMMEdge>* e, CircularListElement<TMMVertex>* p) { // create two new edges if they don't already exist CircularListElement<TMMEdge>* newEdges[2]; for (int32_t i = 0; i < 2; ++i) { if (!(newEdges[i] = e->GetData().m_vertices[i]->GetData().m_duplicate)) { // if the edge doesn't exits add it and mark the vertex as duplicated newEdges[i] = m_mesh.AddEdge(); newEdges[i]->GetData().m_vertices[0] = e->GetData().m_vertices[i]; newEdges[i]->GetData().m_vertices[1] = p; e->GetData().m_vertices[i]->GetData().m_duplicate = newEdges[i]; } } // make the new face CircularListElement<TMMTriangle>* newFace = m_mesh.AddTriangle(); newFace->GetData().m_edges[0] = e; newFace->GetData().m_edges[1] = newEdges[0]; newFace->GetData().m_edges[2] = newEdges[1]; MakeCCW(newFace, e, p); for (int32_t i = 0; i < 2; ++i) { for (int32_t j = 0; j < 2; ++j) { if (!newEdges[i]->GetData().m_triangles[j]) { newEdges[i]->GetData().m_triangles[j] = newFace; break; } } } return newFace; } bool ICHull::ComputePointVolume(double& totalVolume, bool markVisibleFaces) { // mark visible faces CircularListElement<TMMTriangle>* fHead = m_mesh.GetTriangles().GetHead(); CircularListElement<TMMTriangle>* f = fHead; CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); CircularListElement<TMMVertex>* vertex0 = vertices.GetHead(); bool visible = false; Vec3<double> pos0 = Vec3<double>(vertex0->GetData().m_pos.X(), vertex0->GetData().m_pos.Y(), vertex0->GetData().m_pos.Z()); double vol = 0.0; totalVolume = 0.0; Vec3<double> ver0, ver1, ver2; do { ver0.X() = f->GetData().m_vertices[0]->GetData().m_pos.X(); ver0.Y() = f->GetData().m_vertices[0]->GetData().m_pos.Y(); ver0.Z() = f->GetData().m_vertices[0]->GetData().m_pos.Z(); ver1.X() = f->GetData().m_vertices[1]->GetData().m_pos.X(); ver1.Y() = f->GetData().m_vertices[1]->GetData().m_pos.Y(); ver1.Z() = f->GetData().m_vertices[1]->GetData().m_pos.Z(); ver2.X() = f->GetData().m_vertices[2]->GetData().m_pos.X(); ver2.Y() = f->GetData().m_vertices[2]->GetData().m_pos.Y(); ver2.Z() = f->GetData().m_vertices[2]->GetData().m_pos.Z(); vol = ComputeVolume4(ver0, ver1, ver2, pos0); if (vol < -sc_eps) { vol = fabs(vol); totalVolume += vol; if (markVisibleFaces) { f->GetData().m_visible = true; m_trianglesToDelete.PushBack(f); } visible = true; } f = f->GetNext(); } while (f != fHead); if (m_trianglesToDelete.Size() == m_mesh.m_triangles.GetSize()) { for (size_t i = 0; i < m_trianglesToDelete.Size(); i++) { m_trianglesToDelete[i]->GetData().m_visible = false; } visible = false; } // if no faces visible from p then p is inside the hull if (!visible && markVisibleFaces) { vertices.Delete(); m_trianglesToDelete.Resize(0); return false; } return true; } bool ICHull::ProcessPoint() { double totalVolume = 0.0; if (!ComputePointVolume(totalVolume, true)) { return false; } // Mark edges in interior of visible region for deletion. 
// Create a new face based on each border edge CircularListElement<TMMVertex>* v0 = m_mesh.GetVertices().GetHead(); CircularListElement<TMMEdge>* eHead = m_mesh.GetEdges().GetHead(); CircularListElement<TMMEdge>* e = eHead; CircularListElement<TMMEdge>* tmp = 0; int32_t nvisible = 0; m_edgesToDelete.Resize(0); m_edgesToUpdate.Resize(0); do { tmp = e->GetNext(); nvisible = 0; for (int32_t k = 0; k < 2; k++) { if (e->GetData().m_triangles[k]->GetData().m_visible) { nvisible++; } } if (nvisible == 2) { m_edgesToDelete.PushBack(e); } else if (nvisible == 1) { e->GetData().m_newFace = MakeConeFace(e, v0); m_edgesToUpdate.PushBack(e); } e = tmp; } while (e != eHead); return true; } bool ICHull::MakeCCW(CircularListElement<TMMTriangle>* f, CircularListElement<TMMEdge>* e, CircularListElement<TMMVertex>* v) { // the visible face adjacent to e CircularListElement<TMMTriangle>* fv; if (e->GetData().m_triangles[0]->GetData().m_visible) { fv = e->GetData().m_triangles[0]; } else { fv = e->GetData().m_triangles[1]; } // set vertex[0] and vertex[1] to have the same orientation as the corresponding vertices of fv. int32_t i; // index of e->m_vertices[0] in fv CircularListElement<TMMVertex>* v0 = e->GetData().m_vertices[0]; CircularListElement<TMMVertex>* v1 = e->GetData().m_vertices[1]; for (i = 0; fv->GetData().m_vertices[i] != v0; i++) ; if (fv->GetData().m_vertices[(i + 1) % 3] != e->GetData().m_vertices[1]) { f->GetData().m_vertices[0] = v1; f->GetData().m_vertices[1] = v0; } else { f->GetData().m_vertices[0] = v0; f->GetData().m_vertices[1] = v1; // swap edges CircularListElement<TMMEdge>* tmp = f->GetData().m_edges[0]; f->GetData().m_edges[0] = f->GetData().m_edges[1]; f->GetData().m_edges[1] = tmp; } f->GetData().m_vertices[2] = v; return true; } bool ICHull::CleanUp(uint32_t& addedPoints) { bool r0 = CleanEdges(); bool r1 = CleanTriangles(); bool r2 = CleanVertices(addedPoints); return r0 && r1 && r2; } bool ICHull::CleanEdges() { // integrate the new faces into the data structure CircularListElement<TMMEdge>* e; const size_t ne_update = m_edgesToUpdate.Size(); for (size_t i = 0; i < ne_update; ++i) { e = m_edgesToUpdate[i]; if (e->GetData().m_newFace) { if (e->GetData().m_triangles[0]->GetData().m_visible) { e->GetData().m_triangles[0] = e->GetData().m_newFace; } else { e->GetData().m_triangles[1] = e->GetData().m_newFace; } e->GetData().m_newFace = 0; } } // delete edges maked for deletion CircularList<TMMEdge>& edges = m_mesh.GetEdges(); const size_t ne_delete = m_edgesToDelete.Size(); for (size_t i = 0; i < ne_delete; ++i) { edges.Delete(m_edgesToDelete[i]); } m_edgesToDelete.Resize(0); m_edgesToUpdate.Resize(0); return true; } bool ICHull::CleanTriangles() { CircularList<TMMTriangle>& triangles = m_mesh.GetTriangles(); const size_t nt_delete = m_trianglesToDelete.Size(); for (size_t i = 0; i < nt_delete; ++i) { triangles.Delete(m_trianglesToDelete[i]); } m_trianglesToDelete.Resize(0); return true; } bool ICHull::CleanVertices(uint32_t& addedPoints) { // mark all vertices incident to some undeleted edge as on the hull CircularList<TMMEdge>& edges = m_mesh.GetEdges(); CircularListElement<TMMEdge>* e = edges.GetHead(); size_t nE = edges.GetSize(); for (size_t i = 0; i < nE; i++) { e->GetData().m_vertices[0]->GetData().m_onHull = true; e->GetData().m_vertices[1]->GetData().m_onHull = true; e = e->GetNext(); } // delete all the vertices that have been processed but are not on the hull CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); CircularListElement<TMMVertex>* vHead = 
vertices.GetHead(); CircularListElement<TMMVertex>* v = vHead; v = v->GetPrev(); do { if (v->GetData().m_tag && !v->GetData().m_onHull) { CircularListElement<TMMVertex>* tmp = v->GetPrev(); vertices.Delete(v); v = tmp; addedPoints--; } else { v->GetData().m_duplicate = 0; v->GetData().m_onHull = false; v = v->GetPrev(); } } while (v->GetData().m_tag && v != vHead); return true; } void ICHull::Clear() { m_mesh.Clear(); m_edgesToDelete.Resize(0); m_edgesToUpdate.Resize(0); m_trianglesToDelete.Resize(0); m_isFlat = false; } const ICHull& ICHull::operator=(ICHull& rhs) { if (&rhs != this) { m_mesh.Copy(rhs.m_mesh); m_edgesToDelete = rhs.m_edgesToDelete; m_edgesToUpdate = rhs.m_edgesToUpdate; m_trianglesToDelete = rhs.m_trianglesToDelete; m_isFlat = rhs.m_isFlat; } return (*this); } bool ICHull::IsInside(const Vec3<double>& pt0, const double eps) { const Vec3<double> pt(pt0.X(), pt0.Y(), pt0.Z()); if (m_isFlat) { size_t nT = m_mesh.m_triangles.GetSize(); Vec3<double> ver0, ver1, ver2, a, b, c; double u, v; for (size_t t = 0; t < nT; t++) { ver0.X() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[0]->GetData().m_pos.X(); ver0.Y() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[0]->GetData().m_pos.Y(); ver0.Z() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[0]->GetData().m_pos.Z(); ver1.X() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[1]->GetData().m_pos.X(); ver1.Y() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[1]->GetData().m_pos.Y(); ver1.Z() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[1]->GetData().m_pos.Z(); ver2.X() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[2]->GetData().m_pos.X(); ver2.Y() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[2]->GetData().m_pos.Y(); ver2.Z() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[2]->GetData().m_pos.Z(); a = ver1 - ver0; b = ver2 - ver0; c = pt - ver0; u = c * a; v = c * b; if (u >= 0.0 && u <= 1.0 && v >= 0.0 && u + v <= 1.0) { return true; } m_mesh.m_triangles.Next(); } return false; } else { size_t nT = m_mesh.m_triangles.GetSize(); Vec3<double> ver0, ver1, ver2; double vol; for (size_t t = 0; t < nT; t++) { ver0.X() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[0]->GetData().m_pos.X(); ver0.Y() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[0]->GetData().m_pos.Y(); ver0.Z() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[0]->GetData().m_pos.Z(); ver1.X() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[1]->GetData().m_pos.X(); ver1.Y() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[1]->GetData().m_pos.Y(); ver1.Z() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[1]->GetData().m_pos.Z(); ver2.X() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[2]->GetData().m_pos.X(); ver2.Y() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[2]->GetData().m_pos.Y(); ver2.Z() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[2]->GetData().m_pos.Z(); vol = ComputeVolume4(ver0, ver1, ver2, pt); if (vol < eps) { return false; } m_mesh.m_triangles.Next(); } return true; } } }
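// ---------------------------------------------------------------------------
// Editor's note: a minimal usage sketch of ICHull, not part of the original
// file.  It assumes the GetMesh(), GetNVertices(), GetNTriangles() and
// GetIFS() accessors declared in vhacdICHull.h / vhacdManifoldMesh.h; the
// computeHull() helper name is hypothetical.
#include <cstdint>
#include <vector>
#include "vhacdICHull.h"

using namespace VHACD;

// Returns false if the incremental hull could not be built (too few points,
// unrecoverable coplanar input, or an inconsistent intermediate mesh).
static bool computeHull(const std::vector<Vec3<double> >& points,
                        std::vector<Vec3<double> >& hullPoints,
                        std::vector<Vec3<int32_t> >& hullTriangles)
{
    ICHull hull;
    if (!hull.AddPoints(points.data(), points.size()))
        return false;
    if (hull.Process() != ICHullErrorOK)   // or Process(maxPoints, minVolume)
        return false;

    TMMesh& mesh = hull.GetMesh();
    hullPoints.resize(mesh.GetNVertices());
    hullTriangles.resize(mesh.GetNTriangles());
    mesh.GetIFS(hullPoints.data(), hullTriangles.data()); // indexed face set out
    return true;
}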
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/vhacdRaycastMesh.cpp
#include "vhacdRaycastMesh.h" #include <math.h> #include <assert.h> namespace RAYCAST_MESH { /* a = b - c */ #define vector(a,b,c) \ (a)[0] = (b)[0] - (c)[0]; \ (a)[1] = (b)[1] - (c)[1]; \ (a)[2] = (b)[2] - (c)[2]; #define innerProduct(v,q) \ ((v)[0] * (q)[0] + \ (v)[1] * (q)[1] + \ (v)[2] * (q)[2]) #define crossProduct(a,b,c) \ (a)[0] = (b)[1] * (c)[2] - (c)[1] * (b)[2]; \ (a)[1] = (b)[2] * (c)[0] - (c)[2] * (b)[0]; \ (a)[2] = (b)[0] * (c)[1] - (c)[0] * (b)[1]; static inline bool rayIntersectsTriangle(const double *p,const double *d,const double *v0,const double *v1,const double *v2,double &t) { double e1[3],e2[3],h[3],s[3],q[3]; double a,f,u,v; vector(e1,v1,v0); vector(e2,v2,v0); crossProduct(h,d,e2); a = innerProduct(e1,h); if (a > -0.00001 && a < 0.00001) return(false); f = 1/a; vector(s,p,v0); u = f * (innerProduct(s,h)); if (u < 0.0 || u > 1.0) return(false); crossProduct(q,s,e1); v = f * innerProduct(d,q); if (v < 0.0 || u + v > 1.0) return(false); // at this stage we can compute t to find out where // the intersection point is on the line t = f * innerProduct(e2,q); if (t > 0) // ray intersection return(true); else // this means that there is a line intersection // but not a ray intersection return (false); } static double getPointDistance(const double *p1, const double *p2) { double dx = p1[0] - p2[0]; double dy = p1[1] - p2[1]; double dz = p1[2] - p2[2]; return sqrt(dx*dx + dy*dy + dz*dz); } class MyRaycastMesh : public VHACD::RaycastMesh { public: template <class T> MyRaycastMesh(uint32_t vcount, const T *vertices, uint32_t tcount, const uint32_t *indices) { mVcount = vcount; mVertices = new double[mVcount * 3]; for (uint32_t i = 0; i < mVcount; i++) { mVertices[i * 3 + 0] = vertices[0]; mVertices[i * 3 + 1] = vertices[1]; mVertices[i * 3 + 2] = vertices[2]; vertices += 3; } mTcount = tcount; mIndices = new uint32_t[mTcount * 3]; for (uint32_t i = 0; i < mTcount; i++) { mIndices[i * 3 + 0] = indices[0]; mIndices[i * 3 + 1] = indices[1]; mIndices[i * 3 + 2] = indices[2]; indices += 3; } } ~MyRaycastMesh(void) { delete[]mVertices; delete[]mIndices; } virtual void release(void) { delete this; } virtual bool raycast(const double *from, // The starting point of the raycast const double *to, // The ending point of the raycast const double *closestToPoint, // The point to match the nearest hit location (can just be the 'from' location of no specific point) double *hitLocation, // The point where the ray hit nearest to the 'closestToPoint' location double *hitDistance) final // The distance the ray traveled to the hit location { bool ret = false; double dir[3]; dir[0] = to[0] - from[0]; dir[1] = to[1] - from[1]; dir[2] = to[2] - from[2]; double distance = sqrt( dir[0]*dir[0] + dir[1]*dir[1]+dir[2]*dir[2] ); if ( distance < 0.0000000001f ) return false; double recipDistance = 1.0f / distance; dir[0]*=recipDistance; dir[1]*=recipDistance; dir[2]*=recipDistance; const uint32_t *indices = mIndices; const double *vertices = mVertices; double nearestDistance = distance; for (uint32_t tri=0; tri<mTcount; tri++) { uint32_t i1 = indices[tri*3+0]; uint32_t i2 = indices[tri*3+1]; uint32_t i3 = indices[tri*3+2]; const double *p1 = &vertices[i1*3]; const double *p2 = &vertices[i2*3]; const double *p3 = &vertices[i3*3]; double t; if ( rayIntersectsTriangle(from,dir,p1,p2,p3,t)) { double hitPos[3]; hitPos[0] = from[0] + dir[0] * t; hitPos[1] = from[1] + dir[1] * t; hitPos[2] = from[2] + dir[2] * t; double pointDistance = getPointDistance(hitPos, closestToPoint); if (pointDistance < nearestDistance ) { 
nearestDistance = pointDistance; if ( hitLocation ) { hitLocation[0] = hitPos[0]; hitLocation[1] = hitPos[1]; hitLocation[2] = hitPos[2]; } if ( hitDistance ) { *hitDistance = pointDistance; } ret = true; } } } return ret; } uint32_t mVcount; double *mVertices; uint32_t mTcount; uint32_t *mIndices; }; }; using namespace RAYCAST_MESH; namespace VHACD { RaycastMesh * RaycastMesh::createRaycastMesh(uint32_t vcount, // The number of vertices in the source triangle mesh const double *vertices, // The array of vertex positions in the format x1,y1,z1..x2,y2,z2.. etc. uint32_t tcount, // The number of triangles in the source triangle mesh const uint32_t *indices) // The triangle indices in the format of i1,i2,i3 ... i4,i5,i6, ... { MyRaycastMesh *m = new MyRaycastMesh(vcount, vertices, tcount, indices); return static_cast<RaycastMesh *>(m); } RaycastMesh * RaycastMesh::createRaycastMesh(uint32_t vcount, // The number of vertices in the source triangle mesh const float *vertices, // The array of vertex positions in the format x1,y1,z1..x2,y2,z2.. etc. uint32_t tcount, // The number of triangles in the source triangle mesh const uint32_t *indices) // The triangle indices in the format of i1,i2,i3 ... i4,i5,i6, ... { MyRaycastMesh *m = new MyRaycastMesh(vcount, vertices, tcount, indices); return static_cast<RaycastMesh *>(m); } } // end of VHACD namespace
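// ---------------------------------------------------------------------------
// Editor's note: a minimal usage sketch of the RaycastMesh interface above,
// not part of the original file.  Only createRaycastMesh(), raycast() and
// release() are used, with the parameter meanings taken from the comments in
// the implementation above; the castOneRay() helper and its arguments are
// placeholders.
#include <cstdint>
#include "vhacdRaycastMesh.h"

static bool castOneRay(uint32_t vcount, const double* vertices,
                       uint32_t tcount, const uint32_t* indices,
                       const double from[3], const double to[3],
                       double hit[3])
{
    VHACD::RaycastMesh* rm =
        VHACD::RaycastMesh::createRaycastMesh(vcount, vertices, tcount, indices);
    double hitDistance = 0.0;
    // 'from' doubles as the point the reported hit should be nearest to
    bool hitSomething = rm->raycast(from, to, from, hit, &hitDistance);
    rm->release();   // the raycast mesh owns copies of the vertex/index data
    return hitSomething;
}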
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/vhacdMesh.cpp
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define _CRT_SECURE_NO_WARNINGS #include "btConvexHullComputer.h" #include "vhacdMesh.h" #include <fstream> #include <iosfwd> #include <iostream> #include <stdio.h> #include <stdlib.h> #include <string> namespace VHACD { Mesh::Mesh() { m_diag = 1.0; } Mesh::~Mesh() { } Vec3<double>& Mesh::ComputeCenter(void) { const size_t nV = GetNPoints(); if (nV) { m_minBB = GetPoint(0); m_maxBB = GetPoint(0); for (size_t v = 1; v < nV; v++) { Vec3<double> p = GetPoint(v); if (p.X() < m_minBB.X()) { m_minBB.X() = p.X(); } if (p.Y() < m_minBB.Y()) { m_minBB.Y() = p.Y(); } if (p.Z() < m_minBB.Z()) { m_minBB.Z() = p.Z(); } if (p.X() > m_maxBB.X()) { m_maxBB.X() = p.X(); } if (p.Y() > m_maxBB.Y()) { m_maxBB.Y() = p.Y(); } if (p.Z() > m_maxBB.Z()) { m_maxBB.Z() = p.Z(); } } m_center.X() = (m_maxBB.X() - m_minBB.X())*0.5 + m_minBB.X(); m_center.Y() = (m_maxBB.Y() - m_minBB.Y())*0.5 + m_minBB.Y(); m_center.Z() = (m_maxBB.Z() - m_minBB.Z())*0.5 + m_minBB.Z(); } return m_center; } double Mesh::ComputeVolume() const { const size_t nV = GetNPoints(); const size_t nT = GetNTriangles(); if (nV == 0 || nT == 0) { return 0.0; } Vec3<double> bary(0.0, 0.0, 0.0); for (size_t v = 0; v < nV; v++) { bary += GetPoint(v); } bary /= static_cast<double>(nV); Vec3<double> ver0, ver1, ver2; double totalVolume = 0.0; for (int32_t t = 0; t < int32_t(nT); t++) { const Vec3<int32_t>& tri = GetTriangle(t); ver0 = GetPoint(tri[0]); ver1 = GetPoint(tri[1]); ver2 = GetPoint(tri[2]); totalVolume += ComputeVolume4(ver0, ver1, ver2, bary); } return totalVolume / 6.0; } void Mesh::ComputeConvexHull(const double* const pts, const size_t nPts) { ResizePoints(0); ResizeTriangles(0); btConvexHullComputer ch; ch.compute(pts, 3 * sizeof(double), (int32_t)nPts, -1.0, -1.0); for (int32_t v = 0; v < ch.vertices.size(); v++) { AddPoint(Vec3<double>(ch.vertices[v].getX(), ch.vertices[v].getY(), ch.vertices[v].getZ())); } const int32_t nt = ch.faces.size(); for (int32_t t = 0; t < nt; ++t) { const btConvexHullComputer::Edge* sourceEdge = &(ch.edges[ch.faces[t]]); int32_t a = sourceEdge->getSourceVertex(); int32_t 
b = sourceEdge->getTargetVertex(); const btConvexHullComputer::Edge* edge = sourceEdge->getNextEdgeOfFace(); int32_t c = edge->getTargetVertex(); while (c != a) { AddTriangle(Vec3<int32_t>(a, b, c)); edge = edge->getNextEdgeOfFace(); b = c; c = edge->getTargetVertex(); } } } void Mesh::Clip(const Plane& plane, SArray<Vec3<double> >& positivePart, SArray<Vec3<double> >& negativePart) const { const size_t nV = GetNPoints(); if (nV == 0) { return; } double d; for (size_t v = 0; v < nV; v++) { const Vec3<double>& pt = GetPoint(v); d = plane.m_a * pt[0] + plane.m_b * pt[1] + plane.m_c * pt[2] + plane.m_d; if (d > 0.0) { positivePart.PushBack(pt); } else if (d < 0.0) { negativePart.PushBack(pt); } else { positivePart.PushBack(pt); negativePart.PushBack(pt); } } } bool Mesh::IsInside(const Vec3<double>& pt) const { const size_t nV = GetNPoints(); const size_t nT = GetNTriangles(); if (nV == 0 || nT == 0) { return false; } Vec3<double> ver0, ver1, ver2; double volume; for (int32_t t = 0; t < int32_t(nT); t++) { const Vec3<int32_t>& tri = GetTriangle(t); ver0 = GetPoint(tri[0]); ver1 = GetPoint(tri[1]); ver2 = GetPoint(tri[2]); volume = ComputeVolume4(ver0, ver1, ver2, pt); if (volume < 0.0) { return false; } } return true; } double Mesh::ComputeDiagBB() { const size_t nPoints = GetNPoints(); if (nPoints == 0) return 0.0; Vec3<double> minBB = m_points[0]; Vec3<double> maxBB = m_points[0]; double x, y, z; for (size_t v = 1; v < nPoints; v++) { x = m_points[v][0]; y = m_points[v][1]; z = m_points[v][2]; if (x < minBB[0]) minBB[0] = x; else if (x > maxBB[0]) maxBB[0] = x; if (y < minBB[1]) minBB[1] = y; else if (y > maxBB[1]) maxBB[1] = y; if (z < minBB[2]) minBB[2] = z; else if (z > maxBB[2]) maxBB[2] = z; } return (m_diag = (maxBB - minBB).GetNorm()); } #ifdef VHACD_DEBUG_MESH bool Mesh::SaveVRML2(const std::string& fileName) const { std::ofstream fout(fileName.c_str()); if (fout.is_open()) { const Material material; if (SaveVRML2(fout, material)) { fout.close(); return true; } return false; } return false; } bool Mesh::SaveVRML2(std::ofstream& fout, const Material& material) const { if (fout.is_open()) { fout.setf(std::ios::fixed, std::ios::floatfield); fout.setf(std::ios::showpoint); fout.precision(6); size_t nV = m_points.Size(); size_t nT = m_triangles.Size(); fout << "#VRML V2.0 utf8" << std::endl; fout << "" << std::endl; fout << "# Vertices: " << nV << std::endl; fout << "# Triangles: " << nT << std::endl; fout << "" << std::endl; fout << "Group {" << std::endl; fout << " children [" << std::endl; fout << " Shape {" << std::endl; fout << " appearance Appearance {" << std::endl; fout << " material Material {" << std::endl; fout << " diffuseColor " << material.m_diffuseColor[0] << " " << material.m_diffuseColor[1] << " " << material.m_diffuseColor[2] << std::endl; fout << " ambientIntensity " << material.m_ambientIntensity << std::endl; fout << " specularColor " << material.m_specularColor[0] << " " << material.m_specularColor[1] << " " << material.m_specularColor[2] << std::endl; fout << " emissiveColor " << material.m_emissiveColor[0] << " " << material.m_emissiveColor[1] << " " << material.m_emissiveColor[2] << std::endl; fout << " shininess " << material.m_shininess << std::endl; fout << " transparency " << material.m_transparency << std::endl; fout << " }" << std::endl; fout << " }" << std::endl; fout << " geometry IndexedFaceSet {" << std::endl; fout << " ccw TRUE" << std::endl; fout << " solid TRUE" << std::endl; fout << " convex TRUE" << std::endl; if (nV > 0) { fout << " coord DEF co 
Coordinate {" << std::endl; fout << " point [" << std::endl; for (size_t v = 0; v < nV; v++) { fout << " " << m_points[v][0] << " " << m_points[v][1] << " " << m_points[v][2] << "," << std::endl; } fout << " ]" << std::endl; fout << " }" << std::endl; } if (nT > 0) { fout << " coordIndex [ " << std::endl; for (size_t f = 0; f < nT; f++) { fout << " " << m_triangles[f][0] << ", " << m_triangles[f][1] << ", " << m_triangles[f][2] << ", -1," << std::endl; } fout << " ]" << std::endl; } fout << " }" << std::endl; fout << " }" << std::endl; fout << " ]" << std::endl; fout << "}" << std::endl; return true; } return false; } bool Mesh::SaveOFF(const std::string& fileName) const { std::ofstream fout(fileName.c_str()); if (fout.is_open()) { size_t nV = m_points.Size(); size_t nT = m_triangles.Size(); fout << "OFF" << std::endl; fout << nV << " " << nT << " " << 0 << std::endl; for (size_t v = 0; v < nV; v++) { fout << m_points[v][0] << " " << m_points[v][1] << " " << m_points[v][2] << std::endl; } for (size_t f = 0; f < nT; f++) { fout << "3 " << m_triangles[f][0] << " " << m_triangles[f][1] << " " << m_triangles[f][2] << std::endl; } fout.close(); return true; } return false; } bool Mesh::LoadOFF(const std::string& fileName, bool invert) { FILE* fid = fopen(fileName.c_str(), "r"); if (fid) { const std::string strOFF("OFF"); char temp[1024]; fscanf(fid, "%s", temp); if (std::string(temp) != strOFF) { fclose(fid); return false; } else { int32_t nv = 0; int32_t nf = 0; int32_t ne = 0; fscanf(fid, "%i", &nv); fscanf(fid, "%i", &nf); fscanf(fid, "%i", &ne); m_points.Resize(nv); m_triangles.Resize(nf); Vec3<double> coord; float x, y, z; for (int32_t p = 0; p < nv; p++) { fscanf(fid, "%f", &x); fscanf(fid, "%f", &y); fscanf(fid, "%f", &z); m_points[p][0] = x; m_points[p][1] = y; m_points[p][2] = z; } int32_t i, j, k, s; for (int32_t t = 0; t < nf; ++t) { fscanf(fid, "%i", &s); if (s == 3) { fscanf(fid, "%i", &i); fscanf(fid, "%i", &j); fscanf(fid, "%i", &k); m_triangles[t][0] = i; if (invert) { m_triangles[t][1] = k; m_triangles[t][2] = j; } else { m_triangles[t][1] = j; m_triangles[t][2] = k; } } else // Fix me: support only triangular meshes { for (int32_t h = 0; h < s; ++h) fscanf(fid, "%i", &s); } } fclose(fid); } } else { return false; } return true; } #endif // VHACD_DEBUG_MESH }
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/FloatMath.inl
// a set of routines that let you do common 3d math // operations without any vector, matrix, or quaternion // classes or templates. // // a vector (or point) is a 'float *' to 3 floating point numbers. // a matrix is a 'float *' to an array of 16 floating point numbers representing a 4x4 transformation matrix compatible with D3D or OGL // a quaternion is a 'float *' to 4 floats representing a quaternion x,y,z,w // #pragma warning(disable:4996) namespace FLOAT_MATH { void fm_inverseRT(const REAL matrix[16],const REAL pos[3],REAL t[3]) // inverse rotate translate the point. { REAL _x = pos[0] - matrix[3*4+0]; REAL _y = pos[1] - matrix[3*4+1]; REAL _z = pos[2] - matrix[3*4+2]; // Multiply inverse-translated source vector by inverted rotation transform t[0] = (matrix[0*4+0] * _x) + (matrix[0*4+1] * _y) + (matrix[0*4+2] * _z); t[1] = (matrix[1*4+0] * _x) + (matrix[1*4+1] * _y) + (matrix[1*4+2] * _z); t[2] = (matrix[2*4+0] * _x) + (matrix[2*4+1] * _y) + (matrix[2*4+2] * _z); } REAL fm_getDeterminant(const REAL matrix[16]) { REAL tempv[3]; REAL p0[3]; REAL p1[3]; REAL p2[3]; p0[0] = matrix[0*4+0]; p0[1] = matrix[0*4+1]; p0[2] = matrix[0*4+2]; p1[0] = matrix[1*4+0]; p1[1] = matrix[1*4+1]; p1[2] = matrix[1*4+2]; p2[0] = matrix[2*4+0]; p2[1] = matrix[2*4+1]; p2[2] = matrix[2*4+2]; fm_cross(tempv,p1,p2); return fm_dot(p0,tempv); } REAL fm_squared(REAL x) { return x*x; }; void fm_decomposeTransform(const REAL local_transform[16],REAL trans[3],REAL rot[4],REAL scale[3]) { trans[0] = local_transform[12]; trans[1] = local_transform[13]; trans[2] = local_transform[14]; scale[0] = (REAL)sqrt(fm_squared(local_transform[0*4+0]) + fm_squared(local_transform[0*4+1]) + fm_squared(local_transform[0*4+2])); scale[1] = (REAL)sqrt(fm_squared(local_transform[1*4+0]) + fm_squared(local_transform[1*4+1]) + fm_squared(local_transform[1*4+2])); scale[2] = (REAL)sqrt(fm_squared(local_transform[2*4+0]) + fm_squared(local_transform[2*4+1]) + fm_squared(local_transform[2*4+2])); REAL m[16]; memcpy(m,local_transform,sizeof(REAL)*16); REAL sx = 1.0f / scale[0]; REAL sy = 1.0f / scale[1]; REAL sz = 1.0f / scale[2]; m[0*4+0]*=sx; m[0*4+1]*=sx; m[0*4+2]*=sx; m[1*4+0]*=sy; m[1*4+1]*=sy; m[1*4+2]*=sy; m[2*4+0]*=sz; m[2*4+1]*=sz; m[2*4+2]*=sz; fm_matrixToQuat(m,rot); } void fm_getSubMatrix(int32_t ki,int32_t kj,REAL pDst[16],const REAL matrix[16]) { int32_t row, col; int32_t dstCol = 0, dstRow = 0; for ( col = 0; col < 4; col++ ) { if ( col == kj ) { continue; } for ( dstRow = 0, row = 0; row < 4; row++ ) { if ( row == ki ) { continue; } pDst[dstCol*4+dstRow] = matrix[col*4+row]; dstRow++; } dstCol++; } } void fm_inverseTransform(const REAL matrix[16],REAL inverse_matrix[16]) { REAL determinant = fm_getDeterminant(matrix); determinant = 1.0f / determinant; for (int32_t i = 0; i < 4; i++ ) { for (int32_t j = 0; j < 4; j++ ) { int32_t sign = 1 - ( ( i + j ) % 2 ) * 2; REAL subMat[16]; fm_identity(subMat); fm_getSubMatrix( i, j, subMat, matrix ); REAL subDeterminant = fm_getDeterminant(subMat); inverse_matrix[i*4+j] = ( subDeterminant * sign ) * determinant; } } } void fm_identity(REAL matrix[16]) // set 4x4 matrix to identity. 
{ matrix[0*4+0] = 1; matrix[1*4+1] = 1; matrix[2*4+2] = 1; matrix[3*4+3] = 1; matrix[1*4+0] = 0; matrix[2*4+0] = 0; matrix[3*4+0] = 0; matrix[0*4+1] = 0; matrix[2*4+1] = 0; matrix[3*4+1] = 0; matrix[0*4+2] = 0; matrix[1*4+2] = 0; matrix[3*4+2] = 0; matrix[0*4+3] = 0; matrix[1*4+3] = 0; matrix[2*4+3] = 0; } void fm_quatToEuler(const REAL quat[4],REAL &ax,REAL &ay,REAL &az) { REAL x = quat[0]; REAL y = quat[1]; REAL z = quat[2]; REAL w = quat[3]; REAL sint = (2.0f * w * y) - (2.0f * x * z); REAL cost_temp = 1.0f - (sint * sint); REAL cost = 0; if ( (REAL)fabs(cost_temp) > 0.001f ) { cost = (REAL)sqrt( cost_temp ); } REAL sinv, cosv, sinf, cosf; if ( (REAL)fabs(cost) > 0.001f ) { cost = 1.0f / cost; sinv = ((2.0f * y * z) + (2.0f * w * x)) * cost; cosv = (1.0f - (2.0f * x * x) - (2.0f * y * y)) * cost; sinf = ((2.0f * x * y) + (2.0f * w * z)) * cost; cosf = (1.0f - (2.0f * y * y) - (2.0f * z * z)) * cost; } else { sinv = (2.0f * w * x) - (2.0f * y * z); cosv = 1.0f - (2.0f * x * x) - (2.0f * z * z); sinf = 0; cosf = 1.0f; } // compute output rotations ax = (REAL)atan2( sinv, cosv ); ay = (REAL)atan2( sint, cost ); az = (REAL)atan2( sinf, cosf ); } void fm_eulerToMatrix(REAL ax,REAL ay,REAL az,REAL *matrix) // convert euler (in radians) to a dest 4x4 matrix (translation set to zero) { REAL quat[4]; fm_eulerToQuat(ax,ay,az,quat); fm_quatToMatrix(quat,matrix); } void fm_getAABB(uint32_t vcount,const REAL *points,uint32_t pstride,REAL *bmin,REAL *bmax) { const uint8_t *source = (const uint8_t *) points; bmin[0] = points[0]; bmin[1] = points[1]; bmin[2] = points[2]; bmax[0] = points[0]; bmax[1] = points[1]; bmax[2] = points[2]; for (uint32_t i=1; i<vcount; i++) { source+=pstride; const REAL *p = (const REAL *) source; if ( p[0] < bmin[0] ) bmin[0] = p[0]; if ( p[1] < bmin[1] ) bmin[1] = p[1]; if ( p[2] < bmin[2] ) bmin[2] = p[2]; if ( p[0] > bmax[0] ) bmax[0] = p[0]; if ( p[1] > bmax[1] ) bmax[1] = p[1]; if ( p[2] > bmax[2] ) bmax[2] = p[2]; } } void fm_eulerToQuat(const REAL *euler,REAL *quat) // convert euler angles to quaternion. { fm_eulerToQuat(euler[0],euler[1],euler[2],quat); } void fm_eulerToQuat(REAL roll,REAL pitch,REAL yaw,REAL *quat) // convert euler angles to quaternion. { roll *= 0.5f; pitch *= 0.5f; yaw *= 0.5f; REAL cr = (REAL)cos(roll); REAL cp = (REAL)cos(pitch); REAL cy = (REAL)cos(yaw); REAL sr = (REAL)sin(roll); REAL sp = (REAL)sin(pitch); REAL sy = (REAL)sin(yaw); REAL cpcy = cp * cy; REAL spsy = sp * sy; REAL spcy = sp * cy; REAL cpsy = cp * sy; quat[0] = ( sr * cpcy - cr * spsy); quat[1] = ( cr * spcy + sr * cpsy); quat[2] = ( cr * cpsy - sr * spcy); quat[3] = cr * cpcy + sr * spsy; } void fm_quatToMatrix(const REAL *quat,REAL *matrix) // convert quaterinion rotation to matrix, zeros out the translation component. 
{ REAL xx = quat[0]*quat[0]; REAL yy = quat[1]*quat[1]; REAL zz = quat[2]*quat[2]; REAL xy = quat[0]*quat[1]; REAL xz = quat[0]*quat[2]; REAL yz = quat[1]*quat[2]; REAL wx = quat[3]*quat[0]; REAL wy = quat[3]*quat[1]; REAL wz = quat[3]*quat[2]; matrix[0*4+0] = 1 - 2 * ( yy + zz ); matrix[1*4+0] = 2 * ( xy - wz ); matrix[2*4+0] = 2 * ( xz + wy ); matrix[0*4+1] = 2 * ( xy + wz ); matrix[1*4+1] = 1 - 2 * ( xx + zz ); matrix[2*4+1] = 2 * ( yz - wx ); matrix[0*4+2] = 2 * ( xz - wy ); matrix[1*4+2] = 2 * ( yz + wx ); matrix[2*4+2] = 1 - 2 * ( xx + yy ); matrix[3*4+0] = matrix[3*4+1] = matrix[3*4+2] = (REAL) 0.0f; matrix[0*4+3] = matrix[1*4+3] = matrix[2*4+3] = (REAL) 0.0f; matrix[3*4+3] =(REAL) 1.0f; } void fm_quatRotate(const REAL *quat,const REAL *v,REAL *r) // rotate a vector directly by a quaternion. { REAL left[4]; left[0] = quat[3]*v[0] + quat[1]*v[2] - v[1]*quat[2]; left[1] = quat[3]*v[1] + quat[2]*v[0] - v[2]*quat[0]; left[2] = quat[3]*v[2] + quat[0]*v[1] - v[0]*quat[1]; left[3] = - quat[0]*v[0] - quat[1]*v[1] - quat[2]*v[2]; r[0] = (left[3]*-quat[0]) + (quat[3]*left[0]) + (left[1]*-quat[2]) - (-quat[1]*left[2]); r[1] = (left[3]*-quat[1]) + (quat[3]*left[1]) + (left[2]*-quat[0]) - (-quat[2]*left[0]); r[2] = (left[3]*-quat[2]) + (quat[3]*left[2]) + (left[0]*-quat[1]) - (-quat[0]*left[1]); } void fm_getTranslation(const REAL *matrix,REAL *t) { t[0] = matrix[3*4+0]; t[1] = matrix[3*4+1]; t[2] = matrix[3*4+2]; } void fm_matrixToQuat(const REAL *matrix,REAL *quat) // convert the 3x3 portion of a 4x4 matrix into a quaterion as x,y,z,w { REAL tr = matrix[0*4+0] + matrix[1*4+1] + matrix[2*4+2]; // check the diagonal if (tr > 0.0f ) { REAL s = (REAL) sqrt ( (double) (tr + 1.0f) ); quat[3] = s * 0.5f; s = 0.5f / s; quat[0] = (matrix[1*4+2] - matrix[2*4+1]) * s; quat[1] = (matrix[2*4+0] - matrix[0*4+2]) * s; quat[2] = (matrix[0*4+1] - matrix[1*4+0]) * s; } else { // diagonal is negative int32_t nxt[3] = {1, 2, 0}; REAL qa[4]; int32_t i = 0; if (matrix[1*4+1] > matrix[0*4+0]) i = 1; if (matrix[2*4+2] > matrix[i*4+i]) i = 2; int32_t j = nxt[i]; int32_t k = nxt[j]; REAL s = (REAL)sqrt ( ((matrix[i*4+i] - (matrix[j*4+j] + matrix[k*4+k])) + 1.0f) ); qa[i] = s * 0.5f; if (s != 0.0f ) s = 0.5f / s; qa[3] = (matrix[j*4+k] - matrix[k*4+j]) * s; qa[j] = (matrix[i*4+j] + matrix[j*4+i]) * s; qa[k] = (matrix[i*4+k] + matrix[k*4+i]) * s; quat[0] = qa[0]; quat[1] = qa[1]; quat[2] = qa[2]; quat[3] = qa[3]; } // fm_normalizeQuat(quat); } REAL fm_sphereVolume(REAL radius) // return's the volume of a sphere of this radius (4/3 PI * R cubed ) { return (4.0f / 3.0f ) * FM_PI * radius * radius * radius; } REAL fm_cylinderVolume(REAL radius,REAL h) { return FM_PI * radius * radius *h; } REAL fm_capsuleVolume(REAL radius,REAL h) { REAL volume = fm_sphereVolume(radius); // volume of the sphere portion. 
REAL ch = h-radius*2; // this is the cylinder length if ( ch > 0 ) { volume+=fm_cylinderVolume(radius,ch); } return volume; } void fm_transform(const REAL matrix[16],const REAL v[3],REAL t[3]) // rotate and translate this point { if ( matrix ) { REAL tx = (matrix[0*4+0] * v[0]) + (matrix[1*4+0] * v[1]) + (matrix[2*4+0] * v[2]) + matrix[3*4+0]; REAL ty = (matrix[0*4+1] * v[0]) + (matrix[1*4+1] * v[1]) + (matrix[2*4+1] * v[2]) + matrix[3*4+1]; REAL tz = (matrix[0*4+2] * v[0]) + (matrix[1*4+2] * v[1]) + (matrix[2*4+2] * v[2]) + matrix[3*4+2]; t[0] = tx; t[1] = ty; t[2] = tz; } else { t[0] = v[0]; t[1] = v[1]; t[2] = v[2]; } } void fm_rotate(const REAL matrix[16],const REAL v[3],REAL t[3]) // rotate and translate this point { if ( matrix ) { REAL tx = (matrix[0*4+0] * v[0]) + (matrix[1*4+0] * v[1]) + (matrix[2*4+0] * v[2]); REAL ty = (matrix[0*4+1] * v[0]) + (matrix[1*4+1] * v[1]) + (matrix[2*4+1] * v[2]); REAL tz = (matrix[0*4+2] * v[0]) + (matrix[1*4+2] * v[1]) + (matrix[2*4+2] * v[2]); t[0] = tx; t[1] = ty; t[2] = tz; } else { t[0] = v[0]; t[1] = v[1]; t[2] = v[2]; } } REAL fm_distance(const REAL *p1,const REAL *p2) { REAL dx = p1[0] - p2[0]; REAL dy = p1[1] - p2[1]; REAL dz = p1[2] - p2[2]; return (REAL)sqrt( dx*dx + dy*dy + dz *dz ); } REAL fm_distanceSquared(const REAL *p1,const REAL *p2) { REAL dx = p1[0] - p2[0]; REAL dy = p1[1] - p2[1]; REAL dz = p1[2] - p2[2]; return dx*dx + dy*dy + dz *dz; } REAL fm_distanceSquaredXZ(const REAL *p1,const REAL *p2) { REAL dx = p1[0] - p2[0]; REAL dz = p1[2] - p2[2]; return dx*dx + dz *dz; } REAL fm_computePlane(const REAL *A,const REAL *B,const REAL *C,REAL *n) // returns D { REAL vx = (B[0] - C[0]); REAL vy = (B[1] - C[1]); REAL vz = (B[2] - C[2]); REAL wx = (A[0] - B[0]); REAL wy = (A[1] - B[1]); REAL wz = (A[2] - B[2]); REAL vw_x = vy * wz - vz * wy; REAL vw_y = vz * wx - vx * wz; REAL vw_z = vx * wy - vy * wx; REAL mag = (REAL)sqrt((vw_x * vw_x) + (vw_y * vw_y) + (vw_z * vw_z)); if ( mag < 0.000001f ) { mag = 0; } else { mag = 1.0f/mag; } REAL x = vw_x * mag; REAL y = vw_y * mag; REAL z = vw_z * mag; REAL D = 0.0f - ((x*A[0])+(y*A[1])+(z*A[2])); n[0] = x; n[1] = y; n[2] = z; return D; } REAL fm_distToPlane(const REAL *plane,const REAL *p) // computes the distance of this point from the plane. { return p[0]*plane[0]+p[1]*plane[1]+p[2]*plane[2]+plane[3]; } REAL fm_dot(const REAL *p1,const REAL *p2) { return p1[0]*p2[0]+p1[1]*p2[1]+p1[2]*p2[2]; } void fm_cross(REAL *cross,const REAL *a,const REAL *b) { cross[0] = a[1]*b[2] - a[2]*b[1]; cross[1] = a[2]*b[0] - a[0]*b[2]; cross[2] = a[0]*b[1] - a[1]*b[0]; } void fm_computeNormalVector(REAL *n,const REAL *p1,const REAL *p2) { n[0] = p2[0] - p1[0]; n[1] = p2[1] - p1[1]; n[2] = p2[2] - p1[2]; fm_normalize(n); } bool fm_computeWindingOrder(const REAL *p1,const REAL *p2,const REAL *p3) // returns true if the triangle is clockwise. 
{ bool ret = false; REAL v1[3]; REAL v2[3]; fm_computeNormalVector(v1,p1,p2); // p2-p1 (as vector) and then normalized fm_computeNormalVector(v2,p1,p3); // p3-p1 (as vector) and then normalized REAL cross[3]; fm_cross(cross, v1, v2 ); REAL ref[3] = { 1, 0, 0 }; REAL d = fm_dot( cross, ref ); if ( d <= 0 ) ret = false; else ret = true; return ret; } REAL fm_normalize(REAL *n) // normalize this vector { REAL dist = (REAL)sqrt(n[0]*n[0] + n[1]*n[1] + n[2]*n[2]); if ( dist > 0.0000001f ) { REAL mag = 1.0f / dist; n[0]*=mag; n[1]*=mag; n[2]*=mag; } else { n[0] = 1; n[1] = 0; n[2] = 0; } return dist; } void fm_matrixMultiply(const REAL *pA,const REAL *pB,REAL *pM) { #if 1 REAL a = pA[0*4+0] * pB[0*4+0] + pA[0*4+1] * pB[1*4+0] + pA[0*4+2] * pB[2*4+0] + pA[0*4+3] * pB[3*4+0]; REAL b = pA[0*4+0] * pB[0*4+1] + pA[0*4+1] * pB[1*4+1] + pA[0*4+2] * pB[2*4+1] + pA[0*4+3] * pB[3*4+1]; REAL c = pA[0*4+0] * pB[0*4+2] + pA[0*4+1] * pB[1*4+2] + pA[0*4+2] * pB[2*4+2] + pA[0*4+3] * pB[3*4+2]; REAL d = pA[0*4+0] * pB[0*4+3] + pA[0*4+1] * pB[1*4+3] + pA[0*4+2] * pB[2*4+3] + pA[0*4+3] * pB[3*4+3]; REAL e = pA[1*4+0] * pB[0*4+0] + pA[1*4+1] * pB[1*4+0] + pA[1*4+2] * pB[2*4+0] + pA[1*4+3] * pB[3*4+0]; REAL f = pA[1*4+0] * pB[0*4+1] + pA[1*4+1] * pB[1*4+1] + pA[1*4+2] * pB[2*4+1] + pA[1*4+3] * pB[3*4+1]; REAL g = pA[1*4+0] * pB[0*4+2] + pA[1*4+1] * pB[1*4+2] + pA[1*4+2] * pB[2*4+2] + pA[1*4+3] * pB[3*4+2]; REAL h = pA[1*4+0] * pB[0*4+3] + pA[1*4+1] * pB[1*4+3] + pA[1*4+2] * pB[2*4+3] + pA[1*4+3] * pB[3*4+3]; REAL i = pA[2*4+0] * pB[0*4+0] + pA[2*4+1] * pB[1*4+0] + pA[2*4+2] * pB[2*4+0] + pA[2*4+3] * pB[3*4+0]; REAL j = pA[2*4+0] * pB[0*4+1] + pA[2*4+1] * pB[1*4+1] + pA[2*4+2] * pB[2*4+1] + pA[2*4+3] * pB[3*4+1]; REAL k = pA[2*4+0] * pB[0*4+2] + pA[2*4+1] * pB[1*4+2] + pA[2*4+2] * pB[2*4+2] + pA[2*4+3] * pB[3*4+2]; REAL l = pA[2*4+0] * pB[0*4+3] + pA[2*4+1] * pB[1*4+3] + pA[2*4+2] * pB[2*4+3] + pA[2*4+3] * pB[3*4+3]; REAL m = pA[3*4+0] * pB[0*4+0] + pA[3*4+1] * pB[1*4+0] + pA[3*4+2] * pB[2*4+0] + pA[3*4+3] * pB[3*4+0]; REAL n = pA[3*4+0] * pB[0*4+1] + pA[3*4+1] * pB[1*4+1] + pA[3*4+2] * pB[2*4+1] + pA[3*4+3] * pB[3*4+1]; REAL o = pA[3*4+0] * pB[0*4+2] + pA[3*4+1] * pB[1*4+2] + pA[3*4+2] * pB[2*4+2] + pA[3*4+3] * pB[3*4+2]; REAL p = pA[3*4+0] * pB[0*4+3] + pA[3*4+1] * pB[1*4+3] + pA[3*4+2] * pB[2*4+3] + pA[3*4+3] * pB[3*4+3]; pM[0] = a; pM[1] = b; pM[2] = c; pM[3] = d; pM[4] = e; pM[5] = f; pM[6] = g; pM[7] = h; pM[8] = i; pM[9] = j; pM[10] = k; pM[11] = l; pM[12] = m; pM[13] = n; pM[14] = o; pM[15] = p; #else memset(pM, 0, sizeof(REAL)*16); for(int32_t i=0; i<4; i++ ) for(int32_t j=0; j<4; j++ ) for(int32_t k=0; k<4; k++ ) pM[4*i+j] += pA[4*i+k] * pB[4*k+j]; #endif } void fm_eulerToQuatDX(REAL x,REAL y,REAL z,REAL *quat) // convert euler angles to quaternion using the fucked up DirectX method { REAL matrix[16]; fm_eulerToMatrix(x,y,z,matrix); fm_matrixToQuat(matrix,quat); } // implementation copied from: http://blogs.msdn.com/mikepelton/archive/2004/10/29/249501.aspx void fm_eulerToMatrixDX(REAL x,REAL y,REAL z,REAL *matrix) // convert euler angles to quaternion using the fucked up DirectX method. 
{ fm_identity(matrix); matrix[0*4+0] = (REAL)(cos(z)*cos(y) + sin(z)*sin(x)*sin(y)); matrix[0*4+1] = (REAL)(sin(z)*cos(x)); matrix[0*4+2] = (REAL)(cos(z)*-sin(y) + sin(z)*sin(x)*cos(y)); matrix[1*4+0] = (REAL)(-sin(z)*cos(y)+cos(z)*sin(x)*sin(y)); matrix[1*4+1] = (REAL)(cos(z)*cos(x)); matrix[1*4+2] = (REAL)(sin(z)*sin(y) +cos(z)*sin(x)*cos(y)); matrix[2*4+0] = (REAL)(cos(x)*sin(y)); matrix[2*4+1] = (REAL)(-sin(x)); matrix[2*4+2] = (REAL)(cos(x)*cos(y)); } void fm_scale(REAL x,REAL y,REAL z,REAL *fscale) // apply scale to the matrix. { fscale[0*4+0] = x; fscale[1*4+1] = y; fscale[2*4+2] = z; } void fm_composeTransform(const REAL *position,const REAL *quat,const REAL *scale,REAL *matrix) { fm_identity(matrix); fm_quatToMatrix(quat,matrix); if ( scale && ( scale[0] != 1 || scale[1] != 1 || scale[2] != 1 ) ) { REAL work[16]; memcpy(work,matrix,sizeof(REAL)*16); REAL mscale[16]; fm_identity(mscale); fm_scale(scale[0],scale[1],scale[2],mscale); fm_matrixMultiply(work,mscale,matrix); } matrix[12] = position[0]; matrix[13] = position[1]; matrix[14] = position[2]; } void fm_setTranslation(const REAL *translation,REAL *matrix) { matrix[12] = translation[0]; matrix[13] = translation[1]; matrix[14] = translation[2]; } static REAL enorm0_3d ( REAL x0, REAL y0, REAL z0, REAL x1, REAL y1, REAL z1 ) /**********************************************************************/ /* Purpose: ENORM0_3D computes the Euclidean norm of (P1-P0) in 3D. Modified: 18 April 1999 Author: John Burkardt Parameters: Input, REAL X0, Y0, Z0, X1, Y1, Z1, the coordinates of the points P0 and P1. Output, REAL ENORM0_3D, the Euclidean norm of (P1-P0). */ { REAL value; value = (REAL)sqrt ( ( x1 - x0 ) * ( x1 - x0 ) + ( y1 - y0 ) * ( y1 - y0 ) + ( z1 - z0 ) * ( z1 - z0 ) ); return value; } static REAL triangle_area_3d ( REAL x1, REAL y1, REAL z1, REAL x2,REAL y2, REAL z2, REAL x3, REAL y3, REAL z3 ) /**********************************************************************/ /* Purpose: TRIANGLE_AREA_3D computes the area of a triangle in 3D. Modified: 22 April 1999 Author: John Burkardt Parameters: Input, REAL X1, Y1, Z1, X2, Y2, Z2, X3, Y3, Z3, the (X,Y,Z) coordinates of the corners of the triangle. Output, REAL TRIANGLE_AREA_3D, the area of the triangle. */ { REAL a; REAL alpha; REAL area; REAL b; REAL base; REAL c; REAL dot; REAL height; /* Find the projection of (P3-P1) onto (P2-P1). */ dot = ( x2 - x1 ) * ( x3 - x1 ) + ( y2 - y1 ) * ( y3 - y1 ) + ( z2 - z1 ) * ( z3 - z1 ); base = enorm0_3d ( x1, y1, z1, x2, y2, z2 ); /* The height of the triangle is the length of (P3-P1) after its projection onto (P2-P1) has been subtracted. 
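    In the code below, alpha = dot / (base*base) is the scalar projection
    coefficient, (a,b,c) = (P3-P1) - alpha*(P2-P1) is the component of (P3-P1)
    perpendicular to the base edge, and the returned area is 0.5 * base * height.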
*/ if ( base == 0.0 ) { height = 0.0; } else { alpha = dot / ( base * base ); a = x3 - x1 - alpha * ( x2 - x1 ); b = y3 - y1 - alpha * ( y2 - y1 ); c = z3 - z1 - alpha * ( z2 - z1 ); height = (REAL)sqrt ( a * a + b * b + c * c ); } area = 0.5f * base * height; return area; } REAL fm_computeArea(const REAL *p1,const REAL *p2,const REAL *p3) { REAL ret = 0; ret = triangle_area_3d(p1[0],p1[1],p1[2],p2[0],p2[1],p2[2],p3[0],p3[1],p3[2]); return ret; } void fm_lerp(const REAL *p1,const REAL *p2,REAL *dest,REAL lerpValue) { dest[0] = ((p2[0] - p1[0])*lerpValue) + p1[0]; dest[1] = ((p2[1] - p1[1])*lerpValue) + p1[1]; dest[2] = ((p2[2] - p1[2])*lerpValue) + p1[2]; } bool fm_pointTestXZ(const REAL *p,const REAL *i,const REAL *j) { bool ret = false; if (((( i[2] <= p[2] ) && ( p[2] < j[2] )) || (( j[2] <= p[2] ) && ( p[2] < i[2] ))) && ( p[0] < (j[0] - i[0]) * (p[2] - i[2]) / (j[2] - i[2]) + i[0])) ret = true; return ret; }; bool fm_insideTriangleXZ(const REAL *p,const REAL *p1,const REAL *p2,const REAL *p3) { bool ret = false; int32_t c = 0; if ( fm_pointTestXZ(p,p1,p2) ) c = !c; if ( fm_pointTestXZ(p,p2,p3) ) c = !c; if ( fm_pointTestXZ(p,p3,p1) ) c = !c; if ( c ) ret = true; return ret; } bool fm_insideAABB(const REAL *pos,const REAL *bmin,const REAL *bmax) { bool ret = false; if ( pos[0] >= bmin[0] && pos[0] <= bmax[0] && pos[1] >= bmin[1] && pos[1] <= bmax[1] && pos[2] >= bmin[2] && pos[2] <= bmax[2] ) ret = true; return ret; } uint32_t fm_clipTestPoint(const REAL *bmin,const REAL *bmax,const REAL *pos) { uint32_t ret = 0; if ( pos[0] < bmin[0] ) ret|=FMCS_XMIN; else if ( pos[0] > bmax[0] ) ret|=FMCS_XMAX; if ( pos[1] < bmin[1] ) ret|=FMCS_YMIN; else if ( pos[1] > bmax[1] ) ret|=FMCS_YMAX; if ( pos[2] < bmin[2] ) ret|=FMCS_ZMIN; else if ( pos[2] > bmax[2] ) ret|=FMCS_ZMAX; return ret; } uint32_t fm_clipTestPointXZ(const REAL *bmin,const REAL *bmax,const REAL *pos) // only tests X and Z, not Y { uint32_t ret = 0; if ( pos[0] < bmin[0] ) ret|=FMCS_XMIN; else if ( pos[0] > bmax[0] ) ret|=FMCS_XMAX; if ( pos[2] < bmin[2] ) ret|=FMCS_ZMIN; else if ( pos[2] > bmax[2] ) ret|=FMCS_ZMAX; return ret; } uint32_t fm_clipTestAABB(const REAL *bmin,const REAL *bmax,const REAL *p1,const REAL *p2,const REAL *p3,uint32_t &andCode) { uint32_t orCode = 0; andCode = FMCS_XMIN | FMCS_XMAX | FMCS_YMIN | FMCS_YMAX | FMCS_ZMIN | FMCS_ZMAX; uint32_t c = fm_clipTestPoint(bmin,bmax,p1); orCode|=c; andCode&=c; c = fm_clipTestPoint(bmin,bmax,p2); orCode|=c; andCode&=c; c = fm_clipTestPoint(bmin,bmax,p3); orCode|=c; andCode&=c; return orCode; } bool intersect(const REAL *si,const REAL *ei,const REAL *bmin,const REAL *bmax,REAL *time) { REAL st,et,fst = 0,fet = 1; for (int32_t i = 0; i < 3; i++) { if (*si < *ei) { if (*si > *bmax || *ei < *bmin) return false; REAL di = *ei - *si; st = (*si < *bmin)? (*bmin - *si) / di: 0; et = (*ei > *bmax)? (*bmax - *si) / di: 1; } else { if (*ei > *bmax || *si < *bmin) return false; REAL di = *ei - *si; st = (*si > *bmax)? (*bmax - *si) / di: 0; et = (*ei < *bmin)? 
(*bmin - *si) / di: 1; } if (st > fst) fst = st; if (et < fet) fet = et; if (fet < fst) return false; bmin++; bmax++; si++; ei++; } *time = fst; return true; } bool fm_lineTestAABB(const REAL *p1,const REAL *p2,const REAL *bmin,const REAL *bmax,REAL &time) { bool sect = intersect(p1,p2,bmin,bmax,&time); return sect; } bool fm_lineTestAABBXZ(const REAL *p1,const REAL *p2,const REAL *bmin,const REAL *bmax,REAL &time) { REAL _bmin[3]; REAL _bmax[3]; _bmin[0] = bmin[0]; _bmin[1] = -1e9; _bmin[2] = bmin[2]; _bmax[0] = bmax[0]; _bmax[1] = 1e9; _bmax[2] = bmax[2]; bool sect = intersect(p1,p2,_bmin,_bmax,&time); return sect; } void fm_minmax(const REAL *p,REAL *bmin,REAL *bmax) // accmulate to a min-max value { if ( p[0] < bmin[0] ) bmin[0] = p[0]; if ( p[1] < bmin[1] ) bmin[1] = p[1]; if ( p[2] < bmin[2] ) bmin[2] = p[2]; if ( p[0] > bmax[0] ) bmax[0] = p[0]; if ( p[1] > bmax[1] ) bmax[1] = p[1]; if ( p[2] > bmax[2] ) bmax[2] = p[2]; } REAL fm_solveX(const REAL *plane,REAL y,REAL z) // solve for X given this plane equation and the other two components. { REAL x = (y*plane[1]+z*plane[2]+plane[3]) / -plane[0]; return x; } REAL fm_solveY(const REAL *plane,REAL x,REAL z) // solve for Y given this plane equation and the other two components. { REAL y = (x*plane[0]+z*plane[2]+plane[3]) / -plane[1]; return y; } REAL fm_solveZ(const REAL *plane,REAL x,REAL y) // solve for Y given this plane equation and the other two components. { REAL z = (x*plane[0]+y*plane[1]+plane[3]) / -plane[2]; return z; } void fm_getAABBCenter(const REAL *bmin,const REAL *bmax,REAL *center) { center[0] = (bmax[0]-bmin[0])*0.5f+bmin[0]; center[1] = (bmax[1]-bmin[1])*0.5f+bmin[1]; center[2] = (bmax[2]-bmin[2])*0.5f+bmin[2]; } FM_Axis fm_getDominantAxis(const REAL normal[3]) { FM_Axis ret = FM_XAXIS; REAL x = (REAL)fabs(normal[0]); REAL y = (REAL)fabs(normal[1]); REAL z = (REAL)fabs(normal[2]); if ( y > x && y > z ) ret = FM_YAXIS; else if ( z > x && z > y ) ret = FM_ZAXIS; return ret; } bool fm_lineSphereIntersect(const REAL *center,REAL radius,const REAL *p1,const REAL *p2,REAL *intersect) { bool ret = false; REAL dir[3]; dir[0] = p2[0]-p1[0]; dir[1] = p2[1]-p1[1]; dir[2] = p2[2]-p1[2]; REAL distance = (REAL)sqrt( dir[0]*dir[0]+dir[1]*dir[1]+dir[2]*dir[2]); if ( distance > 0 ) { REAL recip = 1.0f / distance; dir[0]*=recip; dir[1]*=recip; dir[2]*=recip; ret = fm_raySphereIntersect(center,radius,p1,dir,distance,intersect); } else { dir[0] = center[0]-p1[0]; dir[1] = center[1]-p1[1]; dir[2] = center[2]-p1[2]; REAL d2 = dir[0]*dir[0]+dir[1]*dir[1]+dir[2]*dir[2]; REAL r2 = radius*radius; if ( d2 < r2 ) { ret = true; if ( intersect ) { intersect[0] = p1[0]; intersect[1] = p1[1]; intersect[2] = p1[2]; } } } return ret; } #define DOT(p1,p2) (p1[0]*p2[0]+p1[1]*p2[1]+p1[2]*p2[2]) bool fm_raySphereIntersect(const REAL *center,REAL radius,const REAL *pos,const REAL *dir,REAL distance,REAL *intersect) { bool ret = false; REAL E0[3]; E0[0] = center[0] - pos[0]; E0[1] = center[1] - pos[1]; E0[2] = center[2] - pos[2]; REAL V[3]; V[0] = dir[0]; V[1] = dir[1]; V[2] = dir[2]; REAL dist2 = E0[0]*E0[0] + E0[1]*E0[1] + E0[2] * E0[2]; REAL radius2 = radius*radius; // radius squared.. // Bug Fix For Gem, if origin is *inside* the sphere, invert the // direction vector so that we get a valid intersection location. 
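// When the ray origin starts inside the sphere, the closest-approach offset
// (v - sqrt(disc)) computed below is always negative, which would place the hit
// behind the origin; negating V turns that negative offset along the flipped
// direction into a point in front of the origin along the original direction,
// so a usable exit point is still returned.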
if ( dist2 < radius2 ) { V[0]*=-1; V[1]*=-1; V[2]*=-1; } REAL v = DOT(E0,V); REAL disc = radius2 - (dist2 - v*v); if (disc > 0.0f) { if ( intersect ) { REAL d = (REAL)sqrt(disc); REAL diff = v-d; if ( diff < distance ) { intersect[0] = pos[0]+V[0]*diff; intersect[1] = pos[1]+V[1]*diff; intersect[2] = pos[2]+V[2]*diff; ret = true; } } } return ret; } void fm_catmullRom(REAL *out_vector,const REAL *p1,const REAL *p2,const REAL *p3,const REAL *p4, const REAL s) { REAL s_squared = s * s; REAL s_cubed = s_squared * s; REAL coefficient_p1 = -s_cubed + 2*s_squared - s; REAL coefficient_p2 = 3 * s_cubed - 5 * s_squared + 2; REAL coefficient_p3 = -3 * s_cubed +4 * s_squared + s; REAL coefficient_p4 = s_cubed - s_squared; out_vector[0] = (coefficient_p1 * p1[0] + coefficient_p2 * p2[0] + coefficient_p3 * p3[0] + coefficient_p4 * p4[0])*0.5f; out_vector[1] = (coefficient_p1 * p1[1] + coefficient_p2 * p2[1] + coefficient_p3 * p3[1] + coefficient_p4 * p4[1])*0.5f; out_vector[2] = (coefficient_p1 * p1[2] + coefficient_p2 * p2[2] + coefficient_p3 * p3[2] + coefficient_p4 * p4[2])*0.5f; } bool fm_intersectAABB(const REAL *bmin1,const REAL *bmax1,const REAL *bmin2,const REAL *bmax2) { if ((bmin1[0] > bmax2[0]) || (bmin2[0] > bmax1[0])) return false; if ((bmin1[1] > bmax2[1]) || (bmin2[1] > bmax1[1])) return false; if ((bmin1[2] > bmax2[2]) || (bmin2[2] > bmax1[2])) return false; return true; } bool fm_insideAABB(const REAL *obmin,const REAL *obmax,const REAL *tbmin,const REAL *tbmax) // test if bounding box tbmin/tmbax is fully inside obmin/obmax { bool ret = false; if ( tbmax[0] <= obmax[0] && tbmax[1] <= obmax[1] && tbmax[2] <= obmax[2] && tbmin[0] >= obmin[0] && tbmin[1] >= obmin[1] && tbmin[2] >= obmin[2] ) ret = true; return ret; } // Reference, from Stan Melax in Game Gems I // Quaternion q; // vector3 c = CrossProduct(v0,v1); // REAL d = DotProduct(v0,v1); // REAL s = (REAL)sqrt((1+d)*2); // q.x = c.x / s; // q.y = c.y / s; // q.z = c.z / s; // q.w = s /2.0f; // return q; void fm_rotationArc(const REAL *v0,const REAL *v1,REAL *quat) { REAL cross[3]; fm_cross(cross,v0,v1); REAL d = fm_dot(v0,v1); if( d<= -0.99999f ) // 180 about x axis { if ( fabsf((float)v0[0]) < 0.1f ) { quat[0] = 0; quat[1] = v0[2]; quat[2] = -v0[1]; quat[3] = 0; } else { quat[0] = v0[1]; quat[1] = -v0[0]; quat[2] = 0; quat[3] = 0; } REAL magnitudeSquared = quat[0]*quat[0] + quat[1]*quat[1] + quat[2]*quat[2] + quat[3]*quat[3]; REAL magnitude = sqrtf((float)magnitudeSquared); REAL recip = 1.0f / magnitude; quat[0]*=recip; quat[1]*=recip; quat[2]*=recip; quat[3]*=recip; } else { REAL s = (REAL)sqrt((1+d)*2); REAL recip = 1.0f / s; quat[0] = cross[0] * recip; quat[1] = cross[1] * recip; quat[2] = cross[2] * recip; quat[3] = s * 0.5f; } } REAL fm_distancePointLineSegment(const REAL *Point,const REAL *LineStart,const REAL *LineEnd,REAL *intersection,LineSegmentType &type,REAL epsilon) { REAL ret; REAL LineMag = fm_distance( LineEnd, LineStart ); if ( LineMag > 0 ) { REAL U = ( ( ( Point[0] - LineStart[0] ) * ( LineEnd[0] - LineStart[0] ) ) + ( ( Point[1] - LineStart[1] ) * ( LineEnd[1] - LineStart[1] ) ) + ( ( Point[2] - LineStart[2] ) * ( LineEnd[2] - LineStart[2] ) ) ) / ( LineMag * LineMag ); if( U < 0.0f || U > 1.0f ) { REAL d1 = fm_distanceSquared(Point,LineStart); REAL d2 = fm_distanceSquared(Point,LineEnd); if ( d1 <= d2 ) { ret = (REAL)sqrt(d1); intersection[0] = LineStart[0]; intersection[1] = LineStart[1]; intersection[2] = LineStart[2]; type = LS_START; } else { ret = (REAL)sqrt(d2); intersection[0] = LineEnd[0]; 
intersection[1] = LineEnd[1]; intersection[2] = LineEnd[2]; type = LS_END; } } else { intersection[0] = LineStart[0] + U * ( LineEnd[0] - LineStart[0] ); intersection[1] = LineStart[1] + U * ( LineEnd[1] - LineStart[1] ); intersection[2] = LineStart[2] + U * ( LineEnd[2] - LineStart[2] ); ret = fm_distance(Point,intersection); REAL d1 = fm_distanceSquared(intersection,LineStart); REAL d2 = fm_distanceSquared(intersection,LineEnd); REAL mag = (epsilon*2)*(epsilon*2); if ( d1 < mag ) // if less than 1/100th the total distance, treat is as the 'start' { type = LS_START; } else if ( d2 < mag ) { type = LS_END; } else { type = LS_MIDDLE; } } } else { ret = LineMag; intersection[0] = LineEnd[0]; intersection[1] = LineEnd[1]; intersection[2] = LineEnd[2]; type = LS_END; } return ret; } #ifndef BEST_FIT_PLANE_H #define BEST_FIT_PLANE_H template <class Type> class Eigen { public: void DecrSortEigenStuff(void) { Tridiagonal(); //diagonalize the matrix. QLAlgorithm(); // DecreasingSort(); GuaranteeRotation(); } void Tridiagonal(void) { Type fM00 = mElement[0][0]; Type fM01 = mElement[0][1]; Type fM02 = mElement[0][2]; Type fM11 = mElement[1][1]; Type fM12 = mElement[1][2]; Type fM22 = mElement[2][2]; m_afDiag[0] = fM00; m_afSubd[2] = 0; if (fM02 != (Type)0.0) { Type fLength = (REAL)sqrt(fM01*fM01+fM02*fM02); Type fInvLength = ((Type)1.0)/fLength; fM01 *= fInvLength; fM02 *= fInvLength; Type fQ = ((Type)2.0)*fM01*fM12+fM02*(fM22-fM11); m_afDiag[1] = fM11+fM02*fQ; m_afDiag[2] = fM22-fM02*fQ; m_afSubd[0] = fLength; m_afSubd[1] = fM12-fM01*fQ; mElement[0][0] = (Type)1.0; mElement[0][1] = (Type)0.0; mElement[0][2] = (Type)0.0; mElement[1][0] = (Type)0.0; mElement[1][1] = fM01; mElement[1][2] = fM02; mElement[2][0] = (Type)0.0; mElement[2][1] = fM02; mElement[2][2] = -fM01; m_bIsRotation = false; } else { m_afDiag[1] = fM11; m_afDiag[2] = fM22; m_afSubd[0] = fM01; m_afSubd[1] = fM12; mElement[0][0] = (Type)1.0; mElement[0][1] = (Type)0.0; mElement[0][2] = (Type)0.0; mElement[1][0] = (Type)0.0; mElement[1][1] = (Type)1.0; mElement[1][2] = (Type)0.0; mElement[2][0] = (Type)0.0; mElement[2][1] = (Type)0.0; mElement[2][2] = (Type)1.0; m_bIsRotation = true; } } bool QLAlgorithm(void) { const int32_t iMaxIter = 32; for (int32_t i0 = 0; i0 <3; i0++) { int32_t i1; for (i1 = 0; i1 < iMaxIter; i1++) { int32_t i2; for (i2 = i0; i2 <= (3-2); i2++) { Type fTmp = fabs(m_afDiag[i2]) + fabs(m_afDiag[i2+1]); if ( fabs(m_afSubd[i2]) + fTmp == fTmp ) break; } if (i2 == i0) { break; } Type fG = (m_afDiag[i0+1] - m_afDiag[i0])/(((Type)2.0) * m_afSubd[i0]); Type fR = (REAL)sqrt(fG*fG+(Type)1.0); if (fG < (Type)0.0) { fG = m_afDiag[i2]-m_afDiag[i0]+m_afSubd[i0]/(fG-fR); } else { fG = m_afDiag[i2]-m_afDiag[i0]+m_afSubd[i0]/(fG+fR); } Type fSin = (Type)1.0, fCos = (Type)1.0, fP = (Type)0.0; for (int32_t i3 = i2-1; i3 >= i0; i3--) { Type fF = fSin*m_afSubd[i3]; Type fB = fCos*m_afSubd[i3]; if (fabs(fF) >= fabs(fG)) { fCos = fG/fF; fR = (REAL)sqrt(fCos*fCos+(Type)1.0); m_afSubd[i3+1] = fF*fR; fSin = ((Type)1.0)/fR; fCos *= fSin; } else { fSin = fF/fG; fR = (REAL)sqrt(fSin*fSin+(Type)1.0); m_afSubd[i3+1] = fG*fR; fCos = ((Type)1.0)/fR; fSin *= fCos; } fG = m_afDiag[i3+1]-fP; fR = (m_afDiag[i3]-fG)*fSin+((Type)2.0)*fB*fCos; fP = fSin*fR; m_afDiag[i3+1] = fG+fP; fG = fCos*fR-fB; for (int32_t i4 = 0; i4 < 3; i4++) { fF = mElement[i4][i3+1]; mElement[i4][i3+1] = fSin*mElement[i4][i3]+fCos*fF; mElement[i4][i3] = fCos*mElement[i4][i3]-fSin*fF; } } m_afDiag[i0] -= fP; m_afSubd[i0] = fG; m_afSubd[i2] = (Type)0.0; } if (i1 == iMaxIter) { 
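// the QL sweep did not converge within iMaxIter iterations; report failure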
return false; } } return true; } void DecreasingSort(void) { //sort eigenvalues in decreasing order, e[0] >= ... >= e[iSize-1] for (int32_t i0 = 0, i1; i0 <= 3-2; i0++) { // locate maximum eigenvalue i1 = i0; Type fMax = m_afDiag[i1]; int32_t i2; for (i2 = i0+1; i2 < 3; i2++) { if (m_afDiag[i2] > fMax) { i1 = i2; fMax = m_afDiag[i1]; } } if (i1 != i0) { // swap eigenvalues m_afDiag[i1] = m_afDiag[i0]; m_afDiag[i0] = fMax; // swap eigenvectors for (i2 = 0; i2 < 3; i2++) { Type fTmp = mElement[i2][i0]; mElement[i2][i0] = mElement[i2][i1]; mElement[i2][i1] = fTmp; m_bIsRotation = !m_bIsRotation; } } } } void GuaranteeRotation(void) { if (!m_bIsRotation) { // change sign on the first column for (int32_t iRow = 0; iRow <3; iRow++) { mElement[iRow][0] = -mElement[iRow][0]; } } } Type mElement[3][3]; Type m_afDiag[3]; Type m_afSubd[3]; bool m_bIsRotation; }; #endif bool fm_computeBestFitPlane(uint32_t vcount, const REAL *points, uint32_t vstride, const REAL *weights, uint32_t wstride, REAL *plane) { bool ret = false; REAL kOrigin[3] = { 0, 0, 0 }; REAL wtotal = 0; { const char *source = (const char *) points; const char *wsource = (const char *) weights; for (uint32_t i=0; i<vcount; i++) { const REAL *p = (const REAL *) source; REAL w = 1; if ( wsource ) { const REAL *ws = (const REAL *) wsource; w = *ws; // wsource+=wstride; } kOrigin[0]+=p[0]*w; kOrigin[1]+=p[1]*w; kOrigin[2]+=p[2]*w; wtotal+=w; source+=vstride; } } REAL recip = 1.0f / wtotal; // reciprocol of total weighting kOrigin[0]*=recip; kOrigin[1]*=recip; kOrigin[2]*=recip; REAL fSumXX=0; REAL fSumXY=0; REAL fSumXZ=0; REAL fSumYY=0; REAL fSumYZ=0; REAL fSumZZ=0; { const char *source = (const char *) points; const char *wsource = (const char *) weights; for (uint32_t i=0; i<vcount; i++) { const REAL *p = (const REAL *) source; REAL w = 1; if ( wsource ) { const REAL *ws = (const REAL *) wsource; w = *ws; // wsource+=wstride; } REAL kDiff[3]; kDiff[0] = w*(p[0] - kOrigin[0]); // apply vertex weighting! kDiff[1] = w*(p[1] - kOrigin[1]); kDiff[2] = w*(p[2] - kOrigin[2]); fSumXX+= kDiff[0] * kDiff[0]; // sume of the squares of the differences. fSumXY+= kDiff[0] * kDiff[1]; // sume of the squares of the differences. fSumXZ+= kDiff[0] * kDiff[2]; // sume of the squares of the differences. fSumYY+= kDiff[1] * kDiff[1]; fSumYZ+= kDiff[1] * kDiff[2]; fSumZZ+= kDiff[2] * kDiff[2]; source+=vstride; } } fSumXX *= recip; fSumXY *= recip; fSumXZ *= recip; fSumYY *= recip; fSumYZ *= recip; fSumZZ *= recip; // setup the eigensolver Eigen<REAL> kES; kES.mElement[0][0] = fSumXX; kES.mElement[0][1] = fSumXY; kES.mElement[0][2] = fSumXZ; kES.mElement[1][0] = fSumXY; kES.mElement[1][1] = fSumYY; kES.mElement[1][2] = fSumYZ; kES.mElement[2][0] = fSumXZ; kES.mElement[2][1] = fSumYZ; kES.mElement[2][2] = fSumZZ; // compute eigenstuff, smallest eigenvalue is in last position kES.DecrSortEigenStuff(); REAL kNormal[3]; kNormal[0] = kES.mElement[0][2]; kNormal[1] = kES.mElement[1][2]; kNormal[2] = kES.mElement[2][2]; // the minimum energy plane[0] = kNormal[0]; plane[1] = kNormal[1]; plane[2] = kNormal[2]; plane[3] = 0 - fm_dot(kNormal,kOrigin); ret = true; return ret; } bool fm_colinear(const REAL a1[3],const REAL a2[3],const REAL b1[3],const REAL b2[3],REAL epsilon) // true if these two line segments are co-linear. 
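// Note: despite the name, 'epsilon' here (and in the three-point overload below) is a
// cosine threshold, not a distance: the inputs are reported co-linear when the dot
// product of the two normalized directions is >= epsilon.  A hypothetical call using
// a tolerance of roughly 2.5 degrees:
//
//     bool sameLine = fm_colinear(a1, a2, b1, b2, (REAL)0.999);
//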
{ bool ret = false; REAL dir1[3]; REAL dir2[3]; dir1[0] = (a2[0] - a1[0]); dir1[1] = (a2[1] - a1[1]); dir1[2] = (a2[2] - a1[2]); dir2[0] = (b2[0]-a1[0]) - (b1[0]-a1[0]); dir2[1] = (b2[1]-a1[1]) - (b1[1]-a1[1]); dir2[2] = (b2[2]-a2[2]) - (b1[2]-a2[2]); fm_normalize(dir1); fm_normalize(dir2); REAL dot = fm_dot(dir1,dir2); if ( dot >= epsilon ) { ret = true; } return ret; } bool fm_colinear(const REAL *p1,const REAL *p2,const REAL *p3,REAL epsilon) { bool ret = false; REAL dir1[3]; REAL dir2[3]; dir1[0] = p2[0] - p1[0]; dir1[1] = p2[1] - p1[1]; dir1[2] = p2[2] - p1[2]; dir2[0] = p3[0] - p2[0]; dir2[1] = p3[1] - p2[1]; dir2[2] = p3[2] - p2[2]; fm_normalize(dir1); fm_normalize(dir2); REAL dot = fm_dot(dir1,dir2); if ( dot >= epsilon ) { ret = true; } return ret; } void fm_initMinMax(const REAL *p,REAL *bmin,REAL *bmax) { bmax[0] = bmin[0] = p[0]; bmax[1] = bmin[1] = p[1]; bmax[2] = bmin[2] = p[2]; } IntersectResult fm_intersectLineSegments2d(const REAL *a1,const REAL *a2,const REAL *b1,const REAL *b2,REAL *intersection) { IntersectResult ret; REAL denom = ((b2[1] - b1[1])*(a2[0] - a1[0])) - ((b2[0] - b1[0])*(a2[1] - a1[1])); REAL nume_a = ((b2[0] - b1[0])*(a1[1] - b1[1])) - ((b2[1] - b1[1])*(a1[0] - b1[0])); REAL nume_b = ((a2[0] - a1[0])*(a1[1] - b1[1])) - ((a2[1] - a1[1])*(a1[0] - b1[0])); if (denom == 0 ) { if(nume_a == 0 && nume_b == 0) { ret = IR_COINCIDENT; } else { ret = IR_PARALLEL; } } else { REAL recip = 1 / denom; REAL ua = nume_a * recip; REAL ub = nume_b * recip; if(ua >= 0 && ua <= 1 && ub >= 0 && ub <= 1 ) { // Get the intersection point. intersection[0] = a1[0] + ua*(a2[0] - a1[0]); intersection[1] = a1[1] + ua*(a2[1] - a1[1]); ret = IR_DO_INTERSECT; } else { ret = IR_DONT_INTERSECT; } } return ret; } IntersectResult fm_intersectLineSegments2dTime(const REAL *a1,const REAL *a2,const REAL *b1,const REAL *b2,REAL &t1,REAL &t2) { IntersectResult ret; REAL denom = ((b2[1] - b1[1])*(a2[0] - a1[0])) - ((b2[0] - b1[0])*(a2[1] - a1[1])); REAL nume_a = ((b2[0] - b1[0])*(a1[1] - b1[1])) - ((b2[1] - b1[1])*(a1[0] - b1[0])); REAL nume_b = ((a2[0] - a1[0])*(a1[1] - b1[1])) - ((a2[1] - a1[1])*(a1[0] - b1[0])); if (denom == 0 ) { if(nume_a == 0 && nume_b == 0) { ret = IR_COINCIDENT; } else { ret = IR_PARALLEL; } } else { REAL recip = 1 / denom; REAL ua = nume_a * recip; REAL ub = nume_b * recip; if(ua >= 0 && ua <= 1 && ub >= 0 && ub <= 1 ) { t1 = ua; t2 = ub; ret = IR_DO_INTERSECT; } else { ret = IR_DONT_INTERSECT; } } return ret; } //**** Plane Triangle Intersection // assumes that the points are on opposite sides of the plane! void fm_intersectPointPlane(const REAL *p1,const REAL *p2,REAL *split,const REAL *plane) { REAL dp1 = fm_distToPlane(plane,p1); REAL dir[3]; dir[0] = p2[0] - p1[0]; dir[1] = p2[1] - p1[1]; dir[2] = p2[2] - p1[2]; REAL dot1 = dir[0]*plane[0] + dir[1]*plane[1] + dir[2]*plane[2]; REAL dot2 = dp1 - plane[3]; REAL t = -(plane[3] + dot2 ) / dot1; split[0] = (dir[0]*t)+p1[0]; split[1] = (dir[1]*t)+p1[1]; split[2] = (dir[2]*t)+p1[2]; } PlaneTriResult fm_getSidePlane(const REAL *p,const REAL *plane,REAL epsilon) { PlaneTriResult ret = PTR_ON_PLANE; REAL d = fm_distToPlane(plane,p); if ( d < -epsilon || d > epsilon ) { if ( d > 0 ) ret = PTR_FRONT; // it is 'in front' within the provided epsilon value. 
else ret = PTR_BACK; } return ret; } #ifndef PLANE_TRIANGLE_INTERSECTION_H #define PLANE_TRIANGLE_INTERSECTION_H #define MAXPTS 256 template <class Type> class point { public: void set(const Type *p) { x = p[0]; y = p[1]; z = p[2]; } Type x; Type y; Type z; }; template <class Type> class plane { public: plane(const Type *p) { normal.x = p[0]; normal.y = p[1]; normal.z = p[2]; D = p[3]; } Type Classify_Point(const point<Type> &p) { return p.x*normal.x + p.y*normal.y + p.z*normal.z + D; } point<Type> normal; Type D; }; template <class Type> class polygon { public: polygon(void) { mVcount = 0; } polygon(const Type *p1,const Type *p2,const Type *p3) { mVcount = 3; mVertices[0].set(p1); mVertices[1].set(p2); mVertices[2].set(p3); } int32_t NumVertices(void) const { return mVcount; }; const point<Type>& Vertex(int32_t index) { if ( index < 0 ) index+=mVcount; return mVertices[index]; }; void set(const point<Type> *pts,int32_t count) { for (int32_t i=0; i<count; i++) { mVertices[i] = pts[i]; } mVcount = count; } void Split_Polygon(polygon<Type> *poly,plane<Type> *part, polygon<Type> &front, polygon<Type> &back) { int32_t count = poly->NumVertices (); int32_t out_c = 0, in_c = 0; point<Type> ptA, ptB,outpts[MAXPTS],inpts[MAXPTS]; Type sideA, sideB; ptA = poly->Vertex (count - 1); sideA = part->Classify_Point (ptA); for (int32_t i = -1; ++i < count;) { ptB = poly->Vertex(i); sideB = part->Classify_Point(ptB); if (sideB > 0) { if (sideA < 0) { point<Type> v; fm_intersectPointPlane(&ptB.x, &ptA.x, &v.x, &part->normal.x ); outpts[out_c++] = inpts[in_c++] = v; } outpts[out_c++] = ptB; } else if (sideB < 0) { if (sideA > 0) { point<Type> v; fm_intersectPointPlane(&ptB.x, &ptA.x, &v.x, &part->normal.x ); outpts[out_c++] = inpts[in_c++] = v; } inpts[in_c++] = ptB; } else outpts[out_c++] = inpts[in_c++] = ptB; ptA = ptB; sideA = sideB; } front.set(&outpts[0], out_c); back.set(&inpts[0], in_c); } int32_t mVcount; point<Type> mVertices[MAXPTS]; }; #endif static inline void add(const REAL *p,REAL *dest,uint32_t tstride,uint32_t &pcount) { char *d = (char *) dest; d = d + pcount*tstride; dest = (REAL *) d; dest[0] = p[0]; dest[1] = p[1]; dest[2] = p[2]; pcount++; assert( pcount <= 4 ); } PlaneTriResult fm_planeTriIntersection(const REAL *_plane, // the plane equation in Ax+By+Cz+D format const REAL *triangle, // the source triangle. uint32_t tstride, // stride in bytes of the input and output *vertices* REAL epsilon, // the co-planar epsilon value. REAL *front, // the triangle in front of the uint32_t &fcount, // number of vertices in the 'front' triangle REAL *back, // the triangle in back of the plane uint32_t &bcount) // the number of vertices in the 'back' triangle. { fcount = 0; bcount = 0; const char *tsource = (const char *) triangle; // get the three vertices of the triangle. const REAL *p1 = (const REAL *) (tsource); const REAL *p2 = (const REAL *) (tsource+tstride); const REAL *p3 = (const REAL *) (tsource+tstride*2); PlaneTriResult r1 = fm_getSidePlane(p1,_plane,epsilon); // compute the side of the plane each vertex is on PlaneTriResult r2 = fm_getSidePlane(p2,_plane,epsilon); PlaneTriResult r3 = fm_getSidePlane(p3,_plane,epsilon); // If any of the points lay right *on* the plane.... if ( r1 == PTR_ON_PLANE || r2 == PTR_ON_PLANE || r3 == PTR_ON_PLANE ) { // If the triangle is completely co-planar, then just treat it as 'front' and return! 
if ( r1 == PTR_ON_PLANE && r2 == PTR_ON_PLANE && r3 == PTR_ON_PLANE ) { add(p1,front,tstride,fcount); add(p2,front,tstride,fcount); add(p3,front,tstride,fcount); return PTR_FRONT; } // Decide to place the co-planar points on the same side as the co-planar point. PlaneTriResult r= PTR_ON_PLANE; if ( r1 != PTR_ON_PLANE ) r = r1; else if ( r2 != PTR_ON_PLANE ) r = r2; else if ( r3 != PTR_ON_PLANE ) r = r3; if ( r1 == PTR_ON_PLANE ) r1 = r; if ( r2 == PTR_ON_PLANE ) r2 = r; if ( r3 == PTR_ON_PLANE ) r3 = r; } if ( r1 == r2 && r1 == r3 ) // if all three vertices are on the same side of the plane. { if ( r1 == PTR_FRONT ) // if all three are in front of the plane, then copy to the 'front' output triangle. { add(p1,front,tstride,fcount); add(p2,front,tstride,fcount); add(p3,front,tstride,fcount); } else { add(p1,back,tstride,bcount); // if all three are in 'back' then copy to the 'back' output triangle. add(p2,back,tstride,bcount); add(p3,back,tstride,bcount); } return r1; // if all three points are on the same side of the plane return result } polygon<REAL> pi(p1,p2,p3); polygon<REAL> pfront,pback; plane<REAL> part(_plane); pi.Split_Polygon(&pi,&part,pfront,pback); for (int32_t i=0; i<pfront.mVcount; i++) { add( &pfront.mVertices[i].x, front, tstride, fcount ); } for (int32_t i=0; i<pback.mVcount; i++) { add( &pback.mVertices[i].x, back, tstride, bcount ); } PlaneTriResult ret = PTR_SPLIT; if ( fcount < 3 ) fcount = 0; if ( bcount < 3 ) bcount = 0; if ( fcount == 0 && bcount ) ret = PTR_BACK; if ( bcount == 0 && fcount ) ret = PTR_FRONT; return ret; } // computes the OBB for this set of points relative to this transform matrix. void computeOBB(uint32_t vcount,const REAL *points,uint32_t pstride,REAL *sides,REAL *matrix) { const char *src = (const char *) points; REAL bmin[3] = { 1e9, 1e9, 1e9 }; REAL bmax[3] = { -1e9, -1e9, -1e9 }; for (uint32_t i=0; i<vcount; i++) { const REAL *p = (const REAL *) src; REAL t[3]; fm_inverseRT(matrix, p, t ); // inverse rotate translate if ( t[0] < bmin[0] ) bmin[0] = t[0]; if ( t[1] < bmin[1] ) bmin[1] = t[1]; if ( t[2] < bmin[2] ) bmin[2] = t[2]; if ( t[0] > bmax[0] ) bmax[0] = t[0]; if ( t[1] > bmax[1] ) bmax[1] = t[1]; if ( t[2] > bmax[2] ) bmax[2] = t[2]; src+=pstride; } REAL center[3]; sides[0] = bmax[0]-bmin[0]; sides[1] = bmax[1]-bmin[1]; sides[2] = bmax[2]-bmin[2]; center[0] = sides[0]*0.5f+bmin[0]; center[1] = sides[1]*0.5f+bmin[1]; center[2] = sides[2]*0.5f+bmin[2]; REAL ocenter[3]; fm_rotate(matrix,center,ocenter); matrix[12]+=ocenter[0]; matrix[13]+=ocenter[1]; matrix[14]+=ocenter[2]; } void fm_computeBestFitOBB(uint32_t vcount,const REAL *points,uint32_t pstride,REAL *sides,REAL *matrix,bool bruteForce) { REAL plane[4]; fm_computeBestFitPlane(vcount,points,pstride,0,0,plane); fm_planeToMatrix(plane,matrix); computeOBB( vcount, points, pstride, sides, matrix ); REAL refmatrix[16]; memcpy(refmatrix,matrix,16*sizeof(REAL)); REAL volume = sides[0]*sides[1]*sides[2]; if ( bruteForce ) { for (REAL a=10; a<180; a+=10) { REAL quat[4]; fm_eulerToQuat(0,a*FM_DEG_TO_RAD,0,quat); REAL temp[16]; REAL pmatrix[16]; fm_quatToMatrix(quat,temp); fm_matrixMultiply(temp,refmatrix,pmatrix); REAL psides[3]; computeOBB( vcount, points, pstride, psides, pmatrix ); REAL v = psides[0]*psides[1]*psides[2]; if ( v < volume ) { volume = v; memcpy(matrix,pmatrix,sizeof(REAL)*16); sides[0] = psides[0]; sides[1] = psides[1]; sides[2] = psides[2]; } } } } void fm_computeBestFitOBB(uint32_t vcount,const REAL *points,uint32_t pstride,REAL *sides,REAL *pos,REAL *quat,bool bruteForce) 
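// Illustrative sketch of how the OBB fitting above might be called; the point array
// and counts are hypothetical, not taken from this library:
//
//     REAL cloud[100*3];                       // xyz-interleaved point cloud
//     REAL sides[3];                           // box extents (output)
//     REAL pos[3];                             // box center (output)
//     REAL quat[4];                            // box orientation (output)
//     fm_computeBestFitOBB(100, cloud, sizeof(REAL)*3, sides, pos, quat, true);
//
// Passing bruteForce=true refines the initial best-fit-plane orientation by sweeping
// additional rotations about the local Y axis in 10 degree steps and keeping the
// orientation that yields the smallest box volume.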
{ REAL matrix[16]; fm_computeBestFitOBB(vcount,points,pstride,sides,matrix,bruteForce); fm_getTranslation(matrix,pos); fm_matrixToQuat(matrix,quat); } void fm_computeBestFitABB(uint32_t vcount,const REAL *points,uint32_t pstride,REAL *sides,REAL *pos) { REAL bmin[3]; REAL bmax[3]; bmin[0] = points[0]; bmin[1] = points[1]; bmin[2] = points[2]; bmax[0] = points[0]; bmax[1] = points[1]; bmax[2] = points[2]; const char *cp = (const char *) points; for (uint32_t i=0; i<vcount; i++) { const REAL *p = (const REAL *) cp; if ( p[0] < bmin[0] ) bmin[0] = p[0]; if ( p[1] < bmin[1] ) bmin[1] = p[1]; if ( p[2] < bmin[2] ) bmin[2] = p[2]; if ( p[0] > bmax[0] ) bmax[0] = p[0]; if ( p[1] > bmax[1] ) bmax[1] = p[1]; if ( p[2] > bmax[2] ) bmax[2] = p[2]; cp+=pstride; } sides[0] = bmax[0] - bmin[0]; sides[1] = bmax[1] - bmin[1]; sides[2] = bmax[2] - bmin[2]; pos[0] = bmin[0]+sides[0]*0.5f; pos[1] = bmin[1]+sides[1]*0.5f; pos[2] = bmin[2]+sides[2]*0.5f; } void fm_planeToMatrix(const REAL *plane,REAL *matrix) // convert a plane equation to a 4x4 rotation matrix { REAL ref[3] = { 0, 1, 0 }; REAL quat[4]; fm_rotationArc(ref,plane,quat); fm_quatToMatrix(quat,matrix); REAL origin[3] = { 0, -plane[3], 0 }; REAL center[3]; fm_transform(matrix,origin,center); fm_setTranslation(center,matrix); } void fm_planeToQuat(const REAL *plane,REAL *quat,REAL *pos) // convert a plane equation to a quaternion and translation { REAL ref[3] = { 0, 1, 0 }; REAL matrix[16]; fm_rotationArc(ref,plane,quat); fm_quatToMatrix(quat,matrix); REAL origin[3] = { 0, plane[3], 0 }; fm_transform(matrix,origin,pos); } void fm_eulerMatrix(REAL ax,REAL ay,REAL az,REAL *matrix) // convert euler (in radians) to a dest 4x4 matrix (translation set to zero) { REAL quat[4]; fm_eulerToQuat(ax,ay,az,quat); fm_quatToMatrix(quat,matrix); } //********************************************************** //********************************************************** //**** Vertex Welding //********************************************************** //********************************************************** #ifndef VERTEX_INDEX_H #define VERTEX_INDEX_H namespace VERTEX_INDEX { class KdTreeNode; typedef std::vector< KdTreeNode * > KdTreeNodeVector; enum Axes { X_AXIS = 0, Y_AXIS = 1, Z_AXIS = 2 }; class KdTreeFindNode { public: KdTreeFindNode(void) { mNode = 0; mDistance = 0; } KdTreeNode *mNode; double mDistance; }; class KdTreeInterface { public: virtual const double * getPositionDouble(uint32_t index) const = 0; virtual const float * getPositionFloat(uint32_t index) const = 0; }; class KdTreeNode { public: KdTreeNode(void) { mIndex = 0; mLeft = 0; mRight = 0; } KdTreeNode(uint32_t index) { mIndex = index; mLeft = 0; mRight = 0; }; ~KdTreeNode(void) { } void addDouble(KdTreeNode *node,Axes dim,const KdTreeInterface *iface) { const double *nodePosition = iface->getPositionDouble( node->mIndex ); const double *position = iface->getPositionDouble( mIndex ); switch ( dim ) { case X_AXIS: if ( nodePosition[0] <= position[0] ) { if ( mLeft ) mLeft->addDouble(node,Y_AXIS,iface); else mLeft = node; } else { if ( mRight ) mRight->addDouble(node,Y_AXIS,iface); else mRight = node; } break; case Y_AXIS: if ( nodePosition[1] <= position[1] ) { if ( mLeft ) mLeft->addDouble(node,Z_AXIS,iface); else mLeft = node; } else { if ( mRight ) mRight->addDouble(node,Z_AXIS,iface); else mRight = node; } break; case Z_AXIS: if ( nodePosition[2] <= position[2] ) { if ( mLeft ) mLeft->addDouble(node,X_AXIS,iface); else mLeft = node; } else { if ( mRight ) 
mRight->addDouble(node,X_AXIS,iface); else mRight = node; } break; } } void addFloat(KdTreeNode *node,Axes dim,const KdTreeInterface *iface) { const float *nodePosition = iface->getPositionFloat( node->mIndex ); const float *position = iface->getPositionFloat( mIndex ); switch ( dim ) { case X_AXIS: if ( nodePosition[0] <= position[0] ) { if ( mLeft ) mLeft->addFloat(node,Y_AXIS,iface); else mLeft = node; } else { if ( mRight ) mRight->addFloat(node,Y_AXIS,iface); else mRight = node; } break; case Y_AXIS: if ( nodePosition[1] <= position[1] ) { if ( mLeft ) mLeft->addFloat(node,Z_AXIS,iface); else mLeft = node; } else { if ( mRight ) mRight->addFloat(node,Z_AXIS,iface); else mRight = node; } break; case Z_AXIS: if ( nodePosition[2] <= position[2] ) { if ( mLeft ) mLeft->addFloat(node,X_AXIS,iface); else mLeft = node; } else { if ( mRight ) mRight->addFloat(node,X_AXIS,iface); else mRight = node; } break; } } uint32_t getIndex(void) const { return mIndex; }; void search(Axes axis,const double *pos,double radius,uint32_t &count,uint32_t maxObjects,KdTreeFindNode *found,const KdTreeInterface *iface) { const double *position = iface->getPositionDouble(mIndex); double dx = pos[0] - position[0]; double dy = pos[1] - position[1]; double dz = pos[2] - position[2]; KdTreeNode *search1 = 0; KdTreeNode *search2 = 0; switch ( axis ) { case X_AXIS: if ( dx <= 0 ) // JWR if we are to the left { search1 = mLeft; // JWR then search to the left if ( -dx < radius ) // JWR if distance to the right is less than our search radius, continue on the right as well. search2 = mRight; } else { search1 = mRight; // JWR ok, we go down the left tree if ( dx < radius ) // JWR if the distance from the right is less than our search radius search2 = mLeft; } axis = Y_AXIS; break; case Y_AXIS: if ( dy <= 0 ) { search1 = mLeft; if ( -dy < radius ) search2 = mRight; } else { search1 = mRight; if ( dy < radius ) search2 = mLeft; } axis = Z_AXIS; break; case Z_AXIS: if ( dz <= 0 ) { search1 = mLeft; if ( -dz < radius ) search2 = mRight; } else { search1 = mRight; if ( dz < radius ) search2 = mLeft; } axis = X_AXIS; break; } double r2 = radius*radius; double m = dx*dx+dy*dy+dz*dz; if ( m < r2 ) { switch ( count ) { case 0: found[count].mNode = this; found[count].mDistance = m; break; case 1: if ( m < found[0].mDistance ) { if ( maxObjects == 1 ) { found[0].mNode = this; found[0].mDistance = m; } else { found[1] = found[0]; found[0].mNode = this; found[0].mDistance = m; } } else if ( maxObjects > 1) { found[1].mNode = this; found[1].mDistance = m; } break; default: { bool inserted = false; for (uint32_t i=0; i<count; i++) { if ( m < found[i].mDistance ) // if this one is closer than a pre-existing one... { // insertion sort... 
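// shift the worse (more distant) results down one slot, dropping the last entry if
// the result list is already full, then record this node at position 'i'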
uint32_t scan = count; if ( scan >= maxObjects ) scan=maxObjects-1; for (uint32_t j=scan; j>i; j--) { found[j] = found[j-1]; } found[i].mNode = this; found[i].mDistance = m; inserted = true; break; } } if ( !inserted && count < maxObjects ) { found[count].mNode = this; found[count].mDistance = m; } } break; } count++; if ( count > maxObjects ) { count = maxObjects; } } if ( search1 ) search1->search( axis, pos,radius, count, maxObjects, found, iface); if ( search2 ) search2->search( axis, pos,radius, count, maxObjects, found, iface); } void search(Axes axis,const float *pos,float radius,uint32_t &count,uint32_t maxObjects,KdTreeFindNode *found,const KdTreeInterface *iface) { const float *position = iface->getPositionFloat(mIndex); float dx = pos[0] - position[0]; float dy = pos[1] - position[1]; float dz = pos[2] - position[2]; KdTreeNode *search1 = 0; KdTreeNode *search2 = 0; switch ( axis ) { case X_AXIS: if ( dx <= 0 ) // JWR if we are to the left { search1 = mLeft; // JWR then search to the left if ( -dx < radius ) // JWR if distance to the right is less than our search radius, continue on the right as well. search2 = mRight; } else { search1 = mRight; // JWR ok, we go down the left tree if ( dx < radius ) // JWR if the distance from the right is less than our search radius search2 = mLeft; } axis = Y_AXIS; break; case Y_AXIS: if ( dy <= 0 ) { search1 = mLeft; if ( -dy < radius ) search2 = mRight; } else { search1 = mRight; if ( dy < radius ) search2 = mLeft; } axis = Z_AXIS; break; case Z_AXIS: if ( dz <= 0 ) { search1 = mLeft; if ( -dz < radius ) search2 = mRight; } else { search1 = mRight; if ( dz < radius ) search2 = mLeft; } axis = X_AXIS; break; } float r2 = radius*radius; float m = dx*dx+dy*dy+dz*dz; if ( m < r2 ) { switch ( count ) { case 0: found[count].mNode = this; found[count].mDistance = m; break; case 1: if ( m < found[0].mDistance ) { if ( maxObjects == 1 ) { found[0].mNode = this; found[0].mDistance = m; } else { found[1] = found[0]; found[0].mNode = this; found[0].mDistance = m; } } else if ( maxObjects > 1) { found[1].mNode = this; found[1].mDistance = m; } break; default: { bool inserted = false; for (uint32_t i=0; i<count; i++) { if ( m < found[i].mDistance ) // if this one is closer than a pre-existing one... { // insertion sort... uint32_t scan = count; if ( scan >= maxObjects ) scan=maxObjects-1; for (uint32_t j=scan; j>i; j--) { found[j] = found[j-1]; } found[i].mNode = this; found[i].mDistance = m; inserted = true; break; } } if ( !inserted && count < maxObjects ) { found[count].mNode = this; found[count].mDistance = m; } } break; } count++; if ( count > maxObjects ) { count = maxObjects; } } if ( search1 ) search1->search( axis, pos,radius, count, maxObjects, found, iface); if ( search2 ) search2->search( axis, pos,radius, count, maxObjects, found, iface); } private: void setLeft(KdTreeNode *left) { mLeft = left; }; void setRight(KdTreeNode *right) { mRight = right; }; KdTreeNode *getLeft(void) { return mLeft; } KdTreeNode *getRight(void) { return mRight; } uint32_t mIndex; KdTreeNode *mLeft; KdTreeNode *mRight; }; #define MAX_BUNDLE_SIZE 1024 // 1024 nodes at a time, to minimize memory allocation and guarantee that pointers are persistent. 
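// KdTreeNode objects are never allocated individually: the KdTree below hands out
// nodes from KdTreeNodeBundle blocks, each holding MAX_BUNDLE_SIZE nodes in a plain
// array, with bundles chained through mNext.  Because a bundle's array never moves
// once allocated, node pointers stored in the tree stay valid as the tree grows, and
// the tree is released by deleting bundles rather than individual nodes.
//
// A typical welding pass over a float mesh built on top of this tree might look like
// the sketch below (illustrative only; remap/oldIndices/rawVertices/indexCount are
// hypothetical caller-side arrays, and the granularity is the weld radius in model units):
//
//     fm_VertexIndex *vi = fm_createVertexIndex(0.001f, false);
//     for (uint32_t i = 0; i < indexCount; i++)
//     {
//         bool isNew;
//         remap[i] = vi->getIndex(&rawVertices[oldIndices[i]*3], isNew);
//     }
//     // vi->getVerticesFloat() / vi->getVcount() now describe the welded vertex pool
//     fm_releaseVertexIndex(vi);
//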
class KdTreeNodeBundle { public: KdTreeNodeBundle(void) { mNext = 0; mIndex = 0; } bool isFull(void) const { return (bool)( mIndex == MAX_BUNDLE_SIZE ); } KdTreeNode * getNextNode(void) { assert(mIndex<MAX_BUNDLE_SIZE); KdTreeNode *ret = &mNodes[mIndex]; mIndex++; return ret; } KdTreeNodeBundle *mNext; uint32_t mIndex; KdTreeNode mNodes[MAX_BUNDLE_SIZE]; }; typedef std::vector< double > DoubleVector; typedef std::vector< float > FloatVector; class KdTree : public KdTreeInterface { public: KdTree(void) { mRoot = 0; mBundle = 0; mVcount = 0; mUseDouble = false; } virtual ~KdTree(void) { reset(); } const double * getPositionDouble(uint32_t index) const { assert( mUseDouble ); assert ( index < mVcount ); return &mVerticesDouble[index*3]; } const float * getPositionFloat(uint32_t index) const { assert( !mUseDouble ); assert ( index < mVcount ); return &mVerticesFloat[index*3]; } uint32_t search(const double *pos,double radius,uint32_t maxObjects,KdTreeFindNode *found) const { assert( mUseDouble ); if ( !mRoot ) return 0; uint32_t count = 0; mRoot->search(X_AXIS,pos,radius,count,maxObjects,found,this); return count; } uint32_t search(const float *pos,float radius,uint32_t maxObjects,KdTreeFindNode *found) const { assert( !mUseDouble ); if ( !mRoot ) return 0; uint32_t count = 0; mRoot->search(X_AXIS,pos,radius,count,maxObjects,found,this); return count; } void reset(void) { mRoot = 0; mVerticesDouble.clear(); mVerticesFloat.clear(); KdTreeNodeBundle *bundle = mBundle; while ( bundle ) { KdTreeNodeBundle *next = bundle->mNext; delete bundle; bundle = next; } mBundle = 0; mVcount = 0; } uint32_t add(double x,double y,double z) { assert(mUseDouble); uint32_t ret = mVcount; mVerticesDouble.push_back(x); mVerticesDouble.push_back(y); mVerticesDouble.push_back(z); mVcount++; KdTreeNode *node = getNewNode(ret); if ( mRoot ) { mRoot->addDouble(node,X_AXIS,this); } else { mRoot = node; } return ret; } uint32_t add(float x,float y,float z) { assert(!mUseDouble); uint32_t ret = mVcount; mVerticesFloat.push_back(x); mVerticesFloat.push_back(y); mVerticesFloat.push_back(z); mVcount++; KdTreeNode *node = getNewNode(ret); if ( mRoot ) { mRoot->addFloat(node,X_AXIS,this); } else { mRoot = node; } return ret; } KdTreeNode * getNewNode(uint32_t index) { if ( mBundle == 0 ) { mBundle = new KdTreeNodeBundle; } if ( mBundle->isFull() ) { KdTreeNodeBundle *bundle = new KdTreeNodeBundle; mBundle->mNext = bundle; mBundle = bundle; } KdTreeNode *node = mBundle->getNextNode(); new ( node ) KdTreeNode(index); return node; } uint32_t getNearest(const double *pos,double radius,bool &_found) const // returns the nearest possible neighbor's index. { assert( mUseDouble ); uint32_t ret = 0; _found = false; KdTreeFindNode found[1]; uint32_t count = search(pos,radius,1,found); if ( count ) { KdTreeNode *node = found[0].mNode; ret = node->getIndex(); _found = true; } return ret; } uint32_t getNearest(const float *pos,float radius,bool &_found) const // returns the nearest possible neighbor's index. 
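// As with the double version above, only the single closest vertex within 'radius'
// is returned; '_found' distinguishes "nothing within radius" from a legitimate
// result of index 0.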
{ assert( !mUseDouble ); uint32_t ret = 0; _found = false; KdTreeFindNode found[1]; uint32_t count = search(pos,radius,1,found); if ( count ) { KdTreeNode *node = found[0].mNode; ret = node->getIndex(); _found = true; } return ret; } const double * getVerticesDouble(void) const { assert( mUseDouble ); const double *ret = 0; if ( !mVerticesDouble.empty() ) { ret = &mVerticesDouble[0]; } return ret; } const float * getVerticesFloat(void) const { assert( !mUseDouble ); const float * ret = 0; if ( !mVerticesFloat.empty() ) { ret = &mVerticesFloat[0]; } return ret; } uint32_t getVcount(void) const { return mVcount; }; void setUseDouble(bool useDouble) { mUseDouble = useDouble; } private: bool mUseDouble; KdTreeNode *mRoot; KdTreeNodeBundle *mBundle; uint32_t mVcount; DoubleVector mVerticesDouble; FloatVector mVerticesFloat; }; }; // end of namespace VERTEX_INDEX class MyVertexIndex : public fm_VertexIndex { public: MyVertexIndex(double granularity,bool snapToGrid) { mDoubleGranularity = granularity; mFloatGranularity = (float)granularity; mSnapToGrid = snapToGrid; mUseDouble = true; mKdTree.setUseDouble(true); } MyVertexIndex(float granularity,bool snapToGrid) { mDoubleGranularity = granularity; mFloatGranularity = (float)granularity; mSnapToGrid = snapToGrid; mUseDouble = false; mKdTree.setUseDouble(false); } virtual ~MyVertexIndex(void) { } double snapToGrid(double p) { double m = fmod(p,mDoubleGranularity); p-=m; return p; } float snapToGrid(float p) { float m = fmodf(p,mFloatGranularity); p-=m; return p; } uint32_t getIndex(const float *_p,bool &newPos) // get index for a vector float { uint32_t ret; if ( mUseDouble ) { double p[3]; p[0] = _p[0]; p[1] = _p[1]; p[2] = _p[2]; return getIndex(p,newPos); } newPos = false; float p[3]; if ( mSnapToGrid ) { p[0] = snapToGrid(_p[0]); p[1] = snapToGrid(_p[1]); p[2] = snapToGrid(_p[2]); } else { p[0] = _p[0]; p[1] = _p[1]; p[2] = _p[2]; } bool found; ret = mKdTree.getNearest(p,mFloatGranularity,found); if ( !found ) { newPos = true; ret = mKdTree.add(p[0],p[1],p[2]); } return ret; } uint32_t getIndex(const double *_p,bool &newPos) // get index for a vector double { uint32_t ret; if ( !mUseDouble ) { float p[3]; p[0] = (float)_p[0]; p[1] = (float)_p[1]; p[2] = (float)_p[2]; return getIndex(p,newPos); } newPos = false; double p[3]; if ( mSnapToGrid ) { p[0] = snapToGrid(_p[0]); p[1] = snapToGrid(_p[1]); p[2] = snapToGrid(_p[2]); } else { p[0] = _p[0]; p[1] = _p[1]; p[2] = _p[2]; } bool found; ret = mKdTree.getNearest(p,mDoubleGranularity,found); if ( !found ) { newPos = true; ret = mKdTree.add(p[0],p[1],p[2]); } return ret; } const float * getVerticesFloat(void) const { const float * ret = 0; assert( !mUseDouble ); ret = mKdTree.getVerticesFloat(); return ret; } const double * getVerticesDouble(void) const { const double * ret = 0; assert( mUseDouble ); ret = mKdTree.getVerticesDouble(); return ret; } const float * getVertexFloat(uint32_t index) const { const float * ret = 0; assert( !mUseDouble ); #ifdef _DEBUG uint32_t vcount = mKdTree.getVcount(); assert( index < vcount ); #endif ret = mKdTree.getVerticesFloat(); ret = &ret[index*3]; return ret; } const double * getVertexDouble(uint32_t index) const { const double * ret = 0; assert( mUseDouble ); #ifdef _DEBUG uint32_t vcount = mKdTree.getVcount(); assert( index < vcount ); #endif ret = mKdTree.getVerticesDouble(); ret = &ret[index*3]; return ret; } uint32_t getVcount(void) const { return mKdTree.getVcount(); } bool isDouble(void) const { return mUseDouble; } bool saveAsObj(const char 
*fname,uint32_t tcount,uint32_t *indices) { bool ret = false; FILE *fph = fopen(fname,"wb"); if ( fph ) { ret = true; uint32_t vcount = getVcount(); if ( mUseDouble ) { const double *v = getVerticesDouble(); for (uint32_t i=0; i<vcount; i++) { fprintf(fph,"v %0.9f %0.9f %0.9f\r\n", (float)v[0], (float)v[1], (float)v[2] ); v+=3; } } else { const float *v = getVerticesFloat(); for (uint32_t i=0; i<vcount; i++) { fprintf(fph,"v %0.9f %0.9f %0.9f\r\n", v[0], v[1], v[2] ); v+=3; } } for (uint32_t i=0; i<tcount; i++) { uint32_t i1 = *indices++; uint32_t i2 = *indices++; uint32_t i3 = *indices++; fprintf(fph,"f %d %d %d\r\n", i1+1, i2+1, i3+1 ); } fclose(fph); } return ret; } private: bool mUseDouble:1; bool mSnapToGrid:1; double mDoubleGranularity; float mFloatGranularity; VERTEX_INDEX::KdTree mKdTree; }; fm_VertexIndex * fm_createVertexIndex(double granularity,bool snapToGrid) // create an indexed vertex system for doubles { MyVertexIndex *ret = new MyVertexIndex(granularity,snapToGrid); return static_cast< fm_VertexIndex *>(ret); } fm_VertexIndex * fm_createVertexIndex(float granularity,bool snapToGrid) // create an indexed vertext system for floats { MyVertexIndex *ret = new MyVertexIndex(granularity,snapToGrid); return static_cast< fm_VertexIndex *>(ret); } void fm_releaseVertexIndex(fm_VertexIndex *vindex) { MyVertexIndex *m = static_cast< MyVertexIndex *>(vindex); delete m; } #endif // END OF VERTEX WELDING CODE REAL fm_computeBestFitAABB(uint32_t vcount,const REAL *points,uint32_t pstride,REAL *bmin,REAL *bmax) // returns the diagonal distance { const uint8_t *source = (const uint8_t *) points; bmin[0] = points[0]; bmin[1] = points[1]; bmin[2] = points[2]; bmax[0] = points[0]; bmax[1] = points[1]; bmax[2] = points[2]; for (uint32_t i=1; i<vcount; i++) { source+=pstride; const REAL *p = (const REAL *) source; if ( p[0] < bmin[0] ) bmin[0] = p[0]; if ( p[1] < bmin[1] ) bmin[1] = p[1]; if ( p[2] < bmin[2] ) bmin[2] = p[2]; if ( p[0] > bmax[0] ) bmax[0] = p[0]; if ( p[1] > bmax[1] ) bmax[1] = p[1]; if ( p[2] > bmax[2] ) bmax[2] = p[2]; } REAL dx = bmax[0] - bmin[0]; REAL dy = bmax[1] - bmin[1]; REAL dz = bmax[2] - bmin[2]; return (REAL) sqrt( dx*dx + dy*dy + dz*dz ); } /* a = b - c */ #define vector(a,b,c) \ (a)[0] = (b)[0] - (c)[0]; \ (a)[1] = (b)[1] - (c)[1]; \ (a)[2] = (b)[2] - (c)[2]; #define innerProduct(v,q) \ ((v)[0] * (q)[0] + \ (v)[1] * (q)[1] + \ (v)[2] * (q)[2]) #define crossProduct(a,b,c) \ (a)[0] = (b)[1] * (c)[2] - (c)[1] * (b)[2]; \ (a)[1] = (b)[2] * (c)[0] - (c)[2] * (b)[0]; \ (a)[2] = (b)[0] * (c)[1] - (c)[0] * (b)[1]; bool fm_lineIntersectsTriangle(const REAL *rayStart,const REAL *rayEnd,const REAL *p1,const REAL *p2,const REAL *p3,REAL *sect) { REAL dir[3]; dir[0] = rayEnd[0] - rayStart[0]; dir[1] = rayEnd[1] - rayStart[1]; dir[2] = rayEnd[2] - rayStart[2]; REAL d = (REAL)sqrt(dir[0]*dir[0] + dir[1]*dir[1] + dir[2]*dir[2]); REAL r = 1.0f / d; dir[0]*=r; dir[1]*=r; dir[2]*=r; REAL t; bool ret = fm_rayIntersectsTriangle(rayStart, dir, p1, p2, p3, t ); if ( ret ) { if ( t > d ) { sect[0] = rayStart[0] + dir[0]*t; sect[1] = rayStart[1] + dir[1]*t; sect[2] = rayStart[2] + dir[2]*t; } else { ret = false; } } return ret; } bool fm_rayIntersectsTriangle(const REAL *p,const REAL *d,const REAL *v0,const REAL *v1,const REAL *v2,REAL &t) { REAL e1[3],e2[3],h[3],s[3],q[3]; REAL a,f,u,v; vector(e1,v1,v0); vector(e2,v2,v0); crossProduct(h,d,e2); a = innerProduct(e1,h); if (a > -0.00001 && a < 0.00001) return(false); f = 1/a; vector(s,p,v0); u = f * (innerProduct(s,h)); if (u < 0.0 || u > 
1.0) return(false); crossProduct(q,s,e1); v = f * innerProduct(d,q); if (v < 0.0 || u + v > 1.0) return(false); // at this stage we can compute t to find out where // the intersection point is on the line t = f * innerProduct(e2,q); if (t > 0) // ray intersection return(true); else // this means that there is a line intersection // but not a ray intersection return (false); } inline REAL det(const REAL *p1,const REAL *p2,const REAL *p3) { return p1[0]*p2[1]*p3[2] + p2[0]*p3[1]*p1[2] + p3[0]*p1[1]*p2[2] -p1[0]*p3[1]*p2[2] - p2[0]*p1[1]*p3[2] - p3[0]*p2[1]*p1[2]; } REAL fm_computeMeshVolume(const REAL *vertices,uint32_t tcount,const uint32_t *indices) { REAL volume = 0; for (uint32_t i=0; i<tcount; i++,indices+=3) { const REAL *p1 = &vertices[ indices[0]*3 ]; const REAL *p2 = &vertices[ indices[1]*3 ]; const REAL *p3 = &vertices[ indices[2]*3 ]; volume+=det(p1,p2,p3); // compute the volume of the tetrahedran relative to the origin. } volume*=(1.0f/6.0f); if ( volume < 0 ) volume*=-1; return volume; } const REAL * fm_getPoint(const REAL *points,uint32_t pstride,uint32_t index) { const uint8_t *scan = (const uint8_t *)points; scan+=(index*pstride); return (REAL *)scan; } bool fm_insideTriangle(REAL Ax, REAL Ay, REAL Bx, REAL By, REAL Cx, REAL Cy, REAL Px, REAL Py) { REAL ax, ay, bx, by, cx, cy, apx, apy, bpx, bpy, cpx, cpy; REAL cCROSSap, bCROSScp, aCROSSbp; ax = Cx - Bx; ay = Cy - By; bx = Ax - Cx; by = Ay - Cy; cx = Bx - Ax; cy = By - Ay; apx= Px - Ax; apy= Py - Ay; bpx= Px - Bx; bpy= Py - By; cpx= Px - Cx; cpy= Py - Cy; aCROSSbp = ax*bpy - ay*bpx; cCROSSap = cx*apy - cy*apx; bCROSScp = bx*cpy - by*cpx; return ((aCROSSbp >= 0.0f) && (bCROSScp >= 0.0f) && (cCROSSap >= 0.0f)); } REAL fm_areaPolygon2d(uint32_t pcount,const REAL *points,uint32_t pstride) { int32_t n = (int32_t)pcount; REAL A=0.0f; for(int32_t p=n-1,q=0; q<n; p=q++) { const REAL *p1 = fm_getPoint(points,pstride,p); const REAL *p2 = fm_getPoint(points,pstride,q); A+= p1[0]*p2[1] - p2[0]*p1[1]; } return A*0.5f; } bool fm_pointInsidePolygon2d(uint32_t pcount,const REAL *points,uint32_t pstride,const REAL *point,uint32_t xindex,uint32_t yindex) { uint32_t j = pcount-1; int32_t oddNodes = 0; REAL x = point[xindex]; REAL y = point[yindex]; for (uint32_t i=0; i<pcount; i++) { const REAL *p1 = fm_getPoint(points,pstride,i); const REAL *p2 = fm_getPoint(points,pstride,j); REAL x1 = p1[xindex]; REAL y1 = p1[yindex]; REAL x2 = p2[xindex]; REAL y2 = p2[yindex]; if ( y1 < y && y2 >= y || y2 < y && y1 >= y ) { if (x1+(y-y1)/(y2-y1)*(x2-x1)<x) { oddNodes = 1-oddNodes; } } j = i; } return oddNodes ? true : false; } uint32_t fm_consolidatePolygon(uint32_t pcount,const REAL *points,uint32_t pstride,REAL *_dest,REAL epsilon) // collapses co-linear edges. { uint32_t ret = 0; if ( pcount >= 3 ) { const REAL *prev = fm_getPoint(points,pstride,pcount-1); const REAL *current = points; const REAL *next = fm_getPoint(points,pstride,1); REAL *dest = _dest; for (uint32_t i=0; i<pcount; i++) { next = (i+1)==pcount ? 
points : next; if ( !fm_colinear(prev,current,next,epsilon) ) { dest[0] = current[0]; dest[1] = current[1]; dest[2] = current[2]; dest+=3; ret++; } prev = current; current+=3; next+=3; } } return ret; } #ifndef RECT3D_TEMPLATE #define RECT3D_TEMPLATE template <class T> class Rect3d { public: Rect3d(void) { }; Rect3d(const T *bmin,const T *bmax) { mMin[0] = bmin[0]; mMin[1] = bmin[1]; mMin[2] = bmin[2]; mMax[0] = bmax[0]; mMax[1] = bmax[1]; mMax[2] = bmax[2]; } void SetMin(const T *bmin) { mMin[0] = bmin[0]; mMin[1] = bmin[1]; mMin[2] = bmin[2]; } void SetMax(const T *bmax) { mMax[0] = bmax[0]; mMax[1] = bmax[1]; mMax[2] = bmax[2]; } void SetMin(T x,T y,T z) { mMin[0] = x; mMin[1] = y; mMin[2] = z; } void SetMax(T x,T y,T z) { mMax[0] = x; mMax[1] = y; mMax[2] = z; } T mMin[3]; T mMax[3]; }; #endif void splitRect(uint32_t axis, const Rect3d<REAL> &source, Rect3d<REAL> &b1, Rect3d<REAL> &b2, const REAL *midpoint) { switch ( axis ) { case 0: b1.SetMin(source.mMin); b1.SetMax( midpoint[0], source.mMax[1], source.mMax[2] ); b2.SetMin( midpoint[0], source.mMin[1], source.mMin[2] ); b2.SetMax(source.mMax); break; case 1: b1.SetMin(source.mMin); b1.SetMax( source.mMax[0], midpoint[1], source.mMax[2] ); b2.SetMin( source.mMin[0], midpoint[1], source.mMin[2] ); b2.SetMax(source.mMax); break; case 2: b1.SetMin(source.mMin); b1.SetMax( source.mMax[0], source.mMax[1], midpoint[2] ); b2.SetMin( source.mMin[0], source.mMin[1], midpoint[2] ); b2.SetMax(source.mMax); break; } } bool fm_computeSplitPlane(uint32_t vcount, const REAL *vertices, uint32_t /* tcount */, const uint32_t * /* indices */, REAL *plane) { REAL sides[3]; REAL matrix[16]; fm_computeBestFitOBB( vcount, vertices, sizeof(REAL)*3, sides, matrix ); REAL bmax[3]; REAL bmin[3]; bmax[0] = sides[0]*0.5f; bmax[1] = sides[1]*0.5f; bmax[2] = sides[2]*0.5f; bmin[0] = -bmax[0]; bmin[1] = -bmax[1]; bmin[2] = -bmax[2]; REAL dx = sides[0]; REAL dy = sides[1]; REAL dz = sides[2]; uint32_t axis = 0; if ( dy > dx ) { axis = 1; } if ( dz > dx && dz > dy ) { axis = 2; } REAL p1[3]; REAL p2[3]; REAL p3[3]; p3[0] = p2[0] = p1[0] = bmin[0] + dx*0.5f; p3[1] = p2[1] = p1[1] = bmin[1] + dy*0.5f; p3[2] = p2[2] = p1[2] = bmin[2] + dz*0.5f; Rect3d<REAL> b(bmin,bmax); Rect3d<REAL> b1,b2; splitRect(axis,b,b1,b2,p1); switch ( axis ) { case 0: p2[1] = bmin[1]; p2[2] = bmin[2]; if ( dz > dy ) { p3[1] = bmax[1]; p3[2] = bmin[2]; } else { p3[1] = bmin[1]; p3[2] = bmax[2]; } break; case 1: p2[0] = bmin[0]; p2[2] = bmin[2]; if ( dx > dz ) { p3[0] = bmax[0]; p3[2] = bmin[2]; } else { p3[0] = bmin[0]; p3[2] = bmax[2]; } break; case 2: p2[0] = bmin[0]; p2[1] = bmin[1]; if ( dx > dy ) { p3[0] = bmax[0]; p3[1] = bmin[1]; } else { p3[0] = bmin[0]; p3[1] = bmax[1]; } break; } REAL tp1[3]; REAL tp2[3]; REAL tp3[3]; fm_transform(matrix,p1,tp1); fm_transform(matrix,p2,tp2); fm_transform(matrix,p3,tp3); plane[3] = fm_computePlane(tp1,tp2,tp3,plane); return true; } #pragma warning(disable:4100) void fm_nearestPointInTriangle(const REAL * /*nearestPoint*/,const REAL * /*p1*/,const REAL * /*p2*/,const REAL * /*p3*/,REAL * /*nearest*/) { } static REAL Partial(const REAL *a,const REAL *p) { return (a[0]*p[1]) - (p[0]*a[1]); } REAL fm_areaTriangle(const REAL *p0,const REAL *p1,const REAL *p2) { REAL A = Partial(p0,p1); A+= Partial(p1,p2); A+= Partial(p2,p0); return A*0.5f; } void fm_subtract(const REAL *A,const REAL *B,REAL *diff) // compute A-B and store the result in 'diff' { diff[0] = A[0]-B[0]; diff[1] = A[1]-B[1]; diff[2] = A[2]-B[2]; } void fm_multiplyTransform(const REAL *pA,const REAL 
*pB,REAL *pM) { REAL a = pA[0*4+0] * pB[0*4+0] + pA[0*4+1] * pB[1*4+0] + pA[0*4+2] * pB[2*4+0] + pA[0*4+3] * pB[3*4+0]; REAL b = pA[0*4+0] * pB[0*4+1] + pA[0*4+1] * pB[1*4+1] + pA[0*4+2] * pB[2*4+1] + pA[0*4+3] * pB[3*4+1]; REAL c = pA[0*4+0] * pB[0*4+2] + pA[0*4+1] * pB[1*4+2] + pA[0*4+2] * pB[2*4+2] + pA[0*4+3] * pB[3*4+2]; REAL d = pA[0*4+0] * pB[0*4+3] + pA[0*4+1] * pB[1*4+3] + pA[0*4+2] * pB[2*4+3] + pA[0*4+3] * pB[3*4+3]; REAL e = pA[1*4+0] * pB[0*4+0] + pA[1*4+1] * pB[1*4+0] + pA[1*4+2] * pB[2*4+0] + pA[1*4+3] * pB[3*4+0]; REAL f = pA[1*4+0] * pB[0*4+1] + pA[1*4+1] * pB[1*4+1] + pA[1*4+2] * pB[2*4+1] + pA[1*4+3] * pB[3*4+1]; REAL g = pA[1*4+0] * pB[0*4+2] + pA[1*4+1] * pB[1*4+2] + pA[1*4+2] * pB[2*4+2] + pA[1*4+3] * pB[3*4+2]; REAL h = pA[1*4+0] * pB[0*4+3] + pA[1*4+1] * pB[1*4+3] + pA[1*4+2] * pB[2*4+3] + pA[1*4+3] * pB[3*4+3]; REAL i = pA[2*4+0] * pB[0*4+0] + pA[2*4+1] * pB[1*4+0] + pA[2*4+2] * pB[2*4+0] + pA[2*4+3] * pB[3*4+0]; REAL j = pA[2*4+0] * pB[0*4+1] + pA[2*4+1] * pB[1*4+1] + pA[2*4+2] * pB[2*4+1] + pA[2*4+3] * pB[3*4+1]; REAL k = pA[2*4+0] * pB[0*4+2] + pA[2*4+1] * pB[1*4+2] + pA[2*4+2] * pB[2*4+2] + pA[2*4+3] * pB[3*4+2]; REAL l = pA[2*4+0] * pB[0*4+3] + pA[2*4+1] * pB[1*4+3] + pA[2*4+2] * pB[2*4+3] + pA[2*4+3] * pB[3*4+3]; REAL m = pA[3*4+0] * pB[0*4+0] + pA[3*4+1] * pB[1*4+0] + pA[3*4+2] * pB[2*4+0] + pA[3*4+3] * pB[3*4+0]; REAL n = pA[3*4+0] * pB[0*4+1] + pA[3*4+1] * pB[1*4+1] + pA[3*4+2] * pB[2*4+1] + pA[3*4+3] * pB[3*4+1]; REAL o = pA[3*4+0] * pB[0*4+2] + pA[3*4+1] * pB[1*4+2] + pA[3*4+2] * pB[2*4+2] + pA[3*4+3] * pB[3*4+2]; REAL p = pA[3*4+0] * pB[0*4+3] + pA[3*4+1] * pB[1*4+3] + pA[3*4+2] * pB[2*4+3] + pA[3*4+3] * pB[3*4+3]; pM[0] = a; pM[1] = b; pM[2] = c; pM[3] = d; pM[4] = e; pM[5] = f; pM[6] = g; pM[7] = h; pM[8] = i; pM[9] = j; pM[10] = k; pM[11] = l; pM[12] = m; pM[13] = n; pM[14] = o; pM[15] = p; } void fm_multiply(REAL *A,REAL scaler) { A[0]*=scaler; A[1]*=scaler; A[2]*=scaler; } void fm_add(const REAL *A,const REAL *B,REAL *sum) { sum[0] = A[0]+B[0]; sum[1] = A[1]+B[1]; sum[2] = A[2]+B[2]; } void fm_copy3(const REAL *source,REAL *dest) { dest[0] = source[0]; dest[1] = source[1]; dest[2] = source[2]; } uint32_t fm_copyUniqueVertices(uint32_t vcount,const REAL *input_vertices,REAL *output_vertices,uint32_t tcount,const uint32_t *input_indices,uint32_t *output_indices) { uint32_t ret = 0; REAL *vertices = (REAL *)malloc(sizeof(REAL)*vcount*3); memcpy(vertices,input_vertices,sizeof(REAL)*vcount*3); REAL *dest = output_vertices; uint32_t *reindex = (uint32_t *)malloc(sizeof(uint32_t)*vcount); memset(reindex,0xFF,sizeof(uint32_t)*vcount); uint32_t icount = tcount*3; for (uint32_t i=0; i<icount; i++) { uint32_t index = *input_indices++; assert( index < vcount ); if ( reindex[index] == 0xFFFFFFFF ) { *output_indices++ = ret; reindex[index] = ret; const REAL *pos = &vertices[index*3]; dest[0] = pos[0]; dest[1] = pos[1]; dest[2] = pos[2]; dest+=3; ret++; } else { *output_indices++ = reindex[index]; } } free(vertices); free(reindex); return ret; } bool fm_isMeshCoplanar(uint32_t tcount,const uint32_t *indices,const REAL *vertices,bool doubleSided) // returns true if this collection of indexed triangles are co-planar! 
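// Illustrative check (hypothetical mesh arrays) that a patch of triangles can be
// treated as a single planar polygon before, for example, consolidating its outline:
//
//     bool planar = fm_isMeshCoplanar(triCount, triIndices, meshVertices, true);
//
// The comparison uses fm_samePlane (below) with fixed tolerances of 0.01 on the
// normal direction and 0.001 on the plane d term; pass doubleSided=true when the
// patch may contain triangles wound in opposite directions.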
{ bool ret = true; if ( tcount > 0 ) { uint32_t i1 = indices[0]; uint32_t i2 = indices[1]; uint32_t i3 = indices[2]; const REAL *p1 = &vertices[i1*3]; const REAL *p2 = &vertices[i2*3]; const REAL *p3 = &vertices[i3*3]; REAL plane[4]; plane[3] = fm_computePlane(p1,p2,p3,plane); const uint32_t *scan = &indices[3]; for (uint32_t i=1; i<tcount; i++) { i1 = *scan++; i2 = *scan++; i3 = *scan++; p1 = &vertices[i1*3]; p2 = &vertices[i2*3]; p3 = &vertices[i3*3]; REAL _plane[4]; _plane[3] = fm_computePlane(p1,p2,p3,_plane); if ( !fm_samePlane(plane,_plane,0.01f,0.001f,doubleSided) ) { ret = false; break; } } } return ret; } bool fm_samePlane(const REAL p1[4],const REAL p2[4],REAL normalEpsilon,REAL dEpsilon,bool doubleSided) { bool ret = false; REAL diff = (REAL) fabs(p1[3]-p2[3]); if ( diff < dEpsilon ) // if the plane -d co-efficient is within our epsilon { REAL dot = fm_dot(p1,p2); // compute the dot-product of the vector normals. if ( doubleSided ) dot = (REAL)fabs(dot); REAL dmin = 1 - normalEpsilon; REAL dmax = 1 + normalEpsilon; if ( dot >= dmin && dot <= dmax ) { ret = true; // then the plane equation is for practical purposes identical. } } return ret; } void fm_initMinMax(REAL bmin[3],REAL bmax[3]) { bmin[0] = FLT_MAX; bmin[1] = FLT_MAX; bmin[2] = FLT_MAX; bmax[0] = -FLT_MAX; bmax[1] = -FLT_MAX; bmax[2] = -FLT_MAX; } void fm_inflateMinMax(REAL bmin[3], REAL bmax[3], REAL ratio) { REAL inflate = fm_distance(bmin, bmax)*0.5f*ratio; bmin[0] -= inflate; bmin[1] -= inflate; bmin[2] -= inflate; bmax[0] += inflate; bmax[1] += inflate; bmax[2] += inflate; } #ifndef TESSELATE_H #define TESSELATE_H typedef std::vector< uint32_t > UintVector; class Myfm_Tesselate : public fm_Tesselate { public: virtual ~Myfm_Tesselate(void) { } const uint32_t * tesselate(fm_VertexIndex *vindex,uint32_t tcount,const uint32_t *indices,float longEdge,uint32_t maxDepth,uint32_t &outcount) { const uint32_t *ret = 0; mMaxDepth = maxDepth; mLongEdge = longEdge*longEdge; mLongEdgeD = mLongEdge; mVertices = vindex; if ( mVertices->isDouble() ) { uint32_t vcount = mVertices->getVcount(); double *vertices = (double *)malloc(sizeof(double)*vcount*3); memcpy(vertices,mVertices->getVerticesDouble(),sizeof(double)*vcount*3); for (uint32_t i=0; i<tcount; i++) { uint32_t i1 = *indices++; uint32_t i2 = *indices++; uint32_t i3 = *indices++; const double *p1 = &vertices[i1*3]; const double *p2 = &vertices[i2*3]; const double *p3 = &vertices[i3*3]; tesselate(p1,p2,p3,0); } free(vertices); } else { uint32_t vcount = mVertices->getVcount(); float *vertices = (float *)malloc(sizeof(float)*vcount*3); memcpy(vertices,mVertices->getVerticesFloat(),sizeof(float)*vcount*3); for (uint32_t i=0; i<tcount; i++) { uint32_t i1 = *indices++; uint32_t i2 = *indices++; uint32_t i3 = *indices++; const float *p1 = &vertices[i1*3]; const float *p2 = &vertices[i2*3]; const float *p3 = &vertices[i3*3]; tesselate(p1,p2,p3,0); } free(vertices); } outcount = (uint32_t)(mIndices.size()/3); ret = &mIndices[0]; return ret; } void tesselate(const float *p1,const float *p2,const float *p3,uint32_t recurse) { bool split = false; float l1,l2,l3; l1 = l2 = l3 = 0; if ( recurse < mMaxDepth ) { l1 = fm_distanceSquared(p1,p2); l2 = fm_distanceSquared(p2,p3); l3 = fm_distanceSquared(p3,p1); if ( l1 > mLongEdge || l2 > mLongEdge || l3 > mLongEdge ) split = true; } if ( split ) { uint32_t edge; if ( l1 >= l2 && l1 >= l3 ) edge = 0; else if ( l2 >= l1 && l2 >= l3 ) edge = 1; else edge = 2; float splits[3]; switch ( edge ) { case 0: { fm_lerp(p1,p2,splits,0.5f); 
tesselate(p1,splits,p3, recurse+1 ); tesselate(splits,p2,p3, recurse+1 ); } break; case 1: { fm_lerp(p2,p3,splits,0.5f); tesselate(p1,p2,splits, recurse+1 ); tesselate(p1,splits,p3, recurse+1 ); } break; case 2: { fm_lerp(p3,p1,splits,0.5f); tesselate(p1,p2,splits, recurse+1 ); tesselate(splits,p2,p3, recurse+1 ); } break; } } else { bool newp; uint32_t i1 = mVertices->getIndex(p1,newp); uint32_t i2 = mVertices->getIndex(p2,newp); uint32_t i3 = mVertices->getIndex(p3,newp); mIndices.push_back(i1); mIndices.push_back(i2); mIndices.push_back(i3); } } void tesselate(const double *p1,const double *p2,const double *p3,uint32_t recurse) { bool split = false; double l1,l2,l3; l1 = l2 = l3 = 0; if ( recurse < mMaxDepth ) { l1 = fm_distanceSquared(p1,p2); l2 = fm_distanceSquared(p2,p3); l3 = fm_distanceSquared(p3,p1); if ( l1 > mLongEdgeD || l2 > mLongEdgeD || l3 > mLongEdgeD ) split = true; } if ( split ) { uint32_t edge; if ( l1 >= l2 && l1 >= l3 ) edge = 0; else if ( l2 >= l1 && l2 >= l3 ) edge = 1; else edge = 2; double splits[3]; switch ( edge ) { case 0: { fm_lerp(p1,p2,splits,0.5); tesselate(p1,splits,p3, recurse+1 ); tesselate(splits,p2,p3, recurse+1 ); } break; case 1: { fm_lerp(p2,p3,splits,0.5); tesselate(p1,p2,splits, recurse+1 ); tesselate(p1,splits,p3, recurse+1 ); } break; case 2: { fm_lerp(p3,p1,splits,0.5); tesselate(p1,p2,splits, recurse+1 ); tesselate(splits,p2,p3, recurse+1 ); } break; } } else { bool newp; uint32_t i1 = mVertices->getIndex(p1,newp); uint32_t i2 = mVertices->getIndex(p2,newp); uint32_t i3 = mVertices->getIndex(p3,newp); mIndices.push_back(i1); mIndices.push_back(i2); mIndices.push_back(i3); } } private: float mLongEdge; double mLongEdgeD; fm_VertexIndex *mVertices; UintVector mIndices; uint32_t mMaxDepth; }; fm_Tesselate * fm_createTesselate(void) { Myfm_Tesselate *m = new Myfm_Tesselate; return static_cast< fm_Tesselate * >(m); } void fm_releaseTesselate(fm_Tesselate *t) { Myfm_Tesselate *m = static_cast< Myfm_Tesselate *>(t); delete m; } #endif #ifndef RAY_ABB_INTERSECT #define RAY_ABB_INTERSECT //! Integer representation of a floating-point value. #define IR(x) ((uint32_t&)x) /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /** * A method to compute a ray-AABB intersection. * Original code by Andrew Woo, from "Graphics Gems", Academic Press, 1990 * Optimized code by Pierre Terdiman, 2000 (~20-30% faster on my Celeron 500) * Epsilon value added by Klaus Hartmann. (discarding it saves a few cycles only) * * Hence this version is faster as well as more robust than the original one. * * Should work provided: * 1) the integer representation of 0.0f is 0x00000000 * 2) the sign bit of the float is the most significant one * * Report bugs: [email protected] * * \param aabb [in] the axis-aligned bounding box * \param origin [in] ray origin * \param dir [in] ray direction * \param coord [out] impact coordinates * \return true if ray intersects AABB */ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #define RAYAABB_EPSILON 0.00001f bool fm_intersectRayAABB(const float MinB[3],const float MaxB[3],const float origin[3],const float dir[3],float coord[3]) { bool Inside = true; float MaxT[3]; MaxT[0]=MaxT[1]=MaxT[2]=-1.0f; // Find candidate planes. 
for(uint32_t i=0;i<3;i++) { if(origin[i] < MinB[i]) { coord[i] = MinB[i]; Inside = false; // Calculate T distances to candidate planes if(IR(dir[i])) MaxT[i] = (MinB[i] - origin[i]) / dir[i]; } else if(origin[i] > MaxB[i]) { coord[i] = MaxB[i]; Inside = false; // Calculate T distances to candidate planes if(IR(dir[i])) MaxT[i] = (MaxB[i] - origin[i]) / dir[i]; } } // Ray origin inside bounding box if(Inside) { coord[0] = origin[0]; coord[1] = origin[1]; coord[2] = origin[2]; return true; } // Get largest of the maxT's for final choice of intersection uint32_t WhichPlane = 0; if(MaxT[1] > MaxT[WhichPlane]) WhichPlane = 1; if(MaxT[2] > MaxT[WhichPlane]) WhichPlane = 2; // Check final candidate actually inside box if(IR(MaxT[WhichPlane])&0x80000000) return false; for(uint32_t i=0;i<3;i++) { if(i!=WhichPlane) { coord[i] = origin[i] + MaxT[WhichPlane] * dir[i]; #ifdef RAYAABB_EPSILON if(coord[i] < MinB[i] - RAYAABB_EPSILON || coord[i] > MaxB[i] + RAYAABB_EPSILON) return false; #else if(coord[i] < MinB[i] || coord[i] > MaxB[i]) return false; #endif } } return true; // ray hits box } bool fm_intersectLineSegmentAABB(const float bmin[3],const float bmax[3],const float p1[3],const float p2[3],float intersect[3]) { bool ret = false; float dir[3]; dir[0] = p2[0] - p1[0]; dir[1] = p2[1] - p1[1]; dir[2] = p2[2] - p1[2]; float dist = fm_normalize(dir); if ( dist > RAYAABB_EPSILON ) { ret = fm_intersectRayAABB(bmin,bmax,p1,dir,intersect); if ( ret ) { float d = fm_distanceSquared(p1,intersect); if ( d > (dist*dist) ) { ret = false; } } } return ret; } #endif #ifndef OBB_TO_AABB #define OBB_TO_AABB #pragma warning(disable:4100) void fm_OBBtoAABB(const float /*obmin*/[3],const float /*obmax*/[3],const float /*matrix*/[16],float /*abmin*/[3],float /*abmax*/[3]) { assert(0); // not yet implemented. } const REAL * computePos(uint32_t index,const REAL *vertices,uint32_t vstride) { const char *tmp = (const char *)vertices; tmp+=(index*vstride); return (const REAL*)tmp; } void computeNormal(uint32_t index,REAL *normals,uint32_t nstride,const REAL *normal) { char *tmp = (char *)normals; tmp+=(index*nstride); REAL *dest = (REAL *)tmp; dest[0]+=normal[0]; dest[1]+=normal[1]; dest[2]+=normal[2]; } void fm_computeMeanNormals(uint32_t vcount, // the number of vertices const REAL *vertices, // the base address of the vertex position data. uint32_t vstride, // the stride between position data. 
REAL *normals, // the base address of the destination for mean vector normals uint32_t nstride, // the stride between normals uint32_t tcount, // the number of triangles const uint32_t *indices) // the triangle indices { // Step #1 : Zero out the vertex normals char *dest = (char *)normals; for (uint32_t i=0; i<vcount; i++) { REAL *n = (REAL *)dest; n[0] = 0; n[1] = 0; n[2] = 0; dest+=nstride; } // Step #2 : Compute the face normals and accumulate them const uint32_t *scan = indices; for (uint32_t i=0; i<tcount; i++) { uint32_t i1 = *scan++; uint32_t i2 = *scan++; uint32_t i3 = *scan++; const REAL *p1 = computePos(i1,vertices,vstride); const REAL *p2 = computePos(i2,vertices,vstride); const REAL *p3 = computePos(i3,vertices,vstride); REAL normal[3]; fm_computePlane(p3,p2,p1,normal); computeNormal(i1,normals,nstride,normal); computeNormal(i2,normals,nstride,normal); computeNormal(i3,normals,nstride,normal); } // Normalize the accumulated normals dest = (char *)normals; for (uint32_t i=0; i<vcount; i++) { REAL *n = (REAL *)dest; fm_normalize(n); dest+=nstride; } } #endif #define BIGNUMBER 100000000.0 /* hundred million */ static inline void Set(REAL *n,REAL x,REAL y,REAL z) { n[0] = x; n[1] = y; n[2] = z; }; static inline void Copy(REAL *dest,const REAL *source) { dest[0] = source[0]; dest[1] = source[1]; dest[2] = source[2]; } REAL fm_computeBestFitSphere(uint32_t vcount,const REAL *points,uint32_t pstride,REAL *center) { REAL radius; REAL radius2; REAL xmin[3]; REAL xmax[3]; REAL ymin[3]; REAL ymax[3]; REAL zmin[3]; REAL zmax[3]; REAL dia1[3]; REAL dia2[3]; /* FIRST PASS: find 6 minima/maxima points */ Set(xmin,BIGNUMBER,BIGNUMBER,BIGNUMBER); Set(xmax,-BIGNUMBER,-BIGNUMBER,-BIGNUMBER); Set(ymin,BIGNUMBER,BIGNUMBER,BIGNUMBER); Set(ymax,-BIGNUMBER,-BIGNUMBER,-BIGNUMBER); Set(zmin,BIGNUMBER,BIGNUMBER,BIGNUMBER); Set(zmax,-BIGNUMBER,-BIGNUMBER,-BIGNUMBER); { const char *scan = (const char *)points; for (uint32_t i=0; i<vcount; i++) { const REAL *caller_p = (const REAL *)scan; if (caller_p[0]<xmin[0]) Copy(xmin,caller_p); /* New xminimum point */ if (caller_p[0]>xmax[0]) Copy(xmax,caller_p); if (caller_p[1]<ymin[1]) Copy(ymin,caller_p); if (caller_p[1]>ymax[1]) Copy(ymax,caller_p); if (caller_p[2]<zmin[2]) Copy(zmin,caller_p); if (caller_p[2]>zmax[2]) Copy(zmax,caller_p); scan+=pstride; } } /* Set xspan = distance between the 2 points xmin & xmax (squared) */ REAL dx = xmax[0] - xmin[0]; REAL dy = xmax[1] - xmin[1]; REAL dz = xmax[2] - xmin[2]; REAL xspan = dx*dx + dy*dy + dz*dz; /* Same for y & z spans */ dx = ymax[0] - ymin[0]; dy = ymax[1] - ymin[1]; dz = ymax[2] - ymin[2]; REAL yspan = dx*dx + dy*dy + dz*dz; dx = zmax[0] - zmin[0]; dy = zmax[1] - zmin[1]; dz = zmax[2] - zmin[2]; REAL zspan = dx*dx + dy*dy + dz*dz; /* Set points dia1 & dia2 to the maximally separated pair */ Copy(dia1,xmin); Copy(dia2,xmax); /* assume xspan biggest */ REAL maxspan = xspan; if (yspan>maxspan) { maxspan = yspan; Copy(dia1,ymin); Copy(dia2,ymax); } if (zspan>maxspan) { maxspan = zspan; Copy(dia1,zmin); Copy(dia2,zmax); } /* dia1,dia2 is a diameter of initial sphere */ /* calc initial center */ center[0] = (dia1[0]+dia2[0])*0.5f; center[1] = (dia1[1]+dia2[1])*0.5f; center[2] = (dia1[2]+dia2[2])*0.5f; /* calculate initial radius**2 and radius */ dx = dia2[0]-center[0]; /* x component of radius vector */ dy = dia2[1]-center[1]; /* y component of radius vector */ dz = dia2[2]-center[2]; /* z component of radius vector */ radius2 = dx*dx + dy*dy + dz*dz; radius = REAL(sqrt(radius2)); /* SECOND PASS: increment 
current sphere */ { const char *scan = (const char *)points; for (uint32_t i=0; i<vcount; i++) { const REAL *caller_p = (const REAL *)scan; dx = caller_p[0]-center[0]; dy = caller_p[1]-center[1]; dz = caller_p[2]-center[2]; REAL old_to_p_sq = dx*dx + dy*dy + dz*dz; if (old_to_p_sq > radius2) /* do r**2 test first */ { /* this point is outside of current sphere */ REAL old_to_p = REAL(sqrt(old_to_p_sq)); /* calc radius of new sphere */ radius = (radius + old_to_p) * 0.5f; radius2 = radius*radius; /* for next r**2 compare */ REAL old_to_new = old_to_p - radius; /* calc center of new sphere */ REAL recip = 1.0f /old_to_p; REAL cx = (radius*center[0] + old_to_new*caller_p[0]) * recip; REAL cy = (radius*center[1] + old_to_new*caller_p[1]) * recip; REAL cz = (radius*center[2] + old_to_new*caller_p[2]) * recip; Set(center,cx,cy,cz); scan+=pstride; } } } return radius; } void fm_computeBestFitCapsule(uint32_t vcount,const REAL *points,uint32_t pstride,REAL &radius,REAL &height,REAL matrix[16],bool bruteForce) { REAL sides[3]; REAL omatrix[16]; fm_computeBestFitOBB(vcount,points,pstride,sides,omatrix,bruteForce); int32_t axis = 0; if ( sides[0] > sides[1] && sides[0] > sides[2] ) axis = 0; else if ( sides[1] > sides[0] && sides[1] > sides[2] ) axis = 1; else axis = 2; REAL localTransform[16]; REAL maxDist = 0; REAL maxLen = 0; switch ( axis ) { case 0: { fm_eulerMatrix(0,0,FM_PI/2,localTransform); fm_matrixMultiply(localTransform,omatrix,matrix); const uint8_t *scan = (const uint8_t *)points; for (uint32_t i=0; i<vcount; i++) { const REAL *p = (const REAL *)scan; REAL t[3]; fm_inverseRT(omatrix,p,t); REAL dist = t[1]*t[1]+t[2]*t[2]; if ( dist > maxDist ) { maxDist = dist; } REAL l = (REAL) fabs(t[0]); if ( l > maxLen ) { maxLen = l; } scan+=pstride; } } height = sides[0]; break; case 1: { fm_eulerMatrix(0,FM_PI/2,0,localTransform); fm_matrixMultiply(localTransform,omatrix,matrix); const uint8_t *scan = (const uint8_t *)points; for (uint32_t i=0; i<vcount; i++) { const REAL *p = (const REAL *)scan; REAL t[3]; fm_inverseRT(omatrix,p,t); REAL dist = t[0]*t[0]+t[2]*t[2]; if ( dist > maxDist ) { maxDist = dist; } REAL l = (REAL) fabs(t[1]); if ( l > maxLen ) { maxLen = l; } scan+=pstride; } } height = sides[1]; break; case 2: { fm_eulerMatrix(FM_PI/2,0,0,localTransform); fm_matrixMultiply(localTransform,omatrix,matrix); const uint8_t *scan = (const uint8_t *)points; for (uint32_t i=0; i<vcount; i++) { const REAL *p = (const REAL *)scan; REAL t[3]; fm_inverseRT(omatrix,p,t); REAL dist = t[0]*t[0]+t[1]*t[1]; if ( dist > maxDist ) { maxDist = dist; } REAL l = (REAL) fabs(t[2]); if ( l > maxLen ) { maxLen = l; } scan+=pstride; } } height = sides[2]; break; } radius = (REAL)sqrt(maxDist); height = (maxLen*2)-(radius*2); } //************* Triangulation #ifndef TRIANGULATE_H #define TRIANGULATE_H typedef uint32_t TU32; class TVec { public: TVec(double _x,double _y,double _z) { x = _x; y = _y; z = _z; }; TVec(void) { }; double x; double y; double z; }; typedef std::vector< TVec > TVecVector; typedef std::vector< TU32 > TU32Vector; class CTriangulator { public: /// Default constructor CTriangulator(); /// Default destructor virtual ~CTriangulator(); /// Triangulates the contour void triangulate(TU32Vector &indices); /// Returns the given point in the triangulator array inline TVec get(const TU32 id) { return mPoints[id]; } virtual void reset(void) { mInputPoints.clear(); mPoints.clear(); mIndices.clear(); } virtual void addPoint(double x,double y,double z) { TVec v(x,y,z); // update bounding box... 
if ( mInputPoints.empty() ) { mMin = v; mMax = v; } else { if ( x < mMin.x ) mMin.x = x; if ( y < mMin.y ) mMin.y = y; if ( z < mMin.z ) mMin.z = z; if ( x > mMax.x ) mMax.x = x; if ( y > mMax.y ) mMax.y = y; if ( z > mMax.z ) mMax.z = z; } mInputPoints.push_back(v); } // Triangulation happens in 2d. We could inverse transform the polygon around the normal direction, or we just use the two most signficant axes // Here we find the two longest axes and use them to triangulate. Inverse transforming them would introduce more doubleing point error and isn't worth it. virtual uint32_t * triangulate(uint32_t &tcount,double epsilon) { uint32_t *ret = 0; tcount = 0; mEpsilon = epsilon; if ( !mInputPoints.empty() ) { mPoints.clear(); double dx = mMax.x - mMin.x; // locate the first, second and third longest edges and store them in i1, i2, i3 double dy = mMax.y - mMin.y; double dz = mMax.z - mMin.z; uint32_t i1,i2,i3; if ( dx > dy && dx > dz ) { i1 = 0; if ( dy > dz ) { i2 = 1; i3 = 2; } else { i2 = 2; i3 = 1; } } else if ( dy > dx && dy > dz ) { i1 = 1; if ( dx > dz ) { i2 = 0; i3 = 2; } else { i2 = 2; i3 = 0; } } else { i1 = 2; if ( dx > dy ) { i2 = 0; i3 = 1; } else { i2 = 1; i3 = 0; } } uint32_t pcount = (uint32_t)mInputPoints.size(); const double *points = &mInputPoints[0].x; for (uint32_t i=0; i<pcount; i++) { TVec v( points[i1], points[i2], points[i3] ); mPoints.push_back(v); points+=3; } mIndices.clear(); triangulate(mIndices); tcount = (uint32_t)mIndices.size()/3; if ( tcount ) { ret = &mIndices[0]; } } return ret; } virtual const double * getPoint(uint32_t index) { return &mInputPoints[index].x; } private: double mEpsilon; TVec mMin; TVec mMax; TVecVector mInputPoints; TVecVector mPoints; TU32Vector mIndices; /// Tests if a point is inside the given triangle bool _insideTriangle(const TVec& A, const TVec& B, const TVec& C,const TVec& P); /// Returns the area of the contour double _area(); bool _snip(int32_t u, int32_t v, int32_t w, int32_t n, int32_t *V); /// Processes the triangulation void _process(TU32Vector &indices); }; /// Default constructor CTriangulator::CTriangulator(void) { } /// Default destructor CTriangulator::~CTriangulator() { } /// Triangulates the contour void CTriangulator::triangulate(TU32Vector &indices) { _process(indices); } /// Processes the triangulation void CTriangulator::_process(TU32Vector &indices) { const int32_t n = (const int32_t)mPoints.size(); if (n < 3) return; int32_t *V = (int32_t *)malloc(sizeof(int32_t)*n); bool flipped = false; if (0.0f < _area()) { for (int32_t v = 0; v < n; v++) V[v] = v; } else { flipped = true; for (int32_t v = 0; v < n; v++) V[v] = (n - 1) - v; } int32_t nv = n; int32_t count = 2 * nv; for (int32_t m = 0, v = nv - 1; nv > 2;) { if (0 >= (count--)) return; int32_t u = v; if (nv <= u) u = 0; v = u + 1; if (nv <= v) v = 0; int32_t w = v + 1; if (nv <= w) w = 0; if (_snip(u, v, w, nv, V)) { int32_t a, b, c, s, t; a = V[u]; b = V[v]; c = V[w]; if ( flipped ) { indices.push_back(a); indices.push_back(b); indices.push_back(c); } else { indices.push_back(c); indices.push_back(b); indices.push_back(a); } m++; for (s = v, t = v + 1; t < nv; s++, t++) V[s] = V[t]; nv--; count = 2 * nv; } } free(V); } /// Returns the area of the contour double CTriangulator::_area() { int32_t n = (uint32_t)mPoints.size(); double A = 0.0f; for (int32_t p = n - 1, q = 0; q < n; p = q++) { const TVec &pval = mPoints[p]; const TVec &qval = mPoints[q]; A += pval.x * qval.y - qval.x * pval.y; } A*=0.5f; return A; } bool CTriangulator::_snip(int32_t u, int32_t v, 
int32_t w, int32_t n, int32_t *V) { int32_t p; const TVec &A = mPoints[ V[u] ]; const TVec &B = mPoints[ V[v] ]; const TVec &C = mPoints[ V[w] ]; if (mEpsilon > (((B.x - A.x) * (C.y - A.y)) - ((B.y - A.y) * (C.x - A.x))) ) return false; for (p = 0; p < n; p++) { if ((p == u) || (p == v) || (p == w)) continue; const TVec &P = mPoints[ V[p] ]; if (_insideTriangle(A, B, C, P)) return false; } return true; } /// Tests if a point is inside the given triangle bool CTriangulator::_insideTriangle(const TVec& A, const TVec& B, const TVec& C,const TVec& P) { double ax, ay, bx, by, cx, cy, apx, apy, bpx, bpy, cpx, cpy; double cCROSSap, bCROSScp, aCROSSbp; ax = C.x - B.x; ay = C.y - B.y; bx = A.x - C.x; by = A.y - C.y; cx = B.x - A.x; cy = B.y - A.y; apx = P.x - A.x; apy = P.y - A.y; bpx = P.x - B.x; bpy = P.y - B.y; cpx = P.x - C.x; cpy = P.y - C.y; aCROSSbp = ax * bpy - ay * bpx; cCROSSap = cx * apy - cy * apx; bCROSScp = bx * cpy - by * cpx; return ((aCROSSbp >= 0.0f) && (bCROSScp >= 0.0f) && (cCROSSap >= 0.0f)); } class Triangulate : public fm_Triangulate { public: Triangulate(void) { mPointsFloat = 0; mPointsDouble = 0; } virtual ~Triangulate(void) { reset(); } void reset(void) { free(mPointsFloat); free(mPointsDouble); mPointsFloat = 0; mPointsDouble = 0; } virtual const double * triangulate3d(uint32_t pcount, const double *_points, uint32_t vstride, uint32_t &tcount, bool consolidate, double epsilon) { reset(); double *points = (double *)malloc(sizeof(double)*pcount*3); if ( consolidate ) { pcount = fm_consolidatePolygon(pcount,_points,vstride,points,1-epsilon); } else { double *dest = points; for (uint32_t i=0; i<pcount; i++) { const double *src = fm_getPoint(_points,vstride,i); dest[0] = src[0]; dest[1] = src[1]; dest[2] = src[2]; dest+=3; } vstride = sizeof(double)*3; } if ( pcount >= 3 ) { CTriangulator ct; for (uint32_t i=0; i<pcount; i++) { const double *src = fm_getPoint(points,vstride,i); ct.addPoint( src[0], src[1], src[2] ); } uint32_t _tcount; uint32_t *indices = ct.triangulate(_tcount,epsilon); if ( indices ) { tcount = _tcount; mPointsDouble = (double *)malloc(sizeof(double)*tcount*3*3); double *dest = mPointsDouble; for (uint32_t i=0; i<tcount; i++) { uint32_t i1 = indices[i*3+0]; uint32_t i2 = indices[i*3+1]; uint32_t i3 = indices[i*3+2]; const double *p1 = ct.getPoint(i1); const double *p2 = ct.getPoint(i2); const double *p3 = ct.getPoint(i3); dest[0] = p1[0]; dest[1] = p1[1]; dest[2] = p1[2]; dest[3] = p2[0]; dest[4] = p2[1]; dest[5] = p2[2]; dest[6] = p3[0]; dest[7] = p3[1]; dest[8] = p3[2]; dest+=9; } } } free(points); return mPointsDouble; } virtual const float * triangulate3d(uint32_t pcount, const float *points, uint32_t vstride, uint32_t &tcount, bool consolidate, float epsilon) { reset(); double *temp = (double *)malloc(sizeof(double)*pcount*3); double *dest = temp; for (uint32_t i=0; i<pcount; i++) { const float *p = fm_getPoint(points,vstride,i); dest[0] = p[0]; dest[1] = p[1]; dest[2] = p[2]; dest+=3; } const double *results = triangulate3d(pcount,temp,sizeof(double)*3,tcount,consolidate,epsilon); if ( results ) { uint32_t fcount = tcount*3*3; mPointsFloat = (float *)malloc(sizeof(float)*tcount*3*3); for (uint32_t i=0; i<fcount; i++) { mPointsFloat[i] = (float) results[i]; } free(mPointsDouble); mPointsDouble = 0; } free(temp); return mPointsFloat; } private: float *mPointsFloat; double *mPointsDouble; }; fm_Triangulate * fm_createTriangulate(void) { Triangulate *t = new Triangulate; return static_cast< fm_Triangulate *>(t); } void fm_releaseTriangulate(fm_Triangulate 
*t) { Triangulate *tt = static_cast< Triangulate *>(t); delete tt; } #endif bool validDistance(const REAL *p1,const REAL *p2,REAL epsilon) { bool ret = true; REAL dx = p1[0] - p2[0]; REAL dy = p1[1] - p2[1]; REAL dz = p1[2] - p2[2]; REAL dist = dx*dx+dy*dy+dz*dz; if ( dist < (epsilon*epsilon) ) { ret = false; } return ret; } bool fm_isValidTriangle(const REAL *p1,const REAL *p2,const REAL *p3,REAL epsilon) { bool ret = false; if ( validDistance(p1,p2,epsilon) && validDistance(p1,p3,epsilon) && validDistance(p2,p3,epsilon) ) { REAL area = fm_computeArea(p1,p2,p3); if ( area > epsilon ) { REAL _vertices[3*3],vertices[64*3]; _vertices[0] = p1[0]; _vertices[1] = p1[1]; _vertices[2] = p1[2]; _vertices[3] = p2[0]; _vertices[4] = p2[1]; _vertices[5] = p2[2]; _vertices[6] = p3[0]; _vertices[7] = p3[1]; _vertices[8] = p3[2]; uint32_t pcount = fm_consolidatePolygon(3,_vertices,sizeof(REAL)*3,vertices,1-epsilon); if ( pcount == 3 ) { ret = true; } } } return ret; } void fm_multiplyQuat(const REAL *left,const REAL *right,REAL *quat) { REAL a,b,c,d; a = left[3]*right[3] - left[0]*right[0] - left[1]*right[1] - left[2]*right[2]; b = left[3]*right[0] + right[3]*left[0] + left[1]*right[2] - right[1]*left[2]; c = left[3]*right[1] + right[3]*left[1] + left[2]*right[0] - right[2]*left[0]; d = left[3]*right[2] + right[3]*left[2] + left[0]*right[1] - right[0]*left[1]; quat[3] = a; quat[0] = b; quat[1] = c; quat[2] = d; } bool fm_computeCentroid(uint32_t vcount, // number of input data points const REAL *points, // starting address of points array. uint32_t vstride, // stride between input points. REAL *center) { bool ret = false; if ( vcount ) { center[0] = 0; center[1] = 0; center[2] = 0; const char *scan = (const char *)points; for (uint32_t i=0; i<vcount; i++) { const REAL *p = (const REAL *)scan; center[0]+=p[0]; center[1]+=p[1]; center[2]+=p[2]; scan+=vstride; } REAL recip = 1.0f / (REAL)vcount; center[0]*=recip; center[1]*=recip; center[2]*=recip; ret = true; } return ret; } #ifndef TEMPLATE_VEC3 #define TEMPLATE_VEC3 template <class Type> class Vec3 { public: Vec3(void) { } Vec3(Type _x,Type _y,Type _z) { x = _x; y = _y; z = _z; } Type x; Type y; Type z; }; #endif void fm_transformAABB(const REAL bmin[3],const REAL bmax[3],const REAL matrix[16],REAL tbmin[3],REAL tbmax[3]) { Vec3<REAL> box[8]; box[0] = Vec3< REAL >( bmin[0], bmin[1], bmin[2] ); box[1] = Vec3< REAL >( bmax[0], bmin[1], bmin[2] ); box[2] = Vec3< REAL >( bmax[0], bmax[1], bmin[2] ); box[3] = Vec3< REAL >( bmin[0], bmax[1], bmin[2] ); box[4] = Vec3< REAL >( bmin[0], bmin[1], bmax[2] ); box[5] = Vec3< REAL >( bmax[0], bmin[1], bmax[2] ); box[6] = Vec3< REAL >( bmax[0], bmax[1], bmax[2] ); box[7] = Vec3< REAL >( bmin[0], bmax[1], bmax[2] ); // transform all 8 corners of the box and then recompute a new AABB for (unsigned int i=0; i<8; i++) { Vec3< REAL > &p = box[i]; fm_transform(matrix,&p.x,&p.x); if ( i == 0 ) { tbmin[0] = tbmax[0] = p.x; tbmin[1] = tbmax[1] = p.y; tbmin[2] = tbmax[2] = p.z; } else { if ( p.x < tbmin[0] ) tbmin[0] = p.x; if ( p.y < tbmin[1] ) tbmin[1] = p.y; if ( p.z < tbmin[2] ) tbmin[2] = p.z; if ( p.x > tbmax[0] ) tbmax[0] = p.x; if ( p.y > tbmax[1] ) tbmax[1] = p.y; if ( p.z > tbmax[2] ) tbmax[2] = p.z; } } } REAL fm_normalizeQuat(REAL n[4]) // normalize this quat { REAL dx = n[0]*n[0]; REAL dy = n[1]*n[1]; REAL dz = n[2]*n[2]; REAL dw = n[3]*n[3]; REAL dist = dx*dx+dy*dy+dz*dz+dw*dw; dist = (REAL)sqrt(dist); REAL recip = 1.0f / dist; n[0]*=recip; n[1]*=recip; n[2]*=recip; n[3]*=recip; return dist; } }; // end of 
namespace
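The fm_computeBestFitSphere routine above is a two-pass Ritter-style bounding sphere: pass one seeds the sphere from the most widely separated pair of axis-extreme points, pass two grows it to cover any point still left outside. One detail worth flagging: as reproduced here, the scan+=pstride advance sits inside the branch taken only for points outside the current sphere, so interior points are re-tested; the classical formulation advances to the next point on every iteration. Below is a minimal, self-contained sketch of that classical form (the point type and function names are illustrative, not part of this library).

#include <cmath>
#include <cstdio>
#include <vector>

struct P3 { double x, y, z; };               // illustrative point type

static double dist2(const P3& a, const P3& b)
{
    const double dx = a.x - b.x, dy = a.y - b.y, dz = a.z - b.z;
    return dx * dx + dy * dy + dz * dz;
}

// Two-pass Ritter bounding sphere; assumes pts is non-empty.
static double ritterBoundingSphere(const std::vector<P3>& pts, P3& center)
{
    // Pass 1: axis-extreme points, then seed the sphere with the widest pair.
    P3 xmin = pts[0], xmax = pts[0], ymin = pts[0], ymax = pts[0], zmin = pts[0], zmax = pts[0];
    for (const P3& p : pts)
    {
        if (p.x < xmin.x) xmin = p;
        if (p.x > xmax.x) xmax = p;
        if (p.y < ymin.y) ymin = p;
        if (p.y > ymax.y) ymax = p;
        if (p.z < zmin.z) zmin = p;
        if (p.z > zmax.z) zmax = p;
    }
    P3 a = xmin, b = xmax;
    double span = dist2(xmin, xmax);
    if (dist2(ymin, ymax) > span) { span = dist2(ymin, ymax); a = ymin; b = ymax; }
    if (dist2(zmin, zmax) > span) { span = dist2(zmin, zmax); a = zmin; b = zmax; }
    center = P3{ (a.x + b.x) * 0.5, (a.y + b.y) * 0.5, (a.z + b.z) * 0.5 };
    double radius2 = dist2(center, b);
    double radius  = std::sqrt(radius2);

    // Pass 2: grow the sphere for every point still outside it.
    for (const P3& p : pts)                   // advances to the next point every iteration
    {
        const double d2 = dist2(center, p);
        if (d2 > radius2)
        {
            const double d     = std::sqrt(d2);
            radius  = (radius + d) * 0.5;     // new radius spans the old far side and p
            radius2 = radius * radius;
            const double shift = d - radius;  // how far the center slides toward p
            const double recip = 1.0 / d;
            center.x = (radius * center.x + shift * p.x) * recip;
            center.y = (radius * center.y + shift * p.y) * recip;
            center.z = (radius * center.z + shift * p.z) * recip;
        }
    }
    return radius;
}

int main()
{
    const std::vector<P3> pts = { {0,0,0}, {2,0,0}, {1,1,0}, {1,0,3} };
    P3 c;
    const double r = ritterBoundingSphere(pts, c);
    std::printf("center (%f, %f, %f), radius %f\n", c.x, c.y, c.z, r);
    return 0;
}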
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/btAlignedAllocator.cpp
/* Bullet Continuous Collision Detection and Physics Library Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/ This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include "btAlignedAllocator.h" #ifdef _MSC_VER #pragma warning(disable:4311 4302) #endif int32_t gNumAlignedAllocs = 0; int32_t gNumAlignedFree = 0; int32_t gTotalBytesAlignedAllocs = 0; //detect memory leaks static void* btAllocDefault(size_t size) { return malloc(size); } static void btFreeDefault(void* ptr) { free(ptr); } static btAllocFunc* sAllocFunc = btAllocDefault; static btFreeFunc* sFreeFunc = btFreeDefault; #if defined(BT_HAS_ALIGNED_ALLOCATOR) #include <malloc.h> static void* btAlignedAllocDefault(size_t size, int32_t alignment) { return _aligned_malloc(size, (size_t)alignment); } static void btAlignedFreeDefault(void* ptr) { _aligned_free(ptr); } #elif defined(__CELLOS_LV2__) #include <stdlib.h> static inline void* btAlignedAllocDefault(size_t size, int32_t alignment) { return memalign(alignment, size); } static inline void btAlignedFreeDefault(void* ptr) { free(ptr); } #else static inline void* btAlignedAllocDefault(size_t size, int32_t alignment) { void* ret; char* real; unsigned long offset; real = (char*)sAllocFunc(size + sizeof(void*) + (alignment - 1)); if (real) { offset = (alignment - (unsigned long)(real + sizeof(void*))) & (alignment - 1); ret = (void*)((real + sizeof(void*)) + offset); *((void**)(ret)-1) = (void*)(real); } else { ret = (void*)(real); } return (ret); } static inline void btAlignedFreeDefault(void* ptr) { void* real; if (ptr) { real = *((void**)(ptr)-1); sFreeFunc(real); } } #endif static btAlignedAllocFunc* sAlignedAllocFunc = btAlignedAllocDefault; static btAlignedFreeFunc* sAlignedFreeFunc = btAlignedFreeDefault; void btAlignedAllocSetCustomAligned(btAlignedAllocFunc* allocFunc, btAlignedFreeFunc* freeFunc) { sAlignedAllocFunc = allocFunc ? allocFunc : btAlignedAllocDefault; sAlignedFreeFunc = freeFunc ? freeFunc : btAlignedFreeDefault; } void btAlignedAllocSetCustom(btAllocFunc* allocFunc, btFreeFunc* freeFunc) { sAllocFunc = allocFunc ? allocFunc : btAllocDefault; sFreeFunc = freeFunc ? 
freeFunc : btFreeDefault; } #ifdef BT_DEBUG_MEMORY_ALLOCATIONS //this generic allocator provides the total allocated number of bytes #include <stdio.h> void* btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char* filename) { void* ret; char* real; unsigned long offset; gTotalBytesAlignedAllocs += size; gNumAlignedAllocs++; real = (char*)sAllocFunc(size + 2 * sizeof(void*) + (alignment - 1)); if (real) { offset = (alignment - (unsigned long)(real + 2 * sizeof(void*))) & (alignment - 1); ret = (void*)((real + 2 * sizeof(void*)) + offset); *((void**)(ret)-1) = (void*)(real); *((int32_t*)(ret)-2) = size; } else { ret = (void*)(real); //?? } printf("allocation#%d at address %x, from %s,line %d, size %d\n", gNumAlignedAllocs, real, filename, line, size); int32_t* ptr = (int32_t*)ret; *ptr = 12; return (ret); } void btAlignedFreeInternal(void* ptr, int32_t line, char* filename) { void* real; gNumAlignedFree++; if (ptr) { real = *((void**)(ptr)-1); int32_t size = *((int32_t*)(ptr)-2); gTotalBytesAlignedAllocs -= size; printf("free #%d at address %x, from %s,line %d, size %d\n", gNumAlignedFree, real, filename, line, size); sFreeFunc(real); } else { printf("NULL ptr\n"); } } #else //BT_DEBUG_MEMORY_ALLOCATIONS void* btAlignedAllocInternal(size_t size, int32_t alignment) { gNumAlignedAllocs++; void* ptr; ptr = sAlignedAllocFunc(size, alignment); // printf("btAlignedAllocInternal %d, %x\n",size,ptr); return ptr; } void btAlignedFreeInternal(void* ptr) { if (!ptr) { return; } gNumAlignedFree++; // printf("btAlignedFreeInternal %x\n",ptr); sAlignedFreeFunc(ptr); } #endif //BT_DEBUG_MEMORY_ALLOCATIONS
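The fallback path of btAlignedAllocDefault above over-allocates, rounds the user pointer up to the requested alignment, and stashes the raw malloc pointer immediately in front of the returned block so the matching free can recover it. A hedged, standalone sketch of that technique (function names here are mine, not Bullet's; alignment is assumed to be a power of two):

#include <cassert>
#include <cstdint>
#include <cstdlib>

static void* alignedAlloc(size_t size, size_t alignment)   // alignment must be a power of two
{
    // Reserve room for one pointer (to remember the raw block) plus worst-case padding.
    unsigned char* raw = static_cast<unsigned char*>(std::malloc(size + sizeof(void*) + alignment - 1));
    if (!raw)
        return nullptr;
    const uintptr_t base    = reinterpret_cast<uintptr_t>(raw + sizeof(void*));
    const uintptr_t aligned = (base + alignment - 1) & ~(uintptr_t(alignment) - 1);
    void** user = reinterpret_cast<void**>(aligned);
    user[-1] = raw;                        // stash the raw pointer just below the user block
    return user;
}

static void alignedFree(void* ptr)
{
    if (!ptr)
        return;
    std::free(static_cast<void**>(ptr)[-1]);   // recover the raw pointer and release it
}

int main()
{
    void* p = alignedAlloc(100, 64);
    assert(reinterpret_cast<uintptr_t>(p) % 64 == 0);
    alignedFree(p);
    return 0;
}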
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/VHACD-ASYNC.cpp
#include "../public/VHACD.h" #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <thread> #include <atomic> #include <mutex> #include <string> #include <float.h> #define ENABLE_ASYNC 1 #define HACD_ALLOC(x) malloc(x) #define HACD_FREE(x) free(x) #define HACD_ASSERT(x) assert(x) namespace VHACD { class MyHACD_API : public VHACD::IVHACD, public VHACD::IVHACD::IUserCallback, VHACD::IVHACD::IUserLogger { public: MyHACD_API(void) { mVHACD = VHACD::CreateVHACD(); } virtual ~MyHACD_API(void) { releaseHACD(); Cancel(); mVHACD->Release(); } virtual bool Compute(const double* const _points, const uint32_t countPoints, const uint32_t* const _triangles, const uint32_t countTriangles, const Parameters& _desc) final { #if ENABLE_ASYNC Cancel(); // if we previously had a solution running; cancel it. releaseHACD(); // We need to copy the input vertices and triangles into our own buffers so we can operate // on them safely from the background thread. mVertices = (double *)HACD_ALLOC(sizeof(double)*countPoints * 3); mIndices = (uint32_t *)HACD_ALLOC(sizeof(uint32_t)*countTriangles * 3); memcpy(mVertices, _points, sizeof(double)*countPoints * 3); memcpy(mIndices, _triangles, sizeof(uint32_t)*countTriangles * 3); mRunning = true; mThread = new std::thread([this, countPoints, countTriangles, _desc]() { ComputeNow(mVertices, countPoints, mIndices, countTriangles, _desc); mRunning = false; }); #else releaseHACD(); ComputeNow(_points, countPoints, _triangles, countTriangles, _desc); #endif return true; } bool ComputeNow(const double* const points, const uint32_t countPoints, const uint32_t* const triangles, const uint32_t countTriangles, const Parameters& _desc) { uint32_t ret = 0; mHullCount = 0; mCallback = _desc.m_callback; mLogger = _desc.m_logger; IVHACD::Parameters desc = _desc; // Set our intercepting callback interfaces if non-null desc.m_callback = desc.m_callback ? this : nullptr; desc.m_logger = desc.m_logger ? this : nullptr; if ( countPoints ) { bool ok = mVHACD->Compute(points, countPoints, triangles, countTriangles, desc); if (ok) { ret = mVHACD->GetNConvexHulls(); mHulls = new IVHACD::ConvexHull[ret]; for (uint32_t i = 0; i < ret; i++) { VHACD::IVHACD::ConvexHull vhull; mVHACD->GetConvexHull(i, vhull); VHACD::IVHACD::ConvexHull h; h.m_nPoints = vhull.m_nPoints; h.m_points = (double *)HACD_ALLOC(sizeof(double) * 3 * h.m_nPoints); memcpy(h.m_points, vhull.m_points, sizeof(double) * 3 * h.m_nPoints); h.m_nTriangles = vhull.m_nTriangles; h.m_triangles = (uint32_t *)HACD_ALLOC(sizeof(uint32_t) * 3 * h.m_nTriangles); memcpy(h.m_triangles, vhull.m_triangles, sizeof(uint32_t) * 3 * h.m_nTriangles); h.m_volume = vhull.m_volume; h.m_center[0] = vhull.m_center[0]; h.m_center[1] = vhull.m_center[1]; h.m_center[2] = vhull.m_center[2]; mHulls[i] = h; if (mCancel) { ret = 0; break; } } } } mHullCount = ret; return ret ? 
true : false; } void releaseHull(VHACD::IVHACD::ConvexHull &h) { HACD_FREE((void *)h.m_triangles); HACD_FREE((void *)h.m_points); h.m_triangles = nullptr; h.m_points = nullptr; } virtual void GetConvexHull(const uint32_t index, VHACD::IVHACD::ConvexHull& ch) const final { if ( index < mHullCount ) { ch = mHulls[index]; } } void releaseHACD(void) // release memory associated with the last HACD request { for (uint32_t i=0; i<mHullCount; i++) { releaseHull(mHulls[i]); } delete[]mHulls; mHulls = nullptr; mHullCount = 0; HACD_FREE(mVertices); mVertices = nullptr; HACD_FREE(mIndices); mIndices = nullptr; } virtual void release(void) // release the HACD_API interface { delete this; } virtual uint32_t getHullCount(void) { return mHullCount; } virtual void Cancel() final { if (mRunning) { mVHACD->Cancel(); // Set the cancel signal to the base VHACD } if (mThread) { mThread->join(); // Wait for the thread to fully exit before we delete the instance delete mThread; mThread = nullptr; Log("Convex Decomposition thread canceled\n"); } mCancel = false; // clear the cancel semaphore } virtual bool Compute(const float* const points, const uint32_t countPoints, const uint32_t* const triangles, const uint32_t countTriangles, const Parameters& params) final { double *vertices = (double *)HACD_ALLOC(sizeof(double)*countPoints * 3); const float *source = points; double *dest = vertices; for (uint32_t i = 0; i < countPoints; i++) { dest[0] = source[0]; dest[1] = source[1]; dest[2] = source[2]; dest += 3; source += 3; } bool ret = Compute(vertices, countPoints, triangles, countTriangles, params); HACD_FREE(vertices); return ret; } virtual uint32_t GetNConvexHulls() const final { processPendingMessages(); return mHullCount; } virtual void Clean(void) final // release internally allocated memory { Cancel(); releaseHACD(); mVHACD->Clean(); } virtual void Release(void) final // release IVHACD { delete this; } virtual bool OCLInit(void* const oclDevice, IVHACD::IUserLogger* const logger = 0) final { return mVHACD->OCLInit(oclDevice, logger); } virtual bool OCLRelease(IVHACD::IUserLogger* const logger = 0) final { return mVHACD->OCLRelease(logger); } virtual void Update(const double overallProgress, const double stageProgress, const double operationProgress, const char* const stage, const char* const operation) final { mMessageMutex.lock(); mHaveUpdateMessage = true; mOverallProgress = overallProgress; mStageProgress = stageProgress; mOperationProgress = operationProgress; mStage = std::string(stage); mOperation = std::string(operation); mMessageMutex.unlock(); } virtual void Log(const char* const msg) final { mMessageMutex.lock(); mHaveLogMessage = true; mMessage = std::string(msg); mMessageMutex.unlock(); } virtual bool IsReady(void) const final { processPendingMessages(); return !mRunning; } // As a convenience for the calling application we only send it update and log messages from it's own main // thread. This reduces the complexity burden on the caller by making sure it only has to deal with log // messages in it's main application thread. 
void processPendingMessages(void) const { // If we have a new update message and the user has specified a callback we send the message and clear the semaphore if (mHaveUpdateMessage && mCallback) { mMessageMutex.lock(); mCallback->Update(mOverallProgress, mStageProgress, mOperationProgress, mStage.c_str(), mOperation.c_str()); mHaveUpdateMessage = false; mMessageMutex.unlock(); } // If we have a new log message and the user has specified a callback we send the message and clear the semaphore if (mHaveLogMessage && mLogger) { mMessageMutex.lock(); mLogger->Log(mMessage.c_str()); mHaveLogMessage = false; mMessageMutex.unlock(); } } // Will compute the center of mass of the convex hull decomposition results and return it // in 'centerOfMass'. Returns false if the center of mass could not be computed. virtual bool ComputeCenterOfMass(double centerOfMass[3]) const { bool ret = false; centerOfMass[0] = 0; centerOfMass[1] = 0; centerOfMass[2] = 0; if (mVHACD && IsReady() ) { ret = mVHACD->ComputeCenterOfMass(centerOfMass); } return ret; } // Will analyze the HACD results and compute the constraints solutions. // It will analyze the point at which any two convex hulls touch each other and // return the total number of constraint pairs found virtual uint32_t ComputeConstraints(void) final { uint32_t ret = 0; if (mVHACD && IsReady()) { ret = mVHACD->ComputeConstraints(); } return ret; } virtual const Constraint *GetConstraint(uint32_t index) const final { const Constraint * ret = nullptr; if (mVHACD && IsReady()) { ret = mVHACD->GetConstraint(index); } return ret; } private: double *mVertices{ nullptr }; uint32_t *mIndices{ nullptr }; std::atomic< uint32_t> mHullCount{ 0 }; VHACD::IVHACD::ConvexHull *mHulls{ nullptr }; VHACD::IVHACD::IUserCallback *mCallback{ nullptr }; VHACD::IVHACD::IUserLogger *mLogger{ nullptr }; VHACD::IVHACD *mVHACD{ nullptr }; std::thread *mThread{ nullptr }; std::atomic< bool > mRunning{ false }; std::atomic<bool> mCancel{ false }; // Thread safe caching mechanism for messages and update status. // This is so that caller always gets messages in his own thread // Member variables are marked as 'mutable' since the message dispatch function // is called from const query methods. mutable std::mutex mMessageMutex; mutable std::atomic< bool > mHaveUpdateMessage{ false }; mutable std::atomic< bool > mHaveLogMessage{ false }; mutable double mOverallProgress{ 0 }; mutable double mStageProgress{ 0 }; mutable double mOperationProgress{ 0 }; mutable std::string mStage; mutable std::string mOperation; mutable std::string mMessage; }; IVHACD* CreateVHACD_ASYNC(void) { MyHACD_API *m = new MyHACD_API; return static_cast<IVHACD *>(m); } }; // end of VHACD namespace
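MyHACD_API above wraps the synchronous VHACD implementation in a background std::thread and marshals progress and log callbacks back to the caller's thread through processPendingMessages(), which runs whenever the caller polls IsReady() or GetNConvexHulls(). A hedged usage sketch of that polling pattern follows; the include path and the single-triangle input are placeholders, and the default-constructed IVHACD::Parameters is assumed to carry sensible defaults.

#include "VHACD.h"          // path depends on the project layout
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <thread>
#include <vector>

int main()
{
    // One triangle as stand-in input; a real caller passes its own mesh here.
    std::vector<float>    verts = { 0.f, 0.f, 0.f,   1.f, 0.f, 0.f,   0.f, 1.f, 0.f };
    std::vector<uint32_t> tris  = { 0, 1, 2 };

    VHACD::IVHACD* hacd = VHACD::CreateVHACD_ASYNC();
    VHACD::IVHACD::Parameters params;                           // default decomposition settings
    hacd->Compute(verts.data(), 3, tris.data(), 1, params);     // returns immediately

    // Poll from the main thread; queued progress/log callbacks are delivered during these calls.
    while (!hacd->IsReady())
        std::this_thread::sleep_for(std::chrono::milliseconds(10));

    for (uint32_t i = 0; i < hacd->GetNConvexHulls(); ++i)
    {
        VHACD::IVHACD::ConvexHull ch;
        hacd->GetConvexHull(i, ch);
        std::printf("hull %u: %u points, %u triangles\n", i, ch.m_nPoints, ch.m_nTriangles);
    }
    hacd->Release();
    return 0;
}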
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/FloatMath.cpp
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <float.h>
#include "FloatMath.h"
#include <vector>
#include <malloc.h>

#define REAL float
#include "FloatMath.inl"
#undef REAL

#define REAL double
#include "FloatMath.inl"
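FloatMath.cpp compiles the same implementation file twice, once with REAL defined as float and once as double, producing both precision variants of every fm_* function from a single source. The sketch below mirrors that idea in one self-contained file by stamping a function out with a macro instead of a shared .inl; the names are illustrative only.

#include <cstdio>

// One implementation body, expanded once per precision.
#define DEFINE_LENGTH_SQUARED(REAL)                       \
    inline REAL lengthSquared(const REAL v[3])            \
    {                                                     \
        return v[0] * v[0] + v[1] * v[1] + v[2] * v[2];   \
    }

DEFINE_LENGTH_SQUARED(float)   // mirrors "#define REAL float / #include FloatMath.inl"
DEFINE_LENGTH_SQUARED(double)  // mirrors the second inclusion with REAL = double

int main()
{
    const float  vf[3] = { 1.0f, 2.0f, 3.0f };
    const double vd[3] = { 1.0, 2.0, 3.0 };
    std::printf("%f %f\n", static_cast<double>(lengthSquared(vf)), lengthSquared(vd));
    return 0;
}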
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btMinMax.h
/* Copyright (c) 2003-2006 Gino van den Bergen / Erwin Coumans http://continuousphysics.com/Bullet/ This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #ifndef BT_GEN_MINMAX_H #define BT_GEN_MINMAX_H #include "btScalar.h" template <class T> SIMD_FORCE_INLINE const T& btMin(const T& a, const T& b) { return a < b ? a : b; } template <class T> SIMD_FORCE_INLINE const T& btMax(const T& a, const T& b) { return a > b ? a : b; } template <class T> SIMD_FORCE_INLINE const T& btClamped(const T& a, const T& lb, const T& ub) { return a < lb ? lb : (ub < a ? ub : a); } template <class T> SIMD_FORCE_INLINE void btSetMin(T& a, const T& b) { if (b < a) { a = b; } } template <class T> SIMD_FORCE_INLINE void btSetMax(T& a, const T& b) { if (a < b) { a = b; } } template <class T> SIMD_FORCE_INLINE void btClamp(T& a, const T& lb, const T& ub) { if (a < lb) { a = lb; } else if (ub < a) { a = ub; } } #endif //BT_GEN_MINMAX_H
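A quick usage sketch for the helpers above; it assumes btMinMax.h (and the btScalar.h it includes) are on the include path.

#include "btMinMax.h"
#include <cstdio>

int main()
{
    btScalar speed = 12.5f;
    btClamp(speed, btScalar(0), btScalar(10));   // clamp in place -> 10
    btScalar lo = btMin(btScalar(3), speed);     // 3
    btScalar hi = btMax(btScalar(3), speed);     // 10
    btSetMax(lo, btScalar(5));                   // raise lo to 5
    std::printf("%f %f %f\n", (double)speed, (double)lo, (double)hi);
    return 0;
}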
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdMutex.h
/*! ** ** Copyright (c) 2009 by John W. Ratcliff mailto:[email protected] ** ** Portions of this source has been released with the PhysXViewer application, as well as ** Rocket, CreateDynamics, ODF, and as a number of sample code snippets. ** ** If you find this code useful or you are feeling particularily generous I would ** ask that you please go to http://www.amillionpixels.us and make a donation ** to Troy DeMolay. ** ** DeMolay is a youth group for young men between the ages of 12 and 21. ** It teaches strong moral principles, as well as leadership skills and ** public speaking. The donations page uses the 'pay for pixels' paradigm ** where, in this case, a pixel is only a single penny. Donations can be ** made for as small as $4 or as high as a $100 block. Each person who donates ** will get a link to their own site as well as acknowledgement on the ** donations blog located here http://www.amillionpixels.blogspot.com/ ** ** If you wish to contact me you can use the following methods: ** ** Skype ID: jratcliff63367 ** Yahoo: jratcliff63367 ** AOL: jratcliff1961 ** email: [email protected] ** ** ** The MIT license: ** ** Permission is hereby granted, free of charge, to any person obtaining a copy ** of this software and associated documentation files (the "Software"), to deal ** in the Software without restriction, including without limitation the rights ** to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ** copies of the Software, and to permit persons to whom the Software is furnished ** to do so, subject to the following conditions: ** ** The above copyright notice and this permission notice shall be included in all ** copies or substantial portions of the Software. ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ** WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ** CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #pragma once #ifndef VHACD_MUTEX_H #define VHACD_MUTEX_H #if defined(WIN32) #ifndef _WIN32_WINNT #define _WIN32_WINNT 0x400 #endif #include <windows.h> #pragma comment(lib, "winmm.lib") #endif #if defined(__linux__) //#include <sys/time.h> #include <errno.h> #include <time.h> #include <unistd.h> #define __stdcall #endif #if defined(__APPLE__) || defined(__linux__) #include <pthread.h> #endif #if defined(__APPLE__) #define PTHREAD_MUTEX_RECURSIVE_NP PTHREAD_MUTEX_RECURSIVE #endif #define VHACD_DEBUG //#define VHACD_NDEBUG #ifdef VHACD_NDEBUG #define VHACD_VERIFY(x) (x) #else #define VHACD_VERIFY(x) assert((x)) #endif namespace VHACD { class Mutex { public: Mutex(void) { #if defined(WIN32) || defined(_XBOX) InitializeCriticalSection(&m_mutex); #elif defined(__APPLE__) || defined(__linux__) pthread_mutexattr_t mutexAttr; // Mutex Attribute VHACD_VERIFY(pthread_mutexattr_init(&mutexAttr) == 0); VHACD_VERIFY(pthread_mutexattr_settype(&mutexAttr, PTHREAD_MUTEX_RECURSIVE_NP) == 0); VHACD_VERIFY(pthread_mutex_init(&m_mutex, &mutexAttr) == 0); VHACD_VERIFY(pthread_mutexattr_destroy(&mutexAttr) == 0); #endif } ~Mutex(void) { #if defined(WIN32) || defined(_XBOX) DeleteCriticalSection(&m_mutex); #elif defined(__APPLE__) || defined(__linux__) VHACD_VERIFY(pthread_mutex_destroy(&m_mutex) == 0); #endif } void Lock(void) { #if defined(WIN32) || defined(_XBOX) EnterCriticalSection(&m_mutex); #elif defined(__APPLE__) || defined(__linux__) VHACD_VERIFY(pthread_mutex_lock(&m_mutex) == 0); #endif } bool TryLock(void) { #if defined(WIN32) || defined(_XBOX) bool bRet = false; //assert(("TryEnterCriticalSection seems to not work on XP???", 0)); bRet = TryEnterCriticalSection(&m_mutex) ? true : false; return bRet; #elif defined(__APPLE__) || defined(__linux__) int32_t result = pthread_mutex_trylock(&m_mutex); return (result == 0); #endif } void Unlock(void) { #if defined(WIN32) || defined(_XBOX) LeaveCriticalSection(&m_mutex); #elif defined(__APPLE__) || defined(__linux__) VHACD_VERIFY(pthread_mutex_unlock(&m_mutex) == 0); #endif } private: #if defined(WIN32) || defined(_XBOX) CRITICAL_SECTION m_mutex; #elif defined(__APPLE__) || defined(__linux__) pthread_mutex_t m_mutex; #endif }; } #endif // VHACD_MUTEX_H
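VHACD::Mutex above is a thin recursive-mutex wrapper (a CRITICAL_SECTION on Windows, a PTHREAD_MUTEX_RECURSIVE pthread mutex elsewhere). Below is a hedged usage sketch with a small RAII guard of my own layered on top; note that this copy of the header uses assert() inside VHACD_VERIFY without including <cassert> itself, so the sketch includes it first.

#include <cassert>
#include "vhacdMutex.h"
#include <cstdio>

// Minimal scope guard; not part of the header above.
class ScopedLock
{
public:
    explicit ScopedLock(VHACD::Mutex& m) : mMutex(m) { mMutex.Lock(); }
    ~ScopedLock() { mMutex.Unlock(); }
private:
    VHACD::Mutex& mMutex;
};

static VHACD::Mutex gMutex;

static void touchSharedState()
{
    ScopedLock guard(gMutex);          // re-entering is fine: the mutex is recursive
    std::printf("inside nested lock\n");
}

int main()
{
    ScopedLock guard(gMutex);
    touchSharedState();                // nested Lock() on the same thread succeeds
    return 0;
}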
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btScalar.h
/* Copyright (c) 2003-2009 Erwin Coumans http://bullet.googlecode.com This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #ifndef BT_SCALAR_H #define BT_SCALAR_H #ifdef BT_MANAGED_CODE //Aligned data types not supported in managed code #pragma unmanaged #endif #include <float.h> #include <math.h> #include <stdlib.h> //size_t for MSVC 6.0 #include <stdint.h> /* SVN $Revision$ on $Date$ from http://bullet.googlecode.com*/ #define BT_BULLET_VERSION 279 inline int32_t btGetVersion() { return BT_BULLET_VERSION; } #if defined(DEBUG) || defined(_DEBUG) #define BT_DEBUG #endif #ifdef _WIN32 #if defined(__MINGW32__) || defined(__CYGWIN__) || (defined(_MSC_VER) && _MSC_VER < 1300) #define SIMD_FORCE_INLINE inline #define ATTRIBUTE_ALIGNED16(a) a #define ATTRIBUTE_ALIGNED64(a) a #define ATTRIBUTE_ALIGNED128(a) a #else //#define BT_HAS_ALIGNED_ALLOCATOR #pragma warning(disable : 4324) // disable padding warning // #pragma warning(disable:4530) // Disable the exception disable but used in MSCV Stl warning. // #pragma warning(disable:4996) //Turn off warnings about deprecated C routines // #pragma warning(disable:4786) // Disable the "debug name too long" warning #define SIMD_FORCE_INLINE __forceinline #define ATTRIBUTE_ALIGNED16(a) __declspec(align(16)) a #define ATTRIBUTE_ALIGNED64(a) __declspec(align(64)) a #define ATTRIBUTE_ALIGNED128(a) __declspec(align(128)) a #ifdef _XBOX #define BT_USE_VMX128 #include <ppcintrinsics.h> #define BT_HAVE_NATIVE_FSEL #define btFsel(a, b, c) __fsel((a), (b), (c)) #else #if (defined(_WIN32) && (_MSC_VER) && _MSC_VER >= 1400) && (!defined(BT_USE_DOUBLE_PRECISION)) #define BT_USE_SSE #include <emmintrin.h> #endif #endif //_XBOX #endif //__MINGW32__ #include <assert.h> #ifdef BT_DEBUG #define btAssert assert #else #define btAssert(x) #endif //btFullAssert is optional, slows down a lot #define btFullAssert(x) #define btLikely(_c) _c #define btUnlikely(_c) _c #else #if defined(__CELLOS_LV2__) #define SIMD_FORCE_INLINE inline __attribute__((always_inline)) #define ATTRIBUTE_ALIGNED16(a) a __attribute__((aligned(16))) #define ATTRIBUTE_ALIGNED64(a) a __attribute__((aligned(64))) #define ATTRIBUTE_ALIGNED128(a) a __attribute__((aligned(128))) #ifndef assert #include <assert.h> #endif #ifdef BT_DEBUG #ifdef __SPU__ #include <spu_printf.h> #define printf spu_printf #define btAssert(x) \ { \ if (!(x)) { \ printf("Assert " __FILE__ ":%u (" #x ")\n", __LINE__); \ spu_hcmpeq(0, 0); \ } \ } #else #define btAssert assert #endif #else #define btAssert(x) #endif //btFullAssert is optional, slows down a lot #define btFullAssert(x) #define btLikely(_c) _c #define btUnlikely(_c) _c #else #ifdef USE_LIBSPE2 #define SIMD_FORCE_INLINE __inline #define ATTRIBUTE_ALIGNED16(a) a __attribute__((aligned(16))) #define ATTRIBUTE_ALIGNED64(a) a 
__attribute__((aligned(64))) #define ATTRIBUTE_ALIGNED128(a) a __attribute__((aligned(128))) #ifndef assert #include <assert.h> #endif #ifdef BT_DEBUG #define btAssert assert #else #define btAssert(x) #endif //btFullAssert is optional, slows down a lot #define btFullAssert(x) #define btLikely(_c) __builtin_expect((_c), 1) #define btUnlikely(_c) __builtin_expect((_c), 0) #else //non-windows systems #if (defined(__APPLE__) && defined(__i386__) && (!defined(BT_USE_DOUBLE_PRECISION))) #define BT_USE_SSE #include <emmintrin.h> #define SIMD_FORCE_INLINE inline ///@todo: check out alignment methods for other platforms/compilers #define ATTRIBUTE_ALIGNED16(a) a __attribute__((aligned(16))) #define ATTRIBUTE_ALIGNED64(a) a __attribute__((aligned(64))) #define ATTRIBUTE_ALIGNED128(a) a __attribute__((aligned(128))) #ifndef assert #include <assert.h> #endif #if defined(DEBUG) || defined(_DEBUG) #define btAssert assert #else #define btAssert(x) #endif //btFullAssert is optional, slows down a lot #define btFullAssert(x) #define btLikely(_c) _c #define btUnlikely(_c) _c #else #define SIMD_FORCE_INLINE inline ///@todo: check out alignment methods for other platforms/compilers ///#define ATTRIBUTE_ALIGNED16(a) a __attribute__ ((aligned (16))) ///#define ATTRIBUTE_ALIGNED64(a) a __attribute__ ((aligned (64))) ///#define ATTRIBUTE_ALIGNED128(a) a __attribute__ ((aligned (128))) #define ATTRIBUTE_ALIGNED16(a) a #define ATTRIBUTE_ALIGNED64(a) a #define ATTRIBUTE_ALIGNED128(a) a #ifndef assert #include <assert.h> #endif #if defined(DEBUG) || defined(_DEBUG) #define btAssert assert #else #define btAssert(x) #endif //btFullAssert is optional, slows down a lot #define btFullAssert(x) #define btLikely(_c) _c #define btUnlikely(_c) _c #endif //__APPLE__ #endif // LIBSPE2 #endif //__CELLOS_LV2__ #endif ///The btScalar type abstracts floating point numbers, to easily switch between double and single floating point precision. 
#if defined(BT_USE_DOUBLE_PRECISION) typedef double btScalar; //this number could be bigger in double precision #define BT_LARGE_FLOAT 1e30 #else typedef float btScalar; //keep BT_LARGE_FLOAT*BT_LARGE_FLOAT < FLT_MAX #define BT_LARGE_FLOAT 1e18f #endif #define BT_DECLARE_ALIGNED_ALLOCATOR() \ SIMD_FORCE_INLINE void* operator new(size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes, 16); } \ SIMD_FORCE_INLINE void operator delete(void* ptr) { btAlignedFree(ptr); } \ SIMD_FORCE_INLINE void* operator new(size_t, void* ptr) { return ptr; } \ SIMD_FORCE_INLINE void operator delete(void*, void*) {} \ SIMD_FORCE_INLINE void* operator new[](size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes, 16); } \ SIMD_FORCE_INLINE void operator delete[](void* ptr) { btAlignedFree(ptr); } \ SIMD_FORCE_INLINE void* operator new[](size_t, void* ptr) { return ptr; } \ SIMD_FORCE_INLINE void operator delete[](void*, void*) {} #if defined(BT_USE_DOUBLE_PRECISION) || defined(BT_FORCE_DOUBLE_FUNCTIONS) SIMD_FORCE_INLINE btScalar btSqrt(btScalar x) { return sqrt(x); } SIMD_FORCE_INLINE btScalar btFabs(btScalar x) { return fabs(x); } SIMD_FORCE_INLINE btScalar btCos(btScalar x) { return cos(x); } SIMD_FORCE_INLINE btScalar btSin(btScalar x) { return sin(x); } SIMD_FORCE_INLINE btScalar btTan(btScalar x) { return tan(x); } SIMD_FORCE_INLINE btScalar btAcos(btScalar x) { if (x < btScalar(-1)) x = btScalar(-1); if (x > btScalar(1)) x = btScalar(1); return acos(x); } SIMD_FORCE_INLINE btScalar btAsin(btScalar x) { if (x < btScalar(-1)) x = btScalar(-1); if (x > btScalar(1)) x = btScalar(1); return asin(x); } SIMD_FORCE_INLINE btScalar btAtan(btScalar x) { return atan(x); } SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) { return atan2(x, y); } SIMD_FORCE_INLINE btScalar btExp(btScalar x) { return exp(x); } SIMD_FORCE_INLINE btScalar btLog(btScalar x) { return log(x); } SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) { return pow(x, y); } SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) { return fmod(x, y); } #else SIMD_FORCE_INLINE btScalar btSqrt(btScalar y) { #ifdef USE_APPROXIMATION double x, z, tempf; unsigned long* tfptr = ((unsigned long*)&tempf) + 1; tempf = y; *tfptr = (0xbfcdd90a - *tfptr) >> 1; /* estimate of 1/sqrt(y) */ x = tempf; z = y * btScalar(0.5); x = (btScalar(1.5) * x) - (x * x) * (x * z); /* iteration formula */ x = (btScalar(1.5) * x) - (x * x) * (x * z); x = (btScalar(1.5) * x) - (x * x) * (x * z); x = (btScalar(1.5) * x) - (x * x) * (x * z); x = (btScalar(1.5) * x) - (x * x) * (x * z); return x * y; #else return sqrtf(y); #endif } SIMD_FORCE_INLINE btScalar btFabs(btScalar x) { return fabsf(x); } SIMD_FORCE_INLINE btScalar btCos(btScalar x) { return cosf(x); } SIMD_FORCE_INLINE btScalar btSin(btScalar x) { return sinf(x); } SIMD_FORCE_INLINE btScalar btTan(btScalar x) { return tanf(x); } SIMD_FORCE_INLINE btScalar btAcos(btScalar x) { if (x < btScalar(-1)) x = btScalar(-1); if (x > btScalar(1)) x = btScalar(1); return acosf(x); } SIMD_FORCE_INLINE btScalar btAsin(btScalar x) { if (x < btScalar(-1)) x = btScalar(-1); if (x > btScalar(1)) x = btScalar(1); return asinf(x); } SIMD_FORCE_INLINE btScalar btAtan(btScalar x) { return atanf(x); } SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) { return atan2f(x, y); } SIMD_FORCE_INLINE btScalar btExp(btScalar x) { return expf(x); } SIMD_FORCE_INLINE btScalar btLog(btScalar x) { return logf(x); } SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) { return powf(x, y); } SIMD_FORCE_INLINE btScalar 
btFmod(btScalar x, btScalar y) { return fmodf(x, y); } #endif #define SIMD_2_PI btScalar(6.283185307179586232) #define SIMD_PI (SIMD_2_PI * btScalar(0.5)) #define SIMD_HALF_PI (SIMD_2_PI * btScalar(0.25)) #define SIMD_RADS_PER_DEG (SIMD_2_PI / btScalar(360.0)) #define SIMD_DEGS_PER_RAD (btScalar(360.0) / SIMD_2_PI) #define SIMDSQRT12 btScalar(0.7071067811865475244008443621048490) #define btRecipSqrt(x) ((btScalar)(btScalar(1.0) / btSqrt(btScalar(x)))) /* reciprocal square root */ #ifdef BT_USE_DOUBLE_PRECISION #define SIMD_EPSILON DBL_EPSILON #define SIMD_INFINITY DBL_MAX #else #define SIMD_EPSILON FLT_EPSILON #define SIMD_INFINITY FLT_MAX #endif SIMD_FORCE_INLINE btScalar btAtan2Fast(btScalar y, btScalar x) { btScalar coeff_1 = SIMD_PI / 4.0f; btScalar coeff_2 = 3.0f * coeff_1; btScalar abs_y = btFabs(y); btScalar angle; if (x >= 0.0f) { btScalar r = (x - abs_y) / (x + abs_y); angle = coeff_1 - coeff_1 * r; } else { btScalar r = (x + abs_y) / (abs_y - x); angle = coeff_2 - coeff_1 * r; } return (y < 0.0f) ? -angle : angle; } SIMD_FORCE_INLINE bool btFuzzyZero(btScalar x) { return btFabs(x) < SIMD_EPSILON; } SIMD_FORCE_INLINE bool btEqual(btScalar a, btScalar eps) { return (((a) <= eps) && !((a) < -eps)); } SIMD_FORCE_INLINE bool btGreaterEqual(btScalar a, btScalar eps) { return (!((a) <= eps)); } SIMD_FORCE_INLINE int32_t btIsNegative(btScalar x) { return x < btScalar(0.0) ? 1 : 0; } SIMD_FORCE_INLINE btScalar btRadians(btScalar x) { return x * SIMD_RADS_PER_DEG; } SIMD_FORCE_INLINE btScalar btDegrees(btScalar x) { return x * SIMD_DEGS_PER_RAD; } #define BT_DECLARE_HANDLE(name) \ typedef struct name##__ { \ int32_t unused; \ } * name #ifndef btFsel SIMD_FORCE_INLINE btScalar btFsel(btScalar a, btScalar b, btScalar c) { return a >= 0 ? b : c; } #endif #define btFsels(a, b, c) (btScalar) btFsel(a, b, c) SIMD_FORCE_INLINE bool btMachineIsLittleEndian() { long int i = 1; const char* p = (const char*)&i; if (p[0] == 1) // Lowest address contains the least significant byte return true; else return false; } ///btSelect avoids branches, which makes performance much better for consoles like Playstation 3 and XBox 360 ///Thanks Phil Knight. See also http://www.cellperformance.com/articles/2006/04/more_techniques_for_eliminatin_1.html SIMD_FORCE_INLINE unsigned btSelect(unsigned condition, unsigned valueIfConditionNonZero, unsigned valueIfConditionZero) { // Set testNz to 0xFFFFFFFF if condition is nonzero, 0x00000000 if condition is zero // Rely on positive value or'ed with its negative having sign bit on // and zero value or'ed with its negative (which is still zero) having sign bit off // Use arithmetic shift right, shifting the sign bit through all 32 bits unsigned testNz = (unsigned)(((int32_t)condition | -(int32_t)condition) >> 31); unsigned testEqz = ~testNz; return ((valueIfConditionNonZero & testNz) | (valueIfConditionZero & testEqz)); } SIMD_FORCE_INLINE int32_t btSelect(unsigned condition, int32_t valueIfConditionNonZero, int32_t valueIfConditionZero) { unsigned testNz = (unsigned)(((int32_t)condition | -(int32_t)condition) >> 31); unsigned testEqz = ~testNz; return static_cast<int32_t>((valueIfConditionNonZero & testNz) | (valueIfConditionZero & testEqz)); } SIMD_FORCE_INLINE float btSelect(unsigned condition, float valueIfConditionNonZero, float valueIfConditionZero) { #ifdef BT_HAVE_NATIVE_FSEL return (float)btFsel((btScalar)condition - btScalar(1.0f), valueIfConditionNonZero, valueIfConditionZero); #else return (condition != 0) ? 
valueIfConditionNonZero : valueIfConditionZero; #endif } template <typename T> SIMD_FORCE_INLINE void btSwap(T& a, T& b) { T tmp = a; a = b; b = tmp; } //PCK: endian swapping functions SIMD_FORCE_INLINE unsigned btSwapEndian(unsigned val) { return (((val & 0xff000000) >> 24) | ((val & 0x00ff0000) >> 8) | ((val & 0x0000ff00) << 8) | ((val & 0x000000ff) << 24)); } SIMD_FORCE_INLINE unsigned short btSwapEndian(unsigned short val) { return static_cast<unsigned short>(((val & 0xff00) >> 8) | ((val & 0x00ff) << 8)); } SIMD_FORCE_INLINE unsigned btSwapEndian(int32_t val) { return btSwapEndian((unsigned)val); } SIMD_FORCE_INLINE unsigned short btSwapEndian(short val) { return btSwapEndian((unsigned short)val); } ///btSwapFloat uses using char pointers to swap the endianness ////btSwapFloat/btSwapDouble will NOT return a float, because the machine might 'correct' invalid floating point values ///Not all values of sign/exponent/mantissa are valid floating point numbers according to IEEE 754. ///When a floating point unit is faced with an invalid value, it may actually change the value, or worse, throw an exception. ///In most systems, running user mode code, you wouldn't get an exception, but instead the hardware/os/runtime will 'fix' the number for you. ///so instead of returning a float/double, we return integer/long long integer SIMD_FORCE_INLINE uint32_t btSwapEndianFloat(float d) { uint32_t a = 0; unsigned char* dst = (unsigned char*)&a; unsigned char* src = (unsigned char*)&d; dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; return a; } // unswap using char pointers SIMD_FORCE_INLINE float btUnswapEndianFloat(uint32_t a) { float d = 0.0f; unsigned char* src = (unsigned char*)&a; unsigned char* dst = (unsigned char*)&d; dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; return d; } // swap using char pointers SIMD_FORCE_INLINE void btSwapEndianDouble(double d, unsigned char* dst) { unsigned char* src = (unsigned char*)&d; dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; } // unswap using char pointers SIMD_FORCE_INLINE double btUnswapEndianDouble(const unsigned char* src) { double d = 0.0; unsigned char* dst = (unsigned char*)&d; dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; return d; } // returns normalized value in range [-SIMD_PI, SIMD_PI] SIMD_FORCE_INLINE btScalar btNormalizeAngle(btScalar angleInRadians) { angleInRadians = btFmod(angleInRadians, SIMD_2_PI); if (angleInRadians < -SIMD_PI) { return angleInRadians + SIMD_2_PI; } else if (angleInRadians > SIMD_PI) { return angleInRadians - SIMD_2_PI; } else { return angleInRadians; } } ///rudimentary class to provide type info struct btTypedObject { btTypedObject(int32_t objectType) : m_objectType(objectType) { } int32_t m_objectType; inline int32_t getObjectType() const { return m_objectType; } }; #endif //BT_SCALAR_H
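A minimal usage sketch (not part of the original header) exercising a few of the helpers defined above; the include name "btScalar.h" and the standalone main() are assumptions made purely for illustration.

#include <cstdio>
#include "btScalar.h"

int main()
{
    // Branchless select: returns 10u because the condition (3) is nonzero.
    unsigned picked = btSelect(3u, 10u, 20u);

    // Wrap an angle of 3*pi back into the range [-SIMD_PI, SIMD_PI].
    btScalar wrapped = btNormalizeAngle(btScalar(3.0) * SIMD_PI);

    // Endian round trip of a float through its byte-swapped integer representation.
    float original = 1.5f;
    float restored = btUnswapEndianFloat(btSwapEndianFloat(original));

    printf("picked=%u wrapped=%f restored=%f\n", picked, (double)wrapped, (double)restored);
    return 0;
}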
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdRaycastMesh.h
#ifndef RAYCAST_MESH_H
#define RAYCAST_MESH_H

#include <stdint.h>

namespace VHACD
{
    // Very simple brute-force raycast against a triangle mesh. Tests every triangle; no hierarchy.
    // Makes a deep copy of the mesh and always performs its calculations in full double precision.
    class RaycastMesh
    {
    public:
        static RaycastMesh * createRaycastMesh(uint32_t vcount,          // The number of vertices in the source triangle mesh
                                               const double *vertices,   // The array of vertex positions in the format x1,y1,z1..x2,y2,z2.. etc.
                                               uint32_t tcount,          // The number of triangles in the source triangle mesh
                                               const uint32_t *indices); // The triangle indices in the format of i1,i2,i3 ... i4,i5,i6, ...

        static RaycastMesh * createRaycastMesh(uint32_t vcount,          // The number of vertices in the source triangle mesh
                                               const float *vertices,    // The array of vertex positions in the format x1,y1,z1..x2,y2,z2.. etc.
                                               uint32_t tcount,          // The number of triangles in the source triangle mesh
                                               const uint32_t *indices); // The triangle indices in the format of i1,i2,i3 ... i4,i5,i6, ...

        virtual bool raycast(const double *from,           // The starting point of the raycast
                             const double *to,             // The ending point of the raycast
                             const double *closestToPoint, // The point to match the nearest hit location (can simply be the 'from' location if no specific point is needed)
                             double *hitLocation,          // The point where the ray hit nearest to the 'closestToPoint' location
                             double *hitDistance) = 0;     // The distance the ray traveled to the hit location

        virtual void release(void) = 0;

    protected:
        virtual ~RaycastMesh(void) { };
    };

} // end of VHACD namespace

#endif
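A brief usage sketch (illustrative only, not part of the header): building a one-triangle RaycastMesh and casting a ray straight through it. The include name and the standalone main() are assumptions.

#include <cstdio>
#include "vhacdRaycastMesh.h"

int main()
{
    const double vertices[] = { 0,0,0,  1,0,0,  0,1,0 };   // x1,y1,z1, x2,y2,z2, ...
    const uint32_t indices[] = { 0, 1, 2 };

    VHACD::RaycastMesh* rm = VHACD::RaycastMesh::createRaycastMesh(3, vertices, 1, indices);

    const double from[3] = { 0.25, 0.25,  1.0 };
    const double to[3]   = { 0.25, 0.25, -1.0 };
    double hitLocation[3];
    double hitDistance;
    // 'closestToPoint' can simply be the ray origin when no specific point is of interest.
    if (rm->raycast(from, to, from, hitLocation, &hitDistance))
    {
        printf("hit at (%f, %f, %f), distance %f\n",
               hitLocation[0], hitLocation[1], hitLocation[2], hitDistance);
    }
    rm->release();
    return 0;
}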
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdMesh.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_MESH_H #define VHACD_MESH_H #include "vhacdSArray.h" #include "vhacdVector.h" #define VHACD_DEBUG_MESH namespace VHACD { enum AXIS { AXIS_X = 0, AXIS_Y = 1, AXIS_Z = 2 }; struct Plane { double m_a; double m_b; double m_c; double m_d; AXIS m_axis; short m_index; }; #ifdef VHACD_DEBUG_MESH struct Material { Vec3<double> m_diffuseColor; double m_ambientIntensity; Vec3<double> m_specularColor; Vec3<double> m_emissiveColor; double m_shininess; double m_transparency; Material(void) { m_diffuseColor.X() = 0.5; m_diffuseColor.Y() = 0.5; m_diffuseColor.Z() = 0.5; m_specularColor.X() = 0.5; m_specularColor.Y() = 0.5; m_specularColor.Z() = 0.5; m_ambientIntensity = 0.4; m_emissiveColor.X() = 0.0; m_emissiveColor.Y() = 0.0; m_emissiveColor.Z() = 0.0; m_shininess = 0.4; m_transparency = 0.0; }; }; #endif // VHACD_DEBUG_MESH //! 
Triangular mesh data structure class Mesh { public: void AddPoint(const Vec3<double>& pt) { m_points.PushBack(pt); }; void SetPoint(size_t index, const Vec3<double>& pt) { m_points[index] = pt; }; const Vec3<double>& GetPoint(size_t index) const { return m_points[index]; }; Vec3<double>& GetPoint(size_t index) { return m_points[index]; }; size_t GetNPoints() const { return m_points.Size(); }; double* GetPoints() { return (double*)m_points.Data(); } // ugly const double* const GetPoints() const { return (double*)m_points.Data(); } // ugly const Vec3<double>* const GetPointsBuffer() const { return m_points.Data(); } // Vec3<double>* const GetPointsBuffer() { return m_points.Data(); } // void AddTriangle(const Vec3<int32_t>& tri) { m_triangles.PushBack(tri); }; void SetTriangle(size_t index, const Vec3<int32_t>& tri) { m_triangles[index] = tri; }; const Vec3<int32_t>& GetTriangle(size_t index) const { return m_triangles[index]; }; Vec3<int32_t>& GetTriangle(size_t index) { return m_triangles[index]; }; size_t GetNTriangles() const { return m_triangles.Size(); }; int32_t* GetTriangles() { return (int32_t*)m_triangles.Data(); } // ugly const int32_t* const GetTriangles() const { return (int32_t*)m_triangles.Data(); } // ugly const Vec3<int32_t>* const GetTrianglesBuffer() const { return m_triangles.Data(); } Vec3<int32_t>* const GetTrianglesBuffer() { return m_triangles.Data(); } const Vec3<double>& GetCenter() const { return m_center; } const Vec3<double>& GetMinBB() const { return m_minBB; } const Vec3<double>& GetMaxBB() const { return m_maxBB; } void ClearPoints() { m_points.Clear(); } void ClearTriangles() { m_triangles.Clear(); } void Clear() { ClearPoints(); ClearTriangles(); } void ResizePoints(size_t nPts) { m_points.Resize(nPts); } void ResizeTriangles(size_t nTri) { m_triangles.Resize(nTri); } void CopyPoints(SArray<Vec3<double> >& points) const { points = m_points; } double GetDiagBB() const { return m_diag; } double ComputeVolume() const; void ComputeConvexHull(const double* const pts, const size_t nPts); void Clip(const Plane& plane, SArray<Vec3<double> >& positivePart, SArray<Vec3<double> >& negativePart) const; bool IsInside(const Vec3<double>& pt) const; double ComputeDiagBB(); Vec3<double> &ComputeCenter(void); #ifdef VHACD_DEBUG_MESH bool LoadOFF(const std::string& fileName, bool invert); bool SaveVRML2(const std::string& fileName) const; bool SaveVRML2(std::ofstream& fout, const Material& material) const; bool SaveOFF(const std::string& fileName) const; #endif // VHACD_DEBUG_MESH //! Constructor. Mesh(); //! Destructor. ~Mesh(void); private: SArray<Vec3<double> > m_points; SArray<Vec3<int32_t> > m_triangles; Vec3<double> m_minBB; Vec3<double> m_maxBB; Vec3<double> m_center; double m_diag; }; } #endif
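A short usage sketch (illustrative only): filling a VHACD::Mesh with a tetrahedron and querying a few of its accessors. It assumes the accompanying vhacdMesh.cpp (which defines ComputeVolume and ComputeDiagBB) is linked in.

#include <cstdio>
#include "vhacdMesh.h"

int main()
{
    VHACD::Mesh mesh;
    mesh.AddPoint(VHACD::Vec3<double>(0.0, 0.0, 0.0));
    mesh.AddPoint(VHACD::Vec3<double>(1.0, 0.0, 0.0));
    mesh.AddPoint(VHACD::Vec3<double>(0.0, 1.0, 0.0));
    mesh.AddPoint(VHACD::Vec3<double>(0.0, 0.0, 1.0));

    mesh.AddTriangle(VHACD::Vec3<int32_t>(0, 2, 1));
    mesh.AddTriangle(VHACD::Vec3<int32_t>(0, 1, 3));
    mesh.AddTriangle(VHACD::Vec3<int32_t>(0, 3, 2));
    mesh.AddTriangle(VHACD::Vec3<int32_t>(1, 2, 3));

    printf("points: %zu, triangles: %zu\n", mesh.GetNPoints(), mesh.GetNTriangles());
    printf("volume: %f, bbox diagonal: %f\n", mesh.ComputeVolume(), mesh.ComputeDiagBB());
    return 0;
}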
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btVector3.h
/* Copyright (c) 2003-2006 Gino van den Bergen / Erwin Coumans http://continuousphysics.com/Bullet/ This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #ifndef BT_VECTOR3_H #define BT_VECTOR3_H #include "btMinMax.h" #include "btScalar.h" #ifdef BT_USE_DOUBLE_PRECISION #define btVector3Data btVector3DoubleData #define btVector3DataName "btVector3DoubleData" #else #define btVector3Data btVector3FloatData #define btVector3DataName "btVector3FloatData" #endif //BT_USE_DOUBLE_PRECISION /**@brief btVector3 can be used to represent 3D points and vectors. * It has an un-used w component to suit 16-byte alignment when btVector3 is stored in containers. This extra component can be used by derived classes (Quaternion?) or by user * Ideally, this class should be replaced by a platform optimized SIMD version that keeps the data in registers */ ATTRIBUTE_ALIGNED16(class) btVector3 { public: #if defined(__SPU__) && defined(__CELLOS_LV2__) btScalar m_floats[4]; public: SIMD_FORCE_INLINE const vec_float4& get128() const { return *((const vec_float4*)&m_floats[0]); } public: #else //__CELLOS_LV2__ __SPU__ #ifdef BT_USE_SSE // _WIN32 union { __m128 mVec128; btScalar m_floats[4]; }; SIMD_FORCE_INLINE __m128 get128() const { return mVec128; } SIMD_FORCE_INLINE void set128(__m128 v128) { mVec128 = v128; } #else btScalar m_floats[4]; #endif #endif //__CELLOS_LV2__ __SPU__ public: /**@brief No initialization constructor */ SIMD_FORCE_INLINE btVector3() {} /**@brief Constructor from scalars * @param x X value * @param y Y value * @param z Z value */ SIMD_FORCE_INLINE btVector3(const btScalar& x, const btScalar& y, const btScalar& z) { m_floats[0] = x; m_floats[1] = y; m_floats[2] = z; m_floats[3] = btScalar(0.); } /**@brief Add a vector to this one * @param The vector to add to this one */ SIMD_FORCE_INLINE btVector3& operator+=(const btVector3& v) { m_floats[0] += v.m_floats[0]; m_floats[1] += v.m_floats[1]; m_floats[2] += v.m_floats[2]; return *this; } /**@brief Subtract a vector from this one * @param The vector to subtract */ SIMD_FORCE_INLINE btVector3& operator-=(const btVector3& v) { m_floats[0] -= v.m_floats[0]; m_floats[1] -= v.m_floats[1]; m_floats[2] -= v.m_floats[2]; return *this; } /**@brief Scale the vector * @param s Scale factor */ SIMD_FORCE_INLINE btVector3& operator*=(const btScalar& s) { m_floats[0] *= s; m_floats[1] *= s; m_floats[2] *= s; return *this; } /**@brief Inversely scale the vector * @param s Scale factor to divide by */ SIMD_FORCE_INLINE btVector3& operator/=(const btScalar& s) { btFullAssert(s != btScalar(0.0)); return * this *= btScalar(1.0) / s; } /**@brief Return the dot product * @param v The other vector in the dot product */ SIMD_FORCE_INLINE btScalar dot(const btVector3& v) const { return m_floats[0] * v.m_floats[0] + m_floats[1] * 
v.m_floats[1] + m_floats[2] * v.m_floats[2]; } /**@brief Return the length of the vector squared */ SIMD_FORCE_INLINE btScalar length2() const { return dot(*this); } /**@brief Return the length of the vector */ SIMD_FORCE_INLINE btScalar length() const { return btSqrt(length2()); } /**@brief Return the distance squared between the ends of this and another vector * This is symantically treating the vector like a point */ SIMD_FORCE_INLINE btScalar distance2(const btVector3& v) const; /**@brief Return the distance between the ends of this and another vector * This is symantically treating the vector like a point */ SIMD_FORCE_INLINE btScalar distance(const btVector3& v) const; SIMD_FORCE_INLINE btVector3& safeNormalize() { btVector3 absVec = this->absolute(); int32_t maxIndex = absVec.maxAxis(); if (absVec[maxIndex] > 0) { *this /= absVec[maxIndex]; return * this /= length(); } setValue(1, 0, 0); return *this; } /**@brief Normalize this vector * x^2 + y^2 + z^2 = 1 */ SIMD_FORCE_INLINE btVector3& normalize() { return * this /= length(); } /**@brief Return a normalized version of this vector */ SIMD_FORCE_INLINE btVector3 normalized() const; /**@brief Return a rotated version of this vector * @param wAxis The axis to rotate about * @param angle The angle to rotate by */ SIMD_FORCE_INLINE btVector3 rotate(const btVector3& wAxis, const btScalar angle) const; /**@brief Return the angle between this and another vector * @param v The other vector */ SIMD_FORCE_INLINE btScalar angle(const btVector3& v) const { btScalar s = btSqrt(length2() * v.length2()); btFullAssert(s != btScalar(0.0)); return btAcos(dot(v) / s); } /**@brief Return a vector will the absolute values of each element */ SIMD_FORCE_INLINE btVector3 absolute() const { return btVector3( btFabs(m_floats[0]), btFabs(m_floats[1]), btFabs(m_floats[2])); } /**@brief Return the cross product between this and another vector * @param v The other vector */ SIMD_FORCE_INLINE btVector3 cross(const btVector3& v) const { return btVector3( m_floats[1] * v.m_floats[2] - m_floats[2] * v.m_floats[1], m_floats[2] * v.m_floats[0] - m_floats[0] * v.m_floats[2], m_floats[0] * v.m_floats[1] - m_floats[1] * v.m_floats[0]); } SIMD_FORCE_INLINE btScalar triple(const btVector3& v1, const btVector3& v2) const { return m_floats[0] * (v1.m_floats[1] * v2.m_floats[2] - v1.m_floats[2] * v2.m_floats[1]) + m_floats[1] * (v1.m_floats[2] * v2.m_floats[0] - v1.m_floats[0] * v2.m_floats[2]) + m_floats[2] * (v1.m_floats[0] * v2.m_floats[1] - v1.m_floats[1] * v2.m_floats[0]); } /**@brief Return the axis with the smallest value * Note return values are 0,1,2 for x, y, or z */ SIMD_FORCE_INLINE int32_t minAxis() const { return m_floats[0] < m_floats[1] ? (m_floats[0] < m_floats[2] ? 0 : 2) : (m_floats[1] < m_floats[2] ? 1 : 2); } /**@brief Return the axis with the largest value * Note return values are 0,1,2 for x, y, or z */ SIMD_FORCE_INLINE int32_t maxAxis() const { return m_floats[0] < m_floats[1] ? (m_floats[1] < m_floats[2] ? 2 : 1) : (m_floats[0] < m_floats[2] ? 
2 : 0); } SIMD_FORCE_INLINE int32_t furthestAxis() const { return absolute().minAxis(); } SIMD_FORCE_INLINE int32_t closestAxis() const { return absolute().maxAxis(); } SIMD_FORCE_INLINE void setInterpolate3(const btVector3& v0, const btVector3& v1, btScalar rt) { btScalar s = btScalar(1.0) - rt; m_floats[0] = s * v0.m_floats[0] + rt * v1.m_floats[0]; m_floats[1] = s * v0.m_floats[1] + rt * v1.m_floats[1]; m_floats[2] = s * v0.m_floats[2] + rt * v1.m_floats[2]; //don't do the unused w component // m_co[3] = s * v0[3] + rt * v1[3]; } /**@brief Return the linear interpolation between this and another vector * @param v The other vector * @param t The ration of this to v (t = 0 => return this, t=1 => return other) */ SIMD_FORCE_INLINE btVector3 lerp(const btVector3& v, const btScalar& t) const { return btVector3(m_floats[0] + (v.m_floats[0] - m_floats[0]) * t, m_floats[1] + (v.m_floats[1] - m_floats[1]) * t, m_floats[2] + (v.m_floats[2] - m_floats[2]) * t); } /**@brief Elementwise multiply this vector by the other * @param v The other vector */ SIMD_FORCE_INLINE btVector3& operator*=(const btVector3& v) { m_floats[0] *= v.m_floats[0]; m_floats[1] *= v.m_floats[1]; m_floats[2] *= v.m_floats[2]; return *this; } /**@brief Return the x value */ SIMD_FORCE_INLINE const btScalar& getX() const { return m_floats[0]; } /**@brief Return the y value */ SIMD_FORCE_INLINE const btScalar& getY() const { return m_floats[1]; } /**@brief Return the z value */ SIMD_FORCE_INLINE const btScalar& getZ() const { return m_floats[2]; } /**@brief Set the x value */ SIMD_FORCE_INLINE void setX(btScalar x) { m_floats[0] = x; }; /**@brief Set the y value */ SIMD_FORCE_INLINE void setY(btScalar y) { m_floats[1] = y; }; /**@brief Set the z value */ SIMD_FORCE_INLINE void setZ(btScalar z) { m_floats[2] = z; }; /**@brief Set the w value */ SIMD_FORCE_INLINE void setW(btScalar w) { m_floats[3] = w; }; /**@brief Return the x value */ SIMD_FORCE_INLINE const btScalar& x() const { return m_floats[0]; } /**@brief Return the y value */ SIMD_FORCE_INLINE const btScalar& y() const { return m_floats[1]; } /**@brief Return the z value */ SIMD_FORCE_INLINE const btScalar& z() const { return m_floats[2]; } /**@brief Return the w value */ SIMD_FORCE_INLINE const btScalar& w() const { return m_floats[3]; } //SIMD_FORCE_INLINE btScalar& operator[](int32_t i) { return (&m_floats[0])[i]; } //SIMD_FORCE_INLINE const btScalar& operator[](int32_t i) const { return (&m_floats[0])[i]; } ///operator btScalar*() replaces operator[], using implicit conversion. We added operator != and operator == to avoid pointer comparisons. 
SIMD_FORCE_INLINE operator btScalar*() { return &m_floats[0]; } SIMD_FORCE_INLINE operator const btScalar*() const { return &m_floats[0]; } SIMD_FORCE_INLINE bool operator==(const btVector3& other) const { return ((m_floats[3] == other.m_floats[3]) && (m_floats[2] == other.m_floats[2]) && (m_floats[1] == other.m_floats[1]) && (m_floats[0] == other.m_floats[0])); } SIMD_FORCE_INLINE bool operator!=(const btVector3& other) const { return !(*this == other); } /**@brief Set each element to the max of the current values and the values of another btVector3 * @param other The other btVector3 to compare with */ SIMD_FORCE_INLINE void setMax(const btVector3& other) { btSetMax(m_floats[0], other.m_floats[0]); btSetMax(m_floats[1], other.m_floats[1]); btSetMax(m_floats[2], other.m_floats[2]); btSetMax(m_floats[3], other.w()); } /**@brief Set each element to the min of the current values and the values of another btVector3 * @param other The other btVector3 to compare with */ SIMD_FORCE_INLINE void setMin(const btVector3& other) { btSetMin(m_floats[0], other.m_floats[0]); btSetMin(m_floats[1], other.m_floats[1]); btSetMin(m_floats[2], other.m_floats[2]); btSetMin(m_floats[3], other.w()); } SIMD_FORCE_INLINE void setValue(const btScalar& x, const btScalar& y, const btScalar& z) { m_floats[0] = x; m_floats[1] = y; m_floats[2] = z; m_floats[3] = btScalar(0.); } void getSkewSymmetricMatrix(btVector3 * v0, btVector3 * v1, btVector3 * v2) const { v0->setValue(0., -z(), y()); v1->setValue(z(), 0., -x()); v2->setValue(-y(), x(), 0.); } void setZero() { setValue(btScalar(0.), btScalar(0.), btScalar(0.)); } SIMD_FORCE_INLINE bool isZero() const { return m_floats[0] == btScalar(0) && m_floats[1] == btScalar(0) && m_floats[2] == btScalar(0); } SIMD_FORCE_INLINE bool fuzzyZero() const { return length2() < SIMD_EPSILON; } SIMD_FORCE_INLINE void serialize(struct btVector3Data & dataOut) const; SIMD_FORCE_INLINE void deSerialize(const struct btVector3Data& dataIn); SIMD_FORCE_INLINE void serializeFloat(struct btVector3FloatData & dataOut) const; SIMD_FORCE_INLINE void deSerializeFloat(const struct btVector3FloatData& dataIn); SIMD_FORCE_INLINE void serializeDouble(struct btVector3DoubleData & dataOut) const; SIMD_FORCE_INLINE void deSerializeDouble(const struct btVector3DoubleData& dataIn); }; /**@brief Return the sum of two vectors (Point symantics)*/ SIMD_FORCE_INLINE btVector3 operator+(const btVector3& v1, const btVector3& v2) { return btVector3(v1.m_floats[0] + v2.m_floats[0], v1.m_floats[1] + v2.m_floats[1], v1.m_floats[2] + v2.m_floats[2]); } /**@brief Return the elementwise product of two vectors */ SIMD_FORCE_INLINE btVector3 operator*(const btVector3& v1, const btVector3& v2) { return btVector3(v1.m_floats[0] * v2.m_floats[0], v1.m_floats[1] * v2.m_floats[1], v1.m_floats[2] * v2.m_floats[2]); } /**@brief Return the difference between two vectors */ SIMD_FORCE_INLINE btVector3 operator-(const btVector3& v1, const btVector3& v2) { return btVector3(v1.m_floats[0] - v2.m_floats[0], v1.m_floats[1] - v2.m_floats[1], v1.m_floats[2] - v2.m_floats[2]); } /**@brief Return the negative of the vector */ SIMD_FORCE_INLINE btVector3 operator-(const btVector3& v) { return btVector3(-v.m_floats[0], -v.m_floats[1], -v.m_floats[2]); } /**@brief Return the vector scaled by s */ SIMD_FORCE_INLINE btVector3 operator*(const btVector3& v, const btScalar& s) { return btVector3(v.m_floats[0] * s, v.m_floats[1] * s, v.m_floats[2] * s); } /**@brief Return the vector scaled by s */ SIMD_FORCE_INLINE btVector3 operator*(const btScalar& 
s, const btVector3& v) { return v * s; } /**@brief Return the vector inversely scaled by s */ SIMD_FORCE_INLINE btVector3 operator/(const btVector3& v, const btScalar& s) { btFullAssert(s != btScalar(0.0)); return v * (btScalar(1.0) / s); } /**@brief Return the vector inversely scaled by s */ SIMD_FORCE_INLINE btVector3 operator/(const btVector3& v1, const btVector3& v2) { return btVector3(v1.m_floats[0] / v2.m_floats[0], v1.m_floats[1] / v2.m_floats[1], v1.m_floats[2] / v2.m_floats[2]); } /**@brief Return the dot product between two vectors */ SIMD_FORCE_INLINE btScalar btDot(const btVector3& v1, const btVector3& v2) { return v1.dot(v2); } /**@brief Return the distance squared between two vectors */ SIMD_FORCE_INLINE btScalar btDistance2(const btVector3& v1, const btVector3& v2) { return v1.distance2(v2); } /**@brief Return the distance between two vectors */ SIMD_FORCE_INLINE btScalar btDistance(const btVector3& v1, const btVector3& v2) { return v1.distance(v2); } /**@brief Return the angle between two vectors */ SIMD_FORCE_INLINE btScalar btAngle(const btVector3& v1, const btVector3& v2) { return v1.angle(v2); } /**@brief Return the cross product of two vectors */ SIMD_FORCE_INLINE btVector3 btCross(const btVector3& v1, const btVector3& v2) { return v1.cross(v2); } SIMD_FORCE_INLINE btScalar btTriple(const btVector3& v1, const btVector3& v2, const btVector3& v3) { return v1.triple(v2, v3); } /**@brief Return the linear interpolation between two vectors * @param v1 One vector * @param v2 The other vector * @param t The ration of this to v (t = 0 => return v1, t=1 => return v2) */ SIMD_FORCE_INLINE btVector3 lerp(const btVector3& v1, const btVector3& v2, const btScalar& t) { return v1.lerp(v2, t); } SIMD_FORCE_INLINE btScalar btVector3::distance2(const btVector3& v) const { return (v - *this).length2(); } SIMD_FORCE_INLINE btScalar btVector3::distance(const btVector3& v) const { return (v - *this).length(); } SIMD_FORCE_INLINE btVector3 btVector3::normalized() const { return *this / length(); } SIMD_FORCE_INLINE btVector3 btVector3::rotate(const btVector3& wAxis, const btScalar angle) const { // wAxis must be a unit lenght vector btVector3 o = wAxis * wAxis.dot(*this); btVector3 x = *this - o; btVector3 y; y = wAxis.cross(*this); return (o + x * btCos(angle) + y * btSin(angle)); } class btVector4 : public btVector3 { public: SIMD_FORCE_INLINE btVector4() {} SIMD_FORCE_INLINE btVector4(const btScalar& x, const btScalar& y, const btScalar& z, const btScalar& w) : btVector3(x, y, z) { m_floats[3] = w; } SIMD_FORCE_INLINE btVector4 absolute4() const { return btVector4( btFabs(m_floats[0]), btFabs(m_floats[1]), btFabs(m_floats[2]), btFabs(m_floats[3])); } btScalar getW() const { return m_floats[3]; } SIMD_FORCE_INLINE int32_t maxAxis4() const { int32_t maxIndex = -1; btScalar maxVal = btScalar(-BT_LARGE_FLOAT); if (m_floats[0] > maxVal) { maxIndex = 0; maxVal = m_floats[0]; } if (m_floats[1] > maxVal) { maxIndex = 1; maxVal = m_floats[1]; } if (m_floats[2] > maxVal) { maxIndex = 2; maxVal = m_floats[2]; } if (m_floats[3] > maxVal) { maxIndex = 3; } return maxIndex; } SIMD_FORCE_INLINE int32_t minAxis4() const { int32_t minIndex = -1; btScalar minVal = btScalar(BT_LARGE_FLOAT); if (m_floats[0] < minVal) { minIndex = 0; minVal = m_floats[0]; } if (m_floats[1] < minVal) { minIndex = 1; minVal = m_floats[1]; } if (m_floats[2] < minVal) { minIndex = 2; minVal = m_floats[2]; } if (m_floats[3] < minVal) { minIndex = 3; } return minIndex; } SIMD_FORCE_INLINE int32_t closestAxis4() const { return 
absolute4().maxAxis4(); } /**@brief Set x,y,z and zero w * @param x Value of x * @param y Value of y * @param z Value of z */ /* void getValue(btScalar *m) const { m[0] = m_floats[0]; m[1] = m_floats[1]; m[2] =m_floats[2]; } */ /**@brief Set the values * @param x Value of x * @param y Value of y * @param z Value of z * @param w Value of w */ SIMD_FORCE_INLINE void setValue(const btScalar& x, const btScalar& y, const btScalar& z, const btScalar& w) { m_floats[0] = x; m_floats[1] = y; m_floats[2] = z; m_floats[3] = w; } }; ///btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization SIMD_FORCE_INLINE void btSwapScalarEndian(const btScalar& sourceVal, btScalar& destVal) { #ifdef BT_USE_DOUBLE_PRECISION unsigned char* dest = (unsigned char*)&destVal; unsigned char* src = (unsigned char*)&sourceVal; dest[0] = src[7]; dest[1] = src[6]; dest[2] = src[5]; dest[3] = src[4]; dest[4] = src[3]; dest[5] = src[2]; dest[6] = src[1]; dest[7] = src[0]; #else unsigned char* dest = (unsigned char*)&destVal; unsigned char* src = (unsigned char*)&sourceVal; dest[0] = src[3]; dest[1] = src[2]; dest[2] = src[1]; dest[3] = src[0]; #endif //BT_USE_DOUBLE_PRECISION } ///btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization SIMD_FORCE_INLINE void btSwapVector3Endian(const btVector3& sourceVec, btVector3& destVec) { for (int32_t i = 0; i < 4; i++) { btSwapScalarEndian(sourceVec[i], destVec[i]); } } ///btUnSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization SIMD_FORCE_INLINE void btUnSwapVector3Endian(btVector3& vector) { btVector3 swappedVec; for (int32_t i = 0; i < 4; i++) { btSwapScalarEndian(vector[i], swappedVec[i]); } vector = swappedVec; } template <class T> SIMD_FORCE_INLINE void btPlaneSpace1(const T& n, T& p, T& q) { if (btFabs(n[2]) > SIMDSQRT12) { // choose p in y-z plane btScalar a = n[1] * n[1] + n[2] * n[2]; btScalar k = btRecipSqrt(a); p[0] = 0; p[1] = -n[2] * k; p[2] = n[1] * k; // set q = n x p q[0] = a * k; q[1] = -n[0] * p[2]; q[2] = n[0] * p[1]; } else { // choose p in x-y plane btScalar a = n[0] * n[0] + n[1] * n[1]; btScalar k = btRecipSqrt(a); p[0] = -n[1] * k; p[1] = n[0] * k; p[2] = 0; // set q = n x p q[0] = -n[2] * p[1]; q[1] = n[2] * p[0]; q[2] = a * k; } } struct btVector3FloatData { float m_floats[4]; }; struct btVector3DoubleData { double m_floats[4]; }; SIMD_FORCE_INLINE void btVector3::serializeFloat(struct btVector3FloatData& dataOut) const { ///could also do a memcpy, check if it is worth it for (int32_t i = 0; i < 4; i++) dataOut.m_floats[i] = float(m_floats[i]); } SIMD_FORCE_INLINE void btVector3::deSerializeFloat(const struct btVector3FloatData& dataIn) { for (int32_t i = 0; i < 4; i++) m_floats[i] = btScalar(dataIn.m_floats[i]); } SIMD_FORCE_INLINE void btVector3::serializeDouble(struct btVector3DoubleData& dataOut) const { ///could also do a memcpy, check if it is worth it for (int32_t i = 0; i < 4; i++) dataOut.m_floats[i] = double(m_floats[i]); } SIMD_FORCE_INLINE void btVector3::deSerializeDouble(const struct btVector3DoubleData& dataIn) { for (int32_t i = 0; i < 4; i++) m_floats[i] = btScalar(dataIn.m_floats[i]); } SIMD_FORCE_INLINE void btVector3::serialize(struct btVector3Data& dataOut) const { ///could also do a memcpy, check if it is worth it for (int32_t i = 0; i < 4; i++) dataOut.m_floats[i] = m_floats[i]; } SIMD_FORCE_INLINE void btVector3::deSerialize(const struct btVector3Data& dataIn) { for (int32_t i = 0; i < 4; i++) m_floats[i] = 
dataIn.m_floats[i]; } #endif //BT_VECTOR3_H
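A minimal usage sketch (not part of the header) of the btVector3 helpers defined above; main() and the include name are assumed for illustration.

#include <cstdio>
#include "btVector3.h"

int main()
{
    btVector3 a(1, 0, 0);
    btVector3 b(0, 1, 0);

    btScalar  d = a.dot(b);                      // 0
    btVector3 c = a.cross(b);                    // (0, 0, 1)
    btVector3 m = lerp(a, b, btScalar(0.5));     // midpoint (0.5, 0.5, 0)

    // Rotate 'a' by 90 degrees around the z axis; the axis must be unit length.
    btVector3 r = a.rotate(btVector3(0, 0, 1), SIMD_HALF_PI);

    // Build two vectors spanning the plane orthogonal to a normal.
    btVector3 n(0, 0, 1), p, q;
    btPlaneSpace1(n, p, q);

    printf("dot=%f cross=(%f,%f,%f) lerp=(%f,%f,%f) rotated=(%f,%f,%f)\n",
           (double)d,
           (double)c.x(), (double)c.y(), (double)c.z(),
           (double)m.x(), (double)m.y(), (double)m.z(),
           (double)r.x(), (double)r.y(), (double)r.z());
    return 0;
}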
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdVHACD.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_VHACD_H #define VHACD_VHACD_H #ifdef OPENCL_FOUND #ifdef __MACH__ #include <OpenCL/cl.h> #else #include <CL/cl.h> #endif #endif //OPENCL_FOUND #include "vhacdMutex.h" #include "vhacdVolume.h" #include "vhacdRaycastMesh.h" #include <vector> typedef std::vector< VHACD::IVHACD::Constraint > ConstraintVector; #define USE_THREAD 1 #define OCL_MIN_NUM_PRIMITIVES 4096 #define CH_APP_MIN_NUM_PRIMITIVES 64000 namespace VHACD { class VHACD : public IVHACD { public: //! Constructor. VHACD() { #if USE_THREAD == 1 && _OPENMP m_ompNumProcessors = 2 * omp_get_num_procs(); omp_set_num_threads(m_ompNumProcessors); #else //USE_THREAD == 1 && _OPENMP m_ompNumProcessors = 1; #endif //USE_THREAD == 1 && _OPENMP #ifdef CL_VERSION_1_1 m_oclWorkGroupSize = 0; m_oclDevice = 0; m_oclQueue = 0; m_oclKernelComputePartialVolumes = 0; m_oclKernelComputeSum = 0; #endif //CL_VERSION_1_1 Init(); } //! Destructor. 
~VHACD(void) { } uint32_t GetNConvexHulls() const { return (uint32_t)m_convexHulls.Size(); } void Cancel() { SetCancel(true); } void GetConvexHull(const uint32_t index, ConvexHull& ch) const { Mesh* mesh = m_convexHulls[index]; ch.m_nPoints = (uint32_t)mesh->GetNPoints(); ch.m_nTriangles = (uint32_t)mesh->GetNTriangles(); ch.m_points = mesh->GetPoints(); ch.m_triangles = (uint32_t *)mesh->GetTriangles(); ch.m_volume = mesh->ComputeVolume(); Vec3<double> &center = mesh->ComputeCenter(); ch.m_center[0] = center.X(); ch.m_center[1] = center.Y(); ch.m_center[2] = center.Z(); } void Clean(void) { if (mRaycastMesh) { mRaycastMesh->release(); mRaycastMesh = nullptr; } delete m_volume; delete m_pset; size_t nCH = m_convexHulls.Size(); for (size_t p = 0; p < nCH; ++p) { delete m_convexHulls[p]; } m_convexHulls.Clear(); Init(); } void Release(void) { delete this; } bool Compute(const float* const points, const uint32_t nPoints, const uint32_t* const triangles, const uint32_t nTriangles, const Parameters& params); bool Compute(const double* const points, const uint32_t nPoints, const uint32_t* const triangles, const uint32_t nTriangles, const Parameters& params); bool OCLInit(void* const oclDevice, IUserLogger* const logger = 0); bool OCLRelease(IUserLogger* const logger = 0); virtual bool ComputeCenterOfMass(double centerOfMass[3]) const; // Will analyze the HACD results and compute the constraints solutions. // It will analyze the point at which any two convex hulls touch each other and // return the total number of constraint pairs found virtual uint32_t ComputeConstraints(void); // Returns a pointer to the constraint index; null if the index is not valid or // the user did not previously call 'ComputeConstraints' virtual const Constraint *GetConstraint(uint32_t index) const; private: void SetCancel(bool cancel) { m_cancelMutex.Lock(); m_cancel = cancel; m_cancelMutex.Unlock(); } bool GetCancel() { m_cancelMutex.Lock(); bool cancel = m_cancel; m_cancelMutex.Unlock(); return cancel; } void Update(const double stageProgress, const double operationProgress, const Parameters& params) { m_stageProgress = stageProgress; m_operationProgress = operationProgress; if (params.m_callback) { params.m_callback->Update(m_overallProgress, m_stageProgress, m_operationProgress, m_stage.c_str(), m_operation.c_str()); } } void Init() { if (mRaycastMesh) { mRaycastMesh->release(); mRaycastMesh = nullptr; } memset(m_rot, 0, sizeof(double) * 9); m_dim = 64; m_volume = 0; m_volumeCH0 = 0.0; m_pset = 0; m_overallProgress = 0.0; m_stageProgress = 0.0; m_operationProgress = 0.0; m_stage = ""; m_operation = ""; m_barycenter[0] = m_barycenter[1] = m_barycenter[2] = 0.0; m_rot[0][0] = m_rot[1][1] = m_rot[2][2] = 1.0; SetCancel(false); } void ComputePrimitiveSet(const Parameters& params); void ComputeACD(const Parameters& params); void MergeConvexHulls(const Parameters& params); void SimplifyConvexHull(Mesh* const ch, const size_t nvertices, const double minVolume); void SimplifyConvexHulls(const Parameters& params); void ComputeBestClippingPlane(const PrimitiveSet* inputPSet, const double volume, const SArray<Plane>& planes, const Vec3<double>& preferredCuttingDirection, const double w, const double alpha, const double beta, const int32_t convexhullDownsampling, const double progress0, const double progress1, Plane& bestPlane, double& minConcavity, const Parameters& params); template <class T> void AlignMesh(const T* const points, const uint32_t stridePoints, const uint32_t nPoints, const int32_t* const triangles, const 
uint32_t strideTriangles, const uint32_t nTriangles, const Parameters& params) { if (GetCancel() || !params.m_pca) { return; } m_timer.Tic(); m_stage = "Align mesh"; m_operation = "Voxelization"; std::ostringstream msg; if (params.m_logger) { msg << "+ " << m_stage << std::endl; params.m_logger->Log(msg.str().c_str()); } Update(0.0, 0.0, params); if (GetCancel()) { return; } m_dim = (size_t)(pow((double)params.m_resolution, 1.0 / 3.0) + 0.5); Volume volume; volume.Voxelize(points, stridePoints, nPoints, triangles, strideTriangles, nTriangles, m_dim, m_barycenter, m_rot); size_t n = volume.GetNPrimitivesOnSurf() + volume.GetNPrimitivesInsideSurf(); Update(50.0, 100.0, params); if (params.m_logger) { msg.str(""); msg << "\t dim = " << m_dim << "\t-> " << n << " voxels" << std::endl; params.m_logger->Log(msg.str().c_str()); } if (GetCancel()) { return; } m_operation = "PCA"; Update(50.0, 0.0, params); volume.AlignToPrincipalAxes(m_rot); m_overallProgress = 1.0; Update(100.0, 100.0, params); m_timer.Toc(); if (params.m_logger) { msg.str(""); msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl; params.m_logger->Log(msg.str().c_str()); } } template <class T> void VoxelizeMesh(const T* const points, const uint32_t stridePoints, const uint32_t nPoints, const int32_t* const triangles, const uint32_t strideTriangles, const uint32_t nTriangles, const Parameters& params) { if (GetCancel()) { return; } m_timer.Tic(); m_stage = "Voxelization"; std::ostringstream msg; if (params.m_logger) { msg << "+ " << m_stage << std::endl; params.m_logger->Log(msg.str().c_str()); } delete m_volume; m_volume = 0; int32_t iteration = 0; const int32_t maxIteration = 5; double progress = 0.0; while (iteration++ < maxIteration && !m_cancel) { msg.str(""); msg << "Iteration " << iteration; m_operation = msg.str(); progress = iteration * 100.0 / maxIteration; Update(progress, 0.0, params); m_volume = new Volume; m_volume->Voxelize(points, stridePoints, nPoints, triangles, strideTriangles, nTriangles, m_dim, m_barycenter, m_rot); Update(progress, 100.0, params); size_t n = m_volume->GetNPrimitivesOnSurf() + m_volume->GetNPrimitivesInsideSurf(); if (params.m_logger) { msg.str(""); msg << "\t dim = " << m_dim << "\t-> " << n << " voxels" << std::endl; params.m_logger->Log(msg.str().c_str()); } double a = pow((double)(params.m_resolution) / n, 0.33); size_t dim_next = (size_t)(m_dim * a + 0.5); if (n < params.m_resolution && iteration < maxIteration && m_volume->GetNPrimitivesOnSurf() < params.m_resolution / 8 && m_dim != dim_next) { delete m_volume; m_volume = 0; m_dim = dim_next; } else { break; } } m_overallProgress = 10.0; Update(100.0, 100.0, params); m_timer.Toc(); if (params.m_logger) { msg.str(""); msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl; params.m_logger->Log(msg.str().c_str()); } } template <class T> bool ComputeACD(const T* const points, const uint32_t nPoints, const uint32_t* const triangles, const uint32_t nTriangles, const Parameters& params) { Init(); if (params.m_projectHullVertices) { mRaycastMesh = RaycastMesh::createRaycastMesh(nPoints, points, nTriangles, (const uint32_t *)triangles); } if (params.m_oclAcceleration) { // build kernels } AlignMesh(points, 3, nPoints, (int32_t *)triangles, 3, nTriangles, params); VoxelizeMesh(points, 3, nPoints, (int32_t *)triangles, 3, nTriangles, params); ComputePrimitiveSet(params); ComputeACD(params); MergeConvexHulls(params); SimplifyConvexHulls(params); if (params.m_oclAcceleration) { // Release kernels } if 
(GetCancel()) { Clean(); return false; } return true; } private: RaycastMesh *mRaycastMesh{ nullptr }; SArray<Mesh*> m_convexHulls; std::string m_stage; std::string m_operation; double m_overallProgress; double m_stageProgress; double m_operationProgress; double m_rot[3][3]; double m_volumeCH0; Vec3<double> m_barycenter; Timer m_timer; size_t m_dim; Volume* m_volume; PrimitiveSet* m_pset; Mutex m_cancelMutex; bool m_cancel; int32_t m_ompNumProcessors; #ifdef CL_VERSION_1_1 cl_device_id* m_oclDevice; cl_context m_oclContext; cl_program m_oclProgram; cl_command_queue* m_oclQueue; cl_kernel* m_oclKernelComputePartialVolumes; cl_kernel* m_oclKernelComputeSum; size_t m_oclWorkGroupSize; #endif //CL_VERSION_1_1 ConstraintVector mConstraints; }; } #endif // VHACD_VHACD_H
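An illustrative sketch only (not the official entry point): the call sequence for running a decomposition with the VHACD class declared above. The public API normally hands out an IVHACD instance through a factory in the public interface header; instantiating VHACD directly and relying on a default-constructed IVHACD::Parameters are assumptions made purely for illustration, and decompose() is a hypothetical helper.

#include <cstdio>
#include "vhacdVHACD.h"

static void decompose(const float* points, uint32_t nPoints,
                      const uint32_t* triangles, uint32_t nTriangles)
{
    // Heap allocation is required because Release() performs 'delete this'.
    VHACD::VHACD* hacd = new VHACD::VHACD();
    VHACD::IVHACD::Parameters params;   // assumed to default-initialize resolution, concavity, etc.

    if (hacd->Compute(points, nPoints, triangles, nTriangles, params))
    {
        for (uint32_t i = 0; i < hacd->GetNConvexHulls(); ++i)
        {
            VHACD::IVHACD::ConvexHull ch;
            hacd->GetConvexHull(i, ch);
            printf("hull %u: %u points, %u triangles, volume %f\n",
                   i, ch.m_nPoints, ch.m_nTriangles, ch.m_volume);
        }
    }
    hacd->Release();   // destroys the decomposer
}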
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdVolume.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_VOLUME_H #define VHACD_VOLUME_H #include "vhacdMesh.h" #include "vhacdVector.h" #include <assert.h> #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable:4456 4701) #endif namespace VHACD { enum VOXEL_VALUE { PRIMITIVE_UNDEFINED = 0, PRIMITIVE_OUTSIDE_SURFACE = 1, PRIMITIVE_INSIDE_SURFACE = 2, PRIMITIVE_ON_SURFACE = 3 }; struct Voxel { public: short m_coord[3]; short m_data; }; class PrimitiveSet { public: virtual ~PrimitiveSet(){}; virtual PrimitiveSet* Create() const = 0; virtual const size_t GetNPrimitives() const = 0; virtual const size_t GetNPrimitivesOnSurf() const = 0; virtual const size_t GetNPrimitivesInsideSurf() const = 0; virtual const double GetEigenValue(AXIS axis) const = 0; virtual const double ComputeMaxVolumeError() const = 0; virtual const double ComputeVolume() const = 0; virtual void Clip(const Plane& plane, PrimitiveSet* const positivePart, PrimitiveSet* const negativePart) const = 0; virtual void Intersect(const Plane& plane, SArray<Vec3<double> >* const positivePts, SArray<Vec3<double> >* const negativePts, const size_t sampling) const = 0; virtual void ComputeExteriorPoints(const Plane& plane, const Mesh& mesh, SArray<Vec3<double> >* const exteriorPts) const = 0; virtual void ComputeClippedVolumes(const Plane& plane, double& positiveVolume, double& negativeVolume) const = 0; virtual void SelectOnSurface(PrimitiveSet* const onSurfP) const = 0; virtual void ComputeConvexHull(Mesh& meshCH, const size_t sampling = 1) const = 0; virtual void ComputeBB() = 0; virtual void ComputePrincipalAxes() = 0; virtual void AlignToPrincipalAxes() = 0; virtual void RevertAlignToPrincipalAxes() = 0; virtual void Convert(Mesh& mesh, const VOXEL_VALUE value) const = 0; const Mesh& GetConvexHull() const { return m_convexHull; }; Mesh& GetConvexHull() { return m_convexHull; }; private: Mesh m_convexHull; }; //! class VoxelSet : public PrimitiveSet { friend class Volume; public: //! Destructor. ~VoxelSet(void); //! Constructor. 
VoxelSet(); const size_t GetNPrimitives() const { return m_voxels.Size(); } const size_t GetNPrimitivesOnSurf() const { return m_numVoxelsOnSurface; } const size_t GetNPrimitivesInsideSurf() const { return m_numVoxelsInsideSurface; } const double GetEigenValue(AXIS axis) const { return m_D[axis][axis]; } const double ComputeVolume() const { return m_unitVolume * m_voxels.Size(); } const double ComputeMaxVolumeError() const { return m_unitVolume * m_numVoxelsOnSurface; } const Vec3<short>& GetMinBBVoxels() const { return m_minBBVoxels; } const Vec3<short>& GetMaxBBVoxels() const { return m_maxBBVoxels; } const Vec3<double>& GetMinBB() const { return m_minBB; } const double& GetScale() const { return m_scale; } const double& GetUnitVolume() const { return m_unitVolume; } Vec3<double> GetPoint(Vec3<short> voxel) const { return Vec3<double>(voxel[0] * m_scale + m_minBB[0], voxel[1] * m_scale + m_minBB[1], voxel[2] * m_scale + m_minBB[2]); } Vec3<double> GetPoint(const Voxel& voxel) const { return Vec3<double>(voxel.m_coord[0] * m_scale + m_minBB[0], voxel.m_coord[1] * m_scale + m_minBB[1], voxel.m_coord[2] * m_scale + m_minBB[2]); } Vec3<double> GetPoint(Vec3<double> voxel) const { return Vec3<double>(voxel[0] * m_scale + m_minBB[0], voxel[1] * m_scale + m_minBB[1], voxel[2] * m_scale + m_minBB[2]); } void GetPoints(const Voxel& voxel, Vec3<double>* const pts) const; void ComputeConvexHull(Mesh& meshCH, const size_t sampling = 1) const; void Clip(const Plane& plane, PrimitiveSet* const positivePart, PrimitiveSet* const negativePart) const; void Intersect(const Plane& plane, SArray<Vec3<double> >* const positivePts, SArray<Vec3<double> >* const negativePts, const size_t sampling) const; void ComputeExteriorPoints(const Plane& plane, const Mesh& mesh, SArray<Vec3<double> >* const exteriorPts) const; void ComputeClippedVolumes(const Plane& plane, double& positiveVolume, double& negativeVolume) const; void SelectOnSurface(PrimitiveSet* const onSurfP) const; void ComputeBB(); void Convert(Mesh& mesh, const VOXEL_VALUE value) const; void ComputePrincipalAxes(); PrimitiveSet* Create() const { return new VoxelSet(); } void AlignToPrincipalAxes(){}; void RevertAlignToPrincipalAxes(){}; Voxel* const GetVoxels() { return m_voxels.Data(); } const Voxel* const GetVoxels() const { return m_voxels.Data(); } private: size_t m_numVoxelsOnSurface; size_t m_numVoxelsInsideSurface; Vec3<double> m_minBB; double m_scale; SArray<Voxel, 8> m_voxels; double m_unitVolume; Vec3<double> m_minBBPts; Vec3<double> m_maxBBPts; Vec3<short> m_minBBVoxels; Vec3<short> m_maxBBVoxels; Vec3<short> m_barycenter; double m_Q[3][3]; double m_D[3][3]; Vec3<double> m_barycenterPCA; }; struct Tetrahedron { public: Vec3<double> m_pts[4]; unsigned char m_data; }; //! class TetrahedronSet : public PrimitiveSet { friend class Volume; public: //! Destructor. ~TetrahedronSet(void); //! Constructor. 
TetrahedronSet(); const size_t GetNPrimitives() const { return m_tetrahedra.Size(); } const size_t GetNPrimitivesOnSurf() const { return m_numTetrahedraOnSurface; } const size_t GetNPrimitivesInsideSurf() const { return m_numTetrahedraInsideSurface; } const Vec3<double>& GetMinBB() const { return m_minBB; } const Vec3<double>& GetMaxBB() const { return m_maxBB; } const Vec3<double>& GetBarycenter() const { return m_barycenter; } const double GetEigenValue(AXIS axis) const { return m_D[axis][axis]; } const double GetSacle() const { return m_scale; } const double ComputeVolume() const; const double ComputeMaxVolumeError() const; void ComputeConvexHull(Mesh& meshCH, const size_t sampling = 1) const; void ComputePrincipalAxes(); void AlignToPrincipalAxes(); void RevertAlignToPrincipalAxes(); void Clip(const Plane& plane, PrimitiveSet* const positivePart, PrimitiveSet* const negativePart) const; void Intersect(const Plane& plane, SArray<Vec3<double> >* const positivePts, SArray<Vec3<double> >* const negativePts, const size_t sampling) const; void ComputeExteriorPoints(const Plane& plane, const Mesh& mesh, SArray<Vec3<double> >* const exteriorPts) const; void ComputeClippedVolumes(const Plane& plane, double& positiveVolume, double& negativeVolume) const; void SelectOnSurface(PrimitiveSet* const onSurfP) const; void ComputeBB(); void Convert(Mesh& mesh, const VOXEL_VALUE value) const; inline bool Add(Tetrahedron& tetrahedron); PrimitiveSet* Create() const { return new TetrahedronSet(); } static const double EPS; private: void AddClippedTetrahedra(const Vec3<double> (&pts)[10], const int32_t nPts); size_t m_numTetrahedraOnSurface; size_t m_numTetrahedraInsideSurface; double m_scale; Vec3<double> m_minBB; Vec3<double> m_maxBB; Vec3<double> m_barycenter; SArray<Tetrahedron, 8> m_tetrahedra; double m_Q[3][3]; double m_D[3][3]; }; //! class Volume { public: //! Destructor. ~Volume(void); //! Constructor. Volume(); //! Voxelize template <class T> void Voxelize(const T* const points, const uint32_t stridePoints, const uint32_t nPoints, const int32_t* const triangles, const uint32_t strideTriangles, const uint32_t nTriangles, const size_t dim, const Vec3<double>& barycenter, const double (&rot)[3][3]); unsigned char& GetVoxel(const size_t i, const size_t j, const size_t k) { assert(i < m_dim[0] || i >= 0); assert(j < m_dim[0] || j >= 0); assert(k < m_dim[0] || k >= 0); return m_data[i + j * m_dim[0] + k * m_dim[0] * m_dim[1]]; } const unsigned char& GetVoxel(const size_t i, const size_t j, const size_t k) const { assert(i < m_dim[0] || i >= 0); assert(j < m_dim[0] || j >= 0); assert(k < m_dim[0] || k >= 0); return m_data[i + j * m_dim[0] + k * m_dim[0] * m_dim[1]]; } const size_t GetNPrimitivesOnSurf() const { return m_numVoxelsOnSurface; } const size_t GetNPrimitivesInsideSurf() const { return m_numVoxelsInsideSurface; } void Convert(Mesh& mesh, const VOXEL_VALUE value) const; void Convert(VoxelSet& vset) const; void Convert(TetrahedronSet& tset) const; void AlignToPrincipalAxes(double (&rot)[3][3]) const; private: void FillOutsideSurface(const size_t i0, const size_t j0, const size_t k0, const size_t i1, const size_t j1, const size_t k1); void FillInsideSurface(); template <class T> void ComputeBB(const T* const points, const uint32_t stridePoints, const uint32_t nPoints, const Vec3<double>& barycenter, const double (&rot)[3][3]); void Allocate(); void Free(); Vec3<double> m_minBB; Vec3<double> m_maxBB; double m_scale; size_t m_dim[3]; //>! 
dim size_t m_numVoxelsOnSurface; size_t m_numVoxelsInsideSurface; size_t m_numVoxelsOutsideSurface; unsigned char* m_data; }; int32_t TriBoxOverlap(const Vec3<double>& boxcenter, const Vec3<double>& boxhalfsize, const Vec3<double>& triver0, const Vec3<double>& triver1, const Vec3<double>& triver2); template <class T> inline void ComputeAlignedPoint(const T* const points, const uint32_t idx, const Vec3<double>& barycenter, const double (&rot)[3][3], Vec3<double>& pt){}; template <> inline void ComputeAlignedPoint<float>(const float* const points, const uint32_t idx, const Vec3<double>& barycenter, const double (&rot)[3][3], Vec3<double>& pt) { double x = points[idx + 0] - barycenter[0]; double y = points[idx + 1] - barycenter[1]; double z = points[idx + 2] - barycenter[2]; pt[0] = rot[0][0] * x + rot[1][0] * y + rot[2][0] * z; pt[1] = rot[0][1] * x + rot[1][1] * y + rot[2][1] * z; pt[2] = rot[0][2] * x + rot[1][2] * y + rot[2][2] * z; } template <> inline void ComputeAlignedPoint<double>(const double* const points, const uint32_t idx, const Vec3<double>& barycenter, const double (&rot)[3][3], Vec3<double>& pt) { double x = points[idx + 0] - barycenter[0]; double y = points[idx + 1] - barycenter[1]; double z = points[idx + 2] - barycenter[2]; pt[0] = rot[0][0] * x + rot[1][0] * y + rot[2][0] * z; pt[1] = rot[0][1] * x + rot[1][1] * y + rot[2][1] * z; pt[2] = rot[0][2] * x + rot[1][2] * y + rot[2][2] * z; } template <class T> void Volume::ComputeBB(const T* const points, const uint32_t stridePoints, const uint32_t nPoints, const Vec3<double>& barycenter, const double (&rot)[3][3]) { Vec3<double> pt; ComputeAlignedPoint(points, 0, barycenter, rot, pt); m_maxBB = pt; m_minBB = pt; for (uint32_t v = 1; v < nPoints; ++v) { ComputeAlignedPoint(points, v * stridePoints, barycenter, rot, pt); for (int32_t i = 0; i < 3; ++i) { if (pt[i] < m_minBB[i]) m_minBB[i] = pt[i]; else if (pt[i] > m_maxBB[i]) m_maxBB[i] = pt[i]; } } } template <class T> void Volume::Voxelize(const T* const points, const uint32_t stridePoints, const uint32_t nPoints, const int32_t* const triangles, const uint32_t strideTriangles, const uint32_t nTriangles, const size_t dim, const Vec3<double>& barycenter, const double (&rot)[3][3]) { if (nPoints == 0) { return; } ComputeBB(points, stridePoints, nPoints, barycenter, rot); double d[3] = { m_maxBB[0] - m_minBB[0], m_maxBB[1] - m_minBB[1], m_maxBB[2] - m_minBB[2] }; double r; if (d[0] > d[1] && d[0] > d[2]) { r = d[0]; m_dim[0] = dim; m_dim[1] = 2 + static_cast<size_t>(dim * d[1] / d[0]); m_dim[2] = 2 + static_cast<size_t>(dim * d[2] / d[0]); } else if (d[1] > d[0] && d[1] > d[2]) { r = d[1]; m_dim[1] = dim; m_dim[0] = 2 + static_cast<size_t>(dim * d[0] / d[1]); m_dim[2] = 2 + static_cast<size_t>(dim * d[2] / d[1]); } else { r = d[2]; m_dim[2] = dim; m_dim[0] = 2 + static_cast<size_t>(dim * d[0] / d[2]); m_dim[1] = 2 + static_cast<size_t>(dim * d[1] / d[2]); } m_scale = r / (dim - 1); double invScale = (dim - 1) / r; Allocate(); m_numVoxelsOnSurface = 0; m_numVoxelsInsideSurface = 0; m_numVoxelsOutsideSurface = 0; Vec3<double> p[3]; size_t i, j, k; size_t i0, j0, k0; size_t i1, j1, k1; Vec3<double> boxcenter; Vec3<double> pt; const Vec3<double> boxhalfsize(0.5, 0.5, 0.5); for (size_t t = 0, ti = 0; t < nTriangles; ++t, ti += strideTriangles) { Vec3<int32_t> tri(triangles[ti + 0], triangles[ti + 1], triangles[ti + 2]); for (int32_t c = 0; c < 3; ++c) { ComputeAlignedPoint(points, tri[c] * stridePoints, barycenter, rot, pt); p[c][0] = (pt[0] - m_minBB[0]) * invScale; p[c][1] = (pt[1] 
- m_minBB[1]) * invScale; p[c][2] = (pt[2] - m_minBB[2]) * invScale; i = static_cast<size_t>(p[c][0] + 0.5); j = static_cast<size_t>(p[c][1] + 0.5); k = static_cast<size_t>(p[c][2] + 0.5); assert(i < m_dim[0] && i >= 0 && j < m_dim[1] && j >= 0 && k < m_dim[2] && k >= 0); if (c == 0) { i0 = i1 = i; j0 = j1 = j; k0 = k1 = k; } else { if (i < i0) i0 = i; if (j < j0) j0 = j; if (k < k0) k0 = k; if (i > i1) i1 = i; if (j > j1) j1 = j; if (k > k1) k1 = k; } } if (i0 > 0) --i0; if (j0 > 0) --j0; if (k0 > 0) --k0; if (i1 < m_dim[0]) ++i1; if (j1 < m_dim[1]) ++j1; if (k1 < m_dim[2]) ++k1; for (size_t i = i0; i < i1; ++i) { boxcenter[0] = (double)i; for (size_t j = j0; j < j1; ++j) { boxcenter[1] = (double)j; for (size_t k = k0; k < k1; ++k) { boxcenter[2] = (double)k; int32_t res = TriBoxOverlap(boxcenter, boxhalfsize, p[0], p[1], p[2]); unsigned char& value = GetVoxel(i, j, k); if (res == 1 && value == PRIMITIVE_UNDEFINED) { value = PRIMITIVE_ON_SURFACE; ++m_numVoxelsOnSurface; } } } } } FillOutsideSurface(0, 0, 0, m_dim[0], m_dim[1], 1); FillOutsideSurface(0, 0, m_dim[2] - 1, m_dim[0], m_dim[1], m_dim[2]); FillOutsideSurface(0, 0, 0, m_dim[0], 1, m_dim[2]); FillOutsideSurface(0, m_dim[1] - 1, 0, m_dim[0], m_dim[1], m_dim[2]); FillOutsideSurface(0, 0, 0, 1, m_dim[1], m_dim[2]); FillOutsideSurface(m_dim[0] - 1, 0, 0, m_dim[0], m_dim[1], m_dim[2]); FillInsideSurface(); } } #ifdef _MSC_VER #pragma warning(pop) #endif #endif // VHACD_VOLUME_H
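A brief usage sketch (illustrative only): voxelizing a small mesh with VHACD::Volume and reading back the surface/interior voxel counts. The identity rotation and zero barycenter mirror what the decomposer sets up before PCA alignment; voxelizeExample() is a hypothetical helper and assumes vhacdVolume.cpp is linked in.

#include <cstdio>
#include "vhacdVolume.h"

static void voxelizeExample(const float* points, uint32_t nPoints,
                            const int32_t* triangles, uint32_t nTriangles)
{
    const double rot[3][3] = { { 1, 0, 0 }, { 0, 1, 0 }, { 0, 0, 1 } };
    VHACD::Vec3<double> barycenter(0.0, 0.0, 0.0);

    VHACD::Volume volume;
    volume.Voxelize(points, 3, nPoints, triangles, 3, nTriangles,
                    64 /* voxel resolution along the longest axis */, barycenter, rot);

    printf("voxels on surface: %zu, inside: %zu\n",
           volume.GetNPrimitivesOnSurf(), volume.GetNPrimitivesInsideSurf());
}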
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdTimer.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_TIMER_H #define VHACD_TIMER_H #ifdef _WIN32 #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers #endif #include <windows.h> #elif __MACH__ #include <mach/clock.h> #include <mach/mach.h> #else #include <sys/time.h> #include <time.h> #endif namespace VHACD { #ifdef _WIN32 class Timer { public: Timer(void) { m_start.QuadPart = 0; m_stop.QuadPart = 0; QueryPerformanceFrequency(&m_freq); }; ~Timer(void){}; void Tic() { QueryPerformanceCounter(&m_start); } void Toc() { QueryPerformanceCounter(&m_stop); } double GetElapsedTime() // in ms { LARGE_INTEGER delta; delta.QuadPart = m_stop.QuadPart - m_start.QuadPart; return (1000.0 * delta.QuadPart) / (double)m_freq.QuadPart; } private: LARGE_INTEGER m_start; LARGE_INTEGER m_stop; LARGE_INTEGER m_freq; }; #elif __MACH__ class Timer { public: Timer(void) { memset(this, 0, sizeof(Timer)); host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &m_cclock); }; ~Timer(void) { mach_port_deallocate(mach_task_self(), m_cclock); }; void Tic() { clock_get_time(m_cclock, &m_start); } void Toc() { clock_get_time(m_cclock, &m_stop); } double GetElapsedTime() // in ms { return 1000.0 * (m_stop.tv_sec - m_start.tv_sec + (1.0E-9) * (m_stop.tv_nsec - m_start.tv_nsec)); } private: clock_serv_t m_cclock; mach_timespec_t m_start; mach_timespec_t m_stop; }; #else class Timer { public: Timer(void) { memset(this, 0, sizeof(Timer)); }; ~Timer(void){}; void Tic() { clock_gettime(CLOCK_REALTIME, &m_start); } void Toc() { clock_gettime(CLOCK_REALTIME, &m_stop); } double GetElapsedTime() // in ms { return 1000.0 * (m_stop.tv_sec - m_start.tv_sec + (1.0E-9) * (m_stop.tv_nsec - m_start.tv_nsec)); } private: struct timespec m_start; struct timespec m_stop; }; #endif } #endif // VHACD_TIMER_H
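A minimal usage sketch: timing a block of work with the platform Timer wrapper above; main() is assumed for illustration.

#include <cstdio>
#include "vhacdTimer.h"

int main()
{
    VHACD::Timer timer;
    timer.Tic();

    volatile double acc = 0.0;                    // some work to measure
    for (int i = 0; i < 1000000; ++i)
        acc = acc + i * 0.5;

    timer.Toc();
    printf("elapsed: %f ms\n", timer.GetElapsedTime());
    return 0;
}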
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdManifoldMesh.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_MANIFOLD_MESH_H #define VHACD_MANIFOLD_MESH_H #include "vhacdCircularList.h" #include "vhacdSArray.h" #include "vhacdVector.h" namespace VHACD { class TMMTriangle; class TMMEdge; class TMMesh; class ICHull; //! Vertex data structure used in a triangular manifold mesh (TMM). class TMMVertex { public: void Initialize(); TMMVertex(void); ~TMMVertex(void); private: Vec3<double> m_pos; int32_t m_name; size_t m_id; CircularListElement<TMMEdge>* m_duplicate; // pointer to incident cone edge (or NULL) bool m_onHull; bool m_tag; TMMVertex(const TMMVertex& rhs); friend class ICHull; friend class TMMesh; friend class TMMTriangle; friend class TMMEdge; }; //! Edge data structure used in a triangular manifold mesh (TMM). class TMMEdge { public: void Initialize(); TMMEdge(void); ~TMMEdge(void); private: size_t m_id; CircularListElement<TMMTriangle>* m_triangles[2]; CircularListElement<TMMVertex>* m_vertices[2]; CircularListElement<TMMTriangle>* m_newFace; TMMEdge(const TMMEdge& rhs); friend class ICHull; friend class TMMTriangle; friend class TMMVertex; friend class TMMesh; }; //! Triangle data structure used in a triangular manifold mesh (TMM). class TMMTriangle { public: void Initialize(); TMMTriangle(void); ~TMMTriangle(void); private: size_t m_id; CircularListElement<TMMEdge>* m_edges[3]; CircularListElement<TMMVertex>* m_vertices[3]; bool m_visible; TMMTriangle(const TMMTriangle& rhs); friend class ICHull; friend class TMMesh; friend class TMMVertex; friend class TMMEdge; }; //! triangular manifold mesh data structure. class TMMesh { public: //! Returns the number of vertices> inline size_t GetNVertices() const { return m_vertices.GetSize(); } //! Returns the number of edges inline size_t GetNEdges() const { return m_edges.GetSize(); } //! Returns the number of triangles inline size_t GetNTriangles() const { return m_triangles.GetSize(); } //! Returns the vertices circular list inline const CircularList<TMMVertex>& GetVertices() const { return m_vertices; } //! 
Returns the edges circular list inline const CircularList<TMMEdge>& GetEdges() const { return m_edges; } //! Returns the triangles circular list inline const CircularList<TMMTriangle>& GetTriangles() const { return m_triangles; } //! Returns the vertices circular list inline CircularList<TMMVertex>& GetVertices() { return m_vertices; } //! Returns the edges circular list inline CircularList<TMMEdge>& GetEdges() { return m_edges; } //! Returns the triangles circular list inline CircularList<TMMTriangle>& GetTriangles() { return m_triangles; } //! Add vertex to the mesh CircularListElement<TMMVertex>* AddVertex() { return m_vertices.Add(); } //! Add vertex to the mesh CircularListElement<TMMEdge>* AddEdge() { return m_edges.Add(); } //! Add vertex to the mesh CircularListElement<TMMTriangle>* AddTriangle() { return m_triangles.Add(); } //! Print mesh information void Print(); //! void GetIFS(Vec3<double>* const points, Vec3<int32_t>* const triangles); //! void Clear(); //! void Copy(TMMesh& mesh); //! bool CheckConsistancy(); //! bool Normalize(); //! bool Denormalize(); //! Constructor TMMesh(); //! Destructor virtual ~TMMesh(void); private: CircularList<TMMVertex> m_vertices; CircularList<TMMEdge> m_edges; CircularList<TMMTriangle> m_triangles; // not defined TMMesh(const TMMesh& rhs); friend class ICHull; }; } #endif // VHACD_MANIFOLD_MESH_H
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btAlignedAllocator.h
/* Bullet Continuous Collision Detection and Physics Library Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/ This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #ifndef BT_ALIGNED_ALLOCATOR #define BT_ALIGNED_ALLOCATOR ///we probably replace this with our own aligned memory allocator ///so we replace _aligned_malloc and _aligned_free with our own ///that is better portable and more predictable #include "btScalar.h" //#define BT_DEBUG_MEMORY_ALLOCATIONS 1 #ifdef BT_DEBUG_MEMORY_ALLOCATIONS #define btAlignedAlloc(a, b) \ btAlignedAllocInternal(a, b, __LINE__, __FILE__) #define btAlignedFree(ptr) \ btAlignedFreeInternal(ptr, __LINE__, __FILE__) void* btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char* filename); void btAlignedFreeInternal(void* ptr, int32_t line, char* filename); #else void* btAlignedAllocInternal(size_t size, int32_t alignment); void btAlignedFreeInternal(void* ptr); #define btAlignedAlloc(size, alignment) btAlignedAllocInternal(size, alignment) #define btAlignedFree(ptr) btAlignedFreeInternal(ptr) #endif typedef int32_t size_type; typedef void*(btAlignedAllocFunc)(size_t size, int32_t alignment); typedef void(btAlignedFreeFunc)(void* memblock); typedef void*(btAllocFunc)(size_t size); typedef void(btFreeFunc)(void* memblock); ///The developer can let all Bullet memory allocations go through a custom memory allocator, using btAlignedAllocSetCustom void btAlignedAllocSetCustom(btAllocFunc* allocFunc, btFreeFunc* freeFunc); ///If the developer has already an custom aligned allocator, then btAlignedAllocSetCustomAligned can be used. The default aligned allocator pre-allocates extra memory using the non-aligned allocator, and instruments it. void btAlignedAllocSetCustomAligned(btAlignedAllocFunc* allocFunc, btAlignedFreeFunc* freeFunc); ///The btAlignedAllocator is a portable class for aligned memory allocations. ///Default implementations for unaligned and aligned allocations can be overridden by a custom allocator using btAlignedAllocSetCustom and btAlignedAllocSetCustomAligned. 
template <typename T, unsigned Alignment> class btAlignedAllocator { typedef btAlignedAllocator<T, Alignment> self_type; public: //just going down a list: btAlignedAllocator() {} /* btAlignedAllocator( const self_type & ) {} */ template <typename Other> btAlignedAllocator(const btAlignedAllocator<Other, Alignment>&) {} typedef const T* const_pointer; typedef const T& const_reference; typedef T* pointer; typedef T& reference; typedef T value_type; pointer address(reference ref) const { return &ref; } const_pointer address(const_reference ref) const { return &ref; } pointer allocate(size_type n, const_pointer* hint = 0) { (void)hint; return reinterpret_cast<pointer>(btAlignedAlloc(sizeof(value_type) * n, Alignment)); } void construct(pointer ptr, const value_type& value) { new (ptr) value_type(value); } void deallocate(pointer ptr) { btAlignedFree(reinterpret_cast<void*>(ptr)); } void destroy(pointer ptr) { ptr->~value_type(); } template <typename O> struct rebind { typedef btAlignedAllocator<O, Alignment> other; }; template <typename O> self_type& operator=(const btAlignedAllocator<O, Alignment>&) { return *this; } friend bool operator==(const self_type&, const self_type&) { return true; } }; #endif //BT_ALIGNED_ALLOCATOR
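// Illustrative usage sketch (minimal, using only the btAlignedAlloc/btAlignedFree macros
// declared above): every allocation is routed through the (optionally user-installed)
// aligned allocator.
//
//     void* block = btAlignedAlloc(1024, 16);   // 1 KiB, 16-byte aligned
//     // ... use block ...
//     btAlignedFree(block);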
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btConvexHullComputer.h
/* Copyright (c) 2011 Ole Kniemeyer, MAXON, www.maxon.net This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #ifndef BT_CONVEX_HULL_COMPUTER_H #define BT_CONVEX_HULL_COMPUTER_H #include "btAlignedObjectArray.h" #include "btVector3.h" /// Convex hull implementation based on Preparata and Hong /// See http://code.google.com/p/bullet/issues/detail?id=275 /// Ole Kniemeyer, MAXON Computer GmbH class btConvexHullComputer { private: btScalar compute(const void* coords, bool doubleCoords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp); public: class Edge { private: int32_t next; int32_t reverse; int32_t targetVertex; friend class btConvexHullComputer; public: int32_t getSourceVertex() const { return (this + reverse)->targetVertex; } int32_t getTargetVertex() const { return targetVertex; } const Edge* getNextEdgeOfVertex() const // clockwise list of all edges of a vertex { return this + next; } const Edge* getNextEdgeOfFace() const // counter-clockwise list of all edges of a face { return (this + reverse)->getNextEdgeOfVertex(); } const Edge* getReverseEdge() const { return this + reverse; } }; // Vertices of the output hull btAlignedObjectArray<btVector3> vertices; // Edges of the output hull btAlignedObjectArray<Edge> edges; // Faces of the convex hull. Each entry is an index into the "edges" array pointing to an edge of the face. Faces are planar n-gons btAlignedObjectArray<int32_t> faces; /* Compute convex hull of "count" vertices stored in "coords". "stride" is the difference in bytes between the addresses of consecutive vertices. If "shrink" is positive, the convex hull is shrunken by that amount (each face is moved by "shrink" length units towards the center along its normal). If "shrinkClamp" is positive, "shrink" is clamped to not exceed "shrinkClamp * innerRadius", where "innerRadius" is the minimum distance of a face to the center of the convex hull. The returned value is the amount by which the hull has been shrunken. If it is negative, the amount was so large that the resulting convex hull is empty. The output convex hull can be found in the member variables "vertices", "edges", "faces". */ btScalar compute(const float* coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp) { return compute(coords, false, stride, count, shrink, shrinkClamp); } // same as above, but double precision btScalar compute(const double* coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp) { return compute(coords, true, stride, count, shrink, shrinkClamp); } }; #endif //BT_CONVEX_HULL_COMPUTER_H
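// Illustrative usage sketch (the input vertex array below is hypothetical): computing the
// hull of a tightly packed float array and walking one face through its edge list, using
// only the members declared above. "faces" stores, per face, the index of one of its
// edges; getNextEdgeOfFace() then enumerates the remaining edges of that face.
//
//     btConvexHullComputer hull;
//     // 8 vertices, 3 floats each, stride = 3 * sizeof(float), no shrinking
//     hull.compute(inputVertices, 3 * sizeof(float), 8, 0.0f, 0.0f);
//     for (int32_t f = 0; f < hull.faces.size(); ++f)
//     {
//         const btConvexHullComputer::Edge* first = &hull.edges[hull.faces[f]];
//         const btConvexHullComputer::Edge* e = first;
//         do
//         {
//             const btVector3& v = hull.vertices[e->getTargetVertex()];
//             // ... consume vertex v ...
//             e = e->getNextEdgeOfFace();
//         } while (e != first);
//     }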
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdSArray.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_SARRAY_H #define VHACD_SARRAY_H #include <stdio.h> #include <stdlib.h> #include <string.h> #define SARRAY_DEFAULT_MIN_SIZE 16 namespace VHACD { //! SArray. template <typename T, size_t N = 64> class SArray { public: T& operator[](size_t i) { T* const data = Data(); return data[i]; } const T& operator[](size_t i) const { const T* const data = Data(); return data[i]; } size_t Size() const { return m_size; } T* const Data() { return (m_maxSize == N) ? m_data0 : m_data; } const T* const Data() const { return (m_maxSize == N) ? 
m_data0 : m_data; } void Clear() { m_size = 0; delete[] m_data; m_data = 0; m_maxSize = N; } void PopBack() { --m_size; } void Allocate(size_t size) { if (size > m_maxSize) { T* temp = new T[size]; memcpy(temp, Data(), m_size * sizeof(T)); delete[] m_data; m_data = temp; m_maxSize = size; } } void Resize(size_t size) { Allocate(size); m_size = size; } void PushBack(const T& value) { if (m_size == m_maxSize) { size_t maxSize = (m_maxSize << 1); T* temp = new T[maxSize]; memcpy(temp, Data(), m_maxSize * sizeof(T)); delete[] m_data; m_data = temp; m_maxSize = maxSize; } T* const data = Data(); data[m_size++] = value; } bool Find(const T& value, size_t& pos) { T* const data = Data(); for (pos = 0; pos < m_size; ++pos) if (value == data[pos]) return true; return false; } bool Insert(const T& value) { size_t pos; if (Find(value, pos)) return false; PushBack(value); return true; } bool Erase(const T& value) { size_t pos; T* const data = Data(); if (Find(value, pos)) { for (size_t j = pos + 1; j < m_size; ++j) data[j - 1] = data[j]; --m_size; return true; } return false; } void operator=(const SArray& rhs) { if (m_maxSize < rhs.m_size) { delete[] m_data; m_maxSize = rhs.m_maxSize; m_data = new T[m_maxSize]; } m_size = rhs.m_size; memcpy(Data(), rhs.Data(), m_size * sizeof(T)); } void Initialize() { m_data = 0; m_size = 0; m_maxSize = N; } SArray(const SArray& rhs) { m_data = 0; m_size = 0; m_maxSize = N; *this = rhs; } SArray() { Initialize(); } ~SArray() { delete[] m_data; } private: T m_data0[N]; T* m_data; size_t m_size; size_t m_maxSize; }; } #endif
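// Illustrative usage sketch (minimal, using only the SArray API declared above): SArray
// keeps up to N elements in the fixed inline buffer m_data0 and only heap-allocates once
// that capacity is exceeded, so small arrays avoid dynamic allocation entirely.
//
//     VHACD::SArray<int32_t, 16> ids;   // 16-element inline buffer
//     ids.PushBack(7);
//     ids.PushBack(42);
//     size_t count = ids.Size();        // 2
//     int32_t first = ids[0];           // 7
//     ids.Erase(7);                     // removes the value 7 if present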
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdVector.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_VECTOR_H #define VHACD_VECTOR_H #include <iostream> #include <math.h> namespace VHACD { //! Vector dim 3. template <typename T> class Vec3 { public: T& operator[](size_t i) { return m_data[i]; } const T& operator[](size_t i) const { return m_data[i]; } T& X(); T& Y(); T& Z(); const T& X() const; const T& Y() const; const T& Z() const; void Normalize(); T GetNorm() const; void operator=(const Vec3& rhs); void operator+=(const Vec3& rhs); void operator-=(const Vec3& rhs); void operator-=(T a); void operator+=(T a); void operator/=(T a); void operator*=(T a); Vec3 operator^(const Vec3& rhs) const; T operator*(const Vec3& rhs) const; Vec3 operator+(const Vec3& rhs) const; Vec3 operator-(const Vec3& rhs) const; Vec3 operator-() const; Vec3 operator*(T rhs) const; Vec3 operator/(T rhs) const; bool operator<(const Vec3& rhs) const; bool operator>(const Vec3& rhs) const; Vec3(); Vec3(T a); Vec3(T x, T y, T z); Vec3(const Vec3& rhs); /*virtual*/ ~Vec3(void); // Compute the center of this bounding box and return the diagonal length T GetCenter(const Vec3 &bmin, const Vec3 &bmax) { X() = (bmin.X() + bmax.X())*0.5; Y() = (bmin.Y() + bmax.Y())*0.5; Z() = (bmin.Z() + bmax.Z())*0.5; T dx = bmax.X() - bmin.X(); T dy = bmax.Y() - bmin.Y(); T dz = bmax.Z() - bmin.Z(); T diagonal = T(sqrt(dx*dx + dy*dy + dz*dz)); return diagonal; } // Update the min/max values relative to this point void UpdateMinMax(Vec3 &bmin,Vec3 &bmax) const { if (X() < bmin.X()) { bmin.X() = X(); } if (Y() < bmin.Y()) { bmin.Y() = Y(); } if (Z() < bmin.Z()) { bmin.Z() = Z(); } if (X() > bmax.X()) { bmax.X() = X(); } if (X() > bmax.X()) { bmax.X() = X(); } if (Y() > bmax.Y()) { bmax.Y() = Y(); } if (Z() > bmax.Z()) { bmax.Z() = Z(); } } // Returns the squared distance between these two points T GetDistanceSquared(const Vec3 &p) const { T dx = X() - p.X(); T dy = Y() - p.Y(); T dz = Z() - p.Z(); return dx*dx + dy*dy + dz*dz; } T GetDistance(const Vec3 &p) const { return sqrt(GetDistanceSquared(p)); } // Returns the raw vector data as a pointer T* GetData(void) { return 
m_data; } private: T m_data[3]; }; //! Vector dim 2. template <typename T> class Vec2 { public: T& operator[](size_t i) { return m_data[i]; } const T& operator[](size_t i) const { return m_data[i]; } T& X(); T& Y(); const T& X() const; const T& Y() const; void Normalize(); T GetNorm() const; void operator=(const Vec2& rhs); void operator+=(const Vec2& rhs); void operator-=(const Vec2& rhs); void operator-=(T a); void operator+=(T a); void operator/=(T a); void operator*=(T a); T operator^(const Vec2& rhs) const; T operator*(const Vec2& rhs) const; Vec2 operator+(const Vec2& rhs) const; Vec2 operator-(const Vec2& rhs) const; Vec2 operator-() const; Vec2 operator*(T rhs) const; Vec2 operator/(T rhs) const; Vec2(); Vec2(T a); Vec2(T x, T y); Vec2(const Vec2& rhs); /*virtual*/ ~Vec2(void); private: T m_data[2]; }; template <typename T> const bool Colinear(const Vec3<T>& a, const Vec3<T>& b, const Vec3<T>& c); template <typename T> const T ComputeVolume4(const Vec3<T>& a, const Vec3<T>& b, const Vec3<T>& c, const Vec3<T>& d); } #include "vhacdVector.inl" // template implementation #endif
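// Illustrative usage sketch (minimal, using only the operators declared above): Vec3
// provides the usual vector algebra, with operator^ as the cross product and operator*
// as the dot product.
//
//     VHACD::Vec3<double> a(1.0, 0.0, 0.0);
//     VHACD::Vec3<double> b(0.0, 1.0, 0.0);
//     VHACD::Vec3<double> n = a ^ b;    // cross product -> (0, 0, 1)
//     double d = a * b;                 // dot product   -> 0.0
//     n.Normalize();                    // divides by GetNorm() unless it is zero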
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdVector.inl
#pragma once #ifndef VHACD_VECTOR_INL #define VHACD_VECTOR_INL namespace VHACD { template <typename T> inline Vec3<T> operator*(T lhs, const Vec3<T> & rhs) { return Vec3<T>(lhs * rhs.X(), lhs * rhs.Y(), lhs * rhs.Z()); } template <typename T> inline T & Vec3<T>::X() { return m_data[0]; } template <typename T> inline T & Vec3<T>::Y() { return m_data[1]; } template <typename T> inline T & Vec3<T>::Z() { return m_data[2]; } template <typename T> inline const T & Vec3<T>::X() const { return m_data[0]; } template <typename T> inline const T & Vec3<T>::Y() const { return m_data[1]; } template <typename T> inline const T & Vec3<T>::Z() const { return m_data[2]; } template <typename T> inline void Vec3<T>::Normalize() { T n = sqrt(m_data[0]*m_data[0]+m_data[1]*m_data[1]+m_data[2]*m_data[2]); if (n != 0.0) (*this) /= n; } template <typename T> inline T Vec3<T>::GetNorm() const { return sqrt(m_data[0]*m_data[0]+m_data[1]*m_data[1]+m_data[2]*m_data[2]); } template <typename T> inline void Vec3<T>::operator= (const Vec3 & rhs) { this->m_data[0] = rhs.m_data[0]; this->m_data[1] = rhs.m_data[1]; this->m_data[2] = rhs.m_data[2]; } template <typename T> inline void Vec3<T>::operator+=(const Vec3 & rhs) { this->m_data[0] += rhs.m_data[0]; this->m_data[1] += rhs.m_data[1]; this->m_data[2] += rhs.m_data[2]; } template <typename T> inline void Vec3<T>::operator-=(const Vec3 & rhs) { this->m_data[0] -= rhs.m_data[0]; this->m_data[1] -= rhs.m_data[1]; this->m_data[2] -= rhs.m_data[2]; } template <typename T> inline void Vec3<T>::operator-=(T a) { this->m_data[0] -= a; this->m_data[1] -= a; this->m_data[2] -= a; } template <typename T> inline void Vec3<T>::operator+=(T a) { this->m_data[0] += a; this->m_data[1] += a; this->m_data[2] += a; } template <typename T> inline void Vec3<T>::operator/=(T a) { this->m_data[0] /= a; this->m_data[1] /= a; this->m_data[2] /= a; } template <typename T> inline void Vec3<T>::operator*=(T a) { this->m_data[0] *= a; this->m_data[1] *= a; this->m_data[2] *= a; } template <typename T> inline Vec3<T> Vec3<T>::operator^ (const Vec3<T> & rhs) const { return Vec3<T>(m_data[1] * rhs.m_data[2] - m_data[2] * rhs.m_data[1], m_data[2] * rhs.m_data[0] - m_data[0] * rhs.m_data[2], m_data[0] * rhs.m_data[1] - m_data[1] * rhs.m_data[0]); } template <typename T> inline T Vec3<T>::operator*(const Vec3<T> & rhs) const { return (m_data[0] * rhs.m_data[0] + m_data[1] * rhs.m_data[1] + m_data[2] * rhs.m_data[2]); } template <typename T> inline Vec3<T> Vec3<T>::operator+(const Vec3<T> & rhs) const { return Vec3<T>(m_data[0] + rhs.m_data[0],m_data[1] + rhs.m_data[1],m_data[2] + rhs.m_data[2]); } template <typename T> inline Vec3<T> Vec3<T>::operator-(const Vec3<T> & rhs) const { return Vec3<T>(m_data[0] - rhs.m_data[0],m_data[1] - rhs.m_data[1],m_data[2] - rhs.m_data[2]) ; } template <typename T> inline Vec3<T> Vec3<T>::operator-() const { return Vec3<T>(-m_data[0],-m_data[1],-m_data[2]) ; } template <typename T> inline Vec3<T> Vec3<T>::operator*(T rhs) const { return Vec3<T>(rhs * this->m_data[0], rhs * this->m_data[1], rhs * this->m_data[2]); } template <typename T> inline Vec3<T> Vec3<T>::operator/ (T rhs) const { return Vec3<T>(m_data[0] / rhs, m_data[1] / rhs, m_data[2] / rhs); } template <typename T> inline Vec3<T>::Vec3(T a) { m_data[0] = m_data[1] = m_data[2] = a; } template <typename T> inline Vec3<T>::Vec3(T x, T y, T z) { m_data[0] = x; m_data[1] = y; m_data[2] = z; } template <typename T> inline Vec3<T>::Vec3(const Vec3 & rhs) { m_data[0] = rhs.m_data[0]; m_data[1] = rhs.m_data[1]; 
m_data[2] = rhs.m_data[2]; } template <typename T> inline Vec3<T>::~Vec3(void){}; template <typename T> inline Vec3<T>::Vec3() {} template<typename T> inline const bool Colinear(const Vec3<T> & a, const Vec3<T> & b, const Vec3<T> & c) { return ((c.Z() - a.Z()) * (b.Y() - a.Y()) - (b.Z() - a.Z()) * (c.Y() - a.Y()) == 0.0 /*EPS*/) && ((b.Z() - a.Z()) * (c.X() - a.X()) - (b.X() - a.X()) * (c.Z() - a.Z()) == 0.0 /*EPS*/) && ((b.X() - a.X()) * (c.Y() - a.Y()) - (b.Y() - a.Y()) * (c.X() - a.X()) == 0.0 /*EPS*/); } template<typename T> inline const T ComputeVolume4(const Vec3<T> & a, const Vec3<T> & b, const Vec3<T> & c, const Vec3<T> & d) { return (a-d) * ((b-d) ^ (c-d)); } template <typename T> inline bool Vec3<T>::operator<(const Vec3 & rhs) const { if (X() == rhs[0]) { if (Y() == rhs[1]) { return (Z()<rhs[2]); } return (Y()<rhs[1]); } return (X()<rhs[0]); } template <typename T> inline bool Vec3<T>::operator>(const Vec3 & rhs) const { if (X() == rhs[0]) { if (Y() == rhs[1]) { return (Z()>rhs[2]); } return (Y()>rhs[1]); } return (X()>rhs[0]); } template <typename T> inline Vec2<T> operator*(T lhs, const Vec2<T> & rhs) { return Vec2<T>(lhs * rhs.X(), lhs * rhs.Y()); } template <typename T> inline T & Vec2<T>::X() { return m_data[0]; } template <typename T> inline T & Vec2<T>::Y() { return m_data[1]; } template <typename T> inline const T & Vec2<T>::X() const { return m_data[0]; } template <typename T> inline const T & Vec2<T>::Y() const { return m_data[1]; } template <typename T> inline void Vec2<T>::Normalize() { T n = sqrt(m_data[0]*m_data[0]+m_data[1]*m_data[1]); if (n != 0.0) (*this) /= n; } template <typename T> inline T Vec2<T>::GetNorm() const { return sqrt(m_data[0]*m_data[0]+m_data[1]*m_data[1]); } template <typename T> inline void Vec2<T>::operator= (const Vec2 & rhs) { this->m_data[0] = rhs.m_data[0]; this->m_data[1] = rhs.m_data[1]; } template <typename T> inline void Vec2<T>::operator+=(const Vec2 & rhs) { this->m_data[0] += rhs.m_data[0]; this->m_data[1] += rhs.m_data[1]; } template <typename T> inline void Vec2<T>::operator-=(const Vec2 & rhs) { this->m_data[0] -= rhs.m_data[0]; this->m_data[1] -= rhs.m_data[1]; } template <typename T> inline void Vec2<T>::operator-=(T a) { this->m_data[0] -= a; this->m_data[1] -= a; } template <typename T> inline void Vec2<T>::operator+=(T a) { this->m_data[0] += a; this->m_data[1] += a; } template <typename T> inline void Vec2<T>::operator/=(T a) { this->m_data[0] /= a; this->m_data[1] /= a; } template <typename T> inline void Vec2<T>::operator*=(T a) { this->m_data[0] *= a; this->m_data[1] *= a; } template <typename T> inline T Vec2<T>::operator^ (const Vec2<T> & rhs) const { return m_data[0] * rhs.m_data[1] - m_data[1] * rhs.m_data[0]; } template <typename T> inline T Vec2<T>::operator*(const Vec2<T> & rhs) const { return (m_data[0] * rhs.m_data[0] + m_data[1] * rhs.m_data[1]); } template <typename T> inline Vec2<T> Vec2<T>::operator+(const Vec2<T> & rhs) const { return Vec2<T>(m_data[0] + rhs.m_data[0],m_data[1] + rhs.m_data[1]); } template <typename T> inline Vec2<T> Vec2<T>::operator-(const Vec2<T> & rhs) const { return Vec2<T>(m_data[0] - rhs.m_data[0],m_data[1] - rhs.m_data[1]); } template <typename T> inline Vec2<T> Vec2<T>::operator-() const { return Vec2<T>(-m_data[0],-m_data[1]) ; } template <typename T> inline Vec2<T> Vec2<T>::operator*(T rhs) const { return Vec2<T>(rhs * this->m_data[0], rhs * this->m_data[1]); } template <typename T> inline Vec2<T> Vec2<T>::operator/ (T rhs) const { return Vec2<T>(m_data[0] / rhs, m_data[1] / 
rhs); } template <typename T> inline Vec2<T>::Vec2(T a) { m_data[0] = m_data[1] = a; } template <typename T> inline Vec2<T>::Vec2(T x, T y) { m_data[0] = x; m_data[1] = y; } template <typename T> inline Vec2<T>::Vec2(const Vec2 & rhs) { m_data[0] = rhs.m_data[0]; m_data[1] = rhs.m_data[1]; } template <typename T> inline Vec2<T>::~Vec2(void){}; template <typename T> inline Vec2<T>::Vec2() {} /* InsideTriangle decides if a point P is Inside of the triangle defined by A, B, C. */ template<typename T> inline const bool InsideTriangle(const Vec2<T> & a, const Vec2<T> & b, const Vec2<T> & c, const Vec2<T> & p) { T ax, ay, bx, by, cx, cy, apx, apy, bpx, bpy, cpx, cpy; T cCROSSap, bCROSScp, aCROSSbp; ax = c.X() - b.X(); ay = c.Y() - b.Y(); bx = a.X() - c.X(); by = a.Y() - c.Y(); cx = b.X() - a.X(); cy = b.Y() - a.Y(); apx= p.X() - a.X(); apy= p.Y() - a.Y(); bpx= p.X() - b.X(); bpy= p.Y() - b.Y(); cpx= p.X() - c.X(); cpy= p.Y() - c.Y(); aCROSSbp = ax*bpy - ay*bpx; cCROSSap = cx*apy - cy*apx; bCROSScp = bx*cpy - by*cpx; return ((aCROSSbp >= 0.0) && (bCROSScp >= 0.0) && (cCROSSap >= 0.0)); } } #endif //VHACD_VECTOR_INL
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/FloatMath.h
#ifndef FLOAT_MATH_LIB_H #define FLOAT_MATH_LIB_H #include <float.h> #include <stdint.h> namespace FLOAT_MATH { enum FM_ClipState { FMCS_XMIN = (1<<0), FMCS_XMAX = (1<<1), FMCS_YMIN = (1<<2), FMCS_YMAX = (1<<3), FMCS_ZMIN = (1<<4), FMCS_ZMAX = (1<<5), }; enum FM_Axis { FM_XAXIS = (1<<0), FM_YAXIS = (1<<1), FM_ZAXIS = (1<<2) }; enum LineSegmentType { LS_START, LS_MIDDLE, LS_END }; const float FM_PI = 3.1415926535897932384626433832795028841971693993751f; const float FM_DEG_TO_RAD = ((2.0f * FM_PI) / 360.0f); const float FM_RAD_TO_DEG = (360.0f / (2.0f * FM_PI)); //***************** Float versions //*** //*** vectors are assumed to be 3 floats or 3 doubles representing X, Y, Z //*** quaternions are assumed to be 4 floats or 4 doubles representing X,Y,Z,W //*** matrices are assumed to be 16 floats or 16 doubles representing a standard D3D or OpenGL style 4x4 matrix //*** bounding volumes are expressed as two sets of 3 floats/double representing bmin(x,y,z) and bmax(x,y,z) //*** Plane equations are assumed to be 4 floats or 4 doubles representing Ax,By,Cz,D FM_Axis fm_getDominantAxis(const float normal[3]); FM_Axis fm_getDominantAxis(const double normal[3]); void fm_decomposeTransform(const float local_transform[16],float trans[3],float rot[4],float scale[3]); void fm_decomposeTransform(const double local_transform[16],double trans[3],double rot[4],double scale[3]); void fm_multiplyTransform(const float *pA,const float *pB,float *pM); void fm_multiplyTransform(const double *pA,const double *pB,double *pM); void fm_inverseTransform(const float matrix[16],float inverse_matrix[16]); void fm_inverseTransform(const double matrix[16],double inverse_matrix[16]); void fm_identity(float matrix[16]); // set 4x4 matrix to identity. void fm_identity(double matrix[16]); // set 4x4 matrix to identity. void fm_inverseRT(const float matrix[16], const float pos[3], float t[3]); // inverse rotate translate the point. void fm_inverseRT(const double matrix[16],const double pos[3],double t[3]); // inverse rotate translate the point. void fm_transform(const float matrix[16], const float pos[3], float t[3]); // rotate and translate this point. void fm_transform(const double matrix[16],const double pos[3],double t[3]); // rotate and translate this point. float fm_getDeterminant(const float matrix[16]); double fm_getDeterminant(const double matrix[16]); void fm_getSubMatrix(int32_t ki,int32_t kj,float pDst[16],const float matrix[16]); void fm_getSubMatrix(int32_t ki,int32_t kj,double pDst[16],const float matrix[16]); void fm_rotate(const float matrix[16],const float pos[3],float t[3]); // only rotate the point by a 4x4 matrix, don't translate. void fm_rotate(const double matri[16],const double pos[3],double t[3]); // only rotate the point by a 4x4 matrix, don't translate. 
void fm_eulerToMatrix(float ax,float ay,float az,float matrix[16]); // convert euler (in radians) to a dest 4x4 matrix (translation set to zero) void fm_eulerToMatrix(double ax,double ay,double az,double matrix[16]); // convert euler (in radians) to a dest 4x4 matrix (translation set to zero) void fm_getAABB(uint32_t vcount,const float *points,uint32_t pstride,float bmin[3],float bmax[3]); void fm_getAABB(uint32_t vcount,const double *points,uint32_t pstride,double bmin[3],double bmax[3]); void fm_getAABBCenter(const float bmin[3],const float bmax[3],float center[3]); void fm_getAABBCenter(const double bmin[3],const double bmax[3],double center[3]); void fm_transformAABB(const float bmin[3],const float bmax[3],const float matrix[16],float tbmin[3],float tbmax[3]); void fm_transformAABB(const double bmin[3],const double bmax[3],const double matrix[16],double tbmin[3],double tbmax[3]); void fm_eulerToQuat(float x,float y,float z,float quat[4]); // convert euler angles to quaternion. void fm_eulerToQuat(double x,double y,double z,double quat[4]); // convert euler angles to quaternion. void fm_quatToEuler(const float quat[4],float &ax,float &ay,float &az); void fm_quatToEuler(const double quat[4],double &ax,double &ay,double &az); void fm_eulerToQuat(const float euler[3],float quat[4]); // convert euler angles to quaternion. Angles must be radians not degrees! void fm_eulerToQuat(const double euler[3],double quat[4]); // convert euler angles to quaternion. void fm_scale(float x,float y,float z,float matrix[16]); // apply scale to the matrix. void fm_scale(double x,double y,double z,double matrix[16]); // apply scale to the matrix. void fm_eulerToQuatDX(float x,float y,float z,float quat[4]); // convert euler angles to quaternion using the fucked up DirectX method void fm_eulerToQuatDX(double x,double y,double z,double quat[4]); // convert euler angles to quaternion using the fucked up DirectX method void fm_eulerToMatrixDX(float x,float y,float z,float matrix[16]); // convert euler angles to quaternion using the fucked up DirectX method. void fm_eulerToMatrixDX(double x,double y,double z,double matrix[16]); // convert euler angles to quaternion using the fucked up DirectX method. void fm_quatToMatrix(const float quat[4],float matrix[16]); // convert quaterinion rotation to matrix, translation set to zero. void fm_quatToMatrix(const double quat[4],double matrix[16]); // convert quaterinion rotation to matrix, translation set to zero. void fm_quatRotate(const float quat[4],const float v[3],float r[3]); // rotate a vector directly by a quaternion. void fm_quatRotate(const double quat[4],const double v[3],double r[3]); // rotate a vector directly by a quaternion. 
void fm_getTranslation(const float matrix[16],float t[3]); void fm_getTranslation(const double matrix[16],double t[3]); void fm_setTranslation(const float *translation,float matrix[16]); void fm_setTranslation(const double *translation,double matrix[16]); void fm_multiplyQuat(const float *qa,const float *qb,float *quat); void fm_multiplyQuat(const double *qa,const double *qb,double *quat); void fm_matrixToQuat(const float matrix[16],float quat[4]); // convert the 3x3 portion of a 4x4 matrix into a quaterion as x,y,z,w void fm_matrixToQuat(const double matrix[16],double quat[4]); // convert the 3x3 portion of a 4x4 matrix into a quaterion as x,y,z,w float fm_sphereVolume(float radius); // return's the volume of a sphere of this radius (4/3 PI * R cubed ) double fm_sphereVolume(double radius); // return's the volume of a sphere of this radius (4/3 PI * R cubed ) float fm_cylinderVolume(float radius,float h); double fm_cylinderVolume(double radius,double h); float fm_capsuleVolume(float radius,float h); double fm_capsuleVolume(double radius,double h); float fm_distance(const float p1[3],const float p2[3]); double fm_distance(const double p1[3],const double p2[3]); float fm_distanceSquared(const float p1[3],const float p2[3]); double fm_distanceSquared(const double p1[3],const double p2[3]); float fm_distanceSquaredXZ(const float p1[3],const float p2[3]); double fm_distanceSquaredXZ(const double p1[3],const double p2[3]); float fm_computePlane(const float p1[3],const float p2[3],const float p3[3],float *n); // return D double fm_computePlane(const double p1[3],const double p2[3],const double p3[3],double *n); // return D float fm_distToPlane(const float plane[4],const float pos[3]); // computes the distance of this point from the plane. double fm_distToPlane(const double plane[4],const double pos[3]); // computes the distance of this point from the plane. float fm_dot(const float p1[3],const float p2[3]); double fm_dot(const double p1[3],const double p2[3]); void fm_cross(float cross[3],const float a[3],const float b[3]); void fm_cross(double cross[3],const double a[3],const double b[3]); void fm_computeNormalVector(float n[3],const float p1[3],const float p2[3]); // as P2-P1 normalized. void fm_computeNormalVector(double n[3],const double p1[3],const double p2[3]); // as P2-P1 normalized. bool fm_computeWindingOrder(const float p1[3],const float p2[3],const float p3[3]); // returns true if the triangle is clockwise. bool fm_computeWindingOrder(const double p1[3],const double p2[3],const double p3[3]); // returns true if the triangle is clockwise. 
float fm_normalize(float n[3]); // normalize this vector and return the distance double fm_normalize(double n[3]); // normalize this vector and return the distance float fm_normalizeQuat(float n[4]); // normalize this quat double fm_normalizeQuat(double n[4]); // normalize this quat void fm_matrixMultiply(const float A[16],const float B[16],float dest[16]); void fm_matrixMultiply(const double A[16],const double B[16],double dest[16]); void fm_composeTransform(const float position[3],const float quat[4],const float scale[3],float matrix[16]); void fm_composeTransform(const double position[3],const double quat[4],const double scale[3],double matrix[16]); float fm_computeArea(const float p1[3],const float p2[3],const float p3[3]); double fm_computeArea(const double p1[3],const double p2[3],const double p3[3]); void fm_lerp(const float p1[3],const float p2[3],float dest[3],float lerpValue); void fm_lerp(const double p1[3],const double p2[3],double dest[3],double lerpValue); bool fm_insideTriangleXZ(const float test[3],const float p1[3],const float p2[3],const float p3[3]); bool fm_insideTriangleXZ(const double test[3],const double p1[3],const double p2[3],const double p3[3]); bool fm_insideAABB(const float pos[3],const float bmin[3],const float bmax[3]); bool fm_insideAABB(const double pos[3],const double bmin[3],const double bmax[3]); bool fm_insideAABB(const float obmin[3],const float obmax[3],const float tbmin[3],const float tbmax[3]); // test if bounding box tbmin/tmbax is fully inside obmin/obmax bool fm_insideAABB(const double obmin[3],const double obmax[3],const double tbmin[3],const double tbmax[3]); // test if bounding box tbmin/tmbax is fully inside obmin/obmax uint32_t fm_clipTestPoint(const float bmin[3],const float bmax[3],const float pos[3]); uint32_t fm_clipTestPoint(const double bmin[3],const double bmax[3],const double pos[3]); uint32_t fm_clipTestPointXZ(const float bmin[3],const float bmax[3],const float pos[3]); // only tests X and Z, not Y uint32_t fm_clipTestPointXZ(const double bmin[3],const double bmax[3],const double pos[3]); // only tests X and Z, not Y uint32_t fm_clipTestAABB(const float bmin[3],const float bmax[3],const float p1[3],const float p2[3],const float p3[3],uint32_t &andCode); uint32_t fm_clipTestAABB(const double bmin[3],const double bmax[3],const double p1[3],const double p2[3],const double p3[3],uint32_t &andCode); bool fm_lineTestAABBXZ(const float p1[3],const float p2[3],const float bmin[3],const float bmax[3],float &time); bool fm_lineTestAABBXZ(const double p1[3],const double p2[3],const double bmin[3],const double bmax[3],double &time); bool fm_lineTestAABB(const float p1[3],const float p2[3],const float bmin[3],const float bmax[3],float &time); bool fm_lineTestAABB(const double p1[3],const double p2[3],const double bmin[3],const double bmax[3],double &time); void fm_initMinMax(const float p[3],float bmin[3],float bmax[3]); void fm_initMinMax(const double p[3],double bmin[3],double bmax[3]); void fm_initMinMax(float bmin[3],float bmax[3]); void fm_initMinMax(double bmin[3],double bmax[3]); void fm_minmax(const float p[3],float bmin[3],float bmax[3]); // accumulate to a min-max value void fm_minmax(const double p[3],double bmin[3],double bmax[3]); // accumulate to a min-max value // Computes the diagonal length of the bounding box and then inflates the bounding box on all sides // by the ratio provided. 
void fm_inflateMinMax(float bmin[3], float bmax[3], float ratio); void fm_inflateMinMax(double bmin[3], double bmax[3], double ratio); float fm_solveX(const float plane[4],float y,float z); // solve for X given this plane equation and the other two components. double fm_solveX(const double plane[4],double y,double z); // solve for X given this plane equation and the other two components. float fm_solveY(const float plane[4],float x,float z); // solve for Y given this plane equation and the other two components. double fm_solveY(const double plane[4],double x,double z); // solve for Y given this plane equation and the other two components. float fm_solveZ(const float plane[4],float x,float y); // solve for Z given this plane equation and the other two components. double fm_solveZ(const double plane[4],double x,double y); // solve for Z given this plane equation and the other two components. bool fm_computeBestFitPlane(uint32_t vcount, // number of input data points const float *points, // starting address of points array. uint32_t vstride, // stride between input points. const float *weights, // *optional point weighting values. uint32_t wstride, // weight stride for each vertex. float plane[4]); bool fm_computeBestFitPlane(uint32_t vcount, // number of input data points const double *points, // starting address of points array. uint32_t vstride, // stride between input points. const double *weights, // *optional point weighting values. uint32_t wstride, // weight stride for each vertex. double plane[4]); bool fm_computeCentroid(uint32_t vcount, // number of input data points const float *points, // starting address of points array. uint32_t vstride, // stride between input points. float *center); bool fm_computeCentroid(uint32_t vcount, // number of input data points const double *points, // starting address of points array. uint32_t vstride, // stride between input points. 
double *center); float fm_computeBestFitAABB(uint32_t vcount,const float *points,uint32_t pstride,float bmin[3],float bmax[3]); // returns the diagonal distance double fm_computeBestFitAABB(uint32_t vcount,const double *points,uint32_t pstride,double bmin[3],double bmax[3]); // returns the diagonal distance float fm_computeBestFitSphere(uint32_t vcount,const float *points,uint32_t pstride,float center[3]); double fm_computeBestFitSphere(uint32_t vcount,const double *points,uint32_t pstride,double center[3]); bool fm_lineSphereIntersect(const float center[3],float radius,const float p1[3],const float p2[3],float intersect[3]); bool fm_lineSphereIntersect(const double center[3],double radius,const double p1[3],const double p2[3],double intersect[3]); bool fm_intersectRayAABB(const float bmin[3],const float bmax[3],const float pos[3],const float dir[3],float intersect[3]); bool fm_intersectLineSegmentAABB(const float bmin[3],const float bmax[3],const float p1[3],const float p2[3],float intersect[3]); bool fm_lineIntersectsTriangle(const float rayStart[3],const float rayEnd[3],const float p1[3],const float p2[3],const float p3[3],float sect[3]); bool fm_lineIntersectsTriangle(const double rayStart[3],const double rayEnd[3],const double p1[3],const double p2[3],const double p3[3],double sect[3]); bool fm_rayIntersectsTriangle(const float origin[3],const float dir[3],const float v0[3],const float v1[3],const float v2[3],float &t); bool fm_rayIntersectsTriangle(const double origin[3],const double dir[3],const double v0[3],const double v1[3],const double v2[3],double &t); bool fm_raySphereIntersect(const float center[3],float radius,const float pos[3],const float dir[3],float distance,float intersect[3]); bool fm_raySphereIntersect(const double center[3],double radius,const double pos[3],const double dir[3],double distance,double intersect[3]); void fm_catmullRom(float out_vector[3],const float p1[3],const float p2[3],const float p3[3],const float *p4, const float s); void fm_catmullRom(double out_vector[3],const double p1[3],const double p2[3],const double p3[3],const double *p4, const double s); bool fm_intersectAABB(const float bmin1[3],const float bmax1[3],const float bmin2[3],const float bmax2[3]); bool fm_intersectAABB(const double bmin1[3],const double bmax1[3],const double bmin2[3],const double bmax2[3]); // computes the rotation quaternion to go from unit-vector v0 to unit-vector v1 void fm_rotationArc(const float v0[3],const float v1[3],float quat[4]); void fm_rotationArc(const double v0[3],const double v1[3],double quat[4]); float fm_distancePointLineSegment(const float Point[3],const float LineStart[3],const float LineEnd[3],float intersection[3],LineSegmentType &type,float epsilon); double fm_distancePointLineSegment(const double Point[3],const double LineStart[3],const double LineEnd[3],double intersection[3],LineSegmentType &type,double epsilon); bool fm_colinear(const double p1[3],const double p2[3],const double p3[3],double epsilon=0.999); // true if these three points in a row are co-linear bool fm_colinear(const float p1[3],const float p2[3],const float p3[3],float epsilon=0.999f); bool fm_colinear(const float a1[3],const float a2[3],const float b1[3],const float b2[3],float epsilon=0.999f); // true if these two line segments are co-linear. bool fm_colinear(const double a1[3],const double a2[3],const double b1[3],const double b2[3],double epsilon=0.999); // true if these two line segments are co-linear. 
enum IntersectResult { IR_DONT_INTERSECT, IR_DO_INTERSECT, IR_COINCIDENT, IR_PARALLEL, }; IntersectResult fm_intersectLineSegments2d(const float a1[3], const float a2[3], const float b1[3], const float b2[3], float intersectionPoint[3]); IntersectResult fm_intersectLineSegments2d(const double a1[3],const double a2[3],const double b1[3],const double b2[3],double intersectionPoint[3]); IntersectResult fm_intersectLineSegments2dTime(const float a1[3], const float a2[3], const float b1[3], const float b2[3],float &t1,float &t2); IntersectResult fm_intersectLineSegments2dTime(const double a1[3],const double a2[3],const double b1[3],const double b2[3],double &t1,double &t2); // Plane-Triangle splitting enum PlaneTriResult { PTR_ON_PLANE, PTR_FRONT, PTR_BACK, PTR_SPLIT, }; PlaneTriResult fm_planeTriIntersection(const float plane[4], // the plane equation in Ax+By+Cz+D format const float *triangle, // the source triangle. uint32_t tstride, // stride in bytes of the input and output *vertices* float epsilon, // the co-planer epsilon value. float *front, // the triangle in front of the uint32_t &fcount, // number of vertices in the 'front' triangle float *back, // the triangle in back of the plane uint32_t &bcount); // the number of vertices in the 'back' triangle. PlaneTriResult fm_planeTriIntersection(const double plane[4], // the plane equation in Ax+By+Cz+D format const double *triangle, // the source triangle. uint32_t tstride, // stride in bytes of the input and output *vertices* double epsilon, // the co-planer epsilon value. double *front, // the triangle in front of the uint32_t &fcount, // number of vertices in the 'front' triangle double *back, // the triangle in back of the plane uint32_t &bcount); // the number of vertices in the 'back' triangle. void fm_intersectPointPlane(const float p1[3],const float p2[3],float *split,const float plane[4]); void fm_intersectPointPlane(const double p1[3],const double p2[3],double *split,const double plane[4]); PlaneTriResult fm_getSidePlane(const float p[3],const float plane[4],float epsilon); PlaneTriResult fm_getSidePlane(const double p[3],const double plane[4],double epsilon); void fm_computeBestFitOBB(uint32_t vcount,const float *points,uint32_t pstride,float *sides,float matrix[16],bool bruteForce=true); void fm_computeBestFitOBB(uint32_t vcount,const double *points,uint32_t pstride,double *sides,double matrix[16],bool bruteForce=true); void fm_computeBestFitOBB(uint32_t vcount,const float *points,uint32_t pstride,float *sides,float pos[3],float quat[4],bool bruteForce=true); void fm_computeBestFitOBB(uint32_t vcount,const double *points,uint32_t pstride,double *sides,double pos[3],double quat[4],bool bruteForce=true); void fm_computeBestFitABB(uint32_t vcount,const float *points,uint32_t pstride,float *sides,float pos[3]); void fm_computeBestFitABB(uint32_t vcount,const double *points,uint32_t pstride,double *sides,double pos[3]); //** Note, if the returned capsule height is less than zero, then you must represent it is a sphere of size radius. void fm_computeBestFitCapsule(uint32_t vcount,const float *points,uint32_t pstride,float &radius,float &height,float matrix[16],bool bruteForce=true); void fm_computeBestFitCapsule(uint32_t vcount,const double *points,uint32_t pstride,float &radius,float &height,double matrix[16],bool bruteForce=true); void fm_planeToMatrix(const float plane[4],float matrix[16]); // convert a plane equation to a 4x4 rotation matrix. 
Reference vector is 0,1,0 void fm_planeToQuat(const float plane[4],float quat[4],float pos[3]); // convert a plane equation to a quaternion and translation void fm_planeToMatrix(const double plane[4],double matrix[16]); // convert a plane equation to a 4x4 rotation matrix void fm_planeToQuat(const double plane[4],double quat[4],double pos[3]); // convert a plane equation to a quaternion and translation inline void fm_doubleToFloat3(const double p[3],float t[3]) { t[0] = (float) p[0]; t[1] = (float)p[1]; t[2] = (float)p[2]; }; inline void fm_floatToDouble3(const float p[3],double t[3]) { t[0] = (double)p[0]; t[1] = (double)p[1]; t[2] = (double)p[2]; }; void fm_eulerMatrix(float ax,float ay,float az,float matrix[16]); // convert euler (in radians) to a dest 4x4 matrix (translation set to zero) void fm_eulerMatrix(double ax,double ay,double az,double matrix[16]); // convert euler (in radians) to a dest 4x4 matrix (translation set to zero) float fm_computeMeshVolume(const float *vertices,uint32_t tcount,const uint32_t *indices); double fm_computeMeshVolume(const double *vertices,uint32_t tcount,const uint32_t *indices); #define FM_DEFAULT_GRANULARITY 0.001f // 1 millimeter is the default granularity class fm_VertexIndex { public: virtual uint32_t getIndex(const float pos[3],bool &newPos) = 0; // get welded index for this float vector[3] virtual uint32_t getIndex(const double pos[3],bool &newPos) = 0; // get welded index for this double vector[3] virtual const float * getVerticesFloat(void) const = 0; virtual const double * getVerticesDouble(void) const = 0; virtual const float * getVertexFloat(uint32_t index) const = 0; virtual const double * getVertexDouble(uint32_t index) const = 0; virtual uint32_t getVcount(void) const = 0; virtual bool isDouble(void) const = 0; virtual bool saveAsObj(const char *fname,uint32_t tcount,uint32_t *indices) = 0; }; fm_VertexIndex * fm_createVertexIndex(double granularity,bool snapToGrid); // create an indexed vertex system for doubles fm_VertexIndex * fm_createVertexIndex(float granularity,bool snapToGrid); // create an indexed vertext system for floats void fm_releaseVertexIndex(fm_VertexIndex *vindex); class fm_Triangulate { public: virtual const double * triangulate3d(uint32_t pcount, const double *points, uint32_t vstride, uint32_t &tcount, bool consolidate, double epsilon) = 0; virtual const float * triangulate3d(uint32_t pcount, const float *points, uint32_t vstride, uint32_t &tcount, bool consolidate, float epsilon) = 0; }; fm_Triangulate * fm_createTriangulate(void); void fm_releaseTriangulate(fm_Triangulate *t); const float * fm_getPoint(const float *points,uint32_t pstride,uint32_t index); const double * fm_getPoint(const double *points,uint32_t pstride,uint32_t index); bool fm_insideTriangle(float Ax, float Ay,float Bx, float By,float Cx, float Cy,float Px, float Py); bool fm_insideTriangle(double Ax, double Ay,double Bx, double By,double Cx, double Cy,double Px, double Py); float fm_areaPolygon2d(uint32_t pcount,const float *points,uint32_t pstride); double fm_areaPolygon2d(uint32_t pcount,const double *points,uint32_t pstride); bool fm_pointInsidePolygon2d(uint32_t pcount,const float *points,uint32_t pstride,const float *point,uint32_t xindex=0,uint32_t yindex=1); bool fm_pointInsidePolygon2d(uint32_t pcount,const double *points,uint32_t pstride,const double *point,uint32_t xindex=0,uint32_t yindex=1); uint32_t fm_consolidatePolygon(uint32_t pcount,const float *points,uint32_t pstride,float *dest,float epsilon=0.999999f); // collapses co-linear 
edges. uint32_t fm_consolidatePolygon(uint32_t pcount,const double *points,uint32_t pstride,double *dest,double epsilon=0.999999); // collapses co-linear edges. bool fm_computeSplitPlane(uint32_t vcount,const double *vertices,uint32_t tcount,const uint32_t *indices,double *plane); bool fm_computeSplitPlane(uint32_t vcount,const float *vertices,uint32_t tcount,const uint32_t *indices,float *plane); void fm_nearestPointInTriangle(const float *pos,const float *p1,const float *p2,const float *p3,float *nearest); void fm_nearestPointInTriangle(const double *pos,const double *p1,const double *p2,const double *p3,double *nearest); float fm_areaTriangle(const float *p1,const float *p2,const float *p3); double fm_areaTriangle(const double *p1,const double *p2,const double *p3); void fm_subtract(const float *A,const float *B,float *diff); // compute A-B and store the result in 'diff' void fm_subtract(const double *A,const double *B,double *diff); // compute A-B and store the result in 'diff' void fm_multiply(float *A,float scaler); void fm_multiply(double *A,double scaler); void fm_add(const float *A,const float *B,float *sum); void fm_add(const double *A,const double *B,double *sum); void fm_copy3(const float *source,float *dest); void fm_copy3(const double *source,double *dest); // re-indexes an indexed triangle mesh but drops unused vertices. The output_indices can be the same pointer as the input indices. // the output_vertices can point to the input vertices if you desire. The output_vertices buffer should be at least the same size // is the input buffer. The routine returns the new vertex count after re-indexing. uint32_t fm_copyUniqueVertices(uint32_t vcount,const float *input_vertices,float *output_vertices,uint32_t tcount,const uint32_t *input_indices,uint32_t *output_indices); uint32_t fm_copyUniqueVertices(uint32_t vcount,const double *input_vertices,double *output_vertices,uint32_t tcount,const uint32_t *input_indices,uint32_t *output_indices); bool fm_isMeshCoplanar(uint32_t tcount,const uint32_t *indices,const float *vertices,bool doubleSided); // returns true if this collection of indexed triangles are co-planar! bool fm_isMeshCoplanar(uint32_t tcount,const uint32_t *indices,const double *vertices,bool doubleSided); // returns true if this collection of indexed triangles are co-planar! bool fm_samePlane(const float p1[4],const float p2[4],float normalEpsilon=0.01f,float dEpsilon=0.001f,bool doubleSided=false); // returns true if these two plane equations are identical within an epsilon bool fm_samePlane(const double p1[4],const double p2[4],double normalEpsilon=0.01,double dEpsilon=0.001,bool doubleSided=false); void fm_OBBtoAABB(const float obmin[3],const float obmax[3],const float matrix[16],float abmin[3],float abmax[3]); // a utility class that will tessellate a mesh. class fm_Tesselate { public: virtual const uint32_t * tesselate(fm_VertexIndex *vindex,uint32_t tcount,const uint32_t *indices,float longEdge,uint32_t maxDepth,uint32_t &outcount) = 0; }; fm_Tesselate * fm_createTesselate(void); void fm_releaseTesselate(fm_Tesselate *t); void fm_computeMeanNormals(uint32_t vcount, // the number of vertices const float *vertices, // the base address of the vertex position data. uint32_t vstride, // the stride between position data. 
float *normals, // the base address of the destination for mean vector normals uint32_t nstride, // the stride between normals uint32_t tcount, // the number of triangles const uint32_t *indices); // the triangle indices void fm_computeMeanNormals(uint32_t vcount, // the number of vertices const double *vertices, // the base address of the vertex position data. uint32_t vstride, // the stride between position data. double *normals, // the base address of the destination for mean vector normals uint32_t nstride, // the stride between normals uint32_t tcount, // the number of triangles const uint32_t *indices); // the triangle indices bool fm_isValidTriangle(const float *p1,const float *p2,const float *p3,float epsilon=0.00001f); bool fm_isValidTriangle(const double *p1,const double *p2,const double *p3,double epsilon=0.00001f); }; // end of namespace #endif
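// Illustrative usage sketch (minimal, using only the fm_VertexIndex interface declared
// above): positions that fall within the requested granularity are welded to a single
// index, and 'newPos' reports whether the lookup created a new entry.
//
//     FLOAT_MATH::fm_VertexIndex* weld = FLOAT_MATH::fm_createVertexIndex(0.001f, false);
//     bool newPos = false;
//     float p[3] = { 0.0f, 1.0f, 2.0f };
//     uint32_t index = weld->getIndex(p, newPos);   // newPos == true on first insert
//     uint32_t count = weld->getVcount();           // number of unique (welded) vertices
//     FLOAT_MATH::fm_releaseVertexIndex(weld);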
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdCircularList.inl
#pragma once #ifndef HACD_CIRCULAR_LIST_INL #define HACD_CIRCULAR_LIST_INL namespace VHACD { template < typename T > inline bool CircularList<T>::Delete(CircularListElement<T> * element) { if (!element) { return false; } if (m_size > 1) { CircularListElement<T> * next = element->GetNext(); CircularListElement<T> * prev = element->GetPrev(); delete element; m_size--; if (element == m_head) { m_head = next; } next->GetPrev() = prev; prev->GetNext() = next; return true; } else if (m_size == 1) { delete m_head; m_size--; m_head = 0; return true; } else { return false; } } template < typename T > inline bool CircularList<T>::Delete() { if (m_size > 1) { CircularListElement<T> * next = m_head->GetNext(); CircularListElement<T> * prev = m_head->GetPrev(); delete m_head; m_size--; m_head = next; next->GetPrev() = prev; prev->GetNext() = next; return true; } else if (m_size == 1) { delete m_head; m_size--; m_head = 0; return true; } else { return false; } } template < typename T > inline CircularListElement<T> * CircularList<T>::Add(const T * data) { if (m_size == 0) { if (data) { m_head = new CircularListElement<T>(*data); } else { m_head = new CircularListElement<T>(); } m_head->GetNext() = m_head->GetPrev() = m_head; } else { CircularListElement<T> * next = m_head->GetNext(); CircularListElement<T> * element = m_head; if (data) { m_head = new CircularListElement<T>(*data); } else { m_head = new CircularListElement<T>(); } m_head->GetNext() = next; m_head->GetPrev() = element; element->GetNext() = m_head; next->GetPrev() = m_head; } m_size++; return m_head; } template < typename T > inline CircularListElement<T> * CircularList<T>::Add(const T & data) { const T * pData = &data; return Add(pData); } template < typename T > inline bool CircularList<T>::Next() { if (m_size == 0) { return false; } m_head = m_head->GetNext(); return true; } template < typename T > inline bool CircularList<T>::Prev() { if (m_size == 0) { return false; } m_head = m_head->GetPrev(); return true; } template < typename T > inline CircularList<T>::CircularList(const CircularList& rhs) { if (rhs.m_size > 0) { CircularListElement<T> * current = rhs.m_head; do { current = current->GetNext(); Add(current->GetData()); } while ( current != rhs.m_head ); } } template < typename T > inline const CircularList<T>& CircularList<T>::operator=(const CircularList& rhs) { if (&rhs != this) { Clear(); if (rhs.m_size > 0) { CircularListElement<T> * current = rhs.m_head; do { current = current->GetNext(); Add(current->GetData()); } while ( current != rhs.m_head ); } } return (*this); } } #endif
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdCircularList.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_CIRCULAR_LIST_H #define VHACD_CIRCULAR_LIST_H #include <stdlib.h> namespace VHACD { //! CircularListElement class. template <typename T> class CircularListElement { public: T& GetData() { return m_data; } const T& GetData() const { return m_data; } CircularListElement<T>*& GetNext() { return m_next; } CircularListElement<T>*& GetPrev() { return m_prev; } const CircularListElement<T>*& GetNext() const { return m_next; } const CircularListElement<T>*& GetPrev() const { return m_prev; } //! Constructor CircularListElement(const T& data) { m_data = data; } CircularListElement(void) {} //! Destructor ~CircularListElement(void) {} private: T m_data; CircularListElement<T>* m_next; CircularListElement<T>* m_prev; CircularListElement(const CircularListElement& rhs); }; //! CircularList class. template <typename T> class CircularList { public: CircularListElement<T>*& GetHead() { return m_head; } const CircularListElement<T>* GetHead() const { return m_head; } bool IsEmpty() const { return (m_size == 0); } size_t GetSize() const { return m_size; } const T& GetData() const { return m_head->GetData(); } T& GetData() { return m_head->GetData(); } bool Delete(); bool Delete(CircularListElement<T>* element); CircularListElement<T>* Add(const T* data = 0); CircularListElement<T>* Add(const T& data); bool Next(); bool Prev(); void Clear() { while (Delete()) ; }; const CircularList& operator=(const CircularList& rhs); //! Constructor CircularList() { m_head = 0; m_size = 0; } CircularList(const CircularList& rhs); //! Destructor ~CircularList(void) { Clear(); }; private: CircularListElement<T>* m_head; //!< a pointer to the head of the circular list size_t m_size; //!< number of element in the circular list }; } #include "vhacdCircularList.inl" #endif // VHACD_CIRCULAR_LIST_H
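
Editorial usage sketch, not part of the original header: a minimal walk through the CircularList interface declared above. The behavioral notes in the comments (the most recently added element becomes the head; Delete() removes the current head and moves it to the next element) follow from the .inl implementation shown earlier.

#include <cstdio>

static void circularListSketch()
{
    VHACD::CircularList<int> ring;
    for (int i = 0; i < 4; ++i)
    {
        ring.Add(i);             // each Add() inserts the new element at the head
    }
    printf("size=%zu head=%d\n", ring.GetSize(), ring.GetData());   // head is 3 here

    ring.Next();                 // advance the head pointer one step around the ring
    printf("after Next: head=%d\n", ring.GetData());

    ring.Delete();               // remove the current head; the head becomes its next element
    printf("after Delete: size=%zu head=%d\n", ring.GetSize(), ring.GetData());

    ring.Clear();                // delete all remaining elements
    printf("empty=%d\n", int(ring.IsEmpty()));
}
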
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btAlignedObjectArray.h
/* Bullet Continuous Collision Detection and Physics Library Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/ This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #ifndef BT_OBJECT_ARRAY__ #define BT_OBJECT_ARRAY__ #include "btAlignedAllocator.h" #include "btScalar.h" // has definitions like SIMD_FORCE_INLINE ///If the platform doesn't support placement new, you can disable BT_USE_PLACEMENT_NEW ///then the btAlignedObjectArray doesn't support objects with virtual methods, and non-trivial constructors/destructors ///You can enable BT_USE_MEMCPY, then swapping elements in the array will use memcpy instead of operator= ///see discussion here: http://continuousphysics.com/Bullet/phpBB2/viewtopic.php?t=1231 and ///http://www.continuousphysics.com/Bullet/phpBB2/viewtopic.php?t=1240 #define BT_USE_PLACEMENT_NEW 1 //#define BT_USE_MEMCPY 1 //disable, because it is cumbersome to find out for each platform where memcpy is defined. It can be in <memory.h> or <string.h> or otherwise... #define BT_ALLOW_ARRAY_COPY_OPERATOR // enabling this can accidently perform deep copies of data if you are not careful #ifdef BT_USE_MEMCPY #include <memory.h> #include <string.h> #endif //BT_USE_MEMCPY #ifdef BT_USE_PLACEMENT_NEW #include <new> //for placement new #endif //BT_USE_PLACEMENT_NEW ///The btAlignedObjectArray template class uses a subset of the stl::vector interface for its methods ///It is developed to replace stl::vector to avoid portability issues, including STL alignment issues to add SIMD/SSE data template <typename T> //template <class T> class btAlignedObjectArray { btAlignedAllocator<T, 16> m_allocator; int32_t m_size; int32_t m_capacity; T* m_data; //PCK: added this line bool m_ownsMemory; #ifdef BT_ALLOW_ARRAY_COPY_OPERATOR public: SIMD_FORCE_INLINE btAlignedObjectArray<T>& operator=(const btAlignedObjectArray<T>& other) { copyFromArray(other); return *this; } #else //BT_ALLOW_ARRAY_COPY_OPERATOR private: SIMD_FORCE_INLINE btAlignedObjectArray<T>& operator=(const btAlignedObjectArray<T>& other); #endif //BT_ALLOW_ARRAY_COPY_OPERATOR protected: SIMD_FORCE_INLINE int32_t allocSize(int32_t size) { return (size ? 
size * 2 : 1); } SIMD_FORCE_INLINE void copy(int32_t start, int32_t end, T* dest) const { int32_t i; for (i = start; i < end; ++i) #ifdef BT_USE_PLACEMENT_NEW new (&dest[i]) T(m_data[i]); #else dest[i] = m_data[i]; #endif //BT_USE_PLACEMENT_NEW } SIMD_FORCE_INLINE void init() { //PCK: added this line m_ownsMemory = true; m_data = 0; m_size = 0; m_capacity = 0; } SIMD_FORCE_INLINE void destroy(int32_t first, int32_t last) { int32_t i; for (i = first; i < last; i++) { m_data[i].~T(); } } SIMD_FORCE_INLINE void* allocate(int32_t size) { if (size) return m_allocator.allocate(size); return 0; } SIMD_FORCE_INLINE void deallocate() { if (m_data) { //PCK: enclosed the deallocation in this block if (m_ownsMemory) { m_allocator.deallocate(m_data); } m_data = 0; } } public: btAlignedObjectArray() { init(); } ~btAlignedObjectArray() { clear(); } ///Generally it is best to avoid using the copy constructor of an btAlignedObjectArray, and use a (const) reference to the array instead. btAlignedObjectArray(const btAlignedObjectArray& otherArray) { init(); int32_t otherSize = otherArray.size(); resize(otherSize); otherArray.copy(0, otherSize, m_data); } /// return the number of elements in the array SIMD_FORCE_INLINE int32_t size() const { return m_size; } SIMD_FORCE_INLINE const T& at(int32_t n) const { btAssert(n >= 0); btAssert(n < size()); return m_data[n]; } SIMD_FORCE_INLINE T& at(int32_t n) { btAssert(n >= 0); btAssert(n < size()); return m_data[n]; } SIMD_FORCE_INLINE const T& operator[](int32_t n) const { btAssert(n >= 0); btAssert(n < size()); return m_data[n]; } SIMD_FORCE_INLINE T& operator[](int32_t n) { btAssert(n >= 0); btAssert(n < size()); return m_data[n]; } ///clear the array, deallocated memory. Generally it is better to use array.resize(0), to reduce performance overhead of run-time memory (de)allocations. SIMD_FORCE_INLINE void clear() { destroy(0, size()); deallocate(); init(); } SIMD_FORCE_INLINE void pop_back() { btAssert(m_size > 0); m_size--; m_data[m_size].~T(); } ///resize changes the number of elements in the array. If the new size is larger, the new elements will be constructed using the optional second argument. ///when the new number of elements is smaller, the destructor will be called, but memory will not be freed, to reduce performance overhead of run-time memory (de)allocations. 
SIMD_FORCE_INLINE void resize(int32_t newsize, const T& fillData = T()) { int32_t curSize = size(); if (newsize < curSize) { for (int32_t i = newsize; i < curSize; i++) { m_data[i].~T(); } } else { if (newsize > size()) { reserve(newsize); } #ifdef BT_USE_PLACEMENT_NEW for (int32_t i = curSize; i < newsize; i++) { new (&m_data[i]) T(fillData); } #endif //BT_USE_PLACEMENT_NEW } m_size = newsize; } SIMD_FORCE_INLINE T& expandNonInitializing() { int32_t sz = size(); if (sz == capacity()) { reserve(allocSize(size())); } m_size++; return m_data[sz]; } SIMD_FORCE_INLINE T& expand(const T& fillValue = T()) { int32_t sz = size(); if (sz == capacity()) { reserve(allocSize(size())); } m_size++; #ifdef BT_USE_PLACEMENT_NEW new (&m_data[sz]) T(fillValue); //use the in-place new (not really allocating heap memory) #endif return m_data[sz]; } SIMD_FORCE_INLINE void push_back(const T& _Val) { int32_t sz = size(); if (sz == capacity()) { reserve(allocSize(size())); } #ifdef BT_USE_PLACEMENT_NEW new (&m_data[m_size]) T(_Val); #else m_data[size()] = _Val; #endif //BT_USE_PLACEMENT_NEW m_size++; } /// return the pre-allocated (reserved) elements, this is at least as large as the total number of elements,see size() and reserve() SIMD_FORCE_INLINE int32_t capacity() const { return m_capacity; } SIMD_FORCE_INLINE void reserve(int32_t _Count) { // determine new minimum length of allocated storage if (capacity() < _Count) { // not enough room, reallocate T* s = (T*)allocate(_Count); copy(0, size(), s); destroy(0, size()); deallocate(); //PCK: added this line m_ownsMemory = true; m_data = s; m_capacity = _Count; } } class less { public: bool operator()(const T& a, const T& b) { return (a < b); } }; template <typename L> void quickSortInternal(const L& CompareFunc, int32_t lo, int32_t hi) { // lo is the lower index, hi is the upper index // of the region of array a that is to be sorted int32_t i = lo, j = hi; T x = m_data[(lo + hi) / 2]; // partition do { while (CompareFunc(m_data[i], x)) i++; while (CompareFunc(x, m_data[j])) j--; if (i <= j) { swap(i, j); i++; j--; } } while (i <= j); // recursion if (lo < j) quickSortInternal(CompareFunc, lo, j); if (i < hi) quickSortInternal(CompareFunc, i, hi); } template <typename L> void quickSort(const L& CompareFunc) { //don't sort 0 or 1 elements if (size() > 1) { quickSortInternal(CompareFunc, 0, size() - 1); } } ///heap sort from http://www.csse.monash.edu.au/~lloyd/tildeAlgDS/Sort/Heap/ template <typename L> void downHeap(T* pArr, int32_t k, int32_t n, const L& CompareFunc) { /* PRE: a[k+1..N] is a heap */ /* POST: a[k..N] is a heap */ T temp = pArr[k - 1]; /* k has child(s) */ while (k <= n / 2) { int32_t child = 2 * k; if ((child < n) && CompareFunc(pArr[child - 1], pArr[child])) { child++; } /* pick larger child */ if (CompareFunc(temp, pArr[child - 1])) { /* move child up */ pArr[k - 1] = pArr[child - 1]; k = child; } else { break; } } pArr[k - 1] = temp; } /*downHeap*/ void swap(int32_t index0, int32_t index1) { #ifdef BT_USE_MEMCPY char temp[sizeof(T)]; memcpy(temp, &m_data[index0], sizeof(T)); memcpy(&m_data[index0], &m_data[index1], sizeof(T)); memcpy(&m_data[index1], temp, sizeof(T)); #else T temp = m_data[index0]; m_data[index0] = m_data[index1]; m_data[index1] = temp; #endif //BT_USE_PLACEMENT_NEW } template <typename L> void heapSort(const L& CompareFunc) { /* sort a[0..N-1], N.B. 
0 to N-1 */ int32_t k; int32_t n = m_size; for (k = n / 2; k > 0; k--) { downHeap(m_data, k, n, CompareFunc); } /* a[1..N] is now a heap */ while (n >= 1) { swap(0, n - 1); /* largest of a[0..n-1] */ n = n - 1; /* restore a[1..i-1] heap */ downHeap(m_data, 1, n, CompareFunc); } } ///non-recursive binary search, assumes sorted array int32_t findBinarySearch(const T& key) const { int32_t first = 0; int32_t last = size() - 1; //assume sorted array while (first <= last) { int32_t mid = (first + last) / 2; // compute mid point. if (key > m_data[mid]) first = mid + 1; // repeat search in top half. else if (key < m_data[mid]) last = mid - 1; // repeat search in bottom half. else return mid; // found it. return position ///// } return size(); // failed to find key } int32_t findLinearSearch(const T& key) const { int32_t index = size(); int32_t i; for (i = 0; i < size(); i++) { if (m_data[i] == key) { index = i; break; } } return index; } void remove(const T& key) { int32_t findIndex = findLinearSearch(key); if (findIndex < size()) { swap(findIndex, size() - 1); pop_back(); } } //PCK: whole function void initializeFromBuffer(void* buffer, int32_t size, int32_t capacity) { clear(); m_ownsMemory = false; m_data = (T*)buffer; m_size = size; m_capacity = capacity; } void copyFromArray(const btAlignedObjectArray& otherArray) { int32_t otherSize = otherArray.size(); resize(otherSize); otherArray.copy(0, otherSize, m_data); } }; #endif //BT_OBJECT_ARRAY__
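
Editorial usage sketch, not part of the original header: common operations on btAlignedObjectArray, assuming btScalar.h and btAlignedAllocator.h are available as included above. The IntAscending comparator is a hypothetical helper added for the example; its operator() is const so it can be invoked through the const reference that quickSortInternal() takes.

#include <cstdint>

// Hypothetical comparator for the example (not part of the original header).
struct IntAscending
{
    bool operator()(const int32_t& a, const int32_t& b) const { return a < b; }
};

static void alignedArraySketch()
{
    btAlignedObjectArray<int32_t> values;
    values.reserve(8);                        // pre-allocate to avoid repeated reallocation

    const int32_t raw[] = { 5, 1, 4, 2, 3 };
    for (int32_t v : raw)
    {
        values.push_back(v);
    }

    values.quickSort(IntAscending());         // ascending in-place sort

    const int32_t found   = values.findBinarySearch(4);   // index into the sorted array
    const int32_t missing = values.findBinarySearch(42);  // equals values.size() when absent

    values.remove(2);                          // swap-with-last removal; element order is not preserved

    (void)found; (void)missing;
}
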
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdICHull.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_ICHULL_H #define VHACD_ICHULL_H #include "vhacdManifoldMesh.h" #include "vhacdVector.h" namespace VHACD { //! Incremental Convex Hull algorithm (cf. http://cs.smith.edu/~orourke/books/ftp.html ). enum ICHullError { ICHullErrorOK = 0, ICHullErrorCoplanarPoints, ICHullErrorNoVolume, ICHullErrorInconsistent, ICHullErrorNotEnoughPoints }; class ICHull { public: static const double sc_eps; //! bool IsFlat() { return m_isFlat; } //! Returns the computed mesh TMMesh& GetMesh() { return m_mesh; } //! Add one point to the convex-hull bool AddPoint(const Vec3<double>& point) { return AddPoints(&point, 1); } //! Add one point to the convex-hull bool AddPoint(const Vec3<double>& point, int32_t id); //! Add points to the convex-hull bool AddPoints(const Vec3<double>* points, size_t nPoints); //! ICHullError Process(); //! ICHullError Process(const uint32_t nPointsCH, const double minVolume = 0.0); //! bool IsInside(const Vec3<double>& pt0, const double eps = 0.0); //! const ICHull& operator=(ICHull& rhs); //! Constructor ICHull(); //! Destructor ~ICHull(void){}; private: //! DoubleTriangle builds the initial double triangle. It first finds 3 noncollinear points and makes two faces out of them, in opposite order. It then finds a fourth point that is not coplanar with that face. The vertices are stored in the face structure in counterclockwise order so that the volume between the face and the point is negative. Lastly, the 3 newfaces to the fourth point are constructed and the data structures are cleaned up. ICHullError DoubleTriangle(); //! MakeFace creates a new face structure from three vertices (in ccw order). It returns a pointer to the face. CircularListElement<TMMTriangle>* MakeFace(CircularListElement<TMMVertex>* v0, CircularListElement<TMMVertex>* v1, CircularListElement<TMMVertex>* v2, CircularListElement<TMMTriangle>* fold); //! CircularListElement<TMMTriangle>* MakeConeFace(CircularListElement<TMMEdge>* e, CircularListElement<TMMVertex>* v); //! bool ProcessPoint(); //! 
bool ComputePointVolume(double& totalVolume, bool markVisibleFaces); //! bool FindMaxVolumePoint(const double minVolume = 0.0); //! bool CleanEdges(); //! bool CleanVertices(uint32_t& addedPoints); //! bool CleanTriangles(); //! bool CleanUp(uint32_t& addedPoints); //! bool MakeCCW(CircularListElement<TMMTriangle>* f, CircularListElement<TMMEdge>* e, CircularListElement<TMMVertex>* v); void Clear(); private: static const int32_t sc_dummyIndex; TMMesh m_mesh; SArray<CircularListElement<TMMEdge>*> m_edgesToDelete; SArray<CircularListElement<TMMEdge>*> m_edgesToUpdate; SArray<CircularListElement<TMMTriangle>*> m_trianglesToDelete; Vec3<double> m_normal; bool m_isFlat; ICHull(const ICHull& rhs); }; } #endif // VHACD_ICHULL_H
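
Editorial usage sketch, not part of the original header: incrementally building a convex hull from a small point cloud with ICHull. The Vec3<double>(x, y, z) constructor is assumed to be provided by vhacdVector.h (included above); error handling is reduced to a single success check.

#include <cstdint>

static void ichullSketch()
{
    VHACD::ICHull hull;

    // Feed the eight corners of a unit cube, tagging each point with an id.
    for (int32_t i = 0; i < 8; ++i)
    {
        hull.AddPoint(VHACD::Vec3<double>(double(i & 1), double((i >> 1) & 1), double((i >> 2) & 1)), i);
    }

    // Run the incremental construction; ICHullErrorOK signals success.
    if (hull.Process() == VHACD::ICHullErrorOK)
    {
        VHACD::TMMesh& mesh = hull.GetMesh();   // triangulated manifold mesh of the hull
        // The cube's center should be reported as inside the finished hull.
        const bool inside = hull.IsInside(VHACD::Vec3<double>(0.5, 0.5, 0.5));
        (void)mesh; (void)inside;
    }
}
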
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/public/VHACD.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_H #define VHACD_H #define VHACD_VERSION_MAJOR 2 #define VHACD_VERSION_MINOR 3 // Changes for version 2.3 // // m_gamma : Has been removed. This used to control the error metric to merge convex hulls. Now it uses the 'm_maxConvexHulls' value instead. // m_maxConvexHulls : This is the maximum number of convex hulls to produce from the merge operation; replaces 'm_gamma'. // // Note that decomposition depth is no longer a user-provided value. It is now derived from the // maximum number of hulls requested. // // As a convenience to the user, each convex hull produced now includes the volume of the hull as well as its center. // // This version supports a convenience method to automatically make V-HACD run asynchronously in a background thread. // To get a fully asynchronous version, call 'CreateVHACD_ASYNC' instead of 'CreateVHACD'. You get the same interface; however, // computing convex hulls is no longer a blocking operation. All callback messages are still returned // in the application's thread so you don't need to worry about mutex locks or anything in that case. // To tell if the operation is complete, the application should call 'IsReady'. This will return true if // the last approximation operation is complete and will dispatch any pending messages. // If you call 'Compute' while a previous operation is still running, it will automatically cancel the last request // and begin a new one. To cancel a currently running approximation just call 'Cancel'.
#include <stdint.h> namespace VHACD { class IVHACD { public: class IUserCallback { public: virtual ~IUserCallback(){}; virtual void Update(const double overallProgress, const double stageProgress, const double operationProgress, const char* const stage, const char* const operation) = 0; }; class IUserLogger { public: virtual ~IUserLogger(){}; virtual void Log(const char* const msg) = 0; }; class ConvexHull { public: double* m_points; uint32_t* m_triangles; uint32_t m_nPoints; uint32_t m_nTriangles; double m_volume; double m_center[3]; }; class Parameters { public: Parameters(void) { Init(); } void Init(void) { m_resolution = 100000; m_concavity = 0.001; m_planeDownsampling = 4; m_convexhullDownsampling = 4; m_alpha = 0.05; m_beta = 0.05; m_pca = 0; m_mode = 0; // 0: voxel-based (recommended), 1: tetrahedron-based m_maxNumVerticesPerCH = 64; m_minVolumePerCH = 0.0001; m_callback = 0; m_logger = 0; m_convexhullApproximation = true; m_oclAcceleration = true; m_maxConvexHulls = 1024; m_projectHullVertices = true; // This will project the output convex hull vertices onto the original source mesh to increase the floating point accuracy of the results } double m_concavity; double m_alpha; double m_beta; double m_minVolumePerCH; IUserCallback* m_callback; IUserLogger* m_logger; uint32_t m_resolution; uint32_t m_maxNumVerticesPerCH; uint32_t m_planeDownsampling; uint32_t m_convexhullDownsampling; uint32_t m_pca; uint32_t m_mode; uint32_t m_convexhullApproximation; uint32_t m_oclAcceleration; uint32_t m_maxConvexHulls; bool m_projectHullVertices; }; class Constraint { public: uint32_t mHullA; // Convex Hull A index uint32_t mHullB; // Convex Hull B index double mConstraintPoint[3]; // The point of intersection between the two convex hulls }; virtual void Cancel() = 0; virtual bool Compute(const float* const points, const uint32_t countPoints, const uint32_t* const triangles, const uint32_t countTriangles, const Parameters& params) = 0; virtual bool Compute(const double* const points, const uint32_t countPoints, const uint32_t* const triangles, const uint32_t countTriangles, const Parameters& params) = 0; virtual uint32_t GetNConvexHulls() const = 0; virtual void GetConvexHull(const uint32_t index, ConvexHull& ch) const = 0; virtual void Clean(void) = 0; // release internally allocated memory virtual void Release(void) = 0; // release IVHACD virtual bool OCLInit(void* const oclDevice, IUserLogger* const logger = 0) = 0; virtual bool OCLRelease(IUserLogger* const logger = 0) = 0; // Will compute the center of mass of the convex hull decomposition results and return it // in 'centerOfMass'. Returns false if the center of mass could not be computed. virtual bool ComputeCenterOfMass(double centerOfMass[3]) const = 0; // Will analyze the HACD results and compute the constraints solutions. // It will analyze the point at which any two convex hulls touch each other and // return the total number of constraint pairs found virtual uint32_t ComputeConstraints(void) = 0; // Returns a pointer to the constraint index; null if the index is not valid or // the user did not previously call 'ComputeConstraints' virtual const Constraint *GetConstraint(uint32_t index) const = 0; // In synchronous mode (non-multi-threaded) the state is always 'ready' // In asynchronous mode, this returns true if the background thread is not still actively computing // a new solution. In an asynchronous config the 'IsReady' call will report any update or log // messages in the caller's current thread. 
virtual bool IsReady(void) const { return true; } protected: virtual ~IVHACD(void) {} }; IVHACD* CreateVHACD(void); IVHACD* CreateVHACD_ASYNC(void); } #endif // VHACD_H
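
Editorial usage sketch, not part of the original header: the typical synchronous decomposition flow described by the version notes and interface above. The input mesh arrays are placeholders; per those notes, the asynchronous variant is obtained by swapping CreateVHACD() for CreateVHACD_ASYNC() and polling IsReady() before reading results.

#include <cstdint>
#include <vector>

static void vhacdSketch(const std::vector<float>& points,        // xyz triples
                        const std::vector<uint32_t>& triangles)  // index triples
{
    VHACD::IVHACD* decomposer = VHACD::CreateVHACD();

    VHACD::IVHACD::Parameters params;        // Init() is called by the constructor
    params.m_maxConvexHulls = 16;            // upper bound on hulls produced by the merge step
    params.m_resolution = 100000;            // voxelization resolution (the Init() default)

    const bool ok = decomposer->Compute(points.data(), uint32_t(points.size() / 3),
                                        triangles.data(), uint32_t(triangles.size() / 3), params);
    if (ok)
    {
        for (uint32_t i = 0; i < decomposer->GetNConvexHulls(); ++i)
        {
            VHACD::IVHACD::ConvexHull hull;
            decomposer->GetConvexHull(i, hull);
            // hull.m_points (m_nPoints xyz doubles) and hull.m_triangles reference memory
            // owned by the decomposer; copy them out here before calling Clean()/Release().
        }
    }

    decomposer->Clean();     // release internally allocated memory
    decomposer->Release();   // destroy the interface
}
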
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/assetutils/NvBlastExtAssetUtils.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtAssetUtils.h" #include "NvBlast.h" #include "NvBlastIndexFns.h" #include "NvBlastMemory.h" #include "NvBlastGlobals.h" #include "math.h" using namespace Nv::Blast; /** Fill the chunk and bond descriptors from an asset. \param[out] chunkDescsWritten the number of chunk descriptors written to chunkDescs \param[out] bondDescsWritten the number of bond descriptors written to bondDescs \param[out] chunkDescs user-supplied buffer of NvBlastChunkDesc. Size must be at least NvBlastAssetGetChunkCount(asset, logFn) \param[out] bondDescs user-supplied buffer of NvBlastBondDesc. 
Size must be at least NvBlastAssetGetBondCount(asset, logFn) \param[in] asset asset from which to extract descriptors */ static void fillChunkAndBondDescriptorsFromAsset ( uint32_t& chunkDescsWritten, uint32_t& bondDescsWritten, NvBlastChunkDesc* chunkDescs, NvBlastBondDesc* bondDescs, const NvBlastAsset* asset ) { chunkDescsWritten = 0; bondDescsWritten = 0; // Chunk descs const uint32_t assetChunkCount = NvBlastAssetGetChunkCount(asset, logLL); const NvBlastChunk* assetChunk = NvBlastAssetGetChunks(asset, logLL); for (uint32_t i = 0; i < assetChunkCount; ++i, ++assetChunk) { NvBlastChunkDesc& chunkDesc = chunkDescs[chunkDescsWritten++]; memcpy(chunkDesc.centroid, assetChunk->centroid, sizeof(float) * 3); chunkDesc.volume = assetChunk->volume; chunkDesc.parentChunkDescIndex = assetChunk->parentChunkIndex; chunkDesc.flags = 0; // To be filled in below chunkDesc.userData = assetChunk->userData; } // Bond descs const uint32_t assetBondCount = NvBlastAssetGetBondCount(asset, logLL); const NvBlastBond* assetBond = NvBlastAssetGetBonds(asset, logLL); for (uint32_t i = 0; i < assetBondCount; ++i, ++assetBond) { NvBlastBondDesc& bondDesc = bondDescs[bondDescsWritten++]; memcpy(&bondDesc.bond, assetBond, sizeof(NvBlastBond)); } // Walk the graph and restore connection descriptors const NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(asset, logLL); for (uint32_t i = 0; i < graph.nodeCount; ++i) { const int32_t currentChunk = graph.chunkIndices[i]; if (isInvalidIndex(currentChunk)) { continue; } chunkDescs[currentChunk].flags |= NvBlastChunkDesc::SupportFlag; // Filling in chunk flags here for (uint32_t j = graph.adjacencyPartition[i]; j < graph.adjacencyPartition[i + 1]; ++j) { NvBlastBondDesc& bondDesc = bondDescs[graph.adjacentBondIndices[j]]; bondDesc.chunkIndices[0] = currentChunk; const uint32_t adjacentChunkIndex = graph.chunkIndices[graph.adjacentNodeIndices[j]]; bondDesc.chunkIndices[1] = adjacentChunkIndex; } } } /** Scale a 3-vector v in-place. \param[in,out] v The vector to scale. \param[in] s The scale. Represents the diagonal elements of a diagonal matrix. The result will be v <- s*v. */ static inline void scale(NvcVec3& v, const NvcVec3& s) { v.x *= s.x; v.y *= s.y; v.z *= s.z; } /** Rotate a 3-vector v in-place using a rotation represented by a quaternion q. \param[in,out] v The vector to rotate. \param[in] q The quaternion representation the rotation. The format of q is { x, y, z, w } where (x,y,z) is the vector part and w is the scalar part. The quaternion q MUST be normalized. */ static inline void rotate(NvcVec3& v, const NvcQuat& q) { const float vx = 2.0f * v.x; const float vy = 2.0f * v.y; const float vz = 2.0f * v.z; const float w2 = q.w * q.w - 0.5f; const float dot2 = (q.x * vx + q.y * vy + q.z * vz); v.x = vx * w2 + (q.y * vz - q.z * vy) * q.w + q.x * dot2; v.y = vy * w2 + (q.z * vx - q.x * vz) * q.w + q.y * dot2; v.z = vz * w2 + (q.x * vy - q.y * vx) * q.w + q.z * dot2; } /** Translate a 3-vector v in-place. \param[in,out] v The vector to translate. \param[in] t The translation. The result will be v <- v+t. 
*/ static inline void translate(NvcVec3& v, const NvcVec3& t) { v.x += t.x; v.y += t.y; v.z += t.z; } NvBlastAsset* NvBlastExtAssetUtilsAddExternalBonds ( const NvBlastAsset* asset, const uint32_t* externalBoundChunks, uint32_t externalBoundChunkCount, const NvcVec3* bondDirections, const uint32_t* bondUserData ) { const uint32_t chunkCount = NvBlastAssetGetChunkCount(asset, logLL); const uint32_t oldBondCount = NvBlastAssetGetBondCount(asset, logLL); const uint32_t newBondCount = oldBondCount + externalBoundChunkCount; NvBlastChunkDesc* chunkDescs = static_cast<NvBlastChunkDesc*>(NVBLAST_ALLOC(chunkCount * sizeof(NvBlastChunkDesc))); NvBlastBondDesc* bondDescs = static_cast<NvBlastBondDesc*>(NVBLAST_ALLOC(newBondCount * sizeof(NvBlastBondDesc))); // Create chunk descs uint32_t chunkDescsWritten; uint32_t bondDescsWritten; fillChunkAndBondDescriptorsFromAsset(chunkDescsWritten, bondDescsWritten, chunkDescs, bondDescs, asset); // Add world bonds uint32_t bondCount = oldBondCount; for (uint32_t i = 0; i < externalBoundChunkCount; i++) { NvBlastBondDesc& bondDesc = bondDescs[bondCount++]; const uint32_t chunkIndex = externalBoundChunks[i]; bondDesc.chunkIndices[0] = chunkIndex; bondDesc.chunkIndices[1] = invalidIndex<uint32_t>(); memcpy(&bondDesc.bond.normal, bondDirections + i, sizeof(float) * 3); bondDesc.bond.area = 1.0f; // Should be set by user memcpy(&bondDesc.bond.centroid, chunkDescs[chunkIndex].centroid, sizeof(float) * 3); bondDesc.bond.userData = bondUserData != nullptr ? bondUserData[i] : 0; } // Create new asset NvBlastAssetDesc assetDesc; assetDesc.chunkCount = chunkCount; assetDesc.chunkDescs = chunkDescs; assetDesc.bondCount = bondCount; assetDesc.bondDescs = bondDescs; void* scratch = NVBLAST_ALLOC(NvBlastGetRequiredScratchForCreateAsset(&assetDesc, logLL)); NvBlastAsset* newAsset = NvBlastCreateAsset(NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&assetDesc, logLL)), &assetDesc, scratch, logLL); // Free buffers NVBLAST_FREE(scratch); NVBLAST_FREE(bondDescs); NVBLAST_FREE(chunkDescs); return newAsset; } NvBlastAssetDesc NvBlastExtAssetUtilsCreateDesc(const NvBlastAsset* asset) { return NvBlastExtAssetUtilsMergeAssets(&asset, nullptr, nullptr, nullptr, 1, nullptr, 0, nullptr, nullptr, 0); } NvBlastAssetDesc NvBlastExtAssetUtilsMergeAssets ( const NvBlastAsset** components, const NvcVec3* scales, const NvcQuat* rotations, const NvcVec3* translations, uint32_t componentCount, const NvBlastExtAssetUtilsBondDesc* newBondDescs, uint32_t newBondCount, uint32_t* chunkIndexOffsets, uint32_t* chunkReorderMap, uint32_t chunkReorderMapSize ) { // Count the total number of chunks and bonds in the new asset uint32_t totalChunkCount = 0; uint32_t totalBondCount = newBondCount; for (uint32_t c = 0; c < componentCount; ++c) { totalChunkCount += NvBlastAssetGetChunkCount(components[c], logLL); totalBondCount += NvBlastAssetGetBondCount(components[c], logLL); } // Allocate space for chunk and bond descriptors NvBlastChunkDesc* chunkDescs = static_cast<NvBlastChunkDesc*>(NVBLAST_ALLOC(totalChunkCount * sizeof(NvBlastChunkDesc))); NvBlastBondDesc* bondDescs = static_cast<NvBlastBondDesc*>(NVBLAST_ALLOC(totalBondCount * sizeof(NvBlastBondDesc))); // Create a list of chunk index offsets per component uint32_t* offsetStackAlloc = static_cast<uint32_t*>(NvBlastAlloca(componentCount * sizeof(uint32_t))); if (chunkIndexOffsets == nullptr) { chunkIndexOffsets = offsetStackAlloc; // Use local stack alloc if no array is provided } // Fill the chunk and bond descriptors from the components uint32_t chunkCount = 
0; uint32_t bondCount = 0; for (uint32_t c = 0; c < componentCount; ++c) { chunkIndexOffsets[c] = chunkCount; uint32_t componentChunkCount; uint32_t componentBondCount; fillChunkAndBondDescriptorsFromAsset(componentChunkCount, componentBondCount, chunkDescs + chunkCount, bondDescs + bondCount, components[c]); // Fix chunks' parent indices for (uint32_t i = 0; i < componentChunkCount; ++i) { if (!isInvalidIndex(chunkDescs[chunkCount + i].parentChunkDescIndex)) { chunkDescs[chunkCount + i].parentChunkDescIndex += chunkCount; } } // Fix bonds' chunk indices for (uint32_t i = 0; i < componentBondCount; ++i) { NvBlastBondDesc& bondDesc = bondDescs[bondCount + i]; for (int j = 0; j < 2; ++j) { if (!isInvalidIndex(bondDesc.chunkIndices[j])) { bondDesc.chunkIndices[j] += chunkCount; } } } // Transform geometric data if (scales != nullptr) { const NvcVec3& S = scales[c]; NvcVec3 cofS = { S.y * S.z, S.z * S.x, S.x * S.y }; float absDetS = S.x * S.y * S.z; const float sgnDetS = absDetS < 0.0f ? -1.0f : 1.0f; absDetS *= sgnDetS; for (uint32_t i = 0; i < componentChunkCount; ++i) { scale(reinterpret_cast<NvcVec3&>(chunkDescs[chunkCount + i].centroid), S); chunkDescs[chunkCount + i].volume *= absDetS; } for (uint32_t i = 0; i < componentBondCount; ++i) { NvBlastBond& bond = bondDescs[bondCount + i].bond; scale(reinterpret_cast<NvcVec3&>(bond.normal), cofS); float renorm = sqrtf(bond.normal[0] * bond.normal[0] + bond.normal[1] * bond.normal[1] + bond.normal[2] * bond.normal[2]); bond.area *= renorm; if (renorm != 0) { renorm = sgnDetS / renorm; bond.normal[0] *= renorm; bond.normal[1] *= renorm; bond.normal[2] *= renorm; } scale(reinterpret_cast<NvcVec3&>(bond.centroid), S); } } if (rotations != nullptr) { for (uint32_t i = 0; i < componentChunkCount; ++i) { rotate(reinterpret_cast<NvcVec3&>(chunkDescs[chunkCount + i].centroid), rotations[c]); } for (uint32_t i = 0; i < componentBondCount; ++i) { NvBlastBond& bond = bondDescs[bondCount + i].bond; rotate(reinterpret_cast<NvcVec3&>(bond.normal), rotations[c]); // Normal can be transformed this way since we aren't scaling rotate(reinterpret_cast<NvcVec3&>(bond.centroid), rotations[c]); } } if (translations != nullptr) { for (uint32_t i = 0; i < componentChunkCount; ++i) { translate(reinterpret_cast<NvcVec3&>(chunkDescs[chunkCount + i].centroid), translations[c]); } for (uint32_t i = 0; i < componentBondCount; ++i) { translate(reinterpret_cast<NvcVec3&>(bondDescs[bondCount + i].bond.centroid), translations[c]); } } chunkCount += componentChunkCount; bondCount += componentBondCount; } // Fill the bond descriptors from the new bond descs for (uint32_t b = 0; b < newBondCount; ++b) { const NvBlastExtAssetUtilsBondDesc& newBondDesc = newBondDescs[b]; NvBlastBondDesc& bondDesc = bondDescs[bondCount++]; memcpy(&bondDesc.bond, &newBondDesc.bond, sizeof(NvBlastBond)); bondDesc.chunkIndices[0] = !isInvalidIndex(newBondDesc.chunkIndices[0]) ? newBondDesc.chunkIndices[0] + chunkIndexOffsets[newBondDesc.componentIndices[0]] : invalidIndex<uint32_t>(); bondDesc.chunkIndices[1] = !isInvalidIndex(newBondDesc.chunkIndices[1]) ? 
newBondDesc.chunkIndices[1] + chunkIndexOffsets[newBondDesc.componentIndices[1]] : invalidIndex<uint32_t>(); } // Create new asset descriptor NvBlastAssetDesc assetDesc; assetDesc.chunkCount = chunkCount; assetDesc.chunkDescs = chunkDescs; assetDesc.bondCount = bondCount; assetDesc.bondDescs = bondDescs; // Massage the descriptors so that they are valid for scratch creation void* scratch = NVBLAST_ALLOC(chunkCount * sizeof(NvBlastChunkDesc)); // Enough for NvBlastEnsureAssetExactSupportCoverage and NvBlastReorderAssetDescChunks NvBlastEnsureAssetExactSupportCoverage(chunkDescs, chunkCount, scratch, logLL); if (chunkReorderMapSize < chunkCount) { if (chunkReorderMap != nullptr) { // Chunk reorder map is not large enough. Fill it with invalid indices and don't use it. memset(chunkReorderMap, 0xFF, chunkReorderMapSize * sizeof(uint32_t)); NVBLAST_LOG_WARNING("NvBlastExtAssetUtilsMergeAssets: insufficient chunkReorderMap array passed in. NvBlastReorderAssetDescChunks will not be used."); } chunkReorderMap = nullptr; // Don't use } if (chunkReorderMap != nullptr) { NvBlastReorderAssetDescChunks(chunkDescs, chunkCount, bondDescs, bondCount, chunkReorderMap, true, scratch, logLL); } NVBLAST_FREE(scratch); return assetDesc; } /** Multiply a 3-vector v in-place by a scalar value. \param[in,out] v The vector to multiply. \param[in] value The scalar multiplier. */ static inline void multiply(NvcVec3& v, float value) { v.x *= value; v.y *= value; v.z *= value; } /** Get Vec3 length */ static inline float length(const NvcVec3& p) { return sqrtf(p.x * p.x + p.y * p.y + p.z * p.z); } /** Transform a point in-place: scale, rotate, then translate \param[in,out] p The point to transform. \param[in] S The diagonal elements of a diagonal scale matrix. \param[in] R A quaternion representing the rotation. Must be normalized. \param[in] T The translation vector. */ static inline void transform(NvcVec3& p, const NvcVec3& S, const NvcQuat& R, const NvcVec3& T) { scale(p, S); rotate(p, R); translate(p, T); } /** Transform a vector in-place: scale, then rotate \param[in,out] v The vector to transform. \param[in] S The diagonal elements of a diagonal scale matrix. \param[in] R A quaternion representing the rotation. Must be normalized. */ static inline void transform(NvcVec3& v, const NvcVec3& S, const NvcQuat& R) { scale(v, S); rotate(v, R); } void NvBlastExtAssetTransformInPlace(NvBlastAsset* asset, const NvcVec3* scaling, const NvcQuat* rotation, const NvcVec3* translation) { // Local copies of scaling (S), rotation (R), and translation (T) NvcVec3 S = { 1, 1, 1 }; NvcQuat R = { 0, 0, 0, 1 }; NvcVec3 T = { 0, 0, 0 }; NvcVec3 cofS = { 1, 1, 1 }; float absDetS = 1; float sgnDetS = 1; { if (rotation) { R = *rotation; } if (scaling) { S = *scaling; cofS.x = S.y * S.z; cofS.y = S.z * S.x; cofS.z = S.x * S.y; absDetS = S.x * S.y * S.z; sgnDetS = absDetS < 0.0f ?
-1.0f : 1.0f; absDetS *= sgnDetS; } if (translation) { T = *translation; } } // Chunk descs const uint32_t assetChunkCount = NvBlastAssetGetChunkCount(asset, logLL); NvBlastChunk* assetChunk = const_cast<NvBlastChunk*>(NvBlastAssetGetChunks(asset, logLL)); for (uint32_t i = 0; i < assetChunkCount; ++i, ++assetChunk) { transform(reinterpret_cast<NvcVec3&>(assetChunk->centroid), S, R, T); assetChunk->volume *= absDetS; // Use |detS| to keep the volume positive } // Bond descs const uint32_t assetBondCount = NvBlastAssetGetBondCount(asset, logLL); NvBlastBond* assetBond = const_cast<NvBlastBond*>(NvBlastAssetGetBonds(asset, logLL)); for (uint32_t i = 0; i < assetBondCount; ++i, ++assetBond) { transform(reinterpret_cast<NvcVec3&>(assetBond->centroid), S, R, T); NvcVec3& normal = reinterpret_cast<NvcVec3&>(assetBond->normal); transform(normal, cofS, R); const float l = length(normal); assetBond->area *= l; multiply(normal, l > 0.f ? sgnDetS / l : 1.f); } }
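
Editorial usage sketch, not part of the original file: typical calls into the asset-utility functions defined above, assuming it is compiled against the NvBlastExtAssetUtils header. The input 'asset' is assumed to be a previously created NvBlastAsset, and chunk index 1 standing in for a support chunk to bind to the world is an illustrative assumption.

static NvBlastAsset* assetUtilsSketch(NvBlastAsset* asset)
{
    // Uniformly scale the asset up, rotate it 90 degrees about Z, and shift it along X.
    const NvcVec3 scale = { 2.0f, 2.0f, 2.0f };
    const NvcQuat rotation = { 0.0f, 0.0f, 0.70710678f, 0.70710678f };   // {x,y,z,w}, must be normalized
    const NvcVec3 translation = { 1.0f, 0.0f, 0.0f };
    NvBlastExtAssetTransformInPlace(asset, &scale, &rotation, &translation);

    // Bind one chunk to the world with an upward-facing external ("world") bond.
    const uint32_t boundChunks[] = { 1 };
    const NvcVec3 bondDirections[] = { { 0.0f, 0.0f, 1.0f } };
    NvBlastAsset* newAsset =
        NvBlastExtAssetUtilsAddExternalBonds(asset, boundChunks, 1, bondDirections, nullptr);

    // Notes grounded in the implementation above: the external bond's area is set to 1.0f and is
    // meant to be set by the user, and the new asset's memory comes from NVBLAST_ALLOC, so the
    // caller owns both the original and the new asset.
    return newAsset;
}
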
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/stress/NvBlastExtStressSolver.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtStressSolver.h" #include "NvBlast.h" #include "NvBlastGlobals.h" #include "NvBlastArray.h" #include "NvBlastHashMap.h" #include "NvBlastHashSet.h" #include "NvBlastAssert.h" #include "NvBlastIndexFns.h" #include "NsFPU.h" #include "NvBlastNvSharedHelpers.h" #include "NvCMath.h" #include "stress.h" #include "buffer.h" #include "simd/simd_device_query.h" #include <algorithm> #define USE_SCALAR_IMPL 0 #define WARM_START 1 #define GRAPH_INTERGRIRY_CHECK 0 #if GRAPH_INTERGRIRY_CHECK #include <set> #endif namespace Nv { namespace Blast { using namespace nvidia; static_assert(sizeof(NvVec3) == sizeof(NvcVec3), "sizeof(NvVec3) must equal sizeof(NvcVec3)."); static_assert(offsetof(NvVec3, x) == offsetof(NvcVec3, x) && offsetof(NvVec3, y) == offsetof(NvcVec3, y) && offsetof(NvVec3, z) == offsetof(NvcVec3, z), "Elements of NvVec3 and NvcVec3 must have the same struct offset."); /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Conjugate Gradient Solver /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// class ConjugateGradientImpulseSolver { public: ConjugateGradientImpulseSolver(uint32_t nodeCount, uint32_t maxBondCount) { m_bonds.reserve(maxBondCount); m_impulses.reserve(maxBondCount); reset(nodeCount); } void getBondImpulses(uint32_t bond, NvVec3& impulseLinear, NvVec3& impulseAngular) const { NVBLAST_ASSERT(bond < m_impulses.size()); const AngLin6& f = m_impulses[bond]; *(NvcVec3*)&impulseAngular = f.ang; *(NvcVec3*)&impulseLinear = f.lin; } void getBondNodes(uint32_t bond, uint32_t& node0, uint32_t& node1) const { NVBLAST_ASSERT(bond < m_bonds.size()); const SolverBond& b = m_bonds[bond]; node0 = b.nodes[0]; node1 = b.nodes[1]; } uint32_t getBondCount() const { return m_bonds.size(); } uint32_t getNodeCount() const { return m_nodes.size(); } void setNodeMassInfo(uint32_t node, const NvVec3& CoM, float mass, float inertia) { 
NVBLAST_ASSERT(node < m_nodes.size()); SolverNodeS& n = m_nodes[node]; n.CoM = { CoM.x, CoM.y, CoM.z }; n.mass = std::max(mass, 0.0f); // No negative masses, but 0 is meaningful (== infinite) n.inertia = std::max(inertia, 0.0f); // Ditto for inertia m_forceColdStart = true; } void initialize() { StressProcessor::DataParams params; params.centerBonds = true; params.equalizeMasses = true; m_stressProcessor.prepare(m_nodes.begin(), m_nodes.size(), m_bonds.begin(), m_bonds.size(), params); } void setNodeVelocities(uint32_t node, const NvVec3& velocityLinear, const NvVec3& velocityAngular) { NVBLAST_ASSERT(node < m_velocities.size()); AngLin6& v = m_velocities[node]; v.ang = { velocityAngular.x, velocityAngular.y, velocityAngular.z }; v.lin = { velocityLinear.x, velocityLinear.y, velocityLinear.z }; m_inputsChanged = true; } uint32_t addBond(uint32_t node0, uint32_t node1, const NvVec3& bondCentroid) { SolverBond b; b.nodes[0] = node0; b.nodes[1] = node1; b.centroid = { bondCentroid.x, bondCentroid.y, bondCentroid.z }; m_bonds.pushBack(b); m_impulses.push_back({{0,0,0},{0,0,0}}); m_forceColdStart = true; return m_bonds.size() - 1; } void replaceWithLast(uint32_t bondIndex) { m_bonds.replaceWithLast(bondIndex); if ((size_t)bondIndex + 2 < m_impulses.size()) { m_impulses[bondIndex] = m_impulses.back(); m_impulses.resize(m_impulses.size() - 1); } m_stressProcessor.removeBond(bondIndex); } void reset(uint32_t nodeCount) { m_nodes.resize(nodeCount); memset(m_nodes.begin(), 0, sizeof(SolverNodeS)*nodeCount); m_velocities.resize(nodeCount); memset(m_velocities.data(), 0, sizeof(AngLin6)*nodeCount); clearBonds(); m_error_sq = {FLT_MAX, FLT_MAX}; m_converged = false; m_forceColdStart = true; m_inputsChanged = true; } void clearBonds() { m_bonds.clear(); m_impulses.resize(0); m_forceColdStart = true; } void solve(uint32_t iterationCount, bool warmStart = true) { StressProcessor::SolverParams params; params.maxIter = iterationCount; params.tolerance = 0.001f; params.warmStart = warmStart && !m_forceColdStart; m_converged = (m_stressProcessor.solve(m_impulses.data(), m_velocities.data(), params, &m_error_sq) >= 0); m_forceColdStart = false; m_inputsChanged = false; } bool calcError(float& linear, float& angular) const { linear = sqrtf(m_error_sq.lin); angular = sqrtf(m_error_sq.ang); return m_converged; } private: Array<SolverNodeS>::type m_nodes; Array<SolverBond>::type m_bonds; StressProcessor m_stressProcessor; POD_Buffer<AngLin6> m_velocities; POD_Buffer<AngLin6> m_impulses; AngLin6ErrorSq m_error_sq; bool m_converged; bool m_forceColdStart; bool m_inputsChanged; }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Graph Processor /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #if GRAPH_INTERGRIRY_CHECK #define CHECK_GRAPH_INTEGRITY checkGraphIntegrity() #else #define CHECK_GRAPH_INTEGRITY ((void)0) #endif class SupportGraphProcessor { public: struct BondData { uint32_t node0; uint32_t node1; uint32_t blastBondIndex; // linear stresses float stressNormal; // negative values represent compression pressure, positive represent tension float stressShear; // The normal used to compute stress values // Can be different than the bond normal if graph reduction is used // and multiple bonds are grouped together nvidia::NvVec3 normal; // Centroid used to compute node offsets, instead of assuming the bond is halfway between node positions. 
// This also allows the bonds to the world node to be drawn nvidia::NvVec3 centroid; }; struct NodeData { float mass; float volume; NvVec3 localPos; NvVec3 localVel; uint32_t solverNode; uint32_t neighborsCount; }; struct SolverNodeData { uint32_t supportNodesCount; NvVec3 localPos; union { float mass; int32_t indexShift; }; float volume; }; struct SolverBondData { InlineArray<uint32_t, 8>::type blastBondIndices; }; SupportGraphProcessor(uint32_t nodeCount, uint32_t maxBondCount) : m_solver(nodeCount, maxBondCount), m_nodesDirty(true), m_bondsDirty(true) { m_nodesData.resize(nodeCount); m_bondsData.reserve(maxBondCount); m_solverNodesData.resize(nodeCount); m_solverBondsData.reserve(maxBondCount); m_solverBondsMap.reserve(maxBondCount); m_blastBondIndexMap.resize(maxBondCount); memset(m_blastBondIndexMap.begin(), 0xFF, m_blastBondIndexMap.size() * sizeof(uint32_t)); resetVelocities(); } const NodeData& getNodeData(uint32_t node) const { return m_nodesData[node]; } const BondData& getBondData(uint32_t bond) const { return m_bondsData[bond]; } const SolverNodeData& getSolverNodeData(uint32_t node) const { return m_solverNodesData[node]; } const SolverBondData& getSolverBondData(uint32_t bond) const { return m_solverBondsData[bond]; } void getSolverInternalBondImpulses(uint32_t bond, NvVec3& impulseLinear, NvVec3& impulseAngular) const { m_solver.getBondImpulses(bond, impulseLinear, impulseAngular); } void getSolverInternalBondNodes(uint32_t bond, uint32_t& node0, uint32_t& node1) const { m_solver.getBondNodes(bond, node0, node1); } uint32_t getBondCount() const { return m_bondsData.size(); } uint32_t getNodeCount() const { return m_nodesData.size();; } uint32_t getSolverBondCount() const { return m_solverBondsData.size(); } uint32_t getSolverNodeCount() const { return m_solverNodesData.size();; } uint32_t getOverstressedBondCount() const { return m_overstressedBondCount; } void calcSolverBondStresses( uint32_t bondIdx, float bondArea, float nodeDist, const nvidia::NvVec3& bondNormal, float& stressNormal, float& stressShear) const { if (!canTakeDamage(bondArea)) { stressNormal = stressShear = 0.0f; return; } // impulseLinear in the direction of the bond normal is stressNormal, perpendicular is stressShear // ignore impulseAngular for now, not sure how to account for that // convert to pressure to factor out area NvVec3 impulseLinear, impulseAngular; getSolverInternalBondImpulses(bondIdx, impulseLinear, impulseAngular); const float normalComponentLinear = impulseLinear.dot(bondNormal); stressNormal = normalComponentLinear / bondArea; const float impulseLinearMagSqr = impulseLinear.magnitudeSquared(); stressShear = sqrtf(impulseLinearMagSqr - normalComponentLinear * normalComponentLinear) / bondArea; // impulseAngular in the direction of the bond normal is twist, perpendicular is bend // take abs() of the dot product because only the magnitude of the twist matters, not direction const float normalComponentAngular = abs(impulseAngular.dot(bondNormal)); const float twist = normalComponentAngular / bondArea; const float impulseAngularMagSqr = impulseAngular.magnitudeSquared(); const float bend = sqrtf(impulseAngularMagSqr - normalComponentAngular * normalComponentAngular) / bondArea; // interpret angular pressure as a composition of linear pressures // dividing by nodeDist for scaling const float twistContribution = twist * 2.0f / nodeDist; stressShear += twistContribution; const float bendContribution = bend * 2.0f / nodeDist; stressNormal += copysignf(bendContribution, stressNormal); } float 
mapStressToRange(float stress, float elasticLimit, float fatalLimit) const { if (stress < elasticLimit) { return 0.5f * stress / elasticLimit; } else { return fatalLimit > elasticLimit ? 0.5f + 0.5f * (stress - elasticLimit) / (fatalLimit - elasticLimit) : 1.0f; } } float getSolverBondStressPct(uint32_t bondIdx, const float* bondHealths, const ExtStressSolverSettings& settings, ExtStressSolver::DebugRenderMode mode) const { // sum up the stress of all underlying bonds involved in this stress solver bond float compressionStress, tensionStress, shearStress; float stress = -1.0f; const auto& blastBondIndices = m_solverBondsData[bondIdx].blastBondIndices; for (const auto blastBondIndex : blastBondIndices) { // only consider the stress values on bonds that are intact if (bondHealths[blastBondIndex] > 0.0f && getBondStress(blastBondIndex, compressionStress, tensionStress, shearStress)) { if (mode == ExtStressSolver::STRESS_PCT_COMPRESSION || mode == ExtStressSolver::STRESS_PCT_MAX) { compressionStress = mapStressToRange(compressionStress, settings.compressionElasticLimit, settings.compressionFatalLimit); stress = std::max(compressionStress, stress); } if (mode == ExtStressSolver::STRESS_PCT_TENSION || mode == ExtStressSolver::STRESS_PCT_MAX) { tensionStress = mapStressToRange(tensionStress, settings.tensionElasticLimit, settings.tensionFatalLimit); stress = std::max(tensionStress, stress); } if (mode == ExtStressSolver::STRESS_PCT_SHEAR || mode == ExtStressSolver::STRESS_PCT_MAX) { shearStress = mapStressToRange(shearStress, settings.shearElasticLimit, settings.shearFatalLimit); stress = std::max(shearStress, stress); } // all bonds in the group share the same stress values, no need to keep iterating break; } } // return a value < 0.0f if all bonds are broken return stress; } void setNodeInfo(uint32_t node, float mass, float volume, NvVec3 localPos) { m_nodesData[node].mass = mass; m_nodesData[node].volume = volume; m_nodesData[node].localPos = localPos; m_nodesDirty = true; } void setNodeNeighborsCount(uint32_t node, uint32_t neighborsCount) { // neighbors count is expected to be the number of nodes on 1 island/actor. m_nodesData[node].neighborsCount = neighborsCount; // check for too huge aggregates (happens after island's split) if (!m_nodesDirty) { m_nodesDirty |= (m_solverNodesData[m_nodesData[node].solverNode].supportNodesCount > neighborsCount / 2); } } void addNodeForce(uint32_t node, const NvVec3& force, ExtForceMode::Enum mode) { const float mass = m_nodesData[node].mass; if (mass > 0) { // NOTE - passing in acceleration as velocity. The impulse solver's output will be interpreted as force. m_nodesData[node].localVel += (mode == ExtForceMode::FORCE) ? 
force/mass : force; } } void addBond(uint32_t node0, uint32_t node1, uint32_t blastBondIndex) { if (isInvalidIndex(m_blastBondIndexMap[blastBondIndex])) { const BondData data = { node0, node1, blastBondIndex, 0.0f }; m_bondsData.pushBack(data); m_blastBondIndexMap[blastBondIndex] = m_bondsData.size() - 1; } } void removeBondIfExists(uint32_t blastBondIndex) { const uint32_t bondIndex = m_blastBondIndexMap[blastBondIndex]; if (!isInvalidIndex(bondIndex)) { const BondData& bond = m_bondsData[bondIndex]; const uint32_t solverNode0 = m_nodesData[bond.node0].solverNode; const uint32_t solverNode1 = m_nodesData[bond.node1].solverNode; bool isBondInternal = (solverNode0 == solverNode1); if (isBondInternal) { // internal bond sadly requires graph resync (it never happens on reduction level '0') m_nodesDirty = true; } else if (!m_nodesDirty) { // otherwise it's external bond, we can remove it manually and keep graph synced // we don't need to spend time there if (m_nodesDirty == true), graph will be resynced anyways BondKey solverBondKey(solverNode0, solverNode1); auto entry = m_solverBondsMap.find(solverBondKey); if (entry) { const uint32_t solverBondIndex = entry->second; auto& blastBondIndices = m_solverBondsData[solverBondIndex].blastBondIndices; blastBondIndices.findAndReplaceWithLast(blastBondIndex); if (blastBondIndices.empty()) { // all bonds associated with this solver bond were removed, so let's remove solver bond m_solverBondsData.replaceWithLast(solverBondIndex); m_solver.replaceWithLast(solverBondIndex); if (m_solver.getBondCount() > 0) { // update 'previously last' solver bond mapping uint32_t node0, node1; m_solver.getBondNodes(solverBondIndex, node0, node1); m_solverBondsMap[BondKey(node0, node1)] = solverBondIndex; } m_solverBondsMap.erase(solverBondKey); } } CHECK_GRAPH_INTEGRITY; } // remove bond from graph processor's list m_blastBondIndexMap[blastBondIndex] = invalidIndex<uint32_t>(); m_bondsData.replaceWithLast(bondIndex); m_blastBondIndexMap[m_bondsData[bondIndex].blastBondIndex] = m_bondsData.size() > bondIndex ? 
bondIndex : invalidIndex<uint32_t>(); } } void setGraphReductionLevel(uint32_t level) { m_graphReductionLevel = level; m_nodesDirty = true; } uint32_t getGraphReductionLevel() const { return m_graphReductionLevel; } void solve(const ExtStressSolverSettings& settings, const float* bondHealth, const NvBlastBond* bonds, bool warmStart = true) { sync(bonds); for (const NodeData& node : m_nodesData) { m_solver.setNodeVelocities(node.solverNode, node.localVel, NvVec3(NvZero)); } m_solver.solve(settings.maxSolverIterationsPerFrame, warmStart); resetVelocities(); updateBondStress(settings, bondHealth, bonds); } bool calcError(float& linear, float& angular) const { return m_solver.calcError(linear, angular); } bool getBondStress(uint32_t blastBondIndex, float& compression, float& tension, float& shear) const { const uint32_t bondIndex = m_blastBondIndexMap[blastBondIndex]; if (isInvalidIndex(bondIndex)) { return false; } // compression and tension are mutually exclusive since they operate in opposite directions // they both measure stress parallel to the bond normal direction // compression is the force resisting two nodes being pushed together (it pushes them apart) // tension is the force resisting two nodes being pulled apart (it pulls them together) if (m_bondsData[bondIndex].stressNormal <= 0.0f) { compression = -m_bondsData[bondIndex].stressNormal; tension = 0.0f; } else { compression = 0.0f; tension = m_bondsData[bondIndex].stressNormal; } // shear is independent and can co-exist with compression and tension shear = m_bondsData[bondIndex].stressShear; // the force perpendicular to the bond normal direction return true; } // Convert from Blast bond index to internal stress solver bond index // Will be InvalidIndex if the internal bond was removed from the stress solver uint32_t getInternalBondIndex(uint32_t blastBondIndex) { return m_blastBondIndexMap[blastBondIndex]; } private: void resetVelocities() { for (auto& node : m_nodesData) { node.localVel = NvVec3(NvZero); } } void updateBondStress(const ExtStressSolverSettings& settings, const float* bondHealth, const NvBlastBond* bonds) { m_overstressedBondCount = 0; Array<uint32_t>::type bondIndicesToRemove; bondIndicesToRemove.reserve(getBondCount()); for (uint32_t i = 0; i < m_solverBondsData.size(); ++i) { // calculate the total area of all bonds involved so pressure can be calculated float totalArea = 0.0f; // calculate an average normal and centroid for all bonds as well, weighted by their area nvidia::NvVec3 bondNormal(NvZero); nvidia::NvVec3 bondCentroid(NvZero); nvidia::NvVec3 averageNodeDisp(NvZero); const auto& blastBondIndices = m_solverBondsData[i].blastBondIndices; for (auto blastBondIndex : blastBondIndices) { if (bondHealth[blastBondIndex] > 0.0f) { const uint32_t bondIndex = m_blastBondIndexMap[blastBondIndex]; const BondData& bond = m_bondsData[bondIndex]; const nvidia::NvVec3 nodeDisp = m_nodesData[bond.node1].localPos - m_nodesData[bond.node0].localPos; // the current health of a bond is the effective area remaining const float remainingArea = bondHealth[blastBondIndex]; const NvBlastBond& blastBond = bonds[blastBondIndex]; // Align normal(s) with node displacement, so that compressive/tensile distinction is correct const nvidia::NvVec3 assetBondNormal(blastBond.normal[0], blastBond.normal[1], blastBond.normal[2]); const nvidia::NvVec3 blastBondNormal = std::copysignf(1.0f, assetBondNormal.dot(nodeDisp))*assetBondNormal; const nvidia::NvVec3 blastBondCentroid(blastBond.centroid[0], blastBond.centroid[1], 
blastBond.centroid[2]); if (!canTakeDamage(remainingArea)) // Check unbreakable limit { totalArea = kUnbreakableLimit; // Don't add this in, in case of overflow bondNormal = blastBondNormal; bondCentroid = blastBondCentroid; averageNodeDisp = nodeDisp; break; } bondNormal += blastBondNormal*remainingArea; bondCentroid += blastBondCentroid*remainingArea; averageNodeDisp += nodeDisp*remainingArea; totalArea += remainingArea; } else { // if the bond is broken, try to remove it after processing is complete bondIndicesToRemove.pushBack(blastBondIndex); } } if (totalArea == 0.0f) { continue; } // normalize the aggregate normal now that all contributing bonds have been combined bondNormal.normalizeSafe(); // divide by total area for the weighted position, if the area is valid if (canTakeDamage(totalArea)) { bondCentroid /= totalArea; averageNodeDisp /= totalArea; } // bonds are looked at as a whole group, // so regardless of the current health of an individual one they are either all over stressed or none are float stressNormal, stressShear; calcSolverBondStresses(i, totalArea, averageNodeDisp.magnitude(), bondNormal, stressNormal, stressShear); NVBLAST_ASSERT(!std::isnan(stressNormal) && !std::isnan(stressShear)); if ( -stressNormal > settings.compressionElasticLimit || stressNormal > settings.tensionElasticLimit || stressShear > settings.shearElasticLimit ) { m_overstressedBondCount += blastBondIndices.size(); } // store the stress values for all the bonds involved for (auto blastBondIndex : blastBondIndices) { const uint32_t bondIndex = m_blastBondIndexMap[blastBondIndex]; if (!isInvalidIndex(bondIndex) && bondHealth[blastBondIndex] > 0.0f) { BondData& bond = m_bondsData[bondIndex]; NVBLAST_ASSERT(getNodeData(bond.node0).solverNode != getNodeData(bond.node1).solverNode); NVBLAST_ASSERT(bond.blastBondIndex == blastBondIndex); bond.stressNormal = stressNormal; bond.stressShear = stressShear; // store the normal used to calc stresses so it can be used later to determine forces bond.normal = bondNormal; // store the bond centroid bond.centroid = bondCentroid; } } } // now that processing is done, remove any dead bonds for (uint32_t bondIndex : bondIndicesToRemove) { removeBondIfExists(bondIndex); } } void sync(const NvBlastBond* bonds) { if (m_nodesDirty) { syncNodes(bonds); m_solver.initialize(); } if (m_bondsDirty) { syncBonds(bonds); } CHECK_GRAPH_INTEGRITY; } void syncNodes(const NvBlastBond* bonds) { // init with 1<->1 blast nodes to solver nodes mapping m_solverNodesData.resize(m_nodesData.size()); for (uint32_t i = 0; i < m_nodesData.size(); ++i) { m_nodesData[i].solverNode = i; m_solverNodesData[i].supportNodesCount = 1; m_solverNodesData[i].indexShift = 0; } // for static nodes the aggregate size per graph reduction level is lower, it // falls behind by a few levels. (could be made a parameter) const uint32_t STATIC_NODES_COUNT_PENALTY = 2 << 2; // reducing graph by aggregating nodes level by level // NOTE (@anovoselov): Recently, I found a flaw in the algorithm below. In very rare situations an aggregate (solver node) // can contain more than one connected component. I haven't seen it produce any visual artifacts and it is // unlikely to influence the stress solve much. A possible solution is to merge *whole* solver nodes, which // would raise complexity a bit (at least one more loop over nodes for every reduction level). 
for (uint32_t k = 0; k < m_graphReductionLevel; k++) { const uint32_t maxAggregateSize = 1 << (k + 1); for (const BondData& bond : m_bondsData) { NodeData& node0 = m_nodesData[bond.node0]; NodeData& node1 = m_nodesData[bond.node1]; if (node0.solverNode == node1.solverNode) continue; SolverNodeData& solverNode0 = m_solverNodesData[node0.solverNode]; SolverNodeData& solverNode1 = m_solverNodesData[node1.solverNode]; const int countPenalty = 1; // This was being set to STATIC_NODES_COUNT_PENALTY for static nodes, may want to revisit const uint32_t aggregateSize = std::min<uint32_t>(maxAggregateSize, node0.neighborsCount / 2); if (solverNode0.supportNodesCount * countPenalty >= aggregateSize) continue; if (solverNode1.supportNodesCount * countPenalty >= aggregateSize) continue; if (solverNode0.supportNodesCount >= solverNode1.supportNodesCount) { solverNode1.supportNodesCount--; solverNode0.supportNodesCount++; node1.solverNode = node0.solverNode; } else if (solverNode1.supportNodesCount >= solverNode0.supportNodesCount) { solverNode1.supportNodesCount++; solverNode0.supportNodesCount--; node0.solverNode = node1.solverNode; } } } // Solver Nodes now sparse, a lot of empty ones. Rearrange them by moving all non-empty to the front // 2 passes used for that { uint32_t currentNode = 0; for (; currentNode < m_solverNodesData.size(); ++currentNode) { if (m_solverNodesData[currentNode].supportNodesCount > 0) continue; // 'currentNode' is free // search next occupied node uint32_t k = currentNode + 1; for (; k < m_solverNodesData.size(); ++k) { if (m_solverNodesData[k].supportNodesCount > 0) { // replace currentNode and keep indexShift m_solverNodesData[currentNode].supportNodesCount = m_solverNodesData[k].supportNodesCount; m_solverNodesData[k].indexShift = k - currentNode; m_solverNodesData[k].supportNodesCount = 0; break; } } if (k == m_solverNodesData.size()) { break; } } for (auto& node : m_nodesData) { node.solverNode -= m_solverNodesData[node.solverNode].indexShift; } // now, we know total solver nodes count and which nodes are aggregated into them m_solverNodesData.resize(currentNode); } // calculate all needed data for (SolverNodeData& solverNode : m_solverNodesData) { solverNode.supportNodesCount = 0; solverNode.localPos = NvVec3(NvZero); solverNode.mass = 0.0f; solverNode.volume = 0.0f; } for (NodeData& node : m_nodesData) { SolverNodeData& solverNode = m_solverNodesData[node.solverNode]; solverNode.supportNodesCount++; solverNode.localPos += node.localPos; solverNode.mass += node.mass; solverNode.volume += node.volume; } for (SolverNodeData& solverNode : m_solverNodesData) { solverNode.localPos /= (float)solverNode.supportNodesCount; } m_solver.reset(m_solverNodesData.size()); for (uint32_t nodeIndex = 0; nodeIndex < m_solverNodesData.size(); ++nodeIndex) { const SolverNodeData& solverNode = m_solverNodesData[nodeIndex]; const float R = NvPow(solverNode.volume * 3.0f * NvInvPi / 4.0f, 1.0f / 3.0f); // sphere volume approximation const float inertia = solverNode.mass * (R * R * 0.4f); // sphere inertia tensor approximation: I = 2/5 * M * R^2 ; invI = 1 / I; m_solver.setNodeMassInfo(nodeIndex, solverNode.localPos, solverNode.mass, inertia); } m_nodesDirty = false; syncBonds(bonds); } void syncBonds(const NvBlastBond* bonds) { // traverse all blast bonds and aggregate m_solver.clearBonds(); m_solverBondsMap.clear(); m_solverBondsData.clear(); for (BondData& bond : m_bondsData) { const NodeData& node0 = m_nodesData[bond.node0]; const NodeData& node1 = m_nodesData[bond.node1]; // reset stress, 
bond structure changed and internal bonds stress won't be updated during updateBondStress() bond.stressNormal = 0.0f; bond.stressShear = 0.0f; // initialize normal and centroid using blast values bond.normal = *(NvVec3*)bonds[bond.blastBondIndex].normal; bond.centroid = *(NvVec3*)bonds[bond.blastBondIndex].centroid; // fix normal direction to point from node0 to node1 bond.normal *= std::copysignf(1.0f, bond.normal.dot(node1.localPos - node0.localPos)); if (node0.solverNode == node1.solverNode) continue; // skip (internal) BondKey key(node0.solverNode, node1.solverNode); auto entry = m_solverBondsMap.find(key); SolverBondData* data; if (!entry) { m_solverBondsData.pushBack(SolverBondData()); data = &m_solverBondsData.back(); m_solverBondsMap[key] = m_solverBondsData.size() - 1; m_solver.addBond(node0.solverNode, node1.solverNode, bond.centroid); } else { data = &m_solverBondsData[entry->second]; } data->blastBondIndices.pushBack(bond.blastBondIndex); } m_bondsDirty = false; } #if GRAPH_INTERGRIRY_CHECK void checkGraphIntegrity() { NVBLAST_ASSERT(m_solver.getBondCount() == m_solverBondsData.size()); NVBLAST_ASSERT(m_solver.getNodeCount() == m_solverNodesData.size()); std::set<uint64_t> solverBonds; for (uint32_t i = 0; i < m_solverBondsData.size(); ++i) { const auto& bondData = m_solver.getBondData(i); BondKey key(bondData.node0, bondData.node1); NVBLAST_ASSERT(solverBonds.find(key) == solverBonds.end()); solverBonds.emplace(key); auto entry = m_solverBondsMap.find(key); NVBLAST_ASSERT(entry != nullptr); const auto& solverBond = m_solverBondsData[entry->second]; for (auto& blastBondIndex : solverBond.blastBondIndices) { if (!isInvalidIndex(m_blastBondIndexMap[blastBondIndex])) { auto& b = m_bondsData[m_blastBondIndexMap[blastBondIndex]]; BondKey key2(m_nodesData[b.node0].solverNode, m_nodesData[b.node1].solverNode); NVBLAST_ASSERT(key2 == key); } } } for (auto& solverBond : m_solverBondsData) { for (auto& blastBondIndex : solverBond.blastBondIndices) { if (!isInvalidIndex(m_blastBondIndexMap[blastBondIndex])) { auto& b = m_bondsData[m_blastBondIndexMap[blastBondIndex]]; NVBLAST_ASSERT(m_nodesData[b.node0].solverNode != m_nodesData[b.node1].solverNode); } } } uint32_t mappedBondCount = 0; for (uint32_t i = 0; i < m_blastBondIndexMap.size(); i++) { const auto& bondIndex = m_blastBondIndexMap[i]; if (!isInvalidIndex(bondIndex)) { mappedBondCount++; NVBLAST_ASSERT(m_bondsData[bondIndex].blastBondIndex == i); } } NVBLAST_ASSERT(m_bondsData.size() == mappedBondCount); } #endif struct BondKey { uint32_t node0; uint32_t node1; BondKey(uint32_t n0, uint32_t n1) : node0(n0), node1(n1) {} operator uint64_t() const { // Szudzik's function return node0 >= node1 ? 
(uint64_t)node0 * node0 + node0 + node1 : (uint64_t)node1 * node1 + node0; } }; ConjugateGradientImpulseSolver m_solver; Array<SolverNodeData>::type m_solverNodesData; Array<SolverBondData>::type m_solverBondsData; uint32_t m_graphReductionLevel; bool m_nodesDirty; bool m_bondsDirty; uint32_t m_overstressedBondCount; HashMap<BondKey, uint32_t>::type m_solverBondsMap; Array<uint32_t>::type m_blastBondIndexMap; Array<BondData>::type m_bondsData; Array<NodeData>::type m_nodesData; }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ExtStressSolver /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /** */ class ExtStressSolverImpl final : public ExtStressSolver { NV_NOCOPY(ExtStressSolverImpl) public: ExtStressSolverImpl(const NvBlastFamily& family, const ExtStressSolverSettings& settings); virtual void release() override; //////// ExtStressSolverImpl interface //////// virtual void setAllNodesInfoFromLL(float density = 1.0f) override; virtual void setNodeInfo(uint32_t graphNode, float mass, float volume, NvcVec3 localPos) override; virtual void setSettings(const ExtStressSolverSettings& settings) override { m_settings = settings; inheritSettingsLimits(); } virtual const ExtStressSolverSettings& getSettings() const override { return m_settings; } virtual bool addForce(const NvBlastActor& actor, NvcVec3 localPosition, NvcVec3 localForce, ExtForceMode::Enum mode) override; virtual void addForce(uint32_t graphNode, NvcVec3 localForce, ExtForceMode::Enum mode) override; virtual bool addGravity(const NvBlastActor& actor, NvcVec3 localGravity) override; virtual bool addCentrifugalAcceleration(const NvBlastActor& actor, NvcVec3 localCenterMass, NvcVec3 localAngularVelocity) override; virtual void update() override; virtual uint32_t getOverstressedBondCount() const override { return m_graphProcessor->getOverstressedBondCount(); } virtual void generateFractureCommands(const NvBlastActor& actor, NvBlastFractureBuffers& commands) override; virtual uint32_t generateFractureCommandsPerActor(const NvBlastActor** actorBuffer, NvBlastFractureBuffers* commandsBuffer, uint32_t bufferSize) override; void reset() override { m_reset = true; } virtual float getStressErrorLinear() const override { return m_errorLinear; } virtual float getStressErrorAngular() const override { return m_errorAngular; } virtual bool converged() const override { return m_converged; } virtual uint32_t getFrameCount() const override { return m_framesCount; } virtual uint32_t getBondCount() const override { return m_graphProcessor->getSolverBondCount(); } virtual bool getExcessForces(uint32_t actorIndex, const NvcVec3& com, NvcVec3& force, NvcVec3& torque) override; virtual bool notifyActorCreated(const NvBlastActor& actor) override; virtual void notifyActorDestroyed(const NvBlastActor& actor) override; virtual const DebugBuffer fillDebugRender(const uint32_t* nodes, uint32_t nodeCount, DebugRenderMode mode, float scale) override; private: ~ExtStressSolverImpl(); //////// private methods //////// void solve(); void fillFractureCommands(const NvBlastActor& actor, NvBlastFractureBuffers& commands); void initialize(); void iterate(); void removeBrokenBonds(); template<class T> T* getScratchArray(uint32_t size); bool generateStressDamage(const NvBlastActor& actor, uint32_t bondIndex, uint32_t node0, uint32_t node1); void inheritSettingsLimits() { NVBLAST_ASSERT(m_settings.compressionElasticLimit >= 0.0f 
&& m_settings.compressionFatalLimit >= 0.0f); // check if any optional limits need to inherit from the compression values if (m_settings.tensionElasticLimit < 0.0f) { m_settings.tensionElasticLimit = m_settings.compressionElasticLimit; } if (m_settings.tensionFatalLimit < 0.0f) { m_settings.tensionFatalLimit = m_settings.compressionFatalLimit; } if (m_settings.shearElasticLimit < 0.0f) { m_settings.shearElasticLimit = m_settings.compressionElasticLimit; } if (m_settings.shearFatalLimit < 0.0f) { m_settings.shearFatalLimit = m_settings.compressionFatalLimit; } } //////// data //////// const NvBlastFamily& m_family; HashSet<const NvBlastActor*>::type m_activeActors; ExtStressSolverSettings m_settings; NvBlastSupportGraph m_graph; bool m_isDirty; bool m_reset; const float* m_bondHealths; const float* m_cachedBondHealths; const NvBlastBond* m_bonds; SupportGraphProcessor* m_graphProcessor; float m_errorAngular; float m_errorLinear; bool m_converged; uint32_t m_framesCount; Array<NvBlastBondFractureData>::type m_bondFractureBuffer; Array<uint8_t>::type m_scratch; Array<DebugLine>::type m_debugLineBuffer; }; template<class T> NV_INLINE T* ExtStressSolverImpl::getScratchArray(uint32_t size) { const uint32_t scratchSize = sizeof(T) * size; if (m_scratch.size() < scratchSize) { m_scratch.resize(scratchSize); } return reinterpret_cast<T*>(m_scratch.begin()); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Creation /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ExtStressSolverImpl::ExtStressSolverImpl(const NvBlastFamily& family, const ExtStressSolverSettings& settings) : m_family(family), m_settings(settings), m_isDirty(false), m_reset(false), m_errorAngular(std::numeric_limits<float>::max()), m_errorLinear(std::numeric_limits<float>::max()), m_converged(false), m_framesCount(0) { // this needs to be called any time settings change, including when they are first set inheritSettingsLimits(); const NvBlastAsset* asset = NvBlastFamilyGetAsset(&m_family, logLL); NVBLAST_ASSERT(asset); m_graph = NvBlastAssetGetSupportGraph(asset, logLL); const uint32_t bondCount = NvBlastAssetGetBondCount(asset, logLL); m_bondFractureBuffer.reserve(bondCount); { NvBlastActor* actor; NvBlastFamilyGetActors(&actor, 1, &family, logLL); m_bondHealths = NvBlastActorGetBondHealths(actor, logLL); m_cachedBondHealths = NvBlastActorGetCachedBondHeaths(actor, logLL); m_bonds = NvBlastAssetGetBonds(asset, logLL); } m_graphProcessor = NVBLAST_NEW(SupportGraphProcessor)(m_graph.nodeCount, bondCount); // traverse graph and fill bond info for (uint32_t node0 = 0; node0 < m_graph.nodeCount; ++node0) { for (uint32_t adjacencyIndex = m_graph.adjacencyPartition[node0]; adjacencyIndex < m_graph.adjacencyPartition[node0 + 1]; adjacencyIndex++) { uint32_t bondIndex = m_graph.adjacentBondIndices[adjacencyIndex]; if (m_bondHealths[bondIndex] <= 0.0f) continue; uint32_t node1 = m_graph.adjacentNodeIndices[adjacencyIndex]; if (node0 < node1) { m_graphProcessor->addBond(node0, node1, bondIndex); } } } } ExtStressSolverImpl::~ExtStressSolverImpl() { NVBLAST_DELETE(m_graphProcessor, SupportGraphProcessor); } ExtStressSolver* ExtStressSolver::create(const NvBlastFamily& family, const ExtStressSolverSettings& settings) { return NVBLAST_NEW(ExtStressSolverImpl) (family, settings); } void ExtStressSolverImpl::release() { NVBLAST_DELETE(this, ExtStressSolverImpl); } 
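// Usage example (illustrative sketch only, not part of the SDK source): a typical way an
// application could drive the stress solver each frame. 'family', 'actor' and 'gravity' are
// assumed to be provided by the caller.
//
//     ExtStressSolverSettings settings;                                 // default stress limits
//     ExtStressSolver* stressSolver = ExtStressSolver::create(family, settings);
//     stressSolver->setAllNodesInfoFromLL(1000.0f);                     // node mass/volume from asset chunks, density = 1000
//     stressSolver->notifyActorCreated(actor);
//
//     // every simulation frame:
//     stressSolver->addGravity(actor, gravity);
//     stressSolver->update();
//     if (stressSolver->getOverstressedBondCount() > 0)
//     {
//         NvBlastFractureBuffers commands;
//         stressSolver->generateFractureCommands(actor, commands);
//         // apply 'commands' to the actor (e.g. with the low-level fracture API), then notify the
//         // solver of any resulting splits via notifyActorCreated()/notifyActorDestroyed().
//     }
//
//     stressSolver->release();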
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Actors & Graph Data /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void ExtStressSolverImpl::setAllNodesInfoFromLL(float density) { const NvBlastAsset* asset = NvBlastFamilyGetAsset(&m_family, logLL); NVBLAST_ASSERT(asset); const uint32_t chunkCount = NvBlastAssetGetChunkCount(asset, logLL); const NvBlastChunk* chunks = NvBlastAssetGetChunks(asset, logLL); // traverse graph and fill node info for (uint32_t node0 = 0; node0 < m_graph.nodeCount; ++node0) { const uint32_t chunkIndex0 = m_graph.chunkIndices[node0]; if (chunkIndex0 >= chunkCount) { // chunkIndex is invalid means it is static node (represents world) m_graphProcessor->setNodeInfo(node0, 0.0f, 0.0f, NvVec3(NvZero)); } else { // fill node info const NvBlastChunk& chunk = chunks[chunkIndex0]; const float volume = chunk.volume; const float mass = volume * density; const NvVec3 localPos = *reinterpret_cast<const NvVec3*>(chunk.centroid); m_graphProcessor->setNodeInfo(node0, mass, volume, localPos); } } } void ExtStressSolverImpl::setNodeInfo(uint32_t graphNode, float mass, float volume, NvcVec3 localPos) { m_graphProcessor->setNodeInfo(graphNode, mass, volume, toNvShared(localPos)); } bool ExtStressSolverImpl::getExcessForces(uint32_t actorIndex, const NvcVec3& com, NvcVec3& force, NvcVec3& torque) { // otherwise allocate enough space and query the Blast SDK const NvBlastActor* actor = NvBlastFamilyGetActorByIndex(&m_family, actorIndex, logLL); if (actor == nullptr) { return false; } const uint32_t nodeCount = NvBlastActorGetGraphNodeCount(actor, logLL); uint32_t* nodeIndices = getScratchArray<uint32_t>(nodeCount); const uint32_t retCount = NvBlastActorGetGraphNodeIndices(nodeIndices, nodeCount, actor, logLL); NVBLAST_ASSERT(retCount == nodeCount); // get the mapping between support chunks and actor indices // this is the fastest way to tell if two node/chunks are part of the same actor const uint32_t* actorIndices = NvBlastFamilyGetChunkActorIndices(&m_family, logLL); if (actorIndices == nullptr) { return false; } // walk the visible nodes for the actor looking for bonds that broke this frame nvidia::NvVec3 totalForce(0.0f); nvidia::NvVec3 totalTorque(0.0f); for (uint32_t n = 0; n < nodeCount; n++) { // find bonds that broke this frame (health <= 0 but internal stress bond index is still valid) const uint32_t nodeIdx = nodeIndices[n]; for (uint32_t i = m_graph.adjacencyPartition[nodeIdx]; i < m_graph.adjacencyPartition[nodeIdx + 1]; i++) { // check if the bond is broken first of all const uint32_t blastBondIndex = m_graph.adjacentBondIndices[i]; if (m_bondHealths[blastBondIndex] > 0.0f) { continue; } // broken bonds that have invalid internal indices broke before this frame const uint32_t internalBondIndex = m_graphProcessor->getInternalBondIndex(blastBondIndex); if (isInvalidIndex(internalBondIndex)) { continue; } // make sure the other node in the bond isn't part of the same actor // forces should only be applied due to bonds breaking between actors, not within const uint32_t chunkIdx = m_graph.chunkIndices[nodeIdx]; const uint32_t otherNodeIdx = m_graph.adjacentNodeIndices[i]; const uint32_t otherChunkIdx = m_graph.chunkIndices[otherNodeIdx]; if (!isInvalidIndex(chunkIdx) && !isInvalidIndex(otherChunkIdx) && actorIndices[chunkIdx] == actorIndices[otherChunkIdx]) { continue; } // this bond should contribute forces to the output const auto 
bondData = m_graphProcessor->getBondData(internalBondIndex); NVBLAST_ASSERT(blastBondIndex == bondData.blastBondIndex); uint32_t node0, node1; m_graphProcessor->getSolverInternalBondNodes(internalBondIndex, node0, node1); NVBLAST_ASSERT(bondData.node0 == node0 && bondData.node1 == node1); // accumulators for forces just from this bond nvidia::NvVec3 nvLinearPressure(0.0f); nvidia::NvVec3 nvAngularPressure(0.0f); // deal with linear forces const float excessCompression = bondData.stressNormal + m_settings.compressionFatalLimit; const float excessTension = bondData.stressNormal - m_settings.tensionFatalLimit; if (excessCompression < 0.0f) { nvLinearPressure += excessCompression * bondData.normal; } else if (excessTension > 0.0f) { // tension is in the negative direction of the linear impulse nvLinearPressure += excessTension * bondData.normal; } const float excessShear = bondData.stressShear - m_settings.shearFatalLimit; if (excessShear > 0.0f) { NvVec3 impulseLinear, impulseAngular; m_graphProcessor->getSolverInternalBondImpulses(internalBondIndex, impulseLinear, impulseAngular); const nvidia::NvVec3 shearDir = impulseLinear - impulseLinear.dot(bondData.normal)*bondData.normal; nvLinearPressure += excessShear * shearDir.getNormalized(); } if (nvLinearPressure.magnitudeSquared() > FLT_EPSILON) { const float* bondCenter = m_bonds[blastBondIndex].centroid; const nvidia::NvVec3 forceOffset = nvidia::NvVec3(bondCenter[0], bondCenter[1], bondCenter[2]) - toNvShared(com); const nvidia::NvVec3 torqueFromForce = forceOffset.cross(nvLinearPressure); nvAngularPressure += torqueFromForce; } // add the contributions from this bond to the total forces for the actor // multiply by the area to convert back to force from pressure const float bondRemainingArea = m_cachedBondHealths[blastBondIndex]; NVBLAST_ASSERT(bondRemainingArea <= m_bonds[blastBondIndex].area); const float sign = otherNodeIdx > nodeIdx ? 
1.0f : -1.0f; totalForce += nvLinearPressure * (sign*bondRemainingArea); totalTorque += nvAngularPressure * (sign*bondRemainingArea); } } // convert to the output format and return true if non-zero forces were accumulated force = fromNvShared(totalForce); torque = fromNvShared(totalTorque); return (totalForce.magnitudeSquared() + totalTorque.magnitudeSquared()) > 0.0f; } bool ExtStressSolverImpl::notifyActorCreated(const NvBlastActor& actor) { const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); if (graphNodeCount > 1) { // update neighbors { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL); for (uint32_t i = 0; i < nodeCount; ++i) { m_graphProcessor->setNodeNeighborsCount(graphNodeIndices[i], nodeCount); } } m_activeActors.insert(&actor); m_isDirty = true; return true; } return false; } void ExtStressSolverImpl::notifyActorDestroyed(const NvBlastActor& actor) { if (m_activeActors.erase(&actor)) { m_isDirty = true; } } void ExtStressSolverImpl::removeBrokenBonds() { // traverse graph and remove dead bonds for (uint32_t node0 = 0; node0 < m_graph.nodeCount; ++node0) { for (uint32_t adjacencyIndex = m_graph.adjacencyPartition[node0]; adjacencyIndex < m_graph.adjacencyPartition[node0 + 1]; adjacencyIndex++) { uint32_t node1 = m_graph.adjacentNodeIndices[adjacencyIndex]; if (node0 < node1) { uint32_t bondIndex = m_graph.adjacentBondIndices[adjacencyIndex]; if (m_bondHealths[bondIndex] <= 0.0f) { m_graphProcessor->removeBondIfExists(bondIndex); } } } } m_isDirty = false; } void ExtStressSolverImpl::initialize() { if (m_reset) { m_framesCount = 0; } if (m_isDirty) { removeBrokenBonds(); } if (m_settings.graphReductionLevel != m_graphProcessor->getGraphReductionLevel()) { m_graphProcessor->setGraphReductionLevel(m_settings.graphReductionLevel); } } bool ExtStressSolverImpl::addForce(const NvBlastActor& actor, NvcVec3 localPosition, NvcVec3 localForce, ExtForceMode::Enum mode) { float bestDist = FLT_MAX; uint32_t bestNode = invalidIndex<uint32_t>(); const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); if (graphNodeCount > 1) { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL); for (uint32_t i = 0; i < nodeCount; ++i) { const uint32_t node = graphNodeIndices[i]; const float sqrDist = (toNvShared(localPosition) - m_graphProcessor->getNodeData(node).localPos).magnitudeSquared(); if (sqrDist < bestDist) { bestDist = sqrDist; bestNode = node; } } if (!isInvalidIndex(bestNode)) { m_graphProcessor->addNodeForce(bestNode, toNvShared(localForce), mode); return true; } } return false; } void ExtStressSolverImpl::addForce(uint32_t graphNode, NvcVec3 localForce, ExtForceMode::Enum mode) { m_graphProcessor->addNodeForce(graphNode, toNvShared(localForce), mode); } bool ExtStressSolverImpl::addGravity(const NvBlastActor& actor, NvcVec3 localGravity) { const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); if (graphNodeCount > 1) { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL); for (uint32_t i = 0; i < nodeCount; ++i) { const uint32_t node = graphNodeIndices[i]; m_graphProcessor->addNodeForce(node, toNvShared(localGravity), ExtForceMode::ACCELERATION); } 
return true; } return false; } bool ExtStressSolverImpl::addCentrifugalAcceleration(const NvBlastActor& actor, NvcVec3 localCenterMass, NvcVec3 localAngularVelocity) { const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); if (graphNodeCount > 1) { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL); // Apply centrifugal force for (uint32_t i = 0; i < nodeCount; ++i) { const uint32_t node = graphNodeIndices[i]; const auto& localPos = m_graphProcessor->getNodeData(node).localPos; // a = w x (w x r) const NvVec3 centrifugalAcceleration = toNvShared(localAngularVelocity) .cross(toNvShared(localAngularVelocity).cross(localPos - toNvShared(localCenterMass))); m_graphProcessor->addNodeForce(node, centrifugalAcceleration, ExtForceMode::ACCELERATION); } return true; } return false; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Update /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void ExtStressSolverImpl::update() { initialize(); solve(); m_framesCount++; } void ExtStressSolverImpl::solve() { NV_SIMD_GUARD; m_graphProcessor->solve(m_settings, m_bondHealths, m_bonds, WARM_START && !m_reset); m_reset = false; m_converged = m_graphProcessor->calcError(m_errorLinear, m_errorAngular); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Damage /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // check if this bond is over stressed in any way and generate a fracture command if it is bool ExtStressSolverImpl::generateStressDamage(const NvBlastActor& actor, uint32_t bondIndex, uint32_t node0, uint32_t node1) { const float bondHealth = m_bondHealths[bondIndex]; float stressCompression, stressTension, stressShear; if (bondHealth > 0.0f && m_graphProcessor->getBondStress(bondIndex, stressCompression, stressTension, stressShear)) { // compression and tension are mutually exclusive, only one can be positive at a time since they act in opposite directions float stressMultiplier = 0.0f; if (stressCompression > m_settings.compressionElasticLimit) { const float excessStress = stressCompression - m_settings.compressionElasticLimit; const float compressionDenom = m_settings.compressionFatalLimit - m_settings.compressionElasticLimit; const float compressionMultiplier = excessStress / (compressionDenom > 0.0f ? compressionDenom : 1.0f); stressMultiplier += compressionMultiplier; } else if (stressTension > m_settings.tensionElasticLimit) { const float excessStress = stressTension - m_settings.tensionElasticLimit; const float tensionDenom = m_settings.tensionFatalLimit - m_settings.tensionElasticLimit; const float tensionMultiplier = excessStress / (tensionDenom > 0.0f ? tensionDenom : 1.0f); stressMultiplier += tensionMultiplier; } // shear can co-exist with either compression or tension so must be accounted for independently of them if (stressShear > m_settings.shearElasticLimit) { const float excessStress = stressShear - m_settings.shearElasticLimit; const float shearDenom = m_settings.shearFatalLimit - m_settings.shearElasticLimit; const float shearMultiplier = excessStress / (shearDenom > 0.0f ? 
shearDenom : 1.0f); stressMultiplier += shearMultiplier; } if (stressMultiplier > 0.0f) { // bond health/area is reduced by excess pressure to approximate micro bonds in the material breaking const float bondDamage = bondHealth * stressMultiplier; const NvBlastBondFractureData data = { 0, node0, node1, bondDamage }; m_bondFractureBuffer.pushBack(data); // cache off the current health value for this bond // so it can be used to calculate forces to apply if it breaks later NvBlastActorCacheBondHeath(&actor, bondIndex, logLL); return true; } } return false; } void ExtStressSolverImpl::fillFractureCommands(const NvBlastActor& actor, NvBlastFractureBuffers& commands) { const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); uint32_t commandCount = 0; if (graphNodeCount > 1 && m_graphProcessor->getOverstressedBondCount() > 0) { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL); for (uint32_t i = 0; i < nodeCount; ++i) { const uint32_t node0 = graphNodeIndices[i]; for (uint32_t adjacencyIndex = m_graph.adjacencyPartition[node0]; adjacencyIndex < m_graph.adjacencyPartition[node0 + 1]; adjacencyIndex++) { const uint32_t node1 = m_graph.adjacentNodeIndices[adjacencyIndex]; if (node0 < node1) { const uint32_t bondIndex = m_graph.adjacentBondIndices[adjacencyIndex]; if (generateStressDamage(actor, bondIndex, node0, node1)) { commandCount++; } } } } } commands.chunkFractureCount = 0; commands.chunkFractures = nullptr; commands.bondFractureCount = commandCount; commands.bondFractures = commandCount > 0 ? m_bondFractureBuffer.end() - commandCount : nullptr; } void ExtStressSolverImpl::generateFractureCommands(const NvBlastActor& actor, NvBlastFractureBuffers& commands) { m_bondFractureBuffer.clear(); fillFractureCommands(actor, commands); } uint32_t ExtStressSolverImpl::generateFractureCommandsPerActor(const NvBlastActor** actorBuffer, NvBlastFractureBuffers* commandsBuffer, uint32_t bufferSize) { if (m_graphProcessor->getOverstressedBondCount() == 0) return 0; m_bondFractureBuffer.clear(); uint32_t index = 0; for (auto it = m_activeActors.getIterator(); !it.done() && index < bufferSize; ++it) { const NvBlastActor* actor = *it; NvBlastFractureBuffers& nextCommand = commandsBuffer[index]; fillFractureCommands(*actor, nextCommand); if (nextCommand.bondFractureCount > 0) { actorBuffer[index] = actor; index++; } } return index; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Debug Render /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// inline uint32_t NvVec4ToU32Color(const NvVec4& color) { return ((uint32_t)(color.w * 255) << 24) | // A ((uint32_t)(color.x * 255) << 16) | // R ((uint32_t)(color.y * 255) << 8) | // G ((uint32_t)(color.z * 255)); // B } static float Lerp(float v0, float v1, float val) { return v0 * (1 - val) + v1 * val; } inline float clamp01(float v) { return v < 0.0f ? 0.0f : (v > 1.0f ? 
1.0f : v); } inline NvVec4 colorConvertHSVAtoRGBA(float h, float s, float v, float a) { const float t = 6.0f * (h - std::floor(h)); const int n = (int)t; const float m = t - (float)n; const float c = 1.0f - s; const float b[6] = { 1.0f, 1.0f - s * m, c, c, 1.0f - s * (1.0f - m), 1.0f }; return NvVec4(v * b[n % 6], v * b[(n + 4) % 6], v * b[(n + 2) % 6], a); // n % 6 protects against roundoff errors } inline uint32_t bondHealthColor(float stressPct) { stressPct = clamp01(stressPct); constexpr float BOND_HEALTHY_HUE = 1.0f/3.0f; // Green constexpr float BOND_ELASTIC_HUE = 0.0f; // Red constexpr float BOND_STRESSED_HUE = 2.0f/3.0f; // Blue constexpr float BOND_FATAL_HUE = 5.0f/6.0f; // Magenta const float hue = stressPct < 0.5f ? Lerp(BOND_HEALTHY_HUE, BOND_ELASTIC_HUE, 2.0f * stressPct) : Lerp(BOND_STRESSED_HUE, BOND_FATAL_HUE, 2.0f * stressPct - 1.0f); return NvVec4ToU32Color(colorConvertHSVAtoRGBA(hue, 1.0f, 1.0f, 1.0f)); } const ExtStressSolver::DebugBuffer ExtStressSolverImpl::fillDebugRender(const uint32_t* nodes, uint32_t nodeCount, DebugRenderMode mode, float scale) { NV_UNUSED(scale); const uint32_t BOND_UNBREAKABLE_COLOR = NvVec4ToU32Color(NvVec4(0.0f, 0.682f, 1.0f, 1.0f)); ExtStressSolver::DebugBuffer debugBuffer = { nullptr, 0 }; if (m_isDirty) return debugBuffer; m_debugLineBuffer.clear(); Array<uint8_t>::type& nodesSet = m_scratch; nodesSet.resize(m_graphProcessor->getSolverNodeCount()); memset(nodesSet.begin(), 0, nodesSet.size() * sizeof(uint8_t)); for (uint32_t i = 0; i < nodeCount; ++i) { NVBLAST_ASSERT(m_graphProcessor->getNodeData(nodes[i]).solverNode < nodesSet.size()); nodesSet[m_graphProcessor->getNodeData(nodes[i]).solverNode] = 1; } const uint32_t bondCount = m_graphProcessor->getSolverBondCount(); for (uint32_t i = 0; i < bondCount; ++i) { const auto& bondData = m_graphProcessor->getBondData(i); uint32_t node0, node1; m_graphProcessor->getSolverInternalBondNodes(i, node0, node1); if (nodesSet[node0] != 0) { //NVBLAST_ASSERT(nodesSet[node1] != 0); const auto& solverNode0 = m_graphProcessor->getSolverNodeData(node0); const auto& solverNode1 = m_graphProcessor->getSolverNodeData(node1); const NvcVec3 p0 = fromNvShared(solverNode0.mass > 0.0f ? solverNode0.localPos : bondData.centroid); const NvcVec3 p1 = fromNvShared(solverNode1.mass > 0.0f ? solverNode1.localPos : bondData.centroid); // don't render lines for broken bonds const float stressPct = m_graphProcessor->getSolverBondStressPct(i, m_bondHealths, m_settings, mode); if (stressPct >= 0.0f) { const uint32_t color = canTakeDamage(m_bondHealths[bondData.blastBondIndex]) ? bondHealthColor(stressPct) : BOND_UNBREAKABLE_COLOR; m_debugLineBuffer.pushBack(DebugLine(p0, p1, color)); } } } debugBuffer.lines = m_debugLineBuffer.begin(); debugBuffer.lineCount = m_debugLineBuffer.size(); return debugBuffer; } } // namespace Blast } // namespace Nv
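// Worked example (informal note, values chosen for illustration): with compressionElasticLimit = 10
// and compressionFatalLimit = 30, mapStressToRange() maps a compression stress of 5 to
// 0.5 * 5 / 10 = 0.25 (lower half of the range, below the elastic limit) and a stress of 20 to
// 0.5 + 0.5 * (20 - 10) / (30 - 10) = 0.75 (upper half, between the elastic and fatal limits).
// bondHealthColor() then interpolates the hue from green toward red over [0, 0.5) and from blue
// toward magenta over [0.5, 1], so bonds within the elastic range render in green-to-red tones and
// bonds approaching the fatal limit render in blue-to-magenta tones.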
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/shaders/NvBlastExtDamageAcceleratorAABBTree.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtDamageAcceleratorAABBTree.h" #include "NvBlastIndexFns.h" #include "NvBlastAssert.h" #include "NvVec4.h" #include <algorithm> using namespace nvidia; namespace Nv { namespace Blast { /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Creation /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ExtDamageAcceleratorAABBTree* ExtDamageAcceleratorAABBTree::create(const NvBlastAsset* asset) { ExtDamageAcceleratorAABBTree* tree = NVBLAST_NEW(Nv::Blast::ExtDamageAcceleratorAABBTree) (); tree->build(asset); return tree; } void ExtDamageAcceleratorAABBTree::release() { NVBLAST_DELETE(this, ExtDamageAcceleratorAABBTree); } void ExtDamageAcceleratorAABBTree::build(const NvBlastAsset* asset) { NVBLAST_ASSERT(m_root == nullptr); const NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(asset, logLL); const NvBlastBond* bonds = NvBlastAssetGetBonds(asset, logLL); const NvBlastChunk* chunks = NvBlastAssetGetChunks(asset, logLL); const uint32_t N = NvBlastAssetGetBondCount(asset, logLL); m_indices.resizeUninitialized(N); m_points.resizeUninitialized(N); m_segments.resizeUninitialized(N); m_bonds.resizeUninitialized(N); m_nodes.reserve(2 * N); for (uint32_t node0 = 0; node0 < graph.nodeCount; ++node0) { for (uint32_t j = graph.adjacencyPartition[node0]; j < graph.adjacencyPartition[node0 + 1]; ++j) { uint32_t bondIndex = graph.adjacentBondIndices[j]; uint32_t node1 = graph.adjacentNodeIndices[j]; if (node0 < node1) { const NvBlastBond& bond = bonds[bondIndex]; const NvVec3& p = (reinterpret_cast<const NvVec3&>(bond.centroid)); m_points[bondIndex] = p; m_indices[bondIndex] = bondIndex; m_bonds[bondIndex].node0 = node0; m_bonds[bondIndex].node1 = node1; // filling bond segments as a connection of 2 chunk centroids const uint32_t chunk0 = graph.chunkIndices[node0]; const uint32_t chunk1 = graph.chunkIndices[node1]; if 
(isInvalidIndex(chunk1)) { // for world node we don't have it's centroid, so approximate with projection on bond normal m_segments[bondIndex].p0 = (reinterpret_cast<const NvVec3&>(chunks[chunk0].centroid)); const NvVec3 normal = (reinterpret_cast<const NvVec3&>(bond.normal)); m_segments[bondIndex].p1 = m_segments[bondIndex].p0 + normal * (p - m_segments[bondIndex].p0).dot(normal) * 2; } else { m_segments[bondIndex].p0 = (reinterpret_cast<const NvVec3&>(chunks[chunk0].centroid)); m_segments[bondIndex].p1 = (reinterpret_cast<const NvVec3&>(chunks[chunk1].centroid)); } } } } int rootIndex = N > 0 ? createNode(0, N - 1, 0) : -1; m_root = rootIndex >= 0 ? &m_nodes[rootIndex] : nullptr; } int ExtDamageAcceleratorAABBTree::createNode(uint32_t startIdx, uint32_t endIdx, uint32_t depth) { if (startIdx > endIdx) return -1; Node node; node.first = startIdx; node.last = endIdx; // calc node bounds node.pointsBound = NvBounds3::empty(); node.segmentsBound = NvBounds3::empty(); for (uint32_t i = node.first; i <= node.last; i++) { const uint32_t idx = m_indices[i]; node.pointsBound.include(m_points[idx]); node.segmentsBound.include(m_segments[idx].p0); node.segmentsBound.include(m_segments[idx].p1); } // select axis of biggest extent const NvVec3 ext = node.pointsBound.getExtents(); uint32_t axis = 0; for (uint32_t k = 1; k < 3; k++) { if (ext[k] > ext[axis]) { axis = k; } } // split on selected axis and partially sort around the middle const uint32_t mid = startIdx + (endIdx - startIdx) / 2; std::nth_element(m_indices.begin() + startIdx, m_indices.begin() + mid, m_indices.begin() + endIdx + 1, [&](uint32_t lhs, uint32_t rhs) { return m_points[lhs][axis] < m_points[rhs][axis]; }); const uint32_t BUCKET = 32; if (endIdx - startIdx > BUCKET && mid > startIdx && mid < endIdx) { node.child[0] = createNode(startIdx, mid, depth + 1); node.child[1] = createNode(mid + 1, endIdx, depth + 1); } else { node.child[0] = -1; node.child[1] = -1; } m_nodes.pushBack(node); return m_nodes.size() - 1; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Queries /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void ExtDamageAcceleratorAABBTree::findInBounds(const nvidia::NvBounds3& bounds, ResultCallback& callback, bool segments) const { if (m_root) { if (segments) findSegmentsInBounds(*m_root, callback, bounds); else findPointsInBounds(*m_root, callback, bounds); callback.dispatch(); } } void ExtDamageAcceleratorAABBTree::findPointsInBounds(const Node& node, ResultCallback& callback, const nvidia::NvBounds3& bounds) const { if (!bounds.intersects(node.pointsBound)) { return; } // if search bound contains node bound, simply add all point indexes. if (node.pointsBound.isInside(bounds)) { for (uint32_t i = node.first; i <= node.last; i++) pushResult(callback, m_indices[i]); return; // early pruning. } if (node.child[0] < 0) { for (uint32_t i = node.first; i <= node.last; i++) { const uint32_t idx = m_indices[i]; if (bounds.contains(m_points[idx])) pushResult(callback, idx); } return; } // check whether child nodes are in range. 
for (uint32_t c = 0; c < 2; ++c) { findPointsInBounds(m_nodes[node.child[c]], callback, bounds); } } void ExtDamageAcceleratorAABBTree::findSegmentsInBounds(const Node& node, ResultCallback& callback, const nvidia::NvBounds3& bounds) const { if (!bounds.intersects(node.segmentsBound)) { return; } // if search bound contains node bound, simply add all point indexes. if (node.segmentsBound.isInside(bounds)) { for (uint32_t i = node.first; i <= node.last; i++) pushResult(callback, m_indices[i]); return; // early pruning. } if (node.child[0] < 0) { for (uint32_t i = node.first; i <= node.last; i++) { const uint32_t idx = m_indices[i]; if (bounds.contains(m_segments[idx].p0) || bounds.contains(m_segments[idx].p1)) pushResult(callback, idx); } return; } // check whether child nodes are in range. for (uint32_t c = 0; c < 2; ++c) { findSegmentsInBounds(m_nodes[node.child[c]], callback, bounds); } } bool intersectSegmentPlane(const NvVec3& v1, const NvVec3& v2, const NvPlane& p) { const bool s1 = p.distance(v1) > 0.f; const bool s2 = p.distance(v2) > 0.f; return (s1 && !s2) || (s2 && !s1); } bool intersectBoundsPlane(const NvBounds3& b, const NvPlane& p) { const NvVec3 extents = b.getExtents(); const NvVec3 center = b.getCenter(); float r = extents.x * NvAbs(p.n.x) + extents.y * NvAbs(p.n.y) + extents.z * NvAbs(p.n.z); float s = p.n.dot(center) + p.d; return NvAbs(s) <= r; } void ExtDamageAcceleratorAABBTree::findBondSegmentsPlaneIntersected(const nvidia::NvPlane& plane, ResultCallback& resultCallback) const { if (m_root) { findSegmentsPlaneIntersected(*m_root, resultCallback, plane); resultCallback.dispatch(); } } void ExtDamageAcceleratorAABBTree::findSegmentsPlaneIntersected(const Node& node, ResultCallback& callback, const nvidia::NvPlane& plane) const { if (!intersectBoundsPlane(node.segmentsBound, plane)) { return; } if (node.child[0] < 0) { for (uint32_t i = node.first; i <= node.last; i++) { const uint32_t idx = m_indices[i]; if (intersectSegmentPlane(m_segments[idx].p0, m_segments[idx].p1, plane)) pushResult(callback, idx); } return; } // check whether child nodes are in range. for (uint32_t c = 0; c < 2; ++c) { findSegmentsPlaneIntersected(m_nodes[node.child[c]], callback, plane); } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Debug Render /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// inline uint32_t NvVec4ToU32Color(const NvVec4& color) { uint32_t c = 0; c |= (int)(color.w * 255); c <<= 8; c |= (int)(color.z * 255); c <<= 8; c |= (int)(color.y * 255); c <<= 8; c |= (int)(color.x * 255); return c; } Nv::Blast::DebugBuffer ExtDamageAcceleratorAABBTree::fillDebugRender(int depth, bool segments) { Nv::Blast::DebugBuffer debugBuffer = { nullptr, 0 }; m_debugLineBuffer.clear(); if (m_root) { fillDebugBuffer(*m_root, 0, depth, segments); } debugBuffer.lines = m_debugLineBuffer.begin(); debugBuffer.lineCount = m_debugLineBuffer.size(); return debugBuffer; } void ExtDamageAcceleratorAABBTree::fillDebugBuffer(const Node& node, int currentDepth, int depth, bool segments) { if (depth < 0 || currentDepth == depth) { const NvVec4 LEAF_COLOR(1.0f, 1.0f, 1.0f, 1.0f); const NvVec4 NON_LEAF_COLOR(0.3f, 0.3f, 0.3f, 1.0f); // draw box const NvBounds3 bounds = segments ? 
node.segmentsBound : node.pointsBound; const NvVec3 center = bounds.getCenter(); const NvVec3 extents = bounds.getExtents(); const int vs[] = { 0,3,5,6 }; for (int i = 0; i < 4; i++) { int v = vs[i]; for (int d = 1; d < 8; d <<= 1) { auto flip = [](int x, int k) { return ((x >> k) & 1) * 2.f - 1.f; }; const float s = std::pow(0.99f, currentDepth); NvVec3 p0 = center + s * extents.multiply(NvVec3(flip(v, 0), flip(v, 1), flip(v, 2))); NvVec3 p1 = center + s * extents.multiply(NvVec3(flip(v^d, 0), flip(v^d, 1), flip(v^d, 2))); m_debugLineBuffer.pushBack(Nv::Blast::DebugLine( reinterpret_cast<NvcVec3&>(p0), reinterpret_cast<NvcVec3&>(p1), NvVec4ToU32Color(LEAF_COLOR * (1.f - (currentDepth + 1) * 0.1f))) ); } } } for (uint32_t i = 0; i < 2; ++i) { if (node.child[i] >= 0) { fillDebugBuffer(m_nodes[node.child[i]], currentDepth + 1, depth, segments); } } } } // namespace Blast } // namespace Nv
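// Usage example (illustrative sketch only, not part of the SDK source): gathering all bonds whose
// centroid lies inside an axis-aligned box. Results arrive in batches through processResults();
// 'asset' and 'queryBounds' are assumed to be provided by the caller.
//
//     class GatherCallback : public ExtDamageAcceleratorInternal::ResultCallback
//     {
//     public:
//         GatherCallback() : ExtDamageAcceleratorInternal::ResultCallback(m_buffer, 256) {}
//         virtual void processResults(const ExtDamageAcceleratorInternal::QueryBondData* bonds,
//                                     uint32_t count) override
//         {
//             for (uint32_t i = 0; i < count; ++i)
//             {
//                 // bonds[i].bond is the blast bond index, bonds[i].node0/node1 its graph nodes
//             }
//         }
//     private:
//         ExtDamageAcceleratorInternal::QueryBondData m_buffer[256];
//     };
//
//     ExtDamageAcceleratorAABBTree* tree = ExtDamageAcceleratorAABBTree::create(asset);
//     GatherCallback callback;
//     tree->findBondCentroidsInBounds(queryBounds, callback);
//     tree->release();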
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/shaders/NvBlastExtDamageAcceleratorAABBTree.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvBlastExtDamageAcceleratorInternal.h" #include "NvBlast.h" #include "NvBlastArray.h" namespace Nv { namespace Blast { class ExtDamageAcceleratorAABBTree final : public ExtDamageAcceleratorInternal { public: //////// ctor //////// ExtDamageAcceleratorAABBTree() : m_root(nullptr) { } virtual ~ExtDamageAcceleratorAABBTree() { } static ExtDamageAcceleratorAABBTree* create(const NvBlastAsset* asset); //////// interface //////// virtual void release() override; virtual void findBondCentroidsInBounds(const nvidia::NvBounds3& bounds, ResultCallback& resultCallback) const override { const_cast<ExtDamageAcceleratorAABBTree*>(this)->findInBounds(bounds, resultCallback, false); } virtual void findBondSegmentsInBounds(const nvidia::NvBounds3& bounds, ResultCallback& resultCallback) const override { const_cast<ExtDamageAcceleratorAABBTree*>(this)->findInBounds(bounds, resultCallback, true); } virtual void findBondSegmentsPlaneIntersected(const nvidia::NvPlane& plane, ResultCallback& resultCallback) const override; virtual Nv::Blast::DebugBuffer fillDebugRender(int depth, bool segments) override; virtual void* getImmediateScratch(size_t size) override { m_scratch.resizeUninitialized(size); return m_scratch.begin(); } private: // no copy/assignment ExtDamageAcceleratorAABBTree(ExtDamageAcceleratorAABBTree&); ExtDamageAcceleratorAABBTree& operator=(const ExtDamageAcceleratorAABBTree& tree); // Tree node struct Node { int child[2]; uint32_t first; uint32_t last; nvidia::NvBounds3 pointsBound; nvidia::NvBounds3 segmentsBound; }; void build(const NvBlastAsset* asset); int createNode(uint32_t startIdx, uint32_t endIdx, uint32_t depth); void pushResult(ResultCallback& callback, uint32_t pointIndex) const { callback.push(pointIndex, m_bonds[pointIndex].node0, m_bonds[pointIndex].node1); } void findInBounds(const nvidia::NvBounds3& bounds, ResultCallback& callback, bool segments) const; void findPointsInBounds(const Node& node, ResultCallback& callback, const 
nvidia::NvBounds3& bounds) const; void findSegmentsInBounds(const Node& node, ResultCallback& callback, const nvidia::NvBounds3& bounds) const; void findSegmentsPlaneIntersected(const Node& node, ResultCallback& callback, const nvidia::NvPlane& plane) const; void fillDebugBuffer(const Node& node, int currentDepth, int depth, bool segments); //////// data //////// Node* m_root; Array<Node>::type m_nodes; Array<uint32_t>::type m_indices; Array<nvidia::NvVec3>::type m_points; struct Segment { nvidia::NvVec3 p0; nvidia::NvVec3 p1; }; Array<Segment>::type m_segments; struct BondData { uint32_t node0; uint32_t node1; }; Array<BondData>::type m_bonds; Array<Nv::Blast::DebugLine>::type m_debugLineBuffer; Array<char>::type m_scratch; }; } // namespace Blast } // namespace Nv
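// Note (informal): the tree is a binary AABB tree stored as a flat array of Node entries. A node is
// a leaf when child[0] < 0; its [first, last] range indexes into m_indices, which permutes the bonds
// so that every subtree occupies a contiguous range. Construction (see the .cpp) splits each range on
// the axis of largest extent with std::nth_element and stops subdividing once a range holds 32 bonds
// or fewer, so queries can prune whole subtrees by testing pointsBound/segmentsBound first.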
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/shaders/NvBlastExtDamageShaders.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtDamageShaders.h" #include "NvBlastExtDamageAcceleratorInternal.h" #include "NvBlastIndexFns.h" #include "NvBlastMath.h" #include "NvBlastGeometry.h" #include "NvBlastAssert.h" #include "NvBlastFixedQueue.h" #include "NvBlastFixedBitmap.h" #include "NvBlast.h" #include <cmath> // for abs() on linux #include <new> using namespace Nv::Blast; using namespace Nv::Blast::VecMath; using namespace nvidia; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Profiles /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// typedef float(*ProfileFunction)(float, float, float, float); float falloffProfile(float min, float max, float x, float f = 1.0f) { if (x > max) return 0.0f; if (x < min) return f; float y = 1.0f - (x - min) / (max - min); return y * f; } float cutterProfile(float min, float max, float x, float f = 1.0f) { if (x > max || x < min) return 0.0f; return f; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Damage Functions /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// typedef float(*DamageFunction)(const float pos[3], const void* damageDescBuffer); template <ProfileFunction profileFn, typename DescT = NvBlastExtRadialDamageDesc> float pointDistanceDamage(const float pos[3], const void* damageDescBuffer) { const DescT& desc = *static_cast<const DescT*>(damageDescBuffer); float relativePosition[3]; sub(desc.position, pos, relativePosition); const float distance = sqrtf(dot(relativePosition, relativePosition)); const float damage = profileFn(desc.minRadius, desc.maxRadius, distance, desc.damage); return damage; } // Distance from point 'p' to line segment '(a, b)' float distanceToSegment(const float p[3], const float a[3], const float b[3]) { float v[3]; sub(b, a, v); float w[3]; 
sub(p, a, w); const float c1 = dot(v, w); if (c1 <= 0) return length(w); const float c2 = dot(v, v); if (c2 < c1) return dist(p, b); const float t = c1 / c2; mul(v, t); return dist(v, w); } template <ProfileFunction profileFn> float capsuleDistanceDamage(const float pos[3], const void* damageDesc) { const NvBlastExtCapsuleRadialDamageDesc& desc = *static_cast<const NvBlastExtCapsuleRadialDamageDesc*>(damageDesc); const float distance = distanceToSegment(pos, desc.position0, desc.position1); const float damage = profileFn(desc.minRadius, desc.maxRadius, distance, desc.damage); return damage; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // AABB Functions /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// typedef NvBounds3(*BoundFunction)(const void* damageDesc); NvBounds3 sphereBounds(const void* damageDesc) { const NvBlastExtRadialDamageDesc& desc = *static_cast<const NvBlastExtRadialDamageDesc*>(damageDesc); const nvidia::NvVec3& p = (reinterpret_cast<const nvidia::NvVec3&>(desc.position)); return nvidia::NvBounds3::centerExtents(p, nvidia::NvVec3(desc.maxRadius, desc.maxRadius, desc.maxRadius)); } NvBounds3 capsuleBounds(const void* damageDesc) { const NvBlastExtCapsuleRadialDamageDesc& desc = *static_cast<const NvBlastExtCapsuleRadialDamageDesc*>(damageDesc); const nvidia::NvVec3& p0 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position0)); const nvidia::NvVec3& p1 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position1)); NvBounds3 b = NvBounds3::empty(); b.include(p0); b.include(p1); b.fattenFast(desc.maxRadius); return b; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Radial Graph Shader Template /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <DamageFunction damageFn, BoundFunction boundsFn> void RadialProfileGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { const uint32_t* graphNodeIndexLinks = actor->graphNodeIndexLinks; const uint32_t firstGraphNodeIndex = actor->firstGraphNodeIndex; const uint32_t* adjacencyPartition = actor->adjacencyPartition; const uint32_t* adjacentNodeIndices = actor->adjacentNodeIndices; const uint32_t* adjacentBondIndices = actor->adjacentBondIndices; const NvBlastBond* assetBonds = actor->assetBonds; const float* familyBondHealths = actor->familyBondHealths; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); uint32_t outCount = 0; auto processBondFn = [&](uint32_t bondIndex, uint32_t node0, uint32_t node1) { // skip bonds that are already broken or were visited already // TODO: investigate why testing against health > -1.0f seems slower // could reuse the island edge bitmap instead if (canTakeDamage(familyBondHealths[bondIndex])) { const NvBlastBond& bond = assetBonds[bondIndex]; const float totalBondDamage = damageFn(bond.centroid, programParams->damageDesc); if (totalBondDamage > 0.0f) { NvBlastBondFractureData& outCommand = commandBuffers->bondFractures[outCount++]; outCommand.nodeIndex0 = node0; outCommand.nodeIndex1 = node1; outCommand.health = totalBondDamage; } } }; const ExtDamageAcceleratorInternal* damageAccelerator = programParams->accelerator ? 
static_cast<const ExtDamageAcceleratorInternal*>(programParams->accelerator) : nullptr; const uint32_t ACTOR_MINIMUM_NODE_COUNT_TO_ACCELERATE = actor->assetNodeCount / 3; if (damageAccelerator && actor->graphNodeCount > ACTOR_MINIMUM_NODE_COUNT_TO_ACCELERATE) { nvidia::NvBounds3 bounds = boundsFn(programParams->damageDesc); const uint32_t CALLBACK_BUFFER_SIZE = 1000; class AcceleratorCallback : public ExtDamageAcceleratorInternal::ResultCallback { public: AcceleratorCallback(NvBlastFractureBuffers* commandBuffers, uint32_t& outCount, const NvBlastGraphShaderActor* actor, const NvBlastExtProgramParams* programParams) : ExtDamageAcceleratorInternal::ResultCallback(m_buffer, CALLBACK_BUFFER_SIZE), m_actor(actor), m_commandBuffers(commandBuffers), m_outCount(outCount), m_programParams(programParams) { } virtual void processResults(const ExtDamageAcceleratorInternal::QueryBondData* bondBuffer, uint32_t count) override { for (uint32_t i = 0; i < count; i++) { const ExtDamageAcceleratorInternal::QueryBondData& bondData = bondBuffer[i]; if (m_actor->nodeActorIndices[bondData.node0] == m_actor->actorIndex) { if (canTakeDamage(m_actor->familyBondHealths[bondData.bond])) { const NvBlastBond& bond = m_actor->assetBonds[bondData.bond]; const float totalBondDamage = damageFn(bond.centroid, m_programParams->damageDesc); if (totalBondDamage > 0.0f) { NvBlastBondFractureData& outCommand = m_commandBuffers->bondFractures[m_outCount++]; outCommand.nodeIndex0 = bondData.node0; outCommand.nodeIndex1 = bondData.node1; outCommand.health = totalBondDamage; } } } } } private: const NvBlastGraphShaderActor* m_actor; NvBlastFractureBuffers* m_commandBuffers; uint32_t& m_outCount; const NvBlastExtProgramParams* m_programParams; ExtDamageAcceleratorInternal::QueryBondData m_buffer[CALLBACK_BUFFER_SIZE]; }; AcceleratorCallback cb(commandBuffers, outCount, actor, programParams); damageAccelerator->findBondCentroidsInBounds(bounds, cb); } else { uint32_t currentNodeIndex = firstGraphNodeIndex; while (!Nv::Blast::isInvalidIndex(currentNodeIndex)) { for (uint32_t adj = adjacencyPartition[currentNodeIndex]; adj < adjacencyPartition[currentNodeIndex + 1]; adj++) { uint32_t adjacentNodeIndex = adjacentNodeIndices[adj]; if (currentNodeIndex < adjacentNodeIndex) { uint32_t bondIndex = adjacentBondIndices[adj]; processBondFn(bondIndex, currentNodeIndex, adjacentNodeIndex); } } currentNodeIndex = graphNodeIndexLinks[currentNodeIndex]; } } commandBuffers->bondFractureCount = outCount; commandBuffers->chunkFractureCount = 0; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Radial Single Shader Template /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <DamageFunction damageFn> void RadialProfileSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { uint32_t chunkFractureCount = 0; uint32_t chunkFractureCountMax = commandBuffers->chunkFractureCount; const uint32_t chunkIndex = actor->chunkIndex; const NvBlastChunk* assetChunks = actor->assetChunks; const NvBlastChunk& chunk = assetChunks[chunkIndex]; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const float totalDamage = damageFn(chunk.centroid, programParams->damageDesc); if (totalDamage > 0.0f && chunkFractureCount < chunkFractureCountMax) { NvBlastChunkFractureData& frac = 
commandBuffers->chunkFractures[chunkFractureCount++]; frac.chunkIndex = chunkIndex; frac.health = totalDamage; } commandBuffers->bondFractureCount = 0; commandBuffers->chunkFractureCount = chunkFractureCount; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Radial Shaders Instantiation /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void NvBlastExtFalloffGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { RadialProfileGraphShader<pointDistanceDamage<falloffProfile>, sphereBounds>(commandBuffers, actor, params); } void NvBlastExtFalloffSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { RadialProfileSubgraphShader<pointDistanceDamage<falloffProfile>>(commandBuffers, actor, params); } void NvBlastExtCutterGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { RadialProfileGraphShader<pointDistanceDamage<cutterProfile>, sphereBounds>(commandBuffers, actor, params); } void NvBlastExtCutterSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { RadialProfileSubgraphShader<pointDistanceDamage<cutterProfile>>(commandBuffers, actor, params); } void NvBlastExtCapsuleFalloffGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { RadialProfileGraphShader<capsuleDistanceDamage<falloffProfile>, capsuleBounds>(commandBuffers, actor, params); } void NvBlastExtCapsuleFalloffSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { RadialProfileSubgraphShader<capsuleDistanceDamage<falloffProfile>>(commandBuffers, actor, params); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Shear Shader /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void NvBlastExtShearGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { uint32_t chunkFractureCount = 0; uint32_t chunkFractureCountMax = commandBuffers->chunkFractureCount; uint32_t bondFractureCount = 0; uint32_t bondFractureCountMax = commandBuffers->bondFractureCount; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const NvBlastExtShearDamageDesc& desc = *static_cast<const NvBlastExtShearDamageDesc*>(programParams->damageDesc); const uint32_t* graphNodeIndexLinks = actor->graphNodeIndexLinks; const uint32_t firstGraphNodeIndex = actor->firstGraphNodeIndex; const uint32_t* chunkIndices = actor->chunkIndices; const uint32_t* adjacencyPartition = actor->adjacencyPartition; const uint32_t* adjacentNodeIndices = actor->adjacentNodeIndices; const uint32_t* adjacentBondIndices = actor->adjacentBondIndices; const NvBlastBond* assetBonds = actor->assetBonds; const NvBlastChunk* assetChunks = actor->assetChunks; const float* familyBondHealths = actor->familyBondHealths; const float* supportChunkHealths = actor->supportChunkHealths; uint32_t closestNode = findClosestNode(desc.position , firstGraphNodeIndex, graphNodeIndexLinks , adjacencyPartition, adjacentNodeIndices, adjacentBondIndices , assetBonds, familyBondHealths , 
assetChunks, supportChunkHealths, chunkIndices); if (!isInvalidIndex(chunkIndices[closestNode])) { uint32_t nodeIndex = closestNode; float maxDist = 0.0f; uint32_t nextNode = invalidIndex<uint32_t>(); if (chunkFractureCount < chunkFractureCountMax) { const uint32_t chunkIndex = chunkIndices[nodeIndex]; const NvBlastChunk& chunk = assetChunks[chunkIndex]; NvBlastChunkFractureData& frac = commandBuffers->chunkFractures[chunkFractureCount++]; frac.chunkIndex = chunkIndex; frac.health = pointDistanceDamage<falloffProfile, NvBlastExtShearDamageDesc>(chunk.centroid, programParams->damageDesc); } do { const uint32_t startIndex = adjacencyPartition[nodeIndex]; const uint32_t stopIndex = adjacencyPartition[nodeIndex + 1]; for (uint32_t adjacentNodeIndex = startIndex; adjacentNodeIndex < stopIndex; adjacentNodeIndex++) { const uint32_t neighbourIndex = adjacentNodeIndices[adjacentNodeIndex]; const uint32_t bondIndex = adjacentBondIndices[adjacentNodeIndex]; const NvBlastBond& bond = assetBonds[bondIndex]; if (!canTakeDamage(familyBondHealths[bondIndex])) continue; float shear = 1 * std::abs(1 - std::abs(VecMath::dot(desc.normal, bond.normal))); float d[3]; VecMath::sub(bond.centroid, desc.position, d); float ahead = VecMath::dot(d, desc.normal); if (ahead > maxDist) { maxDist = ahead; nextNode = neighbourIndex; } const float damage = pointDistanceDamage<falloffProfile, NvBlastExtShearDamageDesc>(bond.centroid, programParams->damageDesc); if (damage > 0.0f && bondFractureCount < bondFractureCountMax) { NvBlastBondFractureData& frac = commandBuffers->bondFractures[bondFractureCount++]; frac.userdata = bond.userData; frac.nodeIndex0 = nodeIndex; frac.nodeIndex1 = neighbourIndex; frac.health = shear * damage; } } if (nodeIndex == nextNode) break; nodeIndex = nextNode; } while (!isInvalidIndex(nextNode)); } commandBuffers->bondFractureCount = bondFractureCount; commandBuffers->chunkFractureCount = chunkFractureCount; } void NvBlastExtShearSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { RadialProfileSubgraphShader<pointDistanceDamage<falloffProfile, NvBlastExtShearDamageDesc>>(commandBuffers, actor, params); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Triangle Intersection Damage /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #define SMALL_NUMBER (1.e-4f) bool intersectSegmentTriangle(const NvVec3& p, const NvVec3& q, const NvVec3& a, const NvVec3& b, const NvVec3& c, const NvPlane& trianglePlane) { const NvVec3 N = trianglePlane.n; const float D = trianglePlane.d; NvVec3 intersectPoint; float t = (-D - (p.dot(N))) / ((q - p).dot(N)); // If the parameter value is not between 0 and 1, there is no intersection if (t > -SMALL_NUMBER && t < 1.f + SMALL_NUMBER) { intersectPoint = p + t * (q - p); } else { return false; } // Compute the normal of the triangle const NvVec3 TriNorm = (b - a).cross(c - a); // Compute twice area of triangle ABC const float AreaABCInv = 1.0f / (N.dot(TriNorm)); // Compute v contribution const float AreaPBC = N.dot((b - intersectPoint).cross(c - intersectPoint)); const float v = AreaPBC * AreaABCInv; if (v <= 0.f) return false; // Compute w contribution const float AreaPCA = N.dot((c - intersectPoint).cross(a - intersectPoint)); const float w = AreaPCA * AreaABCInv; if (w <= 0.f) return false; const float u = 1.0f - v - w; return u > 0.f; } void 
NvBlastExtTriangleIntersectionGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { const uint32_t* graphNodeIndexLinks = actor->graphNodeIndexLinks; const uint32_t firstGraphNodeIndex = actor->firstGraphNodeIndex; const uint32_t* adjacencyPartition = actor->adjacencyPartition; const uint32_t* adjacentNodeIndices = actor->adjacentNodeIndices; const uint32_t* adjacentBondIndices = actor->adjacentBondIndices; const NvBlastBond* assetBonds = actor->assetBonds; const NvBlastChunk* assetChunks = actor->assetChunks; const uint32_t* chunkIndices = actor->chunkIndices; const float* familyBondHealths = actor->familyBondHealths; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const NvBlastExtTriangleIntersectionDamageDesc& desc = *static_cast<const NvBlastExtTriangleIntersectionDamageDesc*>(programParams->damageDesc); const nvidia::NvVec3& t0 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position0)); const nvidia::NvVec3& t1 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position1)); const nvidia::NvVec3& t2 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position2)); const NvPlane trianglePlane(t0, t1, t2); uint32_t outCount = 0; const ExtDamageAcceleratorInternal* damageAccelerator = programParams->accelerator ? static_cast<const ExtDamageAcceleratorInternal*>(programParams->accelerator) : nullptr; const uint32_t ACTOR_MINIMUM_NODE_COUNT_TO_ACCELERATE = actor->assetNodeCount / 3; if (damageAccelerator && actor->graphNodeCount > ACTOR_MINIMUM_NODE_COUNT_TO_ACCELERATE) { const uint32_t CALLBACK_BUFFER_SIZE = 1000; class AcceleratorCallback : public ExtDamageAcceleratorInternal::ResultCallback { public: AcceleratorCallback(NvBlastFractureBuffers* commandBuffers, uint32_t& outCount, const NvBlastGraphShaderActor* actor, const NvBlastExtTriangleIntersectionDamageDesc& desc) : ExtDamageAcceleratorInternal::ResultCallback(m_buffer, CALLBACK_BUFFER_SIZE), m_actor(actor), m_commandBuffers(commandBuffers), m_outCount(outCount), m_desc(desc) { } virtual void processResults(const ExtDamageAcceleratorInternal::QueryBondData* bondBuffer, uint32_t count) override { const nvidia::NvVec3& t0 = (reinterpret_cast<const nvidia::NvVec3&>(m_desc.position0)); const nvidia::NvVec3& t1 = (reinterpret_cast<const nvidia::NvVec3&>(m_desc.position1)); const nvidia::NvVec3& t2 = (reinterpret_cast<const nvidia::NvVec3&>(m_desc.position2)); const NvPlane trianglePlane(t0, t1, t2); for (uint32_t i = 0; i < count; i++) { const ExtDamageAcceleratorInternal::QueryBondData& bondData = bondBuffer[i]; if (m_actor->nodeActorIndices[bondData.node0] == m_actor->actorIndex) { if (canTakeDamage(m_actor->familyBondHealths[bondData.bond])) { const NvBlastBond& bond = m_actor->assetBonds[bondData.bond]; const uint32_t chunkIndex0 = m_actor->chunkIndices[bondData.node0]; const uint32_t chunkIndex1 = m_actor->chunkIndices[bondData.node1]; const nvidia::NvVec3& c0 = (reinterpret_cast<const nvidia::NvVec3&>(m_actor->assetChunks[chunkIndex0].centroid)); const NvVec3& normal = (reinterpret_cast<const NvVec3&>(bond.normal)); const NvVec3& bondCentroid = (reinterpret_cast<const NvVec3&>(bond.centroid)); const nvidia::NvVec3& c1 = isInvalidIndex(chunkIndex1) ? 
(c0 + normal * (bondCentroid - c0).dot(normal)) : (reinterpret_cast<const nvidia::NvVec3&>(m_actor->assetChunks[chunkIndex1].centroid)); if(intersectSegmentTriangle(c0, c1, t0, t1, t2, trianglePlane)) { NvBlastBondFractureData& outCommand = m_commandBuffers->bondFractures[m_outCount++]; outCommand.nodeIndex0 = bondData.node0; outCommand.nodeIndex1 = bondData.node1; outCommand.health = m_desc.damage; } } } } } private: const NvBlastGraphShaderActor* m_actor; NvBlastFractureBuffers* m_commandBuffers; uint32_t& m_outCount; const NvBlastExtTriangleIntersectionDamageDesc& m_desc; ExtDamageAcceleratorInternal::QueryBondData m_buffer[CALLBACK_BUFFER_SIZE]; }; AcceleratorCallback cb(commandBuffers, outCount, actor, desc); damageAccelerator->findBondSegmentsPlaneIntersected(trianglePlane, cb); } else { uint32_t currentNodeIndex = firstGraphNodeIndex; while (!Nv::Blast::isInvalidIndex(currentNodeIndex)) { for (uint32_t adj = adjacencyPartition[currentNodeIndex]; adj < adjacencyPartition[currentNodeIndex + 1]; adj++) { uint32_t adjacentNodeIndex = adjacentNodeIndices[adj]; if (currentNodeIndex < adjacentNodeIndex) { uint32_t bondIndex = adjacentBondIndices[adj]; // skip bonds that are already broken or were visited already // TODO: investigate why testing against health > -1.0f seems slower // could reuse the island edge bitmap instead if (canTakeDamage(familyBondHealths[bondIndex])) { const NvBlastBond& bond = assetBonds[bondIndex]; const uint32_t chunkIndex0 = chunkIndices[currentNodeIndex]; const uint32_t chunkIndex1 = chunkIndices[adjacentNodeIndex]; const nvidia::NvVec3& c0 = (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[chunkIndex0].centroid)); const NvVec3& normal = (reinterpret_cast<const NvVec3&>(bond.normal)); const NvVec3& bondCentroid = (reinterpret_cast<const NvVec3&>(bond.centroid)); const nvidia::NvVec3& c1 = isInvalidIndex(chunkIndex1) ? 
(c0 + normal * (bondCentroid - c0).dot(normal)) : (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[chunkIndex1].centroid)); if (intersectSegmentTriangle(c0, c1, t0, t1, t2, trianglePlane)) { NvBlastBondFractureData& outCommand = commandBuffers->bondFractures[outCount++]; outCommand.nodeIndex0 = currentNodeIndex; outCommand.nodeIndex1 = adjacentNodeIndex; outCommand.health = desc.damage; } } } } currentNodeIndex = graphNodeIndexLinks[currentNodeIndex]; } } commandBuffers->bondFractureCount = outCount; commandBuffers->chunkFractureCount = 0; } void NvBlastExtTriangleIntersectionSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { uint32_t chunkFractureCount = 0; uint32_t chunkFractureCountMax = commandBuffers->chunkFractureCount; const uint32_t chunkIndex = actor->chunkIndex; const NvBlastChunk* assetChunks = actor->assetChunks; const NvBlastChunk& chunk = assetChunks[chunkIndex]; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const NvBlastExtTriangleIntersectionDamageDesc& desc = *static_cast<const NvBlastExtTriangleIntersectionDamageDesc*>(programParams->damageDesc); const nvidia::NvVec3& t0 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position0)); const nvidia::NvVec3& t1 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position1)); const nvidia::NvVec3& t2 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position2)); const NvPlane trianglePlane(t0, t1, t2); for (uint32_t subChunkIndex = chunk.firstChildIndex; subChunkIndex < chunk.childIndexStop; subChunkIndex++) { const nvidia::NvVec3& c0 = (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[subChunkIndex].centroid)); const nvidia::NvVec3& c1 = (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[subChunkIndex + 1].centroid)); if (chunkFractureCount < chunkFractureCountMax && intersectSegmentTriangle(c0, c1, t0, t1, t2, trianglePlane)) { NvBlastChunkFractureData& frac = commandBuffers->chunkFractures[chunkFractureCount++]; frac.chunkIndex = chunkIndex; frac.health = desc.damage; break; } } commandBuffers->bondFractureCount = 0; commandBuffers->chunkFractureCount = chunkFractureCount; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Impact Spread Shader /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void NvBlastExtImpactSpreadGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { uint32_t bondFractureCount = 0; uint32_t bondFractureCountMax = commandBuffers->bondFractureCount; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const NvBlastExtImpactSpreadDamageDesc& desc = *static_cast<const NvBlastExtImpactSpreadDamageDesc*>(programParams->damageDesc); const uint32_t* graphNodeIndexLinks = actor->graphNodeIndexLinks; const uint32_t firstGraphNodeIndex = actor->firstGraphNodeIndex; const uint32_t* chunkIndices = actor->chunkIndices; const uint32_t* adjacencyPartition = actor->adjacencyPartition; const uint32_t* adjacentNodeIndices = actor->adjacentNodeIndices; const uint32_t* adjacentBondIndices = actor->adjacentBondIndices; const NvBlastBond* assetBonds = actor->assetBonds; const NvBlastChunk* assetChunks = actor->assetChunks; const float* familyBondHealths = actor->familyBondHealths; const float* supportChunkHealths = actor->supportChunkHealths; // Find nearest 
chunk. uint32_t closestNode = findClosestNode(desc.position , firstGraphNodeIndex, graphNodeIndexLinks , adjacencyPartition, adjacentNodeIndices, adjacentBondIndices , assetBonds, familyBondHealths , assetChunks, supportChunkHealths, chunkIndices); // Breadth-first support graph traversal. For radial falloff metric distance is measured along the edges of the graph ExtDamageAcceleratorInternal* damageAccelerator = programParams->accelerator ? static_cast<ExtDamageAcceleratorInternal*>(programParams->accelerator) : nullptr; NVBLAST_ASSERT_WITH_MESSAGE(damageAccelerator, "This shader requires damage accelerator passed"); if (!isInvalidIndex(chunkIndices[closestNode]) && damageAccelerator) { struct NodeData { uint32_t index; float distance; }; // Calculating scratch size and requesting it from the accelerator const uint32_t bondCount = actor->adjacencyPartition[actor->assetNodeCount]; const size_t nodeQueueSize = align16(FixedQueue<NodeData>::requiredMemorySize(actor->graphNodeCount)); const size_t visitedBitmapSize = align16(FixedBitmap::requiredMemorySize(bondCount)); const size_t scratchSize = 16 + nodeQueueSize + visitedBitmapSize; void* scratch = damageAccelerator->getImmediateScratch(scratchSize); // prepare intermediate data on scratch scratch = (void*)align16((size_t)scratch); // Bump to 16-byte alignment FixedQueue<NodeData>* nodeQueue = new (scratch)FixedQueue<NodeData>(actor->graphNodeCount); scratch = pointerOffset(scratch, align16(nodeQueueSize)); FixedBitmap* visitedBitmap = new (scratch)FixedBitmap(bondCount); scratch = pointerOffset(scratch, align16(FixedBitmap::requiredMemorySize(bondCount))); // initalize traversal nodeQueue->pushBack({ closestNode, 0.f }); visitedBitmap->clear(); while (!nodeQueue->empty()) { NodeData currentNode = nodeQueue->popFront(); const uint32_t startIndex = adjacencyPartition[currentNode.index]; const uint32_t stopIndex = adjacencyPartition[currentNode.index + 1]; for (uint32_t adjacentNodeIndex = startIndex; adjacentNodeIndex < stopIndex; adjacentNodeIndex++) { const uint32_t neighbourIndex = adjacentNodeIndices[adjacentNodeIndex]; const uint32_t bondIndex = adjacentBondIndices[adjacentNodeIndex]; const NvBlastBond& bond = assetBonds[bondIndex]; const NvVec3& bondCentroid = (reinterpret_cast<const NvVec3&>(bond.centroid)); if (!canTakeDamage(familyBondHealths[bondIndex])) continue; if (visitedBitmap->test(bondIndex)) continue; visitedBitmap->set(bondIndex); const uint32_t chunkIndex0 = chunkIndices[currentNode.index]; const uint32_t chunkIndex1 = chunkIndices[neighbourIndex]; const nvidia::NvVec3& c0 = reinterpret_cast<const nvidia::NvVec3&>(assetChunks[chunkIndex0].centroid); bool isNeighbourWorldChunk = isInvalidIndex(chunkIndex1); const nvidia::NvVec3& c1 = isNeighbourWorldChunk ? bondCentroid : (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[chunkIndex1].centroid)); const float distance = (c1 - c0).magnitude() * (isNeighbourWorldChunk ? 
2.f : 1.f); float totalDistance = currentNode.distance + distance; float totalDamage = desc.damage * falloffProfile(desc.minRadius, desc.maxRadius, totalDistance); if (totalDamage > 0.0f && bondFractureCount < bondFractureCountMax) { NvBlastBondFractureData& frac = commandBuffers->bondFractures[bondFractureCount++]; frac.userdata = bond.userData; frac.nodeIndex0 = currentNode.index; frac.nodeIndex1 = neighbourIndex; frac.health = totalDamage; if (!isNeighbourWorldChunk) { nodeQueue->pushBack({ neighbourIndex, totalDistance }); } } } } } commandBuffers->bondFractureCount = bondFractureCount; commandBuffers->chunkFractureCount = 0; } void NvBlastExtImpactSpreadSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { uint32_t chunkFractureCount = 0; uint32_t chunkFractureCountMax = commandBuffers->chunkFractureCount; const uint32_t chunkIndex = actor->chunkIndex; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const NvBlastExtImpactSpreadDamageDesc& desc = *static_cast<const NvBlastExtImpactSpreadDamageDesc*>(programParams->damageDesc); if (chunkFractureCount < chunkFractureCountMax) { NvBlastChunkFractureData& frac = commandBuffers->chunkFractures[chunkFractureCount++]; frac.chunkIndex = chunkIndex; frac.health = desc.damage; } commandBuffers->bondFractureCount = 0; commandBuffers->chunkFractureCount = chunkFractureCount; }
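The radial shaders above all reduce to the same two steps: measure the distance from the damage origin to a bond or chunk centroid, then run that distance through a profile. A minimal standalone sketch of that arithmetic (illustrative only, with hypothetical values, not part of the SDK sources):

#include <cmath>
#include <cstdio>

// Same shape as falloffProfile(): full damage inside minRadius, linear decay to zero at maxRadius.
static float falloff(float minRadius, float maxRadius, float distance, float damage)
{
    if (distance > maxRadius) return 0.0f;
    if (distance < minRadius) return damage;
    return (1.0f - (distance - minRadius) / (maxRadius - minRadius)) * damage;
}

int main()
{
    const float origin[3]   = { 0.0f, 0.0f, 0.0f };   // hypothetical damage position
    const float centroid[3] = { 1.0f, 2.0f, 2.0f };   // hypothetical bond centroid
    const float d[3] = { centroid[0] - origin[0], centroid[1] - origin[1], centroid[2] - origin[2] };
    const float distance = std::sqrt(d[0] * d[0] + d[1] * d[1] + d[2] * d[2]);   // = 3
    // minRadius = 1, maxRadius = 5, damage = 1  =>  (1 - (3 - 1) / (5 - 1)) * 1 = 0.5
    std::printf("bond health removed: %g\n", falloff(1.0f, 5.0f, distance, 1.0f));
    return 0;
}

The cutter variant differs only in the profile: it applies the full damage everywhere inside [minRadius, maxRadius] and nothing outside.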
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/shaders/NvBlastExtDamageAcceleratorInternal.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvBlastExtDamageShaders.h" #include "NvBounds3.h" namespace Nv { namespace Blast { class ExtDamageAcceleratorInternal : public NvBlastExtDamageAccelerator { public: struct QueryBondData { uint32_t bond; uint32_t node0; uint32_t node1; }; class ResultCallback { public: ResultCallback(QueryBondData* buffer, uint32_t count) : m_bondBuffer(buffer), m_bondMaxCount(count), m_bondCount(0) {} virtual void processResults(const QueryBondData* bondBuffer, uint32_t count) = 0; void push(uint32_t bond, uint32_t node0, uint32_t node1) { m_bondBuffer[m_bondCount].bond = bond; m_bondBuffer[m_bondCount].node0 = node0; m_bondBuffer[m_bondCount].node1 = node1; m_bondCount++; if (m_bondCount == m_bondMaxCount) { dispatch(); } } void dispatch() { if (m_bondCount) { processResults(m_bondBuffer, m_bondCount); m_bondCount = 0; } } private: QueryBondData* m_bondBuffer; uint32_t m_bondMaxCount; uint32_t m_bondCount; }; virtual void findBondCentroidsInBounds(const nvidia::NvBounds3& bounds, ResultCallback& resultCallback) const = 0; virtual void findBondSegmentsInBounds(const nvidia::NvBounds3& bounds, ResultCallback& resultCallback) const = 0; virtual void findBondSegmentsPlaneIntersected(const nvidia::NvPlane& plane, ResultCallback& resultCallback) const = 0; // Non-thread safe! Multiple calls return the same memory. virtual void* getImmediateScratch(size_t size) = 0; }; } // namespace Blast } // namespace Nv
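The ResultCallback contract above is batch-oriented: push() accumulates hits into a caller-supplied buffer and processResults() receives each filled batch (dispatch() flushes any remainder). A hedged sketch of a minimal subclass, with the accelerator and query bounds assumed to come from elsewhere:

#include "NvBlastExtDamageAcceleratorInternal.h"
#include <cstdint>

static const uint32_t kCallbackBufferSize = 64;    // hypothetical batch size

class CountingBondCallback : public Nv::Blast::ExtDamageAcceleratorInternal::ResultCallback
{
public:
    CountingBondCallback() : ResultCallback(m_buffer, kCallbackBufferSize), m_total(0) {}

    // Called with each filled batch; a real shader would read bond/node0/node1 here.
    virtual void processResults(const Nv::Blast::ExtDamageAcceleratorInternal::QueryBondData* bondBuffer,
                                uint32_t count) override
    {
        (void)bondBuffer;
        m_total += count;
    }

    uint32_t total() const { return m_total; }

private:
    Nv::Blast::ExtDamageAcceleratorInternal::QueryBondData m_buffer[kCallbackBufferSize];
    uint32_t m_total;
};

// Hypothetical usage, with 'accel' and 'bounds' supplied by the caller (the graph
// shaders in NvBlastExtDamageShaders.cpp rely on the query delivering its results
// through processResults() before returning):
//   CountingBondCallback cb;
//   accel->findBondCentroidsInBounds(bounds, cb);
//   uint32_t candidateBonds = cb.total();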
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/shaders/NvBlastExtDamageAccelerators.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //#include "NvBlastExtDamageAcceleratorOctree.h" //#include "NvBlastExtDamageAcceleratorKdtree.h" #include "NvBlastExtDamageAcceleratorAABBTree.h" NvBlastExtDamageAccelerator* NvBlastExtDamageAcceleratorCreate(const NvBlastAsset* asset, int type) { switch (type) { case 0: return nullptr; default: return Nv::Blast::ExtDamageAcceleratorAABBTree::create(asset); break; } }
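A small usage sketch of the factory above. It assumes the asset was created elsewhere and that NvBlastExtDamageAcceleratorCreate is exposed, like NvBlastExtDamageAccelerator itself, directly or transitively through NvBlastExtDamageShaders.h:

#include "NvBlastExtDamageShaders.h"

NvBlastExtDamageAccelerator* createAcceleratorForAsset(const NvBlastAsset* asset /* assumed valid */)
{
    // type == 0 returns nullptr (no acceleration); any non-zero type currently
    // selects the AABB-tree implementation.
    NvBlastExtDamageAccelerator* accel = NvBlastExtDamageAcceleratorCreate(asset, /*type=*/1);

    // The pointer is intended to be stored in NvBlastExtProgramParams::accelerator,
    // where the graph shaders in NvBlastExtDamageShaders.cpp read it.
    return accel;
}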
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtInputStream.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtInputStream.h" Nv::Blast::ExtInputStream::ExtInputStream(std::istream &inputStream) : m_inputStream(inputStream) { } size_t Nv::Blast::ExtInputStream::tryRead(void* buffer, size_t /*minBytes*/, size_t maxBytes) { m_inputStream.read((char *) buffer, maxBytes); if (m_inputStream.fail()) { // Throw exception, log error // NVBLAST_LOG_ERROR("Failure when reading from stream"); } // Since we're using a blocking read above, if we don't have maxBytes we're probably done if ((size_t) m_inputStream.gcount() < maxBytes) { // NVBLAST_LOG_ERROR("Failed to read requested number of bytes during blocking read."); } return m_inputStream.gcount(); }
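ExtInputStream is a thin adapter from std::istream to kj::InputStream so that capnp::InputStreamMessageReader (see ExtSerializationCAPN.h below) can read Blast data from any standard stream. A small sketch exercising it directly on an in-memory stream:

#include "NvBlastExtInputStream.h"
#include <sstream>
#include <string>
#include <cassert>

void readFromMemory()
{
    std::istringstream source(std::string("blast", 5));   // hypothetical 5-byte payload
    Nv::Blast::ExtInputStream wrapped(source);

    char buffer[5] = {};
    const size_t got = wrapped.tryRead(buffer, 1, sizeof(buffer));   // blocking read of up to 5 bytes
    assert(got == 5 && buffer[0] == 'b');
    (void)got;
}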
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtSerializationInternal.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvBlastExtSerialization.h" #include <cstring> #define ExtSerializerBoilerplate(_name, _description, _objectTypeID, _encodingID) \ virtual const char* getName() const override { return _name; } \ virtual const char* getDescription() const override { return _description; } \ virtual uint32_t getObjectTypeID() const override { return _objectTypeID; } \ virtual uint32_t getEncodingID() const override { return _encodingID; } #define ExtSerializerReadOnly(_name) \ virtual bool isReadOnly() const override { return true; } \ virtual uint64_t serializeIntoBuffer \ ( \ void*& buffer, \ ExtSerialization::BufferProvider& bufferProvider, \ const void* object, \ uint64_t offset = 0 \ ) override \ { \ NVBLAST_LOG_WARNING(#_name "::serializeIntoBuffer: serializer is read-only."); \ NV_UNUSED(buffer); \ NV_UNUSED(bufferProvider); \ NV_UNUSED(object); \ NV_UNUSED(offset); \ return 0; \ } #define ExtSerializerDefaultFactoryAndRelease(_classname) \ static ExtSerializer* create() \ { \ return NVBLAST_NEW(_classname) (); \ } \ virtual void release() override \ { \ NVBLAST_DELETE(this, _classname); \ } namespace Nv { namespace Blast { /** Serializer internal interface */ class ExtSerializer { public: virtual ~ExtSerializer() {} /** return the name of this serializer. */ virtual const char* getName() const = 0; /** return a description of this serializer. */ virtual const char* getDescription() const = 0; /** return an identifier for the type of object handled. */ virtual uint32_t getObjectTypeID() const = 0; /** return an identifier for serialization format. */ virtual uint32_t getEncodingID() const = 0; /** Whether or not this serializer supports writing. Legacy formats, for example, may not. \return true iff this serialization does not support writing. */ virtual bool isReadOnly() const { return false; } /** Deserialize from a buffer into a newly allocated object. \param[in] buffer Pointer to the buffer to read. \param[in] size Size of the buffer to read. 
\return object pointer; returns null if failed to deserialize. */ virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) = 0; /** Serialize into a buffer. Allocates the buffer internally using the ExtSerialization::BufferProvider callack interface. \param[out] buffer Pointer to the buffer created. \param[in] bufferProvider The buffer provider callback interface to use. \param[in] object Object pointer. \return the number of bytes serialized into the buffer (zero if unsuccessful). */ virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) = 0; /** Release the serializer and free associated memory. */ virtual void release() = 0; }; /** Internal serialization manager interface */ class ExtSerializationInternal : public ExtSerialization { public: /** Internal interfaces to register and unregister a serializer, used by modules to automatically register all of their serializers with a serialization manager. */ virtual bool registerSerializer(ExtSerializer& serializer) = 0; virtual bool unregisterSerializer(ExtSerializer& serializer) = 0; /** Find a registered serializer for the given object type and encoding. \param[in] objectTypeID ID for the requested object type. \param[in] encodingID ID for the requested encoding (see EncodingID). \return a registered serializer if found, NULL otherwise. */ virtual ExtSerializer* findSerializer(uint32_t objectTypeID, uint32_t encodingID) = 0; //// Enums //// enum { HeaderSize = 128 }; }; template<typename Factory, size_t N> size_t ExtSerializationLoadSet(Nv::Blast::ExtSerializationInternal& serialization, Factory(&factories)[N]) { size_t count = 0; for (auto f : factories) { Nv::Blast::ExtSerializer* serializer = f(); if (serializer != nullptr) { if (serialization.registerSerializer(*serializer)) { ++count; } else { NVBLAST_LOG_ERROR("Nv::Blast::ExtSerializationLoadSet: failed to register serailizer:"); NVBLAST_LOG_ERROR(serializer->getName()); serializer->release(); } } else { NVBLAST_LOG_ERROR("Nv::Blast::ExtSerializationLoadSet: failed to create serailizer."); } } return count; } class ExtIStream { public: enum Flags { LittleEndian = (1 << 0), Fail = (1 << 1) }; ExtIStream(const void* buffer, size_t size) : m_buf(reinterpret_cast<const char*>(buffer)), m_flags(0) { m_cur = m_buf; m_end = m_buf + size; const uint16_t x = LittleEndian; m_flags = *reinterpret_cast<const char*>(&x); } bool advance(ptrdiff_t diff) { m_cur += diff; if (m_cur < m_buf) { m_cur = m_buf; m_flags |= Fail; return false; } else if (m_cur > m_end) { m_cur = m_end; m_flags |= Fail; return false; } return true; } const void* view() { return m_cur; } bool read(void* buffer, size_t size) { if (!canRead(size)) return false; std::memcpy(buffer, m_cur, size); m_cur += size; return true; } size_t tellg() const { return m_cur - m_buf; } size_t left() const { return m_end - m_cur; } bool eof() const { return m_cur >= m_end; } bool fail() const { return (m_flags & Fail) != 0; } private: const char* m_buf; const char* m_cur; const char* m_end; uint32_t m_flags; bool isLittleEndian() const { return (m_flags & LittleEndian) != 0; } bool canRead(size_t size) const { return m_cur + size <= m_end; } template<typename T> friend ExtIStream& operator >> (ExtIStream& s, T& x); }; template<typename T> NV_INLINE ExtIStream& operator >> (ExtIStream& s, T& x) { if (s.canRead(sizeof(T))) { if (s.isLittleEndian()) { x = *reinterpret_cast<const T*>(s.m_cur); s.m_cur += sizeof(T); } else { char* b = 
reinterpret_cast<char*>(&x) + sizeof(T); for (size_t n = sizeof(T); n--;) *--b = *s.m_cur++; } } else { s.m_flags |= ExtIStream::Fail; } return s; } } // namespace Blast } // namespace Nv
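ExtIStream, by contrast, is a byte-oriented reader over an in-memory buffer: operator>> always interprets the data as little-endian and sets the Fail flag instead of reading past the end. A short sketch using a hypothetical 12-byte payload:

#include "NvBlastExtSerializationInternal.h"
#include <cstdint>
#include <cassert>

void readTwoFields()
{
    // Hypothetical little-endian payload: a uint32_t (42) followed by a uint64_t (1).
    const unsigned char payload[12] = { 0x2A, 0, 0, 0,
                                        0x01, 0, 0, 0, 0, 0, 0, 0 };
    Nv::Blast::ExtIStream stream(payload, sizeof(payload));

    uint32_t a = 0;
    uint64_t b = 0;
    stream >> a >> b;

    assert(!stream.fail() && a == 42 && b == 1);
    assert(stream.eof());                   // all 12 bytes consumed
    (void)a; (void)b;
}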
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtOutputStream.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "kj/io.h" #include <ostream> namespace Nv { namespace Blast { class ExtOutputStream : public kj::OutputStream { public: ExtOutputStream() = delete; ExtOutputStream(std::ostream &outputStream); virtual void write(const void* buffer, size_t size) override; private: std::ostream &m_outputStream; }; } // namespace Blast } // namespace Nv
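Mirroring the input adapter, ExtOutputStream lets capnp::writeMessage() target any std::ostream. A minimal sketch, assuming write() simply forwards to the wrapped stream as the declaration suggests:

#include "NvBlastExtOutputStream.h"
#include <sstream>
#include <cassert>

void writeToMemory()
{
    std::ostringstream sink;
    Nv::Blast::ExtOutputStream wrapped(sink);
    wrapped.write("blast", 5);              // assumed to forward to sink.write()
    assert(sink.str() == "blast");
}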
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtTkSerializerRAW.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtSerializationInternal.h" #include "NvBlastTkFramework.h" #include "NvBlastTkAsset.h" #include "NvBlast.h" namespace Nv { namespace Blast { // Legacy IDs struct ExtTkSerializationLegacyID { enum Enum { Framework = NVBLAST_FOURCC('T', 'K', 'F', 'W'), //!< TkFramework identifier token, used in serialization Asset = NVBLAST_FOURCC('A', 'S', 'S', 'T'), //!< TkAsset identifier token, used in serialization Family = NVBLAST_FOURCC('A', 'C', 'T', 'F'), //!< TkFamily identifier token, used in serialization }; }; // Legacy object format versions struct ExtTkSerializationLegacyAssetVersion { enum Enum { /** Initial version */ Initial, // New formats must come before Count. They should be given descriptive names with more information in comments. /** The number of serialized formats. */ Count, /** The current version. This should always be Count-1 */ Current = Count - 1 }; }; struct ExtTkSerializationLegacyFamilyVersion { enum Enum { /** Initial version */ Initial, // New formats must come before Count. They should be given descriptive names with more information in comments. /** The number of serialized formats. */ Count, /** The current version. 
This should always be Count-1 */ Current = Count - 1 }; }; static bool deserializeTkObjectHeader(uint32_t& legacyTypeID, uint32_t& legacyVersion, NvBlastID& objID, uint64_t& userIntData, ExtIStream& stream) { // Read framework ID uint32_t fwkID = 0; // Initialize to silence some compilers stream >> fwkID; if (fwkID != ExtTkSerializationLegacyID::Framework) { NVBLAST_LOG_ERROR("deserializeTkObjectHeader: stream does not contain a BlastTk legacy object."); return false; } // Read object class ID stream >> legacyTypeID; // Read object class version and ensure it's current stream >> legacyVersion; // Object ID stream.read(objID.data, sizeof(NvBlastID)); // Serializable user data uint32_t lsd, msd; stream >> lsd >> msd; userIntData = static_cast<uint64_t>(msd) << 32 | static_cast<uint64_t>(lsd); return !stream.fail(); } TkAsset* deserializeTkAsset(ExtIStream& stream, TkFramework& framework) { // Deserializer header uint32_t legacyTypeID; uint32_t legacyVersion; NvBlastID objID; uint64_t userIntData; if (!deserializeTkObjectHeader(legacyTypeID, legacyVersion, objID, userIntData, stream)) { return nullptr; } if (legacyTypeID != ExtTkSerializationLegacyID::Asset) { NVBLAST_LOG_ERROR("deserializeTkAsset: stream does not contain a BlastTk legacy asset."); return nullptr; } if (legacyVersion > ExtTkSerializationLegacyAssetVersion::Current) { NVBLAST_LOG_ERROR("deserializeTkAsset: stream contains a BlastTk legacy asset which is in an unknown version."); return nullptr; } // LL asset uint32_t assetSize; stream >> assetSize; NvBlastAsset* llAsset = static_cast<NvBlastAsset*>(NVBLAST_ALLOC_NAMED(assetSize, "deserializeTkAsset")); stream.read(reinterpret_cast<char*>(llAsset), assetSize); // Joint descs uint32_t jointDescCount; stream >> jointDescCount; std::vector<TkAssetJointDesc> jointDescs(jointDescCount); for (uint32_t i = 0; i < jointDescs.size(); ++i) { TkAssetJointDesc& jointDesc = jointDescs[i]; stream >> jointDesc.nodeIndices[0]; stream >> jointDesc.nodeIndices[1]; stream >> jointDesc.attachPositions[0].x; stream >> jointDesc.attachPositions[0].y; stream >> jointDesc.attachPositions[0].z; stream >> jointDesc.attachPositions[1].x; stream >> jointDesc.attachPositions[1].y; stream >> jointDesc.attachPositions[1].z; } if (stream.fail()) { NVBLAST_FREE(llAsset); return nullptr; } TkAsset* asset = framework.createAsset(llAsset, jointDescs.data(), (uint32_t)jointDescs.size(), true); NvBlastID zeroID; memset(zeroID.data, 0, sizeof(zeroID)); if (!memcmp(zeroID.data, objID.data, sizeof(NvBlastID))) { asset->setID(objID); } asset->userIntData = userIntData; return asset; } } // namespace Blast } // namespace Nv
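A hedged sketch of driving the legacy asset path above from a raw buffer. It assumes the deserializeTkAsset declaration is visible to the caller and that the TkFramework singleton was created earlier with NvBlastTkFrameworkCreate(); both live outside this file:

#include "NvBlastExtSerializationInternal.h"
#include "NvBlastTkFramework.h"

Nv::Blast::TkAsset* loadLegacyTkAsset(const void* legacyBuffer, uint64_t size)
{
    Nv::Blast::ExtIStream stream(legacyBuffer, static_cast<size_t>(size));

    // Assumed: NvBlastTkFrameworkGet() returns the framework instance created earlier.
    Nv::Blast::TkFramework* framework = NvBlastTkFrameworkGet();
    if (framework == nullptr)
    {
        return nullptr;
    }

    // Returns nullptr if the header, legacy type ID, or version checks above fail.
    return Nv::Blast::deserializeTkAsset(stream, *framework);
}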
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtSerializationCAPN.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "capnp/serialize.h" #include "NvBlastExtInputStream.h" #include "NvBlastExtOutputStream.h" #include "NvBlastArray.h" #include "NvBlastExtSerialization.h" namespace Nv { namespace Blast { template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> class ExtSerializationCAPN { public: static TObject* deserializeFromBuffer(const unsigned char* input, uint64_t size); static TObject* deserializeFromStream(std::istream& inputStream); static uint64_t serializationBufferSize(const TObject* object); static bool serializeIntoBuffer(const TObject* object, unsigned char* buffer, uint64_t maxSize, uint64_t& usedSize); static bool serializeIntoBuffer(const TObject *object, unsigned char*& buffer, uint64_t& size, ExtSerialization::BufferProvider* bufferProvider = nullptr, uint64_t offset = 0); static bool serializeIntoStream(const TObject* object, std::ostream& outputStream); private: // Specialized static bool serializeIntoBuilder(TSerializationBuilder& objectBuilder, const TObject* object); static bool serializeIntoMessage(capnp::MallocMessageBuilder& message, const TObject* object); static TObject* deserializeFromStreamReader(capnp::InputStreamMessageReader& message); }; template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> TObject* ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::deserializeFromBuffer(const unsigned char* input, uint64_t size) { kj::ArrayPtr<const unsigned char> source(input, size); kj::ArrayInputStream inputStream(source); Nv::Blast::Array<uint64_t>::type scratch(static_cast<uint32_t>(size)); kj::ArrayPtr<capnp::word> scratchArray((capnp::word*) scratch.begin(), size); capnp::InputStreamMessageReader message(inputStream, capnp::ReaderOptions(), scratchArray); return deserializeFromStreamReader(message); } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> TObject* ExtSerializationCAPN<TObject, TSerializationReader, 
TSerializationBuilder>::deserializeFromStream(std::istream& inputStream) { ExtInputStream readStream(inputStream); capnp::InputStreamMessageReader message(readStream); return deserializeFromStreamReader(message); } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> uint64_t ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::serializationBufferSize(const TObject* object) { capnp::MallocMessageBuilder message; bool result = serializeIntoMessage(message, object); if (result == false) { return 0; } return computeSerializedSizeInWords(message) * sizeof(uint64_t); } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> bool ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::serializeIntoBuffer(const TObject* object, unsigned char* buffer, uint64_t maxSize, uint64_t& usedSize) { capnp::MallocMessageBuilder message; bool result = serializeIntoMessage(message, object); if (result == false) { usedSize = 0; return false; } uint64_t messageSize = computeSerializedSizeInWords(message) * sizeof(uint64_t); if (maxSize < messageSize) { NVBLAST_LOG_ERROR("When attempting to serialize into an existing buffer, the provided buffer was too small."); usedSize = 0; return false; } kj::ArrayPtr<unsigned char> outputBuffer(buffer, maxSize); kj::ArrayOutputStream outputStream(outputBuffer); capnp::writeMessage(outputStream, message); usedSize = messageSize; return true; } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> bool ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::serializeIntoBuffer(const TObject *object, unsigned char*& buffer, uint64_t& size, ExtSerialization::BufferProvider* bufferProvider, uint64_t offset) { capnp::MallocMessageBuilder message; bool result = serializeIntoMessage(message, object); if (result == false) { buffer = nullptr; size = 0; return false; } const uint64_t blockSize = computeSerializedSizeInWords(message) * sizeof(uint64_t); size = blockSize + offset; buffer = static_cast<unsigned char *>(bufferProvider != nullptr ? bufferProvider->requestBuffer(size) : NVBLAST_ALLOC(size)); kj::ArrayPtr<unsigned char> outputBuffer(buffer + offset, blockSize); kj::ArrayOutputStream outputStream(outputBuffer); capnp::writeMessage(outputStream, message); return true; } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> bool ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::serializeIntoStream(const TObject* object, std::ostream& outputStream) { capnp::MallocMessageBuilder message; bool result = serializeIntoMessage(message, object); if (result == false) { return false; } ExtOutputStream blastOutputStream(outputStream); writeMessage(blastOutputStream, message); return true; } } // namespace Blast } // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtSerialization.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtSerialization.h" #include "NvBlastExtLlSerialization.h" #include "NvBlastArray.h" #include "NvBlastHashMap.h" #include "NvBlastExtSerializationInternal.h" namespace Nv { namespace Blast { class ExtSerializationImpl : public ExtSerializationInternal { public: // Default buffer provider class AllocBufferProvider : public ExtSerialization::BufferProvider { public: virtual void* requestBuffer(size_t size) override; }; ExtSerializationImpl(); ~ExtSerializationImpl(); // ExtSerialization interface begin virtual bool setSerializationEncoding(uint32_t encodingID) override; virtual uint32_t getSerializationEncoding() const override; virtual void setBufferProvider(BufferProvider* bufferProvider) override; virtual bool peekHeader(uint32_t* objectTypeID, uint32_t* encodingID, uint64_t* dataSize, const void* buffer, uint64_t bufferSize) override; virtual const void* skipObject(uint64_t& bufferSize, const void* buffer) override; virtual void* deserializeFromBuffer(const void* buffer, uint64_t size, uint32_t* objectTypeIDPtr = nullptr) override; virtual uint64_t serializeIntoBuffer(void*& buffer, const void* object, uint32_t objectTypeID) override; virtual void release() override; // ExtSerialization interface end // ExtSerializationInternal interface begin virtual bool registerSerializer(ExtSerializer& serializer) override; virtual bool unregisterSerializer(ExtSerializer& serializer) override; virtual ExtSerializer* findSerializer(uint32_t objectTypeID, uint32_t encodingID) override; // ExtSerializationInternal interface end private: char* writeHeaderIntoBuffer(char* buffer, uint64_t bufferSize, uint32_t objectTypeID, uint32_t encodingID, uint64_t dataSize) const; const char* readHeaderFromBuffer(uint32_t* objectTypeID, uint32_t* encodingID, uint64_t* dataSize, const char* buffer, uint64_t bufferSize) const; //// Static data //// static const char* s_identifier; static const char* s_version; static AllocBufferProvider s_defaultBufferProvider; //// Member data //// 
HashMap<uint64_t, ExtSerializer*>::type m_serializers; uint32_t m_serializationEncoding; BufferProvider* m_bufferProvider; }; //////// ExtSerializationImpl static member variables //////// /** Module identifying header. This should never change. */ const char* ExtSerializationImpl::s_identifier = "NVidia(r) GameWorks Blast(tm) v."; const char* ExtSerializationImpl::s_version = "1"; ExtSerializationImpl::AllocBufferProvider ExtSerializationImpl::s_defaultBufferProvider; //////// Local utility functions //////// static NV_INLINE uint64_t generateKey(uint32_t objectTypeID, uint32_t encodingID) { return static_cast<uint64_t>(encodingID) << 32 | static_cast<uint64_t>(objectTypeID); } static NV_INLINE uint64_t generateKey(const ExtSerializer& serializer) { return generateKey(serializer.getObjectTypeID(), serializer.getEncodingID()); } static NV_INLINE void writeIDToBuffer(char* buffer, uint32_t id) { for (int i = 0; i < 4; ++i, id >>= 8) { *buffer++ = static_cast<char>(id & 0xFF); } } static NV_INLINE uint32_t readIDFromBuffer(const char* buffer) { return NVBLAST_FOURCC(buffer[0], buffer[1], buffer[2], buffer[3]); } static NV_INLINE void writeU64InHexToBuffer(char* buffer, uint64_t val) { for (char* curr = buffer + 16; curr-- > buffer; val >>= 4) { *curr = "0123456789ABCDEF"[val & 0xF]; } } static NV_INLINE uint64_t readU64InHexFromBuffer(const char* buffer) { uint64_t val = 0; for (const char* curr = buffer; curr < buffer + 16; ++curr) { const char c = *curr; const char msn = c >> 4; const char mask = ((88 >> msn) & 1) - 1; const unsigned char digit = "\x0\x1\x2\x3\x4\x5\x6\x7\x8\x9\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xA\xB\xC\xD\xE\xF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"[((msn - 3) & 1) << 4 | (c & 0xF)] | mask; if (digit == 0xFF) { return 0; // Not a hexidecimal digit } val = val << 4 | digit; } return val; } //////// ExtSerialization member functions //////// ExtSerializationImpl::ExtSerializationImpl() : m_serializationEncoding(EncodingID::CapnProtoBinary), m_bufferProvider(&s_defaultBufferProvider) { } ExtSerializationImpl::~ExtSerializationImpl() { // Release and remove all registered serializers Array<ExtSerializer*>::type registeredSerializers; registeredSerializers.reserve(m_serializers.size()); for (auto it = m_serializers.getIterator(); !it.done(); ++it) { registeredSerializers.pushBack(it->second); } m_serializers.clear(); for (uint32_t i = 0; i < registeredSerializers.size(); ++i) { registeredSerializers[i]->release(); } } char* ExtSerializationImpl::writeHeaderIntoBuffer(char* buffer, uint64_t bufferSize, uint32_t objectTypeID, uint32_t encodingID, uint64_t dataSize) const { if (bufferSize < HeaderSize) { return nullptr; } char* stop = buffer + HeaderSize; size_t versionLen = strlen(s_version); if (versionLen > 63) { versionLen = 63; } memset(buffer, ' ', HeaderSize); memcpy(buffer, s_identifier, 32); buffer += 32; memcpy(buffer, s_version, versionLen); buffer += 64; writeIDToBuffer(buffer, objectTypeID); buffer += 5; writeIDToBuffer(buffer, encodingID); buffer += 5; writeU64InHexToBuffer(buffer, dataSize); buffer += 16; *(stop - 1) = '\n'; return stop; } const char* ExtSerializationImpl::readHeaderFromBuffer(uint32_t* objectTypeID, uint32_t* encodingID, uint64_t* dataSize, const char* buffer, uint64_t bufferSize) const { if (bufferSize < HeaderSize) { NVBLAST_LOG_ERROR("ExtSerializationImpl::readHeaderFromBuffer: header terminator not found."); return nullptr; } const char* stop = buffer + HeaderSize; if (memcmp(buffer, s_identifier, 32)) { 
        NVBLAST_LOG_ERROR("ExtSerializationImpl::readHeaderFromBuffer: file identifier does not match expected value.");
        return nullptr;
    }
    buffer += 32;
    const char* s = strchr(buffer, ' ');
    if (s == nullptr)
    {
        NVBLAST_LOG_ERROR("ExtSerializationImpl::readHeaderFromBuffer: file format error reading serializer library version.");
        return nullptr; // version field is not space-terminated within the header
    }
    if (memcmp(buffer, s_version, s - buffer))
    {
        NVBLAST_LOG_ERROR("ExtSerializationImpl::readHeaderFromBuffer: file version does not match serializer library version.");
        return nullptr;
    }
    buffer += 64;
    if (objectTypeID != nullptr)
    {
        *objectTypeID = readIDFromBuffer(buffer);
    }
    buffer += 5;
    if (encodingID != nullptr)
    {
        *encodingID = readIDFromBuffer(buffer);
    }
    buffer += 5;
    if (dataSize != nullptr)
    {
        *dataSize = readU64InHexFromBuffer(buffer);
    }
    buffer += 16;
    return stop;
}


bool ExtSerializationImpl::registerSerializer(ExtSerializer& serializer)
{
    return m_serializers.insert(generateKey(serializer), &serializer);
}


bool ExtSerializationImpl::unregisterSerializer(ExtSerializer& serializer)
{
    const uint64_t key = generateKey(serializer);
    const auto entry = m_serializers.find(key);
    if (entry == nullptr)
    {
        return false;
    }
    entry->second->release();
    return m_serializers.erase(key);
}


ExtSerializer* ExtSerializationImpl::findSerializer(uint32_t objectTypeID, uint32_t encodingID)
{
    auto entry = m_serializers.find(generateKey(objectTypeID, encodingID));
    return entry != nullptr ? entry->second : nullptr;
}


bool ExtSerializationImpl::setSerializationEncoding(uint32_t encodingID)
{
    m_serializationEncoding = encodingID;
    return true;
}


uint32_t ExtSerializationImpl::getSerializationEncoding() const
{
    return m_serializationEncoding;
}


void ExtSerializationImpl::setBufferProvider(BufferProvider* bufferProvider)
{
    m_bufferProvider = bufferProvider != nullptr ? bufferProvider : &s_defaultBufferProvider;
}


bool ExtSerializationImpl::peekHeader(uint32_t* objectTypeID, uint32_t* encodingID, uint64_t* dataSize, const void* buffer, uint64_t bufferSize)
{
    return nullptr != readHeaderFromBuffer(objectTypeID, encodingID, dataSize, reinterpret_cast<const char*>(buffer), bufferSize);
}


const void* ExtSerializationImpl::skipObject(uint64_t& bufferSize, const void* buffer)
{
    uint64_t dataSize;
    const char* next = readHeaderFromBuffer(nullptr, nullptr, &dataSize, static_cast<const char*>(buffer), bufferSize);
    if (next == nullptr)
    {
        return nullptr;
    }
    next += dataSize;
    const uint64_t skipSize = next - static_cast<const char*>(buffer);
    NVBLAST_CHECK_ERROR(skipSize <= bufferSize, "Object size in buffer is too large for given buffer size.", return nullptr);
    bufferSize -= skipSize;
    return next;
}


void* ExtSerializationImpl::deserializeFromBuffer(const void* buffer, uint64_t bufferSize, uint32_t* objectTypeIDPtr)
{
    uint32_t objectTypeID;
    uint32_t encodingID;
    uint64_t dataSize;
    void* result = nullptr;
    buffer = readHeaderFromBuffer(&objectTypeID, &encodingID, &dataSize, reinterpret_cast<const char*>(buffer), bufferSize);
    if (buffer != nullptr)
    {
        auto entry = m_serializers.find(generateKey(objectTypeID, encodingID));
        if (entry != nullptr && entry->second != nullptr)
        {
            result = entry->second->deserializeFromBuffer(buffer, dataSize);
        }
    }
    if (objectTypeIDPtr != nullptr)
    {
        *objectTypeIDPtr = result != nullptr ?
            objectTypeID : 0;
    }
    return result;
}


uint64_t ExtSerializationImpl::serializeIntoBuffer(void*& buffer, const void* object, uint32_t objectTypeID)
{
    if (!m_serializationEncoding)
    {
        NVBLAST_LOG_ERROR("ExtSerializationImpl::serializeIntoBuffer: no serialization encoding has been set.");
        return 0;   // No encoding available
    }
    auto entry = m_serializers.find(generateKey(objectTypeID, m_serializationEncoding));
    if (entry == nullptr || entry->second == nullptr)
    {
        return 0;
    }
    const uint64_t size = entry->second->serializeIntoBuffer(buffer, *m_bufferProvider, object, HeaderSize);
    if (size < HeaderSize)
    {
        NVBLAST_LOG_ERROR("ExtSerializationImpl::serializeIntoBuffer: failed to write data to buffer.");
        return 0;
    }
    writeHeaderIntoBuffer(reinterpret_cast<char*>(buffer), HeaderSize, objectTypeID, m_serializationEncoding, size - HeaderSize);
    return size;
}


void ExtSerializationImpl::release()
{
    NVBLAST_DELETE(this, ExtSerializationImpl);
}


//////// ExtSerializationImpl::AllocBufferProvider member functions ////////

void* ExtSerializationImpl::AllocBufferProvider::requestBuffer(size_t size)
{
    return NVBLAST_ALLOC(size);
}

} // namespace Blast
} // namespace Nv


Nv::Blast::ExtSerialization* NvBlastExtSerializationCreate()
{
    Nv::Blast::ExtSerializationImpl* serialization = NVBLAST_NEW(Nv::Blast::ExtSerializationImpl) ();

    // Automatically load LL serializers
    NvBlastExtLlSerializerLoadSet(*serialization);

    return serialization;
}
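For orientation, a minimal usage sketch of the facade above (not part of the SDK source): it serializes a low-level asset into a self-describing buffer, peeks at the header, and deserializes it back. The helper name exampleAssetRoundTrip is illustrative, and the use of NVBLAST_FREE is an assumption based on the default AllocBufferProvider and the LL deserializers allocating with NVBLAST_ALLOC.

#include "NvBlastExtSerialization.h"
#include "NvBlastExtLlSerialization.h"
#include "NvBlastGlobals.h"

// Sketch: round-trip an NvBlastAsset through ExtSerialization (names are illustrative only).
static void exampleAssetRoundTrip(const NvBlastAsset* asset)
{
    Nv::Blast::ExtSerialization* ser = NvBlastExtSerializationCreate();   // LL serializers are auto-registered

    void* buffer = nullptr;
    const uint64_t size = NvBlastExtSerializationSerializeAssetIntoBuffer(buffer, *ser, asset);
    if (size != 0)
    {
        uint32_t objectTypeID, encodingID;
        uint64_t dataSize;
        if (ser->peekHeader(&objectTypeID, &encodingID, &dataSize, buffer, size))   // inspect without consuming
        {
            void* object = ser->deserializeFromBuffer(buffer, size, &objectTypeID);
            if (object != nullptr && objectTypeID == Nv::Blast::LlObjectTypeID::Asset)
            {
                // ... use the reconstructed asset ...
                NVBLAST_FREE(object);   // assumed: deserialized LL objects are allocated with NVBLAST_ALLOC
            }
        }
        NVBLAST_FREE(buffer);   // default AllocBufferProvider allocates with NVBLAST_ALLOC
    }
    ser->release();
}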
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtInputStream.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "kj/io.h" #include <istream> namespace Nv { namespace Blast { class ExtInputStream : public kj::InputStream { public: ExtInputStream() = delete; ExtInputStream(std::istream &inputStream); // Returns a read of maxBytes. This is supposed to be happy doing partial reads, but currently isn't. virtual size_t tryRead(void* buffer, size_t minBytes, size_t maxBytes) override; private: std::istream &m_inputStream; }; } // namespace Blast } // namespace Nv
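A small illustrative pairing for the adapter above (a sketch, not SDK code): ExtInputStream turns a std::istream into a kj::InputStream, which is presumably what lets the Cap'n Proto serializers in this module build a capnp::InputStreamMessageReader from an ordinary file stream. The helper name and the file-based setup are assumptions.

#include "NvBlastExtInputStream.h"
#include "capnp/serialize.h"
#include <fstream>

// Hypothetical helper: open a file and expose it as a Cap'n Proto message reader.
static void readSerializedMessage(const char* path)
{
    std::ifstream file(path, std::ios::binary);
    Nv::Blast::ExtInputStream stream(file);            // std::istream -> kj::InputStream adapter
    capnp::InputStreamMessageReader reader(stream);    // assumed pairing with the CAPN serializers in this module
    // ... hand `reader` to a deserializeFromStreamReader() specialization ...
}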
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtLlSerializerCAPN.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvBlastExtSerializationCAPN.h" #include "NvBlastAsset.h" #include "NvBlastFamily.h" #include "AssetDTO.h" #include "FamilyDTO.h" /** Specializations of ExtSerializationCAPN for Blast LL */ namespace Nv { namespace Blast { //// Nv::Blast::Asset //// template<> NV_INLINE bool ExtSerializationCAPN<Asset, Serialization::Asset::Reader, Serialization::Asset::Builder>::serializeIntoBuilder(Serialization::Asset::Builder& assetBuilder, const Asset* asset) { return AssetDTO::serialize(assetBuilder, asset); } template<> NV_INLINE bool ExtSerializationCAPN<Asset, Serialization::Asset::Reader, Serialization::Asset::Builder>::serializeIntoMessage(capnp::MallocMessageBuilder& message, const Asset* asset) { Serialization::Asset::Builder assetBuilder = message.initRoot<Serialization::Asset>(); return serializeIntoBuilder(assetBuilder, asset); } template<> NV_INLINE Asset* ExtSerializationCAPN<Asset, Serialization::Asset::Reader, Serialization::Asset::Builder>::deserializeFromStreamReader(capnp::InputStreamMessageReader &message) { Serialization::Asset::Reader reader = message.getRoot<Serialization::Asset>(); return AssetDTO::deserialize(reader); } //// Nv::Blast::FamilyHeader //// template<> NV_INLINE bool ExtSerializationCAPN<FamilyHeader, Serialization::Family::Reader, Serialization::Family::Builder>::serializeIntoBuilder(Serialization::Family::Builder& familyBuilder, const FamilyHeader* family) { return FamilyDTO::serialize(familyBuilder, family); } template<> NV_INLINE bool ExtSerializationCAPN<FamilyHeader, Serialization::Family::Reader, Serialization::Family::Builder>::serializeIntoMessage(capnp::MallocMessageBuilder& message, const FamilyHeader* family) { Serialization::Family::Builder familyBuilder = message.initRoot<Serialization::Family>(); return serializeIntoBuilder(familyBuilder, family); } template<> NV_INLINE FamilyHeader* ExtSerializationCAPN<FamilyHeader, Serialization::Family::Reader, 
Serialization::Family::Builder>::deserializeFromStreamReader(capnp::InputStreamMessageReader &message) { Serialization::Family::Reader reader = message.getRoot<Serialization::Family>(); return FamilyDTO::deserialize(reader); } } // namespace Blast } // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtTkSerializerCAPN.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvBlastExtSerializationCAPN.h" #include "NvBlastTkAsset.h" #include "TkAssetDTO.h" /** Specializations of ExtSerializationCAPN for BlastTk */ namespace Nv { namespace Blast { //// Nv::Blast::TkAsset //// template<> NV_INLINE bool ExtSerializationCAPN<TkAsset, Serialization::TkAsset::Reader, Serialization::TkAsset::Builder>::serializeIntoBuilder(Serialization::TkAsset::Builder& assetBuilder, const TkAsset* asset) { return TkAssetDTO::serialize(assetBuilder, asset); } template<> NV_INLINE TkAsset* ExtSerializationCAPN<TkAsset, Serialization::TkAsset::Reader, Serialization::TkAsset::Builder>::deserializeFromStreamReader(capnp::InputStreamMessageReader &message) { Serialization::TkAsset::Reader reader = message.getRoot<Serialization::TkAsset>(); return TkAssetDTO::deserialize(reader); } template<> NV_INLINE bool ExtSerializationCAPN<TkAsset, Serialization::TkAsset::Reader, Serialization::TkAsset::Builder>::serializeIntoMessage(capnp::MallocMessageBuilder& message, const TkAsset* asset) { Serialization::TkAsset::Builder assetBuilder = message.initRoot<Serialization::TkAsset>(); return serializeIntoBuilder(assetBuilder, asset); } } // namespace Blast } // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtTkSerialization.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtSerializationInternal.h" #include "NvBlastExtTkSerialization.h" #include "NvBlastExtTkSerializerCAPN.h" #include "NvBlastExtTkSerializerRAW.h" namespace Nv { namespace Blast { TkFramework* sExtTkSerializerFramework = nullptr; class ExtTkSerializerAsset_CPNB : public ExtSerializer { public: ExtSerializerBoilerplate("TkAsset_CPNB", "Blast high-level asset (Nv::Blast::TkAsset) serialization using Cap'n Proto binary format.", TkObjectTypeID::Asset, ExtSerialization::EncodingID::CapnProtoBinary); ExtSerializerDefaultFactoryAndRelease(ExtTkSerializerAsset_CPNB); virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override { return ExtSerializationCAPN<TkAsset, Serialization::TkAsset::Reader, Serialization::TkAsset::Builder>::deserializeFromBuffer(reinterpret_cast<const unsigned char*>(buffer), size); } virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) override { uint64_t usedSize; if (!ExtSerializationCAPN<TkAsset, Serialization::TkAsset::Reader, Serialization::TkAsset::Builder>::serializeIntoBuffer(reinterpret_cast<const TkAsset*>(object), reinterpret_cast<unsigned char*&>(buffer), usedSize, &bufferProvider, offset)) { return 0; } return usedSize; } }; class ExTkSerializerAsset_RAW : public ExtSerializer { public: ExtSerializerBoilerplate("TkAsset_RAW", "Blast high-level asset (Nv::Blast::TkAsset) serialization using raw memory format.", TkObjectTypeID::Asset, ExtSerialization::EncodingID::RawBinary); ExtSerializerDefaultFactoryAndRelease(ExTkSerializerAsset_RAW); ExtSerializerReadOnly(ExTkSerializerAsset_RAW); virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override { ExtIStream stream(buffer, size); return deserializeTkAsset(stream, *sExtTkSerializerFramework); } }; } // namespace Blast } // namespace Nv /////////////////////////////////////// size_t NvBlastExtTkSerializerLoadSet(Nv::Blast::TkFramework& framework, 
Nv::Blast::ExtSerialization& serialization) { Nv::Blast::sExtTkSerializerFramework = &framework; Nv::Blast::ExtSerializer* (*factories[])() = { Nv::Blast::ExtTkSerializerAsset_CPNB::create, Nv::Blast::ExTkSerializerAsset_RAW::create }; return Nv::Blast::ExtSerializationLoadSet(static_cast<Nv::Blast::ExtSerializationInternal&>(serialization), factories); } uint64_t NvBlastExtSerializationSerializeTkAssetIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const Nv::Blast::TkAsset* asset) { return serialization.serializeIntoBuffer(buffer, asset, Nv::Blast::TkObjectTypeID::Asset); }
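A brief usage sketch for the Tk layer above (illustrative only; the helper name is an assumption): register the TkAsset serializers against a framework, then serialize a TkAsset into a buffer through the shared ExtSerialization facade.

#include "NvBlastExtSerialization.h"
#include "NvBlastExtTkSerialization.h"
#include "NvBlastTkAsset.h"

// Sketch: serialize a high-level TkAsset (framework and asset are provided by the caller).
static uint64_t exampleSerializeTkAsset(Nv::Blast::TkFramework& framework, const Nv::Blast::TkAsset* tkAsset, void*& buffer)
{
    Nv::Blast::ExtSerialization* ser = NvBlastExtSerializationCreate();
    NvBlastExtTkSerializerLoadSet(framework, *ser);   // registers the CPNB and RAW TkAsset serializers above
    const uint64_t size = NvBlastExtSerializationSerializeTkAssetIntoBuffer(buffer, *ser, tkAsset);
    ser->release();   // the serialized buffer is not owned by `ser`
    return size;
}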
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtTkSerializerRAW.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once /** Raw serialization function declarations for BlastTk */ #include <stdint.h> namespace Nv { namespace Blast { // Forward declarations class TkAsset; class TkFramework; class ExtIStream; //// Nv::Blast::TkAsset //// TkAsset* deserializeTkAsset(ExtIStream& stream, TkFramework& framework); } // namespace Blast } // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtOutputStream.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtOutputStream.h" Nv::Blast::ExtOutputStream::ExtOutputStream(std::ostream &outputStream): m_outputStream(outputStream) { } void Nv::Blast::ExtOutputStream::write(const void* buffer, size_t size) { m_outputStream.write((char *) buffer, size); }
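By symmetry with ExtInputStream, a small sketch of how this output adapter can be used (assumptions: ExtOutputStream derives from kj::OutputStream, as its write() signature suggests, and the helper name is illustrative):

#include "NvBlastExtOutputStream.h"
#include "capnp/message.h"
#include "capnp/serialize.h"
#include <fstream>

// Hypothetical helper: write a built Cap'n Proto message to a file through the adapter.
static void writeSerializedMessage(capnp::MallocMessageBuilder& message, const char* path)
{
    std::ofstream file(path, std::ios::binary);
    Nv::Blast::ExtOutputStream stream(file);   // std::ostream -> kj::OutputStream adapter (assumed base class)
    capnp::writeMessage(stream, message);      // standard Cap'n Proto framing
}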
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtLlSerialization.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtSerializationInternal.h" #include "NvBlastExtLlSerialization.h" #include "NvBlastExtLlSerializerCAPN.h" namespace Nv { namespace Blast { class ExtLlSerializerAsset_CPNB : public ExtSerializer { public: ExtSerializerBoilerplate("LLAsset_CPNB", "Blast low-level asset (NvBlastAsset) serialization using Cap'n Proto binary format.", LlObjectTypeID::Asset, ExtSerialization::EncodingID::CapnProtoBinary); ExtSerializerDefaultFactoryAndRelease(ExtLlSerializerAsset_CPNB); virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override { return ExtSerializationCAPN<Asset, Serialization::Asset::Reader, Serialization::Asset::Builder>::deserializeFromBuffer(reinterpret_cast<const unsigned char*>(buffer), size); } virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) override { uint64_t usedSize; if (!ExtSerializationCAPN<Asset, Serialization::Asset::Reader, Serialization::Asset::Builder>::serializeIntoBuffer(reinterpret_cast<const Asset*>(object), reinterpret_cast<unsigned char*&>(buffer), usedSize, &bufferProvider, offset)) { return 0; } return usedSize; } }; class ExtLlSerializerFamily_CPNB : public ExtSerializer { public: ExtSerializerBoilerplate("LLFamily_CPNB", "Blast low-level family (NvBlastFamily) serialization using Cap'n Proto binary format.", LlObjectTypeID::Family, ExtSerialization::EncodingID::CapnProtoBinary); ExtSerializerDefaultFactoryAndRelease(ExtLlSerializerFamily_CPNB); virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override { return ExtSerializationCAPN<FamilyHeader, Serialization::Family::Reader, Serialization::Family::Builder>::deserializeFromBuffer(reinterpret_cast<const unsigned char*>(buffer), size); } virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) override { uint64_t usedSize; if (!ExtSerializationCAPN<FamilyHeader, 
Serialization::Family::Reader, Serialization::Family::Builder>::serializeIntoBuffer(reinterpret_cast<const FamilyHeader*>(object), reinterpret_cast<unsigned char*&>(buffer), usedSize, &bufferProvider, offset)) { return 0; } return usedSize; } }; class ExtLlSerializerObject_RAW : public ExtSerializer { public: virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override { const NvBlastDataBlock* block = reinterpret_cast<const NvBlastDataBlock*>(buffer); if (static_cast<uint64_t>(block->size) > size) { return nullptr; } void* llobject = NVBLAST_ALLOC(block->size); return memcpy(llobject, block, block->size); } virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) override { const NvBlastDataBlock* block = reinterpret_cast<const NvBlastDataBlock*>(object); const uint64_t size = block->size + offset; buffer = bufferProvider.requestBuffer(size); if (buffer == nullptr) { return 0; } memcpy(static_cast<char*>(buffer) + offset, object, block->size); return size; } }; class ExtLlSerializerAsset_RAW : public ExtLlSerializerObject_RAW { public: ExtSerializerBoilerplate("LLAsset_RAW", "Blast low-level asset (NvBlastAsset) serialization using raw memory format.", LlObjectTypeID::Asset, ExtSerialization::EncodingID::RawBinary); ExtSerializerDefaultFactoryAndRelease(ExtLlSerializerAsset_RAW); }; class ExtLlSerializerFamily_RAW : public ExtLlSerializerObject_RAW { public: ExtSerializerBoilerplate("LLFamily_RAW", "Blast low-level family (NvBlastFamily) serialization using raw memory format.", LlObjectTypeID::Family, ExtSerialization::EncodingID::RawBinary); ExtSerializerDefaultFactoryAndRelease(ExtLlSerializerFamily_RAW); }; } // namespace Blast } // namespace Nv /////////////////////////////////////// size_t NvBlastExtLlSerializerLoadSet(Nv::Blast::ExtSerialization& serialization) { Nv::Blast::ExtSerializer* (*factories[])() = { Nv::Blast::ExtLlSerializerAsset_CPNB::create, Nv::Blast::ExtLlSerializerAsset_RAW::create, Nv::Blast::ExtLlSerializerFamily_CPNB::create, Nv::Blast::ExtLlSerializerFamily_RAW::create }; return Nv::Blast::ExtSerializationLoadSet(static_cast<Nv::Blast::ExtSerializationInternal&>(serialization), factories); } uint64_t NvBlastExtSerializationSerializeAssetIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const NvBlastAsset* asset) { return serialization.serializeIntoBuffer(buffer, asset, Nv::Blast::LlObjectTypeID::Asset); } uint64_t NvBlastExtSerializationSerializeFamilyIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const NvBlastFamily* family) { return serialization.serializeIntoBuffer(buffer, family, Nv::Blast::LlObjectTypeID::Family); }
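To make the encoding choice above concrete, a short sketch (illustrative helper name): the serialization facade defaults to Cap'n Proto binary, but once the low-level serializers are registered it can be switched to the raw memory format, which for assets and families is a straight copy of the underlying NvBlastDataBlock.

#include "NvBlastExtSerialization.h"
#include "NvBlastExtLlSerialization.h"

// Sketch: serialize a family with the RAW encoding instead of the default Cap'n Proto binary.
static uint64_t exampleSerializeFamilyRaw(Nv::Blast::ExtSerialization& ser, const NvBlastFamily* family, void*& buffer)
{
    ser.setSerializationEncoding(Nv::Blast::ExtSerialization::EncodingID::RawBinary);
    return NvBlastExtSerializationSerializeFamilyIntoBuffer(buffer, ser, family);
}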
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvBlastBondDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastTypes.h" #include "NvBlastExtLlSerialization-capn.h" DTO_CLASS(NvBlastBond, NvBlastBond, Nv::Blast::Serialization::NvBlastBond)
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvVec3DTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastExtTkSerialization-capn.h" #include "NvVec3.h" DTO_CLASS(NvVec3, nvidia::NvVec3, Nv::Blast::Serialization::NvVec3)
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/ActorDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "ActorDTO.h" #include "NvBlastGlobals.h" #include "NvBlastIDDTO.h" #include "NvBlastChunkDTO.h" #include "NvBlastBondDTO.h" namespace Nv { namespace Blast { bool ActorDTO::serialize(Nv::Blast::Serialization::Actor::Builder builder, const Nv::Blast::Actor* poco) { builder.setFamilyOffset(poco->getFamilyOffset()); builder.setFirstVisibleChunkIndex(poco->getFirstVisibleChunkIndex()); builder.setVisibleChunkCount(poco->getVisibleChunkCount()); builder.setFirstGraphNodeIndex(poco->getFirstGraphNodeIndex()); builder.setGraphNodeCount(poco->getGraphNodeCount()); builder.setLeafChunkCount(poco->getLeafChunkCount()); return true; } Nv::Blast::Actor* ActorDTO::deserialize(Nv::Blast::Serialization::Actor::Reader reader) { NV_UNUSED(reader); return nullptr; } bool ActorDTO::deserializeInto(Nv::Blast::Serialization::Actor::Reader reader, Nv::Blast::Actor* poco) { poco->setFamilyOffset(reader.getFamilyOffset()); poco->setFirstVisibleChunkIndex(reader.getFirstVisibleChunkIndex()); poco->setVisibleChunkCount(reader.getVisibleChunkCount()); poco->setFirstGraphNodeIndex(reader.getFirstGraphNodeIndex()); poco->setGraphNodeCount(reader.getGraphNodeCount()); poco->setLeafChunkCount(reader.getLeafChunkCount()); return true; } } // namespace Blast } // namespace Nv
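The DTO headers and sources in this directory all share one shape: a static serialize() into a Cap'n Proto builder, a static deserialize() that allocates a new object (sometimes left returning nullptr), and a static deserializeInto() that fills an existing object. DTOMacros.h is not part of this section, so the following is only a guess at what DTO_CLASS(NAME, POCO, SERIALIZATION) expands to, inferred from the implementations here; it is not the actual macro.

// Assumed shape of DTO_CLASS from DTOMacros.h (a sketch, not the real definition).
// Every *DTO.cpp in this module defines exactly these three static functions.
#define DTO_CLASS(NAME, POCO, SERIALIZATION)                                          \
    namespace Nv { namespace Blast {                                                  \
    class NAME##DTO                                                                   \
    {                                                                                 \
    public:                                                                           \
        static bool serialize(SERIALIZATION::Builder builder, const POCO* poco);      \
        static POCO* deserialize(SERIALIZATION::Reader reader);                       \
        static bool deserializeInto(SERIALIZATION::Reader reader, POCO* poco);        \
    };                                                                                \
    } }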
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxTransformDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "PxTransform.h" #include "NvBlastExtPxSerialization-capn.h" #include "PxCooking.h" DTO_CLASS(PxTransform, physx::PxTransform, Nv::Blast::Serialization::PxTransform)
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxMeshScaleDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvPreprocessor.h" #include "PxMeshScaleDTO.h" #include "PxVec3DTO.h" #include "PxQuatDTO.h" namespace Nv { namespace Blast { bool PxMeshScaleDTO::serialize(Nv::Blast::Serialization::PxMeshScale::Builder builder, const physx::PxMeshScale * poco) { PxVec3DTO::serialize(builder.getScale(), &poco->scale); PxQuatDTO::serialize(builder.getRotation(), &poco->rotation); return true; } physx::PxMeshScale* PxMeshScaleDTO::deserialize(Nv::Blast::Serialization::PxMeshScale::Reader reader) { NV_UNUSED(reader); return nullptr; } bool PxMeshScaleDTO::deserializeInto(Nv::Blast::Serialization::PxMeshScale::Reader reader, physx::PxMeshScale * poco) { PxVec3DTO::deserializeInto(reader.getScale(), &poco->scale); PxQuatDTO::deserializeInto(reader.getRotation(), &poco->rotation); return true; } } // namespace Blast } // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxConvexMeshGeometryDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "PxConvexMeshGeometryDTO.h" #include "PxMeshScaleDTO.h" #include "NvBlastAssert.h" #include "NvBlastExtKJPxInputStream.h" #include "NvBlastExtKJPxOutputStream.h" #include "PxConvexMeshDesc.h" #include "NvBlastExtSerialization.h" #include "PxVec3.h" #include <algorithm> #include <vector> #include "PxPhysics.h" #include "NvBlastPxCallbacks.h" #include "PxDefaultStreams.h" namespace Nv { namespace Blast { extern physx::PxPhysics* sExtPxSerializerPhysics; extern physx::PxCooking* sExtPxSerializerCooking; bool PxConvexMeshGeometryDTO::serialize(Nv::Blast::Serialization::PxConvexMeshGeometry::Builder builder, const physx::PxConvexMeshGeometry * poco) { NVBLAST_ASSERT(sExtPxSerializerCooking != nullptr); PxMeshScaleDTO::serialize(builder.getScale(), &poco->scale); //TODO: Use cooking.cookConvexMesh to cook the mesh to a stream - then get that backing buffer and put it into the Data field physx::PxConvexMeshDesc desc; desc.points.data = poco->convexMesh->getVertices(); desc.points.count = poco->convexMesh->getNbVertices(); desc.points.stride = sizeof(physx::PxVec3); std::vector<uint32_t> indicesScratch; std::vector<physx::PxHullPolygon> hullPolygonsScratch; hullPolygonsScratch.resize(poco->convexMesh->getNbPolygons()); uint32_t indexCount = 0; for (uint32_t i = 0; i < hullPolygonsScratch.size(); i++) { physx::PxHullPolygon polygon; poco->convexMesh->getPolygonData(i, polygon); if (polygon.mNbVerts) { indexCount = std::max<uint32_t>(indexCount, polygon.mIndexBase + polygon.mNbVerts); } } indicesScratch.resize(indexCount); for (uint32_t i = 0; i < hullPolygonsScratch.size(); i++) { physx::PxHullPolygon polygon; poco->convexMesh->getPolygonData(i, polygon); for (uint32_t j = 0; j < polygon.mNbVerts; j++) { indicesScratch[polygon.mIndexBase + j] = poco->convexMesh->getIndexBuffer()[polygon.mIndexBase + j]; } hullPolygonsScratch[i] = polygon; } desc.indices.count = indexCount; desc.indices.data = indicesScratch.data(); desc.indices.stride = sizeof(uint32_t); desc.polygons.count = 
poco->convexMesh->getNbPolygons(); desc.polygons.data = hullPolygonsScratch.data(); desc.polygons.stride = sizeof(physx::PxHullPolygon); physx::PxDefaultMemoryOutputStream outStream(NvBlastGetPxAllocatorCallback()); if (!sExtPxSerializerCooking->cookConvexMesh(desc, outStream)) { return false; } kj::ArrayPtr<unsigned char> cookedBuffer(outStream.getData(), outStream.getSize()); builder.setConvexMesh(cookedBuffer); return true; } physx::PxConvexMeshGeometry* PxConvexMeshGeometryDTO::deserialize(Nv::Blast::Serialization::PxConvexMeshGeometry::Reader reader) { NVBLAST_ASSERT(sExtPxSerializerCooking != nullptr); NV_UNUSED(reader); return nullptr; } bool PxConvexMeshGeometryDTO::deserializeInto(Nv::Blast::Serialization::PxConvexMeshGeometry::Reader reader, physx::PxConvexMeshGeometry * poco) { NVBLAST_ASSERT(sExtPxSerializerPhysics != nullptr); PxMeshScaleDTO::deserializeInto(reader.getScale(), &poco->scale); Nv::Blast::ExtKJPxInputStream inputStream(reader.getConvexMesh()); //NOTE: Naive approach, no shared convex hulls poco->convexMesh = sExtPxSerializerPhysics->createConvexMesh(inputStream); return poco->convexMesh != nullptr; } } // namespace Blast } // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/TkAssetJointDescDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "TkAssetJointDescDTO.h" #include "NvVec3DTO.h" namespace Nv { namespace Blast { bool TkAssetJointDescDTO::serialize(Nv::Blast::Serialization::TkAssetJointDesc::Builder builder, const Nv::Blast::TkAssetJointDesc * poco) { kj::ArrayPtr<const uint32_t> nodeIndices(poco->nodeIndices, 2); builder.setNodeIndices(nodeIndices); builder.initAttachPositions(2); for (int i = 0; i < 2; i++) { NvVec3DTO::serialize(builder.getAttachPositions()[i], &poco->attachPositions[i]); } return true; } Nv::Blast::TkAssetJointDesc* TkAssetJointDescDTO::deserialize(Nv::Blast::Serialization::TkAssetJointDesc::Reader reader) { //TODO: Allocate with ExtContent and return NV_UNUSED(reader); return nullptr; } bool TkAssetJointDescDTO::deserializeInto(Nv::Blast::Serialization::TkAssetJointDesc::Reader reader, Nv::Blast::TkAssetJointDesc * poco) { auto readerAttachPositions = reader.getAttachPositions(); NvVec3DTO::deserializeInto(readerAttachPositions[0], &poco->attachPositions[0]); NvVec3DTO::deserializeInto(readerAttachPositions[1], &poco->attachPositions[1]); auto readerNodeIndices = reader.getNodeIndices(); poco->nodeIndices[0] = readerNodeIndices[0]; poco->nodeIndices[1] = readerNodeIndices[1]; return true; } } // namespace Blast } // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/TkAssetDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastTkAsset.h" #include "NvBlastExtTkSerialization-capn.h" DTO_CLASS(TkAsset, Nv::Blast::TkAsset, Nv::Blast::Serialization::TkAsset)
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/AssetDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastAsset.h" #include "NvBlastExtLlSerialization-capn.h" DTO_CLASS(Asset, Nv::Blast::Asset, Nv::Blast::Serialization::Asset)
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxQuatDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "PxQuat.h" #include "NvBlastExtPxSerialization-capn.h" #include "PxCooking.h" DTO_CLASS(PxQuat, physx::PxQuat, Nv::Blast::Serialization::PxQuat)
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/AssetDTO.cpp
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.


#include "AssetDTO.h"
#include "NvBlastGlobals.h"
#include "NvBlastIDDTO.h"
#include "NvBlastChunkDTO.h"
#include "NvBlastBondDTO.h"
#include "NvBlastAsset.h"

namespace Nv
{
namespace Blast
{

bool AssetDTO::serialize(Nv::Blast::Serialization::Asset::Builder builder, const Nv::Blast::Asset * poco)
{
    NvBlastIDDTO::serialize(builder.initID(), &poco->m_ID);

    builder.setLeafChunkCount(poco->m_leafChunkCount);
    builder.setFirstSubsupportChunkIndex(poco->m_firstSubsupportChunkIndex);

    // Chunks
    capnp::List<Nv::Blast::Serialization::NvBlastChunk>::Builder chunks = builder.initChunks(poco->m_chunkCount);
    builder.setChunkCount(poco->m_chunkCount);
    NVBLAST_ASSERT_WITH_MESSAGE(builder.getChunkCount() == poco->m_chunkCount, "Builder chunk count does not match the asset's chunk count");

    for (uint32_t i = 0; i < poco->m_chunkCount; i++)
    {
        NvBlastChunk& chunk = poco->getChunks()[i];
        NvBlastChunkDTO::serialize(chunks[i], &chunk);
    }

    NVBLAST_ASSERT_WITH_MESSAGE(builder.getChunkCount() == poco->m_chunkCount, "Builder chunk count changed while serializing chunks");

    // Bonds
    capnp::List<Nv::Blast::Serialization::NvBlastBond>::Builder bonds = builder.initBonds(poco->m_bondCount);
    builder.setBondCount(poco->m_bondCount);

    for (uint32_t i = 0; i < poco->m_bondCount; i++)
    {
        NvBlastBond& bond = poco->getBonds()[i];
        NvBlastBondDTO::serialize(bonds[i], &bond);
    }

    // Subtree leaf chunk counts and chunk-to-graph-node map
    kj::ArrayPtr<uint32_t> stlcArray(poco->getSubtreeLeafChunkCounts(), poco->m_chunkCount);
    builder.initSubtreeLeafChunkCounts(poco->m_chunkCount);
    builder.setSubtreeLeafChunkCounts(stlcArray);

    kj::ArrayPtr<uint32_t> ctgnArray(poco->getChunkToGraphNodeMap(), poco->m_chunkCount);
    builder.setChunkToGraphNodeMap(ctgnArray);

    // Support graph
    Nv::Blast::Serialization::NvBlastSupportGraph::Builder graphBuilder = builder.initGraph();
    graphBuilder.setNodeCount(poco->m_graph.m_nodeCount);

    uint32_t* ciPtr = poco->m_graph.getChunkIndices();
    kj::ArrayPtr<const uint32_t> ciArray(ciPtr, poco->m_graph.m_nodeCount);
    graphBuilder.setChunkIndices(ciArray);

    kj::ArrayPtr<const uint32_t> adjPart(poco->m_graph.getAdjacencyPartition(), poco->m_graph.m_nodeCount + 1);
    graphBuilder.setAdjacencyPartition(adjPart);
    NVBLAST_ASSERT(graphBuilder.getAdjacencyPartition().size() == poco->m_graph.m_nodeCount + 1);

    kj::ArrayPtr<const uint32_t> nodeIndices(poco->m_graph.getAdjacentNodeIndices(), poco->m_bondCount * 2);
    graphBuilder.setAdjacentNodeIndices(nodeIndices);
    NVBLAST_ASSERT(graphBuilder.getAdjacentNodeIndices().size() == poco->m_bondCount * 2);

    kj::ArrayPtr<const uint32_t> bondIndices(poco->m_graph.getAdjacentBondIndices(), poco->m_bondCount * 2);
    graphBuilder.setAdjacentBondIndices(bondIndices);

    return true;
}


Nv::Blast::Asset* AssetDTO::deserialize(Nv::Blast::Serialization::Asset::Reader reader)
{
    // Gather the size data needed to allocate and initialize an empty asset
    NvBlastAssetMemSizeData sizeData;
    sizeData.chunkCount = reader.getChunkCount();
    sizeData.nodeCount = reader.getGraph().getNodeCount();
    sizeData.bondCount = reader.getBondCount();
    const uint32_t leafChunkCount = reader.getLeafChunkCount();
    const uint32_t firstSubsupportChunkIndex = reader.getFirstSubsupportChunkIndex();

    const size_t assetSize = NvBlastGetAssetMemorySizeFromSizeData(sizeData, nullptr);
    void* mem = NVBLAST_ALLOC(assetSize);
    auto asset = Nv::Blast::initializeAsset(mem, sizeData.chunkCount, sizeData.nodeCount, leafChunkCount,
                                            firstSubsupportChunkIndex, sizeData.bondCount, logLL);

    if (deserializeInto(reader, asset))
        return asset;

    // free the memory so it doesn't leak
    NVBLAST_FREE(asset);
    return nullptr;
}


bool AssetDTO::deserializeInto(Nv::Blast::Serialization::Asset::Reader reader, Nv::Blast::Asset * poco)
{
    NvBlastIDDTO::deserializeInto(reader.getID(), &poco->m_ID);

    // Bonds
    NvBlastBond* bonds = poco->getBonds();
    uint32_t bondCount = reader.getBondCount();
    auto readerBonds = reader.getBonds();
    for (uint32_t i = 0; i < bondCount; i++)
    {
        auto bondReader = readerBonds[i];
        NvBlastBondDTO::deserializeInto(bondReader, &bonds[i]);
    }

    // Chunks
    NvBlastChunk* chunks = poco->getChunks();
    uint32_t chunkCount = reader.getChunkCount();
    auto readerChunks = reader.getChunks();
    for (uint32_t i = 0; i < chunkCount; i++)
    {
        auto chunkReader = readerChunks[i];
        NvBlastChunkDTO::deserializeInto(chunkReader, &chunks[i]);
    }

    poco->m_graph.m_nodeCount = reader.getGraph().getNodeCount();

    // Subtree leaf chunk counts
    NVBLAST_ASSERT(reader.getSubtreeLeafChunkCounts().size() == poco->m_chunkCount);
    auto readerSubtreeLeafChunkCounts = reader.getSubtreeLeafChunkCounts();
    for (uint32_t i = 0; i < poco->m_chunkCount; i++)
    {
        poco->getSubtreeLeafChunkCounts()[i] = readerSubtreeLeafChunkCounts[i];
    }

    // Chunk-to-graph-node map
    auto readerChunkToGraphNodeMap = reader.getChunkToGraphNodeMap();
    for (uint32_t i = 0; i < chunkCount; i++)
    {
        poco->getChunkToGraphNodeMap()[i] = readerChunkToGraphNodeMap[i];
    }

    // Support graph: chunk indices
    uint32_t* ciPtr = poco->m_graph.getChunkIndices();
    NVBLAST_ASSERT(reader.getGraph().getChunkIndices().size() == poco->m_graph.m_nodeCount);
    auto readerGraphChunkIndices = reader.getGraph().getChunkIndices();
    for (uint32_t i = 0; i < poco->m_graph.m_nodeCount; i++)
    {
        ciPtr[i] = readerGraphChunkIndices[i];
    }

    // Support graph: adjacency partition
    uint32_t* adjPartition = poco->m_graph.getAdjacencyPartition();
    const uint32_t graphAdjacencyPartitionSize = reader.getGraph().getAdjacencyPartition().size();
    auto readerGraphAdjacencyPartition = reader.getGraph().getAdjacencyPartition();
    for (uint32_t i = 0; i < graphAdjacencyPartitionSize; ++i)
    {
        adjPartition[i] = readerGraphAdjacencyPartition[i];
    }

    // Support graph: adjacent node indices
    uint32_t* adjNodes = poco->m_graph.getAdjacentNodeIndices();
    const uint32_t graphAdjacentNodeIndicesSize = reader.getGraph().getAdjacentNodeIndices().size();
    auto readerGraphAdjacentNodeIndices = reader.getGraph().getAdjacentNodeIndices();
    for (uint32_t i = 0; i < graphAdjacentNodeIndicesSize; ++i)
    {
        adjNodes[i] = readerGraphAdjacentNodeIndices[i];
    }

    // Support graph: adjacent bond indices
    uint32_t* adjBonds = poco->m_graph.getAdjacentBondIndices();
    const uint32_t graphAdjacentBondIndicesSize = reader.getGraph().getAdjacentBondIndices().size();
    auto readerGraphAdjacentBondIndices = reader.getGraph().getAdjacentBondIndices();
    for (uint32_t i = 0; i < graphAdjacentBondIndicesSize; ++i)
    {
        adjBonds[i] = readerGraphAdjacentBondIndices[i];
    }

    return true;
}

}   // namespace Blast
}   // namespace Nv
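// --- Added usage sketch (not part of the original source) -----------------------------------
// A minimal round trip of an asset through the Cap'n Proto DTO above. The function name and
// the incoming `asset` pointer are assumed for illustration; the message/builder calls are
// the standard Cap'n Proto API used by this extension.

#include <capnp/message.h>
#include "AssetDTO.h"
#include "NvBlastGlobals.h"

static void exampleAssetRoundTrip(const Nv::Blast::Asset* asset)
{
    capnp::MallocMessageBuilder message;
    auto assetBuilder = message.initRoot<Nv::Blast::Serialization::Asset>();

    // Fill the Cap'n Proto structure from the in-memory asset.
    Nv::Blast::AssetDTO::serialize(assetBuilder, asset);

    // Read it straight back out; deserialize() allocates the copy with NVBLAST_ALLOC.
    Nv::Blast::Asset* copy = Nv::Blast::AssetDTO::deserialize(assetBuilder.asReader());
    if (copy != nullptr)
    {
        NVBLAST_FREE(copy);
    }
}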
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxConvexMeshGeometryDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastExtPxSerialization-capn.h" #include "PxConvexMeshGeometry.h" #include "PxCooking.h" DTO_CLASS(PxConvexMeshGeometry, physx::PxConvexMeshGeometry, Nv::Blast::Serialization::PxConvexMeshGeometry)
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvBlastIDDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvBlastTypes.h" #include "NvBlastExtLlSerialization-capn.h" #include "DTOMacros.h" DTO_CLASS(NvBlastID, NvBlastID, ::Nv::Blast::Serialization::UUID)
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/FamilyDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastFamily.h" #include "NvBlastExtLlSerialization-capn.h" DTO_CLASS(Family, Nv::Blast::FamilyHeader, Nv::Blast::Serialization::Family)
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvBlastChunkDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastTypes.h" #include "NvBlastExtLlSerialization-capn.h" DTO_CLASS(NvBlastChunk, NvBlastChunk, Nv::Blast::Serialization::NvBlastChunk)
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/DTOMacros.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.


#pragma once

#define DTO_CLASS(_NAME, _POCO, _SERIALIZER)                                        \
namespace Nv                                                                        \
{                                                                                   \
namespace Blast                                                                     \
{                                                                                   \
class _NAME ## DTO                                                                  \
{                                                                                   \
public:                                                                             \
                                                                                    \
    static bool serialize(_SERIALIZER::Builder builder, const _POCO * poco);       \
    static _POCO* deserialize(_SERIALIZER::Reader reader);                          \
    static bool deserializeInto(_SERIALIZER::Reader reader, _POCO * poco);          \
};                                                                                  \
}                                                                                   \
}
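// Added note (not part of the original header): DTO_CLASS only declares the three static
// entry points; the matching *DTO.cpp files in this directory supply the definitions. For
// instance, the declaration in NvBlastChunkDTO.h,
//
//     DTO_CLASS(NvBlastChunk, NvBlastChunk, Nv::Blast::Serialization::NvBlastChunk)
//
// expands to roughly:
//
//     namespace Nv { namespace Blast {
//     class NvBlastChunkDTO
//     {
//     public:
//         static bool serialize(Nv::Blast::Serialization::NvBlastChunk::Builder builder, const NvBlastChunk* poco);
//         static NvBlastChunk* deserialize(Nv::Blast::Serialization::NvBlastChunk::Reader reader);
//         static bool deserializeInto(Nv::Blast::Serialization::NvBlastChunk::Reader reader, NvBlastChunk* poco);
//     };
//     } }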
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxQuatDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvPreprocessor.h" #include "PxQuatDTO.h" namespace Nv { namespace Blast { bool PxQuatDTO::serialize(Nv::Blast::Serialization::PxQuat::Builder builder, const physx::PxQuat * poco) { builder.setX(poco->x); builder.setY(poco->y); builder.setZ(poco->z); builder.setW(poco->w); return true; } physx::PxQuat* PxQuatDTO::deserialize(Nv::Blast::Serialization::PxQuat::Reader reader) { NV_UNUSED(reader); return nullptr; } bool PxQuatDTO::deserializeInto(Nv::Blast::Serialization::PxQuat::Reader reader, physx::PxQuat * poco) { poco->x = reader.getX(); poco->y = reader.getY(); poco->z = reader.getZ(); poco->w = reader.getW(); return true; } } // namespace Blast } // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxTransformDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvPreprocessor.h" #include "PxTransformDTO.h" #include "PxQuatDTO.h" #include "PxVec3DTO.h" namespace Nv { namespace Blast { bool PxTransformDTO::serialize(Nv::Blast::Serialization::PxTransform::Builder builder, const physx::PxTransform * poco) { PxQuatDTO::serialize(builder.getQ(), &poco->q); PxVec3DTO::serialize(builder.getP(), &poco->p); return true; } physx::PxTransform* PxTransformDTO::deserialize(Nv::Blast::Serialization::PxTransform::Reader reader) { NV_UNUSED(reader); return nullptr; } bool PxTransformDTO::deserializeInto(Nv::Blast::Serialization::PxTransform::Reader reader, physx::PxTransform * poco) { PxQuatDTO::deserializeInto(reader.getQ(), &poco->q); PxVec3DTO::deserializeInto(reader.getP(), &poco->p); return true; } } // namespace Blast } // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvBlastBondDTO.cpp
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.


#include "NvBlastBondDTO.h"
#include "NvBlastAssert.h"

namespace Nv
{
namespace Blast
{

bool NvBlastBondDTO::serialize(Nv::Blast::Serialization::NvBlastBond::Builder builder, const NvBlastBond * poco)
{
    NVBLAST_ASSERT(poco != nullptr);

    kj::ArrayPtr<const float> normArray(poco->normal, 3);
    builder.setNormal(normArray);

    builder.setArea(poco->area);

    kj::ArrayPtr<const float> centArray(poco->centroid, 3);
    builder.setCentroid(centArray);

    builder.setUserData(poco->userData);

    return true;
}


NvBlastBond* NvBlastBondDTO::deserialize(Nv::Blast::Serialization::NvBlastBond::Reader reader)
{
    //FIXME
    NV_UNUSED(reader);

    //TODO: Allocate with ExtContext and return
    return nullptr;
}


bool NvBlastBondDTO::deserializeInto(Nv::Blast::Serialization::NvBlastBond::Reader reader, NvBlastBond * poco)
{
    poco->area = reader.getArea();

    auto readerCentroid = reader.getCentroid();
    poco->centroid[0] = readerCentroid[0];
    poco->centroid[1] = readerCentroid[1];
    poco->centroid[2] = readerCentroid[2];

    auto readerNormal = reader.getNormal();
    poco->normal[0] = readerNormal[0];
    poco->normal[1] = readerNormal[1];
    poco->normal[2] = readerNormal[2];

    poco->userData = reader.getUserData();

    return true;
}

}   // namespace Blast
}   // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/TkAssetDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "TkAssetDTO.h" #include "AssetDTO.h" #include "TkAssetJointDescDTO.h" #include <vector> #include "NvBlastTkFramework.h" #include "NvBlastGlobals.h" namespace Nv { namespace Blast { extern TkFramework* sExtTkSerializerFramework; bool TkAssetDTO::serialize(Nv::Blast::Serialization::TkAsset::Builder builder, const Nv::Blast::TkAsset * poco) { const Asset* assetLL = reinterpret_cast<const Nv::Blast::Asset*>(poco->getAssetLL()); Nv::Blast::AssetDTO::serialize(builder.getAssetLL(), assetLL); uint32_t jointDescCount = poco->getJointDescCount(); capnp::List<Nv::Blast::Serialization::TkAssetJointDesc>::Builder jointDescs = builder.initJointDescs(jointDescCount); for (uint32_t i = 0; i < jointDescCount; i++) { TkAssetJointDescDTO::serialize(jointDescs[i], &poco->getJointDescs()[i]); } return true; } Nv::Blast::TkAsset* TkAssetDTO::deserialize(Nv::Blast::Serialization::TkAsset::Reader reader) { const NvBlastAsset* assetLL = reinterpret_cast<const NvBlastAsset*>(AssetDTO::deserialize(reader.getAssetLL())); std::vector<Nv::Blast::TkAssetJointDesc> jointDescs; const uint32_t jointDescCount = reader.getJointDescs().size(); jointDescs.resize(jointDescCount); auto readerJointDescs = reader.getJointDescs(); for (uint32_t i = 0; i < jointDescCount; i++) { TkAssetJointDescDTO::deserializeInto(readerJointDescs[i], &jointDescs[i]); } // Make sure to set ownsAsset to true - this is serialization and no one else owns it. Nv::Blast::TkAsset* asset = NvBlastTkFrameworkGet()->createAsset(assetLL, jointDescs.data(), jointDescCount, true); return asset; } bool TkAssetDTO::deserializeInto(Nv::Blast::Serialization::TkAsset::Reader reader, Nv::Blast::TkAsset * poco) { NV_UNUSED(reader); poco = nullptr; // NOTE: Because of the way TkAsset is currently structured, this won't work. return false; } } // namespace Blast } // namespace Nv
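// Added note (not part of the original source): TkAssetDTO::deserialize() resolves the asset
// through the global TkFramework singleton (NvBlastTkFrameworkGet()), so a framework must
// already exist when deserializing. A minimal sketch, assuming the standard Tk framework
// entry points and a `reader` obtained from a Cap'n Proto message:
//
//     Nv::Blast::TkFramework* framework = NvBlastTkFrameworkCreate();          // once, at startup
//     Nv::Blast::TkAsset* tkAsset = Nv::Blast::TkAssetDTO::deserialize(reader);
//     // ... use tkAsset ...
//     tkAsset->release();   // created with ownsAsset = true, so it also owns the low-level asset
//     framework->release();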
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxVec3DTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastExtTkSerialization-capn.h" #include "PxVec3.h" DTO_CLASS(PxVec3, physx::PxVec3, Nv::Blast::Serialization::PxVec3)
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvVec3DTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvVec3DTO.h" #include "NvBlastAssert.h" namespace Nv { namespace Blast { bool NvVec3DTO::serialize(Nv::Blast::Serialization::NvVec3::Builder builder, const nvidia::NvVec3 * poco) { NVBLAST_ASSERT(poco != nullptr); builder.setX(poco->x); builder.setY(poco->y); builder.setZ(poco->z); return true; } nvidia::NvVec3* NvVec3DTO::deserialize(Nv::Blast::Serialization::NvVec3::Reader reader) { //TODO: Allocate using ExtContext and return NV_UNUSED(reader); return nullptr; } bool NvVec3DTO::deserializeInto(Nv::Blast::Serialization::NvVec3::Reader reader, nvidia::NvVec3* target) { target->x = reader.getX(); target->y = reader.getY(); target->z = reader.getZ(); return true; } } // namespace Blast } // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxVec3DTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "PxVec3DTO.h" #include "NvBlastAssert.h" namespace Nv { namespace Blast { bool PxVec3DTO::serialize(Nv::Blast::Serialization::PxVec3::Builder builder, const physx::PxVec3 * poco) { NVBLAST_ASSERT(poco != nullptr); builder.setX(poco->x); builder.setY(poco->y); builder.setZ(poco->z); return true; } physx::PxVec3* PxVec3DTO::deserialize(Nv::Blast::Serialization::PxVec3::Reader reader) { //TODO: Allocate using ExtContext and return NV_UNUSED(reader); return nullptr; } bool PxVec3DTO::deserializeInto(Nv::Blast::Serialization::PxVec3::Reader reader, physx::PxVec3* target) { target->x = reader.getX(); target->y = reader.getY(); target->z = reader.getZ(); return true; } } // namespace Blast } // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvBlastChunkDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastChunkDTO.h" #include "NvBlastAssert.h" namespace Nv { namespace Blast { bool NvBlastChunkDTO::serialize(Nv::Blast::Serialization::NvBlastChunk::Builder builder, const NvBlastChunk* poco) { NVBLAST_ASSERT(poco != nullptr); kj::ArrayPtr<const float> centArray(poco->centroid, 3); builder.setCentroid(centArray); builder.setVolume(poco->volume); builder.setParentChunkIndex(poco->parentChunkIndex); builder.setFirstChildIndex(poco->firstChildIndex); builder.setChildIndexStop(poco->childIndexStop); builder.setUserData(poco->userData); return true; } NvBlastChunk* NvBlastChunkDTO::deserialize(Nv::Blast::Serialization::NvBlastChunk::Reader reader) { //FIXME NV_UNUSED(reader); return nullptr; } bool NvBlastChunkDTO::deserializeInto(Nv::Blast::Serialization::NvBlastChunk::Reader reader, NvBlastChunk* target) { NVBLAST_ASSERT(target != nullptr); auto readerCentroid = reader.getCentroid(); target->centroid[0] = readerCentroid[0]; target->centroid[1] = readerCentroid[1]; target->centroid[2] = readerCentroid[2]; target->childIndexStop = reader.getChildIndexStop(); target->firstChildIndex = reader.getFirstChildIndex(); target->parentChunkIndex = reader.getParentChunkIndex(); target->userData = reader.getUserData(); target->volume = reader.getVolume(); return true; } } // namespace Blast } // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/FamilyDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "FamilyDTO.h" #include "ActorDTO.h" #include "AssetDTO.h" #include "FamilyGraphDTO.h" #include "NvBlastFamilyGraph.h" #include "NvBlastGlobals.h" #include "NvBlastIDDTO.h" #include "NvBlastChunkDTO.h" #include "NvBlastBondDTO.h" #include <vector> namespace Nv { namespace Blast { bool FamilyDTO::serialize(Nv::Blast::Serialization::Family::Builder builder, const Nv::Blast::FamilyHeader* poco) { NvBlastIDDTO::serialize(builder.initAssetID(), &poco->m_assetID); // cache off the count data from the asset needed to re-create the family post serialization const NvBlastAssetMemSizeData sizeData = NvBlastAssetMemSizeDataFromAsset(poco->m_asset); builder.setBondCount(sizeData.bondCount); builder.setChunkCount(sizeData.chunkCount); builder.setNodeCount(sizeData.nodeCount); builder.setLowerSupportChunkCount(sizeData.lowerSupportChunkCount); builder.setUpperSupportChunkCount(sizeData.upperSupportChunkCount); // actorCount - these are active builder.setActorCount(poco->m_actorCount); // all possible actors const uint32_t actorCount = poco->getActorsArraySize(); capnp::List<Nv::Blast::Serialization::Actor>::Builder actors = builder.initActors(actorCount); for (uint32_t i = 0; i < actorCount; i++) { Actor& actor = poco->getActors()[i]; ActorDTO::serialize(actors[i], &actor); } // visibleChunkIndexLinks uint32_t* visibleChunkIndexLinks = reinterpret_cast<uint32_t *>(poco->getVisibleChunkIndexLinks()); kj::ArrayPtr<uint32_t> visibleChunkIndexLinksArray(visibleChunkIndexLinks, sizeData.chunkCount * 2); builder.setVisibleChunkIndexLinks(visibleChunkIndexLinksArray); // chunkActorIndices kj::ArrayPtr<uint32_t> chunkActorIndicesArray(poco->getChunkActorIndices(), sizeData.chunkCount); builder.setChunkActorIndices(chunkActorIndicesArray); // graphNodeIndexLinks kj::ArrayPtr<uint32_t> graphNodeIndexLinksArray(poco->getGraphNodeIndexLinks(), sizeData.chunkCount); builder.setGraphNodeIndexLinks(graphNodeIndexLinksArray); // lowerSupportChunkHealths kj::ArrayPtr<float> 
lowerSupportChunkHealthsArray(poco->getLowerSupportChunkHealths(), sizeData.chunkCount); builder.setLowerSupportChunkHealths(lowerSupportChunkHealthsArray); // graphBondHealths kj::ArrayPtr<float> graphBondHealthsArray(poco->getBondHealths(), sizeData.bondCount); builder.setGraphBondHealths(graphBondHealthsArray); // familyGraph FamilyGraph *graph = poco->getFamilyGraph(); auto builderGraph = builder.initFamilyGraph(); builderGraph.setNodeCount(sizeData.nodeCount); FamilyGraphDTO::serialize(builderGraph, graph); return true; } Nv::Blast::FamilyHeader* FamilyDTO::deserialize(Nv::Blast::Serialization::Family::Reader reader) { // fill in the count info from the reader NvBlastAssetMemSizeData sizeData; sizeData.bondCount = reader.getBondCount(); sizeData.chunkCount = reader.getChunkCount(); sizeData.nodeCount = reader.getNodeCount(); sizeData.lowerSupportChunkCount = reader.getLowerSupportChunkCount(); sizeData.upperSupportChunkCount = reader.getUpperSupportChunkCount(); // allocate enough space to hold the family const size_t familySize = NvBlastAssetGetFamilyMemorySizeFromSizeData(sizeData, nullptr); void* mem = NVBLAST_ALLOC(familySize); // use the count info to initialize the family auto family = reinterpret_cast<Nv::Blast::FamilyHeader *>(NvBlastAssetCreateFamilyFromSizeData(mem, sizeData, Nv::Blast::logLL)); // then fill in the data from the reader if (deserializeInto(reader, family)) return family; // failed to deserialize, free the allocated memory so it doesn't leak NVBLAST_FREE(mem); return nullptr; } bool FamilyDTO::deserializeInto(Nv::Blast::Serialization::Family::Reader reader, Nv::Blast::FamilyHeader* poco) { NvBlastIDDTO::deserializeInto(reader.getAssetID(), &poco->m_assetID); // active actor count poco->m_actorCount = reader.getActorCount(); // all possible actors Actor* actors = poco->getActors(); auto readerActors = reader.getActors(); NVBLAST_ASSERT(poco->m_actorCount <= readerActors.size()); for (uint32_t i = 0; i < readerActors.size(); i++) { auto actorReader = readerActors[i]; ActorDTO::deserializeInto(actorReader, &actors[i]); } // visibleChunkIndexLinks // they are stored in the buffer as a flat list of uint32_t values, // but stored as pairs in the Family auto readerVisibleChunkIndexLinks = reader.getVisibleChunkIndexLinks(); const uint32_t numVisibleChunkIndexLinks = readerVisibleChunkIndexLinks.size(); for (uint32_t i = 0; i < numVisibleChunkIndexLinks; i += 2) { const uint32_t vcil = i / 2; poco->getVisibleChunkIndexLinks()[vcil].m_adj[0] = readerVisibleChunkIndexLinks[i]; poco->getVisibleChunkIndexLinks()[vcil].m_adj[1] = readerVisibleChunkIndexLinks[i+1]; } // chunkActorIndices auto readerChunkActorIndices = reader.getChunkActorIndices(); const uint32_t numChunkActorIndices = readerChunkActorIndices.size(); for (uint32_t i = 0; i < numChunkActorIndices; i++) { poco->getChunkActorIndices()[i] = readerChunkActorIndices[i]; } // graphNodeIndexLinks auto readerGraphNodeIndexLinks = reader.getGraphNodeIndexLinks(); const uint32_t numGraphNodeIndexLinks = readerGraphNodeIndexLinks.size(); for (uint32_t i = 0; i < numGraphNodeIndexLinks; i++) { poco->getGraphNodeIndexLinks()[i] = readerGraphNodeIndexLinks[i]; } // lowerSupportChunkHealths auto readerLowerSupportChunkHealths = reader.getLowerSupportChunkHealths(); const uint32_t numLowerSupportChunkHealths = readerLowerSupportChunkHealths.size(); for (uint32_t i = 0; i < numLowerSupportChunkHealths; i++) { poco->getLowerSupportChunkHealths()[i] = readerLowerSupportChunkHealths[i]; } // graphBondHealths auto 
readerGraphBondHealths = reader.getGraphBondHealths(); const uint32_t numGraphBondHealths = readerGraphBondHealths.size(); for (uint32_t i = 0; i < numGraphBondHealths; i++) { poco->getBondHealths()[i] = readerGraphBondHealths[i]; } // familyGraph FamilyGraphDTO::deserializeInto(reader.getFamilyGraph(), poco->getFamilyGraph()); return true; } } // namespace Blast } // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/ActorDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastActor.h" #include "NvBlastExtLlSerialization-capn.h" DTO_CLASS(Actor, Nv::Blast::Actor, Nv::Blast::Serialization::Actor)
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/FamilyGraphDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "FamilyGraphDTO.h" #include "NvBlastGlobals.h" namespace Nv { namespace Blast { bool FamilyGraphDTO::serialize(Nv::Blast::Serialization::FamilyGraph::Builder builder, const Nv::Blast::FamilyGraph * poco) { // this needs to be set externally so we have access to it here const uint32_t nodeCount = builder.getNodeCount(); kj::ArrayPtr<IslandId> islandIdsArray(poco->getIslandIds(), nodeCount); builder.setIslandIds(islandIdsArray); kj::ArrayPtr<NodeIndex> dirtyNodeLinksArray(poco->getDirtyNodeLinks(), nodeCount); builder.setDirtyNodeLinks(dirtyNodeLinksArray); kj::ArrayPtr<uint32_t> firstDirtyNodeIndicesArray(poco->getFirstDirtyNodeIndices(), nodeCount); builder.setFirstDirtyNodeIndices(firstDirtyNodeIndicesArray); kj::ArrayPtr<NodeIndex> fastRouteArray(poco->getFastRoute(), nodeCount); builder.setFastRoute(fastRouteArray); kj::ArrayPtr<uint32_t> hopCountsArray(poco->getHopCounts(), nodeCount); builder.setHopCounts(hopCountsArray); auto isEdgeRemoved = poco->getIsEdgeRemoved(); uint8_t* isEdgeRemovedData = reinterpret_cast<uint8_t*>(const_cast<char*>(isEdgeRemoved->getData())); capnp::Data::Reader isEdgeRemovedReader(isEdgeRemovedData, isEdgeRemoved->getSize()); builder.setIsEdgeRemoved(isEdgeRemovedReader); auto isNodeInDirtyList = poco->getIsNodeInDirtyList(); uint8_t* isNodeInDirtyListData = reinterpret_cast<uint8_t*>(const_cast<char*>(isNodeInDirtyList->getData())); capnp::Data::Reader isNodeInDirtyListReader(isNodeInDirtyListData, isNodeInDirtyList->getSize()); builder.setIsNodeInDirtyList(isNodeInDirtyListReader); return true; } Nv::Blast::FamilyGraph* FamilyGraphDTO::deserialize(Nv::Blast::Serialization::FamilyGraph::Reader reader) { NV_UNUSED(reader); return nullptr; } bool FamilyGraphDTO::deserializeInto(Nv::Blast::Serialization::FamilyGraph::Reader reader, Nv::Blast::FamilyGraph * poco) { auto readerIslandIds = reader.getIslandIds(); const uint32_t numIslandIds = readerIslandIds.size(); for (uint32_t i = 0; i < numIslandIds; i++) { poco->getIslandIds()[i] = 
readerIslandIds[i]; } auto readerDirtyNodeLinks = reader.getDirtyNodeLinks(); const uint32_t numDirtyNodeLinks = readerDirtyNodeLinks.size(); for (uint32_t i = 0; i < numDirtyNodeLinks; i++) { poco->getDirtyNodeLinks()[i] = readerDirtyNodeLinks[i]; } auto readerFirstDirtyNodeIndices = reader.getFirstDirtyNodeIndices(); const uint32_t numFirstDirtyNodeIndices = readerFirstDirtyNodeIndices.size(); for (uint32_t i = 0; i < numFirstDirtyNodeIndices; i++) { poco->getFirstDirtyNodeIndices()[i] = readerFirstDirtyNodeIndices[i]; } auto readerFastRoute = reader.getFastRoute(); const uint32_t numFastRoute = readerFastRoute.size(); for (uint32_t i = 0; i < numFastRoute; i++) { poco->getFastRoute()[i] = readerFastRoute[i]; } auto readerHopCounts = reader.getHopCounts(); const uint32_t numHopCounts = readerHopCounts.size(); for (uint32_t i = 0; i < numHopCounts; i++) { poco->getHopCounts()[i] = readerHopCounts[i]; } auto readerIsEdgeRemoved = reader.getIsEdgeRemoved(); const uint32_t numIsEdgeRemoved = readerIsEdgeRemoved.size(); const char* isEdgeRemovedData = reinterpret_cast<const char*>(readerIsEdgeRemoved.begin()); auto isEdgeRemoved = poco->getIsEdgeRemoved(); isEdgeRemoved->setData(isEdgeRemovedData, numIsEdgeRemoved); auto readerIsNodeInDirtyList = reader.getIsNodeInDirtyList(); const uint32_t numIsNodeInDirtyList = readerIsNodeInDirtyList.size(); const char* readerIsNodeInDirtyListData = reinterpret_cast<const char*>(readerIsNodeInDirtyList.begin()); auto isNodeInDirtyList = poco->getIsNodeInDirtyList(); isNodeInDirtyList->setData(readerIsNodeInDirtyListData, numIsNodeInDirtyList); return true; } } // namespace Blast } // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxMeshScaleDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "PxMeshScale.h" #include "NvBlastExtPxSerialization-capn.h" #include "PxCooking.h" DTO_CLASS(PxMeshScale, physx::PxMeshScale, Nv::Blast::Serialization::PxMeshScale)
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/TkAssetJointDescDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastTkAsset.h" #include "NvBlastExtTkSerialization-capn.h" DTO_CLASS(TkAssetJointDesc, Nv::Blast::TkAssetJointDesc, Nv::Blast::Serialization::TkAssetJointDesc)
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/FamilyGraphDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastFamilyGraph.h" #include "NvBlastExtLlSerialization-capn.h" DTO_CLASS(FamilyGraph, Nv::Blast::FamilyGraph, Nv::Blast::Serialization::FamilyGraph)
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvBlastIDDTO.cpp
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.


#include "NvBlastIDDTO.h"
#include "NvBlastTypes.h"
#include "NvBlastAssert.h"
#include "NvBlastExtLlSerialization-capn.h"

namespace Nv
{
namespace Blast
{

bool NvBlastIDDTO::serialize(Nv::Blast::Serialization::UUID::Builder builder, const NvBlastID * poco)
{
    capnp::Data::Reader idArrayReader((unsigned char *)poco->data, 16);
    builder.setValue(idArrayReader);

    return true;
}


NvBlastID* NvBlastIDDTO::deserialize(Nv::Blast::Serialization::UUID::Reader reader)
{
    //FIXME
    NV_UNUSED(reader);

    //TODO: Allocate with ExtContext and return
    return nullptr;
}


bool NvBlastIDDTO::deserializeInto(Nv::Blast::Serialization::UUID::Reader reader, NvBlastID * poco)
{
    NVBLAST_ASSERT_WITH_MESSAGE(reader.getValue().size() == 16, "BlastID must be 16 bytes");
    memcpy(poco, reader.getValue().begin(), 16);

    return true;
}

}   // namespace Blast
}   // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/generated/NvBlastExtLlSerialization-capn.h
// Generated by Cap'n Proto compiler, DO NOT EDIT
// source: NvBlastExtLlSerialization-capn

#ifndef CAPNP_INCLUDED_9a4a58fac38375e0_
#define CAPNP_INCLUDED_9a4a58fac38375e0_

#include <capnp/generated-header-support.h>

#if CAPNP_VERSION != 6001
#error "Version mismatch between generated code and library headers. You must use the same version of the Cap'n Proto compiler and library."
#endif

namespace capnp {
namespace schemas {

CAPNP_DECLARE_SCHEMA(ce4f8468c36f427d);
CAPNP_DECLARE_SCHEMA(fe6948a9a6a3eff5);
CAPNP_DECLARE_SCHEMA(d20ccbe36dd9711d);
CAPNP_DECLARE_SCHEMA(8a38616881ef8310);
CAPNP_DECLARE_SCHEMA(d5e1a9fb31b1350d);
CAPNP_DECLARE_SCHEMA(b292bd608606f041);

enum class Type_b292bd608606f041: uint16_t {
  ASSET_DATA_BLOCK,
  INSTANCE_DATA_BLOCK,
};
CAPNP_DECLARE_ENUM(Type, b292bd608606f041);
CAPNP_DECLARE_SCHEMA(92818c664a7b1aba);
CAPNP_DECLARE_SCHEMA(c43da43c95eada67);
CAPNP_DECLARE_SCHEMA(f018cbfcaacb3a55);
CAPNP_DECLARE_SCHEMA(bfd00835cc19bf3a);

} // namespace schemas
} // namespace capnp

namespace Nv {
namespace Blast {
namespace Serialization {

struct Asset {
  Asset() = delete;

  class Reader;
  class Builder;
  class Pipeline;

  struct _capnpPrivate {
    CAPNP_DECLARE_STRUCT_HEADER(ce4f8468c36f427d, 2, 7)
    #if !CAPNP_LITE
    static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; }
    #endif // !CAPNP_LITE
  };
};

struct Family {
  Family() = delete;

  class Reader;
  class Builder;
  class Pipeline;

  struct _capnpPrivate {
    CAPNP_DECLARE_STRUCT_HEADER(fe6948a9a6a3eff5, 3, 8)
    #if !CAPNP_LITE
    static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; }
    #endif // !CAPNP_LITE
  };
};

struct Actor {
  Actor() = delete;

  class Reader;
  class Builder;
  class Pipeline;

  struct _capnpPrivate {
    CAPNP_DECLARE_STRUCT_HEADER(d20ccbe36dd9711d, 3, 0)
    #if !CAPNP_LITE
    static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; }
    #endif // !CAPNP_LITE
  };
};

struct FamilyGraph {
  FamilyGraph() = delete;

  class Reader;
  class Builder;
  class Pipeline;

  struct _capnpPrivate {
    CAPNP_DECLARE_STRUCT_HEADER(8a38616881ef8310, 1, 7)
    #if !CAPNP_LITE
    static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; }
    #endif // !CAPNP_LITE
  };
};

struct NvBlastDataBlock {
  NvBlastDataBlock() = delete;

  class Reader;
  class Builder;
  class Pipeline;

  typedef ::capnp::schemas::Type_b292bd608606f041 Type;

  struct _capnpPrivate {
    CAPNP_DECLARE_STRUCT_HEADER(d5e1a9fb31b1350d, 2, 0)
    #if !CAPNP_LITE
    static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; }
    #endif // !CAPNP_LITE
  };
};

struct NvBlastChunk {
  NvBlastChunk() = delete;

  class Reader;
  class Builder;
  class Pipeline;

  struct _capnpPrivate {
    CAPNP_DECLARE_STRUCT_HEADER(92818c664a7b1aba, 3, 1)
    #if !CAPNP_LITE
    static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; }
    #endif // !CAPNP_LITE
  };
};

struct NvBlastBond {
  NvBlastBond() = delete;

  class Reader;
  class Builder;
  class Pipeline;

  struct _capnpPrivate {
    CAPNP_DECLARE_STRUCT_HEADER(c43da43c95eada67, 1, 2)
    #if !CAPNP_LITE
    static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; }
    #endif // !CAPNP_LITE
  };
};

struct NvBlastSupportGraph {
  NvBlastSupportGraph() = delete;

  class Reader;
  class Builder;
  class Pipeline;

  struct _capnpPrivate {
    CAPNP_DECLARE_STRUCT_HEADER(f018cbfcaacb3a55, 1, 4)
    #if !CAPNP_LITE
    static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; }
    #endif // !CAPNP_LITE
  };
};

struct UUID {
  UUID() = delete;

  class Reader;
  class Builder;
  class Pipeline;

  struct _capnpPrivate {
    CAPNP_DECLARE_STRUCT_HEADER(bfd00835cc19bf3a, 0, 1)
    #if !CAPNP_LITE
    static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; }
    #endif // !CAPNP_LITE
  };
};

// =======================================================================================

class Asset::Reader {
public:
  typedef Asset Reads;

  Reader() = default;
  inline explicit Reader(::capnp::_::StructReader base): _reader(base) {}

  inline ::capnp::MessageSize totalSize() const {
    return _reader.totalSize().asPublic();
  }

#if !CAPNP_LITE
  inline ::kj::StringTree toString() const {
    return ::capnp::_::structString(_reader, *_capnpPrivate::brand());
  }
#endif // !CAPNP_LITE

  inline bool hasHeader() const;
  inline ::Nv::Blast::Serialization::NvBlastDataBlock::Reader getHeader() const;
  inline bool hasID() const;
  inline ::Nv::Blast::Serialization::UUID::Reader getID() const;
  inline ::uint32_t getChunkCount() const;
  inline bool hasGraph() const;
  inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Reader getGraph() const;
  inline ::uint32_t getLeafChunkCount() const;
  inline ::uint32_t getFirstSubsupportChunkIndex() const;
  inline ::uint32_t getBondCount() const;
  inline bool hasChunks() const;
  inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Reader getChunks() const;
  inline bool hasBonds() const;
  inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Reader getBonds() const;
  inline bool hasSubtreeLeafChunkCounts() const;
  inline ::capnp::List< ::uint32_t>::Reader getSubtreeLeafChunkCounts() const;
  inline bool hasChunkToGraphNodeMap() const;
  inline ::capnp::List< ::uint32_t>::Reader getChunkToGraphNodeMap() const;

private:
  ::capnp::_::StructReader _reader;
  template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_;
  template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers;
  template <typename, ::capnp::Kind> friend struct ::capnp::List;
  friend class ::capnp::MessageBuilder;
  friend class ::capnp::Orphanage;
};

class Asset::Builder {
public:
  typedef Asset Builds;

  Builder() = delete;  // Deleted to discourage incorrect usage.
                       // You can explicitly initialize to nullptr instead.
inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasHeader(); inline ::Nv::Blast::Serialization::NvBlastDataBlock::Builder getHeader(); inline void setHeader( ::Nv::Blast::Serialization::NvBlastDataBlock::Reader value); inline ::Nv::Blast::Serialization::NvBlastDataBlock::Builder initHeader(); inline void adoptHeader(::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastDataBlock>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastDataBlock> disownHeader(); inline bool hasID(); inline ::Nv::Blast::Serialization::UUID::Builder getID(); inline void setID( ::Nv::Blast::Serialization::UUID::Reader value); inline ::Nv::Blast::Serialization::UUID::Builder initID(); inline void adoptID(::capnp::Orphan< ::Nv::Blast::Serialization::UUID>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::UUID> disownID(); inline ::uint32_t getChunkCount(); inline void setChunkCount( ::uint32_t value); inline bool hasGraph(); inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Builder getGraph(); inline void setGraph( ::Nv::Blast::Serialization::NvBlastSupportGraph::Reader value); inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Builder initGraph(); inline void adoptGraph(::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastSupportGraph>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastSupportGraph> disownGraph(); inline ::uint32_t getLeafChunkCount(); inline void setLeafChunkCount( ::uint32_t value); inline ::uint32_t getFirstSubsupportChunkIndex(); inline void setFirstSubsupportChunkIndex( ::uint32_t value); inline ::uint32_t getBondCount(); inline void setBondCount( ::uint32_t value); inline bool hasChunks(); inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Builder getChunks(); inline void setChunks( ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Builder initChunks(unsigned int size); inline void adoptChunks(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>> disownChunks(); inline bool hasBonds(); inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Builder getBonds(); inline void setBonds( ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Builder initBonds(unsigned int size); inline void adoptBonds(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>> disownBonds(); inline bool hasSubtreeLeafChunkCounts(); inline ::capnp::List< ::uint32_t>::Builder getSubtreeLeafChunkCounts(); inline void setSubtreeLeafChunkCounts( ::capnp::List< ::uint32_t>::Reader value); inline void setSubtreeLeafChunkCounts(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initSubtreeLeafChunkCounts(unsigned int size); inline void adoptSubtreeLeafChunkCounts(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< 
::capnp::List< ::uint32_t>> disownSubtreeLeafChunkCounts(); inline bool hasChunkToGraphNodeMap(); inline ::capnp::List< ::uint32_t>::Builder getChunkToGraphNodeMap(); inline void setChunkToGraphNodeMap( ::capnp::List< ::uint32_t>::Reader value); inline void setChunkToGraphNodeMap(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initChunkToGraphNodeMap(unsigned int size); inline void adoptChunkToGraphNodeMap(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownChunkToGraphNodeMap(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class Asset::Pipeline { public: typedef Asset Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} inline ::Nv::Blast::Serialization::NvBlastDataBlock::Pipeline getHeader(); inline ::Nv::Blast::Serialization::UUID::Pipeline getID(); inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Pipeline getGraph(); private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class Family::Reader { public: typedef Family Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasAssetID() const; inline ::Nv::Blast::Serialization::UUID::Reader getAssetID() const; inline bool hasActors() const; inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Reader getActors() const; inline bool hasVisibleChunkIndexLinks() const; inline ::capnp::List< ::uint32_t>::Reader getVisibleChunkIndexLinks() const; inline bool hasChunkActorIndices() const; inline ::capnp::List< ::uint32_t>::Reader getChunkActorIndices() const; inline bool hasGraphNodeIndexLinks() const; inline ::capnp::List< ::uint32_t>::Reader getGraphNodeIndexLinks() const; inline bool hasLowerSupportChunkHealths() const; inline ::capnp::List<float>::Reader getLowerSupportChunkHealths() const; inline bool hasGraphBondHealths() const; inline ::capnp::List<float>::Reader getGraphBondHealths() const; inline bool hasFamilyGraph() const; inline ::Nv::Blast::Serialization::FamilyGraph::Reader getFamilyGraph() const; inline ::uint32_t getActorCount() const; inline ::uint32_t getBondCount() const; inline ::uint32_t getChunkCount() const; inline ::uint32_t getNodeCount() const; inline ::uint32_t getLowerSupportChunkCount() const; inline ::uint32_t getUpperSupportChunkCount() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class Family::Builder { public: typedef Family Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. 
inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasAssetID(); inline ::Nv::Blast::Serialization::UUID::Builder getAssetID(); inline void setAssetID( ::Nv::Blast::Serialization::UUID::Reader value); inline ::Nv::Blast::Serialization::UUID::Builder initAssetID(); inline void adoptAssetID(::capnp::Orphan< ::Nv::Blast::Serialization::UUID>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::UUID> disownAssetID(); inline bool hasActors(); inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Builder getActors(); inline void setActors( ::capnp::List< ::Nv::Blast::Serialization::Actor>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Builder initActors(unsigned int size); inline void adoptActors(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::Actor>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::Actor>> disownActors(); inline bool hasVisibleChunkIndexLinks(); inline ::capnp::List< ::uint32_t>::Builder getVisibleChunkIndexLinks(); inline void setVisibleChunkIndexLinks( ::capnp::List< ::uint32_t>::Reader value); inline void setVisibleChunkIndexLinks(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initVisibleChunkIndexLinks(unsigned int size); inline void adoptVisibleChunkIndexLinks(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownVisibleChunkIndexLinks(); inline bool hasChunkActorIndices(); inline ::capnp::List< ::uint32_t>::Builder getChunkActorIndices(); inline void setChunkActorIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setChunkActorIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initChunkActorIndices(unsigned int size); inline void adoptChunkActorIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownChunkActorIndices(); inline bool hasGraphNodeIndexLinks(); inline ::capnp::List< ::uint32_t>::Builder getGraphNodeIndexLinks(); inline void setGraphNodeIndexLinks( ::capnp::List< ::uint32_t>::Reader value); inline void setGraphNodeIndexLinks(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initGraphNodeIndexLinks(unsigned int size); inline void adoptGraphNodeIndexLinks(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownGraphNodeIndexLinks(); inline bool hasLowerSupportChunkHealths(); inline ::capnp::List<float>::Builder getLowerSupportChunkHealths(); inline void setLowerSupportChunkHealths( ::capnp::List<float>::Reader value); inline void setLowerSupportChunkHealths(::kj::ArrayPtr<const float> value); inline ::capnp::List<float>::Builder initLowerSupportChunkHealths(unsigned int size); inline void adoptLowerSupportChunkHealths(::capnp::Orphan< ::capnp::List<float>>&& value); inline ::capnp::Orphan< ::capnp::List<float>> disownLowerSupportChunkHealths(); inline bool hasGraphBondHealths(); inline ::capnp::List<float>::Builder getGraphBondHealths(); inline void setGraphBondHealths( ::capnp::List<float>::Reader 
value); inline void setGraphBondHealths(::kj::ArrayPtr<const float> value); inline ::capnp::List<float>::Builder initGraphBondHealths(unsigned int size); inline void adoptGraphBondHealths(::capnp::Orphan< ::capnp::List<float>>&& value); inline ::capnp::Orphan< ::capnp::List<float>> disownGraphBondHealths(); inline bool hasFamilyGraph(); inline ::Nv::Blast::Serialization::FamilyGraph::Builder getFamilyGraph(); inline void setFamilyGraph( ::Nv::Blast::Serialization::FamilyGraph::Reader value); inline ::Nv::Blast::Serialization::FamilyGraph::Builder initFamilyGraph(); inline void adoptFamilyGraph(::capnp::Orphan< ::Nv::Blast::Serialization::FamilyGraph>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::FamilyGraph> disownFamilyGraph(); inline ::uint32_t getActorCount(); inline void setActorCount( ::uint32_t value); inline ::uint32_t getBondCount(); inline void setBondCount( ::uint32_t value); inline ::uint32_t getChunkCount(); inline void setChunkCount( ::uint32_t value); inline ::uint32_t getNodeCount(); inline void setNodeCount( ::uint32_t value); inline ::uint32_t getLowerSupportChunkCount(); inline void setLowerSupportChunkCount( ::uint32_t value); inline ::uint32_t getUpperSupportChunkCount(); inline void setUpperSupportChunkCount( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class Family::Pipeline { public: typedef Family Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} inline ::Nv::Blast::Serialization::UUID::Pipeline getAssetID(); inline ::Nv::Blast::Serialization::FamilyGraph::Pipeline getFamilyGraph(); private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class Actor::Reader { public: typedef Actor Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline ::uint32_t getFamilyOffset() const; inline ::uint32_t getFirstVisibleChunkIndex() const; inline ::uint32_t getVisibleChunkCount() const; inline ::uint32_t getFirstGraphNodeIndex() const; inline ::uint32_t getGraphNodeCount() const; inline ::uint32_t getLeafChunkCount() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class Actor::Builder { public: typedef Actor Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. 
inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline ::uint32_t getFamilyOffset(); inline void setFamilyOffset( ::uint32_t value); inline ::uint32_t getFirstVisibleChunkIndex(); inline void setFirstVisibleChunkIndex( ::uint32_t value); inline ::uint32_t getVisibleChunkCount(); inline void setVisibleChunkCount( ::uint32_t value); inline ::uint32_t getFirstGraphNodeIndex(); inline void setFirstGraphNodeIndex( ::uint32_t value); inline ::uint32_t getGraphNodeCount(); inline void setGraphNodeCount( ::uint32_t value); inline ::uint32_t getLeafChunkCount(); inline void setLeafChunkCount( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class Actor::Pipeline { public: typedef Actor Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class FamilyGraph::Reader { public: typedef FamilyGraph Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasIslandIds() const; inline ::capnp::List< ::uint32_t>::Reader getIslandIds() const; inline bool hasDirtyNodeLinks() const; inline ::capnp::List< ::uint32_t>::Reader getDirtyNodeLinks() const; inline bool hasFirstDirtyNodeIndices() const; inline ::capnp::List< ::uint32_t>::Reader getFirstDirtyNodeIndices() const; inline bool hasFastRoute() const; inline ::capnp::List< ::uint32_t>::Reader getFastRoute() const; inline bool hasHopCounts() const; inline ::capnp::List< ::uint32_t>::Reader getHopCounts() const; inline bool hasIsEdgeRemoved() const; inline ::capnp::Data::Reader getIsEdgeRemoved() const; inline bool hasIsNodeInDirtyList() const; inline ::capnp::Data::Reader getIsNodeInDirtyList() const; inline ::uint32_t getNodeCount() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class FamilyGraph::Builder { public: typedef FamilyGraph Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. 
inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasIslandIds(); inline ::capnp::List< ::uint32_t>::Builder getIslandIds(); inline void setIslandIds( ::capnp::List< ::uint32_t>::Reader value); inline void setIslandIds(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initIslandIds(unsigned int size); inline void adoptIslandIds(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownIslandIds(); inline bool hasDirtyNodeLinks(); inline ::capnp::List< ::uint32_t>::Builder getDirtyNodeLinks(); inline void setDirtyNodeLinks( ::capnp::List< ::uint32_t>::Reader value); inline void setDirtyNodeLinks(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initDirtyNodeLinks(unsigned int size); inline void adoptDirtyNodeLinks(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownDirtyNodeLinks(); inline bool hasFirstDirtyNodeIndices(); inline ::capnp::List< ::uint32_t>::Builder getFirstDirtyNodeIndices(); inline void setFirstDirtyNodeIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setFirstDirtyNodeIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initFirstDirtyNodeIndices(unsigned int size); inline void adoptFirstDirtyNodeIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownFirstDirtyNodeIndices(); inline bool hasFastRoute(); inline ::capnp::List< ::uint32_t>::Builder getFastRoute(); inline void setFastRoute( ::capnp::List< ::uint32_t>::Reader value); inline void setFastRoute(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initFastRoute(unsigned int size); inline void adoptFastRoute(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownFastRoute(); inline bool hasHopCounts(); inline ::capnp::List< ::uint32_t>::Builder getHopCounts(); inline void setHopCounts( ::capnp::List< ::uint32_t>::Reader value); inline void setHopCounts(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initHopCounts(unsigned int size); inline void adoptHopCounts(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownHopCounts(); inline bool hasIsEdgeRemoved(); inline ::capnp::Data::Builder getIsEdgeRemoved(); inline void setIsEdgeRemoved( ::capnp::Data::Reader value); inline ::capnp::Data::Builder initIsEdgeRemoved(unsigned int size); inline void adoptIsEdgeRemoved(::capnp::Orphan< ::capnp::Data>&& value); inline ::capnp::Orphan< ::capnp::Data> disownIsEdgeRemoved(); inline bool hasIsNodeInDirtyList(); inline ::capnp::Data::Builder getIsNodeInDirtyList(); inline void setIsNodeInDirtyList( ::capnp::Data::Reader value); inline ::capnp::Data::Builder initIsNodeInDirtyList(unsigned int size); inline void adoptIsNodeInDirtyList(::capnp::Orphan< ::capnp::Data>&& value); inline ::capnp::Orphan< ::capnp::Data> disownIsNodeInDirtyList(); inline ::uint32_t getNodeCount(); 
inline void setNodeCount( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class FamilyGraph::Pipeline { public: typedef FamilyGraph Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvBlastDataBlock::Reader { public: typedef NvBlastDataBlock Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline ::Nv::Blast::Serialization::NvBlastDataBlock::Type getDataType() const; inline ::uint32_t getFormatVersion() const; inline ::uint32_t getSize() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvBlastDataBlock::Builder { public: typedef NvBlastDataBlock Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline ::Nv::Blast::Serialization::NvBlastDataBlock::Type getDataType(); inline void setDataType( ::Nv::Blast::Serialization::NvBlastDataBlock::Type value); inline ::uint32_t getFormatVersion(); inline void setFormatVersion( ::uint32_t value); inline ::uint32_t getSize(); inline void setSize( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvBlastDataBlock::Pipeline { public: typedef NvBlastDataBlock Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvBlastChunk::Reader { public: typedef NvBlastChunk Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } 
#endif // !CAPNP_LITE inline bool hasCentroid() const; inline ::capnp::List<float>::Reader getCentroid() const; inline float getVolume() const; inline ::uint32_t getParentChunkIndex() const; inline ::uint32_t getFirstChildIndex() const; inline ::uint32_t getChildIndexStop() const; inline ::uint32_t getUserData() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvBlastChunk::Builder { public: typedef NvBlastChunk Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasCentroid(); inline ::capnp::List<float>::Builder getCentroid(); inline void setCentroid( ::capnp::List<float>::Reader value); inline void setCentroid(::kj::ArrayPtr<const float> value); inline ::capnp::List<float>::Builder initCentroid(unsigned int size); inline void adoptCentroid(::capnp::Orphan< ::capnp::List<float>>&& value); inline ::capnp::Orphan< ::capnp::List<float>> disownCentroid(); inline float getVolume(); inline void setVolume(float value); inline ::uint32_t getParentChunkIndex(); inline void setParentChunkIndex( ::uint32_t value); inline ::uint32_t getFirstChildIndex(); inline void setFirstChildIndex( ::uint32_t value); inline ::uint32_t getChildIndexStop(); inline void setChildIndexStop( ::uint32_t value); inline ::uint32_t getUserData(); inline void setUserData( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvBlastChunk::Pipeline { public: typedef NvBlastChunk Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvBlastBond::Reader { public: typedef NvBlastBond Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasNormal() const; inline ::capnp::List<float>::Reader getNormal() const; inline float getArea() const; inline bool hasCentroid() const; inline ::capnp::List<float>::Reader getCentroid() const; inline ::uint32_t getUserData() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct 
::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvBlastBond::Builder { public: typedef NvBlastBond Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasNormal(); inline ::capnp::List<float>::Builder getNormal(); inline void setNormal( ::capnp::List<float>::Reader value); inline void setNormal(::kj::ArrayPtr<const float> value); inline ::capnp::List<float>::Builder initNormal(unsigned int size); inline void adoptNormal(::capnp::Orphan< ::capnp::List<float>>&& value); inline ::capnp::Orphan< ::capnp::List<float>> disownNormal(); inline float getArea(); inline void setArea(float value); inline bool hasCentroid(); inline ::capnp::List<float>::Builder getCentroid(); inline void setCentroid( ::capnp::List<float>::Reader value); inline void setCentroid(::kj::ArrayPtr<const float> value); inline ::capnp::List<float>::Builder initCentroid(unsigned int size); inline void adoptCentroid(::capnp::Orphan< ::capnp::List<float>>&& value); inline ::capnp::Orphan< ::capnp::List<float>> disownCentroid(); inline ::uint32_t getUserData(); inline void setUserData( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvBlastBond::Pipeline { public: typedef NvBlastBond Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvBlastSupportGraph::Reader { public: typedef NvBlastSupportGraph Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline ::uint32_t getNodeCount() const; inline bool hasChunkIndices() const; inline ::capnp::List< ::uint32_t>::Reader getChunkIndices() const; inline bool hasAdjacencyPartition() const; inline ::capnp::List< ::uint32_t>::Reader getAdjacencyPartition() const; inline bool hasAdjacentNodeIndices() const; inline ::capnp::List< ::uint32_t>::Reader getAdjacentNodeIndices() const; inline bool hasAdjacentBondIndices() const; inline ::capnp::List< ::uint32_t>::Reader getAdjacentBondIndices() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class 
::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvBlastSupportGraph::Builder { public: typedef NvBlastSupportGraph Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline ::uint32_t getNodeCount(); inline void setNodeCount( ::uint32_t value); inline bool hasChunkIndices(); inline ::capnp::List< ::uint32_t>::Builder getChunkIndices(); inline void setChunkIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setChunkIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initChunkIndices(unsigned int size); inline void adoptChunkIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownChunkIndices(); inline bool hasAdjacencyPartition(); inline ::capnp::List< ::uint32_t>::Builder getAdjacencyPartition(); inline void setAdjacencyPartition( ::capnp::List< ::uint32_t>::Reader value); inline void setAdjacencyPartition(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initAdjacencyPartition(unsigned int size); inline void adoptAdjacencyPartition(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownAdjacencyPartition(); inline bool hasAdjacentNodeIndices(); inline ::capnp::List< ::uint32_t>::Builder getAdjacentNodeIndices(); inline void setAdjacentNodeIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setAdjacentNodeIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initAdjacentNodeIndices(unsigned int size); inline void adoptAdjacentNodeIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownAdjacentNodeIndices(); inline bool hasAdjacentBondIndices(); inline ::capnp::List< ::uint32_t>::Builder getAdjacentBondIndices(); inline void setAdjacentBondIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setAdjacentBondIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initAdjacentBondIndices(unsigned int size); inline void adoptAdjacentBondIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownAdjacentBondIndices(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvBlastSupportGraph::Pipeline { public: typedef NvBlastSupportGraph Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class UUID::Reader { public: typedef UUID Reads; Reader() = default; inline explicit 
Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasValue() const; inline ::capnp::Data::Reader getValue() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class UUID::Builder { public: typedef UUID Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasValue(); inline ::capnp::Data::Builder getValue(); inline void setValue( ::capnp::Data::Reader value); inline ::capnp::Data::Builder initValue(unsigned int size); inline void adoptValue(::capnp::Orphan< ::capnp::Data>&& value); inline ::capnp::Orphan< ::capnp::Data> disownValue(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class UUID::Pipeline { public: typedef UUID Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE // ======================================================================================= inline bool Asset::Reader::hasHeader() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasHeader() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Reader Asset::Reader::getHeader() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Builder Asset::Builder::getHeader() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::NvBlastDataBlock::Pipeline Asset::Pipeline::getHeader() { return ::Nv::Blast::Serialization::NvBlastDataBlock::Pipeline(_typeless.getPointerField(0)); } #endif // !CAPNP_LITE inline void Asset::Builder::setHeader( ::Nv::Blast::Serialization::NvBlastDataBlock::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::set(_builder.getPointerField( ::capnp::bounded<0>() * 
::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Builder Asset::Builder::initHeader() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void Asset::Builder::adoptHeader( ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastDataBlock>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastDataBlock> Asset::Builder::disownHeader() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool Asset::Reader::hasID() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasID() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::UUID::Reader Asset::Reader::getID() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::UUID::Builder Asset::Builder::getID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::UUID::Pipeline Asset::Pipeline::getID() { return ::Nv::Blast::Serialization::UUID::Pipeline(_typeless.getPointerField(1)); } #endif // !CAPNP_LITE inline void Asset::Builder::setID( ::Nv::Blast::Serialization::UUID::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::UUID::Builder Asset::Builder::initID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void Asset::Builder::adoptID( ::capnp::Orphan< ::Nv::Blast::Serialization::UUID>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::UUID> Asset::Builder::disownID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::uint32_t Asset::Reader::getChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::uint32_t Asset::Builder::getChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void Asset::Builder::setChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline bool Asset::Reader::hasGraph() const { return !_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasGraph() { return !_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Reader Asset::Reader::getGraph() const { return ::capnp::_::PointerHelpers< 
::Nv::Blast::Serialization::NvBlastSupportGraph>::get(_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Builder Asset::Builder::getGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::get(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Pipeline Asset::Pipeline::getGraph() { return ::Nv::Blast::Serialization::NvBlastSupportGraph::Pipeline(_typeless.getPointerField(2)); } #endif // !CAPNP_LITE inline void Asset::Builder::setGraph( ::Nv::Blast::Serialization::NvBlastSupportGraph::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Builder Asset::Builder::initGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::init(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline void Asset::Builder::adoptGraph( ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastSupportGraph>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::adopt(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastSupportGraph> Asset::Builder::disownGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::disown(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline ::uint32_t Asset::Reader::getLeafChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t Asset::Builder::getLeafChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void Asset::Builder::setLeafChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Asset::Reader::getFirstSubsupportChunkIndex() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline ::uint32_t Asset::Builder::getFirstSubsupportChunkIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void Asset::Builder::setFirstSubsupportChunkIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Asset::Reader::getBondCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline ::uint32_t Asset::Builder::getBondCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline void Asset::Builder::setBondCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS, value); } inline bool Asset::Reader::hasChunks() const { return !_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasChunks() { return !_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Reader Asset::Reader::getChunks() const { return ::capnp::_::PointerHelpers< ::capnp::List< 
::Nv::Blast::Serialization::NvBlastChunk>>::get(_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Builder Asset::Builder::getChunks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::get(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline void Asset::Builder::setChunks( ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Builder Asset::Builder::initChunks(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::init(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), size); } inline void Asset::Builder::adoptChunks( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::adopt(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>> Asset::Builder::disownChunks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::disown(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline bool Asset::Reader::hasBonds() const { return !_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasBonds() { return !_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Reader Asset::Reader::getBonds() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::get(_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Builder Asset::Builder::getBonds() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::get(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline void Asset::Builder::setBonds( ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Builder Asset::Builder::initBonds(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::init(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), size); } inline void Asset::Builder::adoptBonds( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::adopt(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>> Asset::Builder::disownBonds() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::disown(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } 
inline bool Asset::Reader::hasSubtreeLeafChunkCounts() const { return !_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasSubtreeLeafChunkCounts() { return !_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Asset::Reader::getSubtreeLeafChunkCounts() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Asset::Builder::getSubtreeLeafChunkCounts() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline void Asset::Builder::setSubtreeLeafChunkCounts( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); } inline void Asset::Builder::setSubtreeLeafChunkCounts(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder Asset::Builder::initSubtreeLeafChunkCounts(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), size); } inline void Asset::Builder::adoptSubtreeLeafChunkCounts( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Asset::Builder::disownSubtreeLeafChunkCounts() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline bool Asset::Reader::hasChunkToGraphNodeMap() const { return !_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasChunkToGraphNodeMap() { return !_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Asset::Reader::getChunkToGraphNodeMap() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Asset::Builder::getChunkToGraphNodeMap() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline void Asset::Builder::setChunkToGraphNodeMap( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); } inline void Asset::Builder::setChunkToGraphNodeMap(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder Asset::Builder::initChunkToGraphNodeMap(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), size); } inline void Asset::Builder::adoptChunkToGraphNodeMap( ::capnp::Orphan< 
::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Asset::Builder::disownChunkToGraphNodeMap() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasAssetID() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasAssetID() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::UUID::Reader Family::Reader::getAssetID() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::UUID::Builder Family::Builder::getAssetID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::UUID::Pipeline Family::Pipeline::getAssetID() { return ::Nv::Blast::Serialization::UUID::Pipeline(_typeless.getPointerField(0)); } #endif // !CAPNP_LITE inline void Family::Builder::setAssetID( ::Nv::Blast::Serialization::UUID::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::UUID::Builder Family::Builder::initAssetID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void Family::Builder::adoptAssetID( ::capnp::Orphan< ::Nv::Blast::Serialization::UUID>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::UUID> Family::Builder::disownAssetID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasActors() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasActors() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Reader Family::Reader::getActors() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Builder Family::Builder::getActors() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void Family::Builder::setActors( ::capnp::List< ::Nv::Blast::Serialization::Actor>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Builder Family::Builder::initActors(unsigned int size) { return ::capnp::_::PointerHelpers< 
::capnp::List< ::Nv::Blast::Serialization::Actor>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptActors( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::Actor>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::Actor>> Family::Builder::disownActors() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasVisibleChunkIndexLinks() const { return !_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasVisibleChunkIndexLinks() { return !_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Family::Reader::getVisibleChunkIndexLinks() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::getVisibleChunkIndexLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline void Family::Builder::setVisibleChunkIndexLinks( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline void Family::Builder::setVisibleChunkIndexLinks(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::initVisibleChunkIndexLinks(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptVisibleChunkIndexLinks( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Family::Builder::disownVisibleChunkIndexLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasChunkActorIndices() const { return !_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasChunkActorIndices() { return !_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Family::Reader::getChunkActorIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::getChunkActorIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline void Family::Builder::setChunkActorIndices( ::capnp::List< ::uint32_t>::Reader 
value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline void Family::Builder::setChunkActorIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::initChunkActorIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptChunkActorIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Family::Builder::disownChunkActorIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasGraphNodeIndexLinks() const { return !_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasGraphNodeIndexLinks() { return !_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Family::Reader::getGraphNodeIndexLinks() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::getGraphNodeIndexLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline void Family::Builder::setGraphNodeIndexLinks( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); } inline void Family::Builder::setGraphNodeIndexLinks(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::initGraphNodeIndexLinks(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptGraphNodeIndexLinks( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Family::Builder::disownGraphNodeIndexLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasLowerSupportChunkHealths() const { return !_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasLowerSupportChunkHealths() { return !_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List<float>::Reader Family::Reader::getLowerSupportChunkHealths() const { return ::capnp::_::PointerHelpers< 
::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline ::capnp::List<float>::Builder Family::Builder::getLowerSupportChunkHealths() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline void Family::Builder::setLowerSupportChunkHealths( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); } inline void Family::Builder::setLowerSupportChunkHealths(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); } inline ::capnp::List<float>::Builder Family::Builder::initLowerSupportChunkHealths(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptLowerSupportChunkHealths( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List<float>> Family::Builder::disownLowerSupportChunkHealths() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasGraphBondHealths() const { return !_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasGraphBondHealths() { return !_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List<float>::Reader Family::Reader::getGraphBondHealths() const { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline ::capnp::List<float>::Builder Family::Builder::getGraphBondHealths() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline void Family::Builder::setGraphBondHealths( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); } inline void Family::Builder::setGraphBondHealths(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); } inline ::capnp::List<float>::Builder Family::Builder::initGraphBondHealths(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptGraphBondHealths( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List<float>> Family::Builder::disownGraphBondHealths() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasFamilyGraph() const { return !_reader.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS).isNull(); } inline bool 
Family::Builder::hasFamilyGraph() { return !_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::FamilyGraph::Reader Family::Reader::getFamilyGraph() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::get(_reader.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::FamilyGraph::Builder Family::Builder::getFamilyGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::get(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::FamilyGraph::Pipeline Family::Pipeline::getFamilyGraph() { return ::Nv::Blast::Serialization::FamilyGraph::Pipeline(_typeless.getPointerField(7)); } #endif // !CAPNP_LITE inline void Family::Builder::setFamilyGraph( ::Nv::Blast::Serialization::FamilyGraph::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::set(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::FamilyGraph::Builder Family::Builder::initFamilyGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::init(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS)); } inline void Family::Builder::adoptFamilyGraph( ::capnp::Orphan< ::Nv::Blast::Serialization::FamilyGraph>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::adopt(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::FamilyGraph> Family::Builder::disownFamilyGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::disown(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS)); } inline ::uint32_t Family::Reader::getActorCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::uint32_t Family::Builder::getActorCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void Family::Builder::setActorCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Family::Reader::getBondCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t Family::Builder::getBondCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void Family::Builder::setBondCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Family::Reader::getChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline ::uint32_t Family::Builder::getChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void Family::Builder::setChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Family::Reader::getNodeCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline ::uint32_t Family::Builder::getNodeCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline void Family::Builder::setNodeCount( ::uint32_t value) 
{ _builder.setDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Family::Reader::getLowerSupportChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline ::uint32_t Family::Builder::getLowerSupportChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline void Family::Builder::setLowerSupportChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Family::Reader::getUpperSupportChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS); } inline ::uint32_t Family::Builder::getUpperSupportChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS); } inline void Family::Builder::setUpperSupportChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Actor::Reader::getFamilyOffset() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::uint32_t Actor::Builder::getFamilyOffset() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void Actor::Builder::setFamilyOffset( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Actor::Reader::getFirstVisibleChunkIndex() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t Actor::Builder::getFirstVisibleChunkIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void Actor::Builder::setFirstVisibleChunkIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Actor::Reader::getVisibleChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline ::uint32_t Actor::Builder::getVisibleChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void Actor::Builder::setVisibleChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Actor::Reader::getFirstGraphNodeIndex() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline ::uint32_t Actor::Builder::getFirstGraphNodeIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline void Actor::Builder::setFirstGraphNodeIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Actor::Reader::getGraphNodeCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline ::uint32_t Actor::Builder::getGraphNodeCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline void Actor::Builder::setGraphNodeCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Actor::Reader::getLeafChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS); } inline ::uint32_t Actor::Builder::getLeafChunkCount() { return 
_builder.getDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS); } inline void Actor::Builder::setLeafChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS, value); } inline bool FamilyGraph::Reader::hasIslandIds() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasIslandIds() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getIslandIds() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getIslandIds() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setIslandIds( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void FamilyGraph::Builder::setIslandIds(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::initIslandIds(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptIslandIds( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownIslandIds() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool FamilyGraph::Reader::hasDirtyNodeLinks() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasDirtyNodeLinks() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getDirtyNodeLinks() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getDirtyNodeLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setDirtyNodeLinks( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline void FamilyGraph::Builder::setDirtyNodeLinks(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::initDirtyNodeLinks(unsigned int size) { return ::capnp::_::PointerHelpers< 
::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptDirtyNodeLinks( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownDirtyNodeLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline bool FamilyGraph::Reader::hasFirstDirtyNodeIndices() const { return !_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasFirstDirtyNodeIndices() { return !_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getFirstDirtyNodeIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getFirstDirtyNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setFirstDirtyNodeIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline void FamilyGraph::Builder::setFirstDirtyNodeIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::initFirstDirtyNodeIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptFirstDirtyNodeIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownFirstDirtyNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline bool FamilyGraph::Reader::hasFastRoute() const { return !_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasFastRoute() { return !_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getFastRoute() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getFastRoute() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setFastRoute( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< 
::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline void FamilyGraph::Builder::setFastRoute(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::initFastRoute(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptFastRoute( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownFastRoute() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline bool FamilyGraph::Reader::hasHopCounts() const { return !_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasHopCounts() { return !_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getHopCounts() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getHopCounts() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setHopCounts( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); } inline void FamilyGraph::Builder::setHopCounts(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::initHopCounts(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptHopCounts( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownHopCounts() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline bool FamilyGraph::Reader::hasIsEdgeRemoved() const { return !_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasIsEdgeRemoved() { return !_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline ::capnp::Data::Reader FamilyGraph::Reader::getIsEdgeRemoved() const { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline ::capnp::Data::Builder 
FamilyGraph::Builder::getIsEdgeRemoved() { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setIsEdgeRemoved( ::capnp::Data::Reader value) { ::capnp::_::PointerHelpers< ::capnp::Data>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); } inline ::capnp::Data::Builder FamilyGraph::Builder::initIsEdgeRemoved(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::Data>::init(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptIsEdgeRemoved( ::capnp::Orphan< ::capnp::Data>&& value) { ::capnp::_::PointerHelpers< ::capnp::Data>::adopt(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::Data> FamilyGraph::Builder::disownIsEdgeRemoved() { return ::capnp::_::PointerHelpers< ::capnp::Data>::disown(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline bool FamilyGraph::Reader::hasIsNodeInDirtyList() const { return !_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasIsNodeInDirtyList() { return !_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline ::capnp::Data::Reader FamilyGraph::Reader::getIsNodeInDirtyList() const { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline ::capnp::Data::Builder FamilyGraph::Builder::getIsNodeInDirtyList() { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setIsNodeInDirtyList( ::capnp::Data::Reader value) { ::capnp::_::PointerHelpers< ::capnp::Data>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); } inline ::capnp::Data::Builder FamilyGraph::Builder::initIsNodeInDirtyList(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::Data>::init(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptIsNodeInDirtyList( ::capnp::Orphan< ::capnp::Data>&& value) { ::capnp::_::PointerHelpers< ::capnp::Data>::adopt(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::Data> FamilyGraph::Builder::disownIsNodeInDirtyList() { return ::capnp::_::PointerHelpers< ::capnp::Data>::disown(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline ::uint32_t FamilyGraph::Reader::getNodeCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::uint32_t FamilyGraph::Builder::getNodeCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void FamilyGraph::Builder::setNodeCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Type NvBlastDataBlock::Reader::getDataType() const { return _reader.getDataField< ::Nv::Blast::Serialization::NvBlastDataBlock::Type>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Type NvBlastDataBlock::Builder::getDataType() { return _builder.getDataField< ::Nv::Blast::Serialization::NvBlastDataBlock::Type>( 
::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvBlastDataBlock::Builder::setDataType( ::Nv::Blast::Serialization::NvBlastDataBlock::Type value) { _builder.setDataField< ::Nv::Blast::Serialization::NvBlastDataBlock::Type>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastDataBlock::Reader::getFormatVersion() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastDataBlock::Builder::getFormatVersion() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void NvBlastDataBlock::Builder::setFormatVersion( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastDataBlock::Reader::getSize() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastDataBlock::Builder::getSize() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void NvBlastDataBlock::Builder::setSize( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } inline bool NvBlastChunk::Reader::hasCentroid() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastChunk::Builder::hasCentroid() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List<float>::Reader NvBlastChunk::Reader::getCentroid() const { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List<float>::Builder NvBlastChunk::Builder::getCentroid() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void NvBlastChunk::Builder::setCentroid( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void NvBlastChunk::Builder::setCentroid(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List<float>::Builder NvBlastChunk::Builder::initCentroid(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void NvBlastChunk::Builder::adoptCentroid( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List<float>> NvBlastChunk::Builder::disownCentroid() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline float NvBlastChunk::Reader::getVolume() const { return _reader.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline float NvBlastChunk::Builder::getVolume() { return _builder.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setVolume(float value) { _builder.setDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastChunk::Reader::getParentChunkIndex() 
const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastChunk::Builder::getParentChunkIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setParentChunkIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastChunk::Reader::getFirstChildIndex() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastChunk::Builder::getFirstChildIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setFirstChildIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastChunk::Reader::getChildIndexStop() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastChunk::Builder::getChildIndexStop() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setChildIndexStop( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastChunk::Reader::getUserData() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastChunk::Builder::getUserData() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setUserData( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS, value); } inline bool NvBlastBond::Reader::hasNormal() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastBond::Builder::hasNormal() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List<float>::Reader NvBlastBond::Reader::getNormal() const { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List<float>::Builder NvBlastBond::Builder::getNormal() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void NvBlastBond::Builder::setNormal( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void NvBlastBond::Builder::setNormal(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List<float>::Builder NvBlastBond::Builder::initNormal(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void NvBlastBond::Builder::adoptNormal( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List<float>> NvBlastBond::Builder::disownNormal() { return ::capnp::_::PointerHelpers< 
::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline float NvBlastBond::Reader::getArea() const { return _reader.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline float NvBlastBond::Builder::getArea() { return _builder.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvBlastBond::Builder::setArea(float value) { _builder.setDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline bool NvBlastBond::Reader::hasCentroid() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastBond::Builder::hasCentroid() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List<float>::Reader NvBlastBond::Reader::getCentroid() const { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List<float>::Builder NvBlastBond::Builder::getCentroid() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void NvBlastBond::Builder::setCentroid( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline void NvBlastBond::Builder::setCentroid(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List<float>::Builder NvBlastBond::Builder::initCentroid(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void NvBlastBond::Builder::adoptCentroid( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List<float>> NvBlastBond::Builder::disownCentroid() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::uint32_t NvBlastBond::Reader::getUserData() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastBond::Builder::getUserData() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void NvBlastBond::Builder::setUserData( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastSupportGraph::Reader::getNodeCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastSupportGraph::Builder::getNodeCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvBlastSupportGraph::Builder::setNodeCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline bool NvBlastSupportGraph::Reader::hasChunkIndices() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastSupportGraph::Builder::hasChunkIndices() { return !_builder.getPointerField( ::capnp::bounded<0>() 
* ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader NvBlastSupportGraph::Reader::getChunkIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::getChunkIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void NvBlastSupportGraph::Builder::setChunkIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void NvBlastSupportGraph::Builder::setChunkIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::initChunkIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void NvBlastSupportGraph::Builder::adoptChunkIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> NvBlastSupportGraph::Builder::disownChunkIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool NvBlastSupportGraph::Reader::hasAdjacencyPartition() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastSupportGraph::Builder::hasAdjacencyPartition() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader NvBlastSupportGraph::Reader::getAdjacencyPartition() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::getAdjacencyPartition() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void NvBlastSupportGraph::Builder::setAdjacencyPartition( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline void NvBlastSupportGraph::Builder::setAdjacencyPartition(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::initAdjacencyPartition(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void NvBlastSupportGraph::Builder::adoptAdjacencyPartition( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( 
::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> NvBlastSupportGraph::Builder::disownAdjacencyPartition() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline bool NvBlastSupportGraph::Reader::hasAdjacentNodeIndices() const { return !_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastSupportGraph::Builder::hasAdjacentNodeIndices() { return !_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader NvBlastSupportGraph::Reader::getAdjacentNodeIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::getAdjacentNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline void NvBlastSupportGraph::Builder::setAdjacentNodeIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline void NvBlastSupportGraph::Builder::setAdjacentNodeIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::initAdjacentNodeIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), size); } inline void NvBlastSupportGraph::Builder::adoptAdjacentNodeIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> NvBlastSupportGraph::Builder::disownAdjacentNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline bool NvBlastSupportGraph::Reader::hasAdjacentBondIndices() const { return !_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastSupportGraph::Builder::hasAdjacentBondIndices() { return !_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader NvBlastSupportGraph::Reader::getAdjacentBondIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::getAdjacentBondIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline void NvBlastSupportGraph::Builder::setAdjacentBondIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline void 
NvBlastSupportGraph::Builder::setAdjacentBondIndices(::kj::ArrayPtr<const ::uint32_t> value) {
  ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField(
      ::capnp::bounded<3>() * ::capnp::POINTERS), value);
}
inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::initAdjacentBondIndices(unsigned int size) {
  return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField(
      ::capnp::bounded<3>() * ::capnp::POINTERS), size);
}
inline void NvBlastSupportGraph::Builder::adoptAdjacentBondIndices(
    ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) {
  ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField(
      ::capnp::bounded<3>() * ::capnp::POINTERS), kj::mv(value));
}
inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> NvBlastSupportGraph::Builder::disownAdjacentBondIndices() {
  return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField(
      ::capnp::bounded<3>() * ::capnp::POINTERS));
}

inline bool UUID::Reader::hasValue() const {
  return !_reader.getPointerField(
      ::capnp::bounded<0>() * ::capnp::POINTERS).isNull();
}
inline bool UUID::Builder::hasValue() {
  return !_builder.getPointerField(
      ::capnp::bounded<0>() * ::capnp::POINTERS).isNull();
}
inline ::capnp::Data::Reader UUID::Reader::getValue() const {
  return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_reader.getPointerField(
      ::capnp::bounded<0>() * ::capnp::POINTERS));
}
inline ::capnp::Data::Builder UUID::Builder::getValue() {
  return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_builder.getPointerField(
      ::capnp::bounded<0>() * ::capnp::POINTERS));
}
inline void UUID::Builder::setValue( ::capnp::Data::Reader value) {
  ::capnp::_::PointerHelpers< ::capnp::Data>::set(_builder.getPointerField(
      ::capnp::bounded<0>() * ::capnp::POINTERS), value);
}
inline ::capnp::Data::Builder UUID::Builder::initValue(unsigned int size) {
  return ::capnp::_::PointerHelpers< ::capnp::Data>::init(_builder.getPointerField(
      ::capnp::bounded<0>() * ::capnp::POINTERS), size);
}
inline void UUID::Builder::adoptValue( ::capnp::Orphan< ::capnp::Data>&& value) {
  ::capnp::_::PointerHelpers< ::capnp::Data>::adopt(_builder.getPointerField(
      ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value));
}
inline ::capnp::Orphan< ::capnp::Data> UUID::Builder::disownValue() {
  return ::capnp::_::PointerHelpers< ::capnp::Data>::disown(_builder.getPointerField(
      ::capnp::bounded<0>() * ::capnp::POINTERS));
}

}  // namespace Serialization
}  // namespace Blast
}  // namespace Nv

#endif // CAPNP_INCLUDED_9a4a58fac38375e0_
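
// --- Illustrative usage sketch (not part of the generated header) -----------
// A minimal example of driving the Reader/Builder accessors declared above
// with the Cap'n Proto runtime. It assumes <capnp/message.h> is available and
// that this generated header has already been included; exampleFamilyRoundTrip
// is a hypothetical helper name used only for illustration, not a Blast SDK API.
#include <capnp/message.h>

inline float exampleFamilyRoundTrip()
{
    // Build a Family message in memory.
    ::capnp::MallocMessageBuilder message;
    auto family = message.initRoot< ::Nv::Blast::Serialization::Family>();

    // Plain data fields use simple set/get accessors.
    family.setActorCount(1);
    family.setChunkCount(4);

    // List fields are allocated with init<Field>(size) and filled element-wise.
    auto healths = family.initLowerSupportChunkHealths(4);
    for (unsigned int i = 0; i < healths.size(); ++i)
        healths.set(i, 1.0f);

    // Nested struct fields are created with init<Field>() and return a Builder.
    auto graph = family.initFamilyGraph();
    graph.setNodeCount(4);

    // Read the data back through the Reader interface; has<Field>() reports
    // whether a pointer field was ever set.
    auto reader = family.asReader();
    return reader.hasLowerSupportChunkHealths()
        ? reader.getLowerSupportChunkHealths()[0]
        : 0.0f;
}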