// File: LiDAR2LiDAR-master/geotransformer/geotransformer/extensions/cpu/radius_neighbors/radius_neighbors.h
#pragma once
#include "../../common/torch_helper.h"
at::Tensor radius_neighbors(
at::Tensor q_points,
at::Tensor s_points,
at::Tensor q_lengths,
at::Tensor s_lengths,
float radius
);
// File: LiDAR2LiDAR-master/geotransformer/geotransformer/extensions/cpu/radius_neighbors/radius_neighbors_cpu.cpp
#include "radius_neighbors_cpu.h"
// Batched radius search in "stacked" format: q_points / s_points hold the
// query / support points of all batches concatenated, with per-batch counts
// in q_lengths / s_lengths. For every query, the indices of all supports
// within `radius` are written to neighbor_indices as a flattened
// (num_queries x max_count) table, padded with s_points.size().
void radius_neighbors_cpu(
std::vector<PointXYZ>& q_points,
std::vector<PointXYZ>& s_points,
std::vector<long>& q_lengths,
std::vector<long>& s_lengths,
std::vector<long>& neighbor_indices,
float radius
) {
std::size_t i0 = 0;
float r2 = radius * radius;
std::size_t max_count = 0;
std::vector<std::vector<std::pair<std::size_t, float>>> all_inds_dists(
q_points.size()
);
std::size_t b = 0;
std::size_t q_start_index = 0;
std::size_t s_start_index = 0;
PointCloud current_cloud;
current_cloud.pts = std::vector<PointXYZ>(
s_points.begin() + s_start_index,
s_points.begin() + s_start_index + s_lengths[b]
);
nanoflann::KDTreeSingleIndexAdaptorParams tree_params(10);
my_kd_tree_t* index = new my_kd_tree_t(3, current_cloud, tree_params);
index->buildIndex();
nanoflann::SearchParams search_params;
search_params.sorted = true;
// First pass: per-query radius search, tracking the largest match count.
for (auto& p0 : q_points) {
// Crossed into the next batch: rebuild the KD-tree over its support points.
if (i0 == q_start_index + q_lengths[b]) {
q_start_index += q_lengths[b];
s_start_index += s_lengths[b];
b++;
current_cloud.pts.clear();
current_cloud.pts = std::vector<PointXYZ>(
s_points.begin() + s_start_index,
s_points.begin() + s_start_index + s_lengths[b]
);
delete index;
index = new my_kd_tree_t(3, current_cloud, tree_params);
index->buildIndex();
}
all_inds_dists[i0].reserve(max_count);
float query_pt[3] = {p0.x, p0.y, p0.z};
std::size_t nMatches = index->radiusSearch(
query_pt, r2, all_inds_dists[i0], search_params
);
if (nMatches > max_count) {
max_count = nMatches;
}
i0++;
}
delete index;
neighbor_indices.resize(q_points.size() * max_count);
i0 = 0;
s_start_index = 0;
q_start_index = 0;
b = 0;
// Second pass: write the padded (num_queries x max_count) index table.
for (auto& inds_dists : all_inds_dists) {
if (i0 == q_start_index + q_lengths[b]) {
q_start_index += q_lengths[b];
s_start_index += s_lengths[b];
b++;
}
for (std::size_t j = 0; j < max_count; j++) {
std::size_t i = i0 * max_count + j;
if (j < inds_dists.size()) {
neighbor_indices[i] = inds_dists[j].first + s_start_index;
} else {
neighbor_indices[i] = s_points.size();
}
}
i0++;
}
}
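// Usage sketch (not part of the original file): calling the batched CPU
// search directly. Two single-query batches share one stacked support array;
// neighbor slots beyond a query's own match count are padded with the
// sentinel index s_points.size(). The sample values are illustrative only.
static void radius_neighbors_cpu_example() {
  std::vector<PointXYZ> q_points = {PointXYZ(0.f, 0.f, 0.f),
                                    PointXYZ(5.f, 0.f, 0.f)};
  std::vector<PointXYZ> s_points = {PointXYZ(0.1f, 0.f, 0.f),
                                    PointXYZ(0.2f, 0.f, 0.f),
                                    PointXYZ(5.1f, 0.f, 0.f)};
  std::vector<long> q_lengths = {1, 1};  // queries per batch
  std::vector<long> s_lengths = {2, 1};  // supports per batch
  std::vector<long> neighbor_indices;
  radius_neighbors_cpu(q_points, s_points, q_lengths, s_lengths,
                       neighbor_indices, 0.5f);
  // neighbor_indices is a flattened (2 x max_count) table: row 0 holds the
  // two supports of batch 0; row 1 holds global index 2 plus the sentinel 3.
}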
// File: LiDAR2LiDAR-master/geotransformer/geotransformer/extensions/cpu/radius_neighbors/radius_neighbors_cpu.h
#pragma once
#include <vector>
#include "../../extra/cloud/cloud.h"
#include "../../extra/nanoflann/nanoflann.hpp"
typedef nanoflann::KDTreeSingleIndexAdaptor<
nanoflann::L2_Simple_Adaptor<float, PointCloud>, PointCloud, 3
> my_kd_tree_t;
void radius_neighbors_cpu(
std::vector<PointXYZ>& q_points,
std::vector<PointXYZ>& s_points,
std::vector<long>& q_lengths,
std::vector<long>& s_lengths,
std::vector<long>& neighbor_indices,
float radius
);
// File: LiDAR2LiDAR-master/geotransformer/geotransformer/extensions/extra/cloud/cloud.cpp
// Modified from https://github.com/HuguesTHOMAS/KPConv-PyTorch
#include "cloud.h"
#include "cloud.h"
PointXYZ max_point(std::vector<PointXYZ> points) {
PointXYZ maxP(points[0]);
for (auto p : points) {
if (p.x > maxP.x) {
maxP.x = p.x;
}
if (p.y > maxP.y) {
maxP.y = p.y;
}
if (p.z > maxP.z) {
maxP.z = p.z;
}
}
return maxP;
}
PointXYZ min_point(std::vector<PointXYZ> points) {
PointXYZ minP(points[0]);
for (auto p : points) {
if (p.x < minP.x) {
minP.x = p.x;
}
if (p.y < minP.y) {
minP.y = p.y;
}
if (p.z < minP.z) {
minP.z = p.z;
}
}
return minP;
}
// File: LiDAR2LiDAR-master/geotransformer/geotransformer/extensions/extra/cloud/cloud.h
// Modified from https://github.com/HuguesTHOMAS/KPConv-PyTorch
#pragma once
#include <vector>
#include <unordered_map>
#include <map>
#include <algorithm>
#include <numeric>
#include <iostream>
#include <iomanip>
#include <cmath>
#include <time.h>
class PointXYZ {
public:
float x, y, z;
PointXYZ() {
x = 0;
y = 0;
z = 0;
}
PointXYZ(float x0, float y0, float z0) {
x = x0;
y = y0;
z = z0;
}
float operator [] (int i) const {
if (i == 0) {
return x;
}
else if (i == 1) {
return y;
}
else {
return z;
}
}
float dot(const PointXYZ P) const {
return x * P.x + y * P.y + z * P.z;
}
float sq_norm() {
return x * x + y * y + z * z;
}
PointXYZ cross(const PointXYZ P) const {
return PointXYZ(y * P.z - z * P.y, z * P.x - x * P.z, x * P.y - y * P.x);
}
PointXYZ& operator+=(const PointXYZ& P) {
x += P.x;
y += P.y;
z += P.z;
return *this;
}
PointXYZ& operator-=(const PointXYZ& P) {
x -= P.x;
y -= P.y;
z -= P.z;
return *this;
}
PointXYZ& operator*=(const float& a) {
x *= a;
y *= a;
z *= a;
return *this;
}
};
inline PointXYZ operator + (const PointXYZ A, const PointXYZ B) {
return PointXYZ(A.x + B.x, A.y + B.y, A.z + B.z);
}
inline PointXYZ operator - (const PointXYZ A, const PointXYZ B) {
return PointXYZ(A.x - B.x, A.y - B.y, A.z - B.z);
}
inline PointXYZ operator * (const PointXYZ P, const float a) {
return PointXYZ(P.x * a, P.y * a, P.z * a);
}
inline PointXYZ operator * (const float a, const PointXYZ P) {
return PointXYZ(P.x * a, P.y * a, P.z * a);
}
inline std::ostream& operator << (std::ostream& os, const PointXYZ P) {
return os << "[" << P.x << ", " << P.y << ", " << P.z << "]";
}
inline bool operator == (const PointXYZ A, const PointXYZ B) {
return A.x == B.x && A.y == B.y && A.z == B.z;
}
inline PointXYZ floor(const PointXYZ P) {
return PointXYZ(std::floor(P.x), std::floor(P.y), std::floor(P.z));
}
PointXYZ max_point(std::vector<PointXYZ> points);
PointXYZ min_point(std::vector<PointXYZ> points);
struct PointCloud {
std::vector<PointXYZ> pts;
inline size_t kdtree_get_point_count() const {
return pts.size();
}
// Returns the dim'th component of the idx'th point in the class:
// Since this is inlined and the "dim" argument is typically an immediate value, the
// "if/else's" are actually solved at compile time.
inline float kdtree_get_pt(const size_t idx, const size_t dim) const {
if (dim == 0) {
return pts[idx].x;
}
else if (dim == 1) {
return pts[idx].y;
}
else {
return pts[idx].z;
}
}
// Optional bounding-box computation: return false to default to a standard bbox computation loop.
// Return true if the BBOX was already computed by the class and returned in "bb" so it can be avoided to redo it again.
// Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3 for point clouds)
template <class BBOX>
bool kdtree_get_bbox(BBOX& /* bb */) const {
return false;
}
};
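// Example sketch (not part of the original header): exercising the adaptor
// interface that nanoflann calls on PointCloud; see radius_neighbors_cpu.h
// for the KD-tree typedef built on top of it.
static void point_cloud_adaptor_example() {
  PointCloud cloud;
  cloud.pts.push_back(PointXYZ(1.0f, 2.0f, 3.0f));
  size_t n = cloud.kdtree_get_point_count();  // 1
  float y = cloud.kdtree_get_pt(0, 1);        // 2.0f: dim 1 is the y component
  (void)n;
  (void)y;
}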
// File: LiDAR2LiDAR-master/geotransformer/geotransformer/extensions/extra/nanoflann/nanoflann.hpp
/***********************************************************************
* Software License Agreement (BSD License)
*
* Copyright 2008-2009 Marius Muja ([email protected]). All rights reserved.
* Copyright 2008-2009 David G. Lowe ([email protected]). All rights reserved.
* Copyright 2011-2016 Jose Luis Blanco ([email protected]).
* All rights reserved.
*
* THE BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*************************************************************************/
/** \mainpage nanoflann C++ API documentation
* nanoflann is a C++ header-only library for building KD-Trees, mostly
* optimized for 2D or 3D point clouds.
*
* nanoflann does not require compiling or installing, just an
* #include <nanoflann.hpp> in your code.
*
* See:
* - <a href="modules.html" >C++ API organized by modules</a>
* - <a href="https://github.com/jlblancoc/nanoflann" >Online README</a>
* - <a href="http://jlblancoc.github.io/nanoflann/" >Doxygen
* documentation</a>
*/
#ifndef NANOFLANN_HPP_
#define NANOFLANN_HPP_
#include <algorithm>
#include <array>
#include <cassert>
#include <cmath> // for abs()
#include <cstdio> // for fwrite()
#include <cstdlib> // for abs()
#include <functional>
#include <limits> // for std::numeric_limits
#include <stdexcept>
#include <vector>
/** Library version: 0xMmP (M=Major,m=minor,P=patch) */
#define NANOFLANN_VERSION 0x130
// Avoid conflicting declaration of min/max macros in windows headers
#if !defined(NOMINMAX) && \
(defined(_WIN32) || defined(_WIN32_) || defined(WIN32) || defined(_WIN64))
#define NOMINMAX
#ifdef max
#undef max
#undef min
#endif
#endif
namespace nanoflann {
/** @addtogroup nanoflann_grp nanoflann C++ library for ANN
* @{ */
/** the PI constant (required to avoid MSVC missing symbols) */
template <typename T> T pi_const() {
return static_cast<T>(3.14159265358979323846);
}
/**
* Traits to detect whether an object is resizable and assignable (typically
* has resize / assign methods)
*/
template <typename T, typename = int> struct has_resize : std::false_type {};
template <typename T>
struct has_resize<T, decltype((void)std::declval<T>().resize(1), 0)>
: std::true_type {};
template <typename T, typename = int> struct has_assign : std::false_type {};
template <typename T>
struct has_assign<T, decltype((void)std::declval<T>().assign(1, 0), 0)>
: std::true_type {};
/**
* Free function to resize a resizable object
*/
template <typename Container>
inline typename std::enable_if<has_resize<Container>::value, void>::type
resize(Container &c, const size_t nElements) {
c.resize(nElements);
}
/**
* Free function that has no effect on non-resizable containers (e.g.
* std::array). It raises an exception if the expected size does not match.
*/
template <typename Container>
inline typename std::enable_if<!has_resize<Container>::value, void>::type
resize(Container &c, const size_t nElements) {
if (nElements != c.size())
throw std::logic_error("Try to change the size of a std::array.");
}
/**
* Free function to assign to a container
*/
template <typename Container, typename T>
inline typename std::enable_if<has_assign<Container>::value, void>::type
assign(Container &c, const size_t nElements, const T &value) {
c.assign(nElements, value);
}
/**
* Free function to assign to a std::array
*/
template <typename Container, typename T>
inline typename std::enable_if<!has_assign<Container>::value, void>::type
assign(Container &c, const size_t nElements, const T &value) {
for (size_t i = 0; i < nElements; i++)
c[i] = value;
}
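// Example sketch (not part of the original header): the has_resize/has_assign
// traits above let the same call work for both resizable and fixed-size
// containers, which is how BoundingBox can be either std::vector or
// std::array depending on DIM.
static void resize_assign_example() {
  std::vector<float> v;
  resize(v, 3);       // dispatches to v.resize(3)
  assign(v, 3, 1.f);  // dispatches to v.assign(3, 1.f)
  std::array<float, 3> a;
  resize(a, 3);       // no-op; throws std::logic_error if sizes mismatch
  assign(a, 3, 1.f);  // element-wise fill
}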
/** @addtogroup result_sets_grp Result set classes
* @{ */
template <typename _DistanceType, typename _IndexType = size_t,
typename _CountType = size_t>
class KNNResultSet {
public:
typedef _DistanceType DistanceType;
typedef _IndexType IndexType;
typedef _CountType CountType;
private:
IndexType *indices;
DistanceType *dists;
CountType capacity;
CountType count;
public:
inline KNNResultSet(CountType capacity_)
: indices(0), dists(0), capacity(capacity_), count(0) {}
inline void init(IndexType *indices_, DistanceType *dists_) {
indices = indices_;
dists = dists_;
count = 0;
if (capacity)
dists[capacity - 1] = (std::numeric_limits<DistanceType>::max)();
}
inline CountType size() const { return count; }
inline bool full() const { return count == capacity; }
/**
* Called during search to add an element matching the criteria.
* @return true if the search should be continued, false if the results are
* sufficient
*/
inline bool addPoint(DistanceType dist, IndexType index) {
CountType i;
for (i = count; i > 0; --i) {
#ifdef NANOFLANN_FIRST_MATCH // If defined and two points have the same
// distance, the one with the lowest-index will be
// returned first.
if ((dists[i - 1] > dist) ||
((dist == dists[i - 1]) && (indices[i - 1] > index))) {
#else
if (dists[i - 1] > dist) {
#endif
if (i < capacity) {
dists[i] = dists[i - 1];
indices[i] = indices[i - 1];
}
} else
break;
}
if (i < capacity) {
dists[i] = dist;
indices[i] = index;
}
if (count < capacity)
count++;
// tell caller that the search shall continue
return true;
}
inline DistanceType worstDist() const { return dists[capacity - 1]; }
};
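// Example sketch (not part of the original header): KNNResultSet keeps the k
// best (distance, index) pairs in caller-owned arrays, ordered by ascending
// distance as points are added.
static void knn_result_set_example() {
  size_t indices[2];
  float dists[2];
  KNNResultSet<float> resultSet(2);
  resultSet.init(indices, dists);
  resultSet.addPoint(4.f, 7);  // dists = {4}
  resultSet.addPoint(1.f, 3);  // dists = {1, 4}: shifted into place
  resultSet.addPoint(9.f, 5);  // dropped: worse than worstDist() == 4
  // indices == {3, 7}, resultSet.size() == 2
}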
/** operator "<" for std::sort() */
struct IndexDist_Sorter {
/** PairType will be typically: std::pair<IndexType,DistanceType> */
template <typename PairType>
inline bool operator()(const PairType &p1, const PairType &p2) const {
return p1.second < p2.second;
}
};
/**
* A result-set class used when performing a radius based search.
*/
template <typename _DistanceType, typename _IndexType = size_t>
class RadiusResultSet {
public:
typedef _DistanceType DistanceType;
typedef _IndexType IndexType;
public:
const DistanceType radius;
std::vector<std::pair<IndexType, DistanceType>> &m_indices_dists;
inline RadiusResultSet(
DistanceType radius_,
std::vector<std::pair<IndexType, DistanceType>> &indices_dists)
: radius(radius_), m_indices_dists(indices_dists) {
init();
}
inline void init() { clear(); }
inline void clear() { m_indices_dists.clear(); }
inline size_t size() const { return m_indices_dists.size(); }
inline bool full() const { return true; }
/**
* Called during search to add an element matching the criteria.
* @return true if the search should be continued, false if the results are
* sufficient
*/
inline bool addPoint(DistanceType dist, IndexType index) {
if (dist < radius)
m_indices_dists.push_back(std::make_pair(index, dist));
return true;
}
inline DistanceType worstDist() const { return radius; }
/**
* Find the worst result (furthest neighbor) without copying or sorting
* Pre-conditions: size() > 0
*/
std::pair<IndexType, DistanceType> worst_item() const {
if (m_indices_dists.empty())
throw std::runtime_error("Cannot invoke RadiusResultSet::worst_item() on "
"an empty list of results.");
typedef
typename std::vector<std::pair<IndexType, DistanceType>>::const_iterator
DistIt;
DistIt it = std::max_element(m_indices_dists.begin(), m_indices_dists.end(),
IndexDist_Sorter());
return *it;
}
};
/** @} */
/** @addtogroup loadsave_grp Load/save auxiliary functions
* @{ */
template <typename T>
void save_value(FILE *stream, const T &value, size_t count = 1) {
fwrite(&value, sizeof(value), count, stream);
}
template <typename T>
void save_value(FILE *stream, const std::vector<T> &value) {
size_t size = value.size();
fwrite(&size, sizeof(size_t), 1, stream);
fwrite(&value[0], sizeof(T), size, stream);
}
template <typename T>
void load_value(FILE *stream, T &value, size_t count = 1) {
size_t read_cnt = fread(&value, sizeof(value), count, stream);
if (read_cnt != count) {
throw std::runtime_error("Cannot read from file");
}
}
template <typename T> void load_value(FILE *stream, std::vector<T> &value) {
size_t size;
size_t read_cnt = fread(&size, sizeof(size_t), 1, stream);
if (read_cnt != 1) {
throw std::runtime_error("Cannot read from file");
}
value.resize(size);
read_cnt = fread(&value[0], sizeof(T), size, stream);
if (read_cnt != size) {
throw std::runtime_error("Cannot read from file");
}
}
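// Example sketch (not part of the original header): the vector overloads
// write a raw size_t length followed by the elements, so a vector round-trips
// through any seekable FILE* (std::tmpfile() here is assumed to succeed).
static void save_load_example() {
  FILE *f = std::tmpfile();
  std::vector<int> out = {1, 2, 3}, in;
  save_value(f, out);
  std::rewind(f);
  load_value(f, in);  // in == {1, 2, 3}
  std::fclose(f);
}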
/** @} */
/** @addtogroup metric_grp Metric (distance) classes
* @{ */
struct Metric {};
/** Manhattan distance functor (generic version, optimized for
* high-dimensionality data sets).
* Corresponding distance traits: nanoflann::metric_L1
* \tparam T Type of the elements (e.g. double, float, uint8_t)
* \tparam _DistanceType Type of distance variables (must be signed)
* (e.g. float, double, int64_t)
*/
template <class T, class DataSource, typename _DistanceType = T>
struct L1_Adaptor {
typedef T ElementType;
typedef _DistanceType DistanceType;
const DataSource &data_source;
L1_Adaptor(const DataSource &_data_source) : data_source(_data_source) {}
inline DistanceType evalMetric(const T *a, const size_t b_idx, size_t size,
DistanceType worst_dist = -1) const {
DistanceType result = DistanceType();
const T *last = a + size;
const T *lastgroup = last - 3;
size_t d = 0;
/* Process 4 items with each loop for efficiency. */
while (a < lastgroup) {
const DistanceType diff0 =
std::abs(a[0] - data_source.kdtree_get_pt(b_idx, d++));
const DistanceType diff1 =
std::abs(a[1] - data_source.kdtree_get_pt(b_idx, d++));
const DistanceType diff2 =
std::abs(a[2] - data_source.kdtree_get_pt(b_idx, d++));
const DistanceType diff3 =
std::abs(a[3] - data_source.kdtree_get_pt(b_idx, d++));
result += diff0 + diff1 + diff2 + diff3;
a += 4;
if ((worst_dist > 0) && (result > worst_dist)) {
return result;
}
}
/* Process last 0-3 components. Not needed for standard vector lengths. */
while (a < last) {
result += std::abs(*a++ - data_source.kdtree_get_pt(b_idx, d++));
}
return result;
}
template <typename U, typename V>
inline DistanceType accum_dist(const U a, const V b, const size_t) const {
return std::abs(a - b);
}
};
/** Squared Euclidean distance functor (generic version, optimized for
* high-dimensionality data sets).
* Corresponding distance traits: nanoflann::metric_L2
* \tparam T Type of the elements (e.g. double, float, uint8_t)
* \tparam _DistanceType Type of distance variables (must be signed)
* (e.g. float, double, int64_t)
*/
template <class T, class DataSource, typename _DistanceType = T>
struct L2_Adaptor {
typedef T ElementType;
typedef _DistanceType DistanceType;
const DataSource &data_source;
L2_Adaptor(const DataSource &_data_source) : data_source(_data_source) {}
inline DistanceType evalMetric(const T *a, const size_t b_idx, size_t size,
DistanceType worst_dist = -1) const {
DistanceType result = DistanceType();
const T *last = a + size;
const T *lastgroup = last - 3;
size_t d = 0;
/* Process 4 items with each loop for efficiency. */
while (a < lastgroup) {
const DistanceType diff0 = a[0] - data_source.kdtree_get_pt(b_idx, d++);
const DistanceType diff1 = a[1] - data_source.kdtree_get_pt(b_idx, d++);
const DistanceType diff2 = a[2] - data_source.kdtree_get_pt(b_idx, d++);
const DistanceType diff3 = a[3] - data_source.kdtree_get_pt(b_idx, d++);
result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3;
a += 4;
if ((worst_dist > 0) && (result > worst_dist)) {
return result;
}
}
/* Process last 0-3 components. Not needed for standard vector lengths. */
while (a < last) {
const DistanceType diff0 = *a++ - data_source.kdtree_get_pt(b_idx, d++);
result += diff0 * diff0;
}
return result;
}
template <typename U, typename V>
inline DistanceType accum_dist(const U a, const V b, const size_t) const {
return (a - b) * (a - b);
}
};
/** Squared Euclidean (L2) distance functor (suitable for low-dimensionality
* datasets, like 2D or 3D point clouds).
* Corresponding distance traits: nanoflann::metric_L2_Simple
* \tparam T Type of the elements (e.g. double, float, uint8_t)
* \tparam _DistanceType Type of distance variables (must be signed)
* (e.g. float, double, int64_t)
*/
template <class T, class DataSource, typename _DistanceType = T>
struct L2_Simple_Adaptor {
typedef T ElementType;
typedef _DistanceType DistanceType;
const DataSource &data_source;
L2_Simple_Adaptor(const DataSource &_data_source)
: data_source(_data_source) {}
inline DistanceType evalMetric(const T *a, const size_t b_idx,
size_t size) const {
DistanceType result = DistanceType();
for (size_t i = 0; i < size; ++i) {
const DistanceType diff = a[i] - data_source.kdtree_get_pt(b_idx, i);
result += diff * diff;
}
return result;
}
template <typename U, typename V>
inline DistanceType accum_dist(const U a, const V b, const size_t) const {
return (a - b) * (a - b);
}
};
/** SO2 distance functor
* Corresponding distance traits: nanoflann::metric_SO2
* \tparam T Type of the elements (e.g. double, float)
* \tparam _DistanceType Type of distance variables (must be signed) (e.g.
* float, double) orientation is constrained to be in [-pi, pi]
*/
template <class T, class DataSource, typename _DistanceType = T>
struct SO2_Adaptor {
typedef T ElementType;
typedef _DistanceType DistanceType;
const DataSource &data_source;
SO2_Adaptor(const DataSource &_data_source) : data_source(_data_source) {}
inline DistanceType evalMetric(const T *a, const size_t b_idx,
size_t size) const {
return accum_dist(a[size - 1], data_source.kdtree_get_pt(b_idx, size - 1),
size - 1);
}
/** Note: this assumes that input angles are already in the range [-pi,pi] */
template <typename U, typename V>
inline DistanceType accum_dist(const U a, const V b, const size_t) const {
DistanceType result = DistanceType(), PI = pi_const<DistanceType>();
result = b - a;
if (result > PI)
result -= 2 * PI;
else if (result < -PI)
result += 2 * PI;
return result;
}
};
/** SO3 distance functor (Uses L2_Simple)
* Corresponding distance traits: nanoflann::metric_SO3
* \tparam T Type of the elements (e.g. double, float)
* \tparam _DistanceType Type of distance variables (must be signed) (e.g.
* float, double)
*/
template <class T, class DataSource, typename _DistanceType = T>
struct SO3_Adaptor {
typedef T ElementType;
typedef _DistanceType DistanceType;
L2_Simple_Adaptor<T, DataSource> distance_L2_Simple;
SO3_Adaptor(const DataSource &_data_source)
: distance_L2_Simple(_data_source) {}
inline DistanceType evalMetric(const T *a, const size_t b_idx,
size_t size) const {
return distance_L2_Simple.evalMetric(a, b_idx, size);
}
template <typename U, typename V>
inline DistanceType accum_dist(const U a, const V b, const size_t idx) const {
return distance_L2_Simple.accum_dist(a, b, idx);
}
};
/** Metaprogramming helper traits class for the L1 (Manhattan) metric */
struct metric_L1 : public Metric {
template <class T, class DataSource> struct traits {
typedef L1_Adaptor<T, DataSource> distance_t;
};
};
/** Metaprogramming helper traits class for the L2 (Euclidean) metric */
struct metric_L2 : public Metric {
template <class T, class DataSource> struct traits {
typedef L2_Adaptor<T, DataSource> distance_t;
};
};
/** Metaprogramming helper traits class for the L2_simple (Euclidean) metric */
struct metric_L2_Simple : public Metric {
template <class T, class DataSource> struct traits {
typedef L2_Simple_Adaptor<T, DataSource> distance_t;
};
};
/** Metaprogramming helper traits class for the SO3_InnerProdQuat metric */
struct metric_SO2 : public Metric {
template <class T, class DataSource> struct traits {
typedef SO2_Adaptor<T, DataSource> distance_t;
};
};
/** Metaprogramming helper traits class for the SO3_InnerProdQuat metric */
struct metric_SO3 : public Metric {
template <class T, class DataSource> struct traits {
typedef SO3_Adaptor<T, DataSource> distance_t;
};
};
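// Example sketch (not part of the original header): the traits classes above
// only map an (element type, dataset adaptor) pair to the matching distance
// functor, e.g. for a hypothetical adaptor type MyCloud:
//   typedef metric_L2_Simple::traits<float, MyCloud>::distance_t distance_t;
//   // distance_t == L2_Simple_Adaptor<float, MyCloud>
// This indirection lets generic wrapper code accept a metric tag as a single
// template parameter.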
/** @} */
/** @addtogroup param_grp Parameter structs
* @{ */
/** Parameters (see README.md) */
struct KDTreeSingleIndexAdaptorParams {
KDTreeSingleIndexAdaptorParams(size_t _leaf_max_size = 10)
: leaf_max_size(_leaf_max_size) {}
size_t leaf_max_size;
};
/** Search options for KDTreeSingleIndexAdaptor::findNeighbors() */
struct SearchParams {
/** Note: The first argument (checks_IGNORED_) is ignored, but kept for
* compatibility with the FLANN interface */
SearchParams(int checks_IGNORED_ = 32, float eps_ = 0, bool sorted_ = true)
: checks(checks_IGNORED_), eps(eps_), sorted(sorted_) {}
int checks; //!< Ignored parameter (Kept for compatibility with the FLANN
//!< interface).
float eps; //!< search for eps-approximate neighbours (default: 0)
bool sorted; //!< only for radius search, require neighbours sorted by
//!< distance (default: true)
};
/** @} */
/** @addtogroup memalloc_grp Memory allocation
* @{ */
/**
* Allocates (using C's malloc) a generic type T.
*
* Params:
* count = number of instances to allocate.
* Returns: pointer (of type T*) to memory buffer
*/
template <typename T> inline T *allocate(size_t count = 1) {
T *mem = static_cast<T *>(::malloc(sizeof(T) * count));
return mem;
}
/**
* Pooled storage allocator
*
* The following routines allow for the efficient allocation of storage in
* small chunks from a specified pool. Rather than allowing each structure
* to be freed individually, an entire pool of storage is freed at once.
* This method has two advantages over just using malloc() and free(). First,
* it is far more efficient for allocating small objects, as there is
* no overhead for remembering all the information needed to free each
* object or consolidating fragmented memory. Second, the decision about
* how long to keep an object is made at the time of allocation, and there
* is no need to track down all the objects to free them.
*
*/
const size_t WORDSIZE = 16;
const size_t BLOCKSIZE = 8192;
class PooledAllocator {
/* We maintain memory alignment to word boundaries by requiring that all
allocations be in multiples of the machine wordsize. */
/* Size of machine word in bytes. Must be power of 2. */
/* Minimum number of bytes requested at a time from the system. Must be
* multiple of WORDSIZE. */
size_t remaining; /* Number of bytes left in current block of storage. */
void *base; /* Pointer to base of current block of storage. */
void *loc; /* Current location in block to next allocate memory. */
void internal_init() {
remaining = 0;
base = NULL;
usedMemory = 0;
wastedMemory = 0;
}
public:
size_t usedMemory;
size_t wastedMemory;
/**
Default constructor. Initializes a new pool.
*/
PooledAllocator() { internal_init(); }
/**
* Destructor. Frees all the memory allocated in this pool.
*/
~PooledAllocator() { free_all(); }
/** Frees all allocated memory chunks */
void free_all() {
while (base != NULL) {
void *prev =
*(static_cast<void **>(base)); /* Get pointer to prev block. */
::free(base);
base = prev;
}
internal_init();
}
/**
* Returns a pointer to a piece of new memory of the given size in bytes
* allocated from the pool.
*/
void *malloc(const size_t req_size) {
/* Round size up to a multiple of wordsize. The following expression
only works for WORDSIZE that is a power of 2, by masking last bits of
incremented size to zero.
*/
const size_t size = (req_size + (WORDSIZE - 1)) & ~(WORDSIZE - 1);
/* Check whether a new block must be allocated. Note that the first word
of a block is reserved for a pointer to the previous block.
*/
if (size > remaining) {
wastedMemory += remaining;
/* Allocate new storage. */
const size_t blocksize =
(size + sizeof(void *) + (WORDSIZE - 1) > BLOCKSIZE)
? size + sizeof(void *) + (WORDSIZE - 1)
: BLOCKSIZE;
// use the standard C malloc to allocate memory
void *m = ::malloc(blocksize);
if (!m) {
fprintf(stderr, "Failed to allocate memory.\n");
return NULL;
}
/* Fill first word of new block with pointer to previous block. */
static_cast<void **>(m)[0] = base;
base = m;
size_t shift = 0;
// int size_t = (WORDSIZE - ( (((size_t)m) + sizeof(void*)) &
// (WORDSIZE-1))) & (WORDSIZE-1);
remaining = blocksize - sizeof(void *) - shift;
loc = (static_cast<char *>(m) + sizeof(void *) + shift);
}
void *rloc = loc;
loc = static_cast<char *>(loc) + size;
remaining -= size;
usedMemory += size;
return rloc;
}
/**
* Allocates (using this pool) a generic type T.
*
* Params:
* count = number of instances to allocate.
* Returns: pointer (of type T*) to memory buffer
*/
template <typename T> T *allocate(const size_t count = 1) {
T *mem = static_cast<T *>(this->malloc(sizeof(T) * count));
return mem;
}
};
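// Example sketch (not part of the original header): allocations are carved
// out of pooled BLOCKSIZE chunks and can only be released all at once, which
// keeps per-node cost low when divideTree() builds thousands of nodes.
static void pooled_allocator_example() {
  PooledAllocator pool;
  float *a = pool.allocate<float>(64);  // triggers one ::malloc of a block
  float *b = pool.allocate<float>(64);  // served from the same block
  (void)a;
  (void)b;
  pool.free_all();  // frees every block in one pass (also done by the destructor)
}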
/** @} */
/** @addtogroup nanoflann_metaprog_grp Auxiliary metaprogramming stuff
* @{ */
/** Used to declare fixed-size arrays when DIM>0, dynamically-allocated vectors
* when DIM=-1. Fixed size version for a generic DIM:
*/
template <int DIM, typename T> struct array_or_vector_selector {
typedef std::array<T, DIM> container_t;
};
/** Dynamic size version */
template <typename T> struct array_or_vector_selector<-1, T> {
typedef std::vector<T> container_t;
};
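// Example sketch (not part of the original header): DIM > 0 yields a
// fixed-size std::array, DIM == -1 a std::vector, as used for BoundingBox and
// distance_vector_t below.
static void array_or_vector_selector_example() {
  array_or_vector_selector<3, float>::container_t fixed;     // std::array<float, 3>
  array_or_vector_selector<-1, float>::container_t dynamic;  // std::vector<float>
  fixed[0] = 0.f;
  dynamic.resize(2);
}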
/** @} */
/** kd-tree base-class
*
* Contains the member functions common to the classes KDTreeSingleIndexAdaptor
* and KDTreeSingleIndexDynamicAdaptor_.
*
* \tparam Derived The name of the class which inherits this class.
* \tparam DatasetAdaptor The user-provided adaptor (see comments above).
* \tparam Distance The distance metric to use, these are all classes derived
* from nanoflann::Metric
* \tparam DIM Dimensionality of data points (e.g. 3 for 3D points)
* \tparam IndexType Will be typically size_t or int
*/
template <class Derived, typename Distance, class DatasetAdaptor, int DIM = -1,
typename IndexType = size_t>
class KDTreeBaseClass {
public:
/** Frees the previously-built index. Automatically called within
* buildIndex(). */
void freeIndex(Derived &obj) {
obj.pool.free_all();
obj.root_node = NULL;
obj.m_size_at_index_build = 0;
}
typedef typename Distance::ElementType ElementType;
typedef typename Distance::DistanceType DistanceType;
/*--------------------- Internal Data Structures --------------------------*/
struct Node {
/** Union used because a node can be either a LEAF node or a non-leaf node,
* so both data fields are never used simultaneously */
union {
struct leaf {
IndexType left, right; //!< Indices of points in leaf node
} lr;
struct nonleaf {
int divfeat; //!< Dimension used for subdivision.
DistanceType divlow, divhigh; //!< The values used for subdivision.
} sub;
} node_type;
Node *child1, *child2; //!< Child nodes (both=NULL mean its a leaf node)
};
typedef Node *NodePtr;
struct Interval {
ElementType low, high;
};
/**
* Array of indices to vectors in the dataset.
*/
std::vector<IndexType> vind;
NodePtr root_node;
size_t m_leaf_max_size;
size_t m_size; //!< Number of current points in the dataset
size_t m_size_at_index_build; //!< Number of points in the dataset when the
//!< index was built
int dim; //!< Dimensionality of each data point
/** Define "BoundingBox" as a fixed-size or variable-size container depending
* on "DIM" */
typedef
typename array_or_vector_selector<DIM, Interval>::container_t BoundingBox;
/** Define "distance_vector_t" as a fixed-size or variable-size container
* depending on "DIM" */
typedef typename array_or_vector_selector<DIM, DistanceType>::container_t
distance_vector_t;
/** The KD-tree used to find neighbours */
BoundingBox root_bbox;
/**
* Pooled memory allocator.
*
* Using a pooled memory allocator is more efficient
* than allocating memory directly when there is a large
* number of small memory allocations.
*/
PooledAllocator pool;
/** Returns number of points in dataset */
size_t size(const Derived &obj) const { return obj.m_size; }
/** Returns the length of each point in the dataset */
size_t veclen(const Derived &obj) {
return static_cast<size_t>(DIM > 0 ? DIM : obj.dim);
}
/// Helper accessor to the dataset points:
inline ElementType dataset_get(const Derived &obj, size_t idx,
int component) const {
return obj.dataset.kdtree_get_pt(idx, component);
}
/**
* Computes the index memory usage
* Returns: memory used by the index
*/
size_t usedMemory(Derived &obj) {
return obj.pool.usedMemory + obj.pool.wastedMemory +
obj.dataset.kdtree_get_point_count() *
sizeof(IndexType); // pool memory and vind array memory
}
void computeMinMax(const Derived &obj, IndexType *ind, IndexType count,
int element, ElementType &min_elem,
ElementType &max_elem) {
min_elem = dataset_get(obj, ind[0], element);
max_elem = dataset_get(obj, ind[0], element);
for (IndexType i = 1; i < count; ++i) {
ElementType val = dataset_get(obj, ind[i], element);
if (val < min_elem)
min_elem = val;
if (val > max_elem)
max_elem = val;
}
}
/**
* Create a tree node that subdivides the list of vecs from vind[first]
* to vind[last]. The routine is called recursively on each sublist.
*
* @param left index of the first vector
* @param right index of the last vector
*/
NodePtr divideTree(Derived &obj, const IndexType left, const IndexType right,
BoundingBox &bbox) {
NodePtr node = obj.pool.template allocate<Node>(); // allocate memory
/* If too few exemplars remain, then make this a leaf node. */
if ((right - left) <= static_cast<IndexType>(obj.m_leaf_max_size)) {
node->child1 = node->child2 = NULL; /* Mark as leaf node. */
node->node_type.lr.left = left;
node->node_type.lr.right = right;
// compute bounding-box of leaf points
for (int i = 0; i < (DIM > 0 ? DIM : obj.dim); ++i) {
bbox[i].low = dataset_get(obj, obj.vind[left], i);
bbox[i].high = dataset_get(obj, obj.vind[left], i);
}
for (IndexType k = left + 1; k < right; ++k) {
for (int i = 0; i < (DIM > 0 ? DIM : obj.dim); ++i) {
if (bbox[i].low > dataset_get(obj, obj.vind[k], i))
bbox[i].low = dataset_get(obj, obj.vind[k], i);
if (bbox[i].high < dataset_get(obj, obj.vind[k], i))
bbox[i].high = dataset_get(obj, obj.vind[k], i);
}
}
} else {
IndexType idx;
int cutfeat;
DistanceType cutval;
middleSplit_(obj, &obj.vind[0] + left, right - left, idx, cutfeat, cutval,
bbox);
node->node_type.sub.divfeat = cutfeat;
BoundingBox left_bbox(bbox);
left_bbox[cutfeat].high = cutval;
node->child1 = divideTree(obj, left, left + idx, left_bbox);
BoundingBox right_bbox(bbox);
right_bbox[cutfeat].low = cutval;
node->child2 = divideTree(obj, left + idx, right, right_bbox);
node->node_type.sub.divlow = left_bbox[cutfeat].high;
node->node_type.sub.divhigh = right_bbox[cutfeat].low;
for (int i = 0; i < (DIM > 0 ? DIM : obj.dim); ++i) {
bbox[i].low = std::min(left_bbox[i].low, right_bbox[i].low);
bbox[i].high = std::max(left_bbox[i].high, right_bbox[i].high);
}
}
return node;
}
void middleSplit_(Derived &obj, IndexType *ind, IndexType count,
IndexType &index, int &cutfeat, DistanceType &cutval,
const BoundingBox &bbox) {
const DistanceType EPS = static_cast<DistanceType>(0.00001);
ElementType max_span = bbox[0].high - bbox[0].low;
for (int i = 1; i < (DIM > 0 ? DIM : obj.dim); ++i) {
ElementType span = bbox[i].high - bbox[i].low;
if (span > max_span) {
max_span = span;
}
}
ElementType max_spread = -1;
cutfeat = 0;
for (int i = 0; i < (DIM > 0 ? DIM : obj.dim); ++i) {
ElementType span = bbox[i].high - bbox[i].low;
if (span > (1 - EPS) * max_span) {
ElementType min_elem, max_elem;
computeMinMax(obj, ind, count, i, min_elem, max_elem);
ElementType spread = max_elem - min_elem;
if (spread > max_spread) {
cutfeat = i;
max_spread = spread;
}
}
}
// split in the middle
DistanceType split_val = (bbox[cutfeat].low + bbox[cutfeat].high) / 2;
ElementType min_elem, max_elem;
computeMinMax(obj, ind, count, cutfeat, min_elem, max_elem);
if (split_val < min_elem)
cutval = min_elem;
else if (split_val > max_elem)
cutval = max_elem;
else
cutval = split_val;
IndexType lim1, lim2;
planeSplit(obj, ind, count, cutfeat, cutval, lim1, lim2);
if (lim1 > count / 2)
index = lim1;
else if (lim2 < count / 2)
index = lim2;
else
index = count / 2;
}
/**
* Subdivide the list of points by a plane perpendicular to the axis corresponding
* to the 'cutfeat' dimension at 'cutval' position.
*
* On return:
* dataset[ind[0..lim1-1]][cutfeat]<cutval
* dataset[ind[lim1..lim2-1]][cutfeat]==cutval
* dataset[ind[lim2..count]][cutfeat]>cutval
*/
void planeSplit(Derived &obj, IndexType *ind, const IndexType count,
int cutfeat, DistanceType &cutval, IndexType &lim1,
IndexType &lim2) {
/* Move vector indices for left subtree to front of list. */
IndexType left = 0;
IndexType right = count - 1;
for (;;) {
while (left <= right && dataset_get(obj, ind[left], cutfeat) < cutval)
++left;
while (right && left <= right &&
dataset_get(obj, ind[right], cutfeat) >= cutval)
--right;
if (left > right || !right)
break; // "!right" was added to support unsigned Index types
std::swap(ind[left], ind[right]);
++left;
--right;
}
/* If either list is empty, it means that all remaining features
* are identical. Split in the middle to maintain a balanced tree.
*/
lim1 = left;
right = count - 1;
for (;;) {
while (left <= right && dataset_get(obj, ind[left], cutfeat) <= cutval)
++left;
while (right && left <= right &&
dataset_get(obj, ind[right], cutfeat) > cutval)
--right;
if (left > right || !right)
break; // "!right" was added to support unsigned Index types
std::swap(ind[left], ind[right]);
++left;
--right;
}
lim2 = left;
}
DistanceType computeInitialDistances(const Derived &obj,
const ElementType *vec,
distance_vector_t &dists) const {
assert(vec);
DistanceType distsq = DistanceType();
for (int i = 0; i < (DIM > 0 ? DIM : obj.dim); ++i) {
if (vec[i] < obj.root_bbox[i].low) {
dists[i] = obj.distance.accum_dist(vec[i], obj.root_bbox[i].low, i);
distsq += dists[i];
}
if (vec[i] > obj.root_bbox[i].high) {
dists[i] = obj.distance.accum_dist(vec[i], obj.root_bbox[i].high, i);
distsq += dists[i];
}
}
return distsq;
}
void save_tree(Derived &obj, FILE *stream, NodePtr tree) {
save_value(stream, *tree);
if (tree->child1 != NULL) {
save_tree(obj, stream, tree->child1);
}
if (tree->child2 != NULL) {
save_tree(obj, stream, tree->child2);
}
}
void load_tree(Derived &obj, FILE *stream, NodePtr &tree) {
tree = obj.pool.template allocate<Node>();
load_value(stream, *tree);
if (tree->child1 != NULL) {
load_tree(obj, stream, tree->child1);
}
if (tree->child2 != NULL) {
load_tree(obj, stream, tree->child2);
}
}
/** Stores the index in a binary file.
* IMPORTANT NOTE: The set of data points is NOT stored in the file, so when
* loading the index object it must be constructed associated to the same
* source of data points used while building it. See the example:
* examples/saveload_example.cpp \sa loadIndex */
void saveIndex_(Derived &obj, FILE *stream) {
save_value(stream, obj.m_size);
save_value(stream, obj.dim);
save_value(stream, obj.root_bbox);
save_value(stream, obj.m_leaf_max_size);
save_value(stream, obj.vind);
save_tree(obj, stream, obj.root_node);
}
/** Loads a previous index from a binary file.
* IMPORTANT NOTE: The set of data points is NOT stored in the file, so the
* index object must be constructed associated to the same source of data
* points used while building the index. See the example:
* examples/saveload_example.cpp \sa loadIndex */
void loadIndex_(Derived &obj, FILE *stream) {
load_value(stream, obj.m_size);
load_value(stream, obj.dim);
load_value(stream, obj.root_bbox);
load_value(stream, obj.m_leaf_max_size);
load_value(stream, obj.vind);
load_tree(obj, stream, obj.root_node);
}
};
/** @addtogroup kdtrees_grp KD-tree classes and adaptors
* @{ */
/** kd-tree static index
*
* Contains the k-d trees and other information for indexing a set of points
* for nearest-neighbor matching.
*
* The class "DatasetAdaptor" must provide the following interface (can be
* non-virtual, inlined methods):
*
* \code
* // Must return the number of data points
* inline size_t kdtree_get_point_count() const { ... }
*
*
* // Must return the dim'th component of the idx'th point in the class:
* inline T kdtree_get_pt(const size_t idx, const size_t dim) const { ... }
*
* // Optional bounding-box computation: return false to default to a standard
* bbox computation loop.
* // Return true if the BBOX was already computed by the class and returned
* in "bb" so it can be avoided to redo it again.
* // Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3
* for point clouds) template <class BBOX> bool kdtree_get_bbox(BBOX &bb) const
* {
* bb[0].low = ...; bb[0].high = ...; // 0th dimension limits
* bb[1].low = ...; bb[1].high = ...; // 1st dimension limits
* ...
* return true;
* }
*
* \endcode
*
* \tparam DatasetAdaptor The user-provided adaptor (see comments above).
* \tparam Distance The distance metric to use: nanoflann::metric_L1,
* nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc.
* \tparam DIM Dimensionality of data points (e.g. 3 for 3D points)
* \tparam IndexType Will be typically size_t or int
*/
template <typename Distance, class DatasetAdaptor, int DIM = -1,
typename IndexType = size_t>
class KDTreeSingleIndexAdaptor
: public KDTreeBaseClass<
KDTreeSingleIndexAdaptor<Distance, DatasetAdaptor, DIM, IndexType>,
Distance, DatasetAdaptor, DIM, IndexType> {
public:
/** Deleted copy constructor*/
KDTreeSingleIndexAdaptor(
const KDTreeSingleIndexAdaptor<Distance, DatasetAdaptor, DIM, IndexType>
&) = delete;
/**
* The dataset used by this index
*/
const DatasetAdaptor &dataset; //!< The source of our data
const KDTreeSingleIndexAdaptorParams index_params;
Distance distance;
typedef typename nanoflann::KDTreeBaseClass<
nanoflann::KDTreeSingleIndexAdaptor<Distance, DatasetAdaptor, DIM,
IndexType>,
Distance, DatasetAdaptor, DIM, IndexType>
BaseClassRef;
typedef typename BaseClassRef::ElementType ElementType;
typedef typename BaseClassRef::DistanceType DistanceType;
typedef typename BaseClassRef::Node Node;
typedef Node *NodePtr;
typedef typename BaseClassRef::Interval Interval;
/** Define "BoundingBox" as a fixed-size or variable-size container depending
* on "DIM" */
typedef typename BaseClassRef::BoundingBox BoundingBox;
/** Define "distance_vector_t" as a fixed-size or variable-size container
* depending on "DIM" */
typedef typename BaseClassRef::distance_vector_t distance_vector_t;
/**
* KDTree constructor
*
* Refer to docs in README.md or online in
* https://github.com/jlblancoc/nanoflann
*
* The KD-Tree point dimension (the length of each point in the dataset, e.g. 3
* for 3D points) is determined by means of:
* - The \a DIM template parameter if >0 (highest priority)
* - Otherwise, the \a dimensionality parameter of this constructor.
*
* @param inputData Dataset with the input features
* @param params Basically, the maximum leaf node size
*/
KDTreeSingleIndexAdaptor(const int dimensionality,
const DatasetAdaptor &inputData,
const KDTreeSingleIndexAdaptorParams ¶ms =
KDTreeSingleIndexAdaptorParams())
: dataset(inputData), index_params(params), distance(inputData) {
BaseClassRef::root_node = NULL;
BaseClassRef::m_size = dataset.kdtree_get_point_count();
BaseClassRef::m_size_at_index_build = BaseClassRef::m_size;
BaseClassRef::dim = dimensionality;
if (DIM > 0)
BaseClassRef::dim = DIM;
BaseClassRef::m_leaf_max_size = params.leaf_max_size;
// Create a permutable array of indices to the input vectors.
init_vind();
}
/**
* Builds the index
*/
void buildIndex() {
BaseClassRef::m_size = dataset.kdtree_get_point_count();
BaseClassRef::m_size_at_index_build = BaseClassRef::m_size;
init_vind();
this->freeIndex(*this);
BaseClassRef::m_size_at_index_build = BaseClassRef::m_size;
if (BaseClassRef::m_size == 0)
return;
computeBoundingBox(BaseClassRef::root_bbox);
BaseClassRef::root_node =
this->divideTree(*this, 0, BaseClassRef::m_size,
BaseClassRef::root_bbox); // construct the tree
}
/** \name Query methods
* @{ */
/**
* Find set of nearest neighbors to vec[0:dim-1]. Their indices are stored
* inside the result object.
*
* Params:
* result = the result object in which the indices of the
* nearest-neighbors are stored vec = the vector for which to search the
* nearest neighbors
*
* \tparam RESULTSET Should be any ResultSet<DistanceType>
* \return True if the requested neighbors could be found.
* \sa knnSearch, radiusSearch
*/
template <typename RESULTSET>
bool findNeighbors(RESULTSET &result, const ElementType *vec,
const SearchParams &searchParams) const {
assert(vec);
if (this->size(*this) == 0)
return false;
if (!BaseClassRef::root_node)
throw std::runtime_error(
"[nanoflann] findNeighbors() called before building the index.");
float epsError = 1 + searchParams.eps;
distance_vector_t
dists; // fixed or variable-sized container (depending on DIM)
auto zero = static_cast<decltype(result.worstDist())>(0);
assign(dists, (DIM > 0 ? DIM : BaseClassRef::dim),
zero); // Fill it with zeros.
DistanceType distsq = this->computeInitialDistances(*this, vec, dists);
searchLevel(result, vec, BaseClassRef::root_node, distsq, dists,
epsError); // "count_leaf" parameter removed since was neither
// used nor returned to the user.
return result.full();
}
/**
* Find the "num_closest" nearest neighbors to the \a query_point[0:dim-1].
* Their indices are stored inside the result object.
* \sa radiusSearch, findNeighbors
* \note nChecks_IGNORED is ignored but kept for compatibility with the
* original FLANN interface.
* \return Number `N` of valid points in the result set. Only the first `N`
* entries in `out_indices` and `out_distances_sq` will be valid. Return may
* be less than `num_closest` only if the number of elements in the tree is
* less than `num_closest`.
*/
size_t knnSearch(const ElementType *query_point, const size_t num_closest,
IndexType *out_indices, DistanceType *out_distances_sq,
const int /* nChecks_IGNORED */ = 10) const {
nanoflann::KNNResultSet<DistanceType, IndexType> resultSet(num_closest);
resultSet.init(out_indices, out_distances_sq);
this->findNeighbors(resultSet, query_point, nanoflann::SearchParams());
return resultSet.size();
}
/**
* Find all the neighbors to \a query_point[0:dim-1] within a maximum radius.
* The output is given as a vector of pairs, of which the first element is a
* point index and the second the corresponding distance. Previous contents of
* \a IndicesDists are cleared.
*
* If searchParams.sorted==true, the output list is sorted by ascending
* distances.
*
* For a better performance, it is advisable to do a .reserve() on the vector
* if you have any wild guess about the number of expected matches.
*
* \sa knnSearch, findNeighbors, radiusSearchCustomCallback
* \return The number of points within the given radius (i.e. indices.size()
* or dists.size() )
*/
size_t
radiusSearch(const ElementType *query_point, const DistanceType &radius,
std::vector<std::pair<IndexType, DistanceType>> &IndicesDists,
const SearchParams &searchParams) const {
RadiusResultSet<DistanceType, IndexType> resultSet(radius, IndicesDists);
const size_t nFound =
radiusSearchCustomCallback(query_point, resultSet, searchParams);
if (searchParams.sorted)
std::sort(IndicesDists.begin(), IndicesDists.end(), IndexDist_Sorter());
return nFound;
}
/**
* Just like radiusSearch() but with a custom callback class for each point
* found in the radius of the query. See the source of RadiusResultSet<> as a
* start point for your own classes. \sa radiusSearch
*/
template <class SEARCH_CALLBACK>
size_t radiusSearchCustomCallback(
const ElementType *query_point, SEARCH_CALLBACK &resultSet,
const SearchParams &searchParams = SearchParams()) const {
this->findNeighbors(resultSet, query_point, searchParams);
return resultSet.size();
}
/** @} */
public:
/** Make sure the auxiliary list \a vind has the same size as the current
* dataset, and re-generate if size has changed. */
void init_vind() {
// Create a permutable array of indices to the input vectors.
BaseClassRef::m_size = dataset.kdtree_get_point_count();
if (BaseClassRef::vind.size() != BaseClassRef::m_size)
BaseClassRef::vind.resize(BaseClassRef::m_size);
for (size_t i = 0; i < BaseClassRef::m_size; i++)
BaseClassRef::vind[i] = i;
}
void computeBoundingBox(BoundingBox &bbox) {
resize(bbox, (DIM > 0 ? DIM : BaseClassRef::dim));
if (dataset.kdtree_get_bbox(bbox)) {
// Done! It was implemented in derived class
} else {
const size_t N = dataset.kdtree_get_point_count();
if (!N)
throw std::runtime_error("[nanoflann] computeBoundingBox() called but "
"no data points found.");
for (int i = 0; i < (DIM > 0 ? DIM : BaseClassRef::dim); ++i) {
bbox[i].low = bbox[i].high = this->dataset_get(*this, 0, i);
}
for (size_t k = 1; k < N; ++k) {
for (int i = 0; i < (DIM > 0 ? DIM : BaseClassRef::dim); ++i) {
if (this->dataset_get(*this, k, i) < bbox[i].low)
bbox[i].low = this->dataset_get(*this, k, i);
if (this->dataset_get(*this, k, i) > bbox[i].high)
bbox[i].high = this->dataset_get(*this, k, i);
}
}
}
}
/**
* Performs an exact search in the tree starting from a node.
* \tparam RESULTSET Should be any ResultSet<DistanceType>
* \return true if the search should be continued, false if the results are
* sufficient
*/
template <class RESULTSET>
bool searchLevel(RESULTSET &result_set, const ElementType *vec,
const NodePtr node, DistanceType mindistsq,
distance_vector_t &dists, const float epsError) const {
/* If this is a leaf node, then do check and return. */
if ((node->child1 == NULL) && (node->child2 == NULL)) {
// count_leaf += (node->lr.right-node->lr.left); // Removed since was
// neither used nor returned to the user.
DistanceType worst_dist = result_set.worstDist();
for (IndexType i = node->node_type.lr.left; i < node->node_type.lr.right;
++i) {
const IndexType index = BaseClassRef::vind[i]; // reorder... : i;
DistanceType dist = distance.evalMetric(
vec, index, (DIM > 0 ? DIM : BaseClassRef::dim));
if (dist < worst_dist) {
if (!result_set.addPoint(dist, BaseClassRef::vind[i])) {
// the resultset doesn't want to receive any more points, we're done
// searching!
return false;
}
}
}
return true;
}
/* Which child branch should be taken first? */
int idx = node->node_type.sub.divfeat;
ElementType val = vec[idx];
DistanceType diff1 = val - node->node_type.sub.divlow;
DistanceType diff2 = val - node->node_type.sub.divhigh;
NodePtr bestChild;
NodePtr otherChild;
DistanceType cut_dist;
if ((diff1 + diff2) < 0) {
bestChild = node->child1;
otherChild = node->child2;
cut_dist = distance.accum_dist(val, node->node_type.sub.divhigh, idx);
} else {
bestChild = node->child2;
otherChild = node->child1;
cut_dist = distance.accum_dist(val, node->node_type.sub.divlow, idx);
}
/* Call recursively to search next level down. */
if (!searchLevel(result_set, vec, bestChild, mindistsq, dists, epsError)) {
// the resultset doesn't want to receive any more points, we're done
// searching!
return false;
}
DistanceType dst = dists[idx];
mindistsq = mindistsq + cut_dist - dst;
dists[idx] = cut_dist;
if (mindistsq * epsError <= result_set.worstDist()) {
if (!searchLevel(result_set, vec, otherChild, mindistsq, dists,
epsError)) {
// the resultset doesn't want to receive any more points, we're done
// searching!
return false;
}
}
dists[idx] = dst;
return true;
}
public:
/** Stores the index in a binary file.
* IMPORTANT NOTE: The set of data points is NOT stored in the file, so when
* loading the index object it must be constructed associated to the same
* source of data points used while building it. See the example:
* examples/saveload_example.cpp \sa loadIndex */
void saveIndex(FILE *stream) { this->saveIndex_(*this, stream); }
/** Loads a previous index from a binary file.
* IMPORTANT NOTE: The set of data points is NOT stored in the file, so the
* index object must be constructed associated to the same source of data
* points used while building the index. See the example:
* examples/saveload_example.cpp \sa loadIndex */
void loadIndex(FILE *stream) { this->loadIndex_(*this, stream); }
}; // class KDTree
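// Example sketch (not part of the original header): a minimal dataset adaptor
// plus a static index, mirroring how radius_neighbors_cpu.cpp drives this
// class. ExamplePointSet is hypothetical; note that for the L2 metrics the
// radius argument of radiusSearch() is the SQUARED radius, exactly as
// radius_neighbors_cpu.cpp passes radius * radius.
struct ExamplePointSet {
  std::vector<std::array<float, 3>> pts;
  inline size_t kdtree_get_point_count() const { return pts.size(); }
  inline float kdtree_get_pt(const size_t idx, const size_t dim) const {
    return pts[idx][dim];
  }
  template <class BBOX> bool kdtree_get_bbox(BBOX &) const { return false; }
};

static void kd_tree_static_index_example() {
  ExamplePointSet cloud;
  cloud.pts.push_back({0.f, 0.f, 0.f});
  cloud.pts.push_back({1.f, 0.f, 0.f});
  cloud.pts.push_back({0.f, 2.f, 0.f});
  KDTreeSingleIndexAdaptor<L2_Simple_Adaptor<float, ExamplePointSet>,
                           ExamplePointSet, 3>
      index(3, cloud, KDTreeSingleIndexAdaptorParams(10));
  index.buildIndex();
  const float query[3] = {0.1f, 0.f, 0.f};
  size_t nn_index;
  float nn_dist_sq;
  index.knnSearch(query, 1, &nn_index, &nn_dist_sq);  // nn_index == 0
  std::vector<std::pair<size_t, float>> matches;
  index.radiusSearch(query, 0.25f /* = 0.5^2 */, matches, SearchParams());
}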
/** kd-tree dynamic index
*
* Contains the k-d trees and other information for indexing a set of points
* for nearest-neighbor matching.
*
* The class "DatasetAdaptor" must provide the following interface (can be
* non-virtual, inlined methods):
*
* \code
* // Must return the number of data points
* inline size_t kdtree_get_point_count() const { ... }
*
* // Must return the dim'th component of the idx'th point in the class:
* inline T kdtree_get_pt(const size_t idx, const size_t dim) const { ... }
*
* // Optional bounding-box computation: return false to default to a standard
* bbox computation loop.
* // Return true if the BBOX was already computed by the class and returned
* in "bb" so it can be avoided to redo it again.
* // Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3
* for point clouds) template <class BBOX> bool kdtree_get_bbox(BBOX &bb) const
* {
* bb[0].low = ...; bb[0].high = ...; // 0th dimension limits
* bb[1].low = ...; bb[1].high = ...; // 1st dimension limits
* ...
* return true;
* }
*
* \endcode
*
* \tparam DatasetAdaptor The user-provided adaptor (see comments above).
* \tparam Distance The distance metric to use: nanoflann::metric_L1,
* nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc.
* \tparam DIM Dimensionality of data points (e.g. 3 for 3D points)
* \tparam IndexType Will be typically size_t or int
*/
template <typename Distance, class DatasetAdaptor, int DIM = -1,
typename IndexType = size_t>
class KDTreeSingleIndexDynamicAdaptor_
: public KDTreeBaseClass<KDTreeSingleIndexDynamicAdaptor_<
Distance, DatasetAdaptor, DIM, IndexType>,
Distance, DatasetAdaptor, DIM, IndexType> {
public:
/**
* The dataset used by this index
*/
const DatasetAdaptor &dataset; //!< The source of our data
KDTreeSingleIndexAdaptorParams index_params;
std::vector<int> &treeIndex;
Distance distance;
typedef typename nanoflann::KDTreeBaseClass<
nanoflann::KDTreeSingleIndexDynamicAdaptor_<Distance, DatasetAdaptor, DIM,
IndexType>,
Distance, DatasetAdaptor, DIM, IndexType>
BaseClassRef;
typedef typename BaseClassRef::ElementType ElementType;
typedef typename BaseClassRef::DistanceType DistanceType;
typedef typename BaseClassRef::Node Node;
typedef Node *NodePtr;
typedef typename BaseClassRef::Interval Interval;
/** Define "BoundingBox" as a fixed-size or variable-size container depending
* on "DIM" */
typedef typename BaseClassRef::BoundingBox BoundingBox;
/** Define "distance_vector_t" as a fixed-size or variable-size container
* depending on "DIM" */
typedef typename BaseClassRef::distance_vector_t distance_vector_t;
/**
* KDTree constructor
*
* Refer to docs in README.md or online in
* https://github.com/jlblancoc/nanoflann
*
* The KD-Tree point dimension (the length of each point in the dataset, e.g. 3
* for 3D points) is determined by means of:
* - The \a DIM template parameter if >0 (highest priority)
* - Otherwise, the \a dimensionality parameter of this constructor.
*
* @param inputData Dataset with the input features
* @param params Basically, the maximum leaf node size
*/
KDTreeSingleIndexDynamicAdaptor_(
const int dimensionality, const DatasetAdaptor &inputData,
std::vector<int> &treeIndex_,
const KDTreeSingleIndexAdaptorParams ¶ms =
KDTreeSingleIndexAdaptorParams())
: dataset(inputData), index_params(params), treeIndex(treeIndex_),
distance(inputData) {
BaseClassRef::root_node = NULL;
BaseClassRef::m_size = 0;
BaseClassRef::m_size_at_index_build = 0;
BaseClassRef::dim = dimensionality;
if (DIM > 0)
BaseClassRef::dim = DIM;
BaseClassRef::m_leaf_max_size = params.leaf_max_size;
}
/** Assignment operator definition */
KDTreeSingleIndexDynamicAdaptor_
operator=(const KDTreeSingleIndexDynamicAdaptor_ &rhs) {
KDTreeSingleIndexDynamicAdaptor_ tmp(rhs);
std::swap(BaseClassRef::vind, tmp.BaseClassRef::vind);
std::swap(BaseClassRef::m_leaf_max_size, tmp.BaseClassRef::m_leaf_max_size);
std::swap(index_params, tmp.index_params);
std::swap(treeIndex, tmp.treeIndex);
std::swap(BaseClassRef::m_size, tmp.BaseClassRef::m_size);
std::swap(BaseClassRef::m_size_at_index_build,
tmp.BaseClassRef::m_size_at_index_build);
std::swap(BaseClassRef::root_node, tmp.BaseClassRef::root_node);
std::swap(BaseClassRef::root_bbox, tmp.BaseClassRef::root_bbox);
std::swap(BaseClassRef::pool, tmp.BaseClassRef::pool);
return *this;
}
/**
* Builds the index
*/
void buildIndex() {
BaseClassRef::m_size = BaseClassRef::vind.size();
this->freeIndex(*this);
BaseClassRef::m_size_at_index_build = BaseClassRef::m_size;
if (BaseClassRef::m_size == 0)
return;
computeBoundingBox(BaseClassRef::root_bbox);
BaseClassRef::root_node =
this->divideTree(*this, 0, BaseClassRef::m_size,
BaseClassRef::root_bbox); // construct the tree
}
/** \name Query methods
* @{ */
/**
* Find set of nearest neighbors to vec[0:dim-1]. Their indices are stored
* inside the result object.
*
* Params:
* result = the result object in which the indices of the
* nearest-neighbors are stored vec = the vector for which to search the
* nearest neighbors
*
* \tparam RESULTSET Should be any ResultSet<DistanceType>
* \return True if the requested neighbors could be found.
* \sa knnSearch, radiusSearch
*/
template <typename RESULTSET>
bool findNeighbors(RESULTSET &result, const ElementType *vec,
const SearchParams &searchParams) const {
assert(vec);
if (this->size(*this) == 0)
return false;
if (!BaseClassRef::root_node)
return false;
float epsError = 1 + searchParams.eps;
// fixed or variable-sized container (depending on DIM)
distance_vector_t dists;
// Fill it with zeros.
assign(dists, (DIM > 0 ? DIM : BaseClassRef::dim),
static_cast<typename distance_vector_t::value_type>(0));
DistanceType distsq = this->computeInitialDistances(*this, vec, dists);
searchLevel(result, vec, BaseClassRef::root_node, distsq, dists,
epsError); // "count_leaf" parameter removed since was neither
// used nor returned to the user.
return result.full();
}
/**
* Find the "num_closest" nearest neighbors to the \a query_point[0:dim-1].
* Their indices are stored inside the result object. \sa radiusSearch,
* findNeighbors \note nChecks_IGNORED is ignored but kept for compatibility
* with the original FLANN interface. \return Number `N` of valid points in
* the result set. Only the first `N` entries in `out_indices` and
* `out_distances_sq` will be valid. Return may be less than `num_closest`
* only if the number of elements in the tree is less than `num_closest`.
*/
size_t knnSearch(const ElementType *query_point, const size_t num_closest,
IndexType *out_indices, DistanceType *out_distances_sq,
const int /* nChecks_IGNORED */ = 10) const {
nanoflann::KNNResultSet<DistanceType, IndexType> resultSet(num_closest);
resultSet.init(out_indices, out_distances_sq);
this->findNeighbors(resultSet, query_point, nanoflann::SearchParams());
return resultSet.size();
}
/**
* Find all the neighbors to \a query_point[0:dim-1] within a maximum radius.
* The output is given as a vector of pairs, of which the first element is a
* point index and the second the corresponding distance. Previous contents of
* \a IndicesDists are cleared.
*
* If searchParams.sorted==true, the output list is sorted by ascending
* distances.
*
 * For better performance, it is advisable to do a .reserve() on the vector
* if you have any wild guess about the number of expected matches.
*
* \sa knnSearch, findNeighbors, radiusSearchCustomCallback
* \return The number of points within the given radius (i.e. indices.size()
* or dists.size() )
*/
size_t
radiusSearch(const ElementType *query_point, const DistanceType &radius,
std::vector<std::pair<IndexType, DistanceType>> &IndicesDists,
const SearchParams &searchParams) const {
RadiusResultSet<DistanceType, IndexType> resultSet(radius, IndicesDists);
const size_t nFound =
radiusSearchCustomCallback(query_point, resultSet, searchParams);
if (searchParams.sorted)
std::sort(IndicesDists.begin(), IndicesDists.end(), IndexDist_Sorter());
return nFound;
}
/**
* Just like radiusSearch() but with a custom callback class for each point
* found in the radius of the query. See the source of RadiusResultSet<> as a
* start point for your own classes. \sa radiusSearch
*/
template <class SEARCH_CALLBACK>
size_t radiusSearchCustomCallback(
const ElementType *query_point, SEARCH_CALLBACK &resultSet,
const SearchParams &searchParams = SearchParams()) const {
this->findNeighbors(resultSet, query_point, searchParams);
return resultSet.size();
}
/** @} */
public:
void computeBoundingBox(BoundingBox &bbox) {
resize(bbox, (DIM > 0 ? DIM : BaseClassRef::dim));
if (dataset.kdtree_get_bbox(bbox)) {
// Done! It was implemented in derived class
} else {
const size_t N = BaseClassRef::m_size;
if (!N)
throw std::runtime_error("[nanoflann] computeBoundingBox() called but "
"no data points found.");
for (int i = 0; i < (DIM > 0 ? DIM : BaseClassRef::dim); ++i) {
bbox[i].low = bbox[i].high =
this->dataset_get(*this, BaseClassRef::vind[0], i);
}
for (size_t k = 1; k < N; ++k) {
for (int i = 0; i < (DIM > 0 ? DIM : BaseClassRef::dim); ++i) {
if (this->dataset_get(*this, BaseClassRef::vind[k], i) < bbox[i].low)
bbox[i].low = this->dataset_get(*this, BaseClassRef::vind[k], i);
if (this->dataset_get(*this, BaseClassRef::vind[k], i) > bbox[i].high)
bbox[i].high = this->dataset_get(*this, BaseClassRef::vind[k], i);
}
}
}
}
/**
* Performs an exact search in the tree starting from a node.
* \tparam RESULTSET Should be any ResultSet<DistanceType>
*/
template <class RESULTSET>
void searchLevel(RESULTSET &result_set, const ElementType *vec,
const NodePtr node, DistanceType mindistsq,
distance_vector_t &dists, const float epsError) const {
/* If this is a leaf node, then do check and return. */
if ((node->child1 == NULL) && (node->child2 == NULL)) {
// count_leaf += (node->lr.right-node->lr.left); // Removed since was
// neither used nor returned to the user.
DistanceType worst_dist = result_set.worstDist();
for (IndexType i = node->node_type.lr.left; i < node->node_type.lr.right;
++i) {
const IndexType index = BaseClassRef::vind[i]; // reorder... : i;
if (treeIndex[index] == -1)
continue;
DistanceType dist = distance.evalMetric(
vec, index, (DIM > 0 ? DIM : BaseClassRef::dim));
if (dist < worst_dist) {
if (!result_set.addPoint(
static_cast<typename RESULTSET::DistanceType>(dist),
static_cast<typename RESULTSET::IndexType>(
BaseClassRef::vind[i]))) {
// the resultset doesn't want to receive any more points, we're done
// searching!
return; // false;
}
}
}
return;
}
/* Which child branch should be taken first? */
int idx = node->node_type.sub.divfeat;
ElementType val = vec[idx];
DistanceType diff1 = val - node->node_type.sub.divlow;
DistanceType diff2 = val - node->node_type.sub.divhigh;
NodePtr bestChild;
NodePtr otherChild;
DistanceType cut_dist;
if ((diff1 + diff2) < 0) {
bestChild = node->child1;
otherChild = node->child2;
cut_dist = distance.accum_dist(val, node->node_type.sub.divhigh, idx);
} else {
bestChild = node->child2;
otherChild = node->child1;
cut_dist = distance.accum_dist(val, node->node_type.sub.divlow, idx);
}
/* Call recursively to search next level down. */
searchLevel(result_set, vec, bestChild, mindistsq, dists, epsError);
DistanceType dst = dists[idx];
mindistsq = mindistsq + cut_dist - dst;
dists[idx] = cut_dist;
if (mindistsq * epsError <= result_set.worstDist()) {
searchLevel(result_set, vec, otherChild, mindistsq, dists, epsError);
}
dists[idx] = dst;
}
public:
/** Stores the index in a binary file.
* IMPORTANT NOTE: The set of data points is NOT stored in the file, so when
* loading the index object it must be constructed associated to the same
* source of data points used while building it. See the example:
* examples/saveload_example.cpp \sa loadIndex */
void saveIndex(FILE *stream) { this->saveIndex_(*this, stream); }
/** Loads a previous index from a binary file.
* IMPORTANT NOTE: The set of data points is NOT stored in the file, so the
* index object must be constructed associated to the same source of data
* points used while building the index. See the example:
* examples/saveload_example.cpp \sa loadIndex */
void loadIndex(FILE *stream) { this->loadIndex_(*this, stream); }
};
/** kd-tree dynamic index
 *
 * class to create multiple static indices and merge their results to behave as
 * a single dynamic index, as proposed in the Logarithmic Approach.
*
* Example of usage:
* examples/dynamic_pointcloud_example.cpp
*
* \tparam DatasetAdaptor The user-provided adaptor (see comments above).
* \tparam Distance The distance metric to use: nanoflann::metric_L1,
* nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc. \tparam DIM
* Dimensionality of data points (e.g. 3 for 3D points) \tparam IndexType Will
* be typically size_t or int
*/
template <typename Distance, class DatasetAdaptor, int DIM = -1,
typename IndexType = size_t>
class KDTreeSingleIndexDynamicAdaptor {
public:
typedef typename Distance::ElementType ElementType;
typedef typename Distance::DistanceType DistanceType;
protected:
size_t m_leaf_max_size;
size_t treeCount;
size_t pointCount;
/**
* The dataset used by this index
*/
const DatasetAdaptor &dataset; //!< The source of our data
std::vector<int> treeIndex; //!< treeIndex[idx] is the index of tree in which
//!< point at idx is stored. treeIndex[idx]=-1
//!< means that point has been removed.
KDTreeSingleIndexAdaptorParams index_params;
int dim; //!< Dimensionality of each data point
typedef KDTreeSingleIndexDynamicAdaptor_<Distance, DatasetAdaptor, DIM>
index_container_t;
std::vector<index_container_t> index;
public:
/** Get a const ref to the internal list of indices; the number of indices is
* adapted dynamically as the dataset grows in size. */
const std::vector<index_container_t> &getAllIndices() const { return index; }
private:
/** finds position of least significant unset bit */
int First0Bit(IndexType num) {
int pos = 0;
while (num & 1) {
num = num >> 1;
pos++;
}
return pos;
}
/** Creates multiple empty trees to handle dynamic support */
void init() {
typedef KDTreeSingleIndexDynamicAdaptor_<Distance, DatasetAdaptor, DIM>
my_kd_tree_t;
std::vector<my_kd_tree_t> index_(
treeCount, my_kd_tree_t(dim /*dim*/, dataset, treeIndex, index_params));
index = index_;
}
public:
Distance distance;
/**
* KDTree constructor
*
* Refer to docs in README.md or online in
* https://github.com/jlblancoc/nanoflann
*
 * The KD-Tree point dimension (the length of each point in the dataset, e.g. 3
* for 3D points) is determined by means of:
* - The \a DIM template parameter if >0 (highest priority)
* - Otherwise, the \a dimensionality parameter of this constructor.
*
* @param inputData Dataset with the input features
* @param params Basically, the maximum leaf node size
*/
KDTreeSingleIndexDynamicAdaptor(const int dimensionality,
const DatasetAdaptor &inputData,
const KDTreeSingleIndexAdaptorParams ¶ms =
KDTreeSingleIndexAdaptorParams(),
const size_t maximumPointCount = 1000000000U)
: dataset(inputData), index_params(params), distance(inputData) {
treeCount = static_cast<size_t>(std::log2(maximumPointCount));
pointCount = 0U;
dim = dimensionality;
treeIndex.clear();
if (DIM > 0)
dim = DIM;
m_leaf_max_size = params.leaf_max_size;
init();
const size_t num_initial_points = dataset.kdtree_get_point_count();
if (num_initial_points > 0) {
addPoints(0, num_initial_points - 1);
}
}
/** Deleted copy constructor*/
KDTreeSingleIndexDynamicAdaptor(
const KDTreeSingleIndexDynamicAdaptor<Distance, DatasetAdaptor, DIM,
IndexType> &) = delete;
  /** Add points to the set; inserts all points from [start, end]. */
void addPoints(IndexType start, IndexType end) {
size_t count = end - start + 1;
treeIndex.resize(treeIndex.size() + count);
for (IndexType idx = start; idx <= end; idx++) {
int pos = First0Bit(pointCount);
index[pos].vind.clear();
treeIndex[pointCount] = pos;
for (int i = 0; i < pos; i++) {
for (int j = 0; j < static_cast<int>(index[i].vind.size()); j++) {
index[pos].vind.push_back(index[i].vind[j]);
if (treeIndex[index[i].vind[j]] != -1)
treeIndex[index[i].vind[j]] = pos;
}
index[i].vind.clear();
index[i].freeIndex(index[i]);
}
index[pos].vind.push_back(idx);
index[pos].buildIndex();
pointCount++;
}
}
/** Remove a point from the set (Lazy Deletion) */
void removePoint(size_t idx) {
if (idx >= pointCount)
return;
treeIndex[idx] = -1;
}
/**
* Find set of nearest neighbors to vec[0:dim-1]. Their indices are stored
* inside the result object.
*
* Params:
* result = the result object in which the indices of the
* nearest-neighbors are stored vec = the vector for which to search the
* nearest neighbors
*
* \tparam RESULTSET Should be any ResultSet<DistanceType>
* \return True if the requested neighbors could be found.
* \sa knnSearch, radiusSearch
*/
template <typename RESULTSET>
bool findNeighbors(RESULTSET &result, const ElementType *vec,
const SearchParams &searchParams) const {
for (size_t i = 0; i < treeCount; i++) {
index[i].findNeighbors(result, &vec[0], searchParams);
}
return result.full();
}
};
/** An L2-metric KD-tree adaptor for working with data directly stored in an
* Eigen Matrix, without duplicating the data storage. Each row in the matrix
* represents a point in the state space.
*
* Example of usage:
* \code
* Eigen::Matrix<num_t,Dynamic,Dynamic> mat;
* // Fill out "mat"...
*
 * typedef KDTreeEigenMatrixAdaptor< Eigen::Matrix<num_t,Dynamic,Dynamic> >
 *   my_kd_tree_t;
 * const int max_leaf = 10;
 * my_kd_tree_t mat_index(mat, max_leaf);
 * mat_index.index->buildIndex();
 * mat_index.index->...
 * \endcode
*
* \tparam DIM If set to >0, it specifies a compile-time fixed dimensionality
* for the points in the data set, allowing more compiler optimizations. \tparam
* Distance The distance metric to use: nanoflann::metric_L1,
* nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc.
*/
template <class MatrixType, int DIM = -1, class Distance = nanoflann::metric_L2>
struct KDTreeEigenMatrixAdaptor {
typedef KDTreeEigenMatrixAdaptor<MatrixType, DIM, Distance> self_t;
typedef typename MatrixType::Scalar num_t;
typedef typename MatrixType::Index IndexType;
typedef
typename Distance::template traits<num_t, self_t>::distance_t metric_t;
typedef KDTreeSingleIndexAdaptor<metric_t, self_t,
MatrixType::ColsAtCompileTime, IndexType>
index_t;
index_t *index; //! The kd-tree index for the user to call its methods as
//! usual with any other FLANN index.
/// Constructor: takes a const ref to the matrix object with the data points
KDTreeEigenMatrixAdaptor(const size_t dimensionality,
const std::reference_wrapper<const MatrixType> &mat,
const int leaf_max_size = 10)
: m_data_matrix(mat) {
const auto dims = mat.get().cols();
if (size_t(dims) != dimensionality)
throw std::runtime_error(
"Error: 'dimensionality' must match column count in data matrix");
if (DIM > 0 && int(dims) != DIM)
throw std::runtime_error(
"Data set dimensionality does not match the 'DIM' template argument");
index =
new index_t(static_cast<int>(dims), *this /* adaptor */,
nanoflann::KDTreeSingleIndexAdaptorParams(leaf_max_size));
index->buildIndex();
}
public:
/** Deleted copy constructor */
KDTreeEigenMatrixAdaptor(const self_t &) = delete;
~KDTreeEigenMatrixAdaptor() { delete index; }
const std::reference_wrapper<const MatrixType> m_data_matrix;
/** Query for the \a num_closest closest points to a given point (entered as
* query_point[0:dim-1]). Note that this is a short-cut method for
* index->findNeighbors(). The user can also call index->... methods as
* desired. \note nChecks_IGNORED is ignored but kept for compatibility with
* the original FLANN interface.
*/
inline void query(const num_t *query_point, const size_t num_closest,
IndexType *out_indices, num_t *out_distances_sq,
const int /* nChecks_IGNORED */ = 10) const {
nanoflann::KNNResultSet<num_t, IndexType> resultSet(num_closest);
resultSet.init(out_indices, out_distances_sq);
index->findNeighbors(resultSet, query_point, nanoflann::SearchParams());
}
/** @name Interface expected by KDTreeSingleIndexAdaptor
* @{ */
const self_t &derived() const { return *this; }
self_t &derived() { return *this; }
// Must return the number of data points
inline size_t kdtree_get_point_count() const {
return m_data_matrix.get().rows();
}
// Returns the dim'th component of the idx'th point in the class:
inline num_t kdtree_get_pt(const IndexType idx, size_t dim) const {
return m_data_matrix.get().coeff(idx, IndexType(dim));
}
// Optional bounding-box computation: return false to default to a standard
// bbox computation loop.
// Return true if the BBOX was already computed by the class and returned in
// "bb" so it can be avoided to redo it again. Look at bb.size() to find out
// the expected dimensionality (e.g. 2 or 3 for point clouds)
template <class BBOX> bool kdtree_get_bbox(BBOX & /*bb*/) const {
return false;
}
/** @} */
}; // end of KDTreeEigenMatrixAdaptor
/** @} */
/** @} */ // end of grouping
} // namespace nanoflann
#endif /* NANOFLANN_HPP_ */
| 73,133 | 34.779843 | 80 | hpp |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/__init__.py | 0 | 0 | 0 | py |
|
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/geotransformer/__init__.py | from geotransformer.modules.geotransformer.geotransformer import GeometricStructureEmbedding, GeometricTransformer
from geotransformer.modules.geotransformer.superpoint_matching import SuperPointMatching
from geotransformer.modules.geotransformer.superpoint_target import SuperPointTargetGenerator
from geotransformer.modules.geotransformer.point_matching import PointMatching
from geotransformer.modules.geotransformer.local_global_registration import LocalGlobalRegistration
| 477 | 78.666667 | 114 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/geotransformer/geotransformer.py | import numpy as np
import torch
import torch.nn as nn
from geotransformer.modules.ops import pairwise_distance
from geotransformer.modules.transformer import SinusoidalPositionalEmbedding, RPEConditionalTransformer
class GeometricStructureEmbedding(nn.Module):
def __init__(self, hidden_dim, sigma_d, sigma_a, angle_k, reduction_a='max'):
super(GeometricStructureEmbedding, self).__init__()
self.sigma_d = sigma_d
self.sigma_a = sigma_a
self.factor_a = 180.0 / (self.sigma_a * np.pi)
self.angle_k = angle_k
self.embedding = SinusoidalPositionalEmbedding(hidden_dim)
self.proj_d = nn.Linear(hidden_dim, hidden_dim)
self.proj_a = nn.Linear(hidden_dim, hidden_dim)
self.reduction_a = reduction_a
if self.reduction_a not in ['max', 'mean']:
raise ValueError(f'Unsupported reduction mode: {self.reduction_a}.')
@torch.no_grad()
def get_embedding_indices(self, points):
r"""Compute the indices of pair-wise distance embedding and triplet-wise angular embedding.
Args:
points: torch.Tensor (B, N, 3), input point cloud
Returns:
d_indices: torch.FloatTensor (B, N, N), distance embedding indices
a_indices: torch.FloatTensor (B, N, N, k), angular embedding indices
"""
batch_size, num_point, _ = points.shape
dist_map = torch.sqrt(pairwise_distance(points, points)) # (B, N, N)
d_indices = dist_map / self.sigma_d
k = self.angle_k
knn_indices = dist_map.topk(k=k + 1, dim=2, largest=False)[1][:, :, 1:] # (B, N, k)
knn_indices = knn_indices.unsqueeze(3).expand(batch_size, num_point, k, 3) # (B, N, k, 3)
expanded_points = points.unsqueeze(1).expand(batch_size, num_point, num_point, 3) # (B, N, N, 3)
knn_points = torch.gather(expanded_points, dim=2, index=knn_indices) # (B, N, k, 3)
ref_vectors = knn_points - points.unsqueeze(2) # (B, N, k, 3)
anc_vectors = points.unsqueeze(1) - points.unsqueeze(2) # (B, N, N, 3)
ref_vectors = ref_vectors.unsqueeze(2).expand(batch_size, num_point, num_point, k, 3) # (B, N, N, k, 3)
anc_vectors = anc_vectors.unsqueeze(3).expand(batch_size, num_point, num_point, k, 3) # (B, N, N, k, 3)
sin_values = torch.linalg.norm(torch.cross(ref_vectors, anc_vectors, dim=-1), dim=-1) # (B, N, N, k)
cos_values = torch.sum(ref_vectors * anc_vectors, dim=-1) # (B, N, N, k)
angles = torch.atan2(sin_values, cos_values) # (B, N, N, k)
a_indices = angles * self.factor_a
return d_indices, a_indices
def forward(self, points):
d_indices, a_indices = self.get_embedding_indices(points)
d_embeddings = self.embedding(d_indices)
d_embeddings = self.proj_d(d_embeddings)
a_embeddings = self.embedding(a_indices)
a_embeddings = self.proj_a(a_embeddings)
if self.reduction_a == 'max':
a_embeddings = a_embeddings.max(dim=3)[0]
else:
a_embeddings = a_embeddings.mean(dim=3)
embeddings = d_embeddings + a_embeddings
return embeddings
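# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A minimal shape check of GeometricStructureEmbedding on random superpoints. The
# hyper-parameters below (hidden_dim=256, sigma_d=0.2, sigma_a=15, angle_k=3) mimic
# common GeoTransformer configs but are assumptions here, not values read from a config.
def _demo_geometric_structure_embedding():
    points = torch.randn(2, 64, 3)  # (B, N, 3) superpoint coordinates
    embedding = GeometricStructureEmbedding(hidden_dim=256, sigma_d=0.2, sigma_a=15.0, angle_k=3)
    embeddings = embedding(points)  # (B, N, N, hidden_dim) pair-wise geometric embeddings
    assert embeddings.shape == (2, 64, 64, 256)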
class GeometricTransformer(nn.Module):
def __init__(
self,
input_dim,
output_dim,
hidden_dim,
num_heads,
blocks,
sigma_d,
sigma_a,
angle_k,
dropout=None,
activation_fn='ReLU',
reduction_a='max',
):
r"""Geometric Transformer (GeoTransformer).
Args:
input_dim: input feature dimension
output_dim: output feature dimension
hidden_dim: hidden feature dimension
num_heads: number of head in transformer
blocks: list of 'self' or 'cross'
sigma_d: temperature of distance
sigma_a: temperature of angles
angle_k: number of nearest neighbors for angular embedding
activation_fn: activation function
reduction_a: reduction mode of angular embedding ['max', 'mean']
"""
super(GeometricTransformer, self).__init__()
self.embedding = GeometricStructureEmbedding(hidden_dim, sigma_d, sigma_a, angle_k, reduction_a=reduction_a)
self.in_proj = nn.Linear(input_dim, hidden_dim)
self.transformer = RPEConditionalTransformer(
blocks, hidden_dim, num_heads, dropout=dropout, activation_fn=activation_fn
)
self.out_proj = nn.Linear(hidden_dim, output_dim)
def forward(
self,
ref_points,
src_points,
ref_feats,
src_feats,
ref_masks=None,
src_masks=None,
):
r"""Geometric Transformer
Args:
ref_points (Tensor): (B, N, 3)
src_points (Tensor): (B, M, 3)
ref_feats (Tensor): (B, N, C)
src_feats (Tensor): (B, M, C)
ref_masks (Optional[BoolTensor]): (B, N)
src_masks (Optional[BoolTensor]): (B, M)
Returns:
ref_feats: torch.Tensor (B, N, C)
src_feats: torch.Tensor (B, M, C)
"""
ref_embeddings = self.embedding(ref_points)
src_embeddings = self.embedding(src_points)
ref_feats = self.in_proj(ref_feats)
src_feats = self.in_proj(src_feats)
ref_feats, src_feats = self.transformer(
ref_feats,
src_feats,
ref_embeddings,
src_embeddings,
masks0=ref_masks,
masks1=src_masks,
)
ref_feats = self.out_proj(ref_feats)
src_feats = self.out_proj(src_feats)
return ref_feats, src_feats
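# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# End-to-end shape check for GeometricTransformer. The block layout and feature sizes
# are assumptions chosen to mimic typical configs; only the argument names come from
# the constructor above.
def _demo_geometric_transformer():
    model = GeometricTransformer(
        input_dim=1024,
        output_dim=256,
        hidden_dim=256,
        num_heads=4,
        blocks=['self', 'cross', 'self', 'cross'],
        sigma_d=0.2,
        sigma_a=15.0,
        angle_k=3,
    )
    ref_points, src_points = torch.randn(1, 128, 3), torch.randn(1, 100, 3)
    ref_feats, src_feats = torch.randn(1, 128, 1024), torch.randn(1, 100, 1024)
    ref_out, src_out = model(ref_points, src_points, ref_feats, src_feats)
    # ref_out: (1, 128, 256), src_out: (1, 100, 256)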
| 5,681 | 35.423077 | 116 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/geotransformer/local_global_registration.py | from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from geotransformer.modules.ops import apply_transform
from geotransformer.modules.registration import WeightedProcrustes
from torch.autograd import Variable
import torch.nn.functional as F
# import sys
# path = r"GeoTransformer-corr-classification-testRR/GeoTransformer/experiments/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/classification.py"
# sys.path.append(path)
# import classification
class Resnet(nn.Module):
def __init__(self,basicBlock,blockNums,nb_classes):
super(Resnet, self).__init__()
self.in_planes=64
        # input layer
self.conv1=nn.Conv2d(1,self.in_planes,kernel_size=(3,3),stride=(1,1),padding=1,bias=False)
self.bn1=nn.BatchNorm2d(self.in_planes)
self.relu=nn.ReLU(inplace=True)
self.maxpool=nn.MaxPool2d(kernel_size=3,stride=2,padding=1)
self.layer1=self._make_layers(basicBlock,blockNums[0],64,1)
self.layer2=self._make_layers(basicBlock,blockNums[1],128,2)
self.layer3=self._make_layers(basicBlock,blockNums[2],256,2)
self.layer4=self._make_layers(basicBlock,blockNums[3],512,2)
self.avgpool=nn.AdaptiveAvgPool2d(output_size=(1,1))
self.fc=nn.Linear(512,nb_classes)
def _make_layers(self,basicBlock,blockNum,plane,stride):
"""
:param basicBlock: 基本残差块类
:param blockNum: 当前层包含基本残差块的数目,resnet18每层均为2
:param plane: 输出通道数
:param stride: 卷积步长
:return:
"""
layers=[]
for i in range(blockNum):
if i==0:
layer=basicBlock(self.in_planes,plane,3,stride=stride)
else:
layer=basicBlock(plane,plane,3,stride=1)
layers.append(layer)
self.in_planes=plane
return nn.Sequential(*layers)
def forward(self,inx):
x=self.maxpool(self.relu(self.bn1(self.conv1(inx))))
x=self.layer1(x)
x=self.layer2(x)
x=self.layer3(x)
x=self.layer4(x)
x=self.avgpool(x)
x=x.view(x.shape[0],-1)
out=self.fc(x)
return out
class basic_block(nn.Module):
"""基本残差块,由两层卷积构成"""
def __init__(self,in_planes,planes,kernel_size=3,stride=1):
"""
:param in_planes: 输入通道
:param planes: 输出通道
:param kernel_size: 卷积核大小
:param stride: 卷积步长
"""
super(basic_block, self).__init__()
self.conv1=nn.Conv2d(in_planes,planes,kernel_size=kernel_size,stride=stride,padding=1,bias=False)
self.bn1=nn.BatchNorm2d(planes)
self.relu=nn.ReLU()
self.conv2=nn.Conv2d(planes,planes,kernel_size=kernel_size,stride=1,padding=1,bias=False)
self.bn2=nn.BatchNorm2d(planes)
if stride!=1 or in_planes!=planes:
self.downsample=nn.Sequential(nn.Conv2d(in_planes,planes,kernel_size=1,stride=stride)
,nn.BatchNorm2d(planes))
else:
self.downsample=nn.Sequential()
def forward(self,inx):
x=self.relu(self.bn1(self.conv1(inx)))
x=self.bn2(self.conv2(x))
out=x+self.downsample(inx)
return F.relu(out)
class classification(nn.Module):
def __init__(self):
super(classification, self).__init__()
self.resnet18=Resnet(basic_block,[2,2,2,2],256)
self.linear1 = nn.Linear(256, 128)
self.linear2 = nn.Linear(128, 1)
self.activate1 = nn.ReLU()
self.dropout1 = nn.Dropout(p=0.5)
self.activate2 = nn.Sigmoid()
def forward(self, x):
x = self.resnet18(x)
x = self.linear1(x)
x = self.activate1(x)
x = self.dropout1(x)
x = self.linear2(x)
x = self.activate2(x)
return x
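# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# The classifier above scores candidate correspondences from 2D feature "images".
# Judging from the commented-out pipeline below, inputs are shaped (N, 1, 32, 32),
# but that layout is an inference from this file, not a documented contract.
def _demo_classification():
    model = classification().eval()
    inputs = torch.randn(8, 1, 32, 32)  # 8 candidate correspondences
    with torch.no_grad():
        scores = model(inputs)  # (8, 1) sigmoid inlier probabilities in [0, 1]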
class LocalGlobalRegistration(nn.Module):
def __init__(
self,
k: int,
acceptance_radius: float,
mutual: bool = True,
confidence_threshold: float = 0.05,
use_dustbin: bool = False,
use_global_score: bool = False,
correspondence_threshold: int = 3,
correspondence_limit: Optional[int] = None,
num_refinement_steps: int = 5,
):
r"""Point Matching with Local-to-Global Registration.
Args:
k (int): top-k selection for matching.
acceptance_radius (float): acceptance radius for LGR.
mutual (bool=True): mutual or non-mutual matching.
confidence_threshold (float=0.05): ignore matches whose scores are below this threshold.
use_dustbin (bool=False): whether dustbin row/column is used in the score matrix.
use_global_score (bool=False): whether use patch correspondence scores.
correspondence_threshold (int=3): minimal number of correspondences for each patch correspondence.
correspondence_limit (optional[int]=None): maximal number of verification correspondences.
num_refinement_steps (int=5): number of refinement steps.
"""
super(LocalGlobalRegistration, self).__init__()
self.k = k
self.acceptance_radius = acceptance_radius
self.mutual = mutual
self.confidence_threshold = confidence_threshold
self.use_dustbin = use_dustbin
self.use_global_score = use_global_score
self.correspondence_threshold = correspondence_threshold
self.correspondence_limit = correspondence_limit
self.num_refinement_steps = num_refinement_steps
self.procrustes = WeightedProcrustes(return_transform=True)
def compute_correspondence_matrix(self, score_mat, ref_knn_masks, src_knn_masks):
r"""Compute matching matrix and score matrix for each patch correspondence."""
mask_mat = torch.logical_and(ref_knn_masks.unsqueeze(2), src_knn_masks.unsqueeze(1))
batch_size, ref_length, src_length = score_mat.shape
batch_indices = torch.arange(batch_size).cuda()
# correspondences from reference side
ref_topk_scores, ref_topk_indices = score_mat.topk(k=self.k, dim=2) # (B, N, K)
ref_batch_indices = batch_indices.view(batch_size, 1, 1).expand(-1, ref_length, self.k) # (B, N, K)
ref_indices = torch.arange(ref_length).cuda().view(1, ref_length, 1).expand(batch_size, -1, self.k) # (B, N, K)
ref_score_mat = torch.zeros_like(score_mat)
ref_score_mat[ref_batch_indices, ref_indices, ref_topk_indices] = ref_topk_scores
ref_corr_mat = torch.gt(ref_score_mat, self.confidence_threshold)
# correspondences from source side
src_topk_scores, src_topk_indices = score_mat.topk(k=self.k, dim=1) # (B, K, N)
src_batch_indices = batch_indices.view(batch_size, 1, 1).expand(-1, self.k, src_length) # (B, K, N)
src_indices = torch.arange(src_length).cuda().view(1, 1, src_length).expand(batch_size, self.k, -1) # (B, K, N)
src_score_mat = torch.zeros_like(score_mat)
src_score_mat[src_batch_indices, src_topk_indices, src_indices] = src_topk_scores
src_corr_mat = torch.gt(src_score_mat, self.confidence_threshold)
# merge results from two sides
if self.mutual:
corr_mat = torch.logical_and(ref_corr_mat, src_corr_mat)
else:
corr_mat = torch.logical_or(ref_corr_mat, src_corr_mat)
if self.use_dustbin:
corr_mat = corr_mat[:, :-1, :-1]
corr_mat = torch.logical_and(corr_mat, mask_mat)
return corr_mat
@staticmethod
def convert_to_batch(ref_corr_points, src_corr_points, corr_scores, chunks):
r"""Convert stacked correspondences to batched points.
The extracted dense correspondences from all patch correspondences are stacked. However, to compute the
transformations from all patch correspondences in parallel, the dense correspondences need to be reorganized
into a batch.
Args:
ref_corr_points (Tensor): (C, 3)
src_corr_points (Tensor): (C, 3)
corr_scores (Tensor): (C,)
chunks (List[Tuple[int, int]]): the starting index and ending index of each patch correspondences.
Returns:
batch_ref_corr_points (Tensor): (B, K, 3), padded with zeros.
batch_src_corr_points (Tensor): (B, K, 3), padded with zeros.
batch_corr_scores (Tensor): (B, K), padded with zeros.
"""
batch_size = len(chunks)
indices = torch.cat([torch.arange(x, y) for x, y in chunks], dim=0).cuda()
ref_corr_points = ref_corr_points[indices] # (total, 3)
src_corr_points = src_corr_points[indices] # (total, 3)
corr_scores = corr_scores[indices] # (total,)
max_corr = np.max([y - x for x, y in chunks])
target_chunks = [(i * max_corr, i * max_corr + y - x) for i, (x, y) in enumerate(chunks)]
indices = torch.cat([torch.arange(x, y) for x, y in target_chunks], dim=0).cuda()
indices0 = indices.unsqueeze(1).expand(indices.shape[0], 3) # (total,) -> (total, 3)
indices1 = torch.arange(3).unsqueeze(0).expand(indices.shape[0], 3).cuda() # (3,) -> (total, 3)
batch_ref_corr_points = torch.zeros(batch_size * max_corr, 3).cuda()
batch_ref_corr_points.index_put_([indices0, indices1], ref_corr_points)
batch_ref_corr_points = batch_ref_corr_points.view(batch_size, max_corr, 3)
batch_src_corr_points = torch.zeros(batch_size * max_corr, 3).cuda()
batch_src_corr_points.index_put_([indices0, indices1], src_corr_points)
batch_src_corr_points = batch_src_corr_points.view(batch_size, max_corr, 3)
batch_corr_scores = torch.zeros(batch_size * max_corr).cuda()
batch_corr_scores.index_put_([indices], corr_scores)
batch_corr_scores = batch_corr_scores.view(batch_size, max_corr)
return batch_ref_corr_points, batch_src_corr_points, batch_corr_scores
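    # Hedged worked example for convert_to_batch (added for illustration, not in the
    # original code): with chunks = [(0, 3), (3, 5)] the stacked correspondences
    # 0..4 split into patches of sizes 3 and 2, so max_corr = 3 and
    # target_chunks = [(0, 3), (3, 5)]. The scattered outputs are then
    #   batch_ref_corr_points: (2, 3, 3) with batch 1, slot 2 zero-padded,
    #   batch_corr_scores:     (2, 3)    with batch_corr_scores[1, 2] == 0.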
def recompute_correspondence_scores(self, ref_corr_points, src_corr_points, corr_scores, estimated_transform):
aligned_src_corr_points = apply_transform(src_corr_points, estimated_transform)
corr_residuals = torch.linalg.norm(ref_corr_points - aligned_src_corr_points, dim=1)
inlier_masks = torch.lt(corr_residuals, self.acceptance_radius)
new_corr_scores = corr_scores * inlier_masks.float()
return new_corr_scores
def local_to_global_registration(self, ref_knn_points, src_knn_points, score_mat, corr_mat,
#ref_node_corr_knn_feats, src_node_corr_knn_feats, transform
):
# extract dense correspondences
# batch_indices, ref_indices, src_indices = torch.nonzero(corr_mat, as_tuple=True)
# global_ref_corr_points = ref_knn_points[batch_indices, ref_indices]
# global_src_corr_points = src_knn_points[batch_indices, src_indices]
# global_ref_corr_points_feat = ref_node_corr_knn_feats[batch_indices, ref_indices]
# global_src_corr_points_feat = src_node_corr_knn_feats[batch_indices, src_indices]
# global_corr_scores = score_mat[batch_indices, ref_indices, src_indices]
# classify_model = torch.load('/Download/GeoTransformer-corr-classification-testRR/GeoTransformer/output/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/snapshots/classification.pth')
# classify_model.to('cuda')
# classify_model.eval()
# global_src_corr_points = apply_transform(global_src_corr_points, transform)
# residuals = torch.sqrt(((global_ref_corr_points - global_src_corr_points) ** 2) .sum(1))
# residuals = torch.tensor(residuals)
# ground_truth = torch.lt(residuals , 0.1).float().unsqueeze(1)
# mean = global_src_corr_points_feat.mean()
# var = global_src_corr_points_feat.var()
# src_mean_var_text = "src_mean:{:.5f}, src_var:{:.5f}".format(mean, var)
# global_src_corr_points_feat = (global_src_corr_points_feat - mean) / torch.pow(var + 1e-05,0.5)
# mean = global_ref_corr_points_feat.mean()
# var = global_ref_corr_points_feat.var()
# ref_mean_var_text = " ref_mean:{:.5f}, ref_var:{:.5f} ".format(mean, var)
# global_ref_corr_points_feat = (global_ref_corr_points_feat - mean) / torch.pow(var + 1e-05,0.5)
# # with open("geo_all_middle_text.txt","a+") as f:
# # f.write(src_mean_var_text + ref_mean_var_text)
# global_src_corr_points_feat = global_src_corr_points_feat.unsqueeze(0)
# global_ref_corr_points_feat = global_ref_corr_points_feat.unsqueeze(0)
# corr_points_feat = torch.cat((global_ref_corr_points_feat.transpose(0,1), global_src_corr_points_feat.transpose(0,1)), dim=1)
# corr_points_feat = corr_points_feat.repeat(1,1,2)
# corr_points_feat = corr_points_feat.chunk(16,dim=2)
# corr_points_feat = torch.cat((corr_points_feat),dim=1)
# inputs = Variable(corr_points_feat.unsqueeze(1)).to('cuda')
# target = Variable(ground_truth).to('cuda').squeeze(0)
# out = classify_model(inputs) # 前向传播
# predict = torch.gt(out, 0.5).float()
# total_corr_numbers = predict.shape[0]
# Inliers_number = target.cpu().clone().detach().squeeze(1).sum()
# Outliers_number = target.cpu().clone().detach().squeeze(1).shape[0] - Inliers_number
# predict_1_unmber = predict.cpu().clone().detach().squeeze(1).sum()
# predict_0_unmber = predict.cpu().clone().detach().squeeze(1).shape[0] - predict_1_unmber
# predict_1_and_True_unmber = torch.eq(predict.cpu().clone().detach().squeeze(1) + target.cpu().clone().detach().squeeze(1),2).float().sum()
# predict_1_and_False_unmber = predict_1_unmber - predict_1_and_True_unmber
# predict_0_and_True_unmber = torch.eq(predict.cpu().clone().detach().squeeze(1) + target.cpu().clone().detach().squeeze(1),0).float().sum()
# predict_0_and_False_unmber = predict_0_unmber - predict_0_and_True_unmber
# print("gt_IR:", Inliers_number/total_corr_numbers)
# print("predict_IR:", predict_1_and_True_unmber/predict_1_unmber)
# print("predict_Acc:", (predict_1_and_True_unmber+predict_0_and_True_unmber)/total_corr_numbers)
# with open("test_origin3.txt","a+") as f:
# txtstr = 'gt_IR:{:<.5f}, predict_IR:{:<.5f}, predict_Acc:{:<.5f}, gt_Inliers:{:<.0f}, gt_Outliers:{:<.0f}, predict_1_unmber:{:<.0f}, predict_1_and_True_unmber:{:<.0f}, predict_1_and_False_unmber:{:<.0f}, predict_0_unmber:{:<.0f}, predict_0_and_True_unmber:{:<.0f}, predict_0_and_False_unmber:{:<.0f} \n'.format(Inliers_number/total_corr_numbers, predict_1_and_True_unmber/predict_1_unmber, \
# (predict_1_and_True_unmber+predict_0_and_True_unmber)/total_corr_numbers, \
# Inliers_number, Outliers_number, \
# predict_1_unmber, predict_1_and_True_unmber, predict_1_and_False_unmber, \
# predict_0_unmber, predict_0_and_True_unmber, predict_0_and_False_unmber )
# f.write(txtstr)
# corr_mat[batch_indices, ref_indices, src_indices] = (predict == 1).squeeze()
batch_indices, ref_indices, src_indices = torch.nonzero(corr_mat, as_tuple=True)
global_ref_corr_points = ref_knn_points[batch_indices, ref_indices]
global_src_corr_points = src_knn_points[batch_indices, src_indices]
# global_ref_corr_points_feat = ref_node_corr_knn_feats[batch_indices, ref_indices]
# global_src_corr_points_feat = src_node_corr_knn_feats[batch_indices, src_indices]
global_corr_scores = score_mat[batch_indices, ref_indices, src_indices]
# build verification set
if self.correspondence_limit is not None and global_corr_scores.shape[0] > self.correspondence_limit:
corr_scores, sel_indices = global_corr_scores.topk(k=self.correspondence_limit, largest=True)
ref_corr_points = global_ref_corr_points[sel_indices]
src_corr_points = global_src_corr_points[sel_indices]
# ref_corr_points_feat = global_ref_corr_points_feat[sel_indices]
# src_corr_points_feat = global_src_corr_points_feat[sel_indices]
else:
ref_corr_points = global_ref_corr_points
src_corr_points = global_src_corr_points
# ref_corr_points_feat = global_ref_corr_points_feat
# src_corr_points_feat = global_src_corr_points_feat
corr_scores = global_corr_scores
# compute starting and ending index of each patch correspondence.
# torch.nonzero is row-major, so the correspondences from the same patch correspondence are consecutive.
# find the first occurrence of each batch index, then the chunk of this batch can be obtained.
unique_masks = torch.ne(batch_indices[1:], batch_indices[:-1])
unique_indices = torch.nonzero(unique_masks, as_tuple=True)[0] + 1
unique_indices = unique_indices.detach().cpu().numpy().tolist()
unique_indices = [0] + unique_indices + [batch_indices.shape[0]]
chunks = [
(x, y) for x, y in zip(unique_indices[:-1], unique_indices[1:]) if y - x >= self.correspondence_threshold
]
batch_size = len(chunks)
if batch_size > 0:
# local registration
batch_ref_corr_points, batch_src_corr_points, batch_corr_scores = self.convert_to_batch(
global_ref_corr_points, global_src_corr_points, global_corr_scores, chunks
)
batch_transforms = self.procrustes(batch_src_corr_points, batch_ref_corr_points, batch_corr_scores)
batch_aligned_src_corr_points = apply_transform(src_corr_points.unsqueeze(0), batch_transforms)
batch_corr_residuals = torch.linalg.norm(
ref_corr_points.unsqueeze(0) - batch_aligned_src_corr_points, dim=2
)
batch_inlier_masks = torch.lt(batch_corr_residuals, self.acceptance_radius) # (P, N)
best_index = batch_inlier_masks.sum(dim=1).argmax()
cur_corr_scores = corr_scores * batch_inlier_masks[best_index].float()
else:
# degenerate: initialize transformation with all correspondences
estimated_transform = self.procrustes(src_corr_points, ref_corr_points, corr_scores)
cur_corr_scores = self.recompute_correspondence_scores(
ref_corr_points, src_corr_points, corr_scores, estimated_transform
)
# global refinement
estimated_transform = self.procrustes(src_corr_points, ref_corr_points, cur_corr_scores)
for _ in range(self.num_refinement_steps - 1):
cur_corr_scores = self.recompute_correspondence_scores(
ref_corr_points, src_corr_points, corr_scores, estimated_transform
)
estimated_transform = self.procrustes(src_corr_points, ref_corr_points, cur_corr_scores)
        return global_ref_corr_points, global_src_corr_points, global_corr_scores, estimated_transform  # global_ref_corr_points_feat, global_src_corr_points_feat
def forward(
self,
ref_knn_points,
src_knn_points,
ref_knn_masks,
src_knn_masks,
score_mat,
global_scores,
# ref_node_corr_knn_feats,
# src_node_corr_knn_feats,
# transform
):
r"""Point Matching Module forward propagation with Local-to-Global registration.
Args:
ref_knn_points (Tensor): (B, K, 3)
src_knn_points (Tensor): (B, K, 3)
ref_knn_masks (BoolTensor): (B, K)
src_knn_masks (BoolTensor): (B, K)
score_mat (Tensor): (B, K, K) or (B, K + 1, K + 1), log likelihood
global_scores (Tensor): (B,)
Returns:
ref_corr_points: torch.LongTensor (C, 3)
src_corr_points: torch.LongTensor (C, 3)
corr_scores: torch.Tensor (C,)
estimated_transform: torch.Tensor (4, 4)
"""
score_mat = torch.exp(score_mat)
corr_mat = self.compute_correspondence_matrix(score_mat, ref_knn_masks, src_knn_masks) # (B, K, K)
if self.use_dustbin:
score_mat = score_mat[:, :-1, :-1]
if self.use_global_score:
score_mat = score_mat * global_scores.view(-1, 1, 1)
score_mat = score_mat * corr_mat.float()
ref_corr_points, src_corr_points, corr_scores, estimated_transform = self.local_to_global_registration(
ref_knn_points, src_knn_points, score_mat, corr_mat, #ref_node_corr_knn_feats, src_node_corr_knn_feats, transform
)
        return ref_corr_points, src_corr_points, corr_scores, estimated_transform  # , corr_mat, ref_corr_points_feat, src_corr_points_feat
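# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Smoke test for LocalGlobalRegistration on random data. CUDA is required because the
# module allocates index tensors on GPU internally. confidence_threshold=0.0 and
# mutual=False are assumptions chosen so random scores still yield correspondences.
def _demo_local_global_registration():
    lgr = LocalGlobalRegistration(k=3, acceptance_radius=0.1, mutual=False, confidence_threshold=0.0)
    B, K = 32, 64
    ref_knn_points = torch.rand(B, K, 3).cuda()
    src_knn_points = torch.rand(B, K, 3).cuda()
    ref_knn_masks = torch.ones(B, K, dtype=torch.bool).cuda()
    src_knn_masks = torch.ones(B, K, dtype=torch.bool).cuda()
    score_mat = torch.log_softmax(torch.randn(B, K, K).cuda(), dim=-1)  # log likelihoods
    global_scores = torch.rand(B).cuda()
    ref_corr, src_corr, corr_scores, transform = lgr(
        ref_knn_points, src_knn_points, ref_knn_masks, src_knn_masks, score_mat, global_scores
    )
    # transform: (4, 4) rigid transform aligning src to ref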
| 20,850 | 49.98044 | 411 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/geotransformer/point_matching.py | import torch
import torch.nn as nn
class PointMatching(nn.Module):
def __init__(
self,
k: int,
mutual: bool = True,
confidence_threshold: float = 0.05,
use_dustbin: bool = False,
use_global_score: bool = False,
remove_duplicate: bool = False,
):
r"""Point Matching with Local-to-Global Registration.
Args:
k (int): top-k selection for matching.
mutual (bool=True): mutual or non-mutual matching.
confidence_threshold (float=0.05): ignore matches whose scores are below this threshold.
use_dustbin (bool=False): whether dustbin row/column is used in the score matrix.
use_global_score (bool=False): whether use patch correspondence scores.
"""
super(PointMatching, self).__init__()
self.k = k
self.mutual = mutual
self.confidence_threshold = confidence_threshold
self.use_dustbin = use_dustbin
self.use_global_score = use_global_score
self.remove_duplicate = remove_duplicate
def compute_correspondence_matrix(self, score_mat, ref_knn_masks, src_knn_masks):
r"""Compute matching matrix and score matrix for each patch correspondence."""
mask_mat = torch.logical_and(ref_knn_masks.unsqueeze(2), src_knn_masks.unsqueeze(1))
batch_size, ref_length, src_length = score_mat.shape
batch_indices = torch.arange(batch_size).cuda()
# correspondences from reference side
ref_topk_scores, ref_topk_indices = score_mat.topk(k=self.k, dim=2) # (B, N, K)
ref_batch_indices = batch_indices.view(batch_size, 1, 1).expand(-1, ref_length, self.k) # (B, N, K)
ref_indices = torch.arange(ref_length).cuda().view(1, ref_length, 1).expand(batch_size, -1, self.k) # (B, N, K)
ref_score_mat = torch.zeros_like(score_mat)
ref_score_mat[ref_batch_indices, ref_indices, ref_topk_indices] = ref_topk_scores
ref_corr_mat = torch.gt(ref_score_mat, self.confidence_threshold)
# correspondences from source side
src_topk_scores, src_topk_indices = score_mat.topk(k=self.k, dim=1) # (B, K, N)
src_batch_indices = batch_indices.view(batch_size, 1, 1).expand(-1, self.k, src_length) # (B, K, N)
src_indices = torch.arange(src_length).cuda().view(1, 1, src_length).expand(batch_size, self.k, -1) # (B, K, N)
src_score_mat = torch.zeros_like(score_mat)
src_score_mat[src_batch_indices, src_topk_indices, src_indices] = src_topk_scores
src_corr_mat = torch.gt(src_score_mat, self.confidence_threshold)
# merge results from two sides
if self.mutual:
corr_mat = torch.logical_and(ref_corr_mat, src_corr_mat)
else:
corr_mat = torch.logical_or(ref_corr_mat, src_corr_mat)
if self.use_dustbin:
            corr_mat = corr_mat[:, :-1, :-1]
corr_mat = torch.logical_and(corr_mat, mask_mat)
return corr_mat
def forward(
self,
ref_knn_points,
src_knn_points,
ref_knn_masks,
src_knn_masks,
ref_knn_indices,
src_knn_indices,
score_mat,
global_scores,
):
r"""Point Matching Module forward propagation with Local-to-Global registration.
Args:
ref_knn_points (Tensor): (B, K, 3)
src_knn_points (Tensor): (B, K, 3)
ref_knn_masks (BoolTensor): (B, K)
src_knn_masks (BoolTensor): (B, K)
ref_knn_indices (LongTensor): (B, K)
src_knn_indices (LongTensor): (B, K)
score_mat (Tensor): (B, K, K) or (B, K + 1, K + 1), log likelihood
global_scores (Tensor): (B,)
Returns:
ref_corr_points (Tensor): (C, 3)
src_corr_points (Tensor): (C, 3)
ref_corr_indices (LongTensor): (C,)
src_corr_indices (LongTensor): (C,)
corr_scores (Tensor): (C,)
"""
score_mat = torch.exp(score_mat)
corr_mat = self.compute_correspondence_matrix(score_mat, ref_knn_masks, src_knn_masks) # (B, K, K)
if self.use_dustbin:
score_mat = score_mat[:, :-1, :-1]
if self.use_global_score:
score_mat = score_mat * global_scores.view(-1, 1, 1)
score_mat = score_mat * corr_mat.float()
batch_indices, ref_indices, src_indices = torch.nonzero(corr_mat, as_tuple=True)
ref_corr_indices = ref_knn_indices[batch_indices, ref_indices]
src_corr_indices = src_knn_indices[batch_indices, src_indices]
ref_corr_points = ref_knn_points[batch_indices, ref_indices]
src_corr_points = src_knn_points[batch_indices, src_indices]
corr_scores = score_mat[batch_indices, ref_indices, src_indices]
return ref_corr_points, src_corr_points, ref_corr_indices, src_corr_indices, corr_scores
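# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shape-level smoke test for PointMatching. CUDA is required by the internal index
# tensors; mutual=False and confidence_threshold=0.0 are assumptions so that random
# scores still produce matches.
def _demo_point_matching():
    matcher = PointMatching(k=3, mutual=False, confidence_threshold=0.0)
    B, K = 16, 64
    ref_knn_points = torch.rand(B, K, 3).cuda()
    src_knn_points = torch.rand(B, K, 3).cuda()
    ref_knn_masks = torch.ones(B, K, dtype=torch.bool).cuda()
    src_knn_masks = torch.ones(B, K, dtype=torch.bool).cuda()
    ref_knn_indices = torch.arange(B * K).view(B, K).cuda()
    src_knn_indices = torch.arange(B * K).view(B, K).cuda()
    score_mat = torch.log_softmax(torch.randn(B, K, K).cuda(), dim=-1)
    global_scores = torch.rand(B).cuda()
    ref_pts, src_pts, ref_idx, src_idx, scores = matcher(
        ref_knn_points, src_knn_points, ref_knn_masks, src_knn_masks,
        ref_knn_indices, src_knn_indices, score_mat, global_scores,
    )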
| 4,906 | 41.301724 | 120 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/geotransformer/superpoint_matching.py | import torch
import torch.nn as nn
from geotransformer.modules.ops import pairwise_distance
class SuperPointMatching(nn.Module):
def __init__(self, num_correspondences, dual_normalization=True):
super(SuperPointMatching, self).__init__()
self.num_correspondences = num_correspondences
self.dual_normalization = dual_normalization
def torch_farthest_point_sample(self, xyz, npoint):
"""
Input:
xyz: pointcloud data, [B, N, 3]
npoint: number of samples
Return:
centroids: sampled pointcloud index, [B, npoint]
"""
device = xyz.device
B, N, C = xyz.shape
centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
distance = torch.ones(B, N).to(device) * 1e10
farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)
batch_indices = torch.arange(B, dtype=torch.long).to(device)
for i in range(npoint):
centroids[:, i] = farthest
centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
dist = torch.sum((xyz - centroid) ** 2, -1)
mask = dist < distance
distance[mask] = dist[mask]
farthest = torch.max(distance, -1)[1]
return centroids
def forward(self, ref_feats, src_feats, ref_points, src_points, ref_masks=None, src_masks=None):
r"""Extract superpoint correspondences.
Args:
            ref_feats (Tensor): features of the superpoints in reference point cloud.
            src_feats (Tensor): features of the superpoints in source point cloud.
            ref_points (Tensor): coordinates of the superpoints in reference point cloud.
            src_points (Tensor): coordinates of the superpoints in source point cloud.
            ref_masks (BoolTensor=None): masks of the superpoints in reference point cloud (False if empty).
            src_masks (BoolTensor=None): masks of the superpoints in source point cloud (False if empty).
Returns:
ref_corr_indices (LongTensor): indices of the corresponding superpoints in reference point cloud.
src_corr_indices (LongTensor): indices of the corresponding superpoints in source point cloud.
corr_scores (Tensor): scores of the correspondences.
"""
if ref_masks is None:
ref_masks = torch.ones(size=(ref_feats.shape[0],), dtype=torch.bool).cuda()
if src_masks is None:
src_masks = torch.ones(size=(src_feats.shape[0],), dtype=torch.bool).cuda()
# remove empty patch
ref_indices = torch.nonzero(ref_masks, as_tuple=True)[0]
src_indices = torch.nonzero(src_masks, as_tuple=True)[0]
ref_feats = ref_feats[ref_indices]
src_feats = src_feats[src_indices]
ref_points = ref_points[ref_indices]
src_points = src_points[src_indices]
# select top-k proposals
matching_scores = torch.exp(-pairwise_distance(ref_feats, src_feats, normalized=True))
if self.dual_normalization:
ref_matching_scores = matching_scores / matching_scores.sum(dim=1, keepdim=True)
src_matching_scores = matching_scores / matching_scores.sum(dim=0, keepdim=True)
matching_scores = ref_matching_scores * src_matching_scores
num_correspondences = min(self.num_correspondences, matching_scores.numel())
corr_scores, corr_indices = matching_scores.view(-1).topk(k=num_correspondences, largest=True)
ref_sel_indices = corr_indices // matching_scores.shape[1]
src_sel_indices = corr_indices % matching_scores.shape[1]
# farthest point select
ref_points = ref_points[ref_sel_indices]
src_points = src_points[src_sel_indices]
torch_fps_ref_indices = self.torch_farthest_point_sample(ref_points.unsqueeze(0), ref_indices.shape[0])
torch_fps_src_indices = self.torch_farthest_point_sample(src_points.unsqueeze(0), src_indices.shape[0])
torch_fps_ref_indices_mask = torch.eq(torch_fps_ref_indices,0)
torch_fps_src_indices_mask = torch.eq(torch_fps_src_indices,0)
torch_fps_ref_indices = torch_fps_ref_indices[~torch_fps_ref_indices_mask]
torch_fps_src_indices = torch_fps_src_indices[~torch_fps_src_indices_mask]
torch_fps_src_indices = torch_fps_src_indices.resize_(1,torch_fps_src_indices.shape[0]+1)
torch_fps_src_indices = torch_fps_src_indices.squeeze(0)
torch_fps_src_indices[-1] = 0
torch_fps_ref_indices = torch_fps_ref_indices.resize_(1,torch_fps_ref_indices.shape[0]+1)
torch_fps_ref_indices = torch_fps_ref_indices.squeeze(0)
torch_fps_ref_indices[-1] = 0
if torch_fps_ref_indices.shape[0] > torch_fps_src_indices.shape[0]:
fps_indices = torch_fps_ref_indices
else:
fps_indices = torch_fps_src_indices
if fps_indices.shape[0] >= 256:
torch_src_sel_indices = src_sel_indices[fps_indices[:256]]
torch_ref_sel_indices = ref_sel_indices[fps_indices[:256]]
else:
torch_src_sel_indices = src_sel_indices[fps_indices]
torch_ref_sel_indices = ref_sel_indices[fps_indices]
fps_range = fps_indices.shape[0]
src_sel_indices_matrix = src_sel_indices.repeat(fps_range,1)
src_fps_indices_matrix = torch_src_sel_indices.unsqueeze(1)
corr_info = src_sel_indices_matrix - src_fps_indices_matrix
corr_info = torch.eq(corr_info,0).float()
known_corr_reject = torch.argmax(corr_info,dim=1)
corr_info[:,known_corr_reject] = 0.0
corr_info = torch.nonzero(corr_info)
sel_corr_info = corr_info[:,1]
sel_corr_sort, _ = sel_corr_info.sort()
left_sel_indices = ref_sel_indices[sel_corr_sort[:256-fps_range]]
torch_src_sel_indices = torch_src_sel_indices.resize_(256)
torch_ref_sel_indices = torch_ref_sel_indices.resize_(256)
torch_src_sel_indices[fps_range:] = src_sel_indices[sel_corr_sort[:256-fps_range]]
torch_ref_sel_indices[fps_range:] = ref_sel_indices[sel_corr_sort[:256-fps_range]]
ref_corr_indices = ref_indices[torch_ref_sel_indices]
src_corr_indices = src_indices[torch_src_sel_indices]
corr_scores = matching_scores[torch_ref_sel_indices,torch_src_sel_indices]
# recover original indices
# ref_corr_indices = ref_indices[ref_sel_indices]
# src_corr_indices = src_indices[src_sel_indices]
return ref_corr_indices, src_corr_indices, corr_scores
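# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# The farthest-point-sampling helper is self-contained and device-agnostic, so it can
# be exercised on its own; the full forward() additionally expects CUDA tensors and
# enough candidate correspondences.
def _demo_farthest_point_sample():
    matcher = SuperPointMatching(num_correspondences=256)
    xyz = torch.rand(1, 1000, 3)  # (B, N, 3)
    indices = matcher.torch_farthest_point_sample(xyz, 128)  # (1, 128) sampled indices
    assert indices.shape == (1, 128)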
| 6,514 | 48.732824 | 119 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/geotransformer/superpoint_target.py | import numpy as np
import torch
import torch.nn as nn
class SuperPointTargetGenerator(nn.Module):
def __init__(self, num_targets, overlap_threshold):
super(SuperPointTargetGenerator, self).__init__()
self.num_targets = num_targets
self.overlap_threshold = overlap_threshold
@torch.no_grad()
def forward(self, gt_corr_indices, gt_corr_overlaps):
r"""Generate ground truth superpoint (patch) correspondences.
Randomly select "num_targets" correspondences whose overlap is above "overlap_threshold".
Args:
gt_corr_indices (LongTensor): ground truth superpoint correspondences (N, 2)
gt_corr_overlaps (Tensor): ground truth superpoint correspondences overlap (N,)
Returns:
gt_ref_corr_indices (LongTensor): selected superpoints in reference point cloud.
gt_src_corr_indices (LongTensor): selected superpoints in source point cloud.
            gt_corr_overlaps (Tensor): overlaps of the selected superpoint correspondences.
"""
gt_corr_masks = torch.gt(gt_corr_overlaps, self.overlap_threshold)
gt_corr_overlaps = gt_corr_overlaps[gt_corr_masks]
gt_corr_indices = gt_corr_indices[gt_corr_masks]
if gt_corr_indices.shape[0] > self.num_targets:
indices = np.arange(gt_corr_indices.shape[0])
sel_indices = np.random.choice(indices, self.num_targets, replace=False)
sel_indices = torch.from_numpy(sel_indices).cuda()
gt_corr_indices = gt_corr_indices[sel_indices]
gt_corr_overlaps = gt_corr_overlaps[sel_indices]
gt_ref_corr_indices = gt_corr_indices[:, 0]
gt_src_corr_indices = gt_corr_indices[:, 1]
return gt_ref_corr_indices, gt_src_corr_indices, gt_corr_overlaps
| 1,812 | 42.166667 | 97 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/kpconv/__init__.py | from geotransformer.modules.kpconv.kpconv import KPConv
from geotransformer.modules.kpconv.modules import (
ConvBlock,
ResidualBlock,
UnaryBlock,
LastUnaryBlock,
GroupNorm,
KNNInterpolate,
GlobalAvgPool,
MaxPool,
)
from geotransformer.modules.kpconv.functional import nearest_upsample, global_avgpool, maxpool
| 342 | 25.384615 | 94 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/kpconv/functional.py | import torch
from geotransformer.modules.ops import index_select
def nearest_upsample(x, upsample_indices):
"""Pools features from the closest neighbors.
WARNING: this function assumes the neighbors are ordered.
Args:
x: [n1, d] features matrix
upsample_indices: [n2, max_num] Only the first column is used for pooling
Returns:
x: [n2, d] pooled features matrix
"""
# Add a last row with minimum features for shadow pools
x = torch.cat((x, torch.zeros_like(x[:1, :])), 0)
# Get features for each pooling location [n2, d]
x = index_select(x, upsample_indices[:, 0], dim=0)
return x
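# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# nearest_upsample copies each query point's first (closest) neighbor's features. The
# indices below are random stand-ins; in the pipeline they come from the precomputed,
# ordered neighbor lists.
def _demo_nearest_upsample():
    x = torch.randn(10, 32)                           # features of 10 coarse points
    upsample_indices = torch.randint(0, 10, (25, 5))  # (n2, max_num) neighbor indices
    up = nearest_upsample(x, upsample_indices)        # (25, 32)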
def knn_interpolate(s_feats, q_points, s_points, neighbor_indices, k, eps=1e-8):
r"""K-NN interpolate.
WARNING: this function assumes the neighbors are ordered.
Args:
s_feats (Tensor): (M, C)
q_points (Tensor): (N, 3)
s_points (Tensor): (M, 3)
neighbor_indices (LongTensor): (N, X)
k (int)
eps (float)
Returns:
q_feats (Tensor): (N, C)
"""
s_points = torch.cat((s_points, torch.zeros_like(s_points[:1, :])), 0) # (M + 1, 3)
s_feats = torch.cat((s_feats, torch.zeros_like(s_feats[:1, :])), 0) # (M + 1, C)
knn_indices = neighbor_indices[:, :k].contiguous()
knn_points = index_select(s_points, knn_indices, dim=0) # (N, k, 3)
knn_feats = index_select(s_feats, knn_indices, dim=0) # (N, k, C)
knn_sq_distances = (q_points.unsqueeze(1) - knn_points).pow(2).sum(dim=-1) # (N, k)
knn_masks = torch.ne(knn_indices, s_points.shape[0] - 1).float() # (N, k)
knn_weights = knn_masks / (knn_sq_distances + eps) # (N, k)
knn_weights = knn_weights / (knn_weights.sum(dim=1, keepdim=True) + eps) # (N, k)
q_feats = (knn_feats * knn_weights.unsqueeze(-1)).sum(dim=1) # (N, C)
return q_feats
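# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# knn_interpolate expects neighbor indices sorted by distance; here they are built by
# a brute-force sort as a stand-in for the precomputed neighbor lists.
def _demo_knn_interpolate():
    s_points, s_feats = torch.randn(50, 3), torch.randn(50, 16)
    q_points = torch.randn(80, 3)
    neighbor_indices = torch.cdist(q_points, s_points).argsort(dim=1)[:, :8]
    q_feats = knn_interpolate(s_feats, q_points, s_points, neighbor_indices, k=3)
    # q_feats: (80, 16), inverse-squared-distance weighted average of 3 nearest features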
def maxpool(x, neighbor_indices):
"""Max pooling from neighbors.
Args:
x: [n1, d] features matrix
neighbor_indices: [n2, max_num] pooling indices
Returns:
pooled_feats: [n2, d] pooled features matrix
"""
x = torch.cat((x, torch.zeros_like(x[:1, :])), 0)
neighbor_feats = index_select(x, neighbor_indices, dim=0)
pooled_feats = neighbor_feats.max(1)[0]
return pooled_feats
def global_avgpool(x, batch_lengths):
"""Global average pooling over batch.
Args:
x: [N, D] input features
batch_lengths: [B] list of batch lengths
Returns:
x: [B, D] averaged features
"""
# Loop over the clouds of the batch
averaged_features = []
i0 = 0
for b_i, length in enumerate(batch_lengths):
# Average features for each batch cloud
averaged_features.append(torch.mean(x[i0 : i0 + length], dim=0))
# Increment for next cloud
i0 += length
# Average features in each batch
x = torch.stack(averaged_features)
return x
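# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# maxpool reduces over per-point neighborhoods, global_avgpool over whole stacked
# clouds; the sizes below are arbitrary.
def _demo_pooling():
    x = torch.randn(12, 16)
    neighbor_indices = torch.randint(0, 12, (6, 4))
    pooled = maxpool(x, neighbor_indices)  # (6, 16)
    avg = global_avgpool(x, [7, 5])        # (2, 16): two clouds of 7 and 5 points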
| 2,918 | 31.076923 | 88 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/kpconv/kernel_points.py | #
#
# 0=================================0
# | Kernel Point Convolutions |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Functions handling the disposition of kernel points.
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 11/06/2018
#
import os.path as osp
from os import makedirs
from os.path import join, exists
import matplotlib.pyplot as plt
import numpy as np
import open3d as o3d
# ------------------------------------------------------------------------------------------
#
# Functions
# \***************/
#
#
def create_3D_rotations(axis, angle):
"""
Create rotation matrices from a list of axes and angles. Code from wikipedia on quaternions
:param axis: float32[N, 3]
:param angle: float32[N,]
:return: float32[N, 3, 3]
"""
t1 = np.cos(angle)
t2 = 1 - t1
t3 = axis[:, 0] * axis[:, 0]
t6 = t2 * axis[:, 0]
t7 = t6 * axis[:, 1]
t8 = np.sin(angle)
t9 = t8 * axis[:, 2]
t11 = t6 * axis[:, 2]
t12 = t8 * axis[:, 1]
t15 = axis[:, 1] * axis[:, 1]
t19 = t2 * axis[:, 1] * axis[:, 2]
t20 = t8 * axis[:, 0]
t24 = axis[:, 2] * axis[:, 2]
R = np.stack(
[t1 + t2 * t3, t7 - t9, t11 + t12, t7 + t9, t1 + t2 * t15, t19 - t20, t11 - t12, t19 + t20, t1 + t2 * t24],
axis=1,
)
return np.reshape(R, (-1, 3, 3))
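# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Sanity check for create_3D_rotations: a 90-degree rotation about z should send
# the x axis to the y axis and be orthogonal.
def _demo_create_3D_rotations():
    R = create_3D_rotations(np.array([[0.0, 0.0, 1.0]]), np.array([np.pi / 2]))
    assert np.allclose(R[0] @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0], atol=1e-6)
    assert np.allclose(R[0] @ R[0].T, np.eye(3), atol=1e-6)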
def spherical_Lloyd(
radius,
num_cells,
dimension=3,
fixed='center',
approximation='monte-carlo',
approx_n=5000,
max_iter=500,
momentum=0.9,
verbose=0,
):
"""
    Creation of kernel points via Lloyd's algorithm. We use an approximation of the algorithm, and compute the Voronoi
    cell centers with a discretization of space. The exact formula is not trivial when parts of the sphere form the cell sides.
:param radius: Radius of the kernels
:param num_cells: Number of cell (kernel points) in the Voronoi diagram.
:param dimension: dimension of the space
:param fixed: fix position of certain kernel points ('none', 'center' or 'verticals')
:param approximation: Approximation method for Lloyd's algorithm ('discretization', 'monte-carlo')
:param approx_n: Number of point used for approximation.
    :param max_iter: Maximum number of iterations for the algorithm.
:param momentum: Momentum of the low pass filter smoothing kernel point positions
:param verbose: display option
:return: points [num_kernels, num_points, dimension]
"""
#######################
# Parameters definition
#######################
# Radius used for optimization (points are rescaled afterwards)
radius0 = 1.0
#######################
# Kernel initialization
#######################
# Random kernel points (Uniform distribution in a sphere)
kernel_points = np.zeros((0, dimension))
while kernel_points.shape[0] < num_cells:
new_points = np.random.rand(num_cells, dimension) * 2 * radius0 - radius0
kernel_points = np.vstack((kernel_points, new_points))
d2 = np.sum(np.power(kernel_points, 2), axis=1)
kernel_points = kernel_points[np.logical_and(d2 < radius0 ** 2, (0.9 * radius0) ** 2 < d2), :]
kernel_points = kernel_points[:num_cells, :].reshape((num_cells, -1))
# Optional fixing
if fixed == 'center':
kernel_points[0, :] *= 0
if fixed == 'verticals':
kernel_points[:3, :] *= 0
kernel_points[1, -1] += 2 * radius0 / 3
kernel_points[2, -1] -= 2 * radius0 / 3
##############################
# Approximation initialization
##############################
# Initialize figure
if verbose > 1:
fig = plt.figure()
    # Initialize discretization if this method is chosen
if approximation == 'discretization':
side_n = int(np.floor(approx_n ** (1.0 / dimension)))
dl = 2 * radius0 / side_n
coords = np.arange(-radius0 + dl / 2, radius0, dl)
if dimension == 2:
x, y = np.meshgrid(coords, coords)
X = np.vstack((np.ravel(x), np.ravel(y))).T
elif dimension == 3:
x, y, z = np.meshgrid(coords, coords, coords)
X = np.vstack((np.ravel(x), np.ravel(y), np.ravel(z))).T
elif dimension == 4:
x, y, z, t = np.meshgrid(coords, coords, coords, coords)
X = np.vstack((np.ravel(x), np.ravel(y), np.ravel(z), np.ravel(t))).T
else:
raise ValueError('Unsupported dimension (max is 4)')
elif approximation == 'monte-carlo':
X = np.zeros((0, dimension))
else:
raise ValueError('Wrong approximation method chosen: "{:s}"'.format(approximation))
# Only points inside the sphere are used
d2 = np.sum(np.power(X, 2), axis=1)
X = X[d2 < radius0 * radius0, :]
#####################
# Kernel optimization
#####################
# Warning if at least one kernel point has no cell
warning = False
# moving vectors of kernel points saved to detect convergence
max_moves = np.zeros((0,))
for iter in range(max_iter):
# In the case of monte-carlo, renew the sampled points
if approximation == 'monte-carlo':
X = np.random.rand(approx_n, dimension) * 2 * radius0 - radius0
d2 = np.sum(np.power(X, 2), axis=1)
X = X[d2 < radius0 * radius0, :]
# Get the distances matrix [n_approx, K, dim]
differences = np.expand_dims(X, 1) - kernel_points
sq_distances = np.sum(np.square(differences), axis=2)
# Compute cell centers
cell_inds = np.argmin(sq_distances, axis=1)
centers = []
for c in range(num_cells):
bool_c = cell_inds == c
num_c = np.sum(bool_c.astype(np.int32))
if num_c > 0:
centers.append(np.sum(X[bool_c, :], axis=0) / num_c)
else:
warning = True
centers.append(kernel_points[c])
# Update kernel points with a low-pass filter to smooth the Monte Carlo estimate
centers = np.vstack(centers)
moves = (1 - momentum) * (centers - kernel_points)
kernel_points += moves
# Check moves for convergence
max_moves = np.append(max_moves, np.max(np.linalg.norm(moves, axis=1)))
# Optional fixing
if fixed == 'center':
kernel_points[0, :] *= 0
if fixed == 'verticals':
kernel_points[0, :] *= 0
kernel_points[:3, :-1] *= 0
if verbose:
print('iter {:5d} / max move = {:f}'.format(iter, np.max(np.linalg.norm(moves, axis=1))))
if warning:
print('WARNING: at least one point has no cell')  # note: `bcolors` is not imported in this module, so plain output is used
if verbose > 1:
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=cell_inds, s=20.0, marker='.', cmap=plt.get_cmap('tab20'))
# plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0,
# marker='+', cmap=plt.get_cmap('tab20'))
plt.plot(kernel_points[:, 0], kernel_points[:, 1], 'k+')
circle = plt.Circle((0, 0), radius0, color='r', fill=False)
fig.axes[0].add_artist(circle)
fig.axes[0].set_xlim((-radius0 * 1.1, radius0 * 1.1))
fig.axes[0].set_ylim((-radius0 * 1.1, radius0 * 1.1))
fig.axes[0].set_aspect('equal')
plt.draw()
plt.pause(0.001)
plt.show(block=False)
###################
# User verification
###################
# Show the convergence to ask user if this kernel is correct
if verbose:
if dimension == 2:
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10.4, 4.8])
ax1.plot(max_moves)
ax2.scatter(X[:, 0], X[:, 1], c=cell_inds, s=20.0, marker='.', cmap=plt.get_cmap('tab20'))
# plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0,
# marker='+', cmap=plt.get_cmap('tab20'))
ax2.plot(kernel_points[:, 0], kernel_points[:, 1], 'k+')
circle = plt.Circle((0, 0), radius0, color='r', fill=False)
ax2.add_artist(circle)
ax2.set_xlim((-radius0 * 1.1, radius0 * 1.1))
ax2.set_ylim((-radius0 * 1.1, radius0 * 1.1))
ax2.set_aspect('equal')
plt.title('Check if kernel is correct.')
plt.draw()
plt.show()
if dimension > 2:
plt.figure()
plt.plot(max_moves)
plt.title('Check if kernel is correct.')
plt.show()
# Rescale kernels with real radius
return kernel_points * radius
def kernel_point_optimization_debug(
radius, num_points, num_kernels=1, dimension=3, fixed='center', ratio=0.66, verbose=0
):
"""
Create kernel points via optimization of potentials.
:param radius: Radius of the kernels
:param num_points: number of points composing each kernel
:param num_kernels: number of wanted kernels
:param dimension: dimension of the space
:param fixed: fix position of certain kernel points ('none', 'center' or 'verticals')
:param ratio: ratio of the radius where you want the kernels points to be placed
:param verbose: display option
:return: points [num_kernels, num_points, dimension]
"""
#######################
# Parameters definition
#######################
# Radius used for optimization (points are rescaled afterwards)
radius0 = 1
diameter0 = 2
# Factor multiplying gradients for moving points (~learning rate)
moving_factor = 1e-2
continuous_moving_decay = 0.9995
# Gradient threshold to stop optimization
thresh = 1e-5
# Gradient clipping value
clip = 0.05 * radius0
#######################
# Kernel initialization
#######################
# Random kernel points
kernel_points = np.random.rand(num_kernels * num_points - 1, dimension) * diameter0 - radius0
while kernel_points.shape[0] < num_kernels * num_points:
new_points = np.random.rand(num_kernels * num_points - 1, dimension) * diameter0 - radius0
kernel_points = np.vstack((kernel_points, new_points))
d2 = np.sum(np.power(kernel_points, 2), axis=1)
kernel_points = kernel_points[d2 < 0.5 * radius0 * radius0, :]
kernel_points = kernel_points[: num_kernels * num_points, :].reshape((num_kernels, num_points, -1))
# Optional fixing
if fixed == 'center':
kernel_points[:, 0, :] *= 0
if fixed == 'verticals':
kernel_points[:, :3, :] *= 0
kernel_points[:, 1, -1] += 2 * radius0 / 3
kernel_points[:, 2, -1] -= 2 * radius0 / 3
#####################
# Kernel optimization
#####################
# Initialize figure
if verbose > 1:
fig = plt.figure()
saved_gradient_norms = np.zeros((10000, num_kernels))
old_gradient_norms = np.zeros((num_kernels, num_points))
for iter in range(10000):
# Compute gradients
# *****************
# Derivative of the sum of potentials of all points
A = np.expand_dims(kernel_points, axis=2)
B = np.expand_dims(kernel_points, axis=1)
interd2 = np.sum(np.power(A - B, 2), axis=-1)
inter_grads = (A - B) / (np.power(np.expand_dims(interd2, -1), 3 / 2) + 1e-6)
inter_grads = np.sum(inter_grads, axis=1)
# Derivative of the radius potential
circle_grads = 10 * kernel_points
# All gradients
gradients = inter_grads + circle_grads
if fixed == 'verticals':
gradients[:, 1:3, :-1] = 0
# Stop condition
# **************
# Compute norm of gradients
gradients_norms = np.sqrt(np.sum(np.power(gradients, 2), axis=-1))
saved_gradient_norms[iter, :] = np.max(gradients_norms, axis=1)
# Stop if the gradients of all moving points are fixed (low gradient diff)
if fixed == 'center' and np.max(np.abs(old_gradient_norms[:, 1:] - gradients_norms[:, 1:])) < thresh:
break
elif fixed == 'verticals' and np.max(np.abs(old_gradient_norms[:, 3:] - gradients_norms[:, 3:])) < thresh:
break
elif np.max(np.abs(old_gradient_norms - gradients_norms)) < thresh:
break
old_gradient_norms = gradients_norms
# Move points
# ***********
# Clip gradient to get moving dists
moving_dists = np.minimum(moving_factor * gradients_norms, clip)
# Fix central point
if fixed == 'center':
moving_dists[:, 0] = 0
if fixed == 'verticals':
moving_dists[:, 0] = 0
# Move points
kernel_points -= np.expand_dims(moving_dists, -1) * gradients / np.expand_dims(gradients_norms + 1e-6, -1)
if verbose:
print('iter {:5d} / max grad = {:f}'.format(iter, np.max(gradients_norms[:, 3:])))
if verbose > 1:
plt.clf()
plt.plot(kernel_points[0, :, 0], kernel_points[0, :, 1], '.')
circle = plt.Circle((0, 0), radius, color='r', fill=False)
fig.axes[0].add_artist(circle)
fig.axes[0].set_xlim((-radius * 1.1, radius * 1.1))
fig.axes[0].set_ylim((-radius * 1.1, radius * 1.1))
fig.axes[0].set_aspect('equal')
plt.draw()
plt.pause(0.001)
plt.show(block=False)
print(moving_factor)
# moving factor decay
moving_factor *= continuous_moving_decay
# Rescale radius to fit the wanted ratio of radius
r = np.sqrt(np.sum(np.power(kernel_points, 2), axis=-1))
kernel_points *= ratio / np.mean(r[:, 1:])
# Rescale kernels with real radius
return kernel_points * radius, saved_gradient_norms
def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False):
# Kernel directory
# kernel_dir = osp.join('kernels', 'dispositions')
kernel_dir = osp.join(osp.dirname(osp.abspath(__file__)), 'dispositions')
if not exists(kernel_dir):
makedirs(kernel_dir)
# Too many points, switch to Lloyd's algorithm
if num_kpoints > 30:
lloyd = True
# Kernel_file
kernel_file = join(kernel_dir, 'k_{:03d}_{:s}_{:d}D.ply'.format(num_kpoints, fixed, dimension))
# Check if already done
if not exists(kernel_file):
if lloyd:
# Create kernels
kernel_points = spherical_Lloyd(1.0, num_kpoints, dimension=dimension, fixed=fixed, verbose=0)
else:
# Create kernels
kernel_points, grad_norms = kernel_point_optimization_debug(
1.0, num_kpoints, num_kernels=100, dimension=dimension, fixed=fixed, verbose=0
)
# Find best candidate
best_k = np.argmin(grad_norms[-1, :])
# Save points
kernel_points = kernel_points[best_k, :, :]
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(kernel_points)
o3d.io.write_point_cloud(kernel_file, pcd)
else:
pcd = o3d.io.read_point_cloud(kernel_file)
kernel_points = np.array(pcd.points).astype(np.float32)
# Random rotations for the kernel
# N.B. 4D random rotations not supported yet
R = np.eye(dimension)
theta = np.random.rand() * 2 * np.pi
if dimension == 2:
if fixed != 'vertical':
c, s = np.cos(theta), np.sin(theta)
R = np.array([[c, -s], [s, c]], dtype=np.float32)
elif dimension == 3:
if fixed != 'vertical':
c, s = np.cos(theta), np.sin(theta)
R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=np.float32)
else:
phi = (np.random.rand() - 0.5) * np.pi
# Create the first vector in Cartesian coordinates
u = np.array([np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)])
# Choose a random rotation angle
alpha = np.random.rand() * 2 * np.pi
# Create the rotation matrix with this vector and angle
R = create_3D_rotations(np.reshape(u, (1, -1)), np.reshape(alpha, (1, -1)))[0]
R = R.astype(np.float32)
# Add a small noise
kernel_points = kernel_points + np.random.normal(scale=0.01, size=kernel_points.shape)
# Scale kernels
kernel_points = radius * kernel_points
# Rotate kernels
kernel_points = np.matmul(kernel_points, R)
return kernel_points.astype(np.float32)
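# Illustrative sketch (added for exposition; not part of the original code):
# a hypothetical call showing typical load_kernels usage. The first call
# generates the disposition, caches it as a .ply file (requires open3d) and
# may be slow; later calls read the cache. A fresh random rotation and small
# noise are applied on every call.
def _demo_load_kernels():
    kernels = load_kernels(1.0, 15, dimension=3, fixed='center')
    print(kernels.shape, kernels.dtype)  # (15, 3) float32, scaled by `radius`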
| 16,726 | 35.682018 | 120 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/kpconv/kpconv.py | import math
import torch
import torch.nn as nn
from geotransformer.modules.ops import index_select
from geotransformer.modules.kpconv.kernel_points import load_kernels
class KPConv(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
radius,
sigma,
bias=False,
dimension=3,
inf=1e6,
eps=1e-9,
):
"""Initialize parameters for KPConv.
Modified from [KPConv-PyTorch](https://github.com/HuguesTHOMAS/KPConv-PyTorch).
Deformable KPConv is not supported.
Args:
in_channels: dimension of input features.
out_channels: dimension of output features.
kernel_size: Number of kernel points.
radius: radius used for kernel point init.
sigma: influence radius of each kernel point.
bias: use bias or not (default: False)
dimension: dimension of the point space.
inf: value of infinity to generate the padding point
eps: epsilon for gaussian influence
"""
super(KPConv, self).__init__()
# Save parameters
self.kernel_size = kernel_size
self.in_channels = in_channels
self.out_channels = out_channels
self.radius = radius
self.sigma = sigma
self.dimension = dimension
self.inf = inf
self.eps = eps
# Initialize weights
self.weights = nn.Parameter(torch.zeros(self.kernel_size, in_channels, out_channels))
if bias:
self.bias = nn.Parameter(torch.zeros(self.out_channels))
else:
self.register_parameter('bias', None)
# Reset parameters
self.reset_parameters()
# Initialize kernel points
kernel_points = self.initialize_kernel_points() # (N, 3)
self.register_buffer('kernel_points', kernel_points)
def reset_parameters(self):
nn.init.kaiming_uniform_(self.weights, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weights)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def initialize_kernel_points(self):
"""Initialize the kernel point positions in a sphere."""
kernel_points = load_kernels(self.radius, self.kernel_size, dimension=self.dimension, fixed='center')
return torch.from_numpy(kernel_points).float()
def forward(self, s_feats, q_points, s_points, neighbor_indices):
r"""KPConv forward.
Args:
s_feats (Tensor): (N, C_in)
q_points (Tensor): (M, 3)
s_points (Tensor): (N, 3)
neighbor_indices (LongTensor): (M, H)
Returns:
q_feats (Tensor): (M, C_out)
"""
s_points = torch.cat([s_points, torch.zeros_like(s_points[:1, :]) + self.inf], 0) # (N, 3) -> (N+1, 3)
neighbors = index_select(s_points, neighbor_indices, dim=0) # (N+1, 3) -> (M, H, 3)
neighbors = neighbors - q_points.unsqueeze(1) # (M, H, 3)
# Get Kernel point influences
neighbors = neighbors.unsqueeze(2) # (M, H, 3) -> (M, H, 1, 3)
differences = neighbors - self.kernel_points # (M, H, 1, 3) x (K, 3) -> (M, H, K, 3)
sq_distances = torch.sum(differences ** 2, dim=3) # (M, H, K)
neighbor_weights = torch.clamp(1 - torch.sqrt(sq_distances) / self.sigma, min=0.0) # (M, H, K)
neighbor_weights = torch.transpose(neighbor_weights, 1, 2) # (M, H, K) -> (M, K, H)
# apply neighbor weights
s_feats = torch.cat((s_feats, torch.zeros_like(s_feats[:1, :])), 0) # (N, C) -> (N+1, C)
neighbor_feats = index_select(s_feats, neighbor_indices, dim=0) # (N+1, C) -> (M, H, C)
weighted_feats = torch.matmul(neighbor_weights, neighbor_feats) # (M, K, H) x (M, H, C) -> (M, K, C)
# apply convolutional weights
weighted_feats = weighted_feats.permute(1, 0, 2) # (M, K, C) -> (K, M, C)
kernel_outputs = torch.matmul(weighted_feats, self.weights) # (K, M, C) x (K, C, C_out) -> (K, M, C_out)
output_feats = torch.sum(kernel_outputs, dim=0, keepdim=False) # (K, M, C_out) -> (M, C_out)
# normalization
neighbor_feats_sum = torch.sum(neighbor_feats, dim=-1)
neighbor_num = torch.sum(torch.gt(neighbor_feats_sum, 0.0), dim=-1)
neighbor_num = torch.max(neighbor_num, torch.ones_like(neighbor_num))
output_feats = output_feats / neighbor_num.unsqueeze(1)
# add bias
if self.bias is not None:
output_feats = output_feats + self.bias
return output_feats
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'kernel_size: {}'.format(self.kernel_size)
format_string += ', in_channels: {}'.format(self.in_channels)
format_string += ', out_channels: {}'.format(self.out_channels)
format_string += ', radius: {:g}'.format(self.radius)
format_string += ', sigma: {:g}'.format(self.sigma)
format_string += ', bias: {}'.format(self.bias is not None)
format_string += ')'
return format_string
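# Illustrative sketch (added for exposition; not part of the original code):
# a hypothetical helper exercising KPConv on random CPU tensors. A neighbor
# index equal to N (the number of support points) marks a padded/missing
# neighbor, matching the convention in forward(). Constructing the module
# triggers kernel point generation via load_kernels, which can be slow on the
# first call and requires open3d for caching.
def _demo_kpconv():
    q_points = torch.rand(128, 3)
    s_points = torch.rand(256, 3)
    s_feats = torch.rand(256, 32)
    neighbor_indices = torch.randint(0, 257, (128, 16))  # 256 = padding index
    conv = KPConv(32, 64, kernel_size=15, radius=0.1, sigma=0.06)
    print(conv(s_feats, q_points, s_points, neighbor_indices).shape)  # (128, 64)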
| 5,257 | 38.238806 | 113 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/kpconv/modules.py | import torch
import torch.nn as nn
from geotransformer.modules.kpconv.functional import maxpool, nearest_upsample, global_avgpool, knn_interpolate
from geotransformer.modules.kpconv.kpconv import KPConv
class KNNInterpolate(nn.Module):
def __init__(self, k, eps=1e-8):
super(KNNInterpolate, self).__init__()
self.k = k
self.eps = eps
def forward(self, s_feats, q_points, s_points, neighbor_indices):
if self.k == 1:
return nearest_upsample(s_feats, neighbor_indices)
else:
return knn_interpolate(s_feats, q_points, s_points, neighbor_indices, self.k, eps=self.eps)
class MaxPool(nn.Module):
@staticmethod
def forward(s_feats, neighbor_indices):
return maxpool(s_feats, neighbor_indices)
class GlobalAvgPool(nn.Module):
@staticmethod
def forward(feats, lengths):
return global_avgpool(feats, lengths)
class GroupNorm(nn.Module):
def __init__(self, num_groups, num_channels):
r"""Initialize a group normalization block.
Args:
num_groups: number of groups
num_channels: feature dimension
"""
super(GroupNorm, self).__init__()
self.num_groups = num_groups
self.num_channels = num_channels
self.norm = nn.GroupNorm(self.num_groups, self.num_channels)
def forward(self, x):
x = x.transpose(0, 1).unsqueeze(0) # (N, C) -> (B, C, N)
x = self.norm(x)
x = x.squeeze(0).transpose(0, 1) # (B, C, N) -> (N, C)
return x.squeeze()
class UnaryBlock(nn.Module):
def __init__(self, in_channels, out_channels, group_norm, has_relu=True, bias=True, layer_norm=False):
r"""Initialize a standard unary block with GroupNorm and LeakyReLU.
Args:
in_channels: dimension of input features
out_channels: dimension of output features
group_norm: number of groups in group normalization
has_relu: If True, apply a LeakyReLU after normalization
bias: If True, use bias
layer_norm: If True, use LayerNorm instead of GroupNorm
"""
super(UnaryBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.group_norm = group_norm
self.mlp = nn.Linear(in_channels, out_channels, bias=bias)
if layer_norm:
self.norm = nn.LayerNorm(out_channels)
else:
self.norm = GroupNorm(group_norm, out_channels)
if has_relu:
self.leaky_relu = nn.LeakyReLU(0.1)
else:
self.leaky_relu = None
def forward(self, x):
x = self.mlp(x)
x = self.norm(x)
if self.leaky_relu is not None:
x = self.leaky_relu(x)
return x
class LastUnaryBlock(nn.Module):
def __init__(self, in_channels, out_channels, bias=True):
r"""Initialize a standard last_unary block without GN, ReLU.
Args:
in_channels: dimension of input features
out_channels: dimension of output features
"""
super(LastUnaryBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.mlp = nn.Linear(in_channels, out_channels, bias=bias)
def forward(self, x):
x = self.mlp(x)
return x
class ConvBlock(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
radius,
sigma,
group_norm,
negative_slope=0.1,
bias=True,
layer_norm=False,
):
r"""Initialize a KPConv block with ReLU and BatchNorm.
Args:
in_channels: dimension input features
out_channels: dimension input features
kernel_size: number of kernel points
radius: convolution radius
sigma: influence radius of each kernel point
group_norm: group number for GroupNorm
negative_slope: leaky relu negative slope
bias: If True, use bias in KPConv
layer_norm: If True, use LayerNorm instead of GroupNorm
"""
super(ConvBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.KPConv = KPConv(in_channels, out_channels, kernel_size, radius, sigma, bias=bias)
if layer_norm:
self.norm = nn.LayerNorm(out_channels)
else:
self.norm = GroupNorm(group_norm, out_channels)
self.leaky_relu = nn.LeakyReLU(negative_slope=negative_slope)
def forward(self, s_feats, q_points, s_points, neighbor_indices):
x = self.KPConv(s_feats, q_points, s_points, neighbor_indices)
x = self.norm(x)
x = self.leaky_relu(x)
return x
class ResidualBlock(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
radius,
sigma,
group_norm,
strided=False,
bias=True,
layer_norm=False,
):
r"""Initialize a ResNet bottleneck block.
Args:
in_channels: dimension of input features
out_channels: dimension of output features
kernel_size: number of kernel points
radius: convolution radius
sigma: influence radius of each kernel point
group_norm: group number for GroupNorm
strided: strided or not
bias: If True, use bias in KPConv
layer_norm: If True, use LayerNorm instead of GroupNorm
"""
super(ResidualBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.strided = strided
mid_channels = out_channels // 4
if in_channels != mid_channels:
self.unary1 = UnaryBlock(in_channels, mid_channels, group_norm, bias=bias, layer_norm=layer_norm)
else:
self.unary1 = nn.Identity()
self.KPConv = KPConv(mid_channels, mid_channels, kernel_size, radius, sigma, bias=bias)
if layer_norm:
self.norm_conv = nn.LayerNorm(mid_channels)
else:
self.norm_conv = GroupNorm(group_norm, mid_channels)
self.unary2 = UnaryBlock(
mid_channels, out_channels, group_norm, has_relu=False, bias=bias, layer_norm=layer_norm
)
if in_channels != out_channels:
self.unary_shortcut = UnaryBlock(
in_channels, out_channels, group_norm, has_relu=False, bias=bias, layer_norm=layer_norm
)
else:
self.unary_shortcut = nn.Identity()
self.leaky_relu = nn.LeakyReLU(0.1)
def forward(self, s_feats, q_points, s_points, neighbor_indices):
x = self.unary1(s_feats)
x = self.KPConv(x, q_points, s_points, neighbor_indices)
x = self.norm_conv(x)
x = self.leaky_relu(x)
x = self.unary2(x)
if self.strided:
shortcut = maxpool(s_feats, neighbor_indices)
else:
shortcut = s_feats
shortcut = self.unary_shortcut(shortcut)
x = x + shortcut
x = self.leaky_relu(x)
return x
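# Illustrative sketch (added for exposition; not part of the original code):
# a hypothetical helper showing the pointwise UnaryBlock, which is a per-point
# Linear + GroupNorm + LeakyReLU; out_channels must be divisible by group_norm.
def _demo_unary_block():
    block = UnaryBlock(in_channels=32, out_channels=64, group_norm=8)
    x = torch.randn(100, 32)  # (num_points, in_channels)
    print(block(x).shape)  # (100, 64)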
| 7,155 | 30.663717 | 111 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/layers/__init__.py | from geotransformer.modules.layers.conv_block import ConvBlock
from geotransformer.modules.layers.factory import build_dropout_layer, build_conv_layer, build_norm_layer, build_act_layer
| 186 | 61.333333 | 122 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/layers/conv_block.py | import warnings
import torch
import torch.nn as nn
from geotransformer.modules.layers.factory import build_conv_layer, build_norm_layer, build_act_layer
class ConvBlock(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size=None,
stride=1,
padding=0,
dilation=1,
groups=1,
padding_mode='zeros',
depth_multiplier=None,
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
act_before_norm=False,
):
r"""Conv-Norm-Act Block.
Args:
act_before_norm (bool=False): If True, conv-act-norm. If False, conv-norm-act.
"""
super().__init__()
assert conv_cfg is not None
if isinstance(norm_cfg, str):
norm_cfg = {'type': norm_cfg}
if isinstance(act_cfg, str):
act_cfg = {'type': act_cfg}
norm_type = norm_cfg['type']
if norm_type in ['BatchNorm', 'InstanceNorm']:
norm_cfg['type'] = norm_type + conv_cfg[-2:]
self.act_before_norm = act_before_norm
bias = True
if not self.act_before_norm:
# conv-norm-act
norm_type = norm_cfg['type']
if norm_type.startswith('BatchNorm') or norm_type.startswith('InstanceNorm'):
bias = False
if conv_cfg == 'Linear':
layer_cfg = {
'type': conv_cfg,
'in_features': in_channels,
'out_features': out_channels,
'bias': bias,
}
elif conv_cfg.startswith('SeparableConv'):
if groups != 1:
warnings.warn(f'`groups={groups}` is ignored when building {conv_cfg} layer.')
layer_cfg = {
'type': conv_cfg,
'in_channels': in_channels,
'out_channels': out_channels,
'kernel_size': kernel_size,
'stride': stride,
'padding': padding,
'dilation': dilation,
'depth_multiplier': depth_multiplier,
'bias': bias,
'padding_mode': padding_mode,
}
else:
if depth_multiplier is not None:
warnings.warn(f'`depth_multiplier={depth_multiplier}` is ignored when building {conv_cfg} layer.')
layer_cfg = {
'type': conv_cfg,
'in_channels': in_channels,
'out_channels': out_channels,
'kernel_size': kernel_size,
'stride': stride,
'padding': padding,
'dilation': dilation,
'groups': groups,
'bias': bias,
'padding_mode': padding_mode,
}
self.conv = build_conv_layer(layer_cfg)
norm_layer = build_norm_layer(out_channels, norm_cfg)
act_layer = build_act_layer(act_cfg)
if self.act_before_norm:
self.act = act_layer
self.norm = norm_layer
else:
self.norm = norm_layer
self.act = act_layer
def forward(self, x):
x = self.conv(x)
if self.act_before_norm:
x = self.norm(self.act(x))
else:
x = self.act(self.norm(x))
return x
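# Illustrative sketch (added for exposition; not part of the original code):
# a hypothetical ConvBlock built from string configs. With conv_cfg='Conv2d'
# and norm_cfg='BatchNorm', the norm type is expanded to 'BatchNorm2d' and the
# conv bias is dropped because the default conv-norm-act order absorbs it.
def _demo_conv_block():
    block = ConvBlock(3, 16, kernel_size=3, padding=1, conv_cfg='Conv2d', norm_cfg='BatchNorm', act_cfg='ReLU')
    x = torch.randn(2, 3, 8, 8)
    print(block(x).shape)  # (2, 16, 8, 8)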
| 3,326 | 29.805556 | 114 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/layers/factory.py | from typing import Union, Dict, Optional, Tuple
import torch.nn as nn
NORM_LAYERS = {
'BatchNorm1d': nn.BatchNorm1d,
'BatchNorm2d': nn.BatchNorm2d,
'BatchNorm3d': nn.BatchNorm3d,
'InstanceNorm1d': nn.InstanceNorm1d,
'InstanceNorm2d': nn.InstanceNorm2d,
'InstanceNorm3d': nn.InstanceNorm3d,
'GroupNorm': nn.GroupNorm,
'LayerNorm': nn.LayerNorm,
}
ACT_LAYERS = {
'ReLU': nn.ReLU,
'LeakyReLU': nn.LeakyReLU,
'ELU': nn.ELU,
'GELU': nn.GELU,
'Sigmoid': nn.Sigmoid,
'Softplus': nn.Softplus,
'Tanh': nn.Tanh,
'Identity': nn.Identity,
}
CONV_LAYERS = {
'Linear': nn.Linear,
'Conv1d': nn.Conv1d,
'Conv2d': nn.Conv2d,
'Conv3d': nn.Conv3d,
}
def parse_cfg(cfg: Union[str, Dict]) -> Tuple[str, Dict]:
assert isinstance(cfg, (str, Dict)), 'Illegal cfg type: {}.'.format(type(cfg))
if isinstance(cfg, str):
cfg = {'type': cfg}
else:
cfg = cfg.copy()
layer = cfg.pop('type')
return layer, cfg
def build_dropout_layer(p: Optional[float], **kwargs) -> nn.Module:
r"""Factory function for dropout layer."""
if p is None or p == 0:
return nn.Identity()
else:
return nn.Dropout(p=p, **kwargs)
def build_norm_layer(num_features, norm_cfg: Optional[Union[str, Dict]]) -> nn.Module:
r"""Factory function for normalization layers."""
if norm_cfg is None:
return nn.Identity()
layer, kwargs = parse_cfg(norm_cfg)
assert layer in NORM_LAYERS, f'Illegal normalization: {layer}.'
if layer == 'GroupNorm':
kwargs['num_channels'] = num_features
elif layer == 'LayerNorm':
kwargs['normalized_shape'] = num_features
else:
kwargs['num_features'] = num_features
return NORM_LAYERS[layer](**kwargs)
def build_act_layer(act_cfg: Optional[Union[str, Dict]]) -> nn.Module:
r"""Factory function for activation functions."""
if act_cfg is None:
return nn.Identity()
layer, kwargs = parse_cfg(act_cfg)
assert layer in ACT_LAYERS, f'Illegal activation: {layer}.'
if layer == 'LeakyReLU':
if 'negative_slope' not in kwargs:
kwargs['negative_slope'] = 0.2
return ACT_LAYERS[layer](**kwargs)
def build_conv_layer(conv_cfg: Union[str, Dict]) -> nn.Module:
r"""Factory function for convolution or linear layers."""
layer, kwargs = parse_cfg(conv_cfg)
assert layer in CONV_LAYERS, f'Illegal layer: {layer}.'
return CONV_LAYERS[layer](**kwargs)
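# Illustrative sketch (added for exposition; not part of the original code):
# hypothetical calls to the factory functions. Configs can be plain strings or
# dicts with a 'type' key plus layer kwargs; the feature dimension is injected
# automatically for normalization layers.
def _demo_factories():
    conv = build_conv_layer({'type': 'Conv1d', 'in_channels': 3, 'out_channels': 64, 'kernel_size': 1})
    norm = build_norm_layer(64, 'BatchNorm1d')  # becomes nn.BatchNorm1d(64)
    act = build_act_layer('LeakyReLU')  # negative_slope defaults to 0.2 here
    drop = build_dropout_layer(None)  # None or 0 yields nn.Identity()
    return nn.Sequential(conv, norm, act, drop)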
| 2,489 | 27.295455 | 86 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/loss/__init__.py | from geotransformer.modules.loss.circle_loss import CircleLoss, WeightedCircleLoss
| 84 | 27.333333 | 82 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/loss/circle_loss.py | import ipdb
import torch
import torch.nn.functional as F
import torch.nn as nn
def circle_loss(
pos_masks,
neg_masks,
feat_dists,
pos_margin,
neg_margin,
pos_optimal,
neg_optimal,
log_scale,
):
# get anchors that have both positive and negative pairs
row_masks = (torch.gt(pos_masks.sum(-1), 0) & torch.gt(neg_masks.sum(-1), 0)).detach()
col_masks = (torch.gt(pos_masks.sum(-2), 0) & torch.gt(neg_masks.sum(-2), 0)).detach()
# get alpha for both positive and negative pairs
pos_weights = feat_dists - 1e5 * (~pos_masks).float() # mask the non-positive
pos_weights = pos_weights - pos_optimal # mask the uninformative positive
pos_weights = torch.maximum(torch.zeros_like(pos_weights), pos_weights).detach()
neg_weights = feat_dists + 1e5 * (~neg_masks).float() # mask the non-negative
neg_weights = neg_optimal - neg_weights # mask the uninformative negative
neg_weights = torch.maximum(torch.zeros_like(neg_weights), neg_weights).detach()
loss_pos_row = torch.logsumexp(log_scale * (feat_dists - pos_margin) * pos_weights, dim=-1)
loss_pos_col = torch.logsumexp(log_scale * (feat_dists - pos_margin) * pos_weights, dim=-2)
loss_neg_row = torch.logsumexp(log_scale * (neg_margin - feat_dists) * neg_weights, dim=-1)
loss_neg_col = torch.logsumexp(log_scale * (neg_margin - feat_dists) * neg_weights, dim=-2)
loss_row = F.softplus(loss_pos_row + loss_neg_row) / log_scale
loss_col = F.softplus(loss_pos_col + loss_neg_col) / log_scale
loss = (loss_row[row_masks].mean() + loss_col[col_masks].mean()) / 2
return loss
def weighted_circle_loss(
pos_masks,
neg_masks,
feat_dists,
pos_margin,
neg_margin,
pos_optimal,
neg_optimal,
log_scale,
pos_scales=None,
neg_scales=None,
):
# get anchors that have both positive and negative pairs
row_masks = (torch.gt(pos_masks.sum(-1), 0) & torch.gt(neg_masks.sum(-1), 0)).detach()
col_masks = (torch.gt(pos_masks.sum(-2), 0) & torch.gt(neg_masks.sum(-2), 0)).detach()
# get alpha for both positive and negative pairs
pos_weights = feat_dists - 1e5 * (~pos_masks).float() # mask the non-positive
pos_weights = pos_weights - pos_optimal # mask the uninformative positive
pos_weights = torch.maximum(torch.zeros_like(pos_weights), pos_weights)
if pos_scales is not None:
pos_weights = pos_weights * pos_scales
pos_weights = pos_weights.detach()
neg_weights = feat_dists + 1e5 * (~neg_masks).float() # mask the non-negative
neg_weights = neg_optimal - neg_weights # mask the uninformative negative
neg_weights = torch.maximum(torch.zeros_like(neg_weights), neg_weights)
if neg_scales is not None:
neg_weights = neg_weights * neg_scales
neg_weights = neg_weights.detach()
loss_pos_row = torch.logsumexp(log_scale * (feat_dists - pos_margin) * pos_weights, dim=-1)
loss_pos_col = torch.logsumexp(log_scale * (feat_dists - pos_margin) * pos_weights, dim=-2)
loss_neg_row = torch.logsumexp(log_scale * (neg_margin - feat_dists) * neg_weights, dim=-1)
loss_neg_col = torch.logsumexp(log_scale * (neg_margin - feat_dists) * neg_weights, dim=-2)
loss_row = F.softplus(loss_pos_row + loss_neg_row) / log_scale
loss_col = F.softplus(loss_pos_col + loss_neg_col) / log_scale
loss = (loss_row[row_masks].mean() + loss_col[col_masks].mean()) / 2
return loss
class CircleLoss(nn.Module):
def __init__(self, pos_margin, neg_margin, pos_optimal, neg_optimal, log_scale):
super(CircleLoss, self).__init__()
self.pos_margin = pos_margin
self.neg_margin = neg_margin
self.pos_optimal = pos_optimal
self.neg_optimal = neg_optimal
self.log_scale = log_scale
def forward(self, pos_masks, neg_masks, feat_dists):
return circle_loss(
pos_masks,
neg_masks,
feat_dists,
self.pos_margin,
self.neg_margin,
self.pos_optimal,
self.neg_optimal,
self.log_scale,
)
class WeightedCircleLoss(nn.Module):
def __init__(self, pos_margin, neg_margin, pos_optimal, neg_optimal, log_scale):
super(WeightedCircleLoss, self).__init__()
self.pos_margin = pos_margin
self.neg_margin = neg_margin
self.pos_optimal = pos_optimal
self.neg_optimal = neg_optimal
self.log_scale = log_scale
def forward(self, pos_masks, neg_masks, feat_dists, pos_scales=None, neg_scales=None):
return weighted_circle_loss(
pos_masks,
neg_masks,
feat_dists,
self.pos_margin,
self.neg_margin,
self.pos_optimal,
self.neg_optimal,
self.log_scale,
pos_scales=pos_scales,
neg_scales=neg_scales,
)
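# Illustrative sketch (added for exposition; not part of the original code):
# a hypothetical toy problem where the i-th reference feature matches the i-th
# source feature. The margin/optimal/log_scale values below are plausible
# choices for normalized features, not prescribed defaults.
def _demo_circle_loss():
    loss_fn = CircleLoss(pos_margin=0.1, neg_margin=1.4, pos_optimal=0.1, neg_optimal=1.4, log_scale=24)
    feat_dists = torch.rand(8, 8)  # pairwise feature distances
    pos_masks = torch.eye(8, dtype=torch.bool)  # diagonal pairs are positives
    neg_masks = ~pos_masks
    print(loss_fn(pos_masks, neg_masks, feat_dists))  # scalar loss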
| 4,897 | 35.827068 | 95 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/ops/__init__.py | from geotransformer.modules.ops.grid_subsample import grid_subsample
from geotransformer.modules.ops.index_select import index_select
from geotransformer.modules.ops.pairwise_distance import pairwise_distance
from geotransformer.modules.ops.pointcloud_partition import (
get_point_to_node_indices,
point_to_node_partition,
knn_partition,
ball_query_partition,
)
from geotransformer.modules.ops.radius_search import radius_search
from geotransformer.modules.ops.transformation import (
apply_transform,
apply_rotation,
inverse_transform,
skew_symmetric_matrix,
rodrigues_rotation_matrix,
rodrigues_alignment_matrix,
get_transform_from_rotation_translation,
get_rotation_translation_from_transform,
)
from geotransformer.modules.ops.vector_angle import vector_angle, rad2deg, deg2rad
| 830 | 36.772727 | 82 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/ops/grid_subsample.py | import importlib
ext_module = importlib.import_module('geotransformer.ext')
def grid_subsample(points, lengths, voxel_size):
"""Grid subsampling in stack mode.
This function is implemented on CPU.
Args:
points (Tensor): stacked points. (N, 3)
lengths (Tensor): number of points in the stacked batch. (B,)
voxel_size (float): voxel size.
Returns:
s_points (Tensor): stacked subsampled points (M, 3)
s_lengths (Tensor): numbers of subsampled points in the batch. (B,)
"""
s_points, s_lengths = ext_module.grid_subsampling(points, lengths, voxel_size)
return s_points, s_lengths
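# Illustrative sketch (added for exposition; not part of the original code):
# a hypothetical call subsampling a single-cloud "batch" on CPU. It requires
# the compiled `geotransformer.ext` extension to be importable.
def _demo_grid_subsample():
    import torch
    points = torch.rand(1000, 3)
    lengths = torch.LongTensor([1000])  # one cloud of 1000 points
    s_points, s_lengths = grid_subsample(points, lengths, voxel_size=0.05)
    print(s_points.shape, s_lengths)  # fewer points, one length entry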
| 651 | 27.347826 | 82 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/ops/index_select.py | import torch
def index_select(data: torch.Tensor, index: torch.LongTensor, dim: int) -> torch.Tensor:
r"""Advanced index select.
Returns a tensor `output` which indexes the `data` tensor along dimension `dim`
using the entries in `index` which is a `LongTensor`.
Different from `torch.index_select`, `index` does not has to be 1-D. The `dim`-th
dimension of `data` will be expanded to the number of dimensions in `index`.
For example, suppose the shape `data` is $(a_0, a_1, ..., a_{n-1})$, the shape of `index` is
$(b_0, b_1, ..., b_{m-1})$, and `dim` is $i$, then `output` is $(n+m-1)$-d tensor, whose shape is
$(a_0, ..., a_{i-1}, b_0, b_1, ..., b_{m-1}, a_{i+1}, ..., a_{n-1})$.
Args:
data (Tensor): (a_0, a_1, ..., a_{n-1})
index (LongTensor): (b_0, b_1, ..., b_{m-1})
dim: int
Returns:
output (Tensor): (a_0, ..., a_{dim-1}, b_0, ..., b_{m-1}, a_{dim+1}, ..., a_{n-1})
"""
output = data.index_select(dim, index.view(-1))
if index.ndim > 1:
output_shape = data.shape[:dim] + index.shape + data.shape[dim:][1:]
output = output.view(*output_shape)
return output
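# Illustrative sketch (added for exposition; not part of the original code):
# a hypothetical helper showing the shape expansion described above: dim 0 of
# `data` is replaced by the full shape of `index`.
def _demo_index_select():
    data = torch.randn(5, 4)
    index = torch.randint(0, 5, (2, 3))
    print(index_select(data, index, dim=0).shape)  # torch.Size([2, 3, 4])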
| 1,178 | 35.84375 | 101 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/ops/pairwise_distance.py | import torch
def pairwise_distance(
x: torch.Tensor, y: torch.Tensor, normalized: bool = False, channel_first: bool = False
) -> torch.Tensor:
r"""Pairwise distance of two (batched) point clouds.
Args:
x (Tensor): (*, N, C) or (*, C, N)
y (Tensor): (*, M, C) or (*, C, M)
normalized (bool=False): if the points are normalized (`|x| = |y| = 1`), then `d^2 = 2 - 2xy`.
channel_first (bool=False): if True, the points shape is (*, C, N).
Returns:
dist: torch.Tensor (*, N, M)
"""
if channel_first:
channel_dim = -2
xy = torch.matmul(x.transpose(-1, -2), y) # [(*, C, N) -> (*, N, C)] x (*, C, M)
else:
channel_dim = -1
xy = torch.matmul(x, y.transpose(-1, -2)) # (*, N, C) x [(*, M, C) -> (*, C, M)]
if normalized:
sq_distances = 2.0 - 2.0 * xy
else:
x2 = torch.sum(x ** 2, dim=channel_dim).unsqueeze(-1) # (*, N, C) or (*, C, N) -> (*, N) -> (*, N, 1)
y2 = torch.sum(y ** 2, dim=channel_dim).unsqueeze(-2) # (*, M, C) or (*, C, M) -> (*, M) -> (*, 1, M)
sq_distances = x2 - 2 * xy + y2
sq_distances = sq_distances.clamp(min=0.0)
return sq_distances
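# Illustrative sketch (added for exposition; not part of the original code):
# a hypothetical check against torch.cdist. Note that the function returns
# *squared* distances, clamped at zero for numerical safety.
def _demo_pairwise_distance():
    x = torch.randn(10, 3)
    y = torch.randn(8, 3)
    sq_dists = pairwise_distance(x, y)  # (10, 8)
    print(torch.allclose(sq_dists, torch.cdist(x, y) ** 2, atol=1e-5))  # True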
| 1,205 | 36.6875 | 110 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/ops/pointcloud_partition.py | import warnings
import torch
from geotransformer.modules.ops.pairwise_distance import pairwise_distance
from geotransformer.modules.ops.index_select import index_select
def get_point_to_node_indices(points: torch.Tensor, nodes: torch.Tensor, return_counts: bool = False):
r"""Compute Point-to-Node partition indices of the point cloud.
Distribute points to the nearest node. Each point is distributed to only one node.
Args:
points (Tensor): point cloud (N, C)
nodes (Tensor): node set (M, C)
return_counts (bool=False): whether return the number of points in each node.
Returns:
indices (LongTensor): index of the node that each point belongs to (N,)
node_sizes (LongTensor): the number of points in each node.
"""
sq_dist_mat = pairwise_distance(points, nodes)
indices = sq_dist_mat.min(dim=1)[1]
if return_counts:
unique_indices, unique_counts = torch.unique(indices, return_counts=True)
node_sizes = torch.zeros(nodes.shape[0], dtype=torch.long).cuda()
node_sizes[unique_indices] = unique_counts
return indices, node_sizes
else:
return indices
@torch.no_grad()
def knn_partition(points: torch.Tensor, nodes: torch.Tensor, k: int, return_distance: bool = False):
r"""k-NN partition of the point cloud.
Find the k nearest points for each node.
Args:
points: torch.Tensor (num_point, num_channel)
nodes: torch.Tensor (num_node, num_channel)
k: int
return_distance: bool
Returns:
knn_distances: torch.Tensor (num_node, k)
knn_indices: torch.Tensor (num_node, k)
"""
k = min(k, points.shape[0])
sq_dist_mat = pairwise_distance(nodes, points)
knn_sq_distances, knn_indices = sq_dist_mat.topk(dim=1, k=k, largest=False)
if return_distance:
knn_distances = torch.sqrt(knn_sq_distances)
return knn_distances, knn_indices
else:
return knn_indices
@torch.no_grad()
def point_to_node_partition(
points: torch.Tensor,
nodes: torch.Tensor,
point_limit: int,
return_count: bool = False,
):
r"""Point-to-Node partition to the point cloud.
Fixed knn bug.
Args:
points (Tensor): (N, 3)
nodes (Tensor): (M, 3)
point_limit (int): max number of points to each node
return_count (bool=False): whether to return `node_sizes`
Returns:
point_to_node (Tensor): (N,)
node_sizes (LongTensor): (M,)
node_masks (BoolTensor): (M,)
node_knn_indices (LongTensor): (M, K)
node_knn_masks (BoolTensor) (M, K)
"""
sq_dist_mat = pairwise_distance(nodes, points) # (M, N)
point_to_node = sq_dist_mat.min(dim=0)[1] # (N,)
node_masks = torch.zeros(nodes.shape[0], dtype=torch.bool).cuda() # (M,)
node_masks.index_fill_(0, point_to_node, True)
matching_masks = torch.zeros_like(sq_dist_mat, dtype=torch.bool) # (M, N)
point_indices = torch.arange(points.shape[0]).cuda() # (N,)
matching_masks[point_to_node, point_indices] = True # (M, N)
sq_dist_mat.masked_fill_(~matching_masks, 1e12) # (M, N)
node_knn_indices = sq_dist_mat.topk(k=point_limit, dim=1, largest=False)[1] # (M, K)
node_knn_node_indices = index_select(point_to_node, node_knn_indices, dim=0) # (M, K)
node_indices = torch.arange(nodes.shape[0]).cuda().unsqueeze(1).expand(-1, point_limit) # (M, K)
node_knn_masks = torch.eq(node_knn_node_indices, node_indices) # (M, K)
node_knn_indices.masked_fill_(~node_knn_masks, points.shape[0])
if return_count:
unique_indices, unique_counts = torch.unique(point_to_node, return_counts=True)
node_sizes = torch.zeros(nodes.shape[0], dtype=torch.long).cuda() # (M,)
node_sizes.index_put_([unique_indices], unique_counts)
return point_to_node, node_sizes, node_masks, node_knn_indices, node_knn_masks
else:
return point_to_node, node_masks, node_knn_indices, node_knn_masks
@torch.no_grad()
def point_to_node_partition_bug(
points: torch.Tensor,
nodes: torch.Tensor,
point_limit: int,
return_count: bool = False,
):
r"""Point-to-Node partition to the point cloud.
BUG: this implementation ignores point_to_node indices when building patches. However, the points that do not
belong to a superpoint should be masked out.
Args:
points (Tensor): (N, 3)
nodes (Tensor): (M, 3)
point_limit (int): max number of points to each node
return_count (bool=False): whether to return `node_sizes`
Returns:
point_to_node (Tensor): (N,)
node_sizes (LongTensor): (M,)
node_masks (BoolTensor): (M,)
node_knn_indices (LongTensor): (M, K)
node_knn_masks (BoolTensor) (M, K)
"""
warnings.warn('There is a bug in this implementation. Use `point_to_node_partition` instead.')
sq_dist_mat = pairwise_distance(nodes, points) # (M, N)
point_to_node = sq_dist_mat.min(dim=0)[1] # (N,)
node_masks = torch.zeros(nodes.shape[0], dtype=torch.bool).cuda() # (M,)
node_masks.index_fill_(0, point_to_node, True)
node_knn_indices = sq_dist_mat.topk(k=point_limit, dim=1, largest=False)[1] # (M, K)
node_knn_node_indices = index_select(point_to_node, node_knn_indices, dim=0) # (M, K)
node_indices = torch.arange(nodes.shape[0]).cuda().unsqueeze(1).expand(-1, point_limit) # (M, K)
node_knn_masks = torch.eq(node_knn_node_indices, node_indices) # (M, K)
node_knn_indices.masked_fill_(~node_knn_masks, points.shape[0])
if return_count:
unique_indices, unique_counts = torch.unique(point_to_node, return_counts=True)
node_sizes = torch.zeros(nodes.shape[0], dtype=torch.long).cuda() # (M,)
node_sizes.index_put_([unique_indices], unique_counts)
return point_to_node, node_sizes, node_masks, node_knn_indices, node_knn_masks
else:
return point_to_node, node_masks, node_knn_indices, node_knn_masks
@torch.no_grad()
def ball_query_partition(
points: torch.Tensor,
nodes: torch.Tensor,
radius: float,
point_limit: int,
return_count: bool = False,
):
node_knn_distances, node_knn_indices = knn_partition(points, nodes, point_limit, return_distance=True)
node_knn_masks = torch.lt(node_knn_distances, radius) # (N, k)
sentinel_indices = torch.full_like(node_knn_indices, points.shape[0]) # (N, k)
node_knn_indices = torch.where(node_knn_masks, node_knn_indices, sentinel_indices) # (N, k)
if return_count:
node_sizes = node_knn_masks.sum(1) # (N,)
return node_knn_indices, node_knn_masks, node_sizes
else:
return node_knn_indices, node_knn_masks
| 6,727 | 37.227273 | 113 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/ops/radius_search.py | import importlib
ext_module = importlib.import_module('geotransformer.ext')
def radius_search(q_points, s_points, q_lengths, s_lengths, radius, neighbor_limit):
r"""Computes neighbors for a batch of q_points and s_points, apply radius search (in stack mode).
This function is implemented on CPU.
Args:
q_points (Tensor): the query points (N, 3)
s_points (Tensor): the support points (M, 3)
q_lengths (Tensor): the list of lengths of batch elements in q_points
s_lengths (Tensor): the list of lengths of batch elements in s_points
radius (float): maximum distance of neighbors
neighbor_limit (int): maximum number of neighbors
Returns:
neighbors (Tensor): the k nearest neighbors of q_points in s_points (N, k).
Filled with M if there are fewer than k neighbors.
"""
neighbor_indices = ext_module.radius_neighbors(q_points, s_points, q_lengths, s_lengths, radius)
if neighbor_limit > 0:
neighbor_indices = neighbor_indices[:, :neighbor_limit]
return neighbor_indices
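# Illustrative sketch (added for exposition; not part of the original code):
# a hypothetical single-batch query on CPU. Entries equal to s_points.shape[0]
# mark missing neighbors. Requires the compiled `geotransformer.ext` extension.
def _demo_radius_search():
    import torch
    q_points = torch.rand(100, 3)
    s_points = torch.rand(500, 3)
    q_lengths = torch.LongTensor([100])
    s_lengths = torch.LongTensor([500])
    indices = radius_search(q_points, s_points, q_lengths, s_lengths, radius=0.1, neighbor_limit=16)
    print(indices.shape)  # (100, k) with k <= 16; value 500 marks "no neighbor"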
| 1,080 | 37.607143 | 101 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/ops/transformation.py | from typing import Optional
import torch
import torch.nn.functional as F
def apply_transform(points: torch.Tensor, transform: torch.Tensor, normals: Optional[torch.Tensor] = None):
r"""Rigid transform to points and normals (optional).
Given a point cloud P(3, N), normals V(3, N) and a transform matrix T in the form of
| R t |
| 0 1 |,
the output point cloud Q = RP + t, V' = RV.
In the implementation, P and V are (N, 3), so R should be transposed: Q = PR^T + t, V' = VR^T.
There are two cases supported:
1. points and normals are (*, 3), transform is (4, 4), the output points are (*, 3).
In this case, the transform is applied to all points.
2. points and normals are (B, N, 3), transform is (B, 4, 4), the output points are (B, N, 3).
In this case, the transform is applied batch-wise. The points can be broadcast if B=1.
Args:
points (Tensor): (*, 3) or (B, N, 3)
normals (optional[Tensor]=None): same shape as points.
transform (Tensor): (4, 4) or (B, 4, 4)
Returns:
points (Tensor): same shape as points.
normals (Tensor): same shape as points.
"""
if normals is not None:
assert points.shape == normals.shape
if transform.ndim == 2:
rotation = transform[:3, :3]
translation = transform[:3, 3]
points_shape = points.shape
points = points.reshape(-1, 3)
points = torch.matmul(points, rotation.transpose(-1, -2)) + translation
points = points.reshape(*points_shape)
if normals is not None:
normals = normals.reshape(-1, 3)
normals = torch.matmul(normals, rotation.transpose(-1, -2))
normals = normals.reshape(*points_shape)
elif transform.ndim == 3 and points.ndim == 3:
rotation = transform[:, :3, :3] # (B, 3, 3)
translation = transform[:, None, :3, 3] # (B, 1, 3)
points = torch.matmul(points, rotation.transpose(-1, -2)) + translation
if normals is not None:
normals = torch.matmul(normals, rotation.transpose(-1, -2))
else:
raise ValueError(
'Incompatible shapes between points {} and transform {}.'.format(
tuple(points.shape), tuple(transform.shape)
)
)
if normals is not None:
return points, normals
else:
return points
def apply_rotation(points: torch.Tensor, rotation: torch.Tensor, normals: Optional[torch.Tensor] = None):
r"""Rotate points and normals (optional) along the origin.
Given a point cloud P(3, N), normals V(3, N) and a rotation matrix R, the output point cloud Q = RP, V' = RV.
In the implementation, P and V are (N, 3), so R should be transposed: Q = PR^T, V' = VR^T.
There are two cases supported:
1. points and normals are (*, 3), rotation is (3, 3), the output points are (*, 3).
In this case, the rotation is applied to all points.
2. points and normals are (B, N, 3), transform is (B, 3, 3), the output points are (B, N, 3).
In this case, the rotation is applied batch-wise. The points can be broadcast if B=1.
Args:
points (Tensor): (*, 3) or (B, N, 3)
normals (optional[Tensor]=None): same shape as points.
rotation (Tensor): (3, 3) or (B, 3, 3)
Returns:
points (Tensor): same shape as points.
normals (Tensor): same shape as points.
"""
if normals is not None:
assert points.shape == normals.shape
if rotation.ndim == 2:
points_shape = points.shape
points = points.reshape(-1, 3)
points = torch.matmul(points, rotation.transpose(-1, -2))
points = points.reshape(*points_shape)
if normals is not None:
normals = normals.reshape(-1, 3)
normals = torch.matmul(normals, rotation.transpose(-1, -2))
normals = normals.reshape(*points_shape)
elif rotation.ndim == 3 and points.ndim == 3:
points = torch.matmul(points, rotation.transpose(-1, -2))
if normals is not None:
normals = torch.matmul(normals, rotation.transpose(-1, -2))
else:
raise ValueError(
'Incompatible shapes between points {} and rotation {}.'.format(tuple(points.shape), tuple(rotation.shape))
)
if normals is not None:
return points, normals
else:
return points
def get_rotation_translation_from_transform(transform):
r"""Decompose transformation matrix into rotation matrix and translation vector.
Args:
transform (Tensor): (*, 4, 4)
Returns:
rotation (Tensor): (*, 3, 3)
translation (Tensor): (*, 3)
"""
rotation = transform[..., :3, :3]
translation = transform[..., :3, 3]
return rotation, translation
def get_transform_from_rotation_translation(rotation, translation):
r"""Compose transformation matrix from rotation matrix and translation vector.
Args:
rotation (Tensor): (*, 3, 3)
translation (Tensor): (*, 3)
Returns:
transform (Tensor): (*, 4, 4)
"""
input_shape = rotation.shape
rotation = rotation.view(-1, 3, 3)
translation = translation.view(-1, 3)
transform = torch.eye(4).to(rotation).unsqueeze(0).repeat(rotation.shape[0], 1, 1)
transform[:, :3, :3] = rotation
transform[:, :3, 3] = translation
output_shape = input_shape[:-2] + (4, 4)
transform = transform.view(*output_shape)
return transform
def inverse_transform(transform):
r"""Inverse rigid transform.
Args:
transform (Tensor): (*, 4, 4)
Return:
inv_transform (Tensor): (*, 4, 4)
"""
rotation, translation = get_rotation_translation_from_transform(transform) # (*, 3, 3), (*, 3)
inv_rotation = rotation.transpose(-1, -2) # (*, 3, 3)
inv_translation = -torch.matmul(inv_rotation, translation.unsqueeze(-1)).squeeze(-1) # (*, 3)
inv_transform = get_transform_from_rotation_translation(inv_rotation, inv_translation) # (*, 4, 4)
return inv_transform
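# Illustrative sketch (added for exposition; not part of the original code):
# a hypothetical CPU round-trip: compose a transform, apply it, then undo it
# with inverse_transform.
def _demo_transform_roundtrip():
    rotation = torch.eye(3)
    translation = torch.tensor([1.0, 2.0, 3.0])
    transform = get_transform_from_rotation_translation(rotation, translation)  # (4, 4)
    points = torch.rand(100, 3)
    moved = apply_transform(points, transform)
    restored = apply_transform(moved, inverse_transform(transform))
    print(torch.allclose(points, restored, atol=1e-6))  # True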
def skew_symmetric_matrix(inputs):
r"""Compute Skew-symmetric Matrix.
[v]_{\times} = 0 -z y
z 0 -x
-y x 0
Args:
inputs (Tensor): input vectors (*, c)
Returns:
skews (Tensor): output skew-symmetric matrix (*, 3, 3)
"""
input_shape = inputs.shape
output_shape = input_shape[:-1] + (3, 3)
skews = torch.zeros(size=output_shape).cuda()
skews[..., 0, 1] = -inputs[..., 2]
skews[..., 0, 2] = inputs[..., 1]
skews[..., 1, 0] = inputs[..., 2]
skews[..., 1, 2] = -inputs[..., 0]
skews[..., 2, 0] = -inputs[..., 1]
skews[..., 2, 1] = inputs[..., 0]
return skews
def rodrigues_rotation_matrix(axes, angles):
r"""Compute Rodrigues Rotation Matrix.
R = I + \sin{\theta} K + (1 - \cos{\theta}) K^2,
where K is the skew-symmetric matrix of the axis vector.
Args:
axes (Tensor): axis vectors (*, 3)
angles (Tensor): rotation angles in right-hand direction in rad. (*)
Returns:
rotations (Tensor): Rodrigues rotation matrix (*, 3, 3)
"""
input_shape = axes.shape
axes = axes.view(-1, 3)
angles = angles.view(-1)
axes = F.normalize(axes, p=2, dim=1)
skews = skew_symmetric_matrix(axes) # (B, 3, 3)
sin_values = torch.sin(angles).view(-1, 1, 1) # (B,)
cos_values = torch.cos(angles).view(-1, 1, 1) # (B,)
eyes = torch.eye(3).cuda().unsqueeze(0).expand_as(skews) # (B, 3, 3)
rotations = eyes + sin_values * skews + (1.0 - cos_values) * torch.matmul(skews, skews)
output_shape = input_shape[:-1] + (3, 3)
rotations = rotations.view(*output_shape)
return rotations
def rodrigues_alignment_matrix(src_vectors, tgt_vectors):
r"""Compute the Rodrigues rotation matrix aligning source vectors to target vectors.
Args:
src_vectors (Tensor): source vectors (*, 3)
tgt_vectors (Tensor): target vectors (*, 3)
Returns:
rotations (Tensor): rotation matrix (*, 3, 3)
"""
input_shape = src_vectors.shape
src_vectors = src_vectors.view(-1, 3) # (B, 3)
tgt_vectors = tgt_vectors.view(-1, 3) # (B, 3)
# compute axes
src_vectors = F.normalize(src_vectors, dim=-1, p=2) # (B, 3)
tgt_vectors = F.normalize(tgt_vectors, dim=-1, p=2) # (B, 3)
src_skews = skew_symmetric_matrix(src_vectors) # (B, 3, 3)
axes = torch.matmul(src_skews, tgt_vectors.unsqueeze(-1)).squeeze(-1) # (B, 3)
# compute rodrigues rotation matrix
sin_values = torch.linalg.norm(axes, dim=-1) # (B,)
cos_values = (src_vectors * tgt_vectors).sum(dim=-1) # (B,)
axes = F.normalize(axes, dim=-1, p=2) # (B, 3)
skews = skew_symmetric_matrix(axes) # (B, 3, 3)
eyes = torch.eye(3).cuda().unsqueeze(0).expand_as(skews) # (B, 3, 3)
sin_values = sin_values.view(-1, 1, 1)
cos_values = cos_values.view(-1, 1, 1)
rotations = eyes + sin_values * skews + (1.0 - cos_values) * torch.matmul(skews, skews)
# handle opposite direction
sin_values = sin_values.view(-1)
cos_values = cos_values.view(-1)
masks = torch.logical_and(torch.eq(sin_values, 0.0), torch.lt(cos_values, 0.0))
rotations[masks] *= -1
output_shape = input_shape[:-1] + (3, 3)
rotations = rotations.view(*output_shape)
return rotations
| 9,356 | 35.550781 | 118 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/ops/vector_angle.py | import torch
import numpy as np
def rad2deg(rad: torch.Tensor) -> torch.Tensor:
factor = 180.0 / np.pi
deg = rad * factor
return deg
def deg2rad(deg: torch.Tensor) -> torch.Tensor:
factor = np.pi / 180.0
rad = deg * factor
return rad
def vector_angle(x: torch.Tensor, y: torch.Tensor, dim: int, use_degree: bool = False):
r"""Compute the angles between two set of 3D vectors.
Args:
x (Tensor): set of vectors (*, 3, *)
y (Tensor): set of vectors (*, 3, *).
dim (int): dimension index of the coordinates.
use_degree (bool=False): If True, return angles in degree instead of rad.
Returns:
angles (Tensor): (*)
"""
cross = torch.linalg.norm(torch.cross(x, y, dim=dim), dim=dim) # (*, 3, *) x (*, 3, *) -> (*, 3, *) -> (*)
dot = torch.sum(x * y, dim=dim) # (*, 3, *) x (*, 3, *) -> (*)
angles = torch.atan2(cross, dot) # (*)
if use_degree:
angles = rad2deg(angles)
return angles
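# Illustrative sketch (added for exposition; not part of the original code):
# a hypothetical check on orthogonal unit vectors, which should give 90 deg.
def _demo_vector_angle():
    x = torch.tensor([[1.0, 0.0, 0.0]])
    y = torch.tensor([[0.0, 1.0, 0.0]])
    print(vector_angle(x, y, dim=-1, use_degree=True))  # tensor([90.])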
| 992 | 27.371429 | 110 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/registration/__init__.py | from geotransformer.modules.registration.matching import (
extract_correspondences_from_feats,
extract_correspondences_from_scores,
extract_correspondences_from_scores_topk,
extract_correspondences_from_scores_threshold,
dense_correspondences_to_node_correspondences,
get_node_correspondences,
node_correspondences_to_dense_correspondences,
get_node_occlusion_ratios,
get_node_overlap_ratios,
)
from geotransformer.modules.registration.metrics import (
modified_chamfer_distance,
relative_rotation_error,
relative_translation_error,
isotropic_transform_error,
anisotropic_transform_error,
)
from geotransformer.modules.registration.procrustes import weighted_procrustes, WeightedProcrustes
| 746 | 36.35 | 98 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/registration/matching.py | from typing import Optional
import torch
from geotransformer.modules.ops import index_select, apply_transform, pairwise_distance, get_point_to_node_indices
# Extract correspondences
@torch.no_grad()
def extract_correspondences_from_scores(
score_mat: torch.Tensor,
mutual: bool = False,
bilateral: bool = False,
has_dustbin: bool = False,
threshold: float = 0.0,
return_score: bool = False,
):
r"""Extract the indices of correspondences from matching scores matrix (max selection).
Args:
score_mat (Tensor): the logarithmic matching probabilities (N, M) or (N + 1, M + 1) according to `has_dustbin`
mutual (bool = False): whether to get mutual correspondences.
bilateral (bool = False): whether to use bilateral non-mutual matching; ignored if `mutual` is set.
has_dustbin (bool = False): whether to use slack variables.
threshold (float = 0): confidence threshold.
return_score (bool = False): return correspondence scores.
Returns:
ref_corr_indices (LongTensor): (C,)
src_corr_indices (LongTensor): (C,)
corr_scores (Tensor): (C,)
"""
score_mat = torch.exp(score_mat)
ref_length, src_length = score_mat.shape
ref_max_scores, ref_max_indices = torch.max(score_mat, dim=1)
ref_indices = torch.arange(ref_length).cuda()
ref_corr_scores_mat = torch.zeros_like(score_mat)
ref_corr_scores_mat[ref_indices, ref_max_indices] = ref_max_scores
ref_corr_masks_mat = torch.gt(ref_corr_scores_mat, threshold)
if mutual or bilateral:
src_max_scores, src_max_indices = torch.max(score_mat, dim=0)
src_indices = torch.arange(src_length).cuda()
src_corr_scores_mat = torch.zeros_like(score_mat)
src_corr_scores_mat[src_max_indices, src_indices] = src_max_scores
src_corr_masks_mat = torch.gt(src_corr_scores_mat, threshold)
if mutual:
corr_masks_mat = torch.logical_and(ref_corr_masks_mat, src_corr_masks_mat)
else:
corr_masks_mat = torch.logical_or(ref_corr_masks_mat, src_corr_masks_mat)
else:
corr_masks_mat = ref_corr_masks_mat
if has_dustbin:
corr_masks_mat = corr_masks_mat[:-1, :-1]
ref_corr_indices, src_corr_indices = torch.nonzero(corr_masks_mat, as_tuple=True)
if return_score:
corr_scores = score_mat[ref_corr_indices, src_corr_indices]
return ref_corr_indices, src_corr_indices, corr_scores
else:
return ref_corr_indices, src_corr_indices
@torch.no_grad()
def extract_correspondences_from_scores_threshold(
scores_mat: torch.Tensor, threshold: float, has_dustbin: bool = False, return_score: bool = False
):
r"""Extract the indices of correspondences from matching scores matrix (thresholding selection).
Args:
score_mat (Tensor): the logarithmic matching probabilities (N, M) or (N + 1, M + 1) according to `has_dustbin`
threshold (float = 0): confidence threshold
has_dustbin (bool = False): whether to use slack variables
return_score (bool = False): return correspondence scores
Returns:
ref_corr_indices (LongTensor): (C,)
src_corr_indices (LongTensor): (C,)
corr_scores (Tensor): (C,)
"""
scores_mat = torch.exp(scores_mat)
if has_dustbin:
scores_mat = scores_mat[:-1, :-1]
masks = torch.gt(scores_mat, threshold)
ref_corr_indices, src_corr_indices = torch.nonzero(masks, as_tuple=True)
if return_score:
corr_scores = scores_mat[ref_corr_indices, src_corr_indices]
return ref_corr_indices, src_corr_indices, corr_scores
else:
return ref_corr_indices, src_corr_indices
@torch.no_grad()
def extract_correspondences_from_scores_topk(
scores_mat: torch.Tensor, k: int, has_dustbin: bool = False, largest: bool = True, return_score: bool = False
):
r"""Extract the indices of correspondences from matching scores matrix (global top-k selection).
Args:
score_mat (Tensor): the scores (N, M) or (N + 1, M + 1) according to `has_dustbin`.
k (int): top-k.
has_dustbin (bool = False): whether to use slack variables.
largest (bool = True): whether to choose the largest ones.
return_score (bool = False): return correspondence scores.
Returns:
ref_corr_indices (LongTensor): (C,)
src_corr_indices (LongTensor): (C,)
corr_scores (Tensor): (C,)
"""
corr_indices = scores_mat.view(-1).topk(k=k, largest=largest)[1]
ref_corr_indices = corr_indices // scores_mat.shape[1]
src_corr_indices = corr_indices % scores_mat.shape[1]
if has_dustbin:
ref_masks = torch.ne(ref_corr_indices, scores_mat.shape[0] - 1)
src_masks = torch.ne(src_corr_indices, scores_mat.shape[1] - 1)
masks = torch.logical_and(ref_masks, src_masks)
ref_corr_indices = ref_corr_indices[masks]
src_corr_indices = src_corr_indices[masks]
if return_score:
corr_scores = scores_mat[ref_corr_indices, src_corr_indices]
return ref_corr_indices, src_corr_indices, corr_scores
else:
return ref_corr_indices, src_corr_indices
@torch.no_grad()
def extract_correspondences_from_feats(
ref_feats: torch.Tensor,
src_feats: torch.Tensor,
mutual: bool = False,
bilateral: bool = False,
return_feat_dist: bool = False,
):
r"""Extract the indices of correspondences from feature distances (nn selection).
Args:
ref_feats (Tensor): features of reference point cloud (N, C).
src_feats (Tensor): features of source point cloud (M, C).
mutual (bool = False): whether to get mutual correspondences.
bilateral (bool = False): whether to use bilateral non-mutual matching; ignored if `mutual` is set.
return_feat_dist (bool = False): return feature distances.
Returns:
ref_corr_indices (LongTensor): (C,)
src_corr_indices (LongTensor): (C,)
corr_feat_dists (Tensor): (C,)
"""
feat_dists_mat = pairwise_distance(ref_feats, src_feats)
ref_corr_indices, src_corr_indices = extract_correspondences_from_scores(
-feat_dists_mat,
mutual=mutual,
has_dustbin=False,
bilateral=bilateral,
)
if return_feat_dist:
corr_feat_dists = feat_dists_mat[ref_corr_indices, src_corr_indices]
return ref_corr_indices, src_corr_indices, corr_feat_dists
else:
return ref_corr_indices, src_corr_indices
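# Usage sketch (added for illustration; not part of the original module). Mutual
# nearest-neighbor matching between two tiny feature sets:
def _demo_extract_correspondences_from_feats():
    ref_feats = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
    src_feats = torch.tensor([[0.9, 0.1], [0.1, 0.9]])
    ref_corr_indices, src_corr_indices = extract_correspondences_from_feats(ref_feats, src_feats, mutual=True)
    return ref_corr_indices, src_corr_indices  # expected: [0, 1], [0, 1]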
# Patch correspondences
@torch.no_grad()
def dense_correspondences_to_node_correspondences(
ref_points: torch.Tensor,
src_points: torch.Tensor,
ref_nodes: torch.Tensor,
src_nodes: torch.Tensor,
corr_indices: torch.Tensor,
return_score: bool = False,
):
r"""Generate patch correspondences from point correspondences and the number of point correspondences within each
patch correspondences.
For each point correspondence, convert it to patch correspondence by replacing the point indices to the
corresponding patch indices.
We also define the proxy score for each patch correspondence as a estimation of the overlap ratio:
s = (#point_corr / #point_in_ref_patch + #point_corr / #point_in_src_patch) / 2
Args:
ref_points: reference point cloud
src_points: source point cloud
ref_nodes: reference patch points
src_nodes: source patch points
corr_indices: point correspondences
        return_score: whether to return the proxy score for each patch correspondence
Returns:
node_corr_indices (LongTensor): (C, 2)
node_corr_counts (LongTensor): (C,)
node_corr_scores (Tensor): (C,)
"""
ref_point_to_node, ref_node_sizes = get_point_to_node_indices(ref_points, ref_nodes, return_counts=True)
src_point_to_node, src_node_sizes = get_point_to_node_indices(src_points, src_nodes, return_counts=True)
ref_corr_indices = corr_indices[:, 0]
src_corr_indices = corr_indices[:, 1]
ref_node_corr_indices = ref_point_to_node[ref_corr_indices]
src_node_corr_indices = src_point_to_node[src_corr_indices]
node_corr_indices = ref_node_corr_indices * src_nodes.shape[0] + src_node_corr_indices
node_corr_indices, node_corr_counts = torch.unique(node_corr_indices, return_counts=True)
ref_node_corr_indices = node_corr_indices // src_nodes.shape[0]
src_node_corr_indices = node_corr_indices % src_nodes.shape[0]
node_corr_indices = torch.stack([ref_node_corr_indices, src_node_corr_indices], dim=1)
if return_score:
ref_node_corr_scores = node_corr_counts / ref_node_sizes[ref_node_corr_indices]
src_node_corr_scores = node_corr_counts / src_node_sizes[src_node_corr_indices]
node_corr_scores = (ref_node_corr_scores + src_node_corr_scores) / 2
return node_corr_indices, node_corr_counts, node_corr_scores
else:
return node_corr_indices, node_corr_counts
@torch.no_grad()
def get_node_correspondences(
ref_nodes: torch.Tensor,
src_nodes: torch.Tensor,
ref_knn_points: torch.Tensor,
src_knn_points: torch.Tensor,
transform: torch.Tensor,
pos_radius: float,
ref_masks: Optional[torch.Tensor] = None,
src_masks: Optional[torch.Tensor] = None,
ref_knn_masks: Optional[torch.Tensor] = None,
src_knn_masks: Optional[torch.Tensor] = None,
):
r"""Generate ground-truth superpoint/patch correspondences.
Each patch is composed of at most k nearest points of the corresponding superpoint.
    A pair of points match if the distance between them is smaller than `pos_radius`.
Args:
ref_nodes: torch.Tensor (M, 3)
src_nodes: torch.Tensor (N, 3)
ref_knn_points: torch.Tensor (M, K, 3)
src_knn_points: torch.Tensor (N, K, 3)
transform: torch.Tensor (4, 4)
pos_radius: float
ref_masks (optional): torch.BoolTensor (M,) (default: None)
src_masks (optional): torch.BoolTensor (N,) (default: None)
ref_knn_masks (optional): torch.BoolTensor (M, K) (default: None)
src_knn_masks (optional): torch.BoolTensor (N, K) (default: None)
Returns:
corr_indices: torch.LongTensor (C, 2)
corr_overlaps: torch.Tensor (C,)
"""
src_nodes = apply_transform(src_nodes, transform)
src_knn_points = apply_transform(src_knn_points, transform)
# generate masks
if ref_masks is None:
ref_masks = torch.ones(size=(ref_nodes.shape[0],), dtype=torch.bool).cuda()
if src_masks is None:
src_masks = torch.ones(size=(src_nodes.shape[0],), dtype=torch.bool).cuda()
if ref_knn_masks is None:
ref_knn_masks = torch.ones(size=(ref_knn_points.shape[0], ref_knn_points.shape[1]), dtype=torch.bool).cuda()
if src_knn_masks is None:
src_knn_masks = torch.ones(size=(src_knn_points.shape[0], src_knn_points.shape[1]), dtype=torch.bool).cuda()
node_mask_mat = torch.logical_and(ref_masks.unsqueeze(1), src_masks.unsqueeze(0)) # (M, N)
# filter out non-overlapping patches using enclosing sphere
ref_knn_dists = torch.linalg.norm(ref_knn_points - ref_nodes.unsqueeze(1), dim=-1) # (M, K)
ref_knn_dists.masked_fill_(~ref_knn_masks, 0.0)
ref_max_dists = ref_knn_dists.max(1)[0] # (M,)
src_knn_dists = torch.linalg.norm(src_knn_points - src_nodes.unsqueeze(1), dim=-1) # (N, K)
src_knn_dists.masked_fill_(~src_knn_masks, 0.0)
src_max_dists = src_knn_dists.max(1)[0] # (N,)
dist_mat = torch.sqrt(pairwise_distance(ref_nodes, src_nodes)) # (M, N)
intersect_mat = torch.gt(ref_max_dists.unsqueeze(1) + src_max_dists.unsqueeze(0) + pos_radius - dist_mat, 0)
intersect_mat = torch.logical_and(intersect_mat, node_mask_mat)
sel_ref_indices, sel_src_indices = torch.nonzero(intersect_mat, as_tuple=True)
# select potential patch pairs
ref_knn_masks = ref_knn_masks[sel_ref_indices] # (B, K)
src_knn_masks = src_knn_masks[sel_src_indices] # (B, K)
ref_knn_points = ref_knn_points[sel_ref_indices] # (B, K, 3)
src_knn_points = src_knn_points[sel_src_indices] # (B, K, 3)
point_mask_mat = torch.logical_and(ref_knn_masks.unsqueeze(2), src_knn_masks.unsqueeze(1)) # (B, K, K)
# compute overlaps
dist_mat = pairwise_distance(ref_knn_points, src_knn_points) # (B, K, K)
dist_mat.masked_fill_(~point_mask_mat, 1e12)
point_overlap_mat = torch.lt(dist_mat, pos_radius ** 2) # (B, K, K)
ref_overlap_counts = torch.count_nonzero(point_overlap_mat.sum(-1), dim=-1).float() # (B,)
src_overlap_counts = torch.count_nonzero(point_overlap_mat.sum(-2), dim=-1).float() # (B,)
ref_overlaps = ref_overlap_counts / ref_knn_masks.sum(-1).float() # (B,)
src_overlaps = src_overlap_counts / src_knn_masks.sum(-1).float() # (B,)
overlaps = (ref_overlaps + src_overlaps) / 2 # (B,)
overlap_masks = torch.gt(overlaps, 0)
ref_corr_indices = sel_ref_indices[overlap_masks]
src_corr_indices = sel_src_indices[overlap_masks]
corr_indices = torch.stack([ref_corr_indices, src_corr_indices], dim=1)
corr_overlaps = overlaps[overlap_masks]
return corr_indices, corr_overlaps
@torch.no_grad()
def node_correspondences_to_dense_correspondences(
ref_knn_points,
src_knn_points,
ref_knn_indices,
src_knn_indices,
node_corr_indices,
transform,
matching_radius,
ref_knn_masks=None,
src_knn_masks=None,
return_distance=False,
):
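    r"""Convert patch (node) correspondences into dense point correspondences.
    For each patch correspondence, all point pairs (one point from each patch) whose distance after applying
    `transform` to the source points is below `matching_radius` are collected as point correspondences.
    Returns:
        corr_indices (LongTensor): (C, 2)
        corr_distances (Tensor): (C,), returned only if `return_distance` is True
    """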
if ref_knn_masks is None:
ref_knn_masks = torch.ones_like(ref_knn_indices)
if src_knn_masks is None:
src_knn_masks = torch.ones_like(src_knn_indices)
src_knn_points = apply_transform(src_knn_points, transform)
ref_node_corr_indices = node_corr_indices[:, 0] # (P,)
src_node_corr_indices = node_corr_indices[:, 1] # (P,)
ref_node_corr_knn_indices = ref_knn_indices[ref_node_corr_indices] # (P, K)
src_node_corr_knn_indices = src_knn_indices[src_node_corr_indices] # (P, K)
ref_node_corr_knn_points = ref_knn_points[ref_node_corr_indices] # (P, K, 3)
src_node_corr_knn_points = src_knn_points[src_node_corr_indices] # (P, K, 3)
ref_node_corr_knn_masks = ref_knn_masks[ref_node_corr_indices] # (P, K)
src_node_corr_knn_masks = src_knn_masks[src_node_corr_indices] # (P, K)
dist_mat = torch.sqrt(pairwise_distance(ref_node_corr_knn_points, src_node_corr_knn_points)) # (P, K, K)
corr_mat = torch.lt(dist_mat, matching_radius)
mask_mat = torch.logical_and(ref_node_corr_knn_masks.unsqueeze(2), src_node_corr_knn_masks.unsqueeze(1))
corr_mat = torch.logical_and(corr_mat, mask_mat) # (P, K, K)
batch_indices, row_indices, col_indices = torch.nonzero(corr_mat, as_tuple=True) # (C,) (C,) (C,)
ref_corr_indices = ref_node_corr_knn_indices[batch_indices, row_indices]
src_corr_indices = src_node_corr_knn_indices[batch_indices, col_indices]
corr_indices = torch.stack([ref_corr_indices, src_corr_indices], dim=1)
if return_distance:
corr_distances = dist_mat[batch_indices, row_indices, col_indices]
return corr_indices, corr_distances
else:
return corr_indices
@torch.no_grad()
def get_node_overlap_ratios(
ref_points,
src_points,
ref_knn_points,
src_knn_points,
ref_knn_indices,
src_knn_indices,
node_corr_indices,
transform,
matching_radius,
ref_knn_masks,
src_knn_masks,
eps=1e-5,
):
corr_indices = node_correspondences_to_dense_correspondences(
ref_knn_points,
src_knn_points,
ref_knn_indices,
src_knn_indices,
node_corr_indices,
transform,
matching_radius,
ref_knn_masks=ref_knn_masks,
        src_knn_masks=src_knn_masks,
)
unique_ref_corr_indices = torch.unique(corr_indices[:, 0])
unique_src_corr_indices = torch.unique(corr_indices[:, 1])
ref_overlap_masks = torch.zeros(ref_points.shape[0] + 1).cuda() # pad for following indexing
src_overlap_masks = torch.zeros(src_points.shape[0] + 1).cuda() # pad for following indexing
ref_overlap_masks.index_fill_(0, unique_ref_corr_indices, 1.0)
src_overlap_masks.index_fill_(0, unique_src_corr_indices, 1.0)
ref_knn_overlap_masks = index_select(ref_overlap_masks, ref_knn_indices, dim=0) # (N', K)
src_knn_overlap_masks = index_select(src_overlap_masks, src_knn_indices, dim=0) # (M', K)
ref_knn_overlap_ratios = (ref_knn_overlap_masks * ref_knn_masks).sum(1) / (ref_knn_masks.sum(1) + eps)
src_knn_overlap_ratios = (src_knn_overlap_masks * src_knn_masks).sum(1) / (src_knn_masks.sum(1) + eps)
return ref_knn_overlap_ratios, src_knn_overlap_ratios
@torch.no_grad()
def get_node_occlusion_ratios(
ref_points,
src_points,
ref_knn_points,
src_knn_points,
ref_knn_indices,
src_knn_indices,
node_corr_indices,
transform,
matching_radius,
ref_knn_masks,
src_knn_masks,
eps=1e-5,
):
ref_knn_overlap_ratios, src_knn_overlap_ratios = get_node_overlap_ratios(
ref_points,
src_points,
ref_knn_points,
src_knn_points,
ref_knn_indices,
src_knn_indices,
node_corr_indices,
transform,
matching_radius,
ref_knn_masks,
src_knn_masks,
eps=eps,
)
ref_knn_occlusion_ratios = 1.0 - ref_knn_overlap_ratios
src_knn_occlusion_ratios = 1.0 - src_knn_overlap_ratios
return ref_knn_occlusion_ratios, src_knn_occlusion_ratios
| 17,458 | 39.414352 | 118 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/registration/metrics.py | import numpy as np
import torch
from geotransformer.modules.ops import apply_transform, pairwise_distance, get_rotation_translation_from_transform
from geotransformer.utils.registration import compute_transform_mse_and_mae
def modified_chamfer_distance(raw_points, ref_points, src_points, gt_transform, transform, reduction='mean'):
r"""Compute the modified chamfer distance.
Args:
raw_points (Tensor): (B, N_raw, 3)
ref_points (Tensor): (B, N_ref, 3)
src_points (Tensor): (B, N_src, 3)
gt_transform (Tensor): (B, 4, 4)
transform (Tensor): (B, 4, 4)
reduction (str='mean'): reduction method, 'mean', 'sum' or 'none'
Returns:
chamfer_distance
"""
assert reduction in ['mean', 'sum', 'none']
# P_t -> Q_raw
aligned_src_points = apply_transform(src_points, transform) # (B, N_src, 3)
sq_dist_mat_p_q = pairwise_distance(aligned_src_points, raw_points) # (B, N_src, N_raw)
nn_sq_distances_p_q = sq_dist_mat_p_q.min(dim=-1)[0] # (B, N_src)
chamfer_distance_p_q = torch.sqrt(nn_sq_distances_p_q).mean(dim=-1) # (B)
# Q -> P_raw
composed_transform = torch.matmul(transform, torch.inverse(gt_transform)) # (B, 4, 4)
aligned_raw_points = apply_transform(raw_points, composed_transform) # (B, N_raw, 3)
sq_dist_mat_q_p = pairwise_distance(ref_points, aligned_raw_points) # (B, N_ref, N_raw)
nn_sq_distances_q_p = sq_dist_mat_q_p.min(dim=-1)[0] # (B, N_ref)
chamfer_distance_q_p = torch.sqrt(nn_sq_distances_q_p).mean(dim=-1) # (B)
# sum up
chamfer_distance = chamfer_distance_p_q + chamfer_distance_q_p # (B)
if reduction == 'mean':
chamfer_distance = chamfer_distance.mean()
elif reduction == 'sum':
chamfer_distance = chamfer_distance.sum()
return chamfer_distance
def relative_rotation_error(gt_rotations, rotations):
r"""Isotropic Relative Rotation Error.
RRE = acos((trace(R^T \cdot \bar{R}) - 1) / 2)
Args:
gt_rotations (Tensor): ground truth rotation matrix (*, 3, 3)
rotations (Tensor): estimated rotation matrix (*, 3, 3)
Returns:
rre (Tensor): relative rotation errors (*)
"""
mat = torch.matmul(rotations.transpose(-1, -2), gt_rotations)
trace = mat[..., 0, 0] + mat[..., 1, 1] + mat[..., 2, 2]
x = 0.5 * (trace - 1.0)
x = x.clamp(min=-1.0, max=1.0)
x = torch.arccos(x)
rre = 180.0 * x / np.pi
return rre
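# Worked example (added for illustration; not part of the original module). A
# 90-degree rotation about z against the identity gives trace(R^T I) = 1, so
# acos((1 - 1) / 2) = pi / 2, i.e. an RRE of 90 degrees:
def _demo_relative_rotation_error():
    gt_rotations = torch.eye(3).unsqueeze(0)
    rotations = torch.tensor([[[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]])
    return relative_rotation_error(gt_rotations, rotations)  # ~ tensor([90.])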
def relative_translation_error(gt_translations, translations):
r"""Isotropic Relative Rotation Error.
RTE = \lVert t - \bar{t} \rVert_2
Args:
gt_translations (Tensor): ground truth translation vector (*, 3)
translations (Tensor): estimated translation vector (*, 3)
Returns:
rre (Tensor): relative rotation errors (*)
"""
rte = torch.linalg.norm(gt_translations - translations, dim=-1)
return rte
def isotropic_transform_error(gt_transforms, transforms, reduction='mean'):
r"""Compute the isotropic Relative Rotation Error and Relative Translation Error.
Args:
gt_transforms (Tensor): ground truth transformation matrix (*, 4, 4)
transforms (Tensor): estimated transformation matrix (*, 4, 4)
reduction (str='mean'): reduction method, 'mean', 'sum' or 'none'
Returns:
rre (Tensor): relative rotation error.
rte (Tensor): relative translation error.
"""
assert reduction in ['mean', 'sum', 'none']
gt_rotations, gt_translations = get_rotation_translation_from_transform(gt_transforms)
rotations, translations = get_rotation_translation_from_transform(transforms)
rre = relative_rotation_error(gt_rotations, rotations) # (*)
rte = relative_translation_error(gt_translations, translations) # (*)
if reduction == 'mean':
rre = rre.mean()
rte = rte.mean()
elif reduction == 'sum':
rre = rre.sum()
rte = rte.sum()
return rre, rte
def anisotropic_transform_error(gt_transforms, transforms, reduction='mean'):
r"""Compute the anisotropic Relative Rotation Error and Relative Translation Error.
    This function calls a numpy-based implementation to achieve batch-wise computation and is thus non-differentiable.
Args:
gt_transforms (Tensor): ground truth transformation matrix (B, 4, 4)
transforms (Tensor): estimated transformation matrix (B, 4, 4)
reduction (str='mean'): reduction method, 'mean', 'sum' or 'none'
Returns:
r_mse (Tensor): rotation mse.
r_mae (Tensor): rotation mae.
t_mse (Tensor): translation mse.
t_mae (Tensor): translation mae.
"""
assert reduction in ['mean', 'sum', 'none']
batch_size = gt_transforms.shape[0]
gt_transforms_array = gt_transforms.detach().cpu().numpy()
transforms_array = transforms.detach().cpu().numpy()
all_r_mse = []
all_r_mae = []
all_t_mse = []
all_t_mae = []
for i in range(batch_size):
r_mse, r_mae, t_mse, t_mae = compute_transform_mse_and_mae(gt_transforms_array[i], transforms_array[i])
all_r_mse.append(r_mse)
all_r_mae.append(r_mae)
all_t_mse.append(t_mse)
all_t_mae.append(t_mae)
r_mse = torch.as_tensor(all_r_mse).to(gt_transforms)
r_mae = torch.as_tensor(all_r_mae).to(gt_transforms)
t_mse = torch.as_tensor(all_t_mse).to(gt_transforms)
t_mae = torch.as_tensor(all_t_mae).to(gt_transforms)
if reduction == 'mean':
r_mse = r_mse.mean()
r_mae = r_mae.mean()
t_mse = t_mse.mean()
t_mae = t_mae.mean()
elif reduction == 'sum':
r_mse = r_mse.sum()
r_mae = r_mae.sum()
t_mse = t_mse.sum()
t_mae = t_mae.sum()
return r_mse, r_mae, t_mse, t_mae
| 5,779 | 34.460123 | 116 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/registration/procrustes.py | import torch
import torch.nn as nn
def weighted_procrustes(
src_points,
ref_points,
weights=None,
weight_thresh=0.0,
eps=1e-5,
return_transform=False,
):
r"""Compute rigid transformation from `src_points` to `ref_points` using weighted SVD.
Modified from [PointDSC](https://github.com/XuyangBai/PointDSC/blob/master/models/common.py).
Args:
src_points: torch.Tensor (B, N, 3) or (N, 3)
ref_points: torch.Tensor (B, N, 3) or (N, 3)
weights: torch.Tensor (B, N) or (N,) (default: None)
weight_thresh: float (default: 0.)
eps: float (default: 1e-5)
return_transform: bool (default: False)
Returns:
R: torch.Tensor (B, 3, 3) or (3, 3)
t: torch.Tensor (B, 3) or (3,)
transform: torch.Tensor (B, 4, 4) or (4, 4)
"""
if src_points.ndim == 2:
src_points = src_points.unsqueeze(0)
ref_points = ref_points.unsqueeze(0)
if weights is not None:
weights = weights.unsqueeze(0)
squeeze_first = True
else:
squeeze_first = False
batch_size = src_points.shape[0]
if weights is None:
weights = torch.ones_like(src_points[:, :, 0])
weights = torch.where(torch.lt(weights, weight_thresh), torch.zeros_like(weights), weights)
weights = weights / (torch.sum(weights, dim=1, keepdim=True) + eps)
weights = weights.unsqueeze(2) # (B, N, 1)
src_centroid = torch.sum(src_points * weights, dim=1, keepdim=True) # (B, 1, 3)
ref_centroid = torch.sum(ref_points * weights, dim=1, keepdim=True) # (B, 1, 3)
src_points_centered = src_points - src_centroid # (B, N, 3)
ref_points_centered = ref_points - ref_centroid # (B, N, 3)
H = src_points_centered.permute(0, 2, 1) @ (weights * ref_points_centered)
U, _, V = torch.svd(H.cpu()) # H = USV^T
Ut, V = U.transpose(1, 2).cuda(), V.cuda()
eye = torch.eye(3).unsqueeze(0).repeat(batch_size, 1, 1).cuda()
eye[:, -1, -1] = torch.sign(torch.det(V @ Ut))
R = V @ eye @ Ut
t = ref_centroid.permute(0, 2, 1) - R @ src_centroid.permute(0, 2, 1)
t = t.squeeze(2)
if return_transform:
transform = torch.eye(4).unsqueeze(0).repeat(batch_size, 1, 1).cuda()
transform[:, :3, :3] = R
transform[:, :3, 3] = t
if squeeze_first:
transform = transform.squeeze(0)
return transform
else:
if squeeze_first:
R = R.squeeze(0)
t = t.squeeze(0)
return R, t
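# Usage sketch (added for illustration; not part of the original module). It
# requires a CUDA device because the implementation moves intermediates onto
# the GPU. A known rigid transform is recovered from exact correspondences:
def _demo_weighted_procrustes():
    c, s = 0.9553, 0.2955  # approximately cos(0.3) and sin(0.3)
    R_gt = torch.tensor([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]]).cuda()
    t_gt = torch.tensor([0.5, -0.2, 1.0]).cuda()
    src_points = torch.rand(100, 3).cuda()
    ref_points = src_points @ R_gt.T + t_gt
    R, t = weighted_procrustes(src_points, ref_points)
    return R, t  # R ~ R_gt and t ~ t_gt (R_gt above is only approximately orthogonal)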
class WeightedProcrustes(nn.Module):
def __init__(self, weight_thresh=0.0, eps=1e-5, return_transform=False):
super(WeightedProcrustes, self).__init__()
self.weight_thresh = weight_thresh
self.eps = eps
self.return_transform = return_transform
def forward(self, src_points, tgt_points, weights=None):
return weighted_procrustes(
src_points,
tgt_points,
weights=weights,
weight_thresh=self.weight_thresh,
eps=self.eps,
return_transform=self.return_transform,
)
| 3,119 | 32.913043 | 97 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/sinkhorn/__init__.py | from geotransformer.modules.sinkhorn.learnable_sinkhorn import LearnableLogOptimalTransport
| 92 | 45.5 | 91 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/sinkhorn/learnable_sinkhorn.py | import torch
import torch.nn as nn
class LearnableLogOptimalTransport(nn.Module):
def __init__(self, num_iterations, inf=1e12):
r"""Sinkhorn Optimal transport with dustbin parameter (SuperGlue style)."""
super(LearnableLogOptimalTransport, self).__init__()
self.num_iterations = num_iterations
self.register_parameter('alpha', torch.nn.Parameter(torch.tensor(1.0)))
self.inf = inf
def log_sinkhorn_normalization(self, scores, log_mu, log_nu):
u, v = torch.zeros_like(log_mu), torch.zeros_like(log_nu)
for _ in range(self.num_iterations):
u = log_mu - torch.logsumexp(scores + v.unsqueeze(1), dim=2)
v = log_nu - torch.logsumexp(scores + u.unsqueeze(2), dim=1)
return scores + u.unsqueeze(2) + v.unsqueeze(1)
def forward(self, scores, row_masks=None, col_masks=None):
r"""Sinkhorn Optimal Transport (SuperGlue style) forward.
Args:
scores: torch.Tensor (B, M, N)
row_masks: torch.Tensor (B, M)
col_masks: torch.Tensor (B, N)
Returns:
matching_scores: torch.Tensor (B, M+1, N+1)
"""
batch_size, num_row, num_col = scores.shape
if row_masks is None:
row_masks = torch.ones(size=(batch_size, num_row), dtype=torch.bool).cuda()
if col_masks is None:
col_masks = torch.ones(size=(batch_size, num_col), dtype=torch.bool).cuda()
padded_row_masks = torch.zeros(size=(batch_size, num_row + 1), dtype=torch.bool).cuda()
padded_row_masks[:, :num_row] = ~row_masks
padded_col_masks = torch.zeros(size=(batch_size, num_col + 1), dtype=torch.bool).cuda()
padded_col_masks[:, :num_col] = ~col_masks
padded_score_masks = torch.logical_or(padded_row_masks.unsqueeze(2), padded_col_masks.unsqueeze(1))
padded_col = self.alpha.expand(batch_size, num_row, 1)
padded_row = self.alpha.expand(batch_size, 1, num_col + 1)
padded_scores = torch.cat([torch.cat([scores, padded_col], dim=-1), padded_row], dim=1)
padded_scores.masked_fill_(padded_score_masks, -self.inf)
num_valid_row = row_masks.float().sum(1)
num_valid_col = col_masks.float().sum(1)
norm = -torch.log(num_valid_row + num_valid_col) # (B,)
log_mu = torch.empty(size=(batch_size, num_row + 1)).cuda()
log_mu[:, :num_row] = norm.unsqueeze(1)
log_mu[:, num_row] = torch.log(num_valid_col) + norm
log_mu[padded_row_masks] = -self.inf
log_nu = torch.empty(size=(batch_size, num_col + 1)).cuda()
log_nu[:, :num_col] = norm.unsqueeze(1)
log_nu[:, num_col] = torch.log(num_valid_row) + norm
log_nu[padded_col_masks] = -self.inf
outputs = self.log_sinkhorn_normalization(padded_scores, log_mu, log_nu)
outputs = outputs - norm.unsqueeze(1).unsqueeze(2)
return outputs
def __repr__(self):
format_string = self.__class__.__name__ + '(num_iterations={})'.format(self.num_iterations)
return format_string
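# Usage sketch (added for illustration; not part of the original module). It
# requires a CUDA device because the masks and marginals are allocated on GPU:
def _demo_learnable_log_optimal_transport():
    ot = LearnableLogOptimalTransport(num_iterations=100).cuda()
    scores = torch.rand(2, 5, 6).cuda()  # (B, M, N) raw matching scores
    log_probs = ot(scores)  # (B, M + 1, N + 1) log matching probabilities
    return log_probs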
| 3,081 | 42.408451 | 107 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/transformer/__init__.py | from geotransformer.modules.transformer.conditional_transformer import (
VanillaConditionalTransformer,
PEConditionalTransformer,
RPEConditionalTransformer,
LRPEConditionalTransformer,
)
from geotransformer.modules.transformer.lrpe_transformer import LRPETransformerLayer
from geotransformer.modules.transformer.pe_transformer import PETransformerLayer
from geotransformer.modules.transformer.positional_embedding import (
SinusoidalPositionalEmbedding,
LearnablePositionalEmbedding,
)
from geotransformer.modules.transformer.rpe_transformer import RPETransformerLayer
from geotransformer.modules.transformer.vanilla_transformer import (
TransformerLayer,
TransformerDecoderLayer,
TransformerEncoder,
TransformerDecoder,
)
| 763 | 37.2 | 84 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/transformer/conditional_transformer.py | import torch.nn as nn
from geotransformer.modules.transformer.lrpe_transformer import LRPETransformerLayer
from geotransformer.modules.transformer.pe_transformer import PETransformerLayer
from geotransformer.modules.transformer.rpe_transformer import RPETransformerLayer
from geotransformer.modules.transformer.vanilla_transformer import TransformerLayer
def _check_block_type(block):
if block not in ['self', 'cross']:
raise ValueError('Unsupported block type "{}".'.format(block))
class VanillaConditionalTransformer(nn.Module):
def __init__(self, blocks, d_model, num_heads, dropout=None, activation_fn='ReLU', return_attention_scores=False):
super(VanillaConditionalTransformer, self).__init__()
self.blocks = blocks
layers = []
for block in self.blocks:
_check_block_type(block)
layers.append(TransformerLayer(d_model, num_heads, dropout=dropout, activation_fn=activation_fn))
self.layers = nn.ModuleList(layers)
self.return_attention_scores = return_attention_scores
def forward(self, feats0, feats1, masks0=None, masks1=None):
attention_scores = []
for i, block in enumerate(self.blocks):
if block == 'self':
feats0, scores0 = self.layers[i](feats0, feats0, memory_masks=masks0)
feats1, scores1 = self.layers[i](feats1, feats1, memory_masks=masks1)
else:
feats0, scores0 = self.layers[i](feats0, feats1, memory_masks=masks1)
feats1, scores1 = self.layers[i](feats1, feats0, memory_masks=masks0)
if self.return_attention_scores:
attention_scores.append([scores0, scores1])
if self.return_attention_scores:
return feats0, feats1, attention_scores
else:
return feats0, feats1
class PEConditionalTransformer(nn.Module):
def __init__(self, blocks, d_model, num_heads, dropout=None, activation_fn='ReLU', return_attention_scores=False):
super(PEConditionalTransformer, self).__init__()
self.blocks = blocks
layers = []
for block in self.blocks:
_check_block_type(block)
if block == 'self':
layers.append(PETransformerLayer(d_model, num_heads, dropout=dropout, activation_fn=activation_fn))
else:
layers.append(TransformerLayer(d_model, num_heads, dropout=dropout, activation_fn=activation_fn))
self.layers = nn.ModuleList(layers)
self.return_attention_scores = return_attention_scores
def forward(self, feats0, feats1, embeddings0, embeddings1, masks0=None, masks1=None):
attention_scores = []
for i, block in enumerate(self.blocks):
if block == 'self':
feats0, scores0 = self.layers[i](feats0, feats0, embeddings0, embeddings0, memory_masks=masks0)
feats1, scores1 = self.layers[i](feats1, feats1, embeddings1, embeddings1, memory_masks=masks1)
else:
feats0, scores0 = self.layers[i](feats0, feats1, memory_masks=masks1)
feats1, scores1 = self.layers[i](feats1, feats0, memory_masks=masks0)
if self.return_attention_scores:
attention_scores.append([scores0, scores1])
if self.return_attention_scores:
return feats0, feats1, attention_scores
else:
return feats0, feats1
class RPEConditionalTransformer(nn.Module):
def __init__(
self,
blocks,
d_model,
num_heads,
dropout=None,
activation_fn='ReLU',
return_attention_scores=False,
parallel=False,
):
super(RPEConditionalTransformer, self).__init__()
self.blocks = blocks
layers = []
for block in self.blocks:
_check_block_type(block)
if block == 'self':
layers.append(RPETransformerLayer(d_model, num_heads, dropout=dropout, activation_fn=activation_fn))
else:
layers.append(TransformerLayer(d_model, num_heads, dropout=dropout, activation_fn=activation_fn))
self.layers = nn.ModuleList(layers)
self.return_attention_scores = return_attention_scores
self.parallel = parallel
def forward(self, feats0, feats1, embeddings0, embeddings1, masks0=None, masks1=None):
attention_scores = []
for i, block in enumerate(self.blocks):
if block == 'self':
feats0, scores0 = self.layers[i](feats0, feats0, embeddings0, memory_masks=masks0)
feats1, scores1 = self.layers[i](feats1, feats1, embeddings1, memory_masks=masks1)
else:
if self.parallel:
new_feats0, scores0 = self.layers[i](feats0, feats1, memory_masks=masks1)
new_feats1, scores1 = self.layers[i](feats1, feats0, memory_masks=masks0)
feats0 = new_feats0
feats1 = new_feats1
else:
feats0, scores0 = self.layers[i](feats0, feats1, memory_masks=masks1)
feats1, scores1 = self.layers[i](feats1, feats0, memory_masks=masks0)
if self.return_attention_scores:
attention_scores.append([scores0, scores1])
if self.return_attention_scores:
return feats0, feats1, attention_scores
else:
return feats0, feats1
class LRPEConditionalTransformer(nn.Module):
def __init__(
self,
blocks,
d_model,
num_heads,
num_embeddings,
dropout=None,
activation_fn='ReLU',
return_attention_scores=False,
):
super(LRPEConditionalTransformer, self).__init__()
self.blocks = blocks
layers = []
for block in self.blocks:
_check_block_type(block)
if block == 'self':
layers.append(
LRPETransformerLayer(
d_model, num_heads, num_embeddings, dropout=dropout, activation_fn=activation_fn
)
)
else:
layers.append(TransformerLayer(d_model, num_heads, dropout=dropout, activation_fn=activation_fn))
self.layers = nn.ModuleList(layers)
self.return_attention_scores = return_attention_scores
def forward(self, feats0, feats1, emb_indices0, emb_indices1, masks0=None, masks1=None):
attention_scores = []
for i, block in enumerate(self.blocks):
if block == 'self':
feats0, scores0 = self.layers[i](feats0, feats0, emb_indices0, memory_masks=masks0)
feats1, scores1 = self.layers[i](feats1, feats1, emb_indices1, memory_masks=masks1)
else:
feats0, scores0 = self.layers[i](feats0, feats1, memory_masks=masks1)
feats1, scores1 = self.layers[i](feats1, feats0, memory_masks=masks0)
if self.return_attention_scores:
attention_scores.append([scores0, scores1])
if self.return_attention_scores:
return feats0, feats1, attention_scores
else:
return feats0, feats1
| 7,220 | 43.574074 | 118 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/transformer/lrpe_transformer.py | r"""Transformer with Learnable Relative Positional Embeddings.
Relative positional embedding is injected in each multi-head attention layer.
The shape of input tensor should be (B, N, C).
Implemented with `nn.Linear` and `nn.LayerNorm` (with affine).
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from geotransformer.modules.layers import build_dropout_layer
from geotransformer.modules.transformer.output_layer import AttentionOutput
from geotransformer.modules.transformer.positional_embedding import LearnablePositionalEmbedding
class LRPEMultiHeadAttention(nn.Module):
def __init__(self, d_model, num_heads, num_embeddings, dropout=None):
super(LRPEMultiHeadAttention, self).__init__()
if d_model % num_heads != 0:
raise ValueError(f'"d_model" ({d_model}) is not divisible by "num_heads" ({num_heads}).')
self.d_model = d_model
self.num_heads = num_heads
self.d_model_per_head = d_model // num_heads
self.num_embeddings = num_embeddings
self.proj_q = nn.Linear(self.d_model, self.d_model)
self.proj_k = nn.Linear(self.d_model, self.d_model)
self.proj_v = nn.Linear(self.d_model, self.d_model)
self.embedding = LearnablePositionalEmbedding(num_embeddings, d_model, dropout=dropout)
self.dropout = build_dropout_layer(dropout)
def transpose_for_scores(self, x):
x = x.view(x.shape[0], x.shape[1], self.num_heads, self.d_model_per_head)
x = x.permute(0, 2, 1, 3)
return x
    def get_embedding_attention(self, q, emb_indices):
emb_all_indices = torch.arange(self.num_embeddings).cuda() # (P,)
emb_bank = rearrange(self.embedding(emb_all_indices), 'p (h c) -> h p c', h=self.num_heads)
attention_scores = torch.einsum('bhnc,hpc->bhnp', q, emb_bank)
emb_indices = emb_indices.unsqueeze(1).expand(-1, self.num_heads, -1, -1) # (B, N, M) -> (B, H, N, M)
attention_scores = torch.gather(attention_scores, dim=-1, index=emb_indices) # (B, H, N, P) -> (B, H, N, M)
return attention_scores
def forward(
self,
input_q,
input_k,
input_v,
emb_indices_qk,
key_masks=None,
attention_factors=None,
):
r"""Scaled Dot-Product Attention with Learnable Relative Positional Embedding (forward)
Args:
input_q: torch.Tensor (B, N, C)
input_k: torch.Tensor (B, M, C)
input_v: torch.Tensor (B, M, C)
emb_indices_qk: torch.Tensor (B, N, M), relative position indices
key_masks: torch.Tensor (B, M), True if ignored, False if preserved
attention_factors: torch.Tensor (B, N, M)
Returns
hidden_states: torch.Tensor (B, N, C)
attention_scores: torch.Tensor (B, H, N, M)
"""
q = rearrange(self.proj_q(input_q), 'b n (h c) -> b h n c', h=self.num_heads)
k = rearrange(self.proj_k(input_k), 'b m (h c) -> b h m c', h=self.num_heads)
v = rearrange(self.proj_v(input_v), 'b m (h c) -> b h m c', h=self.num_heads)
attention_scores_p = self.get_embedding_attention(q, emb_indices_qk)
attention_scores_e = torch.einsum('bhnc,bhmc->bhnm', q, k)
attention_scores = (attention_scores_e + attention_scores_p) / self.d_model_per_head ** 0.5
if attention_factors is not None:
attention_scores = attention_factors.unsqueeze(1) * attention_scores
if key_masks is not None:
attention_scores = attention_scores.masked_fill(key_masks.unsqueeze(1).unsqueeze(1), float('-inf'))
attention_scores = F.softmax(attention_scores, dim=-1)
attention_scores = self.dropout(attention_scores)
hidden_states = torch.matmul(attention_scores, v)
hidden_states = rearrange(hidden_states, 'b h n c -> b n (h c)')
return hidden_states, attention_scores
class LRPEAttentionLayer(nn.Module):
def __init__(self, d_model, num_heads, rpe_size, dropout=None):
super(LRPEAttentionLayer, self).__init__()
self.attention = LRPEMultiHeadAttention(d_model, num_heads, rpe_size, dropout=dropout)
self.linear = nn.Linear(d_model, d_model)
self.dropout = build_dropout_layer(dropout)
self.norm = nn.LayerNorm(d_model)
def forward(
self,
input_states,
memory_states,
position_states,
memory_masks=None,
attention_factors=None,
):
hidden_states, attention_scores = self.attention(
input_states,
memory_states,
memory_states,
position_states,
key_masks=memory_masks,
attention_factors=attention_factors,
)
hidden_states = self.linear(hidden_states)
hidden_states = self.dropout(hidden_states)
output_states = self.norm(hidden_states + input_states)
return output_states, attention_scores
class LRPETransformerLayer(nn.Module):
def __init__(self, d_model, num_heads, rpe_size, dropout=None, activation_fn='ReLU'):
super(LRPETransformerLayer, self).__init__()
self.attention = LRPEAttentionLayer(d_model, num_heads, rpe_size, dropout=dropout)
self.output = AttentionOutput(d_model, dropout=dropout, activation_fn=activation_fn)
def forward(
self,
input_states,
memory_states,
position_states,
memory_masks=None,
attention_factors=None,
):
hidden_states, attention_scores = self.attention(
input_states,
memory_states,
position_states,
memory_masks=memory_masks,
attention_factors=attention_factors,
)
output_states = self.output(hidden_states)
return output_states, attention_scores
| 5,871 | 38.409396 | 116 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/transformer/output_layer.py | import torch.nn as nn
from geotransformer.modules.layers import build_act_layer, build_dropout_layer
class AttentionOutput(nn.Module):
def __init__(self, d_model, dropout=None, activation_fn='ReLU'):
super(AttentionOutput, self).__init__()
self.expand = nn.Linear(d_model, d_model * 2)
self.activation = build_act_layer(activation_fn)
self.squeeze = nn.Linear(d_model * 2, d_model)
self.dropout = build_dropout_layer(dropout)
self.norm = nn.LayerNorm(d_model)
def forward(self, input_states):
hidden_states = self.expand(input_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.squeeze(hidden_states)
hidden_states = self.dropout(hidden_states)
output_states = self.norm(input_states + hidden_states)
return output_states
| 855 | 37.909091 | 78 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/transformer/pe_transformer.py | r"""Vanilla Transformer without positional embeddings.
The shape of input tensor should be (B, N, C). Implemented with `nn.Linear` and `nn.LayerNorm` (with affine).
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from geotransformer.modules.layers import build_dropout_layer
from geotransformer.modules.transformer.output_layer import AttentionOutput
class PEMultiHeadAttention(nn.Module):
def __init__(self, d_model, num_heads, dropout=None):
super(PEMultiHeadAttention, self).__init__()
if d_model % num_heads != 0:
            raise ValueError('`d_model` ({}) must be a multiple of `num_heads` ({}).'.format(d_model, num_heads))
self.d_model = d_model
self.num_heads = num_heads
self.d_model_per_head = d_model // num_heads
self.proj_q = nn.Linear(self.d_model, self.d_model)
self.proj_k = nn.Linear(self.d_model, self.d_model)
self.proj_v = nn.Linear(self.d_model, self.d_model)
self.proj_p = nn.Linear(self.d_model, self.d_model)
self.dropout = build_dropout_layer(dropout)
def forward(
self,
input_q,
input_k,
input_v,
embed_q,
embed_k,
key_masks=None,
attention_factors=None,
):
"""Self-attention with positional embedding forward propagation.
Args:
input_q: torch.Tensor (B, N, C)
input_k: torch.Tensor (B, M, C)
input_v: torch.Tensor (B, M, C)
embed_q: torch.Tensor (B, N, C)
embed_k: torch.Tensor (B, M, C)
key_masks: torch.Tensor (B, M), True if ignored, False if preserved
attention_factors: torch.Tensor (B, N, M)
Returns:
            hidden_states: torch.Tensor (B, N, C)
attention_scores: torch.Tensor (B, H, N, M)
"""
q = rearrange(self.proj_q(input_q) + self.proj_p(embed_q), 'b n (h c) -> b h n c', h=self.num_heads)
k = rearrange(self.proj_k(input_k) + self.proj_p(embed_k), 'b m (h c) -> b h m c', h=self.num_heads)
v = rearrange(self.proj_v(input_v), 'b m (h c) -> b h m c', h=self.num_heads)
attention_scores = torch.einsum('bhnc,bhmc->bhnm', q, k) / self.d_model_per_head ** 0.5
if attention_factors is not None:
attention_scores = attention_factors.unsqueeze(1) * attention_scores
if key_masks is not None:
attention_scores = attention_scores.masked_fill(key_masks.unsqueeze(1).unsqueeze(1), float('-inf'))
attention_scores = F.softmax(attention_scores, dim=-1)
attention_scores = self.dropout(attention_scores)
hidden_states = torch.matmul(attention_scores, v)
hidden_states = rearrange(hidden_states, 'b h n c -> b n (h c)')
return hidden_states, attention_scores
class PEAttentionLayer(nn.Module):
def __init__(self, d_model, num_heads, dropout=None):
super(PEAttentionLayer, self).__init__()
self.attention = PEMultiHeadAttention(d_model, num_heads, dropout=dropout)
self.linear = nn.Linear(d_model, d_model)
self.dropout = build_dropout_layer(dropout)
self.norm = nn.LayerNorm(d_model)
def forward(
self,
input_states,
memory_states,
input_embeddings,
memory_embeddings,
memory_masks=None,
attention_factors=None,
):
hidden_states, attention_scores = self.attention(
input_states,
memory_states,
memory_states,
input_embeddings,
memory_embeddings,
key_masks=memory_masks,
attention_factors=attention_factors,
)
hidden_states = self.linear(hidden_states)
hidden_states = self.dropout(hidden_states)
output_states = self.norm(hidden_states + input_states)
return output_states, attention_scores
class PETransformerLayer(nn.Module):
def __init__(self, d_model, num_heads, dropout=None, activation_fn='ReLU'):
super(PETransformerLayer, self).__init__()
self.attention = PEAttentionLayer(d_model, num_heads, dropout=dropout)
self.output = AttentionOutput(d_model, dropout=dropout, activation_fn=activation_fn)
def forward(
self,
input_states,
memory_states,
input_embeddings,
memory_embeddings,
memory_masks=None,
attention_factors=None,
):
hidden_states, attention_scores = self.attention(
input_states,
memory_states,
input_embeddings,
memory_embeddings,
memory_masks=memory_masks,
attention_factors=attention_factors,
)
output_states = self.output(hidden_states)
return output_states, attention_scores
| 4,833 | 35.345865 | 112 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/transformer/positional_embedding.py | import numpy as np
import torch
import torch.nn as nn
from geotransformer.modules.layers import build_dropout_layer
class SinusoidalPositionalEmbedding(nn.Module):
def __init__(self, d_model):
super(SinusoidalPositionalEmbedding, self).__init__()
if d_model % 2 != 0:
raise ValueError(f'Sinusoidal positional encoding with odd d_model: {d_model}')
self.d_model = d_model
div_indices = torch.arange(0, d_model, 2).float()
div_term = torch.exp(div_indices * (-np.log(10000.0) / d_model))
self.register_buffer('div_term', div_term)
def forward(self, emb_indices):
r"""Sinusoidal Positional Embedding.
Args:
emb_indices: torch.Tensor (*)
Returns:
embeddings: torch.Tensor (*, D)
"""
input_shape = emb_indices.shape
omegas = emb_indices.view(-1, 1, 1) * self.div_term.view(1, -1, 1) # (-1, d_model/2, 1)
sin_embeddings = torch.sin(omegas)
cos_embeddings = torch.cos(omegas)
embeddings = torch.cat([sin_embeddings, cos_embeddings], dim=2) # (-1, d_model/2, 2)
embeddings = embeddings.view(*input_shape, self.d_model) # (*, d_model)
embeddings = embeddings.detach()
return embeddings
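# Usage sketch (added for illustration; not part of the original module). The
# output keeps the input shape and appends a channel dimension of size d_model,
# storing a (sin, cos) pair per frequency:
def _demo_sinusoidal_positional_embedding():
    embedding = SinusoidalPositionalEmbedding(d_model=8)
    emb_indices = torch.arange(4)  # any shape is accepted
    return embedding(emb_indices)  # (4, 8)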
class LearnablePositionalEmbedding(nn.Module):
def __init__(self, num_embeddings, embedding_dim, dropout=None):
super(LearnablePositionalEmbedding, self).__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
self.embeddings = nn.Embedding(num_embeddings, embedding_dim) # (L, D)
self.norm = nn.LayerNorm(embedding_dim)
self.dropout = build_dropout_layer(dropout)
def forward(self, emb_indices):
r"""Learnable Positional Embedding.
`emb_indices` are truncated to fit the finite embedding space.
Args:
emb_indices: torch.LongTensor (*)
Returns:
embeddings: torch.Tensor (*, D)
"""
input_shape = emb_indices.shape
emb_indices = emb_indices.view(-1)
        max_emb_indices = torch.full_like(emb_indices, self.num_embeddings - 1)
        emb_indices = torch.minimum(emb_indices, max_emb_indices)
embeddings = self.embeddings(emb_indices) # (*, D)
embeddings = self.norm(embeddings)
embeddings = self.dropout(embeddings)
embeddings = embeddings.view(*input_shape, self.embedding_dim)
return embeddings
| 2,484 | 36.651515 | 96 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/transformer/rpe_transformer.py | r"""Transformer with Relative Positional Embeddings.
Relative positional embedding is further projected in each multi-head attention layer.
The shape of input tensor should be (B, N, C). Implemented with `nn.Linear` and `nn.LayerNorm` (with affine).
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from geotransformer.modules.layers import build_dropout_layer
from geotransformer.modules.transformer.output_layer import AttentionOutput
class RPEMultiHeadAttention(nn.Module):
def __init__(self, d_model, num_heads, dropout=None):
super(RPEMultiHeadAttention, self).__init__()
if d_model % num_heads != 0:
raise ValueError('`d_model` ({}) must be a multiple of `num_heads` ({}).'.format(d_model, num_heads))
self.d_model = d_model
self.num_heads = num_heads
self.d_model_per_head = d_model // num_heads
self.proj_q = nn.Linear(self.d_model, self.d_model)
self.proj_k = nn.Linear(self.d_model, self.d_model)
self.proj_v = nn.Linear(self.d_model, self.d_model)
self.proj_p = nn.Linear(self.d_model, self.d_model)
self.dropout = build_dropout_layer(dropout)
def forward(self, input_q, input_k, input_v, embed_qk, key_weights=None, key_masks=None, attention_factors=None):
r"""Scaled Dot-Product Attention with Pre-computed Relative Positional Embedding (forward)
Args:
input_q: torch.Tensor (B, N, C)
input_k: torch.Tensor (B, M, C)
input_v: torch.Tensor (B, M, C)
embed_qk: torch.Tensor (B, N, M, C), relative positional embedding
key_weights: torch.Tensor (B, M), soft masks for the keys
key_masks: torch.Tensor (B, M), True if ignored, False if preserved
attention_factors: torch.Tensor (B, N, M)
Returns:
            hidden_states: torch.Tensor (B, N, C)
attention_scores: torch.Tensor (B, H, N, M)
"""
q = rearrange(self.proj_q(input_q), 'b n (h c) -> b h n c', h=self.num_heads)
k = rearrange(self.proj_k(input_k), 'b m (h c) -> b h m c', h=self.num_heads)
v = rearrange(self.proj_v(input_v), 'b m (h c) -> b h m c', h=self.num_heads)
p = rearrange(self.proj_p(embed_qk), 'b n m (h c) -> b h n m c', h=self.num_heads)
attention_scores_p = torch.einsum('bhnc,bhnmc->bhnm', q, p)
attention_scores_e = torch.einsum('bhnc,bhmc->bhnm', q, k)
attention_scores = (attention_scores_e + attention_scores_p) / self.d_model_per_head ** 0.5
if attention_factors is not None:
attention_scores = attention_factors.unsqueeze(1) * attention_scores
if key_weights is not None:
attention_scores = attention_scores * key_weights.unsqueeze(1).unsqueeze(1)
if key_masks is not None:
attention_scores = attention_scores.masked_fill(key_masks.unsqueeze(1).unsqueeze(1), float('-inf'))
attention_scores = F.softmax(attention_scores, dim=-1)
attention_scores = self.dropout(attention_scores)
hidden_states = torch.matmul(attention_scores, v)
hidden_states = rearrange(hidden_states, 'b h n c -> b n (h c)')
return hidden_states, attention_scores
class RPEAttentionLayer(nn.Module):
def __init__(self, d_model, num_heads, dropout=None):
super(RPEAttentionLayer, self).__init__()
self.attention = RPEMultiHeadAttention(d_model, num_heads, dropout=dropout)
self.linear = nn.Linear(d_model, d_model)
self.dropout = build_dropout_layer(dropout)
self.norm = nn.LayerNorm(d_model)
def forward(
self,
input_states,
memory_states,
position_states,
memory_weights=None,
memory_masks=None,
attention_factors=None,
):
hidden_states, attention_scores = self.attention(
input_states,
memory_states,
memory_states,
position_states,
key_weights=memory_weights,
key_masks=memory_masks,
attention_factors=attention_factors,
)
hidden_states = self.linear(hidden_states)
hidden_states = self.dropout(hidden_states)
output_states = self.norm(hidden_states + input_states)
return output_states, attention_scores
class RPETransformerLayer(nn.Module):
def __init__(self, d_model, num_heads, dropout=None, activation_fn='ReLU'):
super(RPETransformerLayer, self).__init__()
self.attention = RPEAttentionLayer(d_model, num_heads, dropout=dropout)
self.output = AttentionOutput(d_model, dropout=dropout, activation_fn=activation_fn)
def forward(
self,
input_states,
memory_states,
position_states,
memory_weights=None,
memory_masks=None,
attention_factors=None,
):
hidden_states, attention_scores = self.attention(
input_states,
memory_states,
position_states,
memory_weights=memory_weights,
memory_masks=memory_masks,
attention_factors=attention_factors,
)
output_states = self.output(hidden_states)
return output_states, attention_scores
| 5,309 | 39.227273 | 117 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/modules/transformer/vanilla_transformer.py | r"""Vanilla Transformer without positional embeddings.
The shape of input tensor should be (B, N, C). Implemented with `nn.Linear` and `nn.LayerNorm` (with affine).
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from geotransformer.modules.layers import build_dropout_layer
from geotransformer.modules.transformer.output_layer import AttentionOutput
class MultiHeadAttention(nn.Module):
def __init__(self, d_model, num_heads, dropout=None):
super(MultiHeadAttention, self).__init__()
if d_model % num_heads != 0:
raise ValueError('`d_model` ({}) must be a multiple of `num_heads` ({}).'.format(d_model, num_heads))
self.d_model = d_model
self.num_heads = num_heads
self.d_model_per_head = d_model // num_heads
self.proj_q = nn.Linear(self.d_model, self.d_model)
self.proj_k = nn.Linear(self.d_model, self.d_model)
self.proj_v = nn.Linear(self.d_model, self.d_model)
self.dropout = build_dropout_layer(dropout)
def forward(
self, input_q, input_k, input_v, key_weights=None, key_masks=None, attention_factors=None, attention_masks=None
):
"""Vanilla Self-attention forward propagation.
Args:
input_q (Tensor): input tensor for query (B, N, C)
input_k (Tensor): input tensor for key (B, M, C)
input_v (Tensor): input tensor for value (B, M, C)
key_weights (Tensor): soft masks for the keys (B, M)
key_masks (BoolTensor): True if ignored, False if preserved (B, M)
attention_factors (Tensor): factors for attention matrix (B, N, M)
attention_masks (BoolTensor): True if ignored, False if preserved (B, N, M)
Returns:
            hidden_states: torch.Tensor (B, N, C)
            attention_scores: torch.Tensor (B, H, N, M), the attention weights applied to the values
"""
q = rearrange(self.proj_q(input_q), 'b n (h c) -> b h n c', h=self.num_heads)
k = rearrange(self.proj_k(input_k), 'b m (h c) -> b h m c', h=self.num_heads)
v = rearrange(self.proj_v(input_v), 'b m (h c) -> b h m c', h=self.num_heads)
attention_scores = torch.einsum('bhnc,bhmc->bhnm', q, k) / self.d_model_per_head ** 0.5
if attention_factors is not None:
attention_scores = attention_factors.unsqueeze(1) * attention_scores
if key_weights is not None:
attention_scores = attention_scores * key_weights.unsqueeze(1).unsqueeze(1)
if key_masks is not None:
attention_scores = attention_scores.masked_fill(key_masks.unsqueeze(1).unsqueeze(1), float('-inf'))
if attention_masks is not None:
attention_scores = attention_scores.masked_fill(attention_masks, float('-inf'))
attention_scores = F.softmax(attention_scores, dim=-1)
attention_scores = self.dropout(attention_scores)
hidden_states = torch.matmul(attention_scores, v)
hidden_states = rearrange(hidden_states, 'b h n c -> b n (h c)')
return hidden_states, attention_scores
class AttentionLayer(nn.Module):
def __init__(self, d_model, num_heads, dropout=None):
super(AttentionLayer, self).__init__()
self.attention = MultiHeadAttention(d_model, num_heads, dropout=dropout)
self.linear = nn.Linear(d_model, d_model)
self.dropout = build_dropout_layer(dropout)
self.norm = nn.LayerNorm(d_model)
def forward(
self,
input_states,
memory_states,
memory_weights=None,
memory_masks=None,
attention_factors=None,
attention_masks=None,
):
hidden_states, attention_scores = self.attention(
input_states,
memory_states,
memory_states,
key_weights=memory_weights,
key_masks=memory_masks,
attention_factors=attention_factors,
attention_masks=attention_masks,
)
hidden_states = self.linear(hidden_states)
hidden_states = self.dropout(hidden_states)
output_states = self.norm(hidden_states + input_states)
return output_states, attention_scores
class TransformerLayer(nn.Module):
def __init__(self, d_model, num_heads, dropout=None, activation_fn='ReLU'):
super(TransformerLayer, self).__init__()
self.attention = AttentionLayer(d_model, num_heads, dropout=dropout)
self.output = AttentionOutput(d_model, dropout=dropout, activation_fn=activation_fn)
def forward(
self,
input_states,
memory_states,
memory_weights=None,
memory_masks=None,
attention_factors=None,
attention_masks=None,
):
hidden_states, attention_scores = self.attention(
input_states,
memory_states,
memory_weights=memory_weights,
memory_masks=memory_masks,
attention_factors=attention_factors,
attention_masks=attention_masks,
)
output_states = self.output(hidden_states)
return output_states, attention_scores
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, num_heads, dropout=None, activation_fn='ReLU'):
super(TransformerDecoderLayer, self).__init__()
self.self_attention = AttentionLayer(d_model, num_heads, dropout=dropout)
self.cross_attention = AttentionLayer(d_model, num_heads, dropout=dropout)
self.output = AttentionOutput(d_model, dropout=dropout, activation_fn=activation_fn)
def forward(self, input_states, memory_states, input_masks=None, memory_masks=None):
hidden_states, attention_scores = self.self_attention(input_states, input_states, memory_masks=input_masks)
hidden_states, attention_scores = self.cross_attention(hidden_states, memory_states, memory_masks=memory_masks)
output_states = self.output(hidden_states)
return output_states, attention_scores
class TransformerEncoder(nn.Module):
def __init__(self, d_model, num_heads, num_layers, dropout=None, activation_fn='ReLU'):
super(TransformerEncoder, self).__init__()
self.num_layers = num_layers
layers = []
for _ in range(num_layers):
layers.append(TransformerLayer(d_model, num_heads, dropout=dropout, activation_fn=activation_fn))
self.layers = nn.ModuleList(layers)
def forward(self, feats, weights=None, masks=None, attention_factors=None, attention_masks=None):
r"""Transformer Encoder forward.
Args:
feats (Tensor): (B, N, C)
weights (Tensor=None): (B, N)
masks (BoolTensor=None): True if ignored (B, N)
attention_factors (Tensor=None): (B, N, N)
attention_masks (BoolTensor=None): (B, N, N)
Returns:
feats (Tensor): (B, N, C)
"""
for i in range(self.num_layers):
feats, _ = self.layers[i](
feats,
feats,
memory_weights=weights,
memory_masks=masks,
attention_factors=attention_factors,
attention_masks=attention_masks,
)
return feats
class TransformerDecoder(nn.Module):
def __init__(self, d_model, num_heads, num_layers, dropout=None, activation_fn='ReLU'):
super(TransformerDecoder, self).__init__()
self.num_layers = num_layers
layers = []
for _ in range(num_layers):
layers.append(TransformerDecoderLayer(d_model, num_heads, dropout=dropout, activation_fn=activation_fn))
self.layers = nn.ModuleList(layers)
def forward(self, q_feats, s_feats):
r"""Transformer Decoder forward.
Args:
q_feats (Tensor): (B, N, C)
s_feats (Tensor): (B, M, C)
Returns:
q_feats (Tensor): (B, N, C)
"""
for i in range(self.num_layers):
q_feats, _ = self.layers[i](q_feats, s_feats)
return q_feats
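# Usage sketch (added for illustration; not part of the original module):
def _demo_transformer_encoder():
    encoder = TransformerEncoder(d_model=64, num_heads=4, num_layers=2)
    feats = torch.rand(2, 100, 64)  # (B, N, C)
    return encoder(feats)  # (B, N, C)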
| 8,126 | 39.232673 | 119 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/transforms/__init__.py | 0 | 0 | 0 | py |
|
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/transforms/functional.py | import math
import random
import numpy as np
def normalize_points(points):
r"""Normalize point cloud to a unit sphere at origin."""
points = points - points.mean(axis=0)
points = points / np.max(np.linalg.norm(points, axis=1))
return points
def sample_points(points, num_samples, normals=None):
r"""Sample the first K points."""
points = points[:num_samples]
if normals is not None:
normals = normals[:num_samples]
return points, normals
else:
return points
def random_sample_points(points, num_samples, normals=None):
r"""Randomly sample points."""
num_points = points.shape[0]
sel_indices = np.random.permutation(num_points)
if num_points > num_samples:
sel_indices = sel_indices[:num_samples]
elif num_points < num_samples:
num_iterations = num_samples // num_points
num_paddings = num_samples % num_points
all_sel_indices = [sel_indices for _ in range(num_iterations)]
if num_paddings > 0:
all_sel_indices.append(sel_indices[:num_paddings])
sel_indices = np.concatenate(all_sel_indices, axis=0)
points = points[sel_indices]
if normals is not None:
normals = normals[sel_indices]
return points, normals
else:
return points
def random_scale_shift_points(points, low=2.0 / 3.0, high=3.0 / 2.0, shift=0.2, normals=None):
r"""Randomly scale and shift point cloud."""
scale = np.random.uniform(low=low, high=high, size=(1, 3))
bias = np.random.uniform(low=-shift, high=shift, size=(1, 3))
points = points * scale + bias
if normals is not None:
normals = normals * scale
normals = normals / np.linalg.norm(normals, axis=1, keepdims=True)
return points, normals
else:
return points
def random_rotate_points_along_up_axis(points, normals=None):
r"""Randomly rotate point cloud along z-axis."""
theta = np.random.rand() * 2.0 * math.pi
# fmt: off
rotation_t = np.array([
[math.cos(theta), math.sin(theta), 0],
[-math.sin(theta), math.cos(theta), 0],
[0, 0, 1],
])
# fmt: on
points = np.matmul(points, rotation_t)
if normals is not None:
normals = np.matmul(normals, rotation_t)
return points, normals
else:
return points
def random_rescale_points(points, low=0.8, high=1.2):
r"""Randomly rescale point cloud."""
scale = random.uniform(low, high)
points = points * scale
return points
def random_jitter_points(points, scale, noise_magnitude=0.05):
r"""Randomly jitter point cloud."""
noises = np.clip(np.random.normal(scale=scale, size=points.shape), a_min=-noise_magnitude, a_max=noise_magnitude)
points = points + noises
return points
def random_shuffle_points(points, normals=None):
r"""Randomly permute point cloud."""
indices = np.random.permutation(points.shape[0])
points = points[indices]
if normals is not None:
normals = normals[indices]
return points, normals
else:
return points
def random_dropout_points(points, max_p):
r"""Randomly dropout point cloud proposed in PointNet++."""
num_points = points.shape[0]
p = np.random.rand(num_points) * max_p
masks = np.random.rand(num_points) < p
points[masks] = points[0]
return points
def random_jitter_features(features, mu=0, sigma=0.01):
r"""Randomly jitter features in the original implementation of FCGF."""
if random.random() < 0.95:
features = features + np.random.normal(mu, sigma, features.shape).astype(np.float32)
return features
def random_sample_plane():
r"""Random sample a plane passing the origin and return its normal."""
phi = np.random.uniform(0.0, 2 * np.pi) # longitude
theta = np.random.uniform(0.0, np.pi) # latitude
x = np.sin(theta) * np.cos(phi)
y = np.sin(theta) * np.sin(phi)
z = np.cos(theta)
normal = np.asarray([x, y, z])
return normal
def random_crop_point_cloud_with_plane(points, p_normal=None, keep_ratio=0.7, normals=None):
r"""Random crop a point cloud with a plane and keep num_samples points."""
num_samples = int(np.floor(points.shape[0] * keep_ratio + 0.5))
if p_normal is None:
p_normal = random_sample_plane() # (3,)
distances = np.dot(points, p_normal)
sel_indices = np.argsort(-distances)[:num_samples] # select the largest K points
points = points[sel_indices]
if normals is not None:
normals = normals[sel_indices]
return points, normals
else:
return points
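# Usage sketch (added for illustration; not part of the original module). Keep
# the 70% of points lying furthest along the normal of a random plane through
# the origin:
def _demo_random_crop_point_cloud_with_plane():
    points = np.random.rand(1000, 3)
    return random_crop_point_cloud_with_plane(points, keep_ratio=0.7)  # (700, 3)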
def random_sample_viewpoint(limit=500):
r"""Randomly sample observing point from 8 directions."""
return np.random.rand(3) + np.array([limit, limit, limit]) * np.random.choice([1.0, -1.0], size=3)
def random_crop_point_cloud_with_point(points, viewpoint=None, keep_ratio=0.7, normals=None):
r"""Random crop point cloud from the observing point."""
num_samples = int(np.floor(points.shape[0] * keep_ratio + 0.5))
if viewpoint is None:
viewpoint = random_sample_viewpoint()
distances = np.linalg.norm(viewpoint - points, axis=1)
sel_indices = np.argsort(distances)[:num_samples]
points = points[sel_indices]
if normals is not None:
normals = normals[sel_indices]
return points, normals
else:
return points
| 5,392 | 32.08589 | 117 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/utils/__init__.py | 0 | 0 | 0 | py |
|
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/utils/average_meter.py | import numpy as np
class AverageMeter:
def __init__(self, last_n=None):
self._records = []
self.last_n = last_n
def update(self, result):
if isinstance(result, (list, tuple)):
self._records += result
else:
self._records.append(result)
def reset(self):
self._records.clear()
@property
def records(self):
if self.last_n is not None:
return self._records[-self.last_n :]
else:
return self._records
def sum(self):
return np.sum(self.records)
def mean(self):
return np.mean(self.records)
def std(self):
return np.std(self.records)
def median(self):
return np.median(self.records)
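# Illustrative usage sketch (added, hypothetical helper name): with `last_n`
# set, statistics are computed over a sliding window of the latest records.
def _demo_average_meter():
    meter = AverageMeter(last_n=2)
    meter.update([1.0, 2.0, 3.0])
    return meter.mean()  # 2.5: only the last two records are used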
| 756 | 20.027778 | 48 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/utils/common.py | import os
import os.path as osp
import pickle
import sys
sys.path.append('/mnt/lustre/weipengjin/geotransformer')
def ensure_dir(path):
if not osp.exists(path):
os.makedirs(path)
def load_pickle(filename):
with open(filename, 'rb') as f:
data = pickle.load(f)
return data
def dump_pickle(data, filename):
with open(filename, 'wb') as f:
pickle.dump(data, f)
def get_print_format(value):
if isinstance(value, int):
return 'd'
if isinstance(value, str):
return 's'
if value == 0:
return '.3f'
if value < 1e-6:
return '.3e'
if value < 1e-3:
return '.6f'
return '.3f'
def get_format_strings(kv_pairs):
r"""Get format string for a list of key-value pairs."""
log_strings = []
for key, value in kv_pairs:
fmt = get_print_format(value)
format_string = '{}: {:' + fmt + '}'
log_strings.append(format_string.format(key, value))
return log_strings
def get_log_string(result_dict, epoch=None, max_epoch=None, iteration=None, max_iteration=None, lr=None, timer=None):
log_strings = []
if epoch is not None:
epoch_string = f'Epoch: {epoch}'
if max_epoch is not None:
epoch_string += f'/{max_epoch}'
log_strings.append(epoch_string)
if iteration is not None:
iter_string = f'iter: {iteration}'
if max_iteration is not None:
iter_string += f'/{max_iteration}'
if epoch is None:
iter_string = iter_string.capitalize()
log_strings.append(iter_string)
if 'metadata' in result_dict:
log_strings += result_dict['metadata']
for key, value in result_dict.items():
if key != 'metadata':
format_string = '{}: {:' + get_print_format(value) + '}'
log_strings.append(format_string.format(key, value))
if lr is not None:
log_strings.append('lr: {:.3e}'.format(lr))
if timer is not None:
log_strings.append(timer.tostring())
message = ', '.join(log_strings)
return message
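# Illustrative usage sketch (added, hypothetical helper name): a typical
# training-loop log line produced from a result dict.
def _demo_log_string():
    result_dict = {'loss': 0.123456, 'acc': 0.9}
    return get_log_string(result_dict, epoch=1, max_epoch=40, iteration=10, max_iteration=100, lr=1e-4)
    # -> 'Epoch: 1/40, iter: 10/100, loss: 0.123, acc: 0.900, lr: 1.000e-04'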
| 2,083 | 27.547945 | 117 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/utils/data.py | from functools import partial
import numpy as np
import torch
from geotransformer.modules.ops import grid_subsample, radius_search
from geotransformer.utils.torch import build_dataloader
# Stack mode utilities
def precompute_data_stack_mode(points, lengths, num_stages, voxel_size, radius, neighbor_limits):
assert num_stages == len(neighbor_limits)
points_list = []
lengths_list = []
neighbors_list = []
subsampling_list = []
upsampling_list = []
# grid subsampling
for i in range(num_stages):
if i > 0:
points, lengths = grid_subsample(points, lengths, voxel_size=voxel_size)
points_list.append(points)
lengths_list.append(lengths)
voxel_size *= 2
# radius search
for i in range(num_stages):
cur_points = points_list[i]
cur_lengths = lengths_list[i]
neighbors = radius_search(
cur_points,
cur_points,
cur_lengths,
cur_lengths,
radius,
neighbor_limits[i],
)
neighbors_list.append(neighbors)
if i < num_stages - 1:
sub_points = points_list[i + 1]
sub_lengths = lengths_list[i + 1]
subsampling = radius_search(
sub_points,
cur_points,
sub_lengths,
cur_lengths,
radius,
neighbor_limits[i],
)
subsampling_list.append(subsampling)
upsampling = radius_search(
cur_points,
sub_points,
cur_lengths,
sub_lengths,
radius * 2,
neighbor_limits[i + 1],
)
upsampling_list.append(upsampling)
radius *= 2
return {
'points': points_list,
'lengths': lengths_list,
'neighbors': neighbors_list,
'subsampling': subsampling_list,
'upsampling': upsampling_list,
}
def single_collate_fn_stack_mode(
data_dicts, num_stages, voxel_size, search_radius, neighbor_limits, precompute_data=True
):
r"""Collate function for single point cloud in stack mode.
Points are organized in the following order: [P_1, ..., P_B].
The correspondence indices are within each point cloud without accumulation.
Args:
data_dicts (List[Dict])
num_stages (int)
voxel_size (float)
search_radius (float)
neighbor_limits (List[int])
precompute_data (bool=True)
Returns:
collated_dict (Dict)
"""
batch_size = len(data_dicts)
# merge data with the same key from different samples into a list
collated_dict = {}
for data_dict in data_dicts:
for key, value in data_dict.items():
if isinstance(value, np.ndarray):
value = torch.from_numpy(value)
if key not in collated_dict:
collated_dict[key] = []
collated_dict[key].append(value)
# handle special keys: feats, points, normals
if 'normals' in collated_dict:
normals = torch.cat(collated_dict.pop('normals'), dim=0)
else:
normals = None
feats = torch.cat(collated_dict.pop('feats'), dim=0)
points_list = collated_dict.pop('points')
lengths = torch.LongTensor([points.shape[0] for points in points_list])
points = torch.cat(points_list, dim=0)
if batch_size == 1:
# remove wrapping brackets if batch_size is 1
for key, value in collated_dict.items():
collated_dict[key] = value[0]
if normals is not None:
collated_dict['normals'] = normals
collated_dict['features'] = feats
if precompute_data:
input_dict = precompute_data_stack_mode(points, lengths, num_stages, voxel_size, search_radius, neighbor_limits)
collated_dict.update(input_dict)
else:
collated_dict['points'] = points
collated_dict['lengths'] = lengths
collated_dict['batch_size'] = batch_size
return collated_dict
def registration_collate_fn_stack_mode(
data_dicts, num_stages, voxel_size, search_radius, neighbor_limits, precompute_data=True
):
r"""Collate function for registration in stack mode.
Points are organized in the following order: [ref_1, ..., ref_B, src_1, ..., src_B].
The correspondence indices are within each point cloud without accumulation.
Args:
data_dicts (List[Dict])
num_stages (int)
voxel_size (float)
search_radius (float)
neighbor_limits (List[int])
precompute_data (bool)
Returns:
collated_dict (Dict)
"""
batch_size = len(data_dicts)
# merge data with the same key from different samples into a list
collated_dict = {}
for data_dict in data_dicts:
for key, value in data_dict.items():
if isinstance(value, np.ndarray):
value = torch.from_numpy(value)
if key not in collated_dict:
collated_dict[key] = []
collated_dict[key].append(value)
# handle special keys: [ref_feats, src_feats] -> feats, [ref_points, src_points] -> points, lengths
feats = torch.cat(collated_dict.pop('ref_feats') + collated_dict.pop('src_feats'), dim=0)
points_list = collated_dict.pop('ref_points') + collated_dict.pop('src_points')
lengths = torch.LongTensor([points.shape[0] for points in points_list])
points = torch.cat(points_list, dim=0)
if batch_size == 1:
# remove wrapping brackets if batch_size is 1
for key, value in collated_dict.items():
collated_dict[key] = value[0]
collated_dict['features'] = feats
if precompute_data:
input_dict = precompute_data_stack_mode(points, lengths, num_stages, voxel_size, search_radius, neighbor_limits)
collated_dict.update(input_dict)
else:
collated_dict['points'] = points
collated_dict['lengths'] = lengths
collated_dict['batch_size'] = batch_size
return collated_dict
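# Illustrative usage sketch (added; helper name, shapes, and parameter values
# are made up): with `precompute_data=False` the collate function only stacks
# clouds and lengths, so it can be exercised without the compiled
# subsampling/radius-search ops.
def _demo_registration_collate():
    data_dict = {
        'ref_points': np.random.rand(100, 3).astype(np.float32),
        'src_points': np.random.rand(80, 3).astype(np.float32),
        'ref_feats': np.ones((100, 1), dtype=np.float32),
        'src_feats': np.ones((80, 1), dtype=np.float32),
    }
    collated = registration_collate_fn_stack_mode(
        [data_dict], num_stages=4, voxel_size=0.025, search_radius=0.0625,
        neighbor_limits=[38, 36, 36, 38], precompute_data=False,
    )
    # collated['points']: (180, 3) tensor; collated['lengths']: tensor([100, 80])
    return collated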
def calibrate_neighbors_stack_mode(
dataset, collate_fn, num_stages, voxel_size, search_radius, keep_ratio=0.8, sample_threshold=2000
):
# Compute higher bound of neighbors number in a neighborhood
hist_n = int(np.ceil(4 / 3 * np.pi * (search_radius / voxel_size + 1) ** 3))
neighbor_hists = np.zeros((num_stages, hist_n), dtype=np.int32)
max_neighbor_limits = [hist_n] * num_stages
    # Get histogram of neighborhood sizes in 1 epoch max.
for i in range(len(dataset)):
data_dict = collate_fn(
[dataset[i]], num_stages, voxel_size, search_radius, max_neighbor_limits, precompute_data=True
)
# update histogram
counts = [np.sum(neighbors.numpy() < neighbors.shape[0], axis=1) for neighbors in data_dict['neighbors']]
hists = [np.bincount(c, minlength=hist_n)[:hist_n] for c in counts]
neighbor_hists += np.vstack(hists)
if np.min(np.sum(neighbor_hists, axis=1)) > sample_threshold:
break
cum_sum = np.cumsum(neighbor_hists.T, axis=0)
neighbor_limits = np.sum(cum_sum < (keep_ratio * cum_sum[hist_n - 1, :]), axis=0)
return neighbor_limits
def build_dataloader_stack_mode(
dataset,
collate_fn,
num_stages,
voxel_size,
search_radius,
neighbor_limits,
batch_size=1,
num_workers=1,
shuffle=False,
drop_last=False,
distributed=False,
precompute_data=True,
):
dataloader = build_dataloader(
dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=shuffle,
collate_fn=partial(
collate_fn,
num_stages=num_stages,
voxel_size=voxel_size,
search_radius=search_radius,
neighbor_limits=neighbor_limits,
precompute_data=precompute_data,
),
drop_last=drop_last,
distributed=distributed,
)
return dataloader
| 7,976 | 30.780876 | 120 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/utils/open3d.py | import numpy as np
import open3d as o3d
def get_color(color_name):
if color_name == 'red':
return np.asarray([1.0, 0.0, 0.0])
elif color_name == 'blue':
return np.asarray([0.0, 0.0, 1.0])
elif color_name == 'green':
return np.asarray([0.0, 1.0, 0.0])
    elif color_name == 'yellow':
        return np.asarray([1.0, 1.0, 0.0])
else:
raise RuntimeError(f'Unsupported color: {color_name}.')
def make_scaling_along_axis(points, axis=2, alpha=0):
if isinstance(axis, int):
new_scaling_axis = np.zeros(3)
new_scaling_axis[axis] = 1
axis = new_scaling_axis
if not isinstance(axis, np.ndarray):
axis = np.asarray(axis)
    axis = axis / np.linalg.norm(axis)
projections = np.matmul(points, axis)
upper = np.amax(projections)
lower = np.amin(projections)
scales = 1 - ((projections - lower) / (upper - lower) * (1 - alpha) + alpha)
return scales
def make_open3d_colors(points, base_color, scaling_axis=2, scaling_alpha=0):
if not isinstance(base_color, np.ndarray):
base_color = np.asarray(base_color)
colors = np.ones_like(points) * base_color
scales = make_scaling_along_axis(points, axis=scaling_axis, alpha=scaling_alpha)
colors = colors * scales.reshape(-1, 1)
return colors
def make_open3d_point_cloud(points, colors=None, normals=None):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
if colors is not None:
pcd.colors = o3d.utility.Vector3dVector(colors)
if normals is not None:
pcd.normals = o3d.utility.Vector3dVector(normals)
return pcd
def estimate_normals(points):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
pcd.estimate_normals()
normals = np.asarray(pcd.normals)
return normals
def voxel_downsample(points, voxel_size, normals=None):
pcd = make_open3d_point_cloud(points, normals=normals)
pcd = pcd.voxel_down_sample(voxel_size)
points = np.asarray(pcd.points)
if normals is not None:
normals = np.asarray(pcd.normals)
return points, normals
else:
return points
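# Illustrative usage sketch (added, hypothetical helper name): the output size
# depends on how many voxels of the given size are occupied.
def _demo_voxel_downsample():
    points = np.random.rand(10000, 3)
    sparse_points = voxel_downsample(points, voxel_size=0.1)
    return sparse_points  # roughly one point per occupied voxel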
def make_open3d_registration_feature(data):
r"""
Make open3d registration features
:param data: numpy.ndarray (N, C)
:return feats: o3d.pipelines.registration.Feature
"""
feats = o3d.pipelines.registration.Feature()
feats.data = data.T
return feats
def make_open3d_axis(axis_vector=None, origin=None, scale=1.0):
if origin is None:
origin = np.zeros((1, 3))
if axis_vector is None:
        axis_vector = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float64)
axis_vector = axis_vector * scale
axis_point = origin + axis_vector
points = np.concatenate([origin, axis_point], axis=0)
    line = np.array([[0, 1]], dtype=np.int64)
axes = o3d.geometry.LineSet()
axes.points = o3d.utility.Vector3dVector(points)
axes.lines = o3d.utility.Vector2iVector(line)
axes.paint_uniform_color(get_color('red'))
return axes
def make_open3d_axes(axis_vectors=None, origin=None, scale=1.0):
if origin is None:
origin = np.zeros((1, 3))
if axis_vectors is None:
        axis_vectors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float64)
axis_vectors = axis_vectors * scale
axis_points = origin + axis_vectors
points = np.concatenate([origin, axis_points], axis=0)
    lines = np.array([[0, 1], [0, 2], [0, 3]], dtype=np.int64)
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float64)
axes = o3d.geometry.LineSet()
axes.points = o3d.utility.Vector3dVector(points)
axes.lines = o3d.utility.Vector2iVector(lines)
axes.colors = o3d.utility.Vector3dVector(colors)
return axes
def make_open3d_corr_lines(ref_corr_points, src_corr_points, label):
num_correspondences = ref_corr_points.shape[0]
corr_points = np.concatenate([ref_corr_points, src_corr_points], axis=0)
corr_indices = [(i, i + num_correspondences) for i in range(num_correspondences)]
corr_lines = o3d.geometry.LineSet()
corr_lines.points = o3d.utility.Vector3dVector(corr_points)
corr_lines.lines = o3d.utility.Vector2iVector(corr_indices)
if label == 'pos':
corr_lines.paint_uniform_color(np.asarray([0.0, 1.0, 0.0]))
elif label == 'neg':
corr_lines.paint_uniform_color(np.asarray([1.0, 0.0, 0.0]))
else:
raise ValueError('Unsupported `label` {} for correspondences'.format(label))
return corr_lines
def open3d_draw(*geometries):
o3d.visualization.draw_geometries(geometries)
def registration_with_ransac_from_feats(
src_points,
ref_points,
src_feats,
ref_feats,
distance_threshold=0.05,
ransac_n=3,
num_iterations=50000,
val_iterations=1000,
):
r"""
Compute the transformation matrix from src_points to ref_points
"""
src_pcd = make_open3d_point_cloud(src_points)
ref_pcd = make_open3d_point_cloud(ref_points)
src_feats = make_open3d_registration_feature(src_feats)
ref_feats = make_open3d_registration_feature(ref_feats)
result = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(
src_pcd,
ref_pcd,
src_feats,
ref_feats,
distance_threshold,
estimation_method=o3d.pipelines.registration.TransformationEstimationPointToPoint(False),
ransac_n=ransac_n,
checkers=[
o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(distance_threshold),
],
criteria=o3d.pipelines.registration.RANSACConvergenceCriteria(num_iterations, val_iterations),
)
return result.transformation
def registration_with_ransac_from_correspondences(
src_points,
ref_points,
correspondences=None,
distance_threshold=0.05,
ransac_n=3,
num_iterations=10000,
):
r"""
Compute the transformation matrix from src_points to ref_points
"""
src_pcd = make_open3d_point_cloud(src_points)
ref_pcd = make_open3d_point_cloud(ref_points)
if correspondences is None:
indices = np.arange(src_points.shape[0])
correspondences = np.stack([indices, indices], axis=1)
correspondences = o3d.utility.Vector2iVector(correspondences)
result = o3d.pipelines.registration.registration_ransac_based_on_correspondence(
src_pcd,
ref_pcd,
correspondences,
distance_threshold,
estimation_method=o3d.pipelines.registration.TransformationEstimationPointToPoint(False),
ransac_n=ransac_n,
criteria=o3d.pipelines.registration.RANSACConvergenceCriteria(num_iterations, num_iterations),
)
return result.transformation
| 6,827 | 32.80198 | 102 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/utils/pointcloud.py | from typing import Tuple, List, Optional, Union, Any
import numpy as np
from scipy.spatial import cKDTree
from scipy.spatial.transform import Rotation
import math
# Basic Utilities
def get_nearest_neighbor(
q_points: np.ndarray,
s_points: np.ndarray,
return_index: bool = False,
):
r"""Compute the nearest neighbor for the query points in support points."""
s_tree = cKDTree(s_points)
distances, indices = s_tree.query(q_points, k=1) #, n_jobs=-1
if return_index:
return distances, indices
else:
return distances
def regularize_normals(points, normals, positive=True):
r"""Regularize the normals towards the positive/negative direction to the origin point.
positive: the origin point is on positive direction of the normals.
negative: the origin point is on negative direction of the normals.
"""
dot_products = -(points * normals).sum(axis=1, keepdims=True)
direction = dot_products > 0
if positive:
normals = normals * direction - normals * (1 - direction)
else:
normals = normals * (1 - direction) - normals * direction
return normals
# Transformation Utilities
def apply_transform(points: np.ndarray, transform: np.ndarray, normals: Optional[np.ndarray] = None):
rotation = transform[:3, :3]
translation = transform[:3, 3]
points = np.matmul(points, rotation.T) + translation
if normals is not None:
normals = np.matmul(normals, rotation.T)
return points, normals
else:
return points
def compose_transforms(transforms: List[np.ndarray]) -> np.ndarray:
r"""
Compose transforms from the first one to the last one.
    T = T_{n-1} \circ T_{n-2} \circ \cdots \circ T_1 \circ T_0
"""
final_transform = transforms[0]
for transform in transforms[1:]:
final_transform = np.matmul(transform, final_transform)
return final_transform
def get_transform_from_rotation_translation(rotation: np.ndarray, translation: np.ndarray) -> np.ndarray:
r"""Get rigid transform matrix from rotation matrix and translation vector.
Args:
rotation (array): (3, 3)
translation (array): (3,)
Returns:
transform: (4, 4)
"""
transform = np.eye(4)
transform[:3, :3] = rotation
transform[:3, 3] = translation
return transform
def get_rotation_translation_from_transform(transform: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
r"""Get rotation matrix and translation vector from rigid transform matrix.
Args:
transform (array): (4, 4)
Returns:
rotation (array): (3, 3)
translation (array): (3,)
"""
rotation = transform[:3, :3]
translation = transform[:3, 3]
return rotation, translation
def inverse_transform(transform: np.ndarray) -> np.ndarray:
r"""Inverse rigid transform.
Args:
transform (array): (4, 4)
Return:
inv_transform (array): (4, 4)
"""
rotation, translation = get_rotation_translation_from_transform(transform) # (3, 3), (3,)
inv_rotation = rotation.T # (3, 3)
inv_translation = -np.matmul(inv_rotation, translation) # (3,)
inv_transform = get_transform_from_rotation_translation(inv_rotation, inv_translation) # (4, 4)
return inv_transform
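# Illustrative sanity check (added, hypothetical helper name): composing a
# transform with its inverse should give the identity up to float precision.
def _demo_inverse_transform():
    rotation = Rotation.from_euler('zyx', [0.3, 0.2, 0.1]).as_matrix()
    transform = get_transform_from_rotation_translation(rotation, np.array([1.0, 2.0, 3.0]))
    identity = np.matmul(transform, inverse_transform(transform))
    assert np.allclose(identity, np.eye(4))
    return identity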
def random_sample_rotation(rotation_factor: float = 1.0) -> np.ndarray:
# angle_z, angle_y, angle_x
    euler = np.random.rand(3) * np.pi * 2 / rotation_factor  # (0, 2 * pi / rotation_factor)
rotation = Rotation.from_euler('zyx', euler).as_matrix()
return rotation
def random_sample_rotation_v2() -> np.ndarray:
axis = np.random.rand(3) - 0.5
    axis = axis / (np.linalg.norm(axis) + 1e-8)
theta = np.pi * np.random.rand()
euler = axis * theta
rotation = Rotation.from_euler('zyx', euler).as_matrix()
return rotation
def random_sample_transform(rotation_magnitude: float, translation_magnitude: float) -> np.ndarray:
euler = np.random.rand(3) * np.pi * rotation_magnitude / 180.0 # (0, rot_mag)
rotation = Rotation.from_euler('zyx', euler).as_matrix()
translation = np.random.uniform(-translation_magnitude, translation_magnitude, 3)
transform = get_transform_from_rotation_translation(rotation, translation)
return transform
# Sampling methods
def random_sample_keypoints(
points: np.ndarray,
feats: np.ndarray,
num_keypoints: int,
) -> Tuple[np.ndarray, np.ndarray]:
num_points = points.shape[0]
if num_points > num_keypoints:
indices = np.random.choice(num_points, num_keypoints, replace=False)
points = points[indices]
feats = feats[indices]
return points, feats
def sample_keypoints_with_scores(
points: np.ndarray,
feats: np.ndarray,
scores: np.ndarray,
num_keypoints: int,
) -> Tuple[np.ndarray, np.ndarray]:
num_points = points.shape[0]
if num_points > num_keypoints:
indices = np.argsort(-scores)[:num_keypoints]
points = points[indices]
feats = feats[indices]
return points, feats
def random_sample_keypoints_with_scores(
points: np.ndarray,
feats: np.ndarray,
scores: np.ndarray,
num_keypoints: int,
) -> Tuple[np.ndarray, np.ndarray]:
num_points = points.shape[0]
if num_points > num_keypoints:
indices = np.arange(num_points)
probs = scores / np.sum(scores)
indices = np.random.choice(indices, num_keypoints, replace=False, p=probs)
points = points[indices]
feats = feats[indices]
return points, feats
def sample_keypoints_with_nms(
points: np.ndarray,
feats: np.ndarray,
scores: np.ndarray,
num_keypoints: int,
radius: float,
) -> Tuple[np.ndarray, np.ndarray]:
num_points = points.shape[0]
if num_points > num_keypoints:
radius2 = radius ** 2
        masks = np.ones(num_points, dtype=bool)
sorted_indices = np.argsort(scores)[::-1]
sorted_points = points[sorted_indices]
sorted_feats = feats[sorted_indices]
indices = []
for i in range(num_points):
if masks[i]:
indices.append(i)
if len(indices) == num_keypoints:
break
if i + 1 < num_points:
current_masks = np.sum((sorted_points[i + 1 :] - sorted_points[i]) ** 2, axis=1) < radius2
masks[i + 1 :] = masks[i + 1 :] & ~current_masks
points = sorted_points[indices]
feats = sorted_feats[indices]
return points, feats
def random_sample_keypoints_with_nms(
points: np.ndarray,
feats: np.ndarray,
scores: np.ndarray,
num_keypoints: int,
radius: float,
) -> Tuple[np.ndarray, np.ndarray]:
num_points = points.shape[0]
if num_points > num_keypoints:
radius2 = radius ** 2
        masks = np.ones(num_points, dtype=bool)
sorted_indices = np.argsort(scores)[::-1]
sorted_points = points[sorted_indices]
sorted_feats = feats[sorted_indices]
indices = []
for i in range(num_points):
if masks[i]:
indices.append(i)
if i + 1 < num_points:
current_masks = np.sum((sorted_points[i + 1 :] - sorted_points[i]) ** 2, axis=1) < radius2
masks[i + 1 :] = masks[i + 1 :] & ~current_masks
indices = np.array(indices)
if len(indices) > num_keypoints:
sorted_scores = scores[sorted_indices]
scores = sorted_scores[indices]
probs = scores / np.sum(scores)
indices = np.random.choice(indices, num_keypoints, replace=False, p=probs)
points = sorted_points[indices]
feats = sorted_feats[indices]
return points, feats
# depth image utilities
def convert_depth_mat_to_points(
depth_mat: np.ndarray, intrinsics: np.ndarray, scaling_factor: float = 1000.0, distance_limit: float = 6.0
):
r"""Convert depth image to point cloud.
Args:
depth_mat (array): (H, W)
intrinsics (array): (3, 3)
scaling_factor (float=1000.)
Returns:
points (array): (N, 3)
"""
focal_x = intrinsics[0, 0]
focal_y = intrinsics[1, 1]
center_x = intrinsics[0, 2]
center_y = intrinsics[1, 2]
height, width = depth_mat.shape
coords = np.arange(height * width)
u = coords % width
    v = coords // width
depth = depth_mat.flatten()
z = depth / scaling_factor
z[z > distance_limit] = 0.0
x = (u - center_x) * z / focal_x
y = (v - center_y) * z / focal_y
points = np.stack([x, y, z], axis=1)
    points = points[z > 0]
return points
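# Illustrative usage sketch (added; the intrinsics and image size are made
# up): back-project a constant synthetic depth image with a pinhole model.
def _demo_depth_to_points():
    intrinsics = np.array([[500.0, 0.0, 32.0], [0.0, 500.0, 24.0], [0.0, 0.0, 1.0]])
    depth_mat = np.full((48, 64), 1500, dtype=np.int32)  # 1.5 m everywhere
    points = convert_depth_mat_to_points(depth_mat, intrinsics)
    return points  # (48 * 64, 3) points, all with z == 1.5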
def eulerAnglesToRotationMatrix(theta):
    R_x = np.array([
        [1, 0, 0],
        [0, math.cos(theta[0]), -math.sin(theta[0])],
        [0, math.sin(theta[0]), math.cos(theta[0])],
    ])
    R_y = np.array([
        [math.cos(theta[1]), 0, math.sin(theta[1])],
        [0, 1, 0],
        [-math.sin(theta[1]), 0, math.cos(theta[1])],
    ])
    R_z = np.array([
        [math.cos(theta[2]), -math.sin(theta[2]), 0],
        [math.sin(theta[2]), math.cos(theta[2]), 0],
        [0, 0, 1],
    ])
    R = np.dot(R_z, np.dot(R_y, R_x))
    return R
| 9,576 | 31.137584 | 110 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/utils/registration.py | import warnings
import numpy as np
from scipy.spatial import cKDTree
from scipy.spatial.transform import Rotation
from geotransformer.utils.pointcloud import (
apply_transform,
get_nearest_neighbor,
get_rotation_translation_from_transform,
)
# Metrics
def compute_relative_rotation_error(gt_rotation: np.ndarray, est_rotation: np.ndarray):
r"""Compute the isotropic Relative Rotation Error.
RRE = acos((trace(R^T \cdot \bar{R}) - 1) / 2)
Args:
gt_rotation (array): ground truth rotation matrix (3, 3)
est_rotation (array): estimated rotation matrix (3, 3)
Returns:
rre (float): relative rotation error.
"""
x = 0.5 * (np.trace(np.matmul(est_rotation.T, gt_rotation)) - 1.0)
x = np.clip(x, -1.0, 1.0)
x = np.arccos(x)
rre = 180.0 * x / np.pi
return rre
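# Illustrative worked example (added, hypothetical helper name): a 30 degree
# rotation about z measured against the identity gives an RRE of 30 degrees.
def _demo_rre():
    est_rotation = Rotation.from_euler('z', 30.0, degrees=True).as_matrix()
    return compute_relative_rotation_error(np.eye(3), est_rotation)  # ~30.0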
def compute_relative_translation_error(gt_translation: np.ndarray, est_translation: np.ndarray):
r"""Compute the isotropic Relative Translation Error.
RTE = \lVert t - \bar{t} \rVert_2
Args:
gt_translation (array): ground truth translation vector (3,)
est_translation (array): estimated translation vector (3,)
Returns:
rte (float): relative translation error.
"""
return np.linalg.norm(gt_translation - est_translation)
def compute_registration_error(gt_transform: np.ndarray, est_transform: np.ndarray):
r"""Compute the isotropic Relative Rotation Error and Relative Translation Error.
Args:
gt_transform (array): ground truth transformation matrix (4, 4)
est_transform (array): estimated transformation matrix (4, 4)
Returns:
rre (float): relative rotation error.
rte (float): relative translation error.
"""
gt_rotation, gt_translation = get_rotation_translation_from_transform(gt_transform)
est_rotation, est_translation = get_rotation_translation_from_transform(est_transform)
rre = compute_relative_rotation_error(gt_rotation, est_rotation)
rte = compute_relative_translation_error(gt_translation, est_translation)
return rre, rte
def compute_rotation_mse_and_mae(gt_rotation: np.ndarray, est_rotation: np.ndarray):
r"""Compute anisotropic rotation error (MSE and MAE)."""
    gt_euler_angles = Rotation.from_matrix(gt_rotation).as_euler('xyz', degrees=True)  # (3,)
    est_euler_angles = Rotation.from_matrix(est_rotation).as_euler('xyz', degrees=True)  # (3,)
mse = np.mean((gt_euler_angles - est_euler_angles) ** 2)
mae = np.mean(np.abs(gt_euler_angles - est_euler_angles))
return mse, mae
def compute_translation_mse_and_mae(gt_translation: np.ndarray, est_translation: np.ndarray):
r"""Compute anisotropic translation error (MSE and MAE)."""
mse = np.mean((gt_translation - est_translation) ** 2)
mae = np.mean(np.abs(gt_translation - est_translation))
return mse, mae
def compute_transform_mse_and_mae(gt_transform: np.ndarray, est_transform: np.ndarray):
r"""Compute anisotropic rotation and translation error (MSE and MAE)."""
gt_rotation, gt_translation = get_rotation_translation_from_transform(gt_transform)
est_rotation, est_translation = get_rotation_translation_from_transform(est_transform)
r_mse, r_mae = compute_rotation_mse_and_mae(gt_rotation, est_rotation)
t_mse, t_mae = compute_translation_mse_and_mae(gt_translation, est_translation)
return r_mse, r_mae, t_mse, t_mae
def compute_registration_rmse(src_points: np.ndarray, gt_transform: np.ndarray, est_transform: np.ndarray):
r"""Compute re-alignment error (approximated RMSE in 3DMatch).
Used in Rotated 3DMatch.
Args:
src_points (array): source point cloud. (N, 3)
gt_transform (array): ground-truth transformation. (4, 4)
est_transform (array): estimated transformation. (4, 4)
Returns:
error (float): root mean square error.
"""
gt_points = apply_transform(src_points, gt_transform)
est_points = apply_transform(src_points, est_transform)
error = np.linalg.norm(gt_points - est_points, axis=1).mean()
return error
def compute_modified_chamfer_distance(
raw_points: np.ndarray,
ref_points: np.ndarray,
src_points: np.ndarray,
gt_transform: np.ndarray,
est_transform: np.ndarray,
):
r"""Compute the modified chamfer distance (RPMNet)."""
# P_t -> Q_raw
aligned_src_points = apply_transform(src_points, est_transform)
chamfer_distance_p_q = get_nearest_neighbor(aligned_src_points, raw_points).mean()
# Q -> P_raw
composed_transform = np.matmul(est_transform, np.linalg.inv(gt_transform))
aligned_raw_points = apply_transform(raw_points, composed_transform)
chamfer_distance_q_p = get_nearest_neighbor(ref_points, aligned_raw_points).mean()
# sum up
chamfer_distance = chamfer_distance_p_q + chamfer_distance_q_p
return chamfer_distance
def compute_correspondence_residual(ref_corr_points, src_corr_points, transform):
r"""Computing the mean distance between a set of correspondences."""
src_corr_points = apply_transform(src_corr_points, transform)
residuals = np.sqrt(((ref_corr_points - src_corr_points) ** 2).sum(1))
mean_residual = np.mean(residuals)
return mean_residual
def compute_inlier_ratio(ref_corr_points, src_corr_points, transform, positive_radius=0.1):
r"""Computing the inlier ratio between a set of correspondences."""
src_corr_points = apply_transform(src_corr_points, transform)
residuals = np.sqrt(((ref_corr_points - src_corr_points) ** 2).sum(1))
if residuals.shape[0] > 0:
inlier_ratio = np.mean(residuals < positive_radius)
else:
inlier_ratio = 0
# print("inlier_ratio:",inlier_ratio)
return inlier_ratio
def compute_overlap(ref_points, src_points, transform=None, positive_radius=0.1):
r"""Compute the overlap of two point clouds."""
if transform is not None:
src_points = apply_transform(src_points, transform)
nn_distances = get_nearest_neighbor(ref_points, src_points)
if nn_distances.shape[0] > 0:
overlap = np.mean(nn_distances < positive_radius)
else:
overlap = 0
return overlap
# Ground Truth Utilities
def get_correspondences(ref_points, src_points, transform, matching_radius):
r"""Find the ground truth correspondences within the matching radius between two point clouds.
Return correspondence indices [indices in ref_points, indices in src_points]
"""
src_points = apply_transform(src_points, transform)
src_tree = cKDTree(src_points)
indices_list = src_tree.query_ball_point(ref_points, matching_radius)
corr_indices = np.array(
[(i, j) for i, indices in enumerate(indices_list) for j in indices],
        dtype=np.int64,
)
return corr_indices
# Matching Utilities
def extract_corr_indices_from_feats(
ref_feats: np.ndarray,
src_feats: np.ndarray,
mutual: bool = False,
bilateral: bool = False,
):
r"""Extract correspondence indices from features.
Args:
ref_feats (array): (N, C)
src_feats (array): (M, C)
mutual (bool = False): whether use mutual matching
bilateral (bool = False): whether use bilateral non-mutual matching, ignored if `mutual` is True.
Returns:
ref_corr_indices: (M,)
src_corr_indices: (M,)
"""
ref_nn_indices = get_nearest_neighbor(ref_feats, src_feats, return_index=True)[1]
if mutual or bilateral:
src_nn_indices = get_nearest_neighbor(src_feats, ref_feats, return_index=True)[1]
ref_indices = np.arange(ref_feats.shape[0])
if mutual:
ref_masks = np.equal(src_nn_indices[ref_nn_indices], ref_indices)
ref_corr_indices = ref_indices[ref_masks]
src_corr_indices = ref_nn_indices[ref_corr_indices]
else:
src_indices = np.arange(src_feats.shape[0])
ref_corr_indices = np.concatenate([ref_indices, src_nn_indices], axis=0)
src_corr_indices = np.concatenate([ref_nn_indices, src_indices], axis=0)
else:
ref_corr_indices = np.arange(ref_feats.shape[0])
src_corr_indices = ref_nn_indices
return ref_corr_indices, src_corr_indices
def extract_correspondences_from_feats(
ref_points: np.ndarray,
src_points: np.ndarray,
ref_feats: np.ndarray,
src_feats: np.ndarray,
mutual: bool = False,
return_feat_dist: bool = False,
):
r"""Extract correspondences from features."""
ref_corr_indices, src_corr_indices = extract_corr_indices_from_feats(ref_feats, src_feats, mutual=mutual)
ref_corr_points = ref_points[ref_corr_indices]
src_corr_points = src_points[src_corr_indices]
outputs = [ref_corr_points, src_corr_points]
if return_feat_dist:
ref_corr_feats = ref_feats[ref_corr_indices]
src_corr_feats = src_feats[src_corr_indices]
feat_dists = np.linalg.norm(ref_corr_feats - src_corr_feats, axis=1)
outputs.append(feat_dists)
return outputs
# Evaluation Utilities
def evaluate_correspondences(ref_points, src_points, transform, positive_radius=0.1):
overlap = compute_overlap(ref_points, src_points, transform, positive_radius=positive_radius)
inlier_ratio = compute_inlier_ratio(ref_points, src_points, transform, positive_radius=positive_radius)
residual = compute_correspondence_residual(ref_points, src_points, transform)
return {
'overlap': overlap,
'inlier_ratio': inlier_ratio,
'residual': residual,
'num_corr': ref_points.shape[0],
}
def evaluate_sparse_correspondences(ref_points, src_points, ref_corr_indices, src_corr_indices, gt_corr_indices):
ref_gt_corr_indices = gt_corr_indices[:, 0]
src_gt_corr_indices = gt_corr_indices[:, 1]
gt_corr_mat = np.zeros((ref_points.shape[0], src_points.shape[0]))
gt_corr_mat[ref_gt_corr_indices, src_gt_corr_indices] = 1.0
num_gt_correspondences = gt_corr_mat.sum()
pred_corr_mat = np.zeros_like(gt_corr_mat)
pred_corr_mat[ref_corr_indices, src_corr_indices] = 1.0
num_pred_correspondences = pred_corr_mat.sum()
pos_corr_mat = gt_corr_mat * pred_corr_mat
num_pos_correspondences = pos_corr_mat.sum()
precision = num_pos_correspondences / (num_pred_correspondences + 1e-12)
recall = num_pos_correspondences / (num_gt_correspondences + 1e-12)
pos_corr_mat = pos_corr_mat > 0
gt_corr_mat = gt_corr_mat > 0
ref_hit_ratio = np.any(pos_corr_mat, axis=1).sum() / (np.any(gt_corr_mat, axis=1).sum() + 1e-12)
src_hit_ratio = np.any(pos_corr_mat, axis=0).sum() / (np.any(gt_corr_mat, axis=0).sum() + 1e-12)
hit_ratio = 0.5 * (ref_hit_ratio + src_hit_ratio)
return {
'precision': precision,
'recall': recall,
'hit_ratio': hit_ratio,
}
| 10,802 | 36.123711 | 113 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/utils/summary_board.py | from typing import Optional, List
from geotransformer.utils.average_meter import AverageMeter
from geotransformer.utils.common import get_print_format
class SummaryBoard:
r"""Summary board."""
def __init__(self, names: Optional[List[str]] = None, last_n: Optional[int] = None, adaptive=False):
r"""Instantiate a SummaryBoard.
Args:
names (List[str]=None): create AverageMeter with the names.
last_n (int=None): only the last n records are used.
adaptive (bool=False): whether register basic meters automatically on the fly.
"""
self.meter_dict = {}
self.meter_names = []
self.last_n = last_n
self.adaptive = adaptive
if names is not None:
self.register_all(names)
def register_meter(self, name):
self.meter_dict[name] = AverageMeter(last_n=self.last_n)
self.meter_names.append(name)
def register_all(self, names):
for name in names:
self.register_meter(name)
def reset_meter(self, name):
self.meter_dict[name].reset()
def reset_all(self):
for name in self.meter_names:
self.reset_meter(name)
def check_name(self, name):
if name not in self.meter_names:
if self.adaptive:
self.register_meter(name)
else:
raise KeyError('No meter for key "{}".'.format(name))
def update(self, name, value):
self.check_name(name)
self.meter_dict[name].update(value)
def update_from_result_dict(self, result_dict):
if not isinstance(result_dict, dict):
raise TypeError('`result_dict` must be a dict: {}.'.format(type(result_dict)))
for key, value in result_dict.items():
if key not in self.meter_names and self.adaptive:
self.register_meter(key)
if key in self.meter_names:
self.meter_dict[key].update(value)
def sum(self, name):
self.check_name(name)
return self.meter_dict[name].sum()
def mean(self, name):
self.check_name(name)
return self.meter_dict[name].mean()
def std(self, name):
self.check_name(name)
return self.meter_dict[name].std()
def median(self, name):
self.check_name(name)
return self.meter_dict[name].median()
def tostring(self, names=None):
if names is None:
names = self.meter_names
items = []
for name in names:
value = self.meter_dict[name].mean()
fmt = get_print_format(value)
format_string = '{}: {:' + fmt + '}'
items.append(format_string.format(name, value))
summary = ', '.join(items)
return summary
def summary(self, names=None):
if names is None:
names = self.meter_names
summary_dict = {name: self.meter_dict[name].mean() for name in names}
return summary_dict
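# Illustrative usage sketch (added, hypothetical helper name): with
# `adaptive=True`, meters are registered on the fly from result dicts.
def _demo_summary_board():
    board = SummaryBoard(adaptive=True)
    board.update_from_result_dict({'loss': 0.5, 'acc': 0.8})
    board.update_from_result_dict({'loss': 0.3, 'acc': 0.9})
    return board.summary()  # {'loss': 0.4, 'acc': ~0.85}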
| 2,990 | 30.819149 | 104 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/utils/timer.py | import time
class Timer:
def __init__(self):
self.total_prepare_time = 0
self.total_process_time = 0
self.count_prepare_time = 0
self.count_process_time = 0
self.last_time = time.time()
def reset(self):
self.total_prepare_time = 0
self.total_process_time = 0
self.count_prepare_time = 0
self.count_process_time = 0
self.last_time = time.time()
def record_time(self):
self.last_time = time.time()
def add_prepare_time(self):
current_time = time.time()
self.total_prepare_time += current_time - self.last_time
self.count_prepare_time += 1
self.last_time = current_time
def add_process_time(self):
current_time = time.time()
self.total_process_time += current_time - self.last_time
self.count_process_time += 1
self.last_time = current_time
def get_prepare_time(self):
return self.total_prepare_time / (self.count_prepare_time + 1e-12)
def get_process_time(self):
return self.total_process_time / (self.count_process_time + 1e-12)
def tostring(self):
summary = 'time: '
if self.count_prepare_time > 0:
summary += '{:.3f}s/'.format(self.get_prepare_time())
summary += '{:.3f}s'.format(self.get_process_time())
return summary
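# Illustrative usage sketch (added; `data_loader` and `step_fn` are
# hypothetical stand-ins): split each iteration into data-preparation time
# and model-processing time.
def _demo_timer(data_loader, step_fn):
    timer = Timer()
    for batch in data_loader:
        timer.add_prepare_time()  # time since the last record = data loading
        step_fn(batch)
        timer.add_process_time()  # time since prepare = model step
    return timer.tostring()  # e.g. 'time: 0.012s/0.345s'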
class TimerDict:
def __init__(self):
self.total_time = {}
self.count_time = {}
self.last_time = {}
self.timer_keys = []
def add_timer(self, key):
self.total_time[key] = 0.0
self.count_time[key] = 0
self.last_time[key] = 0.0
self.timer_keys.append(key)
def tic(self, key):
if key not in self.timer_keys:
self.add_timer(key)
self.last_time[key] = time.time()
def toc(self, key):
assert key in self.timer_keys
duration = time.time() - self.last_time[key]
self.total_time[key] += duration
self.count_time[key] += 1
def get_time(self, key):
assert key in self.timer_keys
return self.total_time[key] / (float(self.count_time[key]) + 1e-12)
def summary(self, keys):
summary = 'time: '
summary += '/'.join(['{:.3f}s'.format(self.get_time(key)) for key in keys])
return summary
| 2,338 | 28.2375 | 83 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/utils/torch.py | import math
import random
from typing import Callable
from collections import OrderedDict
import numpy as np
import torch
import torch.distributed as dist
import torch.utils.data
import torch.backends.cudnn as cudnn
# Distributed Data Parallel Utilities
def all_reduce_tensor(tensor, world_size=1):
r"""Average reduce a tensor across all workers."""
reduced_tensor = tensor.clone()
dist.all_reduce(reduced_tensor)
reduced_tensor /= world_size
return reduced_tensor
def all_reduce_tensors(x, world_size=1):
r"""Average reduce all tensors across all workers."""
if isinstance(x, list):
x = [all_reduce_tensors(item, world_size=world_size) for item in x]
elif isinstance(x, tuple):
        x = tuple(all_reduce_tensors(item, world_size=world_size) for item in x)
elif isinstance(x, dict):
x = {key: all_reduce_tensors(value, world_size=world_size) for key, value in x.items()}
elif isinstance(x, torch.Tensor):
x = all_reduce_tensor(x, world_size=world_size)
return x
# Dataloader Utilities
def reset_seed_worker_init_fn(worker_id):
r"""Reset seed for data loader worker."""
seed = torch.initial_seed() % (2 ** 32)
# print(worker_id, seed)
np.random.seed(seed)
random.seed(seed)
def build_dataloader(
dataset,
batch_size=1,
num_workers=1,
shuffle=None,
collate_fn=None,
pin_memory=False,
drop_last=False,
distributed=False,
):
if distributed:
sampler = torch.utils.data.DistributedSampler(dataset)
shuffle = False
    else:
        sampler = None
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=shuffle,
sampler=sampler,
collate_fn=collate_fn,
worker_init_fn=reset_seed_worker_init_fn,
pin_memory=pin_memory,
drop_last=drop_last,
)
return data_loader
# Common Utilities
def initialize(seed=None, cudnn_deterministic=True, autograd_anomaly_detection=False):
if seed is not None:
random.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)
if cudnn_deterministic:
cudnn.benchmark = False
cudnn.deterministic = True
else:
cudnn.benchmark = True
cudnn.deterministic = False
torch.autograd.set_detect_anomaly(autograd_anomaly_detection)
def release_cuda(x):
r"""Release all tensors to item or numpy array."""
if isinstance(x, list):
x = [release_cuda(item) for item in x]
elif isinstance(x, tuple):
        x = tuple(release_cuda(item) for item in x)
elif isinstance(x, dict):
x = {key: release_cuda(value) for key, value in x.items()}
elif isinstance(x, torch.Tensor):
if x.numel() == 1:
x = x.item()
else:
x = x.detach().cpu().numpy()
return x
def to_cuda(x):
r"""Move all tensors to cuda."""
if isinstance(x, list):
x = [to_cuda(item) for item in x]
elif isinstance(x, tuple):
        x = tuple(to_cuda(item) for item in x)
elif isinstance(x, dict):
x = {key: to_cuda(value) for key, value in x.items()}
elif isinstance(x, torch.Tensor):
x = x.cuda()
return x
def load_weights(model, snapshot):
r"""Load weights and check keys."""
state_dict = torch.load(snapshot)
model_dict = state_dict['model']
model.load_state_dict(model_dict, strict=False)
snapshot_keys = set(model_dict.keys())
    model_keys = set(model.state_dict().keys())
missing_keys = model_keys - snapshot_keys
unexpected_keys = snapshot_keys - model_keys
return missing_keys, unexpected_keys
# Learning Rate Scheduler
class CosineAnnealingFunction(Callable):
def __init__(self, max_epoch, eta_min=0.0):
self.max_epoch = max_epoch
self.eta_min = eta_min
def __call__(self, last_epoch):
next_epoch = last_epoch + 1
return self.eta_min + 0.5 * (1.0 - self.eta_min) * (1.0 + math.cos(math.pi * next_epoch / self.max_epoch))
class WarmUpCosineAnnealingFunction(Callable):
def __init__(self, total_steps, warmup_steps, eta_init=0.1, eta_min=0.1):
self.total_steps = total_steps
self.warmup_steps = warmup_steps
self.normal_steps = total_steps - warmup_steps
self.eta_init = eta_init
self.eta_min = eta_min
def __call__(self, last_step):
        # last_step starts from -1, so next_step == 0 corresponds to the first call of lr annealing.
next_step = last_step + 1
if next_step < self.warmup_steps:
return self.eta_init + (1.0 - self.eta_init) / self.warmup_steps * next_step
else:
if next_step > self.total_steps:
return self.eta_min
next_step -= self.warmup_steps
return self.eta_min + 0.5 * (1.0 - self.eta_min) * (1 + np.cos(np.pi * next_step / self.normal_steps))
def build_warmup_cosine_lr_scheduler(optimizer, total_steps, warmup_steps, eta_init=0.1, eta_min=0.1, grad_acc_steps=1):
total_steps //= grad_acc_steps
warmup_steps //= grad_acc_steps
cosine_func = WarmUpCosineAnnealingFunction(total_steps, warmup_steps, eta_init=eta_init, eta_min=eta_min)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, cosine_func)
return scheduler
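# Illustrative usage sketch (added, hypothetical helper name): warm up for 1k
# steps, then cosine-anneal towards 10% of the base learning rate.
def _demo_warmup_cosine_scheduler():
    model = torch.nn.Linear(4, 2)  # stand-in module
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = build_warmup_cosine_lr_scheduler(
        optimizer, total_steps=100000, warmup_steps=1000, eta_init=0.1, eta_min=0.1
    )
    for _ in range(10):
        optimizer.step()
        scheduler.step()
    return scheduler.get_last_lr()  # lr ramps up linearly during warm-up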
| 5,365 | 28.977654 | 120 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/utils/visualization.py | import matplotlib.pyplot as plt
import numpy as np
import open3d as o3d
from sklearn.manifold import TSNE
from tqdm import tqdm
from geotransformer.utils.open3d import (
make_open3d_point_cloud,
make_open3d_axes,
make_open3d_corr_lines,
)
def draw_point_to_node(points, nodes, point_to_node, node_colors=None):
if node_colors is None:
node_colors = np.random.rand(*nodes.shape)
# point_colors = node_colors[point_to_node] * make_scaling_along_axis(points, alpha=0.3).reshape(-1, 1)
point_colors = node_colors[point_to_node]
node_colors = np.ones_like(nodes) * np.array([[1, 0, 0]])
ncd = make_open3d_point_cloud(nodes, colors=node_colors)
pcd = make_open3d_point_cloud(points, colors=point_colors)
axes = make_open3d_axes()
o3d.visualization.draw([pcd, ncd, axes])
def draw_node_correspondences(
ref_points,
ref_nodes,
ref_point_to_node,
src_points,
src_nodes,
src_point_to_node,
node_correspondences,
ref_node_colors=None,
src_node_colors=None,
offsets=(0, 2, 0),
):
src_nodes = src_nodes + offsets
src_points = src_points + offsets
if ref_node_colors is None:
ref_node_colors = np.random.rand(*ref_nodes.shape)
# src_point_colors = src_node_colors[src_point_to_node] * make_scaling_along_axis(src_points).reshape(-1, 1)
ref_point_colors = ref_node_colors[ref_point_to_node]
ref_node_colors = np.ones_like(ref_nodes) * np.array([[1, 0, 0]])
if src_node_colors is None:
src_node_colors = np.random.rand(*src_nodes.shape)
# tgt_point_colors = tgt_node_colors[tgt_point_to_node] * make_scaling_along_axis(tgt_points).reshape(-1, 1)
src_point_colors = src_node_colors[src_point_to_node]
src_node_colors = np.ones_like(src_nodes) * np.array([[1, 0, 0]])
ref_ncd = make_open3d_point_cloud(ref_nodes, colors=ref_node_colors)
ref_pcd = make_open3d_point_cloud(ref_points, colors=ref_point_colors)
src_ncd = make_open3d_point_cloud(src_nodes, colors=src_node_colors)
src_pcd = make_open3d_point_cloud(src_points, colors=src_point_colors)
corr_lines = make_open3d_corr_lines(ref_nodes, src_nodes, node_correspondences)
axes = make_open3d_axes(scale=0.1)
o3d.visualization.draw([ref_pcd, ref_ncd, src_pcd, src_ncd, corr_lines, axes])
def get_colors_with_tsne(data):
r"""
Use t-SNE to project high-dimension feats to rgbd
:param data: (N, C)
:return colors: (N, 3)
"""
tsne = TSNE(n_components=1, perplexity=40, n_iter=300, random_state=0)
tsne_results = tsne.fit_transform(data).reshape(-1)
tsne_min = np.min(tsne_results)
tsne_max = np.max(tsne_results)
normalized_tsne_results = (tsne_results - tsne_min) / (tsne_max - tsne_min)
colors = plt.cm.Spectral(normalized_tsne_results)[:, :3]
return colors
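# Illustrative usage sketch (added, hypothetical helper name): map a random
# feature matrix to per-point RGB colors for visualization.
def _demo_tsne_colors():
    feats = np.random.rand(200, 32)
    colors = get_colors_with_tsne(feats)  # (200, 3), values in [0, 1]
    return colors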
def write_points_to_obj(file_name, points, colors=None, radius=0.02, resolution=6):
sphere = o3d.geometry.TriangleMesh.create_sphere(radius=radius, resolution=resolution)
vertices = np.asarray(sphere.vertices)
triangles = np.asarray(sphere.triangles) + 1
v_lines = []
f_lines = []
num_point = points.shape[0]
for i in tqdm(range(num_point)):
n = i * vertices.shape[0]
for j in range(vertices.shape[0]):
new_vertex = points[i] + vertices[j]
line = 'v {:.6f} {:.6f} {:.6f}'.format(new_vertex[0], new_vertex[1], new_vertex[2])
if colors is not None:
line += ' {:.6f} {:.6f} {:.6f}'.format(colors[i, 0], colors[i, 1], colors[i, 2])
v_lines.append(line + '\n')
for j in range(triangles.shape[0]):
new_triangle = triangles[j] + n
line = 'f {} {} {}\n'.format(new_triangle[0], new_triangle[1], new_triangle[2])
f_lines.append(line)
with open(file_name, 'w') as f:
f.writelines(v_lines)
f.writelines(f_lines)
def convert_points_to_mesh(points, colors=None, radius=0.02, resolution=6):
sphere = o3d.geometry.TriangleMesh.create_sphere(radius=radius, resolution=resolution)
vertices = np.asarray(sphere.vertices)
triangles = np.asarray(sphere.triangles)
new_vertices = points[:, None, :] + vertices[None, :, :]
    if colors is not None:
        new_vertex_colors = np.broadcast_to(colors[:, None, :], new_vertices.shape)
        new_vertex_colors = new_vertex_colors.reshape(-1, 3)
    new_vertices = new_vertices.reshape(-1, 3)
    bases = np.arange(points.shape[0]) * vertices.shape[0]
    new_triangles = bases[:, None, None] + triangles[None, :, :]
    new_triangles = new_triangles.reshape(-1, 3)
    mesh = o3d.geometry.TriangleMesh()
    mesh.vertices = o3d.utility.Vector3dVector(new_vertices)
    if colors is not None:
        # only attach vertex colors when provided (the original assigned
        # `new_vertex_colors` unconditionally, raising NameError for colors=None)
        mesh.vertex_colors = o3d.utility.Vector3dVector(new_vertex_colors)
mesh.triangles = o3d.utility.Vector3iVector(new_triangles)
return mesh
def write_points_to_ply(file_name, points, colors=None, radius=0.02, resolution=6):
mesh = convert_points_to_mesh(points, colors=colors, radius=radius, resolution=resolution)
o3d.io.write_triangle_mesh(file_name, mesh, write_vertex_normals=False)
def write_correspondences_to_obj(file_name, src_corr_points, tgt_corr_points):
v_lines = []
l_lines = []
num_corr = src_corr_points.shape[0]
for i in tqdm(range(num_corr)):
n = i * 2
src_point = src_corr_points[i]
tgt_point = tgt_corr_points[i]
line = 'v {:.6f} {:.6f} {:.6f}\n'.format(src_point[0], src_point[1], src_point[2])
v_lines.append(line)
line = 'v {:.6f} {:.6f} {:.6f}\n'.format(tgt_point[0], tgt_point[1], tgt_point[2])
v_lines.append(line)
line = 'l {} {}\n'.format(n + 1, n + 2)
l_lines.append(line)
with open(file_name, 'w') as f:
f.writelines(v_lines)
f.writelines(l_lines)
| 5,831 | 35.679245 | 112 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/octree_optimize/include/calibration.hpp | /*
* Copyright (C) 2021 by Autonomous Driving Group, Shanghai AI Laboratory
* Limited. All rights reserved.
* Yan Guohang <[email protected]>
*/
#pragma once
#include <Eigen/Dense>
#include <array>
#include <map>
#include <memory>
#include <pcl/io/pcd_io.h>
#include <string>
#include <vector>
#include "logging.hpp"
#include "registration_icp.hpp"
struct InitialExtrinsic {
Eigen::Vector3d euler_angles;
Eigen::Vector3d t_matrix;
};
struct PlaneParam {
PlaneParam() {}
PlaneParam(const Eigen::Vector3d &n, double i) : normal(n), intercept(i) {}
Eigen::Vector3d normal;
double intercept;
};
class Calibrator {
public:
Calibrator();
void LoadCalibrationData(
const std::map<int32_t, pcl::PointCloud<pcl::PointXYZI>> lidar_points,
const std::map<int32_t, InitialExtrinsic> extrinsics);
Eigen::Matrix3d GetRotation(double roll, double pitch, double yaw);
Eigen::Matrix4d GetMatrix(const Eigen::Vector3d &translation,
const Eigen::Matrix3d &rotation);
void Calibrate();
bool
GroundPlaneExtraction(const pcl::PointCloud<pcl::PointXYZI>::Ptr &in_cloud,
pcl::PointCloud<pcl::PointXYZI>::Ptr g_cloud,
pcl::PointCloud<pcl::PointXYZI>::Ptr ng_cloud,
PlaneParam &plane);
std::map<int32_t, Eigen::Matrix4d> GetFinalTransformation();
private:
std::map<int32_t, pcl::PointCloud<pcl::PointXYZI>> pcs_;
std::map<int32_t, Eigen::Matrix4d> init_extrinsics_;
std::map<int32_t, Eigen::Matrix4d> refined_extrinsics_;
std::unique_ptr<ICPRegistrator> registrator_;
};
| 1,609 | 28.814815 | 77 | hpp |
LiDAR2LiDAR | LiDAR2LiDAR-master/octree_optimize/include/logging.hpp | /*
* Copyright (C) 2021 by Autonomous Driving Group, Shanghai AI Laboratory
* Limited. All rights reserved.
* Yan Guohang <[email protected]>
*/
#ifndef LOGGING_HPP_
#define LOGGING_HPP_
#define OUTPUT
#define __FILENAME__ \
(strrchr(__FILE__, '/') ? (strrchr(__FILE__, '/') + 1) : __FILE__)
#ifdef OUTPUT
#define LOGI(...) \
(printf("[INFO] [%d@%s] ", __LINE__, __FILENAME__), printf(__VA_ARGS__), \
printf("\n"))
#define LOGW(...) \
(printf("\33[33m[WARN] [%d@%s] ", __LINE__, __FILENAME__), \
printf(__VA_ARGS__), printf("\033[0m\n"))
#define LOGE(...) \
(printf("\33[31m[ERROR] [%d@%s] ", __LINE__, __FILENAME__), \
printf(__VA_ARGS__), printf("\033[0m\n"))
#else
#define LOGI(...) ((void)0)
#define LOGW(...) ((void)0)
#define LOGE(...) ((void)0)
#endif
#ifdef DEBUG
#define LOGDEBUG(...) (printf(__VA_ARGS__), printf("\n"))
#else
#define LOGDEBUG(...) ((void)0)
#endif
#endif // LOGGING_HPP_
| 1,208 | 33.542857 | 80 | hpp |
LiDAR2LiDAR | LiDAR2LiDAR-master/octree_optimize/include/registration_icp.hpp | #pragma once
#include "logging.hpp"
#include <eigen3/Eigen/Core>
#include <eigen3/Eigen/Dense>
#include <eigen3/Eigen/Geometry>
#include <pcl/features/normal_3d.h>
#include <pcl/io/pcd_io.h>
#include <pcl/kdtree/kdtree_flann.h>
#include <pcl/octree/octree_search.h>
#include <pcl/point_cloud.h>
#include <pcl/registration/icp.h>
class ICPRegistrator {
public:
ICPRegistrator();
void SetTargetCloud(const pcl::PointCloud<pcl::PointXYZI>::Ptr &gcloud,
const pcl::PointCloud<pcl::PointXYZI>::Ptr &ngcloud,
const pcl::PointCloud<pcl::PointXYZI>::Ptr &cloud);
void SetSourceCloud(const pcl::PointCloud<pcl::PointXYZI>::Ptr &gcloud,
const pcl::PointCloud<pcl::PointXYZI>::Ptr &ngcloud,
const pcl::PointCloud<pcl::PointXYZI>::Ptr &cloud);
bool RegistrationByICP(const Eigen::Matrix4d &init_guess,
Eigen::Matrix4d &transform);
bool RegistrationByICP2(const Eigen::Matrix4d &init_guess,
Eigen::Matrix4d &refined_extrinsic);
Eigen::Matrix4d GetFinalTransformation();
double CalculateICPError(const pcl::KdTreeFLANN<pcl::PointXYZI> &kdtree,
const Eigen::Matrix4d &init_guess, float cur_yaw);
void computeNormals(const pcl::PointCloud<pcl::PointXYZI>::Ptr in_pts,
pcl::PointCloud<pcl::PointXYZINormal>::Ptr out_pts);
bool RegistrationByVoxelOccupancy(const Eigen::Matrix4d &init_guess,
Eigen::Matrix4d &refined_extrinsic);
size_t ComputeVoxelOccupancy(const Eigen::Matrix4d &init_guess);
private:
pcl::PointCloud<pcl::PointXYZI>::Ptr tgt_gcloud_;
pcl::PointCloud<pcl::PointXYZI>::Ptr tgt_ngcloud_;
pcl::PointCloud<pcl::PointXYZI>::Ptr tgt_cloud_;
pcl::PointCloud<pcl::PointXYZI>::Ptr src_gcloud_;
pcl::PointCloud<pcl::PointXYZI>::Ptr src_ngcloud_;
pcl::PointCloud<pcl::PointXYZI>::Ptr src_cloud_;
pcl::PointCloud<pcl::PointXYZI>::Ptr all_cloud_;
pcl::octree::OctreePointCloudSearch<pcl::PointXYZI>::Ptr all_octree_;
Eigen::Matrix4d final_transformation_;
};
| 2,114 | 41.3 | 77 | hpp |
LiDAR2LiDAR | LiDAR2LiDAR-master/octree_optimize/src/calibration.cpp | /*
* Copyright (C) 2021 by Autonomous Driving Group, Shanghai AI Laboratory
* Limited. All rights reserved.
* Yan Guohang <[email protected]>
*/
#include "calibration.hpp"
#include <pcl/common/transforms.h>
#include <pcl/conversions.h>
#include <pcl/features/normal_3d.h>
#include <pcl/filters/conditional_removal.h>
#include <pcl/filters/extract_indices.h>
#include <pcl/filters/passthrough.h>
#include <pcl/kdtree/kdtree_flann.h>
#include <pcl/point_types.h>
#include <pcl/registration/gicp.h>
#include <pcl/registration/icp.h>
#include <pcl/registration/icp_nl.h>
#include <pcl/registration/ndt.h>
#include <pcl/sample_consensus/method_types.h>
#include <pcl/sample_consensus/model_types.h>
#include <pcl/segmentation/sac_segmentation.h>
const double eps = 1.0e-6;
Calibrator::Calibrator() { registrator_.reset(new ICPRegistrator); }
void Calibrator::LoadCalibrationData(
const std::map<int32_t, pcl::PointCloud<pcl::PointXYZI>> lidar_points,
const std::map<int32_t, InitialExtrinsic> extrinsics) {
pcs_ = lidar_points;
for (auto src : extrinsics) {
int32_t device_id = src.first;
InitialExtrinsic extrinsic = src.second;
Eigen::Matrix3d rotation;
Eigen::AngleAxisd Rx(
Eigen::AngleAxisd(extrinsic.euler_angles[0], Eigen::Vector3d::UnitX()));
Eigen::AngleAxisd Ry(
Eigen::AngleAxisd(extrinsic.euler_angles[1], Eigen::Vector3d::UnitY()));
Eigen::AngleAxisd Rz(
Eigen::AngleAxisd(extrinsic.euler_angles[2], Eigen::Vector3d::UnitZ()));
rotation = Rz * Ry * Rx;
Eigen::Matrix3d rot = rotation;
Eigen::Matrix4d init_ext = Eigen::Matrix4d::Identity();
init_ext.block<3, 1>(0, 3) = extrinsic.t_matrix;
init_ext.block<3, 3>(0, 0) = rot;
init_extrinsics_.insert(std::make_pair(device_id, init_ext));
}
}
void Calibrator::Calibrate() {
Eigen::Matrix4d transform = Eigen::Matrix4d::Identity();
Eigen::Matrix4d curr_transform = Eigen::Matrix4d::Identity();
int32_t master_id = 0;
auto master_iter = pcs_.find(master_id);
pcl::PointCloud<pcl::PointXYZI> master_pc = master_iter->second;
pcl::PointCloud<pcl::PointXYZI>::Ptr master_pc_ptr = master_pc.makeShared();
PlaneParam master_gplane;
pcl::PointCloud<pcl::PointXYZI>::Ptr master_gcloud(
new pcl::PointCloud<pcl::PointXYZI>);
pcl::PointCloud<pcl::PointXYZI>::Ptr master_ngcloud(
new pcl::PointCloud<pcl::PointXYZI>);
bool ret = GroundPlaneExtraction(master_pc_ptr, master_gcloud, master_ngcloud,
master_gplane);
if (!ret) {
LOGE("ground plane extraction failed.\n");
return;
}
registrator_->SetTargetCloud(master_gcloud, master_ngcloud, master_pc_ptr);
Eigen::Vector3d t_mp(0, 0,
-master_gplane.intercept / master_gplane.normal(2));
for (auto iter = pcs_.begin(); iter != pcs_.end(); iter++) {
int32_t slave_id = iter->first;
if (slave_id == master_id)
continue;
pcl::PointCloud<pcl::PointXYZI> slave_pc = iter->second;
pcl::PointCloud<pcl::PointXYZI> slave_original_pc = slave_pc;
if (init_extrinsics_.find(slave_id) == init_extrinsics_.end()) {
LOGE("cannot find the init extrinsic, id: %d\n", slave_id);
return;
}
Eigen::Matrix4d init_ext = init_extrinsics_[slave_id];
pcl::PointCloud<pcl::PointXYZI>::Ptr slave_pc_ptr = slave_pc.makeShared();
pcl::PointCloud<pcl::PointXYZI>::Ptr cloud_after_Condition(
new pcl::PointCloud<pcl::PointXYZI>);
PlaneParam slave_gplane;
pcl::PointCloud<pcl::PointXYZI>::Ptr slave_gcloud(
new pcl::PointCloud<pcl::PointXYZI>);
pcl::PointCloud<pcl::PointXYZI>::Ptr slave_ngcloud(
new pcl::PointCloud<pcl::PointXYZI>);
    // erase the points close to the LiDAR
if (slave_id) {
pcl::ConditionAnd<pcl::PointXYZI>::Ptr range_condition(
new pcl::ConditionAnd<pcl::PointXYZI>());
range_condition->addComparison(
pcl::FieldComparison<pcl::PointXYZI>::ConstPtr(
new pcl::FieldComparison<pcl::PointXYZI>(
"x", pcl::ComparisonOps::GT, -1)));
range_condition->addComparison(
pcl::FieldComparison<pcl::PointXYZI>::ConstPtr(
new pcl::FieldComparison<pcl::PointXYZI>(
"x", pcl::ComparisonOps::LT, 1))); //
range_condition->addComparison(
pcl::FieldComparison<pcl::PointXYZI>::ConstPtr(
new pcl::FieldComparison<pcl::PointXYZI>(
"y", pcl::ComparisonOps::GT, -1.0)));
range_condition->addComparison(
pcl::FieldComparison<pcl::PointXYZI>::ConstPtr(
new pcl::FieldComparison<pcl::PointXYZI>(
"y", pcl::ComparisonOps::LT, 1)));
range_condition->addComparison(
pcl::FieldComparison<pcl::PointXYZI>::ConstPtr(
new pcl::FieldComparison<pcl::PointXYZI>(
"z", pcl::ComparisonOps::GT, -1)));
range_condition->addComparison(
pcl::FieldComparison<pcl::PointXYZI>::ConstPtr(
new pcl::FieldComparison<pcl::PointXYZI>(
"z", pcl::ComparisonOps::LT, 1)));
pcl::ConditionalRemoval<pcl::PointXYZI> condition;
condition.setCondition(range_condition);
condition.setInputCloud(slave_pc_ptr);
condition.setKeepOrganized(false);
condition.filter(*cloud_after_Condition);
pcl::KdTreeFLANN<pcl::PointXYZI> kdtree;
pcl::PointXYZI searchPoint;
int K = 1;
std::vector<int> pointIdxNKNSearch(K);
std::vector<float> pointNKNSquaredDistance(K);
std::vector<pcl::PointXYZI> DeleteData;
int num = 0;
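      // (added note) For every point surviving the range filter, find its
      // nearest neighbor in the full slave cloud and erase exact duplicates
      // (distance < eps). The kd-tree is rebuilt inside the loop because the
      // source cloud shrinks after each erase; this is costly but keeps the
      // search consistent with the current cloud.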
for (auto iter = cloud_after_Condition->begin();
iter != cloud_after_Condition->end(); iter++) {
searchPoint.x = iter->x;
searchPoint.y = iter->y;
searchPoint.z = iter->z;
kdtree.setInputCloud(slave_pc_ptr);
num = kdtree.nearestKSearch(searchPoint, K, pointIdxNKNSearch,
pointNKNSquaredDistance);
if (num > 0) {
if (sqrt(pointNKNSquaredDistance[0]) < eps) {
auto iterB = slave_pc_ptr->begin() + pointIdxNKNSearch[0];
slave_pc_ptr->erase(iterB);
DeleteData.push_back(searchPoint);
if (slave_pc_ptr->size() == 0) {
break;
}
searchPoint.x = 0;
searchPoint.y = 0;
searchPoint.z = 0;
num = 0;
pointIdxNKNSearch.clear();
pointNKNSquaredDistance.clear();
}
}
}
}
ret = GroundPlaneExtraction(slave_pc_ptr, slave_gcloud, slave_ngcloud,
slave_gplane);
if (!ret) {
LOGE("ground plane extraction failed.\n");
continue;
}
pcl::PointCloud<pcl::PointXYZI>::Ptr slave_original_pc_ptr =
slave_original_pc.makeShared();
PlaneParam slave_original_gplane;
pcl::PointCloud<pcl::PointXYZI>::Ptr slave_original_ngcloud(
new pcl::PointCloud<pcl::PointXYZI>);
pcl::PointCloud<pcl::PointXYZI>::Ptr slave_original_gcloud(
new pcl::PointCloud<pcl::PointXYZI>);
ret = GroundPlaneExtraction(slave_original_pc_ptr, slave_original_gcloud,
slave_original_ngcloud, slave_original_gplane);
registrator_->SetSourceCloud(slave_original_gcloud, slave_original_ngcloud,
slave_original_pc_ptr);
// ground normal direction
Eigen::Vector3f ground_point(
0, 0, (slave_gplane.intercept) / (-slave_gplane.normal(2)));
Eigen::Vector3f point2plane_vector;
int Ontheground = 0;
int Undertheground = 0;
for (auto iter = slave_ngcloud->begin(); iter < slave_ngcloud->end() - 100;
iter += 100) {
Eigen::Vector3f samplePoint(iter->x, iter->y, iter->z);
point2plane_vector = samplePoint - ground_point;
if ((point2plane_vector(0) * slave_gplane.normal(0) +
point2plane_vector(1) * slave_gplane.normal(1) +
point2plane_vector(2) * slave_gplane.normal(2)) >= 0) {
Ontheground++;
} else {
Undertheground++;
}
}
// ground plane align
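    // Rotate the slave ground normal onto the master's via the axis-angle
    // representation (axis = n_s x n_m, angle = acos(n_s . n_m)), then
    // translate along z so that the two plane intercepts coincide.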
Eigen::Vector3d rot_axis2 = slave_gplane.normal.cross(master_gplane.normal);
rot_axis2.normalize();
double alpha2 = std::acos(slave_gplane.normal.dot(master_gplane.normal));
Eigen::Matrix3d R_ms;
R_ms = Eigen::AngleAxisd(alpha2, rot_axis2);
Eigen::Vector3d slave_intcpt_local(
0, 0, -slave_gplane.intercept / slave_gplane.normal(2));
Eigen::Vector3d slave_intcpt_master = R_ms * slave_intcpt_local;
Eigen::Vector3d t_ms(0, 0, t_mp(2) - slave_intcpt_master(2));
Eigen::Matrix4d T_ms = Eigen::Matrix4d::Identity();
T_ms.block<3, 1>(0, 3) = t_ms;
T_ms.block<3, 3>(0, 0) = R_ms;
double z_error = std::fabs(t_ms(2) - init_ext(2, 3));
if (z_error > 0.5) {
slave_gplane.normal = -slave_gplane.normal;
slave_gplane.intercept = -slave_gplane.intercept;
rot_axis2 = slave_gplane.normal.cross(master_gplane.normal);
rot_axis2.normalize();
alpha2 = std::acos(slave_gplane.normal.dot(master_gplane.normal));
R_ms = Eigen::AngleAxisd(alpha2, rot_axis2);
slave_intcpt_local = Eigen::Vector3d(
0, 0, -slave_gplane.intercept / slave_gplane.normal(2));
slave_intcpt_master = R_ms * slave_intcpt_local;
t_ms = Eigen::Vector3d(0, 0, t_mp(2) - slave_intcpt_master(2));
T_ms.block<3, 1>(0, 3) = t_ms;
T_ms.block<3, 3>(0, 0) = R_ms;
}
curr_transform = init_ext * T_ms;
registrator_->RegistrationByICP(curr_transform, transform);
Eigen::Matrix4d final_opt_result;
registrator_->RegistrationByICP2(transform, final_opt_result);
refined_extrinsics_.insert(std::make_pair(slave_id, final_opt_result));
}
}
bool Calibrator::GroundPlaneExtraction(
const pcl::PointCloud<pcl::PointXYZI>::Ptr &in_cloud,
pcl::PointCloud<pcl::PointXYZI>::Ptr g_cloud,
pcl::PointCloud<pcl::PointXYZI>::Ptr ng_cloud, PlaneParam &plane) {
pcl::ModelCoefficients::Ptr coefficients(new pcl::ModelCoefficients);
pcl::PointIndices::Ptr inliers(new pcl::PointIndices);
pcl::SACSegmentation<pcl::PointXYZI> seg;
seg.setOptimizeCoefficients(true);
seg.setModelType(pcl::SACMODEL_PLANE);
seg.setMethodType(pcl::SAC_RANSAC);
seg.setDistanceThreshold(0.2);
seg.setInputCloud(in_cloud);
seg.segment(*inliers, *coefficients);
if (inliers->indices.size() == 0) {
PCL_ERROR("Could not estimate a planar model for the given dataset.");
return false;
}
pcl::ExtractIndices<pcl::PointXYZI> extract;
extract.setInputCloud(in_cloud);
extract.setIndices(inliers);
extract.filter(*g_cloud);
extract.setNegative(true);
extract.filter(*ng_cloud);
plane.normal(0) = coefficients->values[0];
plane.normal(1) = coefficients->values[1];
plane.normal(2) = coefficients->values[2];
plane.intercept = coefficients->values[3];
return true;
}
std::map<int32_t, Eigen::Matrix4d> Calibrator::GetFinalTransformation() {
return refined_extrinsics_;
} | 11,055 | 39.498168 | 80 | cpp |
LiDAR2LiDAR | LiDAR2LiDAR-master/octree_optimize/src/registration_icp.cpp | #include "registration_icp.hpp"
#include <limits>
#include <pcl/common/transforms.h>
#include <pcl/features/normal_3d.h>
#include <pcl/registration/gicp.h>
#include <pcl/registration/icp_nl.h>
#include <pcl/registration/ndt.h>
ICPRegistrator::ICPRegistrator() {
all_cloud_.reset(new pcl::PointCloud<pcl::PointXYZI>());
all_octree_.reset(
new pcl::octree::OctreePointCloudSearch<pcl::PointXYZI>(0.05));
all_octree_->setInputCloud(all_cloud_);
}
void ICPRegistrator::SetTargetCloud(
const pcl::PointCloud<pcl::PointXYZI>::Ptr &gcloud,
const pcl::PointCloud<pcl::PointXYZI>::Ptr &ngcloud,
const pcl::PointCloud<pcl::PointXYZI>::Ptr &cloud) {
tgt_gcloud_ = gcloud;
tgt_ngcloud_ = ngcloud;
tgt_cloud_ = cloud;
}
void ICPRegistrator::SetSourceCloud(
const pcl::PointCloud<pcl::PointXYZI>::Ptr &gcloud,
const pcl::PointCloud<pcl::PointXYZI>::Ptr &ngcloud,
const pcl::PointCloud<pcl::PointXYZI>::Ptr &cloud) {
src_gcloud_ = gcloud;
src_ngcloud_ = ngcloud;
src_cloud_ = cloud;
}
Eigen::Matrix4d ICPRegistrator::GetFinalTransformation() {
return final_transformation_;
}
Eigen::Matrix4d GetDeltaT(const float yaw) {
Eigen::Matrix3d deltaR = Eigen::Matrix3d(
Eigen::AngleAxisd(yaw * M_PI / 180.0, Eigen::Vector3d::UnitZ()) *
Eigen::AngleAxisd(0, Eigen::Vector3d::UnitY()) *
Eigen::AngleAxisd(0, Eigen::Vector3d::UnitX()));
Eigen::Matrix4d deltaT = Eigen::Matrix4d::Identity();
deltaT.block<3, 3>(0, 0) = deltaR;
return deltaT;
}
bool ICPRegistrator::RegistrationByICP(const Eigen::Matrix4d &init_guess,
Eigen::Matrix4d &transform) {
pcl::KdTreeFLANN<pcl::PointXYZI> kdtree;
kdtree.setInputCloud(tgt_ngcloud_);
double cur_yaw = 0;
double min_error = CalculateICPError(kdtree, init_guess, cur_yaw);
double best_yaw = cur_yaw;
float degree_2_radian = 0.017453293;
int iter_cnt = 0;
double step = 5; // Resolution is 5°
int search_range = 10;
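  // Coarse-to-fine 1-D search over yaw: evaluate candidates around the current
  // best, then halve both the step and the search range each iteration.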
while (iter_cnt < 5) {
for (int delta = -search_range; delta < search_range; delta++) {
double yaw = cur_yaw + delta * step * degree_2_radian;
double error = CalculateICPError(kdtree, init_guess, yaw);
if (error < min_error) {
min_error = error;
best_yaw = yaw;
}
}
search_range = static_cast<int>(search_range / 2 + 0.5);
step /= 2;
cur_yaw = best_yaw;
iter_cnt++;
}
Eigen::Matrix4d T = GetDeltaT(best_yaw);
T = T * init_guess;
transform = T;
return true;
}
double ICPRegistrator::CalculateICPError(
const pcl::KdTreeFLANN<pcl::PointXYZI> &kdtree,
const Eigen::Matrix4d &init_guess, float cur_yaw) {
Eigen::Matrix4d T = GetDeltaT(cur_yaw) * init_guess;
pcl::PointCloud<pcl::PointXYZI> trans_cloud;
pcl::transformPointCloud(*src_ngcloud_, trans_cloud, T);
double dist_sum = 0;
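  // Accumulate nearest-neighbor distances (PCL returns squared distances)
  // between the transformed source cloud and the target non-ground cloud.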
for (size_t j = 0; j < trans_cloud.points.size(); j++) {
std::vector<int> indices;
std::vector<float> distances;
int k = 1;
pcl::PointXYZI point = trans_cloud.points[j];
int size = kdtree.nearestKSearch(point, k, indices, distances);
if (distances.size() > 0) {
dist_sum += distances[0];
} else {
LOGI("no nearest neighbors found");
}
}
return dist_sum;
}
bool ICPRegistrator::RegistrationByICP2(const Eigen::Matrix4d &init_guess,
Eigen::Matrix4d &refined_extrinsic) {
pcl::PointCloud<pcl::PointXYZI> trans_cloud;
pcl::transformPointCloud(*src_cloud_, trans_cloud, init_guess);
LOGI("compute normals of source points cloud.");
pcl::PointCloud<pcl::PointXYZINormal>::Ptr cloud_before_normal(
new pcl::PointCloud<pcl::PointXYZINormal>);
computeNormals(trans_cloud.makeShared(), cloud_before_normal);
LOGI("compute normals of target points cloud.");
pcl::PointCloud<pcl::PointXYZINormal>::Ptr cloud_tgt_normal(
new pcl::PointCloud<pcl::PointXYZINormal>);
computeNormals(tgt_cloud_, cloud_tgt_normal);
pcl::IterativeClosestPointWithNormals<pcl::PointXYZINormal,
pcl::PointXYZINormal>
icp;
icp.setInputSource(cloud_before_normal);
icp.setInputTarget(cloud_tgt_normal);
icp.setMaximumIterations(10);
  // icp.setMaxCorrespondenceDistance(1.0);
  icp.setMaxCorrespondenceDistance(0.3); // maximum correspondence distance, in meters
pcl::PointCloud<pcl::PointXYZINormal> cloud_out;
icp.align(cloud_out);
Eigen::Matrix4f transform = icp.getFinalTransformation();
refined_extrinsic = transform.cast<double>() * init_guess;
return true;
}
void ICPRegistrator::computeNormals(
const pcl::PointCloud<pcl::PointXYZI>::Ptr in_pts,
pcl::PointCloud<pcl::PointXYZINormal>::Ptr out_pts) {
pcl::NormalEstimation<pcl::PointXYZI, pcl::PointXYZINormal> norm_est;
pcl::search::KdTree<pcl::PointXYZI>::Ptr tree(
new pcl::search::KdTree<pcl::PointXYZI>());
norm_est.setSearchMethod(tree);
norm_est.setKSearch(40);
// norm_est.setRadiusSearch(5);
norm_est.setInputCloud(in_pts);
norm_est.compute(*out_pts);
LOGI("normal point cloud number: %d\n", out_pts->size());
for (int i = 0; i < out_pts->size(); ++i) {
(*out_pts)[i].x = (*in_pts)[i].x;
(*out_pts)[i].y = (*in_pts)[i].y;
(*out_pts)[i].z = (*in_pts)[i].z;
}
}
| 5,233 | 33.20915 | 77 | cpp |
LiDAR2LiDAR | LiDAR2LiDAR-master/octree_optimize/src/run_lidar2lidar.cpp | #include <chrono> // NOLINT
#include <iostream>
#include <pcl/common/transforms.h>
#include <thread> // NOLINT
#include <time.h>
#include "calibration.hpp"
unsigned char color_map[10][3] = {{255, 255, 255}, // "white"
{255, 0, 0}, // "red"
{0, 255, 0}, // "green"
{0, 0, 255}, // "blue"
{255, 255, 0}, // "yellow"
{255, 0, 255}, // "pink"
{50, 255, 255}, // "light-blue"
                                  {135, 60, 0},    // brown
                                  {150, 240, 80},  // light-green
                                  {80, 30, 180}};  // purple
void LoadPointCloud(
const std::string &filename,
std::map<int32_t, pcl::PointCloud<pcl::PointXYZI>> &lidar_points) {
std::ifstream file(filename);
if (!file.is_open()) {
std::cout << "[ERROR] open file " << filename << " failed." << std::endl;
exit(1);
}
std::string line, tmpStr;
while (getline(file, line)) {
int32_t device_id;
std::string point_cloud_path;
pcl::PointCloud<pcl::PointXYZI>::Ptr cloud(
new pcl::PointCloud<pcl::PointXYZI>);
std::stringstream ss(line);
ss >> tmpStr >> device_id;
getline(file, line);
ss = std::stringstream(line);
ss >> tmpStr >> point_cloud_path;
if (pcl::io::loadPCDFile(point_cloud_path, *cloud) < 0) {
std::cout << "[ERROR] cannot open pcd_file: " << point_cloud_path << "\n";
exit(1);
}
lidar_points.insert(std::make_pair(device_id, *cloud));
}
}
void LoadCalibFile(const std::string &filename,
std::map<int32_t, InitialExtrinsic> &calib_extrinsic) {
std::ifstream file(filename);
if (!file.is_open()) {
std::cout << "open file " << filename << " failed." << std::endl;
exit(1);
}
float degree_2_radian = 0.017453293;
std::string line, tmpStr;
while (getline(file, line)) {
int32_t device_id;
InitialExtrinsic extrinsic;
std::stringstream ss(line);
ss >> tmpStr >> device_id;
getline(file, line);
ss = std::stringstream(line);
ss >> tmpStr >> extrinsic.euler_angles[0] >> extrinsic.euler_angles[1] >>
extrinsic.euler_angles[2] >> extrinsic.t_matrix[0] >>
extrinsic.t_matrix[1] >> extrinsic.t_matrix[2];
extrinsic.euler_angles[0] = extrinsic.euler_angles[0] * degree_2_radian;
extrinsic.euler_angles[1] = extrinsic.euler_angles[1] * degree_2_radian;
extrinsic.euler_angles[2] = extrinsic.euler_angles[2] * degree_2_radian;
calib_extrinsic.insert(std::make_pair(device_id, extrinsic));
}
}
int main(int argc, char *argv[]) {
if (argc != 3) {
std::cout << "Usage: ./run_lidar2lidar <lidar_file> <calib_file>"
"\nexample:\n\t"
"./bin/run_lidar2lidar data/0001/lidar_cloud_path.txt "
"data/0001/initial_extrinsic.txt"
<< std::endl;
return 0;
}
auto lidar_file = argv[1];
auto calib_file = argv[2];
std::map<int32_t, pcl::PointCloud<pcl::PointXYZI>> lidar_points;
LoadPointCloud(lidar_file, lidar_points);
std::map<int32_t, InitialExtrinsic> extrinsics;
LoadCalibFile(calib_file, extrinsics);
// calibration
Calibrator calibrator;
calibrator.LoadCalibrationData(lidar_points, extrinsics);
auto time_begin = std::chrono::steady_clock::now();
calibrator.Calibrate();
auto time_end = std::chrono::steady_clock::now();
std::cout << "calib cost "
<< std::chrono::duration<double>(time_end - time_begin).count()
<< "s" << std::endl;
std::map<int32_t, Eigen::Matrix4d> refined_extrinsics =
calibrator.GetFinalTransformation();
// stitching
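  // The master cloud stays in its own frame; each slave cloud is transformed
  // by its refined extrinsic, and each sensor is rendered in its own color.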
pcl::PointCloud<pcl::PointXYZRGB>::Ptr all_cloud(
new pcl::PointCloud<pcl::PointXYZRGB>);
auto master_iter = lidar_points.find(0);
pcl::PointCloud<pcl::PointXYZI> master_pc = master_iter->second;
for (auto src : master_pc.points) {
int32_t master_id = 0;
pcl::PointXYZRGB point;
point.x = src.x;
point.y = src.y;
point.z = src.z;
point.r = color_map[master_id % 7][0];
point.g = color_map[master_id % 7][1];
point.b = color_map[master_id % 7][2];
all_cloud->push_back(point);
}
for (auto iter = refined_extrinsics.begin(); iter != refined_extrinsics.end();
iter++) {
int32_t slave_id = iter->first;
Eigen::Matrix4d transform = iter->second;
auto slave_iter = lidar_points.find(slave_id);
pcl::PointCloud<pcl::PointXYZI> slave_pc = slave_iter->second;
pcl::PointCloud<pcl::PointXYZI> trans_cloud;
pcl::transformPointCloud(slave_pc, trans_cloud, transform);
for (auto src : trans_cloud.points) {
pcl::PointXYZRGB point;
point.x = src.x;
point.y = src.y;
point.z = src.z;
point.r = color_map[slave_id % 7][0];
point.g = color_map[slave_id % 7][1];
point.b = color_map[slave_id % 7][2];
all_cloud->push_back(point);
}
}
all_cloud->height = 1;
all_cloud->width = all_cloud->points.size();
std::string path = "stitching.pcd";
pcl::io::savePCDFileBinary(path, *all_cloud);
return 0;
}
| 5,193 | 34.575342 | 80 | cpp |
null | LOCA-main/README.md | # Learning Operators with Coupled Attention

Code and data accompanying the manuscript titled "Learning Operators with Coupled Attention", authored by Georgios Kissas*, Jacob H. Seidman*, Leonardo Ferreira Guilhoto, Victor M. Preciado, George J.Pappas and Paris Perdikaris.
\* These authors contributed equally.
# Abstract
Supervised operator learning is an emerging machine learning paradigm with applications to modeling the evolution maps of spatio-temporal dynamical systems and approximating general black-box relationships between functional data. We propose a novel operator learning method, LOCA (Learning Operators with Coupled Attention), motivated by the attention mechanism. The input functions are mapped to a finite set of features, which are then averaged with attention weights that depend on the output query locations. By coupling these attention weights together with an integral transform, LOCA is able to explicitly learn correlations in the target output functions, enabling us to approximate nonlinear operators even when the number of output function measurements is very small. Our formulation is accompanied by rigorous approximation-theoretic guarantees on the expressiveness of the proposed model. Empirically, we evaluate the performance of LOCA on several operator learning scenarios involving systems governed by ordinary and partial differential equations, as well as a black-box climate prediction problem. Through these scenarios we demonstrate state-of-the-art accuracy, robustness with respect to noisy input data, and a consistently small spread of errors over testing data sets, even for out-of-distribution prediction tasks.
# Citation
@article{JMLR:v23:21-1521,
    author  = {Georgios Kissas and Jacob H. Seidman and Leonardo Ferreira Guilhoto and Victor M. Preciado and George J. Pappas and Paris Perdikaris},
    title   = {Learning Operators with Coupled Attention},
    journal = {Journal of Machine Learning Research},
    year    = {2022},
    volume  = {23},
    number  = {215},
    pages   = {1--63},
    url     = {http://jmlr.org/papers/v23/21-1521.html}
    }
The repository contains all the necessary code and data to reproduce the results in the paper.
You can find a LOCA tutorial with explanation for the Darcy flow example [here](https://colab.research.google.com/drive/1axxLGhgwipCSw9WQVMBklvQdW_K99E1D?usp=sharing).
The training and testing data sets accompanying the manuscript can be found [here](https://drive.google.com/uc?export=download&id=1Mv6fegJ_sk-9oiroR2AtHWwbmYV6IYbv) and the codes to plot the results as well as the data to reproduce the figures in the manuscript can be found [here](https://drive.google.com/uc?export=download&id=1M94UmzTAU9scMmEFSV83lm2_sdhAHIzx).
You can find the codes for LOCA, DeepONet and FNO used for each example in this paper under the respective folder names.
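For orientation, the snippet below is a minimal sketch (in JAX, matching the repository's language) of the coupled-attention update at the heart of LOCA: candidate features `v` extracted from the input function are averaged with attention weights that depend on the output query locations `y` through a kernel over proposal locations `z`. All names, shapes, and the Gaussian kernel choice here are illustrative assumptions rather than the exact implementation — see `Antiderivative/LOCA/LOCAAntiderivative.py` for the full model.

    import jax.numpy as jnp
    from jax import nn, random

    def coupled_attention(v, y, z, beta=1.0, gamma=1.0):
        # v: (n, ds) candidate features from the input function
        # y: (P, dy) output query locations; z: (n, dy) kernel nodes
        d2 = jnp.sum((y[:, None, :] - z[None, :, :]) ** 2, axis=-1)
        g = nn.softmax(beta * jnp.exp(-gamma * d2), axis=-1)  # (P, n) weights
        return g @ v                                          # (P, ds) outputs

    key = random.PRNGKey(0)
    v = random.normal(key, (64, 1))
    y = jnp.linspace(0, 1, 100)[:, None]
    z = jnp.linspace(0, 1, 64)[:, None]
    s_pred = coupled_attention(v, y, z)  # predicted output function at y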
## ⚠️ The LOCA methodology and code cannot be used for commercial purposes (protected by a patent at the University of Pennsylvania).⚠️
| 3,176 | 78.425 | 1,338 | md |
null | LOCA-main/Antiderivative/DeepONet/DeepONet_Antiderivative.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from numpy.polynomial import polyutils
from jax.experimental.stax import Dense, Gelu
from jax.experimental import stax
import os
from scipy.integrate import solve_ivp
import timeit
from jax.experimental import optimizers
from absl import app
import jax
import jax.numpy as jnp
import numpy as np
from jax.numpy.linalg import norm
from jax import random, grad, vmap, jit, vjp
from functools import partial
from torch.utils import data
from tqdm import trange
import itertools
import scipy.signal as signal
from kymatio.numpy import Scattering1D
from jax.experimental.ode import odeint
from jax.config import config
from numpy.polynomial.legendre import leggauss
def get_freer_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
return str(np.argmax(memory_available))
os.environ['CUDA_VISIBLE_DEVICES']= get_freer_gpu()
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']="False"
class DataGenerator(data.Dataset):
def __init__(self, u, y, s,
batch_size=100, rng_key=random.PRNGKey(1234)):
'Initialization'
self.u = u
self.y = y
self.s = s
self.N = u.shape[0]
self.batch_size = batch_size
self.key = rng_key
# @partial(jit, static_argnums=(0,))
def __getitem__(self, index):
'Generate one batch of data'
self.key, subkey = random.split(self.key)
inputs,outputs = self.__data_generation(subkey)
return inputs, outputs
@partial(jit, static_argnums=(0,))
def __data_generation(self, key):
'Generates data containing batch_size samples'
idx = random.choice(key, self.N, (self.batch_size,), replace=False)
s = self.s[idx,:,:]
u = self.u[idx,:,:]
y = self.y[idx,:,:]
inputs = (u, y)
return inputs, s
class PositionalEncodingY:
def __init__(self, Y, d_model, max_len = 100,H=20):
self.d_model = d_model
self.Y = Y
self.max_len = max_len
self.H = H
@partial(jit, static_argnums=(0,))
def forward(self, x):
self.pe = np.zeros((x.shape[0], self.max_len, self.H))
T = jnp.asarray(self.Y[:,:,0:1])
position = jnp.tile(T,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
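        # Harmonic feature map: frequencies 2^k * pi, with cosine on even and
        # sine on odd channels; the encoding is appended to the raw coordinates.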
self.pe = jax.ops.index_update(self.pe, jax.ops.index[:,:,0::2], jnp.cos(position[:,:,0::2] * div_term))
self.pe = jax.ops.index_update(self.pe, jax.ops.index[:,:,1::2], jnp.sin(position[:,:,1::2] * div_term))
x = jnp.concatenate([x, self.pe],axis=-1)
return x
class PositionalEncodingU:
def __init__(self, Y, d_model, max_len = 100,H=20):
self.d_model = d_model
self.Y = Y
self.max_len = max_len
self.H = H
@partial(jit, static_argnums=(0,))
def forward(self, x):
self.pe = np.zeros((x.shape[0], self.max_len, self.H))
T = jnp.asarray(self.Y[:,:,0:1])
position = jnp.tile(T,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
self.pe = jax.ops.index_update(self.pe, jax.ops.index[:,:,0::2], jnp.cos(position[:,:,0::2] * div_term))
self.pe = jax.ops.index_update(self.pe, jax.ops.index[:,:,1::2], jnp.sin(position[:,:,1::2] * div_term))
x = jnp.concatenate([x, self.pe],axis=-1)
return x
class DON:
def __init__(self,branch_layers, trunk_layers , m=100, P=100, mn=None, std=None):
# Network initialization and evaluation functions
seed = np.random.randint(10000)
self.branch_init, self.branch_apply = self.init_NN(branch_layers, activation=Gelu)
self.in_shape = (-1, branch_layers[0])
self.out_shape, branch_params = self.branch_init(random.PRNGKey(seed), self.in_shape)
seed = np.random.randint(10000)
self.trunk_init, self.trunk_apply = self.init_NN(trunk_layers, activation=Gelu)
self.in_shape = (-1, trunk_layers[0])
self.out_shape, trunk_params = self.trunk_init(random.PRNGKey(seed), self.in_shape)
params = (trunk_params, branch_params)
# Use optimizers to set optimizer initialization and update functions
self.opt_init,self.opt_update,self.get_params = optimizers.adam(optimizers.exponential_decay(1e-3,
decay_steps=100,
decay_rate=0.99))
self.opt_state = self.opt_init(params)
# Logger
self.itercount = itertools.count()
self.loss_log = []
self.mean = mn
self.std = std
def init_NN(self, Q, activation=Gelu):
layers = []
num_layers = len(Q)
if num_layers < 2:
net_init, net_apply = stax.serial()
else:
for i in range(0, num_layers-1):
layers.append(Dense(Q[i+1]))
layers.append(activation)
layers.append(Dense(Q[-1]))
net_init, net_apply = stax.serial(*layers)
return net_init, net_apply
@partial(jax.jit, static_argnums=0)
def DON(self, params, inputs, ds=1):
trunk_params, branch_params = params
inputsxu, inputsy = inputs
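        # DeepONet: the trunk encodes the query locations into basis functions
        # and the branch encodes the sensed input function into coefficients;
        # the prediction is their inner product over the n_hat latent features.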
t = self.trunk_apply(trunk_params, inputsy).reshape(inputsy.shape[0], inputsy.shape[1], ds, int(100/ds))
b = self.branch_apply(branch_params, inputsxu.reshape(inputsxu.shape[0],1,inputsxu.shape[1]*inputsxu.shape[2]))
b = b.reshape(b.shape[0],int(b.shape[2]/ds),ds)
Guy = jnp.einsum("ijkl,ilk->ijk", t,b)
return Guy
@partial(jax.jit, static_argnums=0)
def loss(self, params, batch):
inputs, y = batch
y_pred = self.DON(params,inputs)
y = y*self.std + self.mean
y_pred = y_pred*self.std + self.mean
        loss = jnp.mean((y.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def lossT(self, params, batch):
inputs, outputs = batch
y_pred = self.DON(params,inputs)
y_pred = y_pred*self.std + self.mean
        loss = jnp.mean((outputs.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def L2errorT(self, params, batch):
inputs, y = batch
y_pred = self.DON(params,inputs)
y_pred = y_pred*self.std + self.mean
return norm(y.flatten() - y_pred.flatten(), 2)/norm(y.flatten(),2)
@partial(jax.jit, static_argnums=0)
def L2error(self, params, batch):
inputs, y = batch
y_pred = self.DON(params,inputs)
y = y*self.std + self.mean
y_pred = y_pred*self.std + self.mean
return norm(y.flatten() - y_pred.flatten(), 2)/norm(y.flatten(),2)
@partial(jit, static_argnums=(0,))
def step(self, i, opt_state, batch):
params = self.get_params(opt_state)
g = grad(self.loss)(params, batch)
return self.opt_update(i, g, opt_state)
def train(self, train_dataset, test_dataset, nIter = 10000):
train_data = iter(train_dataset)
test_data = iter(test_dataset)
pbar = trange(nIter)
for it in pbar:
train_batch = next(train_data)
test_batch = next(test_data)
self.opt_state = self.step(next(self.itercount), self.opt_state, train_batch)
if it % 100 == 0:
params = self.get_params(self.opt_state)
loss_train = self.loss(params, train_batch)
loss_test = self.lossT(params, test_batch)
errorTrain = self.L2error(params, train_batch)
errorTest = self.L2errorT(params, test_batch)
self.loss_log.append(loss_train)
pbar.set_postfix({'Training loss': loss_train,
'Testing loss' : loss_test,
'Test error': errorTest,
'Train error': errorTrain})
@partial(jit, static_argnums=(0,))
def predict(self, params, inputs):
s_pred = self.DON(params,inputs)
return s_pred
@partial(jit, static_argnums=(0,))
def predictT(self, params, inputs):
        s_pred = self.DON(params,inputs)
        # De-normalize, matching lossT/L2errorT (the test targets are unnormalized)
        s_pred = s_pred*self.std + self.mean
        return s_pred
def ravel_list(self, *lst):
return jnp.concatenate([jnp.ravel(elt) for elt in lst]) if lst else jnp.array([])
def ravel_pytree(self, pytree):
leaves, treedef = jax.tree_util.tree_flatten(pytree)
flat, unravel_list = vjp(self.ravel_list, *leaves)
unravel_pytree = lambda flat: jax.tree_util.tree_unflatten(treedef, unravel_list(flat))
return flat, unravel_pytree
def count_params(self, params):
trunk_params, branch_params = params
blv, _ = self.ravel_pytree(branch_params)
tlv, _ = self.ravel_pytree(trunk_params)
print("The number of model parameters is:",blv.shape[0]+tlv.shape[0])
# Define RBF kernel
def RBF(x1, x2, params):
output_scale, lengthscales = params
diffs = jnp.expand_dims(x1 / lengthscales, 1) - \
jnp.expand_dims(x2 / lengthscales, 0)
r2 = jnp.sum(diffs**2, axis=2)
return output_scale * jnp.exp(-0.5 * r2)
# Generate training data corresponding to one input sample
def generate_one_training_data(key, m=100, P=1):
# Sample GP prior at a fine grid
N = 512
length_scale = 0.9
gp_params = (1.0, length_scale)
# key1, key2 = random.split(key,num=2)
# z = random.uniform(key1, minval=-2, maxval=2)
# output_scale = 10**z
# z = random.uniform(key2, minval=-2, maxval=0)
# length_scale = 10**z
# gp_params = (output_scale, length_scale)
jitter = 1e-10
X = jnp.linspace(0, 1, N)[:,None]
K = RBF(X, X, gp_params)
L = jnp.linalg.cholesky(K + jitter*jnp.eye(N))
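    # Draw a GP sample u ~ N(0, K) as L @ eps with eps ~ N(0, I)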
gp_sample = jnp.dot(L, random.normal(key, (N,)))
# Create a callable interpolation function
u_fn = lambda x, t: jnp.interp(t, X.flatten(), gp_sample)
    # Input sensor locations and measurements
x = jnp.linspace(0, 1, m)
u = vmap(u_fn, in_axes=(None,0))(0.0, x)
# Output sensor locations and measurements
y = jnp.linspace(0, 1, P)
s = odeint(u_fn, 0.0, y)
return u, y, s
# Generate test data corresponding to one input sample
def generate_one_test_data(key, m=100, P=100):
# Sample GP prior at a fine grid
N = 512
length_scale = 0.1
gp_params = (1.0, length_scale)
# key1, key2 = random.split(key,num=2)
# z = random.uniform(key1, minval=-2, maxval=2)
# output_scale = 10**z
# z = random.uniform(key2, minval=-2, maxval=0)
# length_scale = 10**z
# gp_params = (output_scale, length_scale)
jitter = 1e-10
X = jnp.linspace(0, 1, N)[:,None]
K = RBF(X, X, gp_params)
L = jnp.linalg.cholesky(K + jitter*jnp.eye(N))
gp_sample = jnp.dot(L, random.normal(key, (N,)))
# Create a callable interpolation function
u_fn = lambda x, t: jnp.interp(t, X.flatten(), gp_sample)
# Input sensor locations and measurements
x = jnp.linspace(0, 1, m)
u = vmap(u_fn, in_axes=(None,0))(0.0, x)
# Output sensor locations and measurements
y = jnp.linspace(0, 1, P)
s = odeint(u_fn, 0.0, y)
return u, y, s
# Generate training data corresponding to N input samples
def generate_training_data(key, N, m, P):
config.update("jax_enable_x64", True)
keys = random.split(key, N)
gen_fn = jit(lambda key: generate_one_training_data(key, m, P))
u_train, y_train, s_train = vmap(gen_fn)(keys)
config.update("jax_enable_x64", False)
return u_train, y_train, s_train
# Generate test data corresponding to N input samples
def generate_test_data(key, N, m, P):
config.update("jax_enable_x64", True)
keys = random.split(key, N)
gen_fn = jit(lambda key: generate_one_test_data(key, m, P))
u, y, s = vmap(gen_fn)(keys)
config.update("jax_enable_x64", False)
return u, y, s
TRAINING_ITERATIONS = 50000
P = 100
m = 1000
num_train = 1000
num_test = 1000
training_batch_size = 100
du = 1
dy = 1
ds = 1
n_hat = 100
Nx = P
index = 9
length_scale = 0.9
H_y = 2
H_u = 2
# Create the dataset
key_train = random.PRNGKey(0)
U_train, y_train, s_train = generate_training_data(key_train, num_train, m, Nx)
key_test = random.PRNGKey(12345)
U_test, y_test, s_test = generate_test_data(key_test, num_test, m, Nx)
# Make all array to be jax numpy format
y_train = jnp.asarray(y_train)
s_train = jnp.asarray(s_train)
U_train = jnp.asarray(U_train)
y_test = jnp.asarray(y_test)
s_test = jnp.asarray(s_test)
U_test = jnp.asarray(U_test)
U_train = jnp.reshape(U_train,(num_train,m,du))
y_train = jnp.reshape(y_train,(num_train,P,dy))
s_train = jnp.reshape(s_train,(num_train,P,ds))
U_test = jnp.reshape(U_test,(num_test,m,du))
y_test = jnp.reshape(y_test,(num_test,P,dy))
s_test = jnp.reshape(s_test,(num_test,P,ds))
pos_encodingy = PositionalEncodingY(y_train,int(y_train.shape[1]*y_train.shape[2]), max_len = P, H=H_y)
y_train = pos_encodingy.forward(y_train)
del pos_encodingy
pos_encodingyt = PositionalEncodingY(y_test,int(y_test.shape[1]*y_test.shape[2]), max_len = P, H=H_y)
y_test = pos_encodingyt.forward(y_test)
del pos_encodingyt
pos_encodingy = PositionalEncodingU(U_train,int(U_train.shape[1]*U_train.shape[2]), max_len = m, H=H_u)
U_train = pos_encodingy.forward(U_train)
del pos_encodingy
pos_encodingyt = PositionalEncodingU(U_test,int(U_test.shape[1]*U_test.shape[2]), max_len = m, H=H_u)
U_test = pos_encodingyt.forward(U_test)
del pos_encodingyt
s_train_mean = jnp.mean(s_train,axis=0)
s_train_std = jnp.std(s_train,axis=0) + 1e-03
s_train = (s_train - s_train_mean)/s_train_std
# Build iterable training and testing data loaders
train_dataset = DataGenerator(U_train, y_train, s_train, training_batch_size)
train_dataset = iter(train_dataset)
test_dataset = DataGenerator(U_test, y_test, s_test, training_batch_size)
test_dataset = iter(test_dataset)
branch_layers = [m*(du*H_u+du), 512, 512, ds*n_hat]
trunk_layers = [H_y*dy + dy, 512, 512, ds*n_hat]
model = DON(branch_layers, trunk_layers, m=m, P=P, mn=s_train_mean, std=s_train_std)
model.count_params(model.get_params(model.opt_state))
start_time = timeit.default_timer()
model.train(train_dataset, test_dataset, nIter=TRAINING_ITERATIONS)
elapsed = timeit.default_timer() - start_time
print("The training wall-clock time is seconds is equal to %f seconds"%elapsed)
params = model.get_params(model.opt_state)
uCNN_test = model.predictT(params, (U_test, y_test))
test_error_u = []
for i in range(0,num_test):
test_error_u.append(norm(s_test[i,:,0]- uCNN_test[i,:,0],2)/norm(s_test[i,:,0],2))
print("The average test u error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(test_error_u),np.std(test_error_u),np.min(test_error_u),np.max(test_error_u)))
uCNN_train = model.predict(params, (U_train, y_train))
train_error_u = []
for i in range(0,num_train):
train_error_u.append(norm(s_train[i,:,0]- uCNN_train[i,:,0],2)/norm(s_train[i,:,0],2))
print("The average train u error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(train_error_u),np.std(train_error_u),np.min(train_error_u),np.max(train_error_u)))
np.savez_compressed("/scratch/gkissas/Antiderivative/DON/Antiderivative_test_P%d_m%d_ls%f_id%d_DON.npz"%(P,m,length_scale,index), uCNN_super_all_test=uCNN_test, U_test=U_test, s_all_test=s_test, test_error=test_error_u) | 15,651 | 35.484848 | 219 | py |
null | LOCA-main/Antiderivative/FNO/FNOAntiderivative.py | """
@author: Zongyi Li
This file is the Fourier Neural Operator for 1D problem such as the (time-independent) Burgers equation discussed in Section 5.1 in the [paper](https://arxiv.org/pdf/2010.08895.pdf).
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import matplotlib.pyplot as plt
import operator
from functools import reduce
from functools import partial
from timeit import default_timer
from utilities3 import *
from jax import random, vmap, jit
import jax.numpy as jnp
from jax.experimental.ode import odeint
from jax.config import config
import argparse
import os
seed = np.random.randint(10000)
torch.manual_seed(seed)
np.random.seed(seed)
################################################################
# 1d fourier layer
################################################################
class SpectralConv1d(nn.Module):
def __init__(self, in_channels, out_channels, modes1):
super(SpectralConv1d, self).__init__()
"""
1D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1 #Number of Fourier modes to multiply, at most floor(N/2) + 1
self.scale = (1 / (in_channels*out_channels))
self.weights1 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, dtype=torch.cfloat))
# Complex multiplication
def compl_mul1d(self, input, weights):
# (batch, in_channel, x ), (in_channel, out_channel, x) -> (batch, out_channel, x)
return torch.einsum("bix,iox->box", input, weights)
def forward(self, x):
batchsize = x.shape[0]
        # Compute Fourier coefficients (rfft of the input along the last dimension)
x_ft = torch.fft.rfft(x)
# Multiply relevant Fourier modes
out_ft = torch.zeros(batchsize, self.out_channels, x.size(-1)//2 + 1, device=x.device, dtype=torch.cfloat)
out_ft[:, :, :self.modes1] = self.compl_mul1d(x_ft[:, :, :self.modes1], self.weights1)
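        # Only the lowest self.modes1 frequencies carry learned weights; the
        # remaining modes stay zero, acting as a learned low-pass global filter.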
#Return to physical space
x = torch.fft.irfft(out_ft, n=x.size(-1))
return x
class FNO1d(nn.Module):
def __init__(self, modes, width):
super(FNO1d, self).__init__()
"""
The overall network. It contains 4 layers of the Fourier layer.
1. Lift the input to the desire channel dimension by self.fc0 .
2. 4 layers of the integral operators u' = (W + K)(u).
W defined by self.w; K defined by self.conv .
3. Project from the channel space to the output space by self.fc1 and self.fc2 .
input: the solution of the initial condition and location (a(x), x)
input shape: (batchsize, x=s, c=2)
output: the solution of a later timestep
output shape: (batchsize, x=s, c=1)
"""
self.modes1 = modes
self.width = width
self.padding = 2 # pad the domain if input is non-periodic
self.fc0 = nn.Linear(2, self.width) # input channel is 2: (a(x), x)
self.conv0 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv1 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv2 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv3 = SpectralConv1d(self.width, self.width, self.modes1)
self.w0 = nn.Conv1d(self.width, self.width, 1)
self.w1 = nn.Conv1d(self.width, self.width, 1)
self.w2 = nn.Conv1d(self.width, self.width, 1)
self.w3 = nn.Conv1d(self.width, self.width, 1)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, 1)
def forward(self, x):
grid = self.get_grid(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
x = self.fc0(x)
x = x.permute(0, 2, 1)
x = F.pad(x, [0,self.padding]) # pad the domain if input is non-periodic
x1 = self.conv0(x)
x2 = self.w0(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv1(x)
x2 = self.w1(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv2(x)
x2 = self.w2(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv3(x)
x2 = self.w3(x)
x = x1 + x2
x = x[..., :-self.padding] # pad the domain if input is non-periodic
x = x.permute(0, 2, 1)
x = self.fc1(x)
x = F.gelu(x)
x = self.fc2(x)
return x
def get_grid(self, shape, device):
batchsize, size_x = shape[0], shape[1]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1).repeat([batchsize, 1, 1])
return gridx.to(device)
# Define RBF kernel
def RBF(x1, x2, params):
output_scale, lengthscales = params
diffs = jnp.expand_dims(x1 / lengthscales, 1) - \
jnp.expand_dims(x2 / lengthscales, 0)
r2 = jnp.sum(diffs**2, axis=2)
return output_scale * jnp.exp(-0.5 * r2)
# Generate training data corresponding to one input sample
def generate_one_training_data(key, m=100, P=1, ls=1):
# Sample GP prior at a fine grid
N = 512
# length_scale = ls
# gp_params = (1.0, length_scale)
key1, key2 = random.split(key,num=2)
z = random.uniform(key1, minval=-2, maxval=2)
output_scale = 10**z
z = random.uniform(key2, minval=-2, maxval=0)
length_scale = 10**z
gp_params = (output_scale, length_scale)
jitter = 1e-10
X = jnp.linspace(0, 1, N)[:,None]
K = RBF(X, X, gp_params)
L = jnp.linalg.cholesky(K + jitter*jnp.eye(N))
gp_sample = jnp.dot(L, random.normal(key, (N,)))
# Create a callable interpolation function
u_fn = lambda x, t: jnp.interp(t, X.flatten(), gp_sample)
    # Input sensor locations and measurements
x = jnp.linspace(0, 1, m)
u = vmap(u_fn, in_axes=(None,0))(0.0, x)
# Output sensor locations and measurements
y = jnp.linspace(0, 1, P)
s = odeint(u_fn, 0.0, y)
return u, y, s
# Generate test data corresponding to one input sample
def generate_one_test_data(key, m=100, P=100, ls =0.1):
# Sample GP prior at a fine grid
N = 512
# length_scale = ls
# gp_params = (1.0, length_scale)
key1, key2 = random.split(key,num=2)
z = random.uniform(key1, minval=-2, maxval=2)
output_scale = 10**z
z = random.uniform(key2, minval=-2, maxval=0)
length_scale = 10**z
gp_params = (output_scale, length_scale)
jitter = 1e-10
X = jnp.linspace(0, 1, N)[:,None]
K = RBF(X, X, gp_params)
L = jnp.linalg.cholesky(K + jitter*jnp.eye(N))
gp_sample = jnp.dot(L, random.normal(key, (N,)))
# Create a callable interpolation function
u_fn = lambda x, t: jnp.interp(t, X.flatten(), gp_sample)
# Input sensor locations and measurements
x = jnp.linspace(0, 1, m)
u = vmap(u_fn, in_axes=(None,0))(0.0, x)
# Output sensor locations and measurements
y = jnp.linspace(0, 1, P)
s = odeint(u_fn, 0.0, y)
return u, y, s
# Generate training data corresponding to N input samples
def generate_training_data(key, N, m, P, ls):
config.update("jax_enable_x64", True)
keys = random.split(key, N)
gen_fn = jit(lambda key: generate_one_training_data(key, m, P, ls))
u_train, y_train, s_train = vmap(gen_fn)(keys)
config.update("jax_enable_x64", False)
return u_train, y_train, s_train
# Generate test data corresponding to N input samples
def generate_test_data(key, N, m, P, ls):
config.update("jax_enable_x64", True)
keys = random.split(key, N)
gen_fn = jit(lambda key: generate_one_test_data(key, m, P, ls))
u, y, s = vmap(gen_fn)(keys)
config.update("jax_enable_x64", False)
return u, y, s
################################################################
# configurations
################################################################
def main(l,id):
ntrain = 1000
ntest = 1000
m = 1000
Nx = 1000
h = 1000
s = h
batch_size = 100
learning_rate = 0.001
epochs = 500
step_size = 100
gamma = 0.5
modes = 32
width = 100
length_scale = int(l)
ind = id
P = 100
################################################################
# read data
################################################################
# Data is of the shape (number of samples, grid size)
print('The lengthscale is %.2f'%(0.1*l))
key_train = random.PRNGKey(0)
U_train, y_train, s_train = generate_training_data(key_train, ntrain, m, Nx, 0.1*l)
key_test = random.PRNGKey(12345)
U_test, y_test, s_test = generate_test_data(key_test, ntest, m, Nx, 0.1)
dtype_double = torch.FloatTensor
cdtype_double = torch.cuda.DoubleTensor
x_train = torch.from_numpy(np.asarray(U_train)).type(dtype_double).reshape(ntrain,s,1)
y_train = torch.from_numpy(np.asarray(s_train)).type(dtype_double).reshape(ntrain,s,1)
    x_test = torch.from_numpy(np.asarray(U_test)).type(dtype_double).reshape(ntest,s,1)
    y_test = torch.from_numpy(np.asarray(s_test)).type(dtype_double).reshape(ntest,s,1)
ind_train = torch.randint(s, (ntrain, P))
ind_test = torch.randint(s, (ntest, P))
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_train, y_train, ind_train), batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test, ind_test), batch_size=batch_size, shuffle=True)
################################################################
# training and evaluation
################################################################
batch_ind = torch.arange(batch_size).reshape(-1, 1).repeat(1, P)
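    # batch_ind pairs with the random index tensors so that, per sample, only P
    # randomly chosen output locations enter the loss (advanced indexing below).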
# model
model = FNO1d(modes, width).cuda()
print(count_params(model))
################################################################
# training and evaluation
################################################################
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False)
for ep in range(epochs):
model.train()
t1 = default_timer()
train_mse = 0
train_l2 = 0
for x, y, idx in train_loader:
x, y = x.cuda(), y.cuda()
optimizer.zero_grad()
out = model(x)
y = y[batch_ind, idx]
out = out[batch_ind, idx]
l2 = myloss(out.view(batch_size,-1), y.view(batch_size,-1))
# l2.backward()
mse = F.mse_loss(out.view(batch_size, -1), y.view(batch_size, -1), reduction='mean')
mse.backward()
optimizer.step()
train_mse += mse.item()
train_l2 += l2.item()
scheduler.step()
model.eval()
test_l2 = 0.0
with torch.no_grad():
for x, y, idx in test_loader:
x, y = x.cuda(), y.cuda()
out = model(x)
y = y[batch_ind, idx]
out = out[batch_ind, idx]
test_l2 += myloss(out.view(batch_size, -1), y.view(batch_size, -1)).item()
train_mse /= len(train_loader)
train_l2 /= ntrain
test_l2 /= ntest
t2 = default_timer()
print(ep, t2-t1, train_mse, train_l2, test_l2)
    x_test = torch.from_numpy(np.asarray(U_test)).type(dtype_double).reshape(ntest,s,1)
    y_test = torch.from_numpy(np.asarray(s_test)).type(dtype_double).reshape(ntest,s,1)
pred_torch = torch.zeros(y_test.shape)
baseline_torch = torch.zeros(y_test.shape)
index = 0
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test), batch_size=1, shuffle=False)
test_error_u = []
test_error_u_np = []
with torch.no_grad():
for x, y in test_loader:
test_l2 = 0
x, y = x.cuda(), y.cuda()
out = model(x)
pred_torch[index] = out
baseline_torch[index,:,:] = y[:,:,:]
test_l2 += myloss(out.view(1, -1), y.view(1, -1)).item()
test_error_u.append(test_l2)
test_error_u_np.append(np.linalg.norm(y.view(-1).cpu().numpy()- out.view(-1).cpu().numpy(),2)/np.linalg.norm(y.view(-1).cpu().numpy(),2))
# print(index, test_l2)
index = index + 1
print("The average test u error (no noise) is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(test_error_u),np.std(test_error_u),np.min(test_error_u),np.max(test_error_u)))
print("The average test u error (no noise) is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(test_error_u_np),np.std(test_error_u_np),np.min(test_error_u_np),np.max(test_error_u_np)))
    # To test robustness to input noise, uncomment the two lines below to add
    # 5% Gaussian noise to the test inputs; as shipped, this second evaluation
    # (labeled "noise" in the prints) repeats the clean evaluation.
    # in_noise_test = 0.05*np.random.normal(loc=0.0, scale=1.0, size=(U_test.shape))
    # U_test = U_test + in_noise_test
x_test = torch.from_numpy(np.asarray(U_test)).type(dtype_double).reshape(ntrain,s,1)
y_test = torch.from_numpy(np.asarray(s_test)).type(dtype_double).reshape(ntrain,s,1)
pred_torch = torch.zeros(y_test.shape)
baseline_torch = torch.zeros(y_test.shape)
index = 0
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test), batch_size=1, shuffle=False)
test_error_u = []
test_error_u_np = []
with torch.no_grad():
for x, y in test_loader:
test_l2 = 0
x, y = x.cuda(), y.cuda()
out = model(x)
pred_torch[index] = out
baseline_torch[index,:,:] = y[:,:,:]
test_l2 += myloss(out.view(1, -1), y.view(1, -1)).item()
test_error_u.append(test_l2)
test_error_u_np.append(np.linalg.norm(y.view(-1).cpu().numpy()- out.view(-1).cpu().numpy(),2)/np.linalg.norm(y.view(-1).cpu().numpy(),2))
# print(index, test_l2)
index = index + 1
print("The average test u error (noise) is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(test_error_u),np.std(test_error_u),np.min(test_error_u),np.max(test_error_u)))
print("The average test u error (noise) is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(test_error_u_np),np.std(test_error_u_np),np.min(test_error_u_np),np.max(test_error_u_np)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process model parameters.')
parser.add_argument('l', metavar='l', type=int, nargs='+', help='Lenghtscale of test dataset')
parser.add_argument('id', metavar='id', type=int, nargs='+', help='Index of the run')
args = parser.parse_args()
l = args.l[0]
id = args.id[0]
main(l,id) | 14,906 | 36.360902 | 226 | py |
null | LOCA-main/Antiderivative/FNO/utilities3.py | import torch
import numpy as np
import scipy.io
import h5py
import torch.nn as nn
import operator
from functools import reduce
from functools import partial
import os
#################################################
#
# Utilities
#
#################################################
def get_freer_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
return str(np.argmax(memory_available))
os.environ['CUDA_VISIBLE_DEVICES']= get_freer_gpu()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# reading data
class MatReader(object):
def __init__(self, file_path, to_torch=True, to_cuda=False, to_float=True):
super(MatReader, self).__init__()
self.to_torch = to_torch
self.to_cuda = to_cuda
self.to_float = to_float
self.file_path = file_path
self.data = None
self.old_mat = None
self._load_file()
def _load_file(self):
try:
self.data = scipy.io.loadmat(self.file_path)
self.old_mat = True
except:
self.data = h5py.File(self.file_path)
self.old_mat = False
def load_file(self, file_path):
self.file_path = file_path
self._load_file()
def read_field(self, field):
x = self.data[field]
if not self.old_mat:
x = x[()]
x = np.transpose(x, axes=range(len(x.shape) - 1, -1, -1))
if self.to_float:
x = x.astype(np.float32)
if self.to_torch:
x = torch.from_numpy(x)
if self.to_cuda:
x = x.cuda()
return x
def set_cuda(self, to_cuda):
self.to_cuda = to_cuda
def set_torch(self, to_torch):
self.to_torch = to_torch
def set_float(self, to_float):
self.to_float = to_float
# normalization, pointwise gaussian
class UnitGaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(UnitGaussianNormalizer, self).__init__()
# x could be in shape of ntrain*n or ntrain*T*n or ntrain*n*T
self.mean = torch.mean(x, 0)
self.std = torch.std(x, 0)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
if sample_idx is None:
std = self.std + self.eps # n
mean = self.mean
else:
if len(self.mean.shape) == len(sample_idx[0].shape):
std = self.std[sample_idx] + self.eps # batch*n
mean = self.mean[sample_idx]
if len(self.mean.shape) > len(sample_idx[0].shape):
std = self.std[:,sample_idx]+ self.eps # T*batch*n
mean = self.mean[:,sample_idx]
# x is in shape of batch*n or T*batch*n
x = (x * std) + mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
# normalization, Gaussian
class GaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(GaussianNormalizer, self).__init__()
self.mean = torch.mean(x)
self.std = torch.std(x)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
x = (x * (self.std + self.eps)) + self.mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
# normalization, scaling by range
class RangeNormalizer(object):
def __init__(self, x, low=0.0, high=1.0):
super(RangeNormalizer, self).__init__()
mymin = torch.min(x, 0)[0].view(-1)
mymax = torch.max(x, 0)[0].view(-1)
self.a = (high - low)/(mymax - mymin)
self.b = -self.a*mymax + high
def encode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = self.a*x + self.b
x = x.view(s)
return x
def decode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = (x - self.b)/self.a
x = x.view(s)
return x
#loss function with rel/abs Lp loss
class LpLoss(object):
def __init__(self, d=2, p=2, size_average=True, reduction=True):
super(LpLoss, self).__init__()
        # Dimension and Lp-norm type are positive
assert d > 0 and p > 0
self.d = d
self.p = p
self.reduction = reduction
self.size_average = size_average
def abs(self, x, y):
num_examples = x.size()[0]
#Assume uniform mesh
h = 1.0 / (x.size()[1] - 1.0)
all_norms = (h**(self.d/self.p))*torch.norm(x.view(num_examples,-1) - y.view(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(all_norms)
else:
return torch.sum(all_norms)
return all_norms
def rel(self, x, y):
num_examples = x.size()[0]
diff_norms = torch.norm(x.reshape(num_examples,-1) - y.reshape(num_examples,-1), self.p, 1)
y_norms = torch.norm(y.reshape(num_examples,-1), self.p, 1)
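        # Per-sample relative error ||x - y||_p / ||y||_p, then reduced over the batch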
if self.reduction:
if self.size_average:
return torch.mean(diff_norms/y_norms)
else:
return torch.sum(diff_norms/y_norms)
return diff_norms/y_norms
def __call__(self, x, y):
return self.rel(x, y)
# Sobolev norm (HS norm)
# where we also compare the numerical derivatives between the output and target
class HsLoss(object):
def __init__(self, d=2, p=2, k=1, a=None, group=False, size_average=True, reduction=True):
super(HsLoss, self).__init__()
#Dimension and Lp-norm type are postive
assert d > 0 and p > 0
self.d = d
self.p = p
self.k = k
self.balanced = group
self.reduction = reduction
self.size_average = size_average
        if a is None:
a = [1,] * k
self.a = a
def rel(self, x, y):
num_examples = x.size()[0]
diff_norms = torch.norm(x.reshape(num_examples,-1) - y.reshape(num_examples,-1), self.p, 1)
y_norms = torch.norm(y.reshape(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(diff_norms/y_norms)
else:
return torch.sum(diff_norms/y_norms)
return diff_norms/y_norms
def __call__(self, x, y, a=None):
nx = x.size()[1]
ny = x.size()[2]
k = self.k
balanced = self.balanced
a = self.a
x = x.view(x.shape[0], nx, ny, -1)
y = y.view(y.shape[0], nx, ny, -1)
k_x = torch.cat((torch.arange(start=0, end=nx//2, step=1),torch.arange(start=-nx//2, end=0, step=1)), 0).reshape(nx,1).repeat(1,ny)
k_y = torch.cat((torch.arange(start=0, end=ny//2, step=1),torch.arange(start=-ny//2, end=0, step=1)), 0).reshape(1,ny).repeat(nx,1)
k_x = torch.abs(k_x).reshape(1,nx,ny,1).to(x.device)
k_y = torch.abs(k_y).reshape(1,nx,ny,1).to(x.device)
x = torch.fft.fftn(x, dim=[1, 2])
y = torch.fft.fftn(y, dim=[1, 2])
if balanced==False:
weight = 1
if k >= 1:
weight += a[0]**2 * (k_x**2 + k_y**2)
if k >= 2:
weight += a[1]**2 * (k_x**4 + 2*k_x**2*k_y**2 + k_y**4)
weight = torch.sqrt(weight)
loss = self.rel(x*weight, y*weight)
else:
loss = self.rel(x, y)
if k >= 1:
weight = a[0] * torch.sqrt(k_x**2 + k_y**2)
loss += self.rel(x*weight, y*weight)
if k >= 2:
weight = a[1] * torch.sqrt(k_x**4 + 2*k_x**2*k_y**2 + k_y**4)
loss += self.rel(x*weight, y*weight)
loss = loss / (k+1)
return loss
# A simple feedforward neural network
class DenseNet(torch.nn.Module):
def __init__(self, layers, nonlinearity, out_nonlinearity=None, normalize=False):
super(DenseNet, self).__init__()
self.n_layers = len(layers) - 1
assert self.n_layers >= 1
self.layers = nn.ModuleList()
for j in range(self.n_layers):
self.layers.append(nn.Linear(layers[j], layers[j+1]))
if j != self.n_layers - 1:
if normalize:
self.layers.append(nn.BatchNorm1d(layers[j+1]))
self.layers.append(nonlinearity())
if out_nonlinearity is not None:
self.layers.append(out_nonlinearity())
def forward(self, x):
for _, l in enumerate(self.layers):
x = l(x)
return x
# print the number of parameters
def count_params(model):
c = 0
for p in list(model.parameters()):
c += reduce(operator.mul, list(p.size()))
return c
| 9,157 | 27.798742 | 139 | py |
null | LOCA-main/Antiderivative/LOCA/LOCAAntiderivative.py | import jax
import jax.numpy as jnp
from jax.example_libraries.stax import Dense, Gelu
from jax.example_libraries import stax
from jax.example_libraries import optimizers
from jax.example_libraries.ode import odeint
from jax.config import config
import timeit
import numpy as np
from jax.numpy.linalg import norm
from jax import random, grad, vmap, jit
from functools import partial
from torch.utils import data
from jax.flatten_util import ravel_pytree
from tqdm import trange
import itertools
from kymatio.numpy import Scattering1D
from numpy.polynomial.legendre import leggauss
import os
def pairwise_distances(dist,**arg):
return jit(vmap(vmap(partial(dist,**arg),in_axes=(None,0)),in_axes=(0,None)))
def euclid_distance(x,y):
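    # Squared Euclidean distance via the ||x||^2 + ||y||^2 - 2<x,y> identity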
XX=jnp.dot(x,x)
YY=jnp.dot(y,y)
XY=jnp.dot(x,y)
return XX+YY-2*XY
class DataGenerator(data.Dataset):
def __init__(self, inputsxu, y, s, z, w,
batch_size=100, rng_key=random.PRNGKey(1234)):
'Initialization'
self.inputsxu = inputsxu
self.y = y
self.s = s
self.z = z
self.w = w
self.N = inputsxu.shape[0]
self.batch_size = batch_size
self.key = rng_key
# @partial(jit, static_argnums=(0,))
def __getitem__(self, index):
'Generate one batch of data'
self.key, subkey = random.split(self.key)
inputs,outputs = self.__data_generation(subkey)
return inputs, outputs
@partial(jit, static_argnums=(0,))
def __data_generation(self, key):
'Generates data containing batch_size samples'
idx = random.choice(key, self.N, (self.batch_size,), replace=False)
s = self.s[idx,:,:]
inputsxu = self.inputsxu[idx,:,:]
y = self.y[idx,:,:]
z = self.z[idx,:,:]
w = self.w[idx,:,:]
inputs = (inputsxu, y, z, w)
return inputs, s
class PositionalEncodingY:
def __init__(self, Y, d_model, max_len = 100,H=20):
self.d_model = d_model
self.Y = Y
self.max_len = max_len
self.H = H
@partial(jit, static_argnums=(0,))
def forward(self, x):
self.pe = np.zeros((x.shape[0], self.max_len, self.H))
T = jnp.asarray(self.Y[:,:,0:1])
position = jnp.tile(T,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
self.pe = jax.ops.index_update(self.pe, jax.ops.index[:,:,0::2], jnp.cos(position[:,:,0::2] * div_term))
self.pe = jax.ops.index_update(self.pe, jax.ops.index[:,:,1::2], jnp.sin(position[:,:,1::2] * div_term))
x = jnp.concatenate([x, self.pe],axis=-1)
return x
def scatteringTransform(sig, l=100, m=100, training_batch_size = 100):
J = 4
Q = 8
T = sig.shape[1]
scattering = Scattering1D(J, T, Q)
sig = np.asarray(sig)
sctcoef = np.zeros((training_batch_size, 1550, 1))
for i in range(0,training_batch_size):
sctcoef[i,:,:] = scattering(sig[i,:,0]).flatten()[:,None]
return sctcoef
class LpLoss(object):
def __init__(self, d=2, p=2, size_average=True, reduction=True):
super(LpLoss, self).__init__()
#Dimension and Lp-norm type are postive
assert d > 0 and p > 0
self.d = d
self.p = p
self.reduction = reduction
self.size_average = size_average
def rel(self, x, y):
num_examples = x.shape[0]
diff_norms = jnp.linalg.norm(x.reshape(num_examples,-1) - y.reshape(num_examples,-1), self.p, 1)
y_norms = jnp.linalg.norm(y.reshape(num_examples,-1), self.p, 1)
return jnp.mean(diff_norms/y_norms)
def __call__(self, x, y):
return self.rel(x, y)
class LOCA:
def __init__(self, q_layers, g_layers, v_layers, jac_det=None):
# Network initialization and evaluation functions
seed = np.random.randint(10000)
self.q_init, self.q_apply = self.init_NN(q_layers, activation=Gelu)
self.in_shape = (-1, q_layers[0])
self.out_shape, q_params = self.q_init(random.PRNGKey(seed), self.in_shape)
seed = np.random.randint(10000)
self.v_init, self.v_apply = self.init_NN(v_layers, activation=Gelu)
self.in_shape = (-1, v_layers[0])
self.out_shape, v_params = self.v_init(random.PRNGKey(seed), self.in_shape)
seed = np.random.randint(10000)
self.g_init, self.g_apply = self.init_NN(g_layers, activation=Gelu)
self.in_shape = (-1, g_layers[0])
self.out_shape, g_params = self.g_init(random.PRNGKey(seed), self.in_shape)
# RBF kernel parameters
beta = [1.]
gamma = [1.]
# Model parameters
params = (beta, gamma,q_params, g_params, v_params)
self.opt_init,self.opt_update,self.get_params = optimizers.adam(optimizers.exponential_decay(1e-3,
decay_steps=100,
decay_rate=0.99))
self.opt_state = self.opt_init(params)
self.itercount = itertools.count()
self.loss_log = []
self.l2loss = LpLoss(size_average=False)
self.jac_det = jac_det
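        # jac_det scales the quadrature weights w when approximating the kernel
        # integral transform (presumably the Jacobian of the map from the
        # reference integration domain).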
self.vdistance_function = vmap(pairwise_distances(euclid_distance))
def init_NN(self, Q, activation=Gelu):
layers = []
num_layers = len(Q)
if num_layers < 2:
net_init, net_apply = stax.serial()
else:
for i in range(0, num_layers-2):
layers.append(Dense(Q[i+1]))
layers.append(activation)
layers.append(Dense(Q[-1]))
net_init, net_apply = stax.serial(*layers)
return net_init, net_apply
def LOCA_net(self, params, inputs, ds=1):
beta, gamma, q_params, g_params, v_params = params
inputsxu, inputsy, inputsz, w = inputs
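        # LOCA forward pass: (1) lift query locations y and quadrature nodes z
        # with the q-network; (2) build a Gaussian kernel K and normalize it by
        # the Nystrom-style factor assembled from Kzz and Kyz; (3) smooth the
        # g-network scores with the kernel integral (quadrature weights w) and
        # softmax over the feature index to get attention weights; (4) average
        # the v-network features of the input function with those weights.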
inputsy = self.q_apply(q_params,inputsy)
inputsz = self.q_apply(q_params,inputsz)
d = self.vdistance_function(inputsz, inputsz)
K = beta[0]*jnp.exp(-gamma[0]*d)
Kzz = jnp.sqrt(self.jac_det*jnp.einsum("ijk,ikl->ijl",K,w))
d = self.vdistance_function(inputsy, inputsz)
K = beta[0]*jnp.exp(-gamma[0]*d)
Kyz = jnp.sqrt(self.jac_det*jnp.einsum("ijk,ikl->ijl",K,w))
mean_K = jnp.matmul(Kyz, jnp.swapaxes(Kzz,1,2))
K = jnp.divide(K,mean_K)
g = self.g_apply(g_params, inputsz)
g = self.jac_det*jnp.einsum("ijk,iklm,ik->ijlm",K,g.reshape(g.shape[0],g.shape[1], ds, int(g.shape[-1]/ds)),w[:,:,-1])
g = jax.nn.softmax(g, axis=-1)
v = self.v_apply(v_params, inputsxu.reshape(inputsxu.shape[0],1,inputsxu.shape[1]*inputsxu.shape[2]))
v = v.reshape(v.shape[0],int(v.shape[2]/ds),ds)
attn_vec = jnp.einsum("ijkl,ilk->ijk", g,v)
return attn_vec
@partial(jax.jit, static_argnums=0)
def loss(self, params, batch):
inputs, outputs = batch
y_pred = self.LOCA_net(params,inputs)
loss = jnp.mean((outputs.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def L2error(self, params, batch):
inputs, outputs = batch
y_pred = self.LOCA_net(params,inputs)
return norm(outputs.flatten() - y_pred.flatten(), 2)/norm(outputs.flatten(),2)
@partial(jit, static_argnums=(0,))
def step(self, i, opt_state, batch):
params = self.get_params(opt_state)
g = grad(self.loss)(params, batch)
return self.opt_update(i, g, opt_state)
def train(self, train_dataset, test_dataset, nIter = 10000):
train_data = iter(train_dataset)
test_data = iter(test_dataset)
pbar = trange(nIter)
for it in pbar:
train_batch = next(train_data)
test_batch = next(test_data)
self.opt_state = self.step(next(self.itercount), self.opt_state, train_batch)
if it % 100 == 0:
params = self.get_params(self.opt_state)
loss_train = self.loss(params, train_batch)
loss_test = self.loss(params, test_batch)
errorTrain = self.L2error(params, train_batch)
errorTest = self.L2error(params, test_batch)
self.loss_log.append(loss_train)
pbar.set_postfix({'Training loss': loss_train,
'Testing loss' : loss_test,
'Test error': errorTest,
'Train error': errorTrain})
@partial(jit, static_argnums=(0,))
def predict(self, params, inputs):
s_pred = self.LOCA_net(params,inputs)
return s_pred
def count_params(self, params):
params_flat, _ = ravel_pytree(params)
print("The number of model parameters is:",params_flat.shape[0])
# Define RBF kernel
def RBF(x1, x2, params):
output_scale, lengthscales = params
diffs = jnp.expand_dims(x1 / lengthscales, 1) - \
jnp.expand_dims(x2 / lengthscales, 0)
r2 = jnp.sum(diffs**2, axis=2)
return output_scale * jnp.exp(-0.5 * r2)
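# The kernel above evaluates k(x, x') = output_scale * exp(-||x/l - x'/l||^2 / 2)
# for all pairs of rows of x1 and x2, returning an (N1, N2) covariance matrix;
# output_scale plays the role of the signal variance and lengthscales of l.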
# Generate training data corresponding to one input sample
def generate_one_training_data(key, m=100, P=1):
# Sample GP prior at a fine grid
N = 512
length_scale = 0.3
gp_params = (1.0, length_scale)
key1= random.split(key,num=2)
z = random.uniform(key1[0], minval=-2, maxval=2)
output_scale = 10**z
z = random.uniform(key1[1], minval=-2, maxval=0)
length_scale = 10**z
gp_params = (output_scale, length_scale)
jitter = 1e-10
X = jnp.linspace(0, 1, N)[:,None]
K = RBF(X, X, gp_params)
L = jnp.linalg.cholesky(K + jitter*jnp.eye(N))
gp_sample = jnp.dot(L, random.normal(key, (N,)))
# Create a callable interpolation function
u_fn = lambda x, t: jnp.interp(t, X.flatten(), gp_sample)
# Input sensor locations and measurements
x = jnp.linspace(0, 1, m)
u = vmap(u_fn, in_axes=(None,0))(0.0, x)
# Output sensor locations and measurements
y_train = random.uniform(key, (P,)).sort()
s_train = odeint(u_fn, 0.0, jnp.hstack((0.0, y_train)))[1:] # JAX has a bug and always returns s(0), so add a dummy entry to y and return s[1:]
return u, y_train, s_train, length_scale
# Generate test data corresponding to one input sample
def generate_one_test_data(key, m=100, P=100):
# Sample GP prior at a fine grid
N = 512
length_scale = 0.3
gp_params = (1.0, length_scale)
key1, key2 = random.split(key,num=2)
z = random.uniform(key1, minval=-2, maxval=2)
output_scale = 10**z
z = random.uniform(key2, minval=-2, maxval=0)
length_scale = 10**z
gp_params = (output_scale, length_scale)
jitter = 1e-10
X = jnp.linspace(0, 1, N)[:,None]
K = RBF(X, X, gp_params)
L = jnp.linalg.cholesky(K + jitter*jnp.eye(N))
gp_sample = jnp.dot(L, random.normal(key, (N,)))
# Create a callable interpolation function
u_fn = lambda x, t: jnp.interp(t, X.flatten(), gp_sample)
# Input sensor locations and measurements
x = jnp.linspace(0, 1, m)
u = vmap(u_fn, in_axes=(None,0))(0.0, x)
# Output sensor locations and measurements
y = jnp.linspace(0, 1, P)
s = odeint(u_fn, 0.0, y)
return u, y, s, length_scale
# Generate training data corresponding to N input samples
def generate_training_data(key, N, m, P):
config.update("jax_enable_x64", True)
keys = random.split(key, N)
gen_fn = jit(lambda key: generate_one_training_data(key, m, P))
u_train, y_train, s_train, l_train = vmap(gen_fn)(keys)
config.update("jax_enable_x64", False)
return u_train, y_train, s_train, l_train
# Generate test data corresponding to N input samples
def generate_test_data(key, N, m, P):
config.update("jax_enable_x64", True)
keys = random.split(key, N)
gen_fn = jit(lambda key: generate_one_test_data(key, m, P))
u, y, s, l = vmap(gen_fn)(keys)
config.update("jax_enable_x64", False)
return u, y, s, l
TRAINING_ITERATIONS = 50000
P = 100
m = 500
L = 1
T = 1
N_hat = 1
num_train = 1000
num_test = 1000
training_batch_size = 100
du = 1
dy = 1
ds = 1
n_hat = 100
l = 100
Nx = P
H = 10
index = 0
length_scale = 1.1
# Number of GLL quadrature points, coordinates and weights
polypoints = 100
z, w = leggauss(polypoints)
lb = np.array([0.0])
ub = np.array([1.0])
# Map [-1,1] -> [0,1]
z = 0.5*(ub - lb)*(z + 1.0) + lb
jac_det = 0.5*(ub-lb)
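# Sanity-check sketch (illustrative only, never called by the pipeline): the
# mapped Gauss-Legendre rule should reproduce integrals over [0, 1] exactly
# for low-order polynomials, e.g. the integral of x^2 equals 1/3.
def _quadrature_demo(n=100):
    zz, ww = leggauss(n)
    zz = 0.5*(zz + 1.0)              # map [-1, 1] -> [0, 1]
    approx = 0.5*np.sum(ww*zz**2)    # jacobian of the map is 0.5
    assert abs(approx - 1.0/3.0) < 1e-10
    return approx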
# Reshape both weights and coordinates. We need them to have shape: (num_train, N, dy)
z = np.tile(np.expand_dims(z,0),(num_train,1))[:,:,None]
w = np.tile(np.expand_dims(w,0),(num_train,1))[:,:,None]
# Create the dataset
key_train = random.PRNGKey(0)
U_train, y_train, s_train, l_train = generate_training_data(key_train, num_train, m, Nx)
key_test = random.PRNGKey(12345)
U_test, y_test, s_test, l_test = generate_test_data(key_test, num_test, m, Nx)
# Make all array to be jax numpy format
y_train = jnp.asarray(y_train)
s_train = jnp.asarray(s_train)
y_test = jnp.asarray(y_test)
s_test = jnp.asarray(s_test)
z = jnp.asarray(z)
w = jnp.asarray(w)
U_train = np.reshape(U_train,(num_train,m,du))
y_train = jnp.reshape(y_train,(num_train,P,dy))
s_train = jnp.reshape(s_train,(num_train,P,ds))
U_test = np.reshape(U_test,(num_test,m,du))
y_test = jnp.reshape(y_test,(num_test,P,dy))
s_test = jnp.reshape(s_test,(num_test,P,ds))
z = jnp.reshape(z,(num_train,polypoints,dy))
w = jnp.reshape(w,(num_train,polypoints,dy))
plot=False
if plot:
import matplotlib.pyplot as plt
pltN = 10
for i in range(0,pltN-1):
plt.plot(y_train[i,:,0], s_train[i,:,0], 'r-')
plt.plot(y_test[i,:,0], s_test[i,:,0], 'b-')
plt.plot(y_train[pltN,:,0], s_train[pltN,:,0], 'r-', label="Training output")
plt.plot(y_test[pltN,:,0], s_test[pltN,:,0], 'b-', label="Testing output")
plt.legend()
plt.show()
x = jnp.linspace(0,1,num=m)
pltN = 10
for i in range(0,pltN-1):
plt.plot(x, np.asarray(U_train)[i,:,0], 'y-')
plt.plot(x, np.asarray(U_test)[i,:,0], 'g-')
plt.plot(x, np.asarray(U_train)[pltN,:,0], 'y-', label="Training input")
plt.plot(x, np.asarray(U_test)[pltN,:,0], 'g-', label="Testing input")
plt.legend()
plt.show()
# Positionally encode y and z
y_train_pos = y_train
pos_encodingy = PositionalEncodingY(y_train,int(y_train.shape[1]*y_train.shape[2]), max_len = P, H=H)
y_train = pos_encodingy.forward(y_train)
del pos_encodingy
pos_encodingy = PositionalEncodingY(z,int(z.shape[1]*z.shape[2]), max_len = polypoints, H=H)
z = pos_encodingy.forward(z)
del pos_encodingy
pos_encodingyt = PositionalEncodingY(y_test,int(y_test.shape[1]*y_test.shape[2]), max_len = P, H=H)
y_test = pos_encodingyt.forward(y_test)
del pos_encodingyt
start_time = timeit.default_timer()
inputs_trainxu = jnp.asarray(scatteringTransform(U_train, l=l, m=m, training_batch_size=num_train))
inputs_testxu = jnp.asarray(scatteringTransform(U_test , l=l, m=m, training_batch_size=num_test))
elapsed = timeit.default_timer() - start_time
print("The wall-clock time for for loop is seconds is equal to %f seconds"%elapsed)
print(inputs_trainxu.shape, inputs_testxu.shape)
train_dataset = DataGenerator(inputs_trainxu, y_train, s_train, z, w, training_batch_size)
train_dataset = iter(train_dataset)
test_dataset = DataGenerator(inputs_testxu, y_test, s_test, z, w, training_batch_size)
test_dataset = iter(test_dataset)
q_layers = [L*dy+H*dy, 100, 100, l]
v_layers = [1550*du, 500, ds*n_hat]
g_layers = [l, 100, 100, ds*n_hat]
model = LOCA(q_layers, g_layers, v_layers, jac_det=jac_det)
model.count_params(model.get_params(model.opt_state))
start_time = timeit.default_timer()
model.train(train_dataset, test_dataset, nIter=TRAINING_ITERATIONS)
elapsed = timeit.default_timer() - start_time
print("The training wall-clock time is seconds is equal to %f seconds"%elapsed)
params = model.get_params(model.opt_state)
uCNN_test = model.predict(params, (inputs_testxu,y_test, z, w))
test_error_u = []
for i in range(0,s_test.shape[0]):
test_error_u.append(jnp.linalg.norm(s_test[i,:,-1] - uCNN_test[i,:,-1], 2)/jnp.linalg.norm(s_test[i,:,-1], 2))
print("The average test u error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(test_error_u),np.std(test_error_u),np.min(test_error_u),np.max(test_error_u)))
uCNN_train = model.predict(params, (inputs_trainxu, y_train, z, w))
train_error_u = []
for i in range(0,s_train.shape[0]):
train_error_u.append(jnp.linalg.norm(s_train[i,:,-1] - uCNN_train[i,:,-1], 2)/jnp.linalg.norm(s_train[i,:,-1], 2))
print("The average train u error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(train_error_u),np.std(train_error_u),np.min(train_error_u),np.max(train_error_u)))
# === LOCA-main/Climate_Modeling/DeepONet/DeepONet_Weatherg.py ===
from jax.core import as_named_shape
from scipy import linalg, interpolate
from sklearn import gaussian_process as gp
from jax.example_libraries.stax import Dense, Gelu, Relu
from jax.example_libraries import stax
import os
import timeit
from jax.example_libraries import optimizers
from absl import app
from jax import vjp
import jax
import jax.numpy as jnp
import numpy as np
from jax.numpy.linalg import norm
from jax import random, grad, jit
from functools import partial
from torch.utils import data
from scipy import interpolate
from tqdm import trange
from math import sqrt
import itertools
def get_freer_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
return str(np.argmax(memory_available))
os.environ['CUDA_VISIBLE_DEVICES']= get_freer_gpu()
def output_construction(s,Y,P=100,ds=1, dy=2, N=1000,Nx=100,Ny=100):
s = s.reshape(Nx,Ny)
x = np.random.randint(Nx, size=P)
y = np.random.randint(Ny, size=P)
Y_all = np.hstack([x[:, None], y[:,None]]) * [1./(Nx - 1), 1./(Ny - 1)]
s_all = s[x][range(P), y][:, None]
return s_all, Y_all
class DataGenerator(data.Dataset):
def __init__(self, u, y, s,
batch_size=100, rng_key=random.PRNGKey(1234)):
'Initialization'
self.u = u
self.y = y
self.s = s
self.N = u.shape[0]
self.batch_size = batch_size
self.key = rng_key
def __getitem__(self, index):
'Generate one batch of data'
self.key, subkey = random.split(self.key)
inputs,outputs = self.__data_generation(subkey)
return inputs, outputs
@partial(jit, static_argnums=(0,))
def __data_generation(self, key):
'Generates data containing batch_size samples'
idx = random.choice(key, self.N, (self.batch_size,), replace=False)
s = self.s[idx,:,:]
u = self.u[idx,:,:]
y = self.y[idx,:,:]
inputs = (u, y)
return inputs, s
class PositionalEncodingY:
def __init__(self, Y, d_model, max_len = 100, H=20):
self.d_model = int(np.ceil(d_model/4)*2)
self.Y = Y
self.max_len = max_len
self.H = H
@partial(jit, static_argnums=(0,))
def forward(self, x):
pex = np.zeros((x.shape[0], self.max_len, self.H))
pey = np.zeros((x.shape[0], self.max_len, self.H))
X1 = jnp.take(self.Y, 0, axis=2)[:,:,None]
X2 = jnp.take(self.Y, 1, axis=2)[:,:,None]
positionX1 = jnp.tile(X1,(1,1,self.H))
positionX2 = jnp.tile(X2,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
pex = jax.ops.index_update(pex, jax.ops.index[:,:,0::2], jnp.cos(positionX1[:,:,0::2] * div_term))
pex = jax.ops.index_update(pex, jax.ops.index[:,:,1::2], jnp.sin(positionX1[:,:,1::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,0::2], jnp.cos(positionX2[:,:,0::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,1::2], jnp.sin(positionX2[:,:,1::2] * div_term))
pos_embedding = jnp.concatenate((pex,pey),axis=-1)
x = jnp.concatenate([x, pos_embedding], -1)
return x
class PositionalEncodingU:
def __init__(self, U, d_model, max_len = 100, H=20):
self.d_model = int(np.ceil(d_model/2)*2)
self.U = U
self.max_len = max_len
self.H = H
@partial(jit, static_argnums=(0,))
def forward(self, x):
peu = np.zeros((x.shape[0], self.max_len, self.H))
U = jnp.take(self.U, 0, axis=2)[:,:,None]
positionU = jnp.tile(U,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
peu = jax.ops.index_update(peu, jax.ops.index[:,:,0::2], jnp.cos(positionU[:,:,0::2] * div_term))
peu = jax.ops.index_update(peu, jax.ops.index[:,:,1::2], jnp.sin(positionU[:,:,1::2] * div_term))
x = jnp.concatenate([x, peu], -1)
return x
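# Note: forward returns the input concatenated with its harmonic features, so
# the encoded U has du + H channels per sensor (and the encoded Y has
# dy + H*dy channels per query point), which is why the branch and trunk
# layer widths below include the H_u and H_y terms.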
class DON:
def __init__(self,branch_layers, trunk_layers , m=100, P=100, mn=None, std=None):
# Network initialization and evaluation functions
self.branch_init, self.branch_apply = self.init_NN(branch_layers, activation=Gelu)
self.in_shape = (-1, branch_layers[0])
self.out_shape, branch_params = self.branch_init(random.PRNGKey(10000), self.in_shape)
self.trunk_init, self.trunk_apply = self.init_NN(trunk_layers, activation=Gelu)
self.in_shape = (-1, trunk_layers[0])
self.out_shape, trunk_params = self.trunk_init(random.PRNGKey(10000), self.in_shape)
params = (trunk_params, branch_params)
# Use optimizers to set optimizer initialization and update functions
self.opt_init,self.opt_update,self.get_params = optimizers.adam(optimizers.exponential_decay(1e-3,
decay_steps=100,
decay_rate=0.95))
self.opt_state = self.opt_init(params)
# Logger
self.itercount = itertools.count()
self.loss_log = []
self.mean = mn
self.std = std
def init_NN(self, Q, activation=Gelu):
layers = []
num_layers = len(Q)
if num_layers < 2:
net_init, net_apply = stax.serial()
else:
for i in range(0, num_layers-2):
layers.append(Dense(Q[i+1]))
layers.append(activation)
layers.append(Dense(Q[-1]))
net_init, net_apply = stax.serial(*layers)
return net_init, net_apply
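# The DeepONet forward pass below: the trunk network embeds the (positionally
# encoded) query coordinates y, the branch network embeds the flattened input
# function u, and the prediction is the inner product of the two embeddings
# over the n_hat latent dimension, evaluated per output channel ds.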
@partial(jax.jit, static_argnums=0)
def DON(self, params, inputs, ds=1):
trunk_params, branch_params = params
inputsxu, inputsy = inputs
t = self.trunk_apply(trunk_params, inputsy).reshape(inputsy.shape[0], inputsy.shape[1], ds, int(100/ds))
b = self.branch_apply(branch_params, inputsxu.reshape(inputsxu.shape[0],1,inputsxu.shape[1]*inputsxu.shape[2]))
b = b.reshape(b.shape[0],int(b.shape[2]/ds),ds)
Guy = jnp.einsum("ijkl,ilk->ijk", t,b)
return Guy
@partial(jax.jit, static_argnums=0)
def loss(self, params, batch):
inputs, y = batch
y_pred = self.DON(params,inputs)
y = y*self.std + self.mean
y_pred = y_pred*self.std + self.mean
loss = jnp.mean((y.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def lossT(self, params, batch):
inputs, outputs = batch
y_pred = self.DON(params,inputs)
y_pred = y_pred*self.std + self.mean
loss = jnp.mean((outputs.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def L2errorT(self, params, batch):
inputs, y = batch
y_pred = self.DON(params,inputs)
y_pred = y_pred*self.std + self.mean
return norm(y.flatten() - y_pred.flatten(), 2)/norm(y.flatten(),2)
@partial(jax.jit, static_argnums=0)
def L2error(self, params, batch):
inputs, y = batch
y_pred = self.DON(params,inputs)
y = y*self.std + self.mean
y_pred = y_pred*self.std + self.mean
return norm(y.flatten() - y_pred.flatten(), 2)/norm(y.flatten(),2)
@partial(jit, static_argnums=(0,))
def step(self, i, opt_state, batch):
params = self.get_params(opt_state)
g = grad(self.loss)(params, batch)
return self.opt_update(i, g, opt_state)
def train(self, train_dataset, test_dataset, nIter = 10000):
train_data = iter(train_dataset)
test_data = iter(test_dataset)
pbar = trange(nIter)
for it in pbar:
train_batch = next(train_data)
test_batch = next(test_data)
self.opt_state = self.step(next(self.itercount), self.opt_state, train_batch)
if it % 100 == 0:
params = self.get_params(self.opt_state)
loss_train = self.loss(params, train_batch)
loss_test = self.lossT(params, test_batch)
errorTrain = self.L2error(params, train_batch)
errorTest = self.L2errorT(params, test_batch)
self.loss_log.append(loss_train)
pbar.set_postfix({'Training loss': loss_train,
'Testing loss' : loss_test,
'Test error': errorTest,
'Train error': errorTrain})
@partial(jit, static_argnums=(0,))
def predict(self, params, inputs):
s_pred = self.DON(params,inputs)
return s_pred*self.std + self.mean
def ravel_list(self, *lst):
return jnp.concatenate([jnp.ravel(elt) for elt in lst]) if lst else jnp.array([])
def ravel_pytree(self, pytree):
leaves, treedef = jax.tree_util.tree_flatten(pytree)
flat, unravel_list = vjp(self.ravel_list, *leaves)
unravel_pytree = lambda flat: jax.tree_util.tree_unflatten(treedef, unravel_list(flat))
return flat, unravel_pytree
def count_params(self, params):
trunk_params, branch_params = params
blv, _ = self.ravel_pytree(branch_params)
tlv, _ = self.ravel_pytree(trunk_params)
print("The number of model parameters is:",blv.shape[0]+tlv.shape[0])
def predict_function(U_in,Y_in, model=None, params= None, H=10):
y = np.expand_dims(Y_in,axis=0)
y = np.tile(y,(U_in.shape[0],1,1))
inputs_trainxu = jnp.asarray(U_in)
pos_encodingy = PositionalEncodingY(y,int(y.shape[1]*y.shape[2]), max_len = Y_in.shape[0], H=H)
y = pos_encodingy.forward(y)
del pos_encodingy
uCNN_super_all = model.predict(params, (inputs_trainxu, y))
return uCNN_super_all, y[:,:,1:2], y[:,:,0:1]
def error_full_resolution(uCNN_super_all, s_all,tag='train', num_train=1000, Nx=32, Ny=32):
test_error_u = []
z = uCNN_super_all.reshape(num_train,Nx,Ny)
s = s_all.reshape(num_train,Nx,Ny)
s = np.swapaxes(s,1,2)
for i in range(0,num_train):
test_error_u.append(norm(s[i,:,0]- z[i,:,0], 2)/norm(s[i,:,0], 2))
print("The average "+tag+" u error for the super resolution is %e, the standard deviation %e, the minimum error is %e and the maximum error is %e"%(np.mean(test_error_u),np.std(test_error_u),np.min(test_error_u),np.max(test_error_u)))
absolute_error = np.abs(z-s)
return absolute_error, np.mean(test_error_u), test_error_u
def minmax(a, mean):
minpos = a.index(min(a))
maxpos = a.index(max(a))
meanpos = min(range(len(a)), key=lambda i: abs(a[i]-mean))
print("The maximum is at position", maxpos)
print("The minimum is at position", minpos)
print("The mean is at position", meanpos)
return minpos,maxpos,meanpos
def main(_):
TRAINING_ITERATIONS = 100000
P = 144
m = int(72*72)
num_train = 1825
num_test = 1825
training_batch_size = 100
du = 1
dy = 2
ds = 1
n_hat = 100
Nx = 72
Ny = 72
H_y = 10
H_u = 10
d = np.load("../Data/weather_dataset.npz")
u_train = d["U_train"][:num_train,:]
S_train = d["S_train"][:num_train,:]/1000.
Y_train = d["Y_train"]
d = np.load("../Data/weather_dataset.npz")
u_test = d["U_train"][-num_test:,:]
S_test = d["S_train"][-num_test:,:]/1000.
Y_test = d["Y_train"]
Y_train_in = Y_train
Y_test_in = Y_test
s_all_test = S_test
s_all_train = S_train
s_train = np.zeros((num_train,P,ds))
y_train = np.zeros((num_train,P,dy))
U_train = np.zeros((num_train,m,du))
s_test = np.zeros((num_test,P,ds))
y_test = np.zeros((num_test,P,dy))
U_test = np.zeros((num_test,m,du))
for i in range(0,num_train):
s_train[i,:,:], y_train[i,:,:] = output_construction(S_train[i,:], Y_train, Nx=Nx, Ny=Ny, P=P, ds=ds)
U_train[i,:,:] = u_train[i,:][:,None]
for i in range(num_test):
s_test[i,:,:], y_test[i,:,:] = output_construction(S_test[i,:], Y_test, Nx=Nx, Ny=Ny, P=P, ds=ds)
U_test[i,:,:] = u_test[i,:][:,None]
U_train = jnp.asarray(U_train)
y_train = jnp.asarray(y_train)
s_train = jnp.asarray(s_train)
U_test = jnp.asarray(U_test)
y_test = jnp.asarray(y_test)
s_test = jnp.asarray(s_test)
U_train = jnp.reshape(U_train,(num_train,m,du))
y_train = jnp.reshape(y_train,(num_train,P,dy))
s_train = jnp.reshape(s_train,(num_train,P,ds))
U_test = jnp.reshape(U_test,(num_test,m,du))
y_test = jnp.reshape(y_test,(num_test,P,dy))
s_test = jnp.reshape(s_test,(num_test,P,ds))
pos_encodingy = PositionalEncodingY(y_train,int(y_train.shape[1]*y_train.shape[2]), max_len = P, H=H_y)
y_train = pos_encodingy.forward(y_train)
del pos_encodingy
pos_encodingyt = PositionalEncodingY(y_test,int(y_test.shape[1]*y_test.shape[2]), max_len = P, H=H_y)
y_test = pos_encodingyt.forward(y_test)
del pos_encodingyt
pos_encodingy = PositionalEncodingU(U_train,int(U_train.shape[1]*U_train.shape[2]), max_len = m, H=H_u)
U_train = pos_encodingy.forward(U_train)
del pos_encodingy
print(U_test[0,0:20,:])
pos_encodingyt = PositionalEncodingU(U_test,int(U_test.shape[1]*U_test.shape[2]), max_len = m, H=H_u)
U_test = pos_encodingyt.forward(U_test)
del pos_encodingyt
s_train_mean = jnp.mean(s_train,axis=0)
s_train_std = jnp.std(s_train,axis=0)
s_train = (s_train - s_train_mean)/s_train_std
train_dataset = DataGenerator(U_train, y_train, s_train, training_batch_size)
train_dataset = iter(train_dataset)
test_dataset = DataGenerator(U_test, y_test, s_test, training_batch_size)
test_dataset = iter(test_dataset)
branch_layers = [m*(du*H_u+du), 100, 100, 100, 100, ds*n_hat]
trunk_layers = [H_y*dy + dy, 100, 100, 100, 100, ds*n_hat]
model = DON(branch_layers, trunk_layers, m=m, P=P, mn=s_train_mean, std=s_train_std)
model.count_params(model.get_params(model.opt_state))
start_time = timeit.default_timer()
model.train(train_dataset, test_dataset, nIter=TRAINING_ITERATIONS)
elapsed = timeit.default_timer() - start_time
print("The training wall-clock time is seconds is equal to %f seconds"%elapsed)
params = model.get_params(model.opt_state)
print("Predicting the solution for the full resolution")
uCNN_super_all_test = np.zeros_like(s_all_test).reshape(num_test, Nx*Ny, ds)
for i in range(0, Nx*Ny, P):
idx = i + np.arange(0,P)
uCNN_super_all_test[:,idx,:], _, _ = predict_function(U_test , Y_test_in[idx,:], model=model, params=params, H=H_y)
uCNN_super_all_train = np.zeros_like(s_all_train).reshape(num_train, Nx*Ny, ds)
for i in range(0, Nx*Ny, P):
idx = i + np.arange(0,P)
uCNN_super_all_train[:,idx,:], _, _ = predict_function(U_train , Y_train_in[idx,:], model=model, params=params, H=H_y)
absolute_error_test, mean_test_error, test_error = error_full_resolution(uCNN_super_all_test, s_all_test, tag='test', num_train=num_test, Nx=Nx, Ny=Ny)
absolute_error_train, mean_train_error, train_error = error_full_resolution(uCNN_super_all_train, s_all_train, tag='train', num_train=num_train, Nx=Nx, Ny=Ny)
print(np.max(absolute_error_test), np.max(absolute_error_train))
np.savez_compressed("Error_Weather_DeepONet_P%d"%(P), test_error = test_error)
if __name__ == '__main__':
app.run(main)
# === LOCA-main/Climate_Modeling/FNO/Adam.py ===
import math
import torch
from torch import Tensor
from typing import List, Optional
from torch.optim.optimizer import Optimizer
import os
os.environ['CUDA_VISIBLE_DEVICES']="3"
def adam(params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
max_exp_avg_sqs: List[Tensor],
state_steps: List[int],
*,
amsgrad: bool,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float):
r"""Functional API that performs Adam algorithm computation.
See :class:`~torch.optim.Adam` for details.
"""
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step = state_steps[i]
bias_correction1 = 1 - beta1 ** step
bias_correction2 = 1 - beta2 ** step
if weight_decay != 0:
grad = grad.add(param, alpha=weight_decay)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sqs[i].sqrt() / math.sqrt(bias_correction2)).add_(eps)
else:
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
step_size = lr / bias_correction1
param.addcdiv_(exp_avg, denom, value=-step_size)
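# In math, each tensor update above is the standard Adam step: with
# m_t = beta1*m_{t-1} + (1-beta1)*g_t and v_t = beta2*v_{t-1} + (1-beta2)*g_t^2,
# the bias-corrected estimates m_hat = m_t/(1-beta1^t) and v_hat = v_t/(1-beta2^t)
# give theta <- theta - lr * m_hat / (sqrt(v_hat) + eps); the code folds the
# corrections into step_size = lr/bias_correction1 and the
# sqrt(bias_correction2) factor inside denom.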
class Adam(Optimizer):
r"""Implements Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
The implementation of the L2 penalty follows changes proposed in
`Decoupled Weight Decay Regularization`_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(Adam, self).__init__(params, defaults)
def __setstate__(self, state):
super(Adam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
max_exp_avg_sqs = []
state_steps = []
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is not None:
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
grads.append(p.grad)
state = self.state[p]
# Lazy state initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['amsgrad']:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avgs.append(state['exp_avg'])
exp_avg_sqs.append(state['exp_avg_sq'])
if group['amsgrad']:
max_exp_avg_sqs.append(state['max_exp_avg_sq'])
# update the steps for each param group update
state['step'] += 1
# record the step after step update
state_steps.append(state['step'])
adam(params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=group['amsgrad'],
beta1=beta1,
beta2=beta2,
lr=group['lr'],
weight_decay=group['weight_decay'],
eps=group['eps'])
return loss
# === LOCA-main/Climate_Modeling/FNO/utilities3.py ===
import torch
import numpy as np
import scipy.io
import h5py
import torch.nn as nn
import operator
from functools import reduce
from functools import partial
import os
def get_freer_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
return str(np.argmax(memory_available))
os.environ['CUDA_VISIBLE_DEVICES']= get_freer_gpu()
#################################################
#
# Utilities
#
#################################################
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# reading data
class MatReader(object):
def __init__(self, file_path, to_torch=True, to_cuda=False, to_float=True):
super(MatReader, self).__init__()
self.to_torch = to_torch
self.to_cuda = to_cuda
self.to_float = to_float
self.file_path = file_path
self.data = None
self.old_mat = None
self._load_file()
def _load_file(self):
try:
self.data = scipy.io.loadmat(self.file_path)
self.old_mat = True
except:
self.data = h5py.File(self.file_path)
self.old_mat = False
def load_file(self, file_path):
self.file_path = file_path
self._load_file()
def read_field(self, field):
x = self.data[field]
if not self.old_mat:
x = x[()]
x = np.transpose(x, axes=range(len(x.shape) - 1, -1, -1))
if self.to_float:
x = x.astype(np.float32)
if self.to_torch:
x = torch.from_numpy(x)
if self.to_cuda:
x = x.cuda()
return x
def set_cuda(self, to_cuda):
self.to_cuda = to_cuda
def set_torch(self, to_torch):
self.to_torch = to_torch
def set_float(self, to_float):
self.to_float = to_float
# normalization, pointwise gaussian
class UnitGaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(UnitGaussianNormalizer, self).__init__()
# x could be in shape of ntrain*n or ntrain*T*n or ntrain*n*T
self.mean = torch.mean(x, 0)
self.std = torch.std(x, 0)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
if sample_idx is None:
std = self.std + self.eps # n
mean = self.mean
else:
if len(self.mean.shape) == len(sample_idx[0].shape):
std = self.std[sample_idx] + self.eps # batch*n
mean = self.mean[sample_idx]
if len(self.mean.shape) > len(sample_idx[0].shape):
std = self.std[:,sample_idx]+ self.eps # T*batch*n
mean = self.mean[:,sample_idx]
# x is in shape of batch*n or T*batch*n
x = (x * std) + mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
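# Illustrative usage sketch (assumed shapes, not part of the training script):
# encode() standardizes per location and decode() inverts it, so a roundtrip
# recovers the original tensor up to the eps stabilizer.
def _normalizer_demo():
    x = torch.randn(8, 16)
    normalizer = UnitGaussianNormalizer(x)
    x_rec = normalizer.decode(normalizer.encode(x))
    return torch.allclose(x, x_rec, atol=1e-4)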
# normalization, Gaussian
class GaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(GaussianNormalizer, self).__init__()
self.mean = torch.mean(x)
self.std = torch.std(x)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
x = (x * (self.std + self.eps)) + self.mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
# normalization, scaling by range
class RangeNormalizer(object):
def __init__(self, x, low=0.0, high=1.0):
super(RangeNormalizer, self).__init__()
mymin = torch.min(x, 0)[0].view(-1)
mymax = torch.max(x, 0)[0].view(-1)
self.a = (high - low)/(mymax - mymin)
self.b = -self.a*mymax + high
def encode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = self.a*x + self.b
x = x.view(s)
return x
def decode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = (x - self.b)/self.a
x = x.view(s)
return x
#loss function with rel/abs Lp loss
class LpLoss(object):
def __init__(self, d=2, p=2, size_average=True, reduction=True):
super(LpLoss, self).__init__()
# Dimension and Lp-norm type are positive
assert d > 0 and p > 0
self.d = d
self.p = p
self.reduction = reduction
self.size_average = size_average
def abs(self, x, y):
num_examples = x.size()[0]
#Assume uniform mesh
h = 1.0 / (x.size()[1] - 1.0)
all_norms = (h**(self.d/self.p))*torch.norm(x.view(num_examples,-1) - y.view(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(all_norms)
else:
return torch.sum(all_norms)
return all_norms
def rel(self, x, y):
num_examples = x.size()[0]
diff_norms = torch.norm(x.reshape(num_examples,-1) - y.reshape(num_examples,-1), self.p, 1)
y_norms = torch.norm(y.reshape(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(diff_norms/y_norms)
else:
return torch.sum(diff_norms/y_norms)
return diff_norms/y_norms
def __call__(self, x, y):
return self.rel(x, y)
# Sobolev norm (HS norm)
# where we also compare the numerical derivatives between the output and target
class HsLoss(object):
def __init__(self, d=2, p=2, k=1, a=None, group=False, size_average=True, reduction=True):
super(HsLoss, self).__init__()
# Dimension and Lp-norm type are positive
assert d > 0 and p > 0
self.d = d
self.p = p
self.k = k
self.balanced = group
self.reduction = reduction
self.size_average = size_average
if a is None:
a = [1,] * k
self.a = a
def rel(self, x, y):
num_examples = x.size()[0]
diff_norms = torch.norm(x.reshape(num_examples,-1) - y.reshape(num_examples,-1), self.p, 1)
y_norms = torch.norm(y.reshape(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(diff_norms/y_norms)
else:
return torch.sum(diff_norms/y_norms)
return diff_norms/y_norms
def __call__(self, x, y, a=None):
nx = x.size()[1]
ny = x.size()[2]
k = self.k
balanced = self.balanced
a = self.a
x = x.view(x.shape[0], nx, ny, -1)
y = y.view(y.shape[0], nx, ny, -1)
k_x = torch.cat((torch.arange(start=0, end=nx//2, step=1),torch.arange(start=-nx//2, end=0, step=1)), 0).reshape(nx,1).repeat(1,ny)
k_y = torch.cat((torch.arange(start=0, end=ny//2, step=1),torch.arange(start=-ny//2, end=0, step=1)), 0).reshape(1,ny).repeat(nx,1)
k_x = torch.abs(k_x).reshape(1,nx,ny,1).to(x.device)
k_y = torch.abs(k_y).reshape(1,nx,ny,1).to(x.device)
x = torch.fft.fftn(x, dim=[1, 2])
y = torch.fft.fftn(y, dim=[1, 2])
if not balanced:
weight = 1
if k >= 1:
weight += a[0]**2 * (k_x**2 + k_y**2)
if k >= 2:
weight += a[1]**2 * (k_x**4 + 2*k_x**2*k_y**2 + k_y**4)
weight = torch.sqrt(weight)
loss = self.rel(x*weight, y*weight)
else:
loss = self.rel(x, y)
if k >= 1:
weight = a[0] * torch.sqrt(k_x**2 + k_y**2)
loss += self.rel(x*weight, y*weight)
if k >= 2:
weight = a[1] * torch.sqrt(k_x**4 + 2*k_x**2*k_y**2 + k_y**4)
loss += self.rel(x*weight, y*weight)
loss = loss / (k+1)
return loss
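# The HsLoss above is a relative H^k Sobolev loss in Fourier space: the
# weight sqrt(1 + a_1^2 |k|^2 + ...) penalizes mismatch in derivatives up to
# order k, and the `group` flag switches between a single combined weight and
# averaging the separate order-0, order-1, ... relative errors.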
# A simple feedforward neural network
class DenseNet(torch.nn.Module):
def __init__(self, layers, nonlinearity, out_nonlinearity=None, normalize=False):
super(DenseNet, self).__init__()
self.n_layers = len(layers) - 1
assert self.n_layers >= 1
self.layers = nn.ModuleList()
for j in range(self.n_layers):
self.layers.append(nn.Linear(layers[j], layers[j+1]))
if j != self.n_layers - 1:
if normalize:
self.layers.append(nn.BatchNorm1d(layers[j+1]))
self.layers.append(nonlinearity())
if out_nonlinearity is not None:
self.layers.append(out_nonlinearity())
def forward(self, x):
for _, l in enumerate(self.layers):
x = l(x)
return x
# print the number of parameters
def count_params(model):
c = 0
for p in list(model.parameters()):
c += reduce(operator.mul, list(p.size()))
return c
# === LOCA-main/Climate_Modeling/FNO/weather_FNO.py ===
"""
@author: Zongyi Li
This file is the Fourier Neural Operator for 2D problem such as the Darcy Flow discussed in Section 5.2 in the [paper](https://arxiv.org/pdf/2010.08895.pdf).
"""
import numpy as np
from numpy.linalg import norm
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import matplotlib.pyplot as plt
import operator
from functools import reduce
from functools import partial
from timeit import default_timer
from utilities3 import *
torch.manual_seed(0)
np.random.seed(0)
import timeit
################################################################
# fourier layer
################################################################
class SpectralConv2d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2):
super(SpectralConv2d, self).__init__()
"""
2D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1 #Number of Fourier modes to multiply, at most floor(N/2) + 1
self.modes2 = modes2
self.scale = (1 / (in_channels * out_channels))
self.weights1 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))
self.weights2 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))
# Complex multiplication
def compl_mul2d(self, input, weights):
# (batch, in_channel, x,y ), (in_channel, out_channel, x,y) -> (batch, out_channel, x,y)
return torch.einsum("bixy,ioxy->boxy", input, weights)
def forward(self, x):
batchsize = x.shape[0]
#Compute Fourier coeffcients up to factor of e^(- something constant)
x_ft = torch.fft.rfft2(x)
# Multiply relevant Fourier modes
out_ft = torch.zeros(batchsize, self.out_channels, x.size(-2), x.size(-1)//2 + 1, dtype=torch.cfloat, device=x.device)
out_ft[:, :, :self.modes1, :self.modes2] = \
self.compl_mul2d(x_ft[:, :, :self.modes1, :self.modes2], self.weights1)
out_ft[:, :, -self.modes1:, :self.modes2] = \
self.compl_mul2d(x_ft[:, :, -self.modes1:, :self.modes2], self.weights2)
#Return to physical space
x = torch.fft.irfft2(out_ft, s=(x.size(-2), x.size(-1)))
return x
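# The spectral convolution above realizes a global convolution in O(N log N):
# rfft2 moves x to Fourier space, only the lowest modes1 x modes2 coefficient
# blocks (and their negative-frequency counterparts along the first axis) are
# mixed channel-wise by the learned complex weights, and irfft2 returns to the
# spatial grid, truncating everything above the retained modes.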
class FNO2d(nn.Module):
def __init__(self, modes1, modes2, width):
super(FNO2d, self).__init__()
"""
The overall network. It contains 4 layers of the Fourier layer.
1. Lift the input to the desire channel dimension by self.fc0 .
2. 4 layers of the integral operators u' = (W + K)(u).
W defined by self.w; K defined by self.conv .
3. Project from the channel space to the output space by self.fc1 and self.fc2 .
input: the solution of the coefficient function and locations (a(x, y), x, y)
input shape: (batchsize, x=s, y=s, c=3)
output: the solution
output shape: (batchsize, x=s, y=s, c=1)
"""
self.modes1 = modes1
self.modes2 = modes2
self.width = width
self.fc0 = nn.Linear(3, self.width) # input channel is 3: (a(x, y), x, y)
self.conv0 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv1 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv2 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv3 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.w0 = nn.Conv1d(self.width, self.width, 1)
self.w1 = nn.Conv1d(self.width, self.width, 1)
self.w2 = nn.Conv1d(self.width, self.width, 1)
self.w3 = nn.Conv1d(self.width, self.width, 1)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, 1)
def forward(self, x):
batchsize = x.shape[0]
size_x, size_y = x.shape[1], x.shape[2]
x = self.fc0(x)
x = x.permute(0, 3, 1, 2)
x1 = self.conv0(x)
x2 = self.w0(x.view(batchsize, self.width, -1)).view(batchsize, self.width, size_x, size_y)
x = x1 + x2
x = F.relu(x)
x1 = self.conv1(x)
x2 = self.w1(x.view(batchsize, self.width, -1)).view(batchsize, self.width, size_x, size_y)
x = x1 + x2
x = F.relu(x)
x1 = self.conv2(x)
x2 = self.w2(x.view(batchsize, self.width, -1)).view(batchsize, self.width, size_x, size_y)
x = x1 + x2
x = F.relu(x)
x1 = self.conv3(x)
x2 = self.w3(x.view(batchsize, self.width, -1)).view(batchsize, self.width, size_x, size_y)
x = x1 + x2
x = x.permute(0, 2, 3, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x
################################################################
# configs
################################################################
ntrain = 1825
ntest = 1825
batch_size = 73
learning_rate = 0.001
epochs = 400
step_size = 100
gamma = 0.5
modes = 12
width = 32
r = 1
Nx = 72
Ny = 72
h = Nx
s = h
P = 144
################################################################
# load data and data normalization
################################################################
d = np.load("../Data/weather_dataset.npz")
U_train = d["U_train"][:ntrain,:].reshape(ntrain,Nx,Ny)
S_train = d["S_train"][:ntrain,:].reshape(ntrain,Nx,Ny)/1000.
CX = d["X_train"]
CY = d["Y_train"]
d = np.load("../Data/weather_dataset.npz")
U_test = d["U_train"][ntest:,:].reshape(ntrain,Nx,Ny)
S_test = d["S_train"][ntest:,:].reshape(ntrain,Nx,Ny)/1000.
CX = d["X_train"]
CY = d["Y_train"]
dtype_double = torch.FloatTensor
cdtype_double = torch.cuda.DoubleTensor
U_train = torch.from_numpy(np.asarray(U_train)).type(dtype_double)
S_train = torch.from_numpy(np.asarray(S_train)).type(dtype_double)
U_test = torch.from_numpy(np.asarray(U_test)).type(dtype_double)
S_test = torch.from_numpy(np.asarray(S_test)).type(dtype_double)
x_train = U_train
y_train = S_train
x_test = U_test
y_test = S_test
grids = []
lontest = np.linspace(0,355,num=Nx)/360
lattest = (np.linspace(90,-87.5,num=Ny) + 90.)/180.
grids.append(lontest)
grids.append(lattest)
grid = np.vstack([xx.ravel() for xx in np.meshgrid(*grids)]).T
grid = grid.reshape(1,s,s,2)
grid = torch.tensor(grid, dtype=torch.float)
x_train = torch.cat([x_train.reshape(ntrain,s,s,1), grid.repeat(ntrain,1,1,1)], dim=3)
x_test = torch.cat([x_test.reshape(ntest,s,s,1), grid.repeat(ntest,1,1,1)], dim=3)
ind_train = torch.randint(s*s, (ntrain, P))
ind_test = torch.randint(s*s, (ntest, P))
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_train, y_train, ind_train), batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test, ind_test), batch_size=batch_size, shuffle=True)
################################################################
# training and evaluation
################################################################
batch_ind = torch.arange(batch_size).reshape(-1, 1).repeat(1, P)
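# batch_ind pairs with the random index tensors so that y[batch_ind, idx]
# gathers, for every sample in the batch, its own set of P randomly chosen
# grid locations; the loss is then evaluated only at those scattered points.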
model = FNO2d(modes, modes, width).cuda()
print(count_params(model))
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False)
start_time = timeit.default_timer()
for ep in range(epochs):
model.train()
t1 = default_timer()
train_l2 = 0
for x, y, idx in train_loader:
x, y = x.cuda(), y.cuda()
optimizer.zero_grad()
out = model(x).reshape(batch_size, s*s)
y = y.reshape(batch_size, s*s)
y = y[batch_ind, idx]
out = out[batch_ind, idx]
loss = myloss(out.view(batch_size,-1), y.view(batch_size,-1))
loss.backward()
optimizer.step()
train_l2 += loss.item()
scheduler.step()
model.eval()
test_l2 = 0.0
with torch.no_grad():
for x, y, idx in test_loader:
x, y = x.cuda(), y.cuda()
out = model(x).reshape(batch_size, s*s)
y = y.reshape(batch_size, s*s,1)
y = y[batch_ind, idx]
out = out[batch_ind, idx]
test_l2 += myloss(out.view(batch_size,-1), y.view(batch_size,-1)).item()
train_l2/= ntrain
test_l2 /= ntest
t2 = default_timer()
print(ep, t2-t1, train_l2, test_l2)#, np.mean(error_total))
elapsed = timeit.default_timer() - start_time
print("The training wall-clock time is seconds is equal to %f seconds"%elapsed)
pred_torch = torch.zeros(S_test.shape)
baseline_torch = torch.zeros(S_test.shape)
index = 0
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test), batch_size=1, shuffle=False)
test_error_u = []
test_error_u_np = []
with torch.no_grad():
for x, y in test_loader:
test_l2 = 0
x, y = x.cuda(), y.cuda()
out = model(x).reshape(1, s, s)
pred_torch[index,:,:] = out[:,:,:]
baseline_torch[index,:,:] = y[:,:,:]
test_l2 += myloss(out.view(1, -1), y.view(1, -1)).item()
test_error_u.append(test_l2)
test_error_u_np.append(np.linalg.norm(out.cpu().numpy().reshape(S_test.shape[1]*S_test.shape[2])- y.cpu().numpy().reshape(S_test.shape[1]*S_test.shape[2]),2)/np.linalg.norm(y.cpu().numpy().reshape(S_test.shape[1]*S_test.shape[2]),2))
index = index + 1
print("The average test u error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(test_error_u),np.std(test_error_u),np.min(test_error_u),np.max(test_error_u)))
print("The average test u error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(test_error_u_np),np.std(test_error_u_np),np.min(test_error_u_np),np.max(test_error_u_np))) | 10,016 | 34.147368 | 243 | py |
null | LOCA-main/Climate_Modeling/LOCA/LOCAWeather.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pathos.pools import ProcessPool
from scipy import linalg, interpolate
from sklearn import gaussian_process as gp
import argparse
from jax.example_libraries.stax import Dense, Gelu, Relu
from jax.example_libraries import stax
import os
import timeit
from jax.example_libraries import optimizers
from absl import app
import jax
from jax import vjp
import jax.numpy as jnp
import numpy as np
from jax.numpy.linalg import norm
from jax import random, grad, vmap, jit, pmap
from functools import partial
from torch.utils import data
from scipy import interpolate
from tqdm import trange
from math import log, sqrt, sin, cos
import itertools
import torch
import scipy.signal as signal
from kymatio.numpy import Scattering2D
def get_freer_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
return str(np.argmax(memory_available))
os.environ['CUDA_VISIBLE_DEVICES']= "0"
def input_construction(U,X,m=100, Nx=100, Ny=100, N=1000,du=1,dx=2):
dx = 1./(Nx-1)
dy = 1./(Ny-1)
x = np.arange(0,1+dx,dx)
y = np.arange(0,1+dy,dy)
U = U.reshape(Nx,Ny)
u = interpolate.interp2d(x,y,U[:,:],kind="cubic")
X_new = np.linspace(0,1,num=int(sqrt(m)))
Y_new = np.linspace(0,1,num=int(sqrt(m)))
XX_new, YY_new = np.meshgrid(X_new,Y_new)
U_all = np.zeros((int(sqrt(m)),int(sqrt(m))))
U_all[:,:] = u(X_new, Y_new)
X_all = np.concatenate((XX_new.flatten()[:,None],YY_new.flatten()[:,None]),-1)
U_all = U_all.reshape(int(sqrt(m))*int(sqrt(m)),du)
return U_all, X_all
def output_construction(s,Y,P=100,ds=1, dy=2, N=1000,Nx=100,Ny=100):
s = s.reshape(Nx,Ny)
x = np.random.randint(Nx, size=P)
y = np.random.randint(Ny, size=P)
lontest = np.linspace(0,355,num=72)/360
lattest = (np.linspace(90,-87.5,num=72) + 90.)/180.
XX, YY = np.meshgrid(lontest, lattest)
Y_all = np.concatenate((XX[x][range(P),y][:,None],YY[x][range(P),y][:,None]),axis=-1)
s_all = s[x][range(P), y][:, None]
return s_all, Y_all
def pairwise_distances(dist,**arg):
return jit(vmap(vmap(partial(dist,**arg),in_axes=(None,0)),in_axes=(0,None)))
def peuclid_distance(x,y,square=True):
XX = jnp.einsum('ik,ik->i',x,x)
YY = jnp.einsum('ik,ik->i',y,y)
XY = jnp.einsum('ik,jk->ij',x,y)
return XX[:,np.newaxis]+YY[np.newaxis,:] - 2*XY
def euclid_distance(x,y,square=True):
XX=jnp.dot(x,x)
YY=jnp.dot(y,y)
XY=jnp.dot(x,y)
return XX+YY-2*XY
class DataGenerator(data.Dataset):
def __init__(self, inputsxu, y, s,
batch_size=100, rng_key=random.PRNGKey(1234)):
'Initialization'
self.inputsxu = inputsxu
self.y = y
self.s = s
self.N = inputsxu.shape[0]
self.batch_size = batch_size
self.key = rng_key
# @partial(jit, static_argnums=(0,))
def __getitem__(self, index):
'Generate one batch of data'
self.key, subkey = random.split(self.key)
inputs,outputs = self.__data_generation(subkey)
return inputs, outputs
@partial(jit, static_argnums=(0,))
def __data_generation(self, key):
'Generates data containing batch_size samples'
idx = random.choice(key, self.N, (self.batch_size,), replace=False)
s = self.s[idx,:,:]
inputsxu = self.inputsxu[idx,:,:]
y = self.y[idx,:,:]
inputs = (inputsxu, y)
return inputs, s
class PositionalEncodingY:
def __init__(self, Y, d_model, max_len = 100, H=20):
self.d_model = int(np.ceil(d_model/4)*2)
self.Y = Y
self.max_len = max_len
self.H = H
self.vdistance_function = vmap(pairwise_distances(euclid_distance))
@partial(jit, static_argnums=(0,))
def forward(self, x):
pex = np.zeros((x.shape[0], self.max_len, self.H))
pey = np.zeros((x.shape[0], self.max_len, self.H))
T = jnp.take(self.Y, 0, axis=2)[:,:,None]
X = jnp.take(self.Y, 1, axis=2)[:,:,None]
positionT = jnp.tile(T,(1,1,self.H))
positionX = jnp.tile(X,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
pex = jax.ops.index_update(pex, jax.ops.index[:,:,0::2], jnp.cos(positionT[:,:,0::2] * div_term))
pex = jax.ops.index_update(pex, jax.ops.index[:,:,1::2], jnp.sin(positionT[:,:,1::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,0::2], jnp.cos(positionX[:,:,0::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,1::2], jnp.sin(positionX[:,:,1::2] * div_term))
pos_embedding = jnp.concatenate((pex,pey),axis=-1)
x = jnp.concatenate([x, pos_embedding], -1)
return x
def continouswavetransf(sig, l=100, m=100, training_batch_size = 1):
scattering = Scattering2D(J=1, L=8, max_order=2, shape=(72, 72))
cwtmatr = np.zeros((training_batch_size, 11664, 1))
sig = np.array(sig)
for i in range(0,training_batch_size):
scatteringCoeffs = scattering(sig[i,:,:].reshape(72,72))
cwtmatr[i,:,:] = scatteringCoeffs.flatten()[:,None]
return cwtmatr
class LOCA:
def __init__(self, q_layers, g_layers, weight_layers , m=100, P=100, X=None, Y=None, Yt=None):
# Network initialization and evaluation functions
self.q_init, self.q_apply = self.init_NN(q_layers, activation=Gelu)
self.in_shape = (-1, q_layers[0])
self.out_shape, q_params = self.q_init(random.PRNGKey(10000), self.in_shape)
self.v_init, self.v_apply = self.init_NN(weight_layers, activation=Gelu)
self.in_shape = (-1, weight_layers[0])
self.out_shape, v_params = self.v_init(random.PRNGKey(10000), self.in_shape)
self.g_init, self.g_apply = self.init_NN(g_layers, activation=Gelu)
self.in_shape = (-1, g_layers[0])
self.out_shape, g_params = self.g_init(random.PRNGKey(10000), self.in_shape)
self.R = 1000
self.D = 100
self.N = 432
self.W = random.normal(random.PRNGKey(10000), shape=(self.R, self.D))
self.b = random.uniform(random.PRNGKey(10000), minval=0, maxval=2*np.pi, shape=(self.R,))
self.B = jnp.repeat(self.b[:, jnp.newaxis], self.N, axis=1)
self.norm = 1./ jnp.sqrt(self.R)
beta = [1.]
gamma = [1.]
params = (beta, gamma, q_params, g_params, v_params)
# Use optimizers to set optimizer initialization and update functions
self.opt_init,self.opt_update,self.get_params = optimizers.adam(optimizers.exponential_decay(1e-3,
decay_steps=100,
decay_rate=0.99))
self.opt_state = self.opt_init(params)
# Logger
self.itercount = itertools.count()
self.loss_log = []
self.loss_operator_log = []
self.loss_physics_log = []
self.vdistance_function = vmap(pairwise_distances(euclid_distance))
self.distance_function = vmap(jit(self.euclid_distance))
def init_NN(self, Q, activation=Gelu):
layers = []
num_layers = len(Q)
if num_layers < 2:
net_init, net_apply = stax.serial()
else:
for i in range(0, num_layers-2):
layers.append(Dense(Q[i+1]))
layers.append(activation)
layers.append(Dense(Q[-1]))
net_init, net_apply = stax.serial(*layers)
return net_init, net_apply
# @partial(jax.jit, static_argnums=0)
def euclid_distance(self, x, y, square=True):
diff = x[None,:,:] - y[:,None,:]
return jnp.sum(diff**2,axis=-1)
@partial(jit, static_argnums=(0,))
def matr_vec(self,M,v):
return vmap(jnp.matmul,in_axes=(None,0))(M,v)
@partial(jit, static_argnums=(0,))
def matr_matr(self,M,v):
return vmap(jnp.matmul,in_axes=(0,0))(M,v)
@partial(jax.jit, static_argnums=0)
def fast_gauss_kernel(self, x):
print(x.T.shape, self.W.shape, self.B.shape)
Z = self.norm * np.sqrt(2) * jnp.cos(jnp.matmul(self.W,x.T) + self.B)
return jnp.matmul(Z.T,Z)
@partial(jax.jit, static_argnums=0)
def vector_fast_gauss_kernel(self,x):
return vmap(self.fast_gauss_kernel,in_axes=(0))(x)
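# fast_gauss_kernel above is a random Fourier feature (RFF) sketch of a
# Gaussian kernel: with rows of W ~ N(0, I) and b ~ U[0, 2*pi], the features
# Z(x) = sqrt(2/R) * cos(W x + b) satisfy E[Z(x)^T Z(y)] ~= exp(-||x-y||^2/2),
# so Z^T Z approximates the full kernel matrix with R random features.
# Note these helpers are defined here, but the LOCA_net below uses the exact
# exponential kernel instead.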
@partial(jax.jit, static_argnums=0)
def LOCA_net(self, params, inputs, ds=1):
beta, gamma, q_params, g_params, v_params = params
inputsxu, inputsy = inputs
inputsy = self.q_apply(q_params,inputsy)
attn_logits = self.vdistance_function(inputsy, inputsy)
K = beta[0]*jnp.exp(- gamma[0]*attn_logits)
Kxx = jnp.sqrt((1./K.shape[1])*jnp.sum(K ,axis=-1,keepdims=True))
mean_K = jnp.matmul(Kxx, jnp.swapaxes(Kxx,1,2))
K = jnp.divide(K,mean_K)
g = self.g_apply(g_params, inputsy)
g = (1./K.shape[1])*jnp.einsum("ijk,ikml->ijml",K,g.reshape(inputsy.shape[0], inputsy.shape[1], ds, int(g.shape[2]/ds)))
g = jax.nn.softmax(g, axis=-1)
value_heads = self.v_apply(v_params, inputsxu.reshape(inputsxu.shape[0],1,inputsxu.shape[1]*inputsxu.shape[2]))
value_heads = value_heads.reshape(value_heads.shape[0],int(value_heads.shape[2]/ds),ds)
Guy = jnp.einsum("ijkl,ilk->ijk", g,value_heads)
return Guy
@partial(jax.jit, static_argnums=0)
def loss(self, params, batch):
inputs, outputs = batch
y_pred = self.LOCA_net(params,inputs)
loss = jnp.mean((outputs.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def lossT(self, params, batch):
inputs, outputs = batch
y_pred = self.LOCA_net(params,inputs)
loss = jnp.mean((outputs.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def L2errorT(self, params, batch):
inputs, outputs = batch
y_pred = self.LOCA_net(params,inputs)
return norm(outputs.flatten() - y_pred.flatten(), 2)/norm(outputs.flatten(),2)
@partial(jax.jit, static_argnums=0)
def L2error(self, params, batch):
inputs, outputs = batch
y_pred = self.LOCA_net(params,inputs)
return norm(outputs.flatten() - y_pred.flatten(), 2)/norm(outputs.flatten(),2)
@partial(jit, static_argnums=(0,))
def step(self, i, opt_state, batch):
params = self.get_params(opt_state)
g = grad(self.loss)(params, batch)
return self.opt_update(i, g, opt_state)
def train(self, train_dataset, test_dataset, nIter = 10000):
train_data = iter(train_dataset)
test_data = iter(test_dataset)
pbar = trange(nIter)
for it in pbar:
train_batch = next(train_data)
test_batch = next(test_data)
self.opt_state = self.step(next(self.itercount), self.opt_state, train_batch)
if it % 100 == 0:
params = self.get_params(self.opt_state)
loss_train = self.loss(params, train_batch)
loss_test = self.lossT(params, test_batch)
errorTrain = self.L2error(params, train_batch)
errorTest = self.L2errorT(params, test_batch)
self.loss_log.append(loss_train)
pbar.set_postfix({'Training loss': loss_train,
'Testing loss' : loss_test,
'Test error': errorTest,
'Train error': errorTrain})
@partial(jit, static_argnums=(0,))
def predict(self, params, inputs):
s_pred = self.LOCA_net(params,inputs)
return s_pred
@partial(jit, static_argnums=(0,))
def predictT(self, params, inputs):
s_pred = self.LOCA_net(params,inputs)
return s_pred
def ravel_list(self, *lst):
return jnp.concatenate([jnp.ravel(elt) for elt in lst]) if lst else jnp.array([])
def ravel_pytree(self, pytree):
leaves, treedef = jax.tree_util.tree_flatten(pytree)
flat, unravel_list = vjp(self.ravel_list, *leaves)
unravel_pytree = lambda flat: jax.tree_util.tree_unflatten(treedef, unravel_list(flat))
return flat, unravel_pytree
def count_params(self, params):
beta, gamma,q_params, g_params, v_params = params
qlv, _ = self.ravel_pytree(q_params)
vlv, _ = self.ravel_pytree(v_params)
glv, _ = self.ravel_pytree(g_params)
print("The number of model parameters is:",qlv.shape[0]+vlv.shape[0]+glv.shape[0])
def predict_function(U_in,Y_in, model=None, params= None, H=10):
y = np.expand_dims(Y_in,axis=0)
y = np.tile(y,(U_in.shape[0],1,1))
inputs_trainxu = jnp.asarray(U_in)
pos_encodingy = PositionalEncodingY(y,int(y.shape[1]*y.shape[2]), max_len = Y_in.shape[0], H=H)
y = pos_encodingy.forward(y)
del pos_encodingy
uCNN_super_all = model.predict(params, (inputs_trainxu, y))
return uCNN_super_all, y[:,:,1:2], y[:,:,0:1]
def error_full_resolution(uCNN_super_all, s_all,tag='train', num_train=1000, P=128, Nx=32, Ny=32, idx=None):
test_error_u = []
z = uCNN_super_all.reshape(num_train,Nx,Ny)
s = s_all.reshape(num_train,Nx,Ny)
# s = np.swapaxes(s,1,2)
for i in range(0,num_train):
test_error_u.append(norm(s[i,:,0]- z[i,:,0], 2)/norm(s[i,:,0], 2))
print("The average "+tag+" u error for the super resolution is %e, the standard deviation %e, the minimum error is %e and the maximum error is %e"%(np.mean(test_error_u),np.std(test_error_u),np.min(test_error_u),np.max(test_error_u)))
absolute_error = np.abs(z-s)
return absolute_error, np.mean(test_error_u), test_error_u
def minmax(a, n, mean):
minpos = a.index(min(a))
maxpos = a.index(max(a))
meanpos = min(range(len(a)), key=lambda i: abs(a[i]-mean))
print("The maximum is at position", maxpos)
print("The minimum is at position", minpos)
print("The mean is at position", meanpos)
return minpos,maxpos,meanpos
TRAINING_ITERATIONS = 100000
P = 144
m = int(72*72)
T = 1
N_hat = 1
num_train = 1825
num_test = 1825
training_batch_size = 73
dx = 2
du = 1
dy = 2
ds = 1
n_hat = 100
l = 100
Nx = 72
Ny = 72
Nt = 1
Ng = 0
L = 1
H = 10
casenum_train = 2
casenum_test = 2
d = np.load("../Data/weather_dataset.npz")
u_train = d["U_train"][:num_train,:]
S_train = d["S_train"][:num_train,:]/1000.
x_train = d["X_train"]
Y_train = d["Y_train"]
d = np.load("../Data/weather_dataset.npz")
u_test = d["U_train"][-num_test:,:]
S_test = d["S_train"][-num_test:,:]/1000.
x_test = d["X_train"]
Y_test = d["Y_train"]
Y_train_in = Y_train
Y_test_in = Y_test
s_all_test = S_test
s_all_train = S_train
s_train = np.zeros((num_train*N_hat,P,ds))
y_train = np.zeros((num_train*N_hat,P,dy))
U_train = np.zeros((num_train*N_hat,m,du))
X_train = np.zeros((num_train*N_hat,m,dx))
s_test = np.zeros((num_test,P,ds))
y_test = np.zeros((num_test,P,dy))
U_test = np.zeros((num_test,m,du))
X_test = np.zeros((num_test,m,dx))
for j in range(0,N_hat):
for i in range(0,num_train):
s_train[i + j*num_train,:,:], y_train[i+ j*num_train,:,:] = output_construction(S_train[i,:], Y_train, Nx=Nx, Ny=Ny, P=P, ds=ds)
U_train[i+ j*num_train,:,:], X_train[i+ j*num_train,:,:] = input_construction( u_train[i,:], x_train, Nx=Nx, Ny=Ny, m=m, du=du)
for i in range(num_test):
s_test[i,:,:], y_test[i,:,:] = output_construction(S_test[i,:], Y_test, Nx=Nx, Ny=Ny, P=P, ds=ds)
U_test[i,:,:], X_test[i,:,:] = input_construction( u_test[i,:], x_test, Nx=Nx, Ny=Ny, m=m, du=du)
num_train = N_hat*num_train
X_train2 = X_train
U_train2 = U_train
X_test2 = X_test
U_test2 = U_test
X_train = jnp.asarray(X_train)
U_train = np.asarray(U_train)
y_train = jnp.asarray(y_train)
s_train = jnp.asarray(s_train)
X_test = jnp.asarray(X_test)
U_test = np.asarray(U_test)
y_test = jnp.asarray(y_test)
s_test = jnp.asarray(s_test)
X_train = jnp.reshape(X_train,(num_train,m,dx))
U_train = np.reshape(U_train,(num_train,m,du))
y_train = jnp.reshape(y_train,(num_train,P,dy))
s_train = jnp.reshape(s_train,(num_train,P,ds))
X_test = jnp.reshape(X_test,(num_test,m,dx))
U_test = np.reshape(U_test,(num_test,m,du))
y_test = jnp.reshape(y_test,(num_test,P,dy))
s_test = jnp.reshape(s_test,(num_test,P,ds))
y_train_pos = y_train
y_train_posT = y_test
pos_encodingy = PositionalEncodingY(y_train_pos,int(y_train_pos.shape[1]*y_train_pos.shape[2]), max_len = P, H=H)
y_train = pos_encodingy.forward(y_train)
del pos_encodingy
pos_encodingyt = PositionalEncodingY(y_train_posT,int(y_train_posT.shape[1]*y_train_posT.shape[2]), max_len = P, H=H)
y_test = pos_encodingyt.forward(y_test)
del pos_encodingyt
inputs_trainxu = jnp.asarray(continouswavetransf(U_train, l=l, m=m, training_batch_size=num_train))
inputs_testxu = jnp.asarray(continouswavetransf(U_test , l=l, m=m, training_batch_size=num_test))
train_dataset = DataGenerator(inputs_trainxu, y_train, s_train, training_batch_size)
train_dataset = iter(train_dataset)
test_dataset = DataGenerator(inputs_testxu, y_test, s_test, training_batch_size)
test_dataset = iter(test_dataset)
q_layers = [L*dy+H*dy, 100, 100, l]
weights_layers = [11664, 100, 100, ds*n_hat]
g_layers = [l, 100, 100, n_hat]
model = LOCA(q_layers, g_layers, weights_layers, m=m, P=P, X=X_train, Y=y_train_pos, Yt=y_train_posT)
model.count_params(model.get_params(model.opt_state))
start_time = timeit.default_timer()
model.train(train_dataset, test_dataset, nIter=TRAINING_ITERATIONS)
elapsed = timeit.default_timer() - start_time
print("The training wall-clock time is seconds is equal to %f seconds"%elapsed)
params = model.get_params(model.opt_state)
print("Predicting the solution for the full resolution")
uCNN_super_all_test = np.zeros_like(s_all_test).reshape(num_test, Nx*Ny, ds)
for i in range(0, Nx*Ny, P):
idx = i + np.arange(0,P)
uCNN_super_all_test[:,idx,:], _, _ = predict_function(inputs_testxu , Y_test_in[idx,:], model=model, params=params, H=H)
uCNN_super_all_train = np.zeros_like(s_all_train).reshape(num_train, Nx*Ny, ds)
for i in range(0, Nx*Ny, P):
idx = i + np.arange(0,P)
uCNN_super_all_train[:,idx,:], _, _ = predict_function(inputs_trainxu , Y_train_in[idx,:], model=model, params=params, H=H)
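# The full field is reconstructed in chunks of P query points at a time,
# matching the P-point batches used during training; Nx*Ny = 5184 is an
# integer multiple of P = 144, so the chunks tile the grid exactly.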
absolute_error_test, mean_test_error, test_error = error_full_resolution(uCNN_super_all_test,s_all_test, tag='test', P=P,Nx=Nx, Ny=Ny, idx = None, num_train=num_test)
absolute_error_train, mean_train_error, train_error = error_full_resolution(uCNN_super_all_train,s_all_train,tag='train',P=P,Nx=Nx, Ny=Ny, idx = None, num_train=num_train)
# File: LOCA-main/Darcy/DeepONet/DeepONet_Darcy.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from jax.flatten_util import ravel_pytree
from jax.experimental.stax import Dense, Gelu
from jax.experimental import stax
import os
import timeit
from jax.experimental import optimizers
import jax
import jax.numpy as jnp
from jax import vjp
import numpy as np
from jax.numpy.linalg import norm
from jax import random, grad, jit
from functools import partial
from torch.utils import data
from scipy import interpolate
from tqdm import trange
from math import sqrt
import itertools
from kymatio.numpy import Scattering2D
def get_freer_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Used >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
return str(np.argmin(memory_available))
os.environ['CUDA_VISIBLE_DEVICES']= get_freer_gpu()
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']="False"
def input_construction(U,X,m=100, Nx=100, Ny=100, N=1000,du=1,dx=2):
dx = 1./(Nx-1)
dy = 1./(Ny-1)
x = np.arange(0,1+dx,dx)
y = np.arange(0,1+dy,dy)
U = U.reshape(Nx,Ny)
u = interpolate.interp2d(x,y,U[:,:],kind="cubic")
X_new = np.linspace(0,1,num=int(sqrt(m)))
Y_new = np.linspace(0,1,num=int(sqrt(m)))
XX_new, YY_new = np.meshgrid(X_new,Y_new)
U_all = np.zeros((int(sqrt(m)),int(sqrt(m))))
U_all[:,:] = u(X_new, Y_new)
X_all = np.concatenate((XX_new.flatten()[:,None],YY_new.flatten()[:,None]),-1)
U_all = U_all.reshape(int(sqrt(m))*int(sqrt(m)),du)
return U_all, X_all
def output_construction(s,Y,P=100,ds=1, dy=2, N=1000,Nx=100,Ny=100):
s = s.reshape(Nx,Ny)
x = np.random.randint(Nx, size=P)
y = np.random.randint(Ny, size=P)
Y_all = np.hstack([x[:, None], y[:,None]]) * [1./(Nx - 1), 1./(Ny - 1)]
s_all = s[x][range(P), y][:, None]
return s_all, Y_all
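# output_construction draws P random grid indices per sample and returns the
# targets s_all (P, ds) together with the matching physical coordinates
# Y_all (P, dy), rescaled to the unit square by 1/(Nx-1) and 1/(Ny-1).
# Training therefore only ever sees a random subset of the full Nx x Ny field.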
class DataGenerator(data.Dataset):
def __init__(self, u, y, s,
batch_size=100, rng_key=random.PRNGKey(1234)):
'Initialization'
self.u = u
self.y = y
self.s = s
self.N = u.shape[0]
self.batch_size = batch_size
self.key = rng_key
def __getitem__(self, index):
'Generate one batch of data'
self.key, subkey = random.split(self.key)
inputs,outputs = self.__data_generation(subkey)
return inputs, outputs
@partial(jit, static_argnums=(0,))
def __data_generation(self, key):
'Generates data containing batch_size samples'
idx = random.choice(key, self.N, (self.batch_size,), replace=False)
s = self.s[idx,:,:]
u = self.u[idx,:,:]
y = self.y[idx,:,:]
inputs = (u, y)
return inputs, s
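# Each batch draw splits the generator's PRNG key and samples batch_size
# example indices without replacement, so successive batches differ while
# remaining reproducible for a fixed initial rng_key.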
class PositionalEncodingY:
def __init__(self, Y, d_model, max_len = 100, H=20):
self.d_model = int(np.ceil(d_model/4)*2)
self.Y = Y
self.max_len = max_len
self.H = H
@partial(jit, static_argnums=(0,))
def forward(self, x):
pex = np.zeros((x.shape[0], self.max_len, self.H))
pey = np.zeros((x.shape[0], self.max_len, self.H))
T = jnp.take(self.Y, 0, axis=2)[:,:,None]
X = jnp.take(self.Y, 1, axis=2)[:,:,None]
positionT = jnp.tile(T,(1,1,self.H))
positionX = jnp.tile(X,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
pex = jax.ops.index_update(pex, jax.ops.index[:,:,0::2], jnp.cos(positionT[:,:,0::2] * div_term))
pex = jax.ops.index_update(pex, jax.ops.index[:,:,1::2], jnp.sin(positionT[:,:,1::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,0::2], jnp.cos(positionX[:,:,0::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,1::2], jnp.sin(positionX[:,:,1::2] * div_term))
pos_embedding = jnp.concatenate((pex,pey),axis=-1)
x = jnp.concatenate([x, pos_embedding], -1)
return x
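# The encoding above is a Fourier-feature map: each query coordinate c is
# expanded into interleaved cos(2^k * pi * c) and sin(2^k * pi * c) features
# for k = 0, ..., H/2 - 1 (for H = 6 the frequencies are pi, 2*pi and 4*pi),
# and the embedding is concatenated to the raw coordinates.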
class PositionalEncodingU:
def __init__(self, U, d_model, max_len = 100, H=20):
self.d_model = int(np.ceil(d_model/2)*2)
self.U = U
self.max_len = max_len
self.H = H
@partial(jit, static_argnums=(0,))
def forward(self, x):
peu = np.zeros((x.shape[0], self.max_len, self.H))
U = jnp.take(self.U, 0, axis=2)[:,:,None]
positionU = jnp.tile(U,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
peu = jax.ops.index_update(peu, jax.ops.index[:,:,0::2], jnp.cos(positionU[:,:,0::2] * div_term))
peu = jax.ops.index_update(peu, jax.ops.index[:,:,1::2], jnp.sin(positionU[:,:,1::2] * div_term))
x = jnp.concatenate([x, peu], -1)
return x
def scatteringTransform(sig, m=100, training_batch_size = 100):
scattering = Scattering2D(J=1, L=3, max_order=2, shape=(32, 32))
cwtmatr = np.zeros((training_batch_size, 768, 1))
sig = np.array(sig)
for i in range(0,training_batch_size):
scatteringCoeffs = scattering(sig[i,:,:].reshape(32,32))
cwtmatr[i,:,:] = scatteringCoeffs[:3,:,:].flatten()[:,None]
return cwtmatr
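# Assuming kymatio's usual conventions, Scattering2D with J=1 halves the 32x32
# spatial resolution to 16x16 and yields a small stack of coefficient channels
# (one zeroth-order plus J*L = 3 first-order paths); keeping the first three
# channels gives 3*16*16 = 768 features per sample. Note this helper is defined
# but not invoked below; this script feeds positionally encoded raw sensor
# values to the branch network instead.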
class DON:
def __init__(self,branch_layers, trunk_layers , m=100, P=100, mn=None, std=None):
# Network initialization and evaluation functions
self.branch_init, self.branch_apply = self.init_NN(branch_layers, activation=Gelu)
self.in_shape = (-1, branch_layers[0])
self.out_shape, branch_params = self.branch_init(random.PRNGKey(10000), self.in_shape)
self.trunk_init, self.trunk_apply = self.init_NN(trunk_layers, activation=Gelu)
self.in_shape = (-1, trunk_layers[0])
self.out_shape, trunk_params = self.trunk_init(random.PRNGKey(10000), self.in_shape)
params = (trunk_params, branch_params)
# Use optimizers to set optimizer initialization and update functions
self.opt_init,self.opt_update,self.get_params = optimizers.adam(optimizers.exponential_decay(1e-3,
decay_steps=100,
decay_rate=0.99))
self.opt_state = self.opt_init(params)
# Logger
self.itercount = itertools.count()
self.loss_log = []
self.mean = mn
self.std = std
def init_NN(self, Q, activation=Gelu):
layers = []
num_layers = len(Q)
if num_layers < 2:
net_init, net_apply = stax.serial()
else:
for i in range(0, num_layers-2):
layers.append(Dense(Q[i+1]))
layers.append(activation)
layers.append(Dense(Q[-1]))
net_init, net_apply = stax.serial(*layers)
return net_init, net_apply
@partial(jax.jit, static_argnums=0)
def DON(self, params, inputs, ds=1):
trunk_params, branch_params = params
inputsxu, inputsy = inputs
t = self.trunk_apply(trunk_params, inputsy).reshape(inputsy.shape[0], inputsy.shape[1], ds, int(100/ds))
b = self.branch_apply(branch_params, inputsxu.reshape(inputsxu.shape[0],1,inputsxu.shape[1]*inputsxu.shape[2]))
b = b.reshape(b.shape[0],int(b.shape[2]/ds),ds)
Guy = jnp.einsum("ijkl,ilk->ijk", t,b)
return Guy
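    # Shapes in the contraction above: the trunk output t is reshaped to
    # (batch, P, ds, n) and the branch output b to (batch, n, ds), so
    # Guy[i, j, k] = sum_l t[i, j, k, l] * b[i, l, k] is the usual DeepONet
    # inner product between n trunk basis functions at each query point and
    # n branch coefficients, taken per output channel k.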
@partial(jax.jit, static_argnums=0)
def loss(self, params, batch):
inputs, y = batch
y_pred = self.DON(params,inputs)
y = y*self.std + self.mean
y_pred = y_pred*self.std + self.mean
loss = np.mean((y.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def lossT(self, params, batch):
inputs, outputs = batch
y_pred = self.DON(params,inputs)
y_pred = y_pred*self.std + self.mean
loss = np.mean((outputs.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def L2errorT(self, params, batch):
inputs, y = batch
y_pred = self.DON(params,inputs)
y_pred = y_pred*self.std + self.mean
return norm(y.flatten() - y_pred.flatten(), 2)/norm(y.flatten(),2)
@partial(jax.jit, static_argnums=0)
def L2error(self, params, batch):
inputs, y = batch
y_pred = self.DON(params,inputs)
y = y*self.std + self.mean
y_pred = y_pred*self.std + self.mean
return norm(y.flatten() - y_pred.flatten(), 2)/norm(y.flatten(),2)
@partial(jit, static_argnums=(0,))
def step(self, i, opt_state, batch):
params = self.get_params(opt_state)
g = grad(self.loss)(params, batch)
return self.opt_update(i, g, opt_state)
def train(self, train_dataset, test_dataset, nIter = 10000):
train_data = iter(train_dataset)
test_data = iter(test_dataset)
pbar = trange(nIter)
for it in pbar:
train_batch = next(train_data)
test_batch = next(test_data)
self.opt_state = self.step(next(self.itercount), self.opt_state, train_batch)
if it % 100 == 0:
params = self.get_params(self.opt_state)
loss_train = self.loss(params, train_batch)
loss_test = self.lossT(params, test_batch)
errorTrain = self.L2error(params, train_batch)
errorTest = self.L2errorT(params, test_batch)
self.loss_log.append(loss_train)
pbar.set_postfix({'Training loss': loss_train,
'Testing loss' : loss_test,
'Test error': errorTest,
'Train error': errorTrain})
@partial(jit, static_argnums=(0,))
def predict(self, params, inputs):
s_pred = self.DON(params,inputs)
return s_pred*self.std + self.mean
@partial(jit, static_argnums=(0,))
def predictT(self, params, inputs):
s_pred = self.DON(params,inputs)
return s_pred*self.std + self.mean
def ravel_list(self, *lst):
return jnp.concatenate([jnp.ravel(elt) for elt in lst]) if lst else jnp.array([])
def ravel_pytree(self, pytree):
leaves, treedef = jax.tree_util.tree_flatten(pytree)
flat, unravel_list = vjp(self.ravel_list, *leaves)
unravel_pytree = lambda flat: jax.tree_util.tree_unflatten(treedef, unravel_list(flat))
return flat, unravel_pytree
def count_params(self, params):
trunk_params, branch_params = params
blv, _ = self.ravel_pytree(branch_params)
tlv, _ = self.ravel_pytree(trunk_params)
print("The number of model parameters is:",blv.shape[0]+tlv.shape[0])
def predict_function(U_in,Y_in, model=None, params= None, H=10):
y = np.expand_dims(Y_in,axis=0)
y = np.tile(y,(U_in.shape[0],1,1))
inputs_trainxu = jnp.asarray(U_in)
pos_encodingy = PositionalEncodingY(y,int(y.shape[1]*y.shape[2]), max_len = Y_in.shape[0], H=H)
y = pos_encodingy.forward(y)
del pos_encodingy
uCNN_super_all = model.predict(params, (inputs_trainxu, y))
return uCNN_super_all, y[:,:,1:2], y[:,:,0:1]
def error_full_resolution(uCNN_super_all, s_all,tag='train', num_train=1000, Nx=32, Ny=32):
test_error_u = []
z = uCNN_super_all.reshape(num_train,Nx,Ny)
s = s_all.reshape(num_train,Nx,Ny)
s = np.swapaxes(s,1,2)
for i in range(0,num_train):
test_error_u.append(norm(s[i,:,:]- z[i,:,:], 2)/norm(s[i,:,:], 2))
print("The average "+tag+" u error for the super resolution is %e, the standard deviation %e, the minimum error is %e and the maximum error is %e"%(np.mean(test_error_u),np.std(test_error_u),np.min(test_error_u),np.max(test_error_u)))
absolute_error = np.abs(z-s)
return absolute_error, np.mean(test_error_u), test_error_u
TRAINING_ITERATIONS = 20000
P = 128
m = 1024
num_train = 1000
num_test = 1000
training_batch_size = 100
dx = 2
du = 1
dy = 2
ds = 1
n_hat = 100
Nx = 32
Ny = 32
H_y = 6
H_u = 6
d = np.load("/scratch/gkissas/Darcy/train_darcy_dataset.npz")
u_train = d["U_train"]
x_train = d["X_train"]
Y_train = d["Y_train"]
S_train = d["s_train"]
d = np.load("/scratch/gkissas/Darcy/test_darcy_dataset.npz")
u_test = d["U_test"]
x_test = d["X_test"]
Y_test = d["Y_test"]
S_test = d["s_test"]
Y_train_in = Y_train
Y_test_in = Y_test
s_all_test = S_test
s_all_train = S_train
s_train = np.zeros((num_train,P,ds))
y_train = np.zeros((num_train,P,dy))
U_train = np.zeros((num_train,m,du))
X_train = np.zeros((num_train,m,dx))
s_test = np.zeros((num_test,P,ds))
y_test = np.zeros((num_test,P,dy))
U_test = np.zeros((num_test,m,du))
X_test = np.zeros((num_test,m,dx))
for i in range(0,num_train):
s_train[i,:,:], y_train[i,:,:] = output_construction(S_train[i,:], Y_train, Nx=Nx, Ny=Ny, P=P, ds=ds)
U_train[i,:,:], X_train[i,:,:] = input_construction( u_train[i,:], x_train, Nx=Nx, Ny=Ny, m=m, du=du)
for i in range(num_test):
s_test[i,:,:], y_test[i,:,:] = output_construction(S_test[i,:], Y_test, Nx=Nx, Ny=Ny, P=P, ds=ds)
U_test[i,:,:], X_test[i,:,:] = input_construction( u_test[i,:], x_test, Nx=Nx, Ny=Ny, m=m, du=du)
U_train = jnp.asarray(U_train)
y_train = jnp.asarray(y_train)
s_train = jnp.asarray(s_train)
U_test = jnp.asarray(U_test)
y_test = jnp.asarray(y_test)
s_test = jnp.asarray(s_test)
X_train = jnp.reshape(X_train,(num_train,m,dx))
U_train = jnp.reshape(U_train,(num_train,m,du))
y_train = jnp.reshape(y_train,(num_train,P,dy))
s_train = jnp.reshape(s_train,(num_train,P,ds))
X_test = jnp.reshape(X_test,(num_test,m,dx))
U_test = jnp.reshape(U_test,(num_test,m,du))
y_test = jnp.reshape(y_test,(num_test,P,dy))
s_test = jnp.reshape(s_test,(num_test,P,ds))
pos_encodingy = PositionalEncodingY(y_train,int(y_train.shape[1]*y_train.shape[2]), max_len = P, H=H_y)
y_train = pos_encodingy.forward(y_train)
del pos_encodingy
pos_encodingyt = PositionalEncodingY(y_test,int(y_test.shape[1]*y_test.shape[2]), max_len = P, H=H_y)
y_test = pos_encodingyt.forward(y_test)
del pos_encodingyt
pos_encodingy = PositionalEncodingU(U_train,int(U_train.shape[1]*U_train.shape[2]), max_len = m, H=H_u)
U_train = pos_encodingy.forward(U_train)
del pos_encodingy
pos_encodingyt = PositionalEncodingU(U_test,int(U_test.shape[1]*U_test.shape[2]), max_len = m, H=H_u)
U_test = pos_encodingyt.forward(U_test)
del pos_encodingyt
s_train_mean = jnp.mean(s_train,axis=0)
s_train_std = jnp.std(s_train,axis=0)
s_train = (s_train - s_train_mean)/s_train_std
train_dataset = DataGenerator(U_train, y_train, s_train, training_batch_size)
train_dataset = iter(train_dataset)
test_dataset = DataGenerator(U_test, y_test, s_test, training_batch_size)
test_dataset = iter(test_dataset)
branch_layers = [m*(du*H_u+du), 1024, 1024, 1024, ds*n_hat]
trunk_layers = [H_y*dy + dy , 1024, 1024, 1024, ds*n_hat]
model = DON(branch_layers, trunk_layers, m=m, P=P, mn=s_train_mean, std=s_train_std)
model.count_params(model.get_params(model.opt_state))
start_time = timeit.default_timer()
model.train(train_dataset, test_dataset, nIter=TRAINING_ITERATIONS)
elapsed = timeit.default_timer() - start_time
print("The training wall-clock time is seconds is equal to %f seconds"%elapsed)
params = model.get_params(model.opt_state)
print("Predicting the solution for the full resolution")
uCNN_super_all_test = np.zeros_like(s_all_test).reshape(num_test, Nx*Ny, ds)
for i in range(0, Nx*Ny, P):
idx = i + np.arange(0,P)
uCNN_super_all_test[:,idx,:], _, _ = predict_function(U_test , Y_test_in[idx,:], model=model, params=params, H=H_y)
uCNN_super_all_train = np.zeros_like(s_all_train).reshape(num_train, Nx*Ny, ds)
for i in range(0, Nx*Ny, P):
idx = i + np.arange(0,P)
uCNN_super_all_train[:,idx,:], _, _ = predict_function(U_train , Y_train_in[idx,:], model=model, params=params, H=H_y)
absolute_error_test, mean_test_error, test_error = error_full_resolution(uCNN_super_all_test, s_all_test, tag='test', num_train=num_test, Nx=Nx, Ny=Ny)
absolute_error_train, mean_train_error, train_error = error_full_resolution(uCNN_super_all_train, s_all_train, tag='train', num_train=num_train, Nx=Nx, Ny=Ny)
# File: LOCA-main/Darcy/FNO/FNODarcy.py
"""
@author: Zongyi Li
This file is the Fourier Neural Operator for 2D problems such as the Darcy Flow discussed in Section 5.2 in the [paper](https://arxiv.org/pdf/2010.08895.pdf).
"""
import numpy as np
from numpy.linalg import norm
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import matplotlib.pyplot as plt
import operator
from functools import reduce
from functools import partial
from timeit import default_timer
from utilities3 import *
torch.manual_seed(0)
np.random.seed(0)
import timeit
################################################################
# fourier layer
################################################################
class SpectralConv2d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2):
super(SpectralConv2d, self).__init__()
"""
2D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1 #Number of Fourier modes to multiply, at most floor(N/2) + 1
self.modes2 = modes2
self.scale = (1 / (in_channels * out_channels))
self.weights1 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))
self.weights2 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))
# Complex multiplication
def compl_mul2d(self, input, weights):
# (batch, in_channel, x,y ), (in_channel, out_channel, x,y) -> (batch, out_channel, x,y)
return torch.einsum("bixy,ioxy->boxy", input, weights)
def forward(self, x):
batchsize = x.shape[0]
#Compute Fourier coeffcients up to factor of e^(- something constant)
x_ft = torch.fft.rfft2(x)
# Multiply relevant Fourier modes
out_ft = torch.zeros(batchsize, self.out_channels, x.size(-2), x.size(-1)//2 + 1, dtype=torch.cfloat, device=x.device)
out_ft[:, :, :self.modes1, :self.modes2] = \
self.compl_mul2d(x_ft[:, :, :self.modes1, :self.modes2], self.weights1)
out_ft[:, :, -self.modes1:, :self.modes2] = \
self.compl_mul2d(x_ft[:, :, -self.modes1:, :self.modes2], self.weights2)
#Return to physical space
x = torch.fft.irfft2(out_ft, s=(x.size(-2), x.size(-1)))
return x
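    # Only the lowest modes1 x modes2 Fourier modes are kept: rfft2 stores
    # non-negative frequencies along the last axis, so the two assignments
    # above cover the low-frequency corners [:modes1] and [-modes1:] of the
    # first frequency axis, and all higher modes are implicitly zeroed before
    # the inverse transform.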
class FNO2d(nn.Module):
def __init__(self, modes1, modes2, width):
super(FNO2d, self).__init__()
"""
The overall network. It contains 4 layers of the Fourier layer.
        1. Lift the input to the desired channel dimension by self.fc0 .
2. 4 layers of the integral operators u' = (W + K)(u).
W defined by self.w; K defined by self.conv .
3. Project from the channel space to the output space by self.fc1 and self.fc2 .
input: the solution of the coefficient function and locations (a(x, y), x, y)
input shape: (batchsize, x=s, y=s, c=3)
output: the solution
output shape: (batchsize, x=s, y=s, c=1)
"""
self.modes1 = modes1
self.modes2 = modes2
self.width = width
self.fc0 = nn.Linear(3, self.width)
self.conv0 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv1 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv2 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv3 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.w0 = nn.Conv1d(self.width, self.width, 1)
self.w1 = nn.Conv1d(self.width, self.width, 1)
self.w2 = nn.Conv1d(self.width, self.width, 1)
self.w3 = nn.Conv1d(self.width, self.width, 1)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, 1)
def forward(self, x):
batchsize = x.shape[0]
size_x, size_y = x.shape[1], x.shape[2]
x = self.fc0(x)
x = x.permute(0, 3, 1, 2)
x1 = self.conv0(x)
x2 = self.w0(x.view(batchsize, self.width, -1)).view(batchsize, self.width, size_x, size_y)
x = x1 + x2
x = F.relu(x)
x1 = self.conv1(x)
x2 = self.w1(x.view(batchsize, self.width, -1)).view(batchsize, self.width, size_x, size_y)
x = x1 + x2
x = F.relu(x)
x1 = self.conv2(x)
x2 = self.w2(x.view(batchsize, self.width, -1)).view(batchsize, self.width, size_x, size_y)
x = x1 + x2
x = F.relu(x)
x1 = self.conv3(x)
x2 = self.w3(x.view(batchsize, self.width, -1)).view(batchsize, self.width, size_x, size_y)
x = x1 + x2
x = x.permute(0, 2, 3, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x
################################################################
# configs
################################################################
ntrain = 1000
ntest = 1000
batch_size = 100
learning_rate = 0.001
epochs = 500
step_size = 100
gamma = 0.5
modes = 8
width = 32
r = 1
h = 32
sub = 1
s = 32
P = 128
################################################################
# load data and data normalization
################################################################
d = np.load("../Data/train_darcy_dataset.npz")
U_train = d["U_train"].reshape(ntrain,32,32)[:,::sub,::sub]
X_train = d["X_train"]
Y_train = d["Y_train"]
S_train = d["s_train"].reshape(ntrain,32,32)[:,::sub,::sub]
d = np.load("../Data/test_darcy_dataset.npz")
U_test = d["U_test"].reshape(ntest,32,32)[:,::sub,::sub]
X_test = d["X_test"]
Y_test = d["Y_test"]
S_test = d["s_test"].reshape(ntest,32,32)[:,::sub,::sub]
dtype_double = torch.FloatTensor
cdtype_double = torch.cuda.DoubleTensor
U_train = torch.from_numpy(np.asarray(U_train)).type(dtype_double)
S_train = torch.from_numpy(np.asarray(S_train)).type(dtype_double)
U_test = torch.from_numpy(np.asarray(U_test)).type(dtype_double)
S_test = torch.from_numpy(np.asarray(S_test)).type(dtype_double)
x_train = U_train
y_train = S_train
x_test = U_test
y_test = S_test
grids = []
grids.append(np.linspace(0, 1, s))
grids.append(np.linspace(0, 1, s))
grid = np.vstack([xx.ravel() for xx in np.meshgrid(*grids)]).T
grid = grid.reshape(1,s,s,2)
grid = torch.tensor(grid, dtype=torch.float)
x_train = torch.cat([x_train.reshape(ntrain,s,s,1), grid.repeat(ntrain,1,1,1)], dim=3)
x_test = torch.cat([x_test.reshape(ntest,s,s,1), grid.repeat(ntest,1,1,1)], dim=3)
ind_train = torch.randint(s*s, (ntrain, P))
ind_test = torch.randint(s*s, (ntest, P))
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_train, y_train, ind_train), batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test, ind_test), batch_size=batch_size, shuffle=True)
################################################################
# training and evaluation
################################################################
batch_ind = torch.arange(batch_size).reshape(-1, 1).repeat(1, P)
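# batch_ind pairs each sample's row index with its P random flat pixel indices,
# so y[batch_ind, idx] gathers P scattered output locations per field. Training
# the FNO on this random subset mirrors the pointwise supervision used by the
# other models in this repository.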
model = FNO2d(modes, modes, width).cuda()
print(count_params(model))
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False)
start_time = timeit.default_timer()
for ep in range(epochs):
model.train()
t1 = default_timer()
train_l2 = 0
for x, y, idx in train_loader:
x, y = x.cuda(), y.cuda()
optimizer.zero_grad()
out = model(x).reshape(batch_size, s*s)
y = y.reshape(batch_size, s*s)
y = y[batch_ind, idx]
out = out[batch_ind, idx]
loss = myloss(out.view(batch_size,-1), y.view(batch_size,-1))
loss.backward()
optimizer.step()
train_l2 += loss.item()
scheduler.step()
model.eval()
test_l2 = 0.0
with torch.no_grad():
for x, y, idx in test_loader:
x, y = x.cuda(), y.cuda()
out = model(x).reshape(batch_size, s*s)
y = y.reshape(batch_size, s*s,1)
y = y[batch_ind, idx]
out = out[batch_ind, idx]
test_l2 += myloss(out.view(batch_size,-1), y.view(batch_size,-1)).item()
train_l2/= ntrain
test_l2 /= ntest
t2 = default_timer()
print(ep, t2-t1, train_l2, test_l2)
elapsed = timeit.default_timer() - start_time
print("The training wall-clock time is seconds is equal to %f seconds"%elapsed)
# x_train already includes the grid coordinates appended above (channels:
# a(x,y), x, y), so it is reused directly here; concatenating the grid a second
# time would produce five input channels while self.fc0 expects three.
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_train, y_train), batch_size=1, shuffle=False)
train_error_u_np = []
with torch.no_grad():
for x, y in train_loader:
x, y = x.cuda(), y.cuda()
out = model(x).reshape(1, s, s)
        train_error_u_np.append(np.linalg.norm(y.cpu().numpy().reshape(S_test.shape[1]*S_test.shape[2])- out.cpu().numpy().reshape(S_test.shape[1]*S_test.shape[2]),2)/np.linalg.norm(y.cpu().numpy().reshape(S_test.shape[1]*S_test.shape[2]),2))
print("The average train u error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(train_error_u_np),np.std(train_error_u_np),np.min(train_error_u_np),np.max(train_error_u_np)))
sub = 1
s = 32
d = np.load("data/test_darcy_dataset_FNO2.npz")
U_test = d["U_test"].reshape(ntest,32,32)[:,::sub,::sub]
X_test = d["X_test"]
Y_test = d["Y_test"]
S_test = d["s_test"].reshape(ntest,32,32)[:,::sub,::sub]
dtype_double = torch.FloatTensor
cdtype_double = torch.cuda.DoubleTensor
U_test = torch.from_numpy(np.asarray(U_test)).type(dtype_double)
S_test = torch.from_numpy(np.asarray(S_test)).type(dtype_double)
x_test = U_test
y_test = S_test
grids = []
grids.append(np.linspace(0, 1, s))
grids.append(np.linspace(0, 1, s))
grid = np.vstack([xx.ravel() for xx in np.meshgrid(*grids)]).T
grid = grid.reshape(1,s,s,2)
grid = torch.tensor(grid, dtype=torch.float)
x_test = torch.cat([x_test.reshape(ntest,s,s,1), grid.repeat(ntest,1,1,1)], dim=3)
pred_torch = torch.zeros(S_test.shape)
baseline_torch = torch.zeros(S_test.shape)
index = 0
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test), batch_size=1, shuffle=False)
test_error_u = []
test_error_u_np = []
with torch.no_grad():
for x, y in test_loader:
test_l2 = 0
x, y = x.cuda(), y.cuda()
out = model(x).reshape(1, s, s)
pred_torch[index,:,:] = out[:,:,:]
baseline_torch[index,:,:] = y[:,:,:]
test_l2 += myloss(out.view(1, -1), y.view(1, -1)).item()
test_error_u.append(test_l2)
        test_error_u_np.append(np.linalg.norm(y.cpu().numpy().reshape(S_test.shape[1]*S_test.shape[2])- out.cpu().numpy().reshape(S_test.shape[1]*S_test.shape[2]),2)/np.linalg.norm(y.cpu().numpy().reshape(S_test.shape[1]*S_test.shape[2]),2))
index = index + 1
print("The average test u error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(test_error_u),np.std(test_error_u),np.min(test_error_u),np.max(test_error_u)))
print("The average test u error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(test_error_u_np),np.std(test_error_u_np),np.min(test_error_u_np),np.max(test_error_u_np))) | 11,702 | 35.34472 | 242 | py |
# File: LOCA-main/Darcy/FNO/utilities3.py
import torch
import numpy as np
import scipy.io
import h5py
import torch.nn as nn
import operator
from functools import reduce
from functools import partial
#################################################
#
# Utilities
#
#################################################
import os
def get_freer_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
return str(np.argmax(memory_available))
os.environ['CUDA_VISIBLE_DEVICES']= get_freer_gpu()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# reading data
class MatReader(object):
def __init__(self, file_path, to_torch=True, to_cuda=False, to_float=True):
super(MatReader, self).__init__()
self.to_torch = to_torch
self.to_cuda = to_cuda
self.to_float = to_float
self.file_path = file_path
self.data = None
self.old_mat = None
self._load_file()
def _load_file(self):
try:
self.data = scipy.io.loadmat(self.file_path)
self.old_mat = True
except:
self.data = h5py.File(self.file_path)
self.old_mat = False
def load_file(self, file_path):
self.file_path = file_path
self._load_file()
def read_field(self, field):
x = self.data[field]
if not self.old_mat:
x = x[()]
x = np.transpose(x, axes=range(len(x.shape) - 1, -1, -1))
if self.to_float:
x = x.astype(np.float32)
if self.to_torch:
x = torch.from_numpy(x)
if self.to_cuda:
x = x.cuda()
return x
def set_cuda(self, to_cuda):
self.to_cuda = to_cuda
def set_torch(self, to_torch):
self.to_torch = to_torch
def set_float(self, to_float):
self.to_float = to_float
# normalization, pointwise gaussian
class UnitGaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(UnitGaussianNormalizer, self).__init__()
# x could be in shape of ntrain*n or ntrain*T*n or ntrain*n*T
self.mean = torch.mean(x, 0)
self.std = torch.std(x, 0)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
if sample_idx is None:
std = self.std + self.eps # n
mean = self.mean
else:
if len(self.mean.shape) == len(sample_idx[0].shape):
std = self.std[sample_idx] + self.eps # batch*n
mean = self.mean[sample_idx]
if len(self.mean.shape) > len(sample_idx[0].shape):
std = self.std[:,sample_idx]+ self.eps # T*batch*n
mean = self.mean[:,sample_idx]
# x is in shape of batch*n or T*batch*n
x = (x * std) + mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
# normalization, Gaussian
class GaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(GaussianNormalizer, self).__init__()
self.mean = torch.mean(x)
self.std = torch.std(x)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
x = (x * (self.std + self.eps)) + self.mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
# normalization, scaling by range
class RangeNormalizer(object):
def __init__(self, x, low=0.0, high=1.0):
super(RangeNormalizer, self).__init__()
mymin = torch.min(x, 0)[0].view(-1)
mymax = torch.max(x, 0)[0].view(-1)
self.a = (high - low)/(mymax - mymin)
self.b = -self.a*mymax + high
def encode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = self.a*x + self.b
x = x.view(s)
return x
def decode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = (x - self.b)/self.a
x = x.view(s)
return x
#loss function with rel/abs Lp loss
class LpLoss(object):
def __init__(self, d=2, p=2, size_average=True, reduction=True):
super(LpLoss, self).__init__()
        #Dimension and Lp-norm type are positive
assert d > 0 and p > 0
self.d = d
self.p = p
self.reduction = reduction
self.size_average = size_average
def abs(self, x, y):
num_examples = x.size()[0]
#Assume uniform mesh
h = 1.0 / (x.size()[1] - 1.0)
all_norms = (h**(self.d/self.p))*torch.norm(x.view(num_examples,-1) - y.view(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(all_norms)
else:
return torch.sum(all_norms)
return all_norms
def rel(self, x, y):
num_examples = x.size()[0]
diff_norms = torch.norm(x.reshape(num_examples,-1) - y.reshape(num_examples,-1), self.p, 1)
y_norms = torch.norm(y.reshape(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(diff_norms/y_norms)
else:
return torch.sum(diff_norms/y_norms)
return diff_norms/y_norms
def __call__(self, x, y):
return self.rel(x, y)
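# Worked example of the default relative loss (p=2): for a single sample
# x = [1., 1.] and target y = [1., 2.], rel = ||x - y||_2 / ||y||_2 = 1/sqrt(5),
# roughly 0.447; with size_average=False the per-sample values are summed
# instead of averaged.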
# Sobolev norm (HS norm)
# where we also compare the numerical derivatives between the output and target
class HsLoss(object):
def __init__(self, d=2, p=2, k=1, a=None, group=False, size_average=True, reduction=True):
super(HsLoss, self).__init__()
        #Dimension and Lp-norm type are positive
assert d > 0 and p > 0
self.d = d
self.p = p
self.k = k
self.balanced = group
self.reduction = reduction
self.size_average = size_average
        if a is None:
a = [1,] * k
self.a = a
def rel(self, x, y):
num_examples = x.size()[0]
diff_norms = torch.norm(x.reshape(num_examples,-1) - y.reshape(num_examples,-1), self.p, 1)
y_norms = torch.norm(y.reshape(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(diff_norms/y_norms)
else:
return torch.sum(diff_norms/y_norms)
return diff_norms/y_norms
def __call__(self, x, y, a=None):
nx = x.size()[1]
ny = x.size()[2]
k = self.k
balanced = self.balanced
a = self.a
x = x.view(x.shape[0], nx, ny, -1)
y = y.view(y.shape[0], nx, ny, -1)
k_x = torch.cat((torch.arange(start=0, end=nx//2, step=1),torch.arange(start=-nx//2, end=0, step=1)), 0).reshape(nx,1).repeat(1,ny)
k_y = torch.cat((torch.arange(start=0, end=ny//2, step=1),torch.arange(start=-ny//2, end=0, step=1)), 0).reshape(1,ny).repeat(nx,1)
k_x = torch.abs(k_x).reshape(1,nx,ny,1).to(x.device)
k_y = torch.abs(k_y).reshape(1,nx,ny,1).to(x.device)
x = torch.fft.fftn(x, dim=[1, 2])
y = torch.fft.fftn(y, dim=[1, 2])
if balanced==False:
weight = 1
if k >= 1:
weight += a[0]**2 * (k_x**2 + k_y**2)
if k >= 2:
weight += a[1]**2 * (k_x**4 + 2*k_x**2*k_y**2 + k_y**4)
weight = torch.sqrt(weight)
loss = self.rel(x*weight, y*weight)
else:
loss = self.rel(x, y)
if k >= 1:
weight = a[0] * torch.sqrt(k_x**2 + k_y**2)
loss += self.rel(x*weight, y*weight)
if k >= 2:
weight = a[1] * torch.sqrt(k_x**4 + 2*k_x**2*k_y**2 + k_y**4)
loss += self.rel(x*weight, y*weight)
loss = loss / (k+1)
return loss
# A simple feedforward neural network
class DenseNet(torch.nn.Module):
def __init__(self, layers, nonlinearity, out_nonlinearity=None, normalize=False):
super(DenseNet, self).__init__()
self.n_layers = len(layers) - 1
assert self.n_layers >= 1
self.layers = nn.ModuleList()
for j in range(self.n_layers):
self.layers.append(nn.Linear(layers[j], layers[j+1]))
if j != self.n_layers - 1:
if normalize:
self.layers.append(nn.BatchNorm1d(layers[j+1]))
self.layers.append(nonlinearity())
if out_nonlinearity is not None:
self.layers.append(out_nonlinearity())
def forward(self, x):
for _, l in enumerate(self.layers):
x = l(x)
return x
# print the number of parameters
def count_params(model):
c = 0
for p in list(model.parameters()):
c += reduce(operator.mul, list(p.size()))
return c
| 9,157 | 27.798742 | 139 | py |
# File: LOCA-main/Darcy/LOCA/LOCADarcy.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pathos.pools import ProcessPool
from scipy import linalg, interpolate
from sklearn import gaussian_process as gp
import argparse
from jax.example_libraries.stax import Dense, Gelu, Relu
from jax.example_libraries import stax
import os
import timeit
from jax.example_libraries import optimizers
# from jax.api import vjp
from jax import vjp
from absl import app
import jax
import jax.numpy as jnp
import numpy as np
from jax.numpy.linalg import norm
from jax import random, grad, vmap, jit, pmap
from functools import partial
from torch.utils import data
from scipy import interpolate
from tqdm import trange
from math import log, sqrt, sin, cos
import itertools
import torch
from kymatio.numpy import Scattering2D
from numpy.polynomial.legendre import leggauss
def get_freer_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Used >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
return str(np.argmin(memory_available))
os.environ['CUDA_VISIBLE_DEVICES']= get_freer_gpu()
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']="False"
def input_construction(U,X,m=100, Nx=100, Ny=100, N=1000,du=1,dx=2):
dx = 1./(Nx-1)
dy = 1./(Ny-1)
x = np.arange(0,1+dx,dx)
y = np.arange(0,1+dy,dy)
U = U.reshape(Nx,Ny)
u = interpolate.interp2d(x,y,U[:,:],kind="cubic")
X_new = np.linspace(0,1,num=int(sqrt(m)))
Y_new = np.linspace(0,1,num=int(sqrt(m)))
XX_new, YY_new = np.meshgrid(X_new,Y_new)
U_all = np.zeros((int(sqrt(m)),int(sqrt(m))))
U_all[:,:] = u(X_new, Y_new)
X_all = np.concatenate((XX_new.flatten()[:,None],YY_new.flatten()[:,None]),-1)
U_all = U_all.reshape(int(sqrt(m))*int(sqrt(m)),du)
return U_all, X_all
def output_construction(s,Y,P=100,ds=1, dy=2, N=1000,Nx=100,Ny=100):
s = s.reshape(Nx,Ny)
x = np.random.randint(Nx, size=P)
y = np.random.randint(Ny, size=P)
Y_all = np.hstack([x[:, None], y[:,None]]) * [1./(Nx - 1), 1./(Ny - 1)]
s_all = s[x][range(P), y][:, None]
return s_all, Y_all
def pairwise_distances(dist,**arg):
return jit(vmap(vmap(partial(dist,**arg),in_axes=(None,0)),in_axes=(0,None)))
def euclid_distance(x,y,square=True):
XX=jnp.dot(x,x)
YY=jnp.dot(y,y)
XY=jnp.dot(x,y)
return XX+YY-2*XY
class DataGenerator(data.Dataset):
def __init__(self, inputsxuy, inputsxu, y, s, z, w,
batch_size=100, rng_key=random.PRNGKey(1234)):
'Initialization'
self.inputsxuy = inputsxuy
self.inputsxu = inputsxu
self.y = y
self.s = s
self.z = z
self.w = w
self.N = inputsxu.shape[0]
self.batch_size = batch_size
self.key = rng_key
# @partial(jit, static_argnums=(0,))
def __getitem__(self, index):
'Generate one batch of data'
self.key, subkey = random.split(self.key)
inputs,outputs = self.__data_generation(subkey)
return inputs, outputs
@partial(jit, static_argnums=(0,))
def __data_generation(self, key):
'Generates data containing batch_size samples'
idx = random.choice(key, self.N, (self.batch_size,), replace=False)
s = self.s[idx,:,:]
inputsxu = self.inputsxu[idx,:,:]
y = self.y[idx,:,:]
z = self.z[idx,:,:]
w = self.w[idx,:,:]
inputs = (inputsxu, y, z, w)
return inputs, s
class PositionalEncodingY:
def __init__(self, Y, d_model, max_len = 100, H=20):
self.d_model = int(np.ceil(d_model/4)*2)
self.Y = Y
self.max_len = max_len
self.H = H
@partial(jit, static_argnums=(0,))
def forward(self, x):
pex = np.zeros((x.shape[0], self.max_len, self.H))
pey = np.zeros((x.shape[0], self.max_len, self.H))
T = jnp.take(self.Y, 0, axis=2)[:,:,None]
X = jnp.take(self.Y, 1, axis=2)[:,:,None]
positionT = jnp.tile(T,(1,1,self.H))
positionX = jnp.tile(X,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
pex = jax.ops.index_update(pex, jax.ops.index[:,:,0::2], jnp.cos(positionT[:,:,0::2] * div_term))
pex = jax.ops.index_update(pex, jax.ops.index[:,:,1::2], jnp.sin(positionT[:,:,1::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,0::2], jnp.cos(positionX[:,:,0::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,1::2], jnp.sin(positionX[:,:,1::2] * div_term))
pos_embedding = jnp.concatenate((pex,pey),axis=-1) # [[x,pex],
# [y,pey]]
x = jnp.concatenate([x, pos_embedding], -1)
return x
def scatteringTransform(sig, l=100, m=100, training_batch_size = 100):
scattering = Scattering2D(J=1, L=3, max_order=2, shape=(32, 32))
cwtmatr = np.zeros((training_batch_size, 768, 1))
sig = np.array(sig)
for i in range(0,training_batch_size):
scatteringCoeffs = scattering(sig[i,:,:].reshape(32,32))
cwtmatr[i,:,:] = scatteringCoeffs[:3,:,:].flatten()[:,None]
return cwtmatr
class LOCA:
def __init__(self, q_layers, g_layers, v_layers , m=100, P=100, jac_det=None):
# Network initialization and evaluation functions
self.q_init, self.q_apply = self.init_NN(q_layers, activation=Gelu)
self.in_shape = (-1, q_layers[0])
self.out_shape, q_params = self.q_init(random.PRNGKey(10000), self.in_shape)
self.v_init, self.v_apply = self.init_NN(v_layers, activation=Gelu)
self.in_shape = (-1, v_layers[0])
self.out_shape, v_params = self.v_init(random.PRNGKey(10000), self.in_shape)
self.v_apply = jit(self.v_apply)
self.g_init, self.g_apply = self.init_NN(g_layers, activation=Gelu)
self.in_shape = (-1, g_layers[0])
self.out_shape, g_params = self.g_init(random.PRNGKey(10000), self.in_shape)
self.g_apply = jit(self.g_apply)
# RBF kernel parameters
beta = [10.]
gamma = [0.2]
# Model parameters
params = (beta, gamma,q_params, g_params, v_params)
self.jac_det = jac_det
# Use optimizers to set optimizer initialization and update functions
self.opt_init,self.opt_update,self.get_params = optimizers.adam(optimizers.exponential_decay(1e-3,
decay_steps=100,
decay_rate=0.99))
self.opt_state = self.opt_init(params)
# Logger
self.itercount = itertools.count()
self.loss_log = []
self.grads = []
self.vdistance_function = vmap(pairwise_distances(euclid_distance))
def init_NN(self, Q, activation=Gelu):
layers = []
num_layers = len(Q)
if num_layers < 2:
net_init, net_apply = stax.serial()
else:
for i in range(0, num_layers-2):
layers.append(Dense(Q[i+1]))
layers.append(activation)
layers.append(Dense(Q[-1]))
net_init, net_apply = stax.serial(*layers)
return net_init, net_apply
@partial(jax.jit, static_argnums=0)
def RBF(self, X, Y, gamma, beta):
d = self.vdistance_function(X, Y)
return beta[0]*jnp.exp(-gamma[0]*d) + 1e-5
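    # euclid_distance returns the squared distance (xx + yy - 2xy), so the
    # kernel above is kappa(x, z) = beta * exp(-gamma * ||x - z||^2) with
    # trainable scalars beta and gamma (initialised to 10 and 0.2 in __init__)
    # and a small 1e-5 offset that keeps the kernel matrix strictly positive.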
@partial(jax.jit, static_argnums=0)
def LOCA_net(self, params, inputs, ds=1):
beta, gamma, q_params, g_params, v_params = params
u, y, z, w = inputs
y = self.q_apply(q_params,y)
z = self.q_apply(q_params,z)
K = self.RBF(z, z, gamma, beta)
Kzz = jnp.sqrt(self.jac_det*jnp.einsum("ijk,ikl->ijl",K,w))
K = self.RBF(y, z, gamma, beta)
Kyz = jnp.sqrt(self.jac_det*jnp.einsum("ijk,ikl->ijl",K,w))
mean_K = jnp.matmul(Kyz, jnp.swapaxes(Kzz,1,2))
K = jnp.divide(K,mean_K)
g = self.g_apply(g_params,z)
g = self.jac_det*jnp.einsum("ijk,iklm,ik->ijlm",K,g.reshape(g.shape[0],g.shape[1], ds, int(g.shape[-1]/ds)),w[:,:,-1])
g = jax.nn.softmax(g, axis=-1)
v = self.v_apply(v_params, u.reshape(u.shape[0],1,u.shape[1]*u.shape[2]))
v = v.reshape(v.shape[0],int(v.shape[2]/ds),ds)
Guy = jnp.einsum("ijkl,ilk->ijk", g,v)
return Guy
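    # Sketch of the forward pass above: queries y and quadrature nodes z are
    # lifted by the shared q network; the kernel against the Gauss-Legendre
    # nodes (weights w, Jacobian jac_det) supplies the normalisation Kzz/Kyz,
    # the normalised kernel averages the g features over the nodes, a softmax
    # turns them into attention weights across the n_hat basis functions, and
    # those weights are contracted with the v features of the
    # scattering-transformed input u.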
@partial(jax.jit, static_argnums=0)
def loss(self, params, batch):
inputs, outputs = batch
y_pred = self.LOCA_net(params,inputs)
loss = np.mean((outputs.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def lossT(self, params, batch):
inputs, outputs = batch
y_pred = self.LOCA_net(params,inputs)
loss = np.mean((outputs.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def L2errorT(self, params, batch):
inputs, y = batch
y_pred = self.LOCA_net(params,inputs)
        return norm(y_pred.flatten() - y.flatten(), 2)/norm(y.flatten(),2)
@partial(jax.jit, static_argnums=0)
def L2error(self, params, batch):
inputs, y = batch
y_pred = self.LOCA_net(params,inputs)
        return norm(y_pred.flatten() - y.flatten(), 2)/norm(y.flatten(),2)
@partial(jit, static_argnums=(0,))
def step(self, i, opt_state, batch):
params = self.get_params(opt_state)
g = grad(self.loss)(params, batch)
return self.opt_update(i, g, opt_state), g
def train(self, train_dataset, test_dataset, nIter = 10000):
train_data = iter(train_dataset)
test_data = iter(test_dataset)
pbar = trange(nIter)
for it in pbar:
train_batch = next(train_data)
test_batch = next(test_data)
self.opt_state, g = self.step(next(self.itercount), self.opt_state, train_batch)
if it % 100 == 0:
params = self.get_params(self.opt_state)
self.grads.append(g)
loss_train = self.loss(params, train_batch)
loss_test = self.lossT(params, test_batch)
errorTrain = self.L2error(params, train_batch)
errorTest = self.L2errorT(params, test_batch)
self.loss_log.append(loss_train)
pbar.set_postfix({'Training loss': loss_train,
'Testing loss' : loss_test,
'Test error': errorTest,
'Train error': errorTrain})
@partial(jit, static_argnums=(0,))
def predict(self, params, inputs):
s_pred = self.LOCA_net(params,inputs)
return s_pred
@partial(jit, static_argnums=(0,))
def predictT(self, params, inputs):
s_pred = self.LOCA_net(params,inputs)
return s_pred
def count_params(self):
params = self.get_params(self.opt_state)
params_flat, _ = ravel_pytree(params)
print("The number of model parameters is:",params_flat.shape[0])
def predict_function(U_in, X_in, Y_in, P=128, m=100, P_test=1024,num_test=200, Nx=30, Ny=32,model=None,dy=2, training_batch_size=100,params= None, L=128, mode="train", X_sim=None, Y_sim=None, H=20, z= None, w=None):
print("Predicting the solution for the full resolution")
ds = 1
y = np.expand_dims(Y_in,axis=0)
y = np.tile(y,(num_test,1,1))
uCNN_super_all = np.zeros((num_test, Nx*Ny,ds))
inputs_trainxu = jnp.asarray(scatteringTransform(U_in, l=L, m=m, training_batch_size=num_test))
# inputs_trainxu = jnp.asarray(U_in)
pos_encodingy = PositionalEncodingY(y,int(y.shape[1]*y.shape[2]), max_len = Nx*Ny, H=H)
y_train = pos_encodingy.forward(y)
uCNN_super_all = model.predict(params, (inputs_trainxu,y_train, z, w))
return uCNN_super_all, y[:,:,1:2], y[:,:,0:1]
def error_full_resolution(uCNN_super_all, s_all,tag='train', num_train=1000, P=128, Nx=32, Ny=32, idx=None):
test_error_u = []
z = uCNN_super_all.reshape(num_train,Nx,Ny)
s = s_all.reshape(num_train,Nx,Ny)
s = np.swapaxes(s,1,2)
for i in range(0,num_train):
test_error_u.append(norm(s[i,:,:]- z[i,:,:], 2)/norm(s[i,:,:], 2))
print("The average "+tag+" u error for the super resolution is %e, the standard deviation %e, the minimum error is %e and the maximum error is %e"%(np.mean(test_error_u),np.std(test_error_u),np.min(test_error_u),np.max(test_error_u)))
absolute_error = np.abs(z-s)
return absolute_error, np.mean(test_error_u), test_error_u
def minmax(a, n, mean):
minpos = a.index(min(a))
maxpos = a.index(max(a))
meanpos = min(range(len(a)), key=lambda i: abs(a[i]-mean))
print("The maximum is at position", maxpos)
print("The minimum is at position", minpos)
print("The mean is at position", meanpos)
return minpos,maxpos,meanpos
TRAINING_ITERATIONS = 20000
P = 128
m = 1024
L = 1
N_hat = 1
num_train = 1000
num_test = 1000
casenum_train = 2
casenum_test = 2
training_batch_size = 100
dx = 2
du = 1
dy = 2
ds = 1
n_hat = 100
l = 100
Nx = 32
Ny = 32
H = 6
d = np.load("../Data/train_darcy_dataset.npz")
u_train = d["U_train"]
x_train = d["X_train"]
Y_train = d["Y_train"]
S_train = d["s_train"]
d = np.load("../Data/test_darcy_dataset.npz")
u_test = d["U_test"]
x_test = d["X_test"]
Y_test = d["Y_test"]
S_test = d["s_test"]
polypoints = 14
lb = np.array([0.0, 0.0])
ub = np.array([1.0, 1.0])
# GLL nodes and weights in [-1,1]
z1, w1 = leggauss(polypoints)
z2, w2 = leggauss(polypoints)
# Rescale nodes to [lb,ub]
x1 = 0.5*(ub[0] - lb[0])*(z1 + 1.0) + lb[0]
x2 = 0.5*(ub[1] - lb[1])*(z2 + 1.0) + lb[1]
# Determinant of Jacobian of mapping [lb,ub]-->[-1,1]^2
jac_det = 0.5**2 * (ub[0]-lb[0]) * (ub[1]-lb[1])
Z_1, Z_2 = np.meshgrid(z1,z2,indexing="ij")
Z = np.concatenate((Z_1.flatten()[:,None], Z_2.flatten()[:,None]), axis=-1)
Z = np.tile(Z,(num_train,1,1))
W = np.outer(w1, w2).flatten()[:,None]
W = np.tile(W,(num_train,1,1))
polypoints = polypoints**dy
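# Gauss-Legendre quadrature setup: leggauss returns nodes and weights on
# [-1, 1], x1/x2 are those nodes rescaled to [lb, ub], W = outer(w1, w2) is the
# tensor-product weight vector, and jac_det = (1/2)^2 * area is the
# change-of-variables factor, so jac_det * sum_i W_i * f(x_i) approximates the
# integral of f over [0, 1]^2. Note that Z above is assembled from the
# reference nodes z1/z2 rather than the rescaled x1/x2, so the network receives
# quadrature locations on [-1, 1]^2.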
Y_train_in = Y_train
Y_test_in = Y_test
s_all_test = S_test[:num_test,:]
s_all_train = S_train[:num_train,:]
s_train = np.zeros((num_train*N_hat,P,ds))
y_train = np.zeros((num_train*N_hat,P,dy))
U_train = np.zeros((num_train*N_hat,m,du))
X_train = np.zeros((num_train*N_hat,m,dx))
s_test = np.zeros((num_test,P,ds))
y_test = np.zeros((num_test,P,dy))
U_test = np.zeros((num_test,m,du))
X_test = np.zeros((num_test,m,dx))
for j in range(0,N_hat):
for i in range(0,num_train):
s_train[i + j*num_train,:,:], y_train[i+ j*num_train,:,:] = output_construction(S_train[i,:], Y_train, Nx=Nx, Ny=Ny, P=P, ds=ds)
U_train[i+ j*num_train,:,:], X_train[i+ j*num_train,:,:] = input_construction( u_train[i,:], x_train, Nx=Nx, Ny=Ny, m=m, du=du)
for i in range(num_test):
s_test[i,:,:], y_test[i,:,:] = output_construction(S_test[i,:], Y_test, Nx=Nx, Ny=Ny, P=P, ds=ds)
U_test[i,:,:], X_test[i,:,:] = input_construction( u_test[i,:], x_test, Nx=Nx, Ny=Ny, m=m, du=du)
num_train = N_hat*num_train
X_train2 = X_train
U_train2 = U_train
X_test2 = X_test
U_test2 = U_test
X_train = jnp.asarray(X_train)
U_train = np.asarray(U_train)
y_train = jnp.asarray(y_train)
s_train = jnp.asarray(s_train)
X_test = jnp.asarray(X_test)
U_test = np.asarray(U_test)
y_test = jnp.asarray(y_test)
s_test = jnp.asarray(s_test)
z = jnp.asarray(Z)
w = jnp.asarray(W)
X_train = jnp.reshape(X_train,(num_train,m,dx))
U_train = np.reshape(U_train,(num_train,m,du))
y_train = jnp.reshape(y_train,(num_train,P,dy))
s_train = jnp.reshape(s_train,(num_train,P,ds))
X_test = jnp.reshape(X_test,(num_test,m,dx))
U_test = np.reshape(U_test,(num_test,m,du))
y_test = jnp.reshape(y_test,(num_test,P,dy))
s_test = jnp.reshape(s_test,(num_test,P,ds))
z = jnp.reshape(z,(num_test,polypoints,dy))
w = jnp.reshape(w,(num_test,polypoints,1))
y_train_pos = y_train
y_train_posT = y_test
pos_encodingy = PositionalEncodingY(y_train_pos,int(y_train_pos.shape[1]*y_train_pos.shape[2]), max_len = P, H=H)
y_train = pos_encodingy.forward(y_train)
del pos_encodingy
pos_encodingy = PositionalEncodingY(z,int(z.shape[1]*z.shape[2]), max_len = polypoints, H=H)
z = pos_encodingy.forward(z)
del pos_encodingy
pos_encodingyt = PositionalEncodingY(y_train_posT,int(y_train_posT.shape[1]*y_train_posT.shape[2]), max_len = P, H=H)
y_test = pos_encodingyt.forward(y_test)
del pos_encodingyt
inputs_trainxu = jnp.asarray(scatteringTransform(U_train, l=l, m=m, training_batch_size=num_train))
inputs_testxu = jnp.asarray(scatteringTransform(U_test , l=l, m=m, training_batch_size=num_test))
train_dataset = DataGenerator(inputs_trainxu, inputs_trainxu, y_train, s_train, z, w, training_batch_size)
train_dataset = iter(train_dataset)
test_dataset = DataGenerator(inputs_testxu, inputs_testxu, y_test, s_test, z, w, training_batch_size)
test_dataset = iter(test_dataset)
q_layers = [L*dy+H*dy, 100, 100, l]
v_layers = [768*du, 100, 100, ds*n_hat]
g_layers = [l, 100, 100, ds*n_hat]
# Define model
model = LOCA(q_layers, g_layers, v_layers, m=m, P=P, jac_det=jac_det)
model.count_params()
start_time = timeit.default_timer()
model.train(train_dataset, test_dataset, nIter=TRAINING_ITERATIONS)
elapsed = timeit.default_timer() - start_time
print("The training wall-clock time is seconds is equal to %f seconds"%elapsed)
params = model.get_params(model.opt_state)
uCNN_super_all_train, X, T = predict_function(U_train, X_train, Y_train_in, model=model, P=P, L= L,Nx=Nx,Ny=Ny, params=params, H=H, z=z, w=w, num_test=num_train)
uCNN_super_all_test , X, T = predict_function(U_test, X_test, Y_test_in, model=model, P=P, L=L,Nx=Nx,Ny=Ny, params=params,H=H, z=z, w=w, num_test=num_test)
absolute_error_test, mean_test_error, test_error = error_full_resolution(uCNN_super_all_test, s_all_test, tag='test', P=P, Nx=Nx, Ny=Ny, idx=None, num_train=num_test)
absolute_error_train, mean_train_error, train_error = error_full_resolution(uCNN_super_all_train, s_all_train, tag='train', P=P, Nx=Nx, Ny=Ny, idx=None, num_train=num_train)
| 18,193 | 34.604697 | 238 | py |
# File: LOCA-main/MMNIST/DeepONet/DeepONet_MNIST.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pathos.pools import ProcessPool
from scipy import linalg, interpolate
from sklearn import gaussian_process as gp
import argparse
from jax.example_libraries.stax import Dense, Gelu, Relu
from jax.example_libraries import stax
import os
import timeit
from jax.example_libraries import optimizers
from absl import app
import jax
import jax.numpy as jnp
from jax import vjp
import numpy as np
from jax.numpy.linalg import norm
from jax import random, grad, vmap, jit, pmap
from functools import partial
from torch.utils import data
from scipy import interpolate
from tqdm import trange
from math import log, sqrt, sin, cos
import itertools
import torch
import scipy.signal as signal
from kymatio.numpy import Scattering2D
def get_freer_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Used >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
return str(np.argmin(memory_available))
os.environ['CUDA_VISIBLE_DEVICES']= get_freer_gpu()
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']="False"
def output_construction(s,T, X, Y,P=100,ds=1, dy=2, N=1000,Nx=100,Ny=100, Nt=2):
U_all = np.zeros((P,ds))
Y_all = np.zeros((P,ds))
t = np.random.randint(Nt, size=P)
x = np.random.randint(Nx, size=P)
y = np.random.randint(Ny, size=P)
Y_all = np.concatenate((T[t,x][range(P),y][:,None], X[t,x][range(P),y][:,None], Y[t,x][range(P),y][:,None]),axis=-1)
U_all[:,:] = s[t,x][range(P), y]
return U_all, Y_all
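# Space-time variant of the random subsampling used for Darcy: P random
# (t, x, y) grid indices are drawn per sample across the Nt frames, and the
# returned query matrix stacks the corresponding (t, x, y) coordinates, so the
# targets are pointwise values of a time-dependent field.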
class DataGenerator(data.Dataset):
def __init__(self, u, y, s,
batch_size=100, rng_key=random.PRNGKey(1234)):
'Initialization'
self.u = u
self.y = y
self.s = s
self.N = u.shape[0]
self.batch_size = batch_size
self.key = rng_key
def __getitem__(self, index):
'Generate one batch of data'
self.key, subkey = random.split(self.key)
inputs,outputs = self.__data_generation(subkey)
return inputs, outputs
@partial(jit, static_argnums=(0,))
def __data_generation(self, key):
'Generates data containing batch_size samples'
idx = random.choice(key, self.N, (self.batch_size,), replace=False)
s = self.s[idx,:,:]
u = self.u[idx,:,:]
y = self.y[idx,:,:]
inputs = (u, y)
return inputs, s
class PositionalEncodingY:
def __init__(self, Y, d_model, max_len = 100, H=4):
self.d_model = int(np.ceil(d_model/6)*2)
self.Y = Y
self.max_len = max_len
self.H = H
def forward(self, x):
pet = np.zeros((x.shape[0], self.max_len, self.H))
pex = np.zeros((x.shape[0], self.max_len, self.H))
pey = np.zeros((x.shape[0], self.max_len, self.H))
T = jnp.take(self.Y, 0, axis=2)[:,:,None]
X = jnp.take(self.Y, 1, axis=2)[:,:,None]
Y = jnp.take(self.Y, 2, axis=2)[:,:,None]
positionT = jnp.tile(T,(1,1,self.H))
positionX = jnp.tile(X,(1,1,self.H))
positionY = jnp.tile(Y,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
pet = jax.ops.index_update(pet, jax.ops.index[:,:,0::2], jnp.cos(positionT[:,:,0::2] * div_term))
pet = jax.ops.index_update(pet, jax.ops.index[:,:,1::2], jnp.sin(positionT[:,:,1::2] * div_term))
pex = jax.ops.index_update(pex, jax.ops.index[:,:,0::2], jnp.cos(positionX[:,:,0::2] * div_term))
pex = jax.ops.index_update(pex, jax.ops.index[:,:,1::2], jnp.sin(positionX[:,:,1::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,0::2], jnp.cos(positionY[:,:,0::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,1::2], jnp.sin(positionY[:,:,1::2] * div_term))
pos_embedding = jnp.concatenate((pet,pex,pey),axis=-1)
x = jnp.concatenate([x, pos_embedding], -1)
return x
class PositionalEncodingU:
def __init__(self, Y, d_model, max_len = 100, H=20):
self.d_model = int(np.ceil(d_model/4)*2)
self.Y = Y
self.max_len = max_len
self.H = H
@partial(jit, static_argnums=(0,))
def forward(self, x):
pex = np.zeros((x.shape[0], self.max_len, self.H))
pey = np.zeros((x.shape[0], self.max_len, self.H))
T = jnp.take(self.Y, 0, axis=2)[:,:,None]
X = jnp.take(self.Y, 1, axis=2)[:,:,None]
positionT = jnp.tile(T,(1,1,self.H))
positionX = jnp.tile(X,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
pex = jax.ops.index_update(pex, jax.ops.index[:,:,0::2], jnp.cos(positionT[:,:,0::2] * div_term))
pex = jax.ops.index_update(pex, jax.ops.index[:,:,1::2], jnp.sin(positionT[:,:,1::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,0::2], jnp.cos(positionX[:,:,0::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,1::2], jnp.sin(positionX[:,:,1::2] * div_term))
pos_embedding = jnp.concatenate((pex,pey),axis=-1)
x = jnp.concatenate([x, pos_embedding], -1)
return x
class DON:
def __init__(self,branch_layers, trunk_layers , m=100, P=100, mn=None, std=None):
# Network initialization and evaluation functions
seed = np.random.randint(10000)
self.branch_init, self.branch_apply = self.init_NN(branch_layers, activation=Gelu)
self.in_shape = (-1, branch_layers[0])
self.out_shape, branch_params = self.branch_init(random.PRNGKey(seed), self.in_shape)
seed = np.random.randint(10000)
self.trunk_init, self.trunk_apply = self.init_NN(trunk_layers, activation=Gelu)
self.in_shape = (-1, trunk_layers[0])
self.out_shape, trunk_params = self.trunk_init(random.PRNGKey(seed), self.in_shape)
params = (trunk_params, branch_params)
self.opt_init,self.opt_update,self.get_params = optimizers.adam(optimizers.exponential_decay(1e-3,
decay_steps=100,
decay_rate=0.99))
self.opt_state = self.opt_init(params)
self.itercount = itertools.count()
self.loss_log = []
self.mean = mn
self.std = std
def init_NN(self, Q, activation=Gelu):
layers = []
num_layers = len(Q)
if num_layers < 2:
net_init, net_apply = stax.serial()
else:
for i in range(0, num_layers-2):
layers.append(Dense(Q[i+1]))
layers.append(activation)
layers.append(Dense(Q[-1]))
net_init, net_apply = stax.serial(*layers)
return net_init, net_apply
@partial(jax.jit, static_argnums=0)
def DON(self, params, inputs, ds=2):
trunk_params, branch_params = params
inputsxu, inputsy = inputs
t = self.trunk_apply(trunk_params, inputsy).reshape(inputsy.shape[0], inputsy.shape[1], ds, int(1000/ds))
b = self.branch_apply(branch_params, inputsxu.reshape(inputsxu.shape[0],1,inputsxu.shape[1]*inputsxu.shape[2]))
b = b.reshape(b.shape[0],int(b.shape[2]/ds),ds)
Guy = jnp.einsum("ijkl,ilk->ijk", t,b)
return Guy
@partial(jax.jit, static_argnums=0)
def loss(self, params, batch):
inputs, y = batch
y_pred = self.DON(params,inputs)
y = y*self.std + self.mean
y_pred = y_pred*self.std + self.mean
loss = np.mean((y.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def lossT(self, params, batch):
inputs, outputs = batch
y_pred = self.DON(params,inputs)
y_pred = y_pred*self.std + self.mean
loss = np.mean((outputs.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def L2errorT(self, params, batch):
inputs, y = batch
y_pred = self.DON(params,inputs)
y_pred = y_pred*self.std + self.mean
return norm(y.flatten() - y_pred.flatten(), 2)/norm(y.flatten(),2)
@partial(jax.jit, static_argnums=0)
def L2error(self, params, batch):
inputs, y = batch
y_pred = self.DON(params,inputs)
y = y*self.std + self.mean
y_pred = y_pred*self.std + self.mean
return norm(y.flatten() - y_pred.flatten(), 2)/norm(y.flatten(),2)
@partial(jit, static_argnums=(0,))
def step(self, i, opt_state, batch):
params = self.get_params(opt_state)
g = grad(self.loss)(params, batch)
return self.opt_update(i, g, opt_state)
def train(self, train_dataset, test_dataset, nIter = 10000):
train_data = iter(train_dataset)
test_data = iter(test_dataset)
pbar = trange(nIter)
for it in pbar:
train_batch = next(train_data)
test_batch = next(test_data)
self.opt_state = self.step(next(self.itercount), self.opt_state, train_batch)
if it % 100 == 0:
params = self.get_params(self.opt_state)
loss_train = self.loss(params, train_batch)
loss_test = self.lossT(params, test_batch)
errorTrain = self.L2error(params, train_batch)
errorTest = self.L2errorT(params, test_batch)
self.loss_log.append(loss_train)
pbar.set_postfix({'Training loss': loss_train,
'Testing loss' : loss_test,
'Test error': errorTest,
'Train error': errorTrain})
@partial(jit, static_argnums=(0,))
def predict(self, params, inputs):
s_pred = self.DON(params,inputs)
return s_pred*self.std + self.mean
def ravel_list(self, *lst):
return jnp.concatenate([jnp.ravel(elt) for elt in lst]) if lst else jnp.array([])
def ravel_pytree(self, pytree):
leaves, treedef = jax.tree_util.tree_flatten(pytree)
flat, unravel_list = vjp(self.ravel_list, *leaves)
unravel_pytree = lambda flat: jax.tree_util.tree_unflatten(treedef, unravel_list(flat))
return flat, unravel_pytree
def count_params(self, params):
trunk_params, branch_params = params
blv, _ = self.ravel_pytree(branch_params)
tlv, _ = self.ravel_pytree(trunk_params)
print("The number of model parameters is:",blv.shape[0]+tlv.shape[0])
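# Shape note for DON.DON above: with n_hat = 500 and ds = 2, both subnetworks
# end in ds*n_hat = 1000 units, which is what the hard-coded int(1000/ds)
# assumes. The einsum contracts the shared basis index; a toy check with
# hypothetical sizes:
def _don_contraction_demo():
    t = jnp.ones((2, 5, 2, 7))   # (batch, P, ds, n_basis)
    b = jnp.ones((2, 7, 2))      # (batch, n_basis, ds)
    return jnp.einsum("ijkl,ilk->ijk", t, b).shape  # (2, 5, 2)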
def predict_function(U_in,Y_in,num_test=1000, model=None,params= None, H=4):
y = np.expand_dims(Y_in,axis=0)
y = np.tile(y,(num_test,1,1))
inputs_trainxu = jnp.asarray(U_in)
pos_encodingy = PositionalEncodingY(y,int(y.shape[1]*y.shape[2]), max_len = Y_in.shape[0], H=H)
y = pos_encodingy.forward(y)
del pos_encodingy
uCNN_super_all = model.predict(params, (inputs_trainxu, y))
return uCNN_super_all, y[:,:,1:2], y[:,:,0:1]
def error_full_resolution(uCNN_super_all, s_all,tag='train', num_train=1000,P=128, Nx=30, Ny=30, Nt=10, idx=None, ds=2):
z = uCNN_super_all.reshape(num_train,Nt*Nx*Ny,ds)
s = s_all.reshape(num_train,Nt*Nx*Ny,ds)
test_error_u = []
for i in range(0,num_train):
test_error_u.append(norm(s[i,:,0]- z[i,:,0], 2)/norm(s[i,:,0], 2))
    print("The average "+tag+" u error for the super resolution is %e, the standard deviation is %e, the minimum error is %e and the maximum error is %e"%(np.mean(test_error_u),np.std(test_error_u),np.min(test_error_u),np.max(test_error_u)))
test_error_v = []
for i in range(0,num_train):
test_error_v.append(norm(s[i,:,1]- z[i,:,1], 2)/norm(s[i,:,1], 2))
    print("The average "+tag+" v error for the super resolution is %e, the standard deviation is %e, the minimum error is %e and the maximum error is %e"%(np.mean(test_error_v),np.std(test_error_v),np.min(test_error_v),np.max(test_error_v)))
absolute_error = np.abs(z-s)
return absolute_error, np.mean(test_error_u),np.mean(test_error_v), test_error_u, test_error_v
def minmax(a, n, mean):
minpos = a.index(min(a))
maxpos = a.index(max(a))
meanpos = min(range(len(a)), key=lambda i: abs(a[i]-mean))
print("The maximum is at position", maxpos)
print("The minimum is at position", minpos)
print("The mean is at position", meanpos)
return minpos,maxpos,meanpos
TRAINING_ITERATIONS = 100000
P = 56
m = int(28*28)
N_hat = 1
num_train = 60000
num_test = 10000
training_batch_size = 500
dx = 2
du = 2
dy = 3
ds = 2
n_hat = 500
Nx = 28
Ny = 28
H_y = 10
H_u = 10
ind = 0
idxT = [11]
Nt = len(idxT)
d = np.load("../Data/MMNIST_dataset_train.npz")
dispx_allsteps_train = d["dispx_allsteps_train"][:num_train,idxT,:,:,None]
dispy_allsteps_train = d["dispy_allsteps_train"][:num_train,idxT,:,:,None]
u_trainx = d["dispx_allsteps_train"][:num_train,7,:,:,None]
u_trainy = d["dispy_allsteps_train"][:num_train,7,:,:,None]
bitmap_train = d["MNIST_inputs_train"][:,:,:,None]/255.
d = np.load("../Data/MMNIST_dataset_test.npz")
dispx_allsteps_test = d["dispx_allsteps_test"][:num_test,idxT,:,:,None]
dispy_allsteps_test = d["dispy_allsteps_test"][:num_test,idxT,:,:,None]
u_testx = d["dispx_allsteps_test"][:num_test,7,:,:,None]
u_testy = d["dispy_allsteps_test"][:num_test,7,:,:,None]
bitmap_test = d["MNIST_inputs_test"][:,:,:,None]/255.
S_train = np.concatenate((dispx_allsteps_train,dispy_allsteps_train),axis=-1)
S_test = np.concatenate((dispx_allsteps_test,dispy_allsteps_test),axis=-1)
u_train = np.concatenate((u_trainx,u_trainy),axis=-1)
u_test = np.concatenate((u_testx,u_testy),axis=-1)
X = np.zeros((Nt,Nx,Ny))
Y = np.zeros((Nt,Nx,Ny))
T = np.zeros((Nt,Nx,Ny))
dx = 0.037037037037037035
for ii in range(0,Nt):
T[ii,:,:] = ii
for kk in range(0,Nx):
for jj in range(0,Ny):
            X[ii, kk,jj] = jj*dx #+ 0.5 # x is columns
            Y[ii, kk,jj] = kk*dx #+ 0.5 # y is rows
Y_train = np.concatenate((T.flatten()[:,None], X.flatten()[:,None], Y.flatten()[:,None]),axis=-1)
Y_test = np.concatenate((T.flatten()[:,None], X.flatten()[:,None], Y.flatten()[:,None]),axis=-1)
Y_train_in = Y_train
Y_test_in = Y_test
CX = np.linspace(0,1,num=Nx)
CY = np.linspace(0,1,num=Ny)
s_all_test = S_test
s_all_train = S_train
# num_train = num_train*N_hat
s_train = np.zeros((num_train*N_hat,P,ds))
y_train = np.zeros((num_train*N_hat,P,dy))
U_train = np.zeros((num_train*N_hat,m,du))
s_test = np.zeros((num_test,P,ds))
y_test = np.zeros((num_test,P,dy))
U_test = np.zeros((num_test,m,du))
for j in range(0,N_hat):
for i in range(0,num_train):
s_train[i + j*num_train,:,:], y_train[i+ j*num_train,:,:] = output_construction(S_train[i,:,:,:,:], T, X, Y, P=P,Nt=Nt, Nx=Nx, Ny=Ny, ds=ds, dy=dy)
U_train[i+ j*num_train,:,:] = u_train[i,:,:,:].reshape(Nx*Ny,du)
for i in range(num_test):
s_test[i,:,:], y_test[i,:,:] = output_construction(S_test[i,:,:,:,:], T, X,Y, P=P,Nt=Nt, Nx=Nx, Ny=Ny, ds=ds, dy=dy)
U_test[i,:,:] = u_test[i,:,:,:].reshape(Nx*Ny,du)
del S_train, S_test, dispx_allsteps_train, dispy_allsteps_train, dispx_allsteps_test, dispy_allsteps_test, u_train
U_train = jnp.asarray(U_train)
y_train = jnp.asarray(y_train)
s_train = jnp.asarray(s_train)
U_test = jnp.asarray(U_test)
y_test = jnp.asarray(y_test)
s_test = jnp.asarray(s_test)
U_train = jnp.reshape(U_train,(num_train,m,du))
y_train = jnp.reshape(y_train,(num_train,P,dy))
s_train = jnp.reshape(s_train,(num_train,P,ds))
U_test = jnp.reshape(U_test,(num_test,m,du))
y_test = jnp.reshape(y_test,(num_test,P,dy))
s_test = jnp.reshape(s_test,(num_test,P,ds))
pos_encodingy = PositionalEncodingY(y_train,int(y_train.shape[1]*y_train.shape[2]), max_len = P, H=H_y)
y_train = pos_encodingy.forward(y_train)
del pos_encodingy
pos_encodingyt = PositionalEncodingY(y_test,int(y_test.shape[1]*y_test.shape[2]), max_len = P, H=H_y)
y_test = pos_encodingyt.forward(y_test)
del pos_encodingyt
pos_encodingy = PositionalEncodingU(U_train,int(U_train.shape[1]*U_train.shape[2]), max_len = m, H=H_u)
U_train = pos_encodingy.forward(U_train)
del pos_encodingy
pos_encodingyt = PositionalEncodingU(U_test,int(U_test.shape[1]*U_test.shape[2]), max_len = m, H=H_u)
U_test = pos_encodingyt.forward(U_test)
del pos_encodingyt
s_train_mean = jnp.mean(s_train,axis=0)
s_train_std = jnp.std(s_train,axis=0)
s_train = (s_train - s_train_mean)/s_train_std
train_dataset = DataGenerator(U_train, y_train, s_train, training_batch_size)
train_dataset = iter(train_dataset)
test_dataset = DataGenerator(U_test, y_test, s_test, training_batch_size)
test_dataset = iter(test_dataset)
print(U_train.shape, U_test.shape, y_train.shape, y_test.shape, s_train.shape, s_test.shape)
branch_layers = [m*(du*H_u+du),100, 100, 100, 100, ds*n_hat]
trunk_layers = [H_y*dy + dy, 100, 100, 100, 100, ds*n_hat]
model = DON(branch_layers, trunk_layers, m=m, P=P, mn=s_train_mean, std=s_train_std)
model.count_params(model.get_params(model.opt_state))
del U_train, y_train, s_train
start_time = timeit.default_timer()
model.train(train_dataset, test_dataset, nIter=TRAINING_ITERATIONS)
elapsed = timeit.default_timer() - start_time
print("The training wall-clock time is equal to %f seconds"%elapsed)
params = model.get_params(model.opt_state)
tag = "NN"
# in_noise_test = 0.15*np.random.normal(loc=0.0, scale=1.0, size=(u_test.shape))
# u_test = u_test + in_noise_test
U_test = np.zeros((num_test,m,du))
for i in range(num_test):
U_test[i,:,:] = u_test[i,:,:,:].reshape(Nx*Ny,du)
pos_encodingyt = PositionalEncodingU(U_test,int(U_test.shape[1]*U_test.shape[2]), max_len = m, H=H_u)
U_test = pos_encodingyt.forward(U_test)
del pos_encodingyt
print("Predicting the solution for the full resolution")
uCNN_super_all_test = np.zeros_like(s_all_test).reshape(num_test, Nx*Ny*Nt, ds)
if P>300:
PP = int(P/4)
else:
PP = P
for i in range(0, Nx*Ny, PP):
idx = i + np.arange(0,PP)
uCNN_super_all_test[:,idx,:], _, _ = predict_function(U_test , Y_test_in[idx,:], model=model, params=params, num_test=num_test, H=H_y)
absolute_error_test, mean_test_error_u, mean_test_error_v, test_error_u, test_error_v = error_full_resolution(uCNN_super_all_test,s_all_test,tag='test',P=P,Nx=Nx, Ny=Ny, Nt=Nt, idx = None, num_train=num_test)
| 18,384 | 37.462343 | 238 | py |
null | LOCA-main/MMNIST/FNO/Adam.py | import math
import torch
from torch import Tensor
from typing import List, Optional
from torch.optim.optimizer import Optimizer
import os
os.environ['CUDA_VISIBLE_DEVICES']="3"
def adam(params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
max_exp_avg_sqs: List[Tensor],
state_steps: List[int],
*,
amsgrad: bool,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float):
r"""Functional API that performs Adam algorithm computation.
See :class:`~torch.optim.Adam` for details.
"""
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step = state_steps[i]
bias_correction1 = 1 - beta1 ** step
bias_correction2 = 1 - beta2 ** step
if weight_decay != 0:
grad = grad.add(param, alpha=weight_decay)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sqs[i].sqrt() / math.sqrt(bias_correction2)).add_(eps)
else:
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
step_size = lr / bias_correction1
param.addcdiv_(exp_avg, denom, value=-step_size)
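# Worked single step of the update above (a minimal sketch, assuming
# amsgrad=False and weight_decay=0, written out on a scalar for intuition):
def _adam_single_step_demo(g=0.5, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
    m = (1 - beta1) * g                            # first moment after step 1
    v = (1 - beta2) * g * g                        # second moment after step 1
    m_hat = m / (1 - beta1 ** 1)                   # bias corrections
    v_hat = v / (1 - beta2 ** 1)
    return -lr * m_hat / (math.sqrt(v_hat) + eps)  # parameter increment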
class Adam(Optimizer):
r"""Implements Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
The implementation of the L2 penalty follows changes proposed in
`Decoupled Weight Decay Regularization`_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(Adam, self).__init__(params, defaults)
def __setstate__(self, state):
super(Adam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
max_exp_avg_sqs = []
state_steps = []
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is not None:
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
grads.append(p.grad)
state = self.state[p]
# Lazy state initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['amsgrad']:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avgs.append(state['exp_avg'])
exp_avg_sqs.append(state['exp_avg_sq'])
if group['amsgrad']:
max_exp_avg_sqs.append(state['max_exp_avg_sq'])
# update the steps for each param group update
state['step'] += 1
# record the step after step update
state_steps.append(state['step'])
adam(params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=group['amsgrad'],
beta1=beta1,
beta2=beta2,
lr=group['lr'],
weight_decay=group['weight_decay'],
eps=group['eps'])
return loss | 6,612 | 39.078788 | 120 | py |
null | LOCA-main/MMNIST/FNO/FNOMMNIST.py | """
@author: Zongyi Li
This file is the Fourier Neural Operator for 2D problem such as the Darcy Flow discussed in Section 5.2 in the [paper](https://arxiv.org/pdf/2010.08895.pdf).
"""
import numpy as np
from numpy.linalg import norm
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import os
from functools import reduce
from timeit import default_timer
from utilities3 import count_params, LpLoss
import timeit
seed = np.random.randint(10000)
torch.manual_seed(seed)
np.random.seed(seed)
################################################################
# fourier layer
################################################################
class SpectralConv2d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2):
super(SpectralConv2d, self).__init__()
"""
2D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1 #Number of Fourier modes to multiply, at most floor(N/2) + 1
self.modes2 = modes2
self.scale = (1 / (in_channels * out_channels))
self.weights1 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))
self.weights2 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))
# Complex multiplication
def compl_mul2d(self, input, weights):
# (batch, in_channel, x,y ), (in_channel, out_channel, x,y) -> (batch, out_channel, x,y)
return torch.einsum("bixy,ioxy->boxy", input, weights)
def forward(self, x):
batchsize = x.shape[0]
#Compute Fourier coeffcients up to factor of e^(- something constant)
x_ft = torch.fft.rfft2(x)
# Multiply relevant Fourier modes
out_ft = torch.zeros(batchsize, self.out_channels, x.size(-2), x.size(-1)//2 + 1, dtype=torch.cfloat, device=x.device)
out_ft[:, :, :self.modes1, :self.modes2] = \
self.compl_mul2d(x_ft[:, :, :self.modes1, :self.modes2], self.weights1)
out_ft[:, :, -self.modes1:, :self.modes2] = \
self.compl_mul2d(x_ft[:, :, -self.modes1:, :self.modes2], self.weights2)
#Return to physical space
x = torch.fft.irfft2(out_ft, s=(x.size(-2), x.size(-1)))
return x
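# Only the modes1 x modes2 lowest-frequency corners of the rfft2 spectrum are
# mixed by learned weights; all other coefficients are zeroed before the
# inverse transform. A minimal shape check (hypothetical sizes):
def _spectral_conv_shape_demo():
    layer = SpectralConv2d(in_channels=3, out_channels=3, modes1=12, modes2=12)
    x = torch.randn(2, 3, 28, 28)  # rfft2 gives (2, 3, 28, 15) complex modes
    return layer(x).shape          # torch.Size([2, 3, 28, 28])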
class FNO2d(nn.Module):
def __init__(self, modes1, modes2, width, indices=None):
super(FNO2d, self).__init__()
"""
The overall network. It contains 4 layers of the Fourier layer.
1. Lift the input to the desire channel dimension by self.fc0 .
2. 4 layers of the integral operators u' = (W + K)(u).
W defined by self.w; K defined by self.conv .
3. Project from the channel space to the output space by self.fc1 and self.fc2 .
input: the solution of the coefficient function and locations (a(x, y), x, y)
input shape: (batchsize, x=s, y=s, c=3)
output: the solution
output shape: (batchsize, x=s, y=s, c=1)
"""
self.modes1 = modes1
self.modes2 = modes2
self.width = width
self.fc0 = nn.Linear(4, self.width)
self.conv0 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv1 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv2 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv3 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.w0 = nn.Conv1d(self.width, self.width, 1)
self.w1 = nn.Conv1d(self.width, self.width, 1)
self.w2 = nn.Conv1d(self.width, self.width, 1)
self.w3 = nn.Conv1d(self.width, self.width, 1)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, 2)
self.indices = indices
def forward(self, x):
batchsize = x.shape[0]
size_x, size_y = x.shape[1], x.shape[2]
x = self.fc0(x)
x = x.permute(0, 3, 1, 2)
x1 = self.conv0(x)
x2 = self.w0(x.view(batchsize, self.width, -1)).view(batchsize, self.width, size_x, size_y)
x = x1 + x2
x = F.relu(x)
x1 = self.conv1(x)
x2 = self.w1(x.view(batchsize, self.width, -1)).view(batchsize, self.width, size_x, size_y)
x = x1 + x2
x = F.relu(x)
x1 = self.conv2(x)
x2 = self.w2(x.view(batchsize, self.width, -1)).view(batchsize, self.width, size_x, size_y)
x = x1 + x2
x = F.relu(x)
x1 = self.conv3(x)
x2 = self.w3(x.view(batchsize, self.width, -1)).view(batchsize, self.width, size_x, size_y)
x = x1 + x2
x = x.permute(0, 2, 3, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x
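# Minimal CPU-only usage sketch: the network maps (batch, 28, 28, 4) inputs --
# two displacement channels plus the (x, y) grid appended further below -- to
# (batch, 28, 28, 2) predictions.
def _fno_forward_demo():
    net = FNO2d(modes1=12, modes2=12, width=32)
    x = torch.randn(2, 28, 28, 4)
    return net(x).shape  # torch.Size([2, 28, 28, 2])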
################################################################
# configs
################################################################
ntrain = 60000
ntest = 10000
batch_size = 100
learning_rate = 0.001
epochs = 200
step_size = 100
gamma = 0.5
modes = 12
width = 32
h = 28
r = 1
sub = 1
sub1 = 1
s = h
P = 56
ind = 9
################################################################
# load data and data normalization
################################################################
idxT = [11]
d = np.load("../Data/MMNIST_dataset_train.npz")
dispx_allsteps_train = d["dispx_allsteps_train"][:ntrain,idxT,::sub,::sub,None][:,-1,:,:,:]
dispy_allsteps_train = d["dispy_allsteps_train"][:ntrain,idxT,::sub,::sub,None][:,-1,:,:,:]
u_trainx = d["dispx_allsteps_train"][:ntrain,7,::sub,::sub,None]
u_trainy = d["dispy_allsteps_train"][:ntrain,7,::sub,::sub,None]
d = np.load("../Data/MMNIST_dataset_test.npz")
dispx_allsteps_test = d["dispx_allsteps_test"][:ntest,idxT,::sub,::sub,None][:,-1,:,:,:]
dispy_allsteps_test = d["dispy_allsteps_test"][:ntest,idxT,::sub,::sub,None][:,-1,:,:,:]
u_testx = d["dispx_allsteps_test"][:ntest,7,::sub,::sub,None]
u_testy = d["dispy_allsteps_test"][:ntest,7,::sub,::sub,None]
S_train = np.concatenate((dispx_allsteps_train,dispy_allsteps_train),axis=-1)
S_test = np.concatenate((dispx_allsteps_test,dispy_allsteps_test),axis=-1)
U_train = np.concatenate((u_trainx,u_trainy),axis=-1)
U_test = np.concatenate((u_testx,u_testy),axis=-1)
tag = "CN"
# in_noise_train = 0.15*np.random.normal(loc=0.0, scale=1.0, size=(U_train.shape))
# U_train = U_train + in_noise_train
dtype_double = torch.FloatTensor
cdtype_double = torch.cuda.DoubleTensor
U_train = torch.from_numpy(np.asarray(U_train)).type(dtype_double)
S_train = torch.from_numpy(np.asarray(S_train)).type(dtype_double)
U_test = torch.from_numpy(np.asarray(U_test)).type(dtype_double)
S_test = torch.from_numpy(np.asarray(S_test)).type(dtype_double)
x_train = U_train
y_train = S_train
x_test = U_test
y_test = S_test
###########################################
grids = []
grids.append(np.linspace(0, 1, s))
grids.append(np.linspace(0, 1, s))
grid = np.vstack([xx.ravel() for xx in np.meshgrid(*grids)]).T
grid = grid.reshape(1,s,s,2)
grid = torch.tensor(grid, dtype=torch.float)
x_train = torch.cat([x_train.reshape(ntrain,s,s,2), grid.repeat(ntrain,1,1,1)], dim=3)
x_test = torch.cat([x_test.reshape(ntest,s,s,2), grid.repeat(ntest,1,1,1)], dim=3)
ind_train = torch.randint(s*s, (ntrain, P))
ind_test = torch.randint(s*s, (ntest, P))
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_train, y_train, ind_train), batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test, ind_test), batch_size=batch_size, shuffle=True)
################################################################
# training and evaluation
################################################################
batch_ind = torch.arange(batch_size).reshape(-1, 1).repeat(1, P)
model = FNO2d(modes, modes, width).cuda()
print(count_params(model))
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False)
start_time = timeit.default_timer()
for ep in range(epochs):
model.train()
t1 = default_timer()
train_l2 = 0
for x, y, idx in train_loader:
x, y = x.cuda(), y.cuda()
optimizer.zero_grad()
out = model(x).reshape(batch_size, s*s,2)
y = y.reshape(batch_size, s*s,2)
y = y[batch_ind, idx]
out = out[batch_ind, idx]
loss = myloss(out.view(batch_size,-1), y.view(batch_size,-1))
loss.backward()
optimizer.step()
train_l2 += loss.item()
scheduler.step()
model.eval()
test_l2 = 0.0
with torch.no_grad():
for x, y, idx in test_loader:
x, y = x.cuda(), y.cuda()
out = model(x).reshape(batch_size, s*s,2)
y = y.reshape(batch_size, s*s,2)
y = y[batch_ind, idx]
out = out[batch_ind, idx]
test_l2 += myloss(out.view(batch_size,-1), y.view(batch_size,-1)).item()
train_l2/= ntrain
test_l2 /= ntest
t2 = default_timer()
print(ep, t2-t1, train_l2, test_l2)
elapsed = timeit.default_timer() - start_time
print("The training wall-clock time is equal to %f seconds"%elapsed)
sub = 1
s = 28
d = np.load("../Data/MMNIST_dataset_test.npz")  # repo-relative path, matching the load above
dispx_allsteps_test = d["dispx_allsteps_test"][:ntest,idxT,::sub,::sub,None][:,-1,:,:,:]
dispy_allsteps_test = d["dispy_allsteps_test"][:ntest,idxT,::sub,::sub,None][:,-1,:,:,:]
u_testx = d["dispx_allsteps_test"][:ntest,7,::sub,::sub,None]
u_testy = d["dispy_allsteps_test"][:ntest,7,::sub,::sub,None]
S_test = np.concatenate((dispx_allsteps_test,dispy_allsteps_test),axis=-1)
U_test = np.concatenate((u_testx,u_testy),axis=-1)
in_noise_test = 0.15*np.random.normal(loc=0.0, scale=1.0, size=(U_test.shape))
U_test = U_test + in_noise_test
dtype_double = torch.FloatTensor
cdtype_double = torch.cuda.DoubleTensor
U_test = torch.from_numpy(np.asarray(U_test)).type(dtype_double)
S_test = torch.from_numpy(np.asarray(S_test)).type(dtype_double)
x_test = U_test
y_test = S_test
grids = []
grids.append(np.linspace(0, 1, s))
grids.append(np.linspace(0, 1, s))
grid = np.vstack([xx.ravel() for xx in np.meshgrid(*grids)]).T
grid = grid.reshape(1,s,s,2)
grid = torch.tensor(grid, dtype=torch.float)
x_test = torch.cat([x_test.reshape(ntest,s,s,2), grid.repeat(ntest,1,1,1)], dim=3)
pred_torch = torch.zeros(S_test.shape)
baseline_torch = torch.zeros(S_test.shape)
index = 0
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test), batch_size=1, shuffle=True)
test_error_u = []
test_error_u_np = []
test_error_v_np = []
with torch.no_grad():
for x, y in test_loader:
test_l2 = 0
x, y= x.cuda(), y.cuda()
out = model(x).reshape(1, s, s,2)
# out = y_normalizer.decode(out)
pred_torch[index,:,:] = out[:,:,:,:]
baseline_torch[index,:,:] = y[:,:,:,:]
test_l2 += myloss(out.view(1, -1), y.view(1, -1)).item()
test_error_u.append(test_l2)
        test_error_u_np.append(np.linalg.norm(y.cpu().numpy().reshape(S_test.shape[1]*S_test.shape[2],2)[:,0]- out.cpu().numpy().reshape(S_test.shape[1]*S_test.shape[2],2)[:,0],2)/np.linalg.norm(y.cpu().numpy().reshape(S_test.shape[1]*S_test.shape[2],2)[:,0],2))
        test_error_v_np.append(np.linalg.norm(y.cpu().numpy().reshape(S_test.shape[1]*S_test.shape[2],2)[:,1]- out.cpu().numpy().reshape(S_test.shape[1]*S_test.shape[2],2)[:,1],2)/np.linalg.norm(y.cpu().numpy().reshape(S_test.shape[1]*S_test.shape[2],2)[:,1],2))
# print(index, test_l2)
index = index + 1
print("The average test u error is %e, the standard deviation is %e, the min error is %e and the max error is %e"%(np.mean(test_error_u_np),np.std(test_error_u_np),np.min(test_error_u_np),np.max(test_error_u_np)))
print("The average test v error is %e, the standard deviation is %e, the min error is %e and the max error is %e"%(np.mean(test_error_v_np),np.std(test_error_v_np),np.min(test_error_v_np),np.max(test_error_v_np))) | 12,303 | 36.060241 | 262 | py
null | LOCA-main/MMNIST/FNO/utilities3.py | import torch
import numpy as np
import scipy.io
import h5py
import torch.nn as tnn
import operator
from functools import reduce
from functools import partial
import os
################################################
#
# Utilities
#
#################################################
def get_freer_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
return str(np.argmax(memory_available))
os.environ['CUDA_VISIBLE_DEVICES']= get_freer_gpu()
os.environ['CUDA_VISIBLE_DEVICES']= "6"  # NOTE: hard-coded override; supersedes the automatic get_freer_gpu() choice above
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# reading data
class MatReader(object):
def __init__(self, file_path, to_torch=True, to_cuda=False, to_float=True):
super(MatReader, self).__init__()
self.to_torch = to_torch
self.to_cuda = to_cuda
self.to_float = to_float
self.file_path = file_path
self.data = None
self.old_mat = None
self._load_file()
def _load_file(self):
try:
self.data = scipy.io.loadmat(self.file_path)
self.old_mat = True
except:
self.data = h5py.File(self.file_path)
self.old_mat = False
def load_file(self, file_path):
self.file_path = file_path
self._load_file()
def read_field(self, field):
x = self.data[field]
if not self.old_mat:
x = x[()]
x = np.transpose(x, axes=range(len(x.shape) - 1, -1, -1))
if self.to_float:
x = x.astype(np.float32)
if self.to_torch:
x = torch.from_numpy(x)
if self.to_cuda:
x = x.cuda()
return x
def set_cuda(self, to_cuda):
self.to_cuda = to_cuda
def set_torch(self, to_torch):
self.to_torch = to_torch
def set_float(self, to_float):
self.to_float = to_float
# normalization, pointwise gaussian
class UnitGaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(UnitGaussianNormalizer, self).__init__()
# x could be in shape of ntrain*n or ntrain*T*n or ntrain*n*T
self.mean = torch.mean(x, 0)
self.std = torch.std(x, 0)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
if sample_idx is None:
std = self.std + self.eps # n
mean = self.mean
else:
if len(self.mean.shape) == len(sample_idx[0].shape):
std = self.std[sample_idx] + self.eps # batch*n
mean = self.mean[sample_idx]
if len(self.mean.shape) > len(sample_idx[0].shape):
std = self.std[:,sample_idx]+ self.eps # T*batch*n
mean = self.mean[:,sample_idx]
# x is in shape of batch*n or T*batch*n
x = (x * std) + mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
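# Typical round trip, sketched with hypothetical names:
#   normalizer = UnitGaussianNormalizer(y_train)  # pointwise mean/std over dim 0
#   y_enc = normalizer.encode(y_train)            # train the model on y_enc
#   y_hat = normalizer.decode(model(x))           # undo the scaling at test time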
# normalization, Gaussian
class GaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(GaussianNormalizer, self).__init__()
self.mean = torch.mean(x)
self.std = torch.std(x)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
x = (x * (self.std + self.eps)) + self.mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
# normalization, scaling by range
class RangeNormalizer(object):
def __init__(self, x, low=0.0, high=1.0):
super(RangeNormalizer, self).__init__()
mymin = torch.min(x, 0)[0].view(-1)
mymax = torch.max(x, 0)[0].view(-1)
self.a = (high - low)/(mymax - mymin)
self.b = -self.a*mymax + high
def encode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = self.a*x + self.b
x = x.view(s)
return x
def decode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = (x - self.b)/self.a
x = x.view(s)
return x
#loss function with rel/abs Lp loss
class LpLoss(object):
def __init__(self, d=2, p=2, size_average=True, reduction=True):
super(LpLoss, self).__init__()
#Dimension and Lp-norm type are postive
assert d > 0 and p > 0
self.d = d
self.p = p
self.reduction = reduction
self.size_average = size_average
print(self.d, self.p, self.reduction, self.size_average)
def abs(self, x, y):
num_examples = x.size()[0]
#Assume uniform mesh
h = 1.0 / (x.size()[1] - 1.0)
all_norms = (h**(self.d/self.p))*torch.norm(x.view(num_examples,-1) - y.view(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(all_norms)
else:
return torch.sum(all_norms)
return all_norms
def rel(self, x, y):
num_examples = x.size()[0] # 100 x 64*64 x 10
diff_norms = torch.norm(x.reshape(num_examples,-1) - y.reshape(num_examples,-1), self.p, 1)
y_norms = torch.norm(y.reshape(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(diff_norms/y_norms)
else:
return torch.sum(diff_norms/y_norms)
return diff_norms/y_norms
def __call__(self, x, y):
return self.rel(x, y)
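# The loss is the relative Lp distance per sample; identical tensors give zero.
# A minimal sketch:
def _lploss_demo():
    loss = LpLoss(size_average=True)  # note: __init__ prints its settings
    y = torch.randn(4, 10)
    return loss(y, y)                 # tensor(0.)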
# Sobolev norm (HS norm)
# where we also compare the numerical derivatives between the output and target
class HsLoss(object):
def __init__(self, d=2, p=2, k=1, a=None, group=False, size_average=True, reduction=True):
super(HsLoss, self).__init__()
#Dimension and Lp-norm type are postive
assert d > 0 and p > 0
self.d = d
self.p = p
self.k = k
self.balanced = group
self.reduction = reduction
self.size_average = size_average
        if a is None:
a = [1,] * k
self.a = a
def rel(self, x, y):
num_examples = x.size()[0]
diff_norms = torch.norm(x.reshape(num_examples,-1) - y.reshape(num_examples,-1), self.p, 1)
y_norms = torch.norm(y.reshape(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(diff_norms/y_norms)
else:
return torch.sum(diff_norms/y_norms)
return diff_norms/y_norms
def __call__(self, x, y, a=None):
nx = x.size()[1]
ny = x.size()[2]
k = self.k
balanced = self.balanced
a = self.a
x = x.view(x.shape[0], nx, ny, -1)
y = y.view(y.shape[0], nx, ny, -1)
k_x = torch.cat((torch.arange(start=0, end=nx//2, step=1),torch.arange(start=-nx//2, end=0, step=1)), 0).reshape(nx,1).repeat(1,ny)
k_y = torch.cat((torch.arange(start=0, end=ny//2, step=1),torch.arange(start=-ny//2, end=0, step=1)), 0).reshape(1,ny).repeat(nx,1)
k_x = torch.abs(k_x).reshape(1,nx,ny,1).to(x.device)
k_y = torch.abs(k_y).reshape(1,nx,ny,1).to(x.device)
x = torch.fft.fftn(x, dim=[1, 2])
y = torch.fft.fftn(y, dim=[1, 2])
if balanced==False:
weight = 1
if k >= 1:
weight += a[0]**2 * (k_x**2 + k_y**2)
if k >= 2:
weight += a[1]**2 * (k_x**4 + 2*k_x**2*k_y**2 + k_y**4)
weight = torch.sqrt(weight)
loss = self.rel(x*weight, y*weight)
else:
loss = self.rel(x, y)
if k >= 1:
weight = a[0] * torch.sqrt(k_x**2 + k_y**2)
loss += self.rel(x*weight, y*weight)
if k >= 2:
weight = a[1] * torch.sqrt(k_x**4 + 2*k_x**2*k_y**2 + k_y**4)
loss += self.rel(x*weight, y*weight)
loss = loss / (k+1)
return loss
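# Weighting sketch for the HS norm above: for k = 1 each Fourier mode is scaled
# by sqrt(1 + a[0]^2 * (kx^2 + ky^2)), an H^1-type Sobolev weight; k = 2 adds
# the fourth-order (kx^2 + ky^2)^2 term, and group=True averages the orders
# separately.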
# A simple feedforward neural network
class DenseNet(torch.nn.Module):
def __init__(self, layers, nonlinearity, out_nonlinearity=None, normalize=False):
super(DenseNet, self).__init__()
self.n_layers = len(layers) - 1
assert self.n_layers >= 1
self.layers = tnn.ModuleList()
for j in range(self.n_layers):
self.layers.append(tnn.Linear(layers[j], layers[j+1]))
if j != self.n_layers - 1:
if normalize:
self.layers.append(tnn.BatchNorm1d(layers[j+1]))
self.layers.append(nonlinearity())
if out_nonlinearity is not None:
self.layers.append(out_nonlinearity())
def forward(self, x):
for _, l in enumerate(self.layers):
x = l(x)
return x
# print the number of parameters
def count_params(model):
c = 0
for p in list(model.parameters()):
c += reduce(operator.mul, list(p.size()))
return c | 9,283 | 28.103448 | 139 | py |
null | LOCA-main/MMNIST/LOCA/LOCAMMNIST.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy import linalg, interpolate
from sklearn import gaussian_process as gp
import argparse
from jax.example_libraries.stax import Dense, Gelu
from jax.example_libraries import stax
import os
from scipy.integrate import solve_ivp
import timeit
from jax.example_libraries import optimizers
from absl import app
import jax
from jax import vjp
import jax.numpy as jnp
import numpy as np
from jax.numpy.linalg import norm
from jax import random, grad, vmap, jit
from functools import partial
from torch.utils import data
from scipy import interpolate
from tqdm import trange
from numpy.polynomial.legendre import leggauss
import itertools
import torch
from kymatio.numpy import Scattering2D
def get_freer_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Used >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
return str(np.argmin(memory_available))
os.environ['CUDA_VISIBLE_DEVICES']= get_freer_gpu()
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']="False"
def output_construction(s, X, Y, P=100, ds=1, dy=2, N=1000, Nx=100, Ny=100, Nt=2):
    S_all = np.zeros((P,ds))
    Y_all = np.zeros((P,dy))
x = np.random.randint(Nx, size=P)
y = np.random.randint(Ny, size=P)
Y_all = np.concatenate((X[x][range(P),y][:,None], Y[x][range(P),y][:,None]),axis=-1)
S_all[:,:] = s[x][range(P), y]
return S_all, Y_all
def pairwise_distances(dist,**arg):
return jit(vmap(vmap(partial(dist,**arg),in_axes=(None,0)),in_axes=(0,None)))
def peuclid_distance(x,y,square=True):
XX = jnp.einsum('ik,ik->i',x,x)
YY = jnp.einsum('ik,ik->i',y,y)
XY = jnp.einsum('ik,jk->ij',x,y)
return XX[:,np.newaxis]+YY[np.newaxis,:] - 2*XY
def euclid_distance(x,y,square=True):
XX=jnp.dot(x,x)
YY=jnp.dot(y,y)
XY=jnp.dot(x,y)
return XX+YY-2*XY
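# pairwise_distances(euclid_distance) lifts the scalar squared distance to an
# (n, m) matrix via two nested vmaps; vdistance_function in LOCA adds a third,
# batched axis on top. Toy check (hypothetical sizes):
def _pairwise_demo():
    f = pairwise_distances(euclid_distance)
    a = jnp.ones((3, 2)); b = jnp.zeros((5, 2))
    return f(a, b).shape  # (3, 5)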
class DataGenerator(data.Dataset):
def __init__(self, inputsxuy, y, s, z, w,
batch_size=100, rng_key=random.PRNGKey(1234)):
'Initialization'
self.inputsxuy = inputsxuy
self.y = y
self.s = s
self.z = z
self.w = w
self.N = inputsxuy.shape[0]
self.batch_size = batch_size
self.key = rng_key
# @partial(jit, static_argnums=(0,))
def __getitem__(self, index):
'Generate one batch of data'
self.key, subkey = random.split(self.key)
inputs,outputs = self.__data_generation(subkey)
return inputs, outputs
@partial(jit, static_argnums=(0,))
def __data_generation(self, key):
'Generates data containing batch_size samples'
idx = random.choice(key, self.N, (self.batch_size,), replace=False)
s = self.s[idx,:,:]
inputsxu = self.inputsxuy[idx,:,:]
y = self.y[idx,:,:]
z = self.z[idx,:,:]
w = self.w[idx,:,:]
inputs = (inputsxu, y, z, w)
return inputs, s
class PositionalEncodingY:
def __init__(self, Y, d_model, max_len = 100, H=4):
self.d_model = int(np.ceil(d_model/4)*2)
self.Y = Y
self.max_len = max_len
self.H = H
def forward(self, x):
pet = np.zeros((x.shape[0], self.max_len, self.H))
pex = np.zeros((x.shape[0], self.max_len, self.H))
pey = np.zeros((x.shape[0], self.max_len, self.H))
X = jnp.take(self.Y, 0, axis=2)[:,:,None]
Y = jnp.take(self.Y, 1, axis=2)[:,:,None]
positionX = jnp.tile(X,(1,1,self.H))
positionY = jnp.tile(Y,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
pex = jax.ops.index_update(pex, jax.ops.index[:,:,0::2], jnp.cos(positionX[:,:,0::2] * div_term))
pex = jax.ops.index_update(pex, jax.ops.index[:,:,1::2], jnp.sin(positionX[:,:,1::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,0::2], jnp.cos(positionY[:,:,0::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,1::2], jnp.sin(positionY[:,:,1::2] * div_term))
pos_embedding = jnp.concatenate((pex,pey),axis=-1)
x = jnp.concatenate([x, pos_embedding], -1)
return x
def scattering(sig, l=100, m=100, training_batch_size = 100):
scattering = Scattering2D(J=1, L=16, max_order=2, shape=(28, 28))
cwtmatr = np.zeros((training_batch_size, 3332, 1))
sig = np.array(sig)
for i in range(0,training_batch_size):
scatteringCoeffs = scattering(sig[i,:,:].reshape(28,28))
cwtmatr[i,:,:] = scatteringCoeffs.flatten()[:,None]
return cwtmatr
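# Why 3332: Scattering2D(J=1, L=16, max_order=2) on a 28x28 image yields
# 1 + J*L = 17 channels (no second-order paths exist for J=1) at the
# subsampled 14x14 resolution, and 17*14*14 = 3332 -- the hard-coded feature
# length used throughout this script.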
class LpLoss(object):
def __init__(self, d=2, p=2, size_average=True, reduction=True):
super(LpLoss, self).__init__()
#Dimension and Lp-norm type are postive
assert d > 0 and p > 0
self.d = d
self.p = p
self.reduction = reduction
self.size_average = size_average
def rel(self, x, y):
num_examples = x.shape[0]
diff_norms = jnp.linalg.norm(y.reshape(num_examples,-1) - x.reshape(num_examples,-1), self.p, 1)
y_norms = jnp.linalg.norm(y.reshape(num_examples,-1), self.p, 1)
        return jnp.sum(diff_norms/y_norms)/100.  # NOTE: assumes a fixed batch size of 100
def __call__(self, x, y):
return self.rel(x, y)
class LOCA:
def __init__(self, q_layers, g_layers, v_layers , m=100, P=100, X=None, Y=None, Yt=None, H=30, batch_size=100, jac_det=None):
# Network initialization and evaluation functions
seed = np.random.randint(10000)
self.q_init, self.q_apply = self.init_NN(q_layers, activation=Gelu)
self.in_shape = (-1, q_layers[0])
self.out_shape, q_params = self.q_init(random.PRNGKey(seed), self.in_shape)
seed = np.random.randint(10000)
self.v_init, self.v_apply = self.init_NN(v_layers, activation=Gelu)
self.in_shape = (-1, v_layers[0])
self.out_shape, v_params = self.v_init(random.PRNGKey(seed), self.in_shape)
seed = np.random.randint(10000)
self.g_init, self.g_apply = self.init_NN(g_layers, activation=Gelu)
self.in_shape = (-1, g_layers[0])
self.out_shape, g_params = self.g_init(random.PRNGKey(seed), self.in_shape)
beta = [1.]
gamma = [1.]
params = (beta,gamma,q_params, g_params, v_params)
self.jac_det = jac_det
# Use optimizers to set optimizer initialization and update functions
self.opt_init,self.opt_update,self.get_params = optimizers.adam(optimizers.exponential_decay(1e-3,
decay_steps=100,
decay_rate=0.99))
self.opt_state = self.opt_init(params)
# Logger
self.itercount = itertools.count()
self.loss_log = []
self.P = P
self.L = 1
self.batchsize = batch_size
self.l2loss = LpLoss(size_average=False)
self.vdistance_function = vmap(pairwise_distances(euclid_distance))
print("Model initialized")
def init_NN(self, Q, activation=Gelu):
layers = []
num_layers = len(Q)
if num_layers < 2:
net_init, net_apply = stax.serial()
else:
for i in range(0, num_layers-2):
layers.append(Dense(Q[i+1]))
layers.append(activation)
layers.append(Dense(Q[-1]))
net_init, net_apply = stax.serial(*layers)
return net_init, net_apply
def LOCA_net(self, params, inputs, ds=2):
beta, gamma, q_params, g_params, v_params = params
inputsxu, inputsy, inputsz, w = inputs
inputsy = self.q_apply(q_params,inputsy)
inputsz = self.q_apply(q_params,inputsz)
d = self.vdistance_function(inputsz, inputsz)
K = beta[0]*jnp.exp(-gamma[0]*d)
Kzz = jnp.sqrt(self.jac_det*jnp.matmul(K,w))
d = self.vdistance_function(inputsy, inputsz)
K = beta[0]*jnp.exp(-gamma[0]*d)
Kyz = jnp.sqrt(self.jac_det*jnp.matmul(K,w))
mean_K = jnp.matmul(Kyz, jnp.swapaxes(Kzz,1,2))
K = jnp.divide(K,mean_K)
g = self.g_apply(g_params, inputsz)
g = self.jac_det*jnp.einsum("ijk,iklm,ik->ijlm",K,g.reshape(g.shape[0],g.shape[1], ds, int(g.shape[-1]/ds)),w[:,:,-1])
g = jax.nn.softmax(g, axis=-1)
v = self.v_apply(v_params, inputsxu.reshape(inputsxu.shape[0],1,inputsxu.shape[1]*inputsxu.shape[2]))
v = v.reshape(v.shape[0],int(v.shape[2]/ds),ds)
attn_vec = jnp.einsum("ijkl,ilk->ijk", g,v)
return attn_vec
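    # LOCA_net, step by step: q embeds the query points y and the quadrature
    # nodes z; the RBF kernel K = beta*exp(-gamma*d) is normalized by the
    # square roots of its marginal quadrature integrals (Kyz, Kzz); the
    # normalized kernel then averages the proposal scores g over z with the
    # quadrature weights w, a softmax turns the result into attention weights,
    # and those weights contract against the values v built from the
    # scattering features of the input function.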
@partial(jax.jit, static_argnums=0)
def loss(self, params, batch):
inputs, outputs = batch
y_pred = self.LOCA_net(params,inputs)
loss = np.mean((outputs.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def lossT(self, params, batch):
inputs, outputs = batch
y_pred = self.LOCA_net(params,inputs)
loss = np.mean((outputs.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def L2errorT(self, params, batch):
inputs, outputs = batch
y_pred = self.LOCA_net(params,inputs)
return norm(outputs.flatten() - y_pred.flatten(), 2)/norm(outputs.flatten(),2)
@partial(jax.jit, static_argnums=0)
def L2error(self, params, batch):
inputs, outputs = batch
y_pred = self.LOCA_net(params,inputs)
return norm(outputs.flatten() - y_pred.flatten(), 2)/norm(outputs.flatten(),2)
@partial(jit, static_argnums=(0,))
def step(self, i, opt_state, batch):
params = self.get_params(opt_state)
g = grad(self.loss)(params, batch)
return self.opt_update(i, g, opt_state)
# def train(self, train_dataset, test_dataset, nIter = 10000):
def train(self, train_dataset, nIter = 10000):
train_data = iter(train_dataset)
# test_data = iter(test_dataset)
pbar = trange(nIter)
for it in pbar:
train_batch = next(train_data)
# test_batch = next(test_data)
self.opt_state = self.step(next(self.itercount), self.opt_state, train_batch)
if it % 100 == 0:
params = self.get_params(self.opt_state)
loss_train = self.loss(params, train_batch)
# loss_test = self.lossT(params, test_batch)
errorTrain = self.L2error(params, train_batch)
# errorTest = self.L2errorT(params, test_batch)
self.loss_log.append(loss_train)
pbar.set_postfix({'Training loss': loss_train,
'Train error': errorTrain})
# 'Testing loss' : loss_test,
# 'Test error': errorTest,
@partial(jit, static_argnums=(0,))
def predict(self, params, inputs):
s_pred = self.LOCA_net(params,inputs)
return s_pred
@partial(jit, static_argnums=(0,))
def predictT(self, params, inputs):
s_pred = self.LOCA_net(params,inputs)
return s_pred
def ravel_list(self, *lst):
return jnp.concatenate([jnp.ravel(elt) for elt in lst]) if lst else jnp.array([])
def ravel_pytree(self, pytree):
leaves, treedef = jax.tree_util.tree_flatten(pytree)
flat, unravel_list = vjp(self.ravel_list, *leaves)
unravel_pytree = lambda flat: jax.tree_util.tree_unflatten(treedef, unravel_list(flat))
return flat, unravel_pytree
def count_params(self, params):
beta, gamma,q_params, g_params, v_params = params
qlv, _ = self.ravel_pytree(q_params)
vlv, _ = self.ravel_pytree(v_params)
glv, _ = self.ravel_pytree(g_params)
print("The number of model parameters is:",qlv.shape[0]+vlv.shape[0]+glv.shape[0])
def predict_function(U_in, Y_in, P=128, m=100, P_test=1024,num_test=1000, Nx=30, Nt=100, Ny=32,model=None,dy=2, training_batch_size=100,params= None, L=100, mode="train", z=None, w=None,H=100):
print("Predicting the solution for the full resolution")
dx = 2
du = 2
dy = 2
ds = 2
if mode=="train":
predict = model.predict
if mode=="test":
predict= model.predictT
y = np.expand_dims(Y_in,axis=0)
y = np.tile(y,(num_test,1,1))
s_super_all = np.zeros((num_test,Nt*Nx*Ny,ds))
inputs_trainxu = np.zeros((num_test, 3332,du))
inputs_trainxu[:,:,0:1] = jnp.asarray(scattering(U_in[:,:,0:1], l=L, m=m, training_batch_size=num_test))
inputs_trainxu[:,:,1:2] = jnp.asarray(scattering(U_in[:,:,1:2], l=L, m=m, training_batch_size=num_test))
inputs_trainxu = jnp.array(inputs_trainxu)
for i in range(0,Nx*Ny*Nt,P):
s_super_loc = np.zeros((num_test, P,ds))
idx1 = i + np.arange(0,P)
Y_super = y[:,idx1,:]
pos_encodingy = PositionalEncodingY(Y_super,int(Y_super.shape[1]*Y_super.shape[2]), max_len = P, H=H)
        # NOTE: dy == 2 in this script, so Y_super carries only (x, y); the
        # original third slice [:, :, 2:3] was empty, and with L == 1 the tiles
        # are no-ops, so y_train below is just the raw query coordinates.
        y_trainX = jnp.tile(jnp.reshape(Y_super,(num_test,P,dy))[:,:,0:1],(1,1,L))
        y_trainY = jnp.tile(jnp.reshape(Y_super,(num_test,P,dy))[:,:,1:2],(1,1,L))
        y_train = jnp.concatenate((y_trainX, y_trainY),axis=-1)
y_train = pos_encodingy.forward(y_train)
del pos_encodingy
for j in range(0, U_in.shape[0],training_batch_size):
idx = j + np.arange(0,training_batch_size,1)
s_super_loc[idx,:,:] = predict(params, (inputs_trainxu[idx,:,:], y_train[idx,:,:], z, w))
s_super_all[:,idx1,:] = s_super_loc
return s_super_all, y[:,:,0:1], y[:,:,1:2]
def error_full_resolution(s_super_all, s_all,tag='train', num_train=1000,P=128, Nx=30, Ny=30, Nt=10, idx=None, ds=2):
z = s_super_all.reshape(num_train,Nt*Nx*Ny,ds)
s = s_all.reshape(num_train,Nt*Nx*Ny,ds)
test_error_u = []
for i in range(0,num_train):
test_error_u.append(norm(s[i,:,0]- z[i,:,0], 2)/norm(s[i,:,0], 2))
    print("The average "+tag+" u error for the super resolution is %e, the standard deviation is %e, the minimum error is %e and the maximum error is %e"%(np.mean(test_error_u),np.std(test_error_u),np.min(test_error_u),np.max(test_error_u)))
test_error_v = []
for i in range(0,num_train):
test_error_v.append(norm(s[i,:,1]- z[i,:,1], 2)/norm(s[i,:,1], 2))
    print("The average "+tag+" v error for the super resolution is %e, the standard deviation is %e, the minimum error is %e and the maximum error is %e"%(np.mean(test_error_v),np.std(test_error_v),np.min(test_error_v),np.max(test_error_v)))
absolute_error = np.abs(z-s)
return absolute_error, np.mean(test_error_u),np.mean(test_error_v), test_error_u, test_error_v
def minmax(a, n, mean):
minpos = a.index(min(a))
maxpos = a.index(max(a))
meanpos = min(range(len(a)), key=lambda i: abs(a[i]-mean))
print("The maximum is at position", maxpos)
print("The minimum is at position", minpos)
print("The mean is at position", meanpos)
return minpos,maxpos,meanpos
TRAINING_ITERATIONS = 100000
P = 56
m = int(28*28)
N_hat = 1
num_train = 60000
num_test = 10000
training_batch_size = 100
dx = 2
du = 2
dy = 2
ds = 2
n_hat = 500
l = 100
Nx = 28
Ny = 28
L = 1
H = 10
idxT = [11]
Nt = len(idxT)
d = np.load("../Data/MMNIST_dataset_train.npz")
dispx_allsteps_train = d["dispx_allsteps_train"][:num_train,11,:,:,None]
dispy_allsteps_train = d["dispy_allsteps_train"][:num_train,11,:,:,None]
u_trainx = d["dispx_allsteps_train"][:num_train,7,:,:,None]
u_trainy = d["dispy_allsteps_train"][:num_train,7,:,:,None]
S_train = np.concatenate((dispx_allsteps_train,dispy_allsteps_train),axis=-1)
u_train = np.concatenate((u_trainx,u_trainy),axis=-1)
print("Dataset loaded")
polypoints = 20
lb = np.array([0.0, 0.0])
ub = np.array([1.0, 1.0])
# GLL nodes and weights in [-1,1]
z1, w1 = leggauss(polypoints)
z2, w2 = leggauss(polypoints)
# Rescale nodes to [lb,ub]
x1 = 0.5*(ub[0] - lb[0])*(z1 + 1.0) + lb[0]
x2 = 0.5*(ub[1] - lb[1])*(z2 + 1.0) + lb[1]
# Determinant of Jacobian of mapping [lb,ub]-->[-1,1]^2
jac_det = 0.5**2 * (ub[0]-lb[0]) * (ub[1]-lb[1])
Z_1, Z_2 = np.meshgrid(z1,z2,indexing="ij")
Z = np.concatenate((Z_1.flatten()[:,None], Z_2.flatten()[:,None]), axis=-1)
Z = np.tile(Z,(training_batch_size,1,1))
W = np.outer(w1, w2).flatten()[:,None]
W = np.tile(W,(training_batch_size,1,1))
polypoints = polypoints**dy
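# Quadrature sanity note: leggauss weights sum to 2 per dimension, so
# jac_det * W.sum(axis=1) == 0.25 * 4 == 1 for every batch entry, i.e. the
# rule integrates constants over [0,1]^2 exactly.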
# in_noise_train = 0.15*np.random.normal(loc=0.0, scale=1.0, size=(u_train.shape))
# u_train = u_train + in_noise_train
X = np.zeros((Nx,Ny))
Y = np.zeros((Nx,Ny))
dx = 0.037037037037037035
for kk in range(0,Nx):
for jj in range(0,Ny):
X[kk,jj] = jj*dx #+ 0.5 # x is columns
Y[kk,jj] = kk*dx #+ 0.5 # y is rows
Y_train = np.concatenate((X.flatten()[:,None], Y.flatten()[:,None]),axis=-1)
Y_train_in = Y_train
Y_test = np.concatenate((X.flatten()[:,None], Y.flatten()[:,None]),axis=-1)
Y_test_in = Y_test
s_train = np.zeros((num_train*N_hat,P,ds))
y_train = np.zeros((num_train*N_hat,P,dy))
U_train = np.zeros((num_train*N_hat,m,du))
for j in range(0,N_hat):
for i in range(0,num_train):
s_train[i + j*num_train,:,:], y_train[i+ j*num_train,:,:] = output_construction(S_train[i,:,:,:], X, Y, P=P,Nt=Nt, Nx=Nx, Ny=Ny, ds=ds, dy=dy)
U_train[i+ j*num_train,:,:] = u_train[i,:,:,:].reshape(Nx*Ny,du)
num_train = num_train*N_hat
z = jnp.asarray(Z)
w = jnp.asarray(W)
del S_train, dispx_allsteps_train, dispy_allsteps_train, u_train, Z, W, u_trainx, u_trainy
U_train = jnp.reshape(U_train,(num_train,m,du))
y_train = jnp.reshape(y_train,(num_train,P,dy))
s_train = jnp.reshape(s_train,(num_train,P,ds))
z = jnp.reshape(z,(training_batch_size,polypoints,dy))
w = jnp.reshape(w,(training_batch_size,polypoints,1))
pos_encodingy = PositionalEncodingY(y_train,int(y_train.shape[1]*y_train.shape[2]), max_len = P, H=H)
y_train = pos_encodingy.forward(y_train)
del pos_encodingy
pos_encodingy = PositionalEncodingY(z,int(z.shape[1]*z.shape[2]), max_len = polypoints, H=H)
z = pos_encodingy.forward(z)
del pos_encodingy
inputs_trainxu = np.zeros((num_train,3332,du))
inputs_trainxu[:,:,0:1] = jnp.asarray(scattering(U_train[:,:,0:1], l=L, m=m, training_batch_size=num_train))
inputs_trainxu[:,:,1:2] = jnp.asarray(scattering(U_train[:,:,1:2], l=L, m=m, training_batch_size=num_train))
inputs_trainxu = jnp.array(inputs_trainxu)
print("Dataset preprocessed")
train_dataset = DataGenerator(inputs_trainxu, jnp.asarray(y_train), jnp.asarray(s_train), z, w, training_batch_size)
train_dataset = iter(train_dataset)
encoder_layers2 = [L*dy+H*dy, 256, 256, l]
weights_layers = [3332*du, 256, 256, ds*n_hat]
g_layers = [l, 256, 256, ds*n_hat]
model = LOCA(encoder_layers2, g_layers, weights_layers, m=m, P=P, H=H, batch_size=training_batch_size, jac_det=jac_det)
del inputs_trainxu, y_train, U_train, Y_train_in, s_train
print("P is equal to %d"%(P))
model.count_params(model.get_params(model.opt_state))
start_time = timeit.default_timer()
model.train(train_dataset, nIter=TRAINING_ITERATIONS)
elapsed = timeit.default_timer() - start_time
print("The training wall-clock time is equal to %f seconds"%elapsed)
params = model.get_params(model.opt_state)
del train_dataset
d = np.load("../Data/MMNIST_dataset_test.npz")
dispx_allsteps_test = d["dispx_allsteps_test"][:num_test,11,:,:,None]
dispy_allsteps_test = d["dispy_allsteps_test"][:num_test,11,:,:,None]
u_testx = d["dispx_allsteps_test"][:num_test,7,:,:,None]
u_testy = d["dispy_allsteps_test"][:num_test,7,:,:,None]
S_test = np.concatenate((dispx_allsteps_test,dispy_allsteps_test),axis=-1)
u_test = np.concatenate((u_testx,u_testy),axis=-1)
s_test = np.zeros((num_test,P,ds))
y_test = np.zeros((num_test,P,dy))  # was (num_test, P, 1, dy), which cannot accept the (P, dy) rows assigned below
U_test = np.zeros((num_test,m,du))
for i in range(num_test):
    s_test[i,:,:], y_test[i,:,:] = output_construction(S_test[i,:,:,:], X,Y, P=P,Nt=Nt, Nx=Nx, Ny=Ny, ds=ds, dy=dy)
U_test[i,:,:] = u_test[i,:,:,:].reshape(Nx*Ny,du)
U_test = jnp.reshape(U_test,(num_test,m,du))
y_test = jnp.reshape(y_test,(num_test,P,dy))
s_test = jnp.reshape(s_test,(num_test,P,ds))
y_train_posT = y_test
pos_encodingyt = PositionalEncodingY(y_train_posT,int(y_train_posT.shape[1]*y_train_posT.shape[2]), max_len = P, H=H)
y_test = pos_encodingyt.forward(y_test)
del pos_encodingyt
tag = "CN"
in_noise_test = 0.15*np.random.normal(loc=0.0, scale=1.0, size=(u_test.shape))
u_test = u_test + in_noise_test
U_test = np.zeros((num_test,m,du))
for i in range(num_test):
U_test[i,:,:] = u_test[i,:,:,:].reshape(Nx*Ny,du)
inputs_testxu = np.zeros((num_test,3332,du))
inputs_testxu[:,:,0:1] = jnp.asarray(scattering(U_test[:,:,0:1], l=L, m=m, training_batch_size=num_test))
inputs_testxu[:,:,1:2] = jnp.asarray(scattering(U_test[:,:,1:2], l=L, m=m, training_batch_size=num_test))
inputs_testxu = jnp.array(inputs_testxu)
Z = np.concatenate((Z_1.flatten()[:,None], Z_2.flatten()[:,None]), axis=-1)
Z = np.tile(Z,(training_batch_size,1,1))
W = np.outer(w1, w2).flatten()[:,None]
W = np.tile(W,(training_batch_size,1,1))
z = jnp.asarray(Z)
w = jnp.asarray(W)
z = jnp.reshape(z,(training_batch_size,polypoints,dy))
w = jnp.reshape(w,(training_batch_size,polypoints,1))
pos_encodingy = PositionalEncodingY(z,int(z.shape[1]*z.shape[2]), max_len = polypoints, H=H)
z = pos_encodingy.forward(z)
del pos_encodingy
s_super_all_test = np.zeros((num_test, Nx*Ny*Nt, ds))
print("Predicting the solution for the full resolution")
s_super_all_test, X, Y = predict_function(U_test, Y_test_in, model=model, P=P, Nx=Nx, Ny=Ny, Nt=Nt, params=params, L=L,mode="test", num_test=num_test, training_batch_size=training_batch_size, H=H, z=z, w=w)
s_super_all_test = np.asarray(s_super_all_test)
S_test = np.asarray(S_test)
absolute_error_test, mean_test_error_u, mean_test_error_v, test_error_u, test_error_v = error_full_resolution(s_super_all_test,S_test,tag='test',P=P,Nx=Nx, Ny=Ny, Nt=Nt, idx = None, num_train=num_test)
| 22,479 | 36.845118 | 238 | py |
null | LOCA-main/PushForward/DeepONet/DeepONet_Pushforward.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from numpy.polynomial import polyutils
from jax.experimental.stax import Dense, Gelu
from jax.experimental import stax
import os
from scipy.integrate import solve_ivp
import timeit
from jax.experimental import optimizers
from absl import app
import jax
import jax.numpy as jnp
import numpy as np
from jax.numpy.linalg import norm
from jax import random, grad, vmap, jit, vjp
from functools import partial
from torch.utils import data
from tqdm import trange
import itertools
def get_freer_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
return str(np.argmax(memory_available))
os.environ['CUDA_VISIBLE_DEVICES']= get_freer_gpu()
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']="False"
class DataGenerator(data.Dataset):
def __init__(self, u, y, s,
batch_size=100, rng_key=random.PRNGKey(1234)):
'Initialization'
self.u = u
self.y = y
self.s = s
self.N = u.shape[0]
self.batch_size = batch_size
self.key = rng_key
# @partial(jit, static_argnums=(0,))
def __getitem__(self, index):
'Generate one batch of data'
self.key, subkey = random.split(self.key)
inputs,outputs = self.__data_generation(subkey)
return inputs, outputs
@partial(jit, static_argnums=(0,))
def __data_generation(self, key):
'Generates data containing batch_size samples'
idx = random.choice(key, self.N, (self.batch_size,), replace=False)
s = self.s[idx,:,:]
u = self.u[idx,:,:]
y = self.y[idx,:,:]
inputs = (u, y)
return inputs, s
class PositionalEncodingY:
def __init__(self, Y, d_model, max_len = 100,H=20):
self.d_model = d_model
self.Y = Y
self.max_len = max_len
self.H = H
@partial(jit, static_argnums=(0,))
def forward(self, x):
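        # Append H sinusoidal features (cos/sin pairs at dyadic frequencies
        # 2^k * pi) of the query coordinate to x -- a Fourier-feature /
        # transformer-style positional encoding of the evaluation points.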
self.pe = np.zeros((x.shape[0], self.max_len, self.H))
T = jnp.asarray(self.Y[:,:,0:1])
position = jnp.tile(T,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
self.pe = jax.ops.index_update(self.pe, jax.ops.index[:,:,0::2], jnp.cos(position[:,:,0::2] * div_term))
self.pe = jax.ops.index_update(self.pe, jax.ops.index[:,:,1::2], jnp.sin(position[:,:,1::2] * div_term))
x = jnp.concatenate([x, self.pe],axis=-1)
return x
class PositionalEncodingU:
def __init__(self, Y, d_model, max_len = 100,H=20):
self.d_model = d_model
self.Y = Y
self.max_len = max_len
self.H = H
@partial(jit, static_argnums=(0,))
def forward(self, x):
self.pe = np.zeros((x.shape[0], self.max_len, self.H))
T = jnp.asarray(self.Y[:,:,0:1])
position = jnp.tile(T,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
self.pe = jax.ops.index_update(self.pe, jax.ops.index[:,:,0::2], jnp.cos(position[:,:,0::2] * div_term))
self.pe = jax.ops.index_update(self.pe, jax.ops.index[:,:,1::2], jnp.sin(position[:,:,1::2] * div_term))
x = jnp.concatenate([x, self.pe],axis=-1)
return x
class DON:
def __init__(self,branch_layers, trunk_layers , m=100, P=100, mn=None, std=None):
# Network initialization and evaluation functions
seed = np.random.randint(10000)
self.branch_init, self.branch_apply = self.init_NN(branch_layers, activation=Gelu)
self.in_shape = (-1, branch_layers[0])
self.out_shape, branch_params = self.branch_init(random.PRNGKey(seed), self.in_shape)
seed = np.random.randint(10000)
self.trunk_init, self.trunk_apply = self.init_NN(trunk_layers, activation=Gelu)
self.in_shape = (-1, trunk_layers[0])
self.out_shape, trunk_params = self.trunk_init(random.PRNGKey(seed), self.in_shape)
params = (trunk_params, branch_params)
# Use optimizers to set optimizer initialization and update functions
self.opt_init,self.opt_update,self.get_params = optimizers.adam(optimizers.exponential_decay(1e-3,
decay_steps=100,
decay_rate=0.99))
self.opt_state = self.opt_init(params)
# Logger
self.itercount = itertools.count()
self.loss_log = []
self.mean = mn
self.std = std
def init_NN(self, Q, activation=Gelu):
layers = []
num_layers = len(Q)
if num_layers < 2:
net_init, net_apply = stax.serial()
else:
            for i in range(0, num_layers-2):
layers.append(Dense(Q[i+1]))
layers.append(activation)
layers.append(Dense(Q[-1]))
net_init, net_apply = stax.serial(*layers)
return net_init, net_apply
@partial(jax.jit, static_argnums=0)
def DON(self, params, inputs, ds=1):
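        # DeepONet forward pass: the trunk net maps each query point y to
        # n_hat basis values per output channel (the hard-coded 100 matches
        # n_hat above), the branch net maps the flattened input function u to
        # matching coefficients, and the einsum forms their inner product to
        # produce G(u)(y).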
trunk_params, branch_params = params
u, y = inputs
        # print(u.shape, y.shape)  # debug: under jit this only prints traced shapes
t = self.trunk_apply(trunk_params, y).reshape(y.shape[0], y.shape[1], ds, int(100/ds))
b = self.branch_apply(branch_params, u.reshape(u.shape[0],1,u.shape[1]*u.shape[2]))
b = b.reshape(b.shape[0],int(b.shape[2]/ds),ds)
Guy = jnp.einsum("ijkl,ilk->ijk", t,b)
return Guy
@partial(jax.jit, static_argnums=0)
def loss(self, params, batch):
inputs, y = batch
y_pred = self.DON(params,inputs)
y = y*self.std + self.mean
y_pred = y_pred*self.std + self.mean
loss = np.mean((y.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def lossT(self, params, batch):
inputs, outputs = batch
y_pred = self.DON(params,inputs)
y_pred = y_pred*self.std + self.mean
loss = np.mean((outputs.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def L2errorT(self, params, batch):
inputs, y = batch
y_pred = self.DON(params,inputs)
y_pred = y_pred*self.std + self.mean
return norm(y.flatten() - y_pred.flatten(), 2)/norm(y.flatten(),2)
@partial(jax.jit, static_argnums=0)
def L2error(self, params, batch):
inputs, y = batch
y_pred = self.DON(params,inputs)
y = y*self.std + self.mean
y_pred = y_pred*self.std + self.mean
return norm(y.flatten() - y_pred.flatten(), 2)/norm(y.flatten(),2)
@partial(jit, static_argnums=(0,))
def step(self, i, opt_state, batch):
params = self.get_params(opt_state)
g = grad(self.loss)(params, batch)
return self.opt_update(i, g, opt_state)
def train(self, train_dataset, test_dataset, nIter = 10000):
train_data = iter(train_dataset)
test_data = iter(test_dataset)
pbar = trange(nIter)
for it in pbar:
train_batch = next(train_data)
test_batch = next(test_data)
self.opt_state = self.step(next(self.itercount), self.opt_state, train_batch)
if it % 100 == 0:
params = self.get_params(self.opt_state)
loss_train = self.loss(params, train_batch)
loss_test = self.lossT(params, test_batch)
errorTrain = self.L2error(params, train_batch)
errorTest = self.L2errorT(params, test_batch)
self.loss_log.append(loss_train)
pbar.set_postfix({'Training loss': loss_train,
'Testing loss' : loss_test,
'Test error': errorTest,
'Train error': errorTrain})
@partial(jit, static_argnums=(0,))
def predict(self, params, inputs):
s_pred = self.DON(params,inputs)
return s_pred
@partial(jit, static_argnums=(0,))
def predictT(self, params, inputs):
s_pred = self.DON(params,inputs)
return s_pred
def ravel_list(self, *lst):
return jnp.concatenate([jnp.ravel(elt) for elt in lst]) if lst else jnp.array([])
def ravel_pytree(self, pytree):
leaves, treedef = jax.tree_util.tree_flatten(pytree)
flat, unravel_list = vjp(self.ravel_list, *leaves)
unravel_pytree = lambda flat: jax.tree_util.tree_unflatten(treedef, unravel_list(flat))
return flat, unravel_pytree
def count_params(self, params):
trunk_params, branch_params = params
blv, _ = self.ravel_pytree(branch_params)
tlv, _ = self.ravel_pytree(trunk_params)
print("The number of model parameters is:",blv.shape[0]+tlv.shape[0])
TRAINING_ITERATIONS = 50000
P = 300
m = 300
num_train = 1000
num_test = 1000
training_batch_size = 100
du = 1
dy = 1
ds = 1
n_hat = 100
Nx = P
index = 9
length_scale = 0.9
H_y = 10
H_u = 10
d = np.load("../Data/train_pushforward.npz")
U_train = d["U_train"]
x_train = d["x_train"]
y_train = d["y_train"]
s_train = d["s_train"]
d = np.load("../Data/test_pushforward.npz")
U_test = d["U_test"]
x_test = d["x_test"]
y_test = d["y_test"]
s_test = d["s_test"]
y_train = jnp.asarray(y_train)
s_train = jnp.asarray(s_train)
U_train = jnp.asarray(U_train)
y_test = jnp.asarray(y_test)
s_test = jnp.asarray(s_test)
U_test = jnp.asarray(U_test)
U_train = jnp.reshape(U_train,(num_train,m,du))
y_train = jnp.reshape(y_train,(num_train,P,dy))
s_train = jnp.reshape(s_train,(num_train,P,ds))
U_test = jnp.reshape(U_test,(num_test,m,du))
y_test = jnp.reshape(y_test,(num_test,P,dy))
s_test = jnp.reshape(s_test,(num_test,P,ds))
plot=False
if plot:
import matplotlib.pyplot as plt
pltN = 10
for i in range(0,pltN-1):
plt.plot(y_train[i,:,0], s_train[i,:,0], 'r-')
plt.plot(y_test[i,:,0], s_test[i,:,0], 'b-')
plt.plot(y_train[pltN,:,0], s_train[pltN,:,0], 'r-', label="Training output")
plt.plot(y_test[pltN,:,0], s_test[pltN,:,0], 'b-', label="Testing output")
plt.legend()
plt.show()
x = jnp.linspace(0,1,num=m)
pltN = 10
for i in range(0,pltN-1):
plt.plot(x, np.asarray(U_train)[i,:,0], 'y-')
plt.plot(x, np.asarray(U_test)[i,:,0], 'g-')
plt.plot(x, np.asarray(U_train)[pltN,:,0], 'y-', label="Training input")
plt.plot(x, np.asarray(U_test)[pltN,:,0], 'g-', label="Testing input")
plt.legend()
pos_encodingy = PositionalEncodingY(y_train,int(y_train.shape[1]*y_train.shape[2]), max_len = P, H=H_y)
y_train = pos_encodingy.forward(y_train)
del pos_encodingy
pos_encodingyt = PositionalEncodingY(y_test,int(y_test.shape[1]*y_test.shape[2]), max_len = P, H=H_y)
y_test = pos_encodingyt.forward(y_test)
del pos_encodingyt
pos_encodingy = PositionalEncodingU(U_train,int(U_train.shape[1]*U_train.shape[2]), max_len = m, H=H_u)
U_train = pos_encodingy.forward(U_train)
del pos_encodingy
pos_encodingyt = PositionalEncodingU(U_test,int(U_test.shape[1]*U_test.shape[2]), max_len = m, H=H_u)
U_test = pos_encodingyt.forward(U_test)
del pos_encodingyt
s_train_mean = 0.  # jnp.mean(s_train,axis=0)
s_train_std = 1.  # jnp.std(s_train,axis=0) + 1e-03
s_train = (s_train - s_train_mean)/s_train_std
# Perform the scattering transform for the inputs yh
train_dataset = DataGenerator(U_train, y_train, s_train, training_batch_size)
train_dataset = iter(train_dataset)
test_dataset = DataGenerator(U_test, y_test, s_test, training_batch_size)
test_dataset = iter(test_dataset)
branch_layers = [m*(du*H_u+du), 512, 512, ds*n_hat]
trunk_layers = [H_y*dy + dy, 512, 512, ds*n_hat]
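# Branch input: m sensor readings with du channels plus H_u positional
# features per channel; trunk input: the dy query coordinates plus H_y
# positional features. Both nets end in ds*n_hat units so the trunk basis
# functions and branch coefficients can be paired channel by channel.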
# branch_layers = [m*du, 512, 512, ds*n_hat]
# trunk_layers = [dy, 512, 512, ds*n_hat]
model = DON(branch_layers, trunk_layers, m=m, P=P, mn=s_train_mean, std=s_train_std)
model.count_params(model.get_params(model.opt_state))
start_time = timeit.default_timer()
model.train(train_dataset, test_dataset, nIter=TRAINING_ITERATIONS)
elapsed = timeit.default_timer() - start_time
print("The training wall-clock time is seconds is equal to %f seconds"%elapsed)
params = model.get_params(model.opt_state)
uCNN_test = model.predictT(params, (U_test, y_test))
test_error_u = []
for i in range(0,num_test):
test_error_u.append(norm(s_test[i,:,0]- uCNN_test[i,:,0],2)/norm(s_test[i,:,0],2))
print("The average test u error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(test_error_u),np.std(test_error_u),np.min(test_error_u),np.max(test_error_u)))
uCNN_train = model.predict(params, (U_train, y_train))
train_error_u = []
for i in range(0,num_train):
train_error_u.append(norm(s_train[i,:,0]- uCNN_train[i,:,0],2)/norm(s_train[i,:,0],2))
print("The average train u error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(train_error_u),np.std(train_error_u),np.min(train_error_u),np.max(train_error_u)))
trunk_params, branch_params = params
t = model.trunk_apply(trunk_params, y_test).reshape(y_test.shape[0], y_test.shape[1], ds, int(n_hat/ds))
def argmin_index(a):
    minpos = a.index(min(a))
    print("The minimum is at position", minpos)
    return minpos
minpos = argmin_index(train_error_u)
np.savez_compressed("eigenfunctions_DON3.npz", efuncs=t[minpos,:,0,:])
# File: LOCA-main/PushForward/LOCA/LOCAPushforward.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import jax
import jax.numpy as jnp
from jax.example_libraries.stax import Dense, Gelu
from jax.example_libraries import stax
from jax.example_libraries import optimizers
import os
import timeit
import numpy as np
from jax.numpy.linalg import norm
from jax import random, grad, vmap, jit, vjp
from functools import partial
from torch.utils import data
from tqdm import trange
import itertools
from kymatio.numpy import Scattering1D
from jax.flatten_util import ravel_pytree
from numpy.polynomial.legendre import leggauss
def get_freer_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Used >tmp')
    memory_used = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
    return str(np.argmin(memory_used))
os.environ['CUDA_VISIBLE_DEVICES']= get_freer_gpu()
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']="False"
def pairwise_distances(dist,**arg):
return jit(vmap(vmap(partial(dist,**arg),in_axes=(None,0)),in_axes=(0,None)))
def euclid_distance(x,y):
XX=jnp.dot(x,x)
YY=jnp.dot(y,y)
XY=jnp.dot(x,y)
return XX+YY-2*XY
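# euclid_distance returns the *squared* Euclidean distance via the
# ||x||^2 + ||y||^2 - 2<x,y> identity; pairwise_distances vmaps it into a
# full pairwise distance matrix, and the extra vmap used in the model adds a
# batch dimension on top.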
class DataGenerator(data.Dataset):
def __init__(self, inputsxuy, inputsxu, y, s, z, w,
batch_size=100, rng_key=random.PRNGKey(1234)):
'Initialization'
self.inputsxuy = inputsxuy
self.inputsxu = inputsxu
self.y = y
self.s = s
self.z = z
self.w = w
self.N = inputsxu.shape[0]
self.batch_size = batch_size
self.key = rng_key
# @partial(jit, static_argnums=(0,))
def __getitem__(self, index):
'Generate one batch of data'
self.key, subkey = random.split(self.key)
inputs,outputs = self.__data_generation(subkey)
return inputs, outputs
@partial(jit, static_argnums=(0,))
def __data_generation(self, key):
'Generates data containing batch_size samples'
idx = random.choice(key, self.N, (self.batch_size,), replace=False)
s = self.s[idx,:,:]
inputsxu = self.inputsxu[idx,:,:]
y = self.y[idx,:,:]
z = self.z[idx,:,:]
w = self.w[idx,:,:]
inputs = (inputsxu, y, z, w)
return inputs, s
class PositionalEncodingY:
def __init__(self, Y, d_model, max_len = 100,H=20):
self.d_model = d_model
self.Y = Y
self.max_len = max_len
self.H = H
@partial(jit, static_argnums=(0,))
def forward(self, x):
self.pe = np.zeros((x.shape[0], self.max_len, self.H))
T = jnp.asarray(self.Y[:,:,0:1])
position = jnp.tile(T,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
self.pe = jax.ops.index_update(self.pe, jax.ops.index[:,:,0::2], jnp.cos(position[:,:,0::2] * div_term))
self.pe = jax.ops.index_update(self.pe, jax.ops.index[:,:,1::2], jnp.sin(position[:,:,1::2] * div_term))
x = jnp.concatenate([x, self.pe],axis=-1)
return x
def scatteringTransform(sig, l=100, m=100, training_batch_size = 100):
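    # Wavelet scattering transform (kymatio) of each input signal; the
    # flattened coefficients (length 1200 for this signal length with J=1,
    # Q=8) give a translation-stable representation that feeds the v network.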
J = 1
Q = 8
T = sig.shape[1]
scattering = Scattering1D(J, T, Q)
sig = np.asarray(sig)
sctcoef = np.zeros((training_batch_size, 1200, 1))
for i in range(0,training_batch_size):
sctcoef[i,:,:] = scattering(sig[i,:,0]).flatten()[:,None]
return sctcoef
class LpLoss(object):
def __init__(self, d=2, p=2):
super(LpLoss, self).__init__()
self.d = d
self.p = p
def rel(self, y, x):
num_examples = x.shape[0]
diff_norms = jnp.linalg.norm(y.reshape(num_examples,-1) - x.reshape(num_examples,-1), self.p, 1)
y_norms = jnp.linalg.norm(y.reshape(num_examples,-1), self.p, 1)
return jnp.mean(diff_norms/y_norms)
def __call__(self, y, x):
return self.rel(y, x)
class LOCA:
def __init__(self, q_layers, g_layers, v_layers , m=100, P=100, jac_det=None):
# Network initialization and evaluation functions
seed = np.random.randint(10000)
self.q_init, self.q_apply = self.init_NN(q_layers, activation=Gelu)
self.in_shape = (-1, q_layers[0])
self.out_shape, q_params = self.q_init(random.PRNGKey(seed), self.in_shape)
seed = np.random.randint(10000)
self.v_init, self.v_apply = self.init_NN(v_layers, activation=Gelu)
self.in_shape = (-1, v_layers[0])
self.out_shape, v_params = self.v_init(random.PRNGKey(seed), self.in_shape)
seed = np.random.randint(10000)
self.g_init, self.g_apply = self.init_NN(g_layers, activation=Gelu)
self.in_shape = (-1, g_layers[0])
self.out_shape, g_params = self.g_init(random.PRNGKey(seed), self.in_shape)
        # Kernel hyperparameters (amplitude beta, inverse length-scale gamma);
        # LOCA_net below uses the periodic kernel.
beta = [1.]
gamma = [1.]
# Model parameters
params = (beta, gamma,q_params, g_params, v_params)
self.opt_init,self.opt_update,self.get_params = optimizers.adam(optimizers.exponential_decay(1e-3,
decay_steps=100,
decay_rate=0.99))
self.opt_state = self.opt_init(params)
self.itercount = itertools.count()
self.loss_log = []
self.l2loss = LpLoss()
self.jac_det = jac_det
self.vdistance_function = vmap(pairwise_distances(euclid_distance))
def init_NN(self, Q, activation=Gelu):
layers = []
num_layers = len(Q)
if num_layers < 2:
net_init, net_apply = stax.serial()
else:
for i in range(0, num_layers-2):
layers.append(Dense(Q[i+1]))
layers.append(activation)
layers.append(Dense(Q[-1]))
net_init, net_apply = stax.serial(*layers)
return net_init, net_apply
@partial(jax.jit, static_argnums=0)
def RBF(self, X, Y, gamma, beta):
d = self.vdistance_function(X, Y)
return beta[0]*jnp.exp(-gamma[0]*d)
@partial(jax.jit, static_argnums=0)
def Matern_32(self, X, Y, gamma=[0.5], beta=[0.5]):
d = self.vdistance_function(X, Y)
return (1 + (jnp.sqrt(3)*gamma[0])*d)*beta[0]*jnp.exp(-(jnp.sqrt(3)*gamma[0])*d)
@partial(jax.jit, static_argnums=0)
def Matern_52(self, X, Y, gamma=[0.5], beta=[0.5]):
d = self.vdistance_function(X, Y)
return (1 + (jnp.sqrt(5)*gamma[0])*d + (5/3*gamma[0]**2)*d**2)*beta[0]*jnp.exp(-(jnp.sqrt(5)*gamma[0])*d)
@partial(jax.jit, static_argnums=0)
def periodic(self, X, Y, gamma=[0.5], beta=[0.5], p=0.7):
d = self.vdistance_function(X, Y)
return beta[0]*jnp.exp(-2.0*jnp.sin(jnp.pi*d/p)**2*gamma[0])
@partial(jax.jit, static_argnums=0)
def RQK(self, X, Y, gamma, beta):
d = self.vdistance_function(X, Y)
return beta[0]*(1 + (1./(3.*0.1*2))*gamma[0]*d)**(gamma[0])
@partial(jax.jit, static_argnums=0)
def local_periodic(self, X, Y, gamma, beta):
return self.periodic(X, Y, gamma, beta)*self.RBF(X, Y, gamma, beta)
@partial(jax.jit, static_argnums=0)
def LOCA_net(self, params, inputs, ds=1):
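        # LOCA forward pass: encode queries y and quadrature nodes z with the
        # shared q network, evaluate the kernel between them, and normalize it
        # by sqrt(int K(y,z')dz') * sqrt(int K(z,z')dz'), both integrals being
        # approximated with the Gauss-Legendre weights w and jac_det. The
        # normalized kernel averages the score function g over z, a softmax
        # turns the result into attention weights, and these weight the
        # features v(u) extracted from the input function.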
beta, gamma, q_params, g_params, v_params = params
u, y, z, w = inputs
y = self.q_apply(q_params,y)
z = self.q_apply(q_params,z)
K = self.periodic(z, z, gamma, beta)
Kzz = jnp.sqrt(self.jac_det*jnp.einsum("ijk,ikl->ijl",K,w))
K = self.periodic(y, z, gamma, beta)
Kyz = jnp.sqrt(self.jac_det*jnp.einsum("ijk,ikl->ijl",K,w))
mean_K = jnp.matmul(Kyz, jnp.swapaxes(Kzz,1,2))
K = jnp.divide(K,mean_K)
g = self.g_apply(g_params,z)
g = self.jac_det*jnp.einsum("ijk,iklm,ik->ijlm",K,g.reshape(g.shape[0],g.shape[1], ds, int(g.shape[-1]/ds)),w[:,:,-1])
g = jax.nn.softmax(g, axis=-1)
v = self.v_apply(v_params, u.reshape(u.shape[0],1,u.shape[1]*u.shape[2]))
v = v.reshape(v.shape[0],int(v.shape[2]/ds),ds)
Guy = jnp.einsum("ijkl,ilk->ijk", g,v)
return Guy
@partial(jax.jit, static_argnums=0)
def loss(self, params, batch):
inputs, outputs = batch
y_pred = self.LOCA_net(params,inputs)
loss = np.mean((outputs.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def L2error(self, params, batch):
inputs, outputs = batch
y_pred = self.LOCA_net(params,inputs)
return norm(outputs.flatten() - y_pred.flatten(), 2)/norm(outputs.flatten(),2)
@partial(jit, static_argnums=(0,))
def step(self, i, opt_state, batch):
params = self.get_params(opt_state)
g = grad(self.loss)(params, batch)
return self.opt_update(i, g, opt_state)
def train(self, train_dataset, test_dataset, nIter = 10000):
train_data = iter(train_dataset)
test_data = iter(test_dataset)
pbar = trange(nIter)
for it in pbar:
train_batch = next(train_data)
test_batch = next(test_data)
self.opt_state = self.step(next(self.itercount), self.opt_state, train_batch)
if it % 100 == 0:
params = self.get_params(self.opt_state)
loss_train = self.loss(params, train_batch)
loss_test = self.loss(params, test_batch)
errorTrain = self.L2error(params, train_batch)
errorTest = self.L2error(params, test_batch)
self.loss_log.append(loss_train)
pbar.set_postfix({'Training loss': loss_train,
'Testing loss' : loss_test,
'Test error': errorTest,
'Train error': errorTrain})
@partial(jit, static_argnums=(0,))
def predict(self, params, inputs):
s_pred = self.LOCA_net(params,inputs)
return s_pred
def count_params(self, params):
params_flat, _ = ravel_pytree(params)
print("The number of model parameters is:",params_flat.shape[0])
TRAINING_ITERATIONS = 50000
P = 300
m = 300
L = 1
T = 1
N_hat = 1
num_train = 1000
num_test = 1000
training_batch_size = 100
du = 1
dy = 1
ds = 1
n_hat = 100
l = 100
Nx = P
H = 20
# Number of GLL quadrature points, coordinates and weights
polypoints = 20
z, w = leggauss(polypoints)
lb = np.array([0.0])
ub = np.array([1.0])
# Map [-1,1] -> [0,1]
z = 0.5*(ub - lb)*(z + 1.0) + lb
jac_det = 0.5*(ub-lb)
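# jac_det is the Jacobian of the affine map [-1,1] -> [lb,ub]; it multiplies
# the quadrature weights whenever kernel integrals are approximated in LOCA_net.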
# Reshape both weights and coordinates. We need them to have shape: (num_train, N, dy)
z = np.tile(np.expand_dims(z,0),(num_train,1))[:,:,None]
w = np.tile(np.expand_dims(w,0),(num_train,1))[:,:,None]
# Create the dataset
d = np.load("../Data/train_pushforward.npz")
U_train = d["U_train"]
x_train = d["x_train"]
y_train = d["y_train"]
s_train = d["s_train"]
d = np.load("../Data/test_pushforward.npz")
U_test = d["U_test"]
x_test = d["x_test"]
y_test = d["y_test"]
s_test = d["s_test"]
# Make all array to be jax numpy format
y_train = jnp.asarray(y_train)
s_train = jnp.asarray(s_train)
y_test = jnp.asarray(y_test)
s_test = jnp.asarray(s_test)
z = jnp.asarray(z)
w = jnp.asarray(w)
U_train = np.reshape(U_train,(num_train,m,du))
y_train = jnp.reshape(y_train,(num_train,P,dy))
s_train = jnp.reshape(s_train,(num_train,P,ds))
U_test = np.reshape(U_test,(num_test,m,du))
y_test = jnp.reshape(y_test,(num_test,P,dy))
s_test = jnp.reshape(s_test,(num_test,P,ds))
z = jnp.reshape(z,(num_train,polypoints,dy))
w = jnp.reshape(w,(num_train,polypoints,dy))
plot=False
if plot:
import matplotlib.pyplot as plt
pltN = 10
for i in range(0,pltN-1):
plt.plot(y_train[i,:,0], s_train[i,:,0], 'r-')
plt.plot(y_test[i,:,0], s_test[i,:,0], 'b-')
plt.plot(y_train[pltN,:,0], s_train[pltN,:,0], 'r-', label="Training output")
plt.plot(y_test[pltN,:,0], s_test[pltN,:,0], 'b-', label="Testing output")
plt.legend()
plt.show()
x = jnp.linspace(0,1,num=m)
pltN = 10
for i in range(0,pltN-1):
plt.plot(x, np.asarray(U_train)[i,:,0], 'y-')
plt.plot(x, np.asarray(U_test)[i,:,0], 'g-')
plt.plot(x, np.asarray(U_train)[pltN,:,0], 'y-', label="Training input")
plt.plot(x, np.asarray(U_test)[pltN,:,0], 'g-', label="Testing input")
plt.legend()
plt.show()
y_train_pos = y_train
pos_encodingy = PositionalEncodingY(y_train,int(y_train.shape[1]*y_train.shape[2]), max_len = P, H=H)
y_train = pos_encodingy.forward(y_train)
del pos_encodingy
pos_encodingy = PositionalEncodingY(z,int(z.shape[1]*z.shape[2]), max_len = polypoints, H=H)
z = pos_encodingy.forward(z)
del pos_encodingy
pos_encodingyt = PositionalEncodingY(y_test,int(y_test.shape[1]*y_test.shape[2]), max_len = P, H=H)
y_test = pos_encodingyt.forward(y_test)
del pos_encodingyt
start_time = timeit.default_timer()
inputs_trainxu = jnp.asarray(scatteringTransform(U_train, l=l, m=m, training_batch_size=num_train))
inputs_testxu = jnp.asarray(scatteringTransform(U_test , l=l, m=m, training_batch_size=num_test))
# inputs_trainxu = jnp.asarray(U_train)
# inputs_testxu = jnp.asarray(U_test )
elapsed = timeit.default_timer() - start_time
print("The wall-clock time for for loop is seconds is equal to %f seconds"%elapsed)
print(inputs_trainxu.shape, inputs_testxu.shape)
train_dataset = DataGenerator(inputs_trainxu, inputs_trainxu, y_train, s_train, z, w, training_batch_size)
train_dataset = iter(train_dataset)
test_dataset = DataGenerator(inputs_testxu, inputs_testxu, y_test, s_test, z, w, training_batch_size)
test_dataset = iter(test_dataset)
q_layers = [L*dy+H*dy, 100, 100, l]
v_layers = [1200*du, 1024, ds*n_hat]
g_layers = [l, 100, 100, ds*n_hat]
model = LOCA(q_layers, g_layers, v_layers, m=m, P=P, jac_det=jac_det)
model.count_params(model.get_params(model.opt_state))
start_time = timeit.default_timer()
model.train(train_dataset, test_dataset, nIter=TRAINING_ITERATIONS)
elapsed = timeit.default_timer() - start_time
print("The training wall-clock time is seconds is equal to %f seconds"%elapsed)
params = model.get_params(model.opt_state)
uCNN_test = model.predict(params, (inputs_testxu,y_test, z, w))
test_error_u = []
for i in range(0,s_test.shape[0]):
test_error_u.append(jnp.linalg.norm(s_test[i,:,-1] - uCNN_test[i,:,-1], 2)/jnp.linalg.norm(s_test[i,:,-1], 2))
print("The average test u error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(test_error_u),np.std(test_error_u),np.min(test_error_u),np.max(test_error_u)))
uCNN_train = model.predict(params, (inputs_trainxu, y_train, z, w))
train_error_u = []
for i in range(0,s_train.shape[0]):
train_error_u.append(jnp.linalg.norm(s_train[i,:,-1] - uCNN_train[i,:,-1], 2)/jnp.linalg.norm(s_train[i,:,-1], 2))
print("The average train u error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(train_error_u),np.std(train_error_u),np.min(train_error_u),np.max(train_error_u)))
beta, gamma, q_params, g_params, v_params = params
y = y_test
y = model.q_apply(q_params,y)
z = model.q_apply(q_params,z)
K = model.periodic(z, z, gamma, beta)
Kzz = jnp.sqrt(model.jac_det*jnp.einsum("ijk,ikl->ijl",K,w))
K = model.periodic(y, z, gamma, beta)
Kyz = jnp.sqrt(model.jac_det*jnp.einsum("ijk,ikl->ijl",K,w))
mean_K = jnp.matmul(Kyz, jnp.swapaxes(Kzz,1,2))
K = jnp.divide(K,mean_K)
g = model.g_apply(g_params,z)
g = model.jac_det*jnp.einsum("ijk,iklm,ik->ijlm",K,g.reshape(g.shape[0],g.shape[1], ds, int(g.shape[-1]/ds)),w[:,:,-1])
g = jax.nn.softmax(g, axis=-1)
def argmin_index(a):
    minpos = a.index(min(a))
    print("The minimum is at position", minpos)
    return minpos
minpos = argmin_index(train_error_u)
np.savez_compressed("eigenfunctions_KCAlocalper.npz", efuncs=g[minpos,:,0,:])
# File: LOCA-main/PushForward/LOCA/LOCA_closetoDON.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import jax
import jax.numpy as jnp
from jax.example_libraries.stax import Dense, Gelu
from jax.example_libraries import stax
from jax.example_libraries import optimizers
import os
import timeit
import numpy as np
from jax.numpy.linalg import norm
from jax import random, grad, vmap, jit, vjp
from functools import partial
from torch.utils import data
from tqdm import trange
import itertools
from kymatio.numpy import Scattering1D
from jax.flatten_util import ravel_pytree
from numpy.polynomial.legendre import leggauss
def get_freer_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Used >tmp')
    memory_used = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
    return str(np.argmin(memory_used))
os.environ['CUDA_VISIBLE_DEVICES']= get_freer_gpu()
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']="False"
def pairwise_distances(dist,**arg):
return jit(vmap(vmap(partial(dist,**arg),in_axes=(None,0)),in_axes=(0,None)))
def euclid_distance(x,y):
XX=jnp.dot(x,x)
YY=jnp.dot(y,y)
XY=jnp.dot(x,y)
return XX+YY-2*XY
class DataGenerator(data.Dataset):
def __init__(self, inputsxuy, inputsxu, y, s, z, w,
batch_size=100, rng_key=random.PRNGKey(1234)):
'Initialization'
self.inputsxuy = inputsxuy
self.inputsxu = inputsxu
self.y = y
self.s = s
self.z = z
self.w = w
self.N = inputsxu.shape[0]
self.batch_size = batch_size
self.key = rng_key
# @partial(jit, static_argnums=(0,))
def __getitem__(self, index):
'Generate one batch of data'
self.key, subkey = random.split(self.key)
inputs,outputs = self.__data_generation(subkey)
return inputs, outputs
@partial(jit, static_argnums=(0,))
def __data_generation(self, key):
'Generates data containing batch_size samples'
idx = random.choice(key, self.N, (self.batch_size,), replace=False)
s = self.s[idx,:,:]
inputsxu = self.inputsxu[idx,:,:]
y = self.y[idx,:,:]
z = self.z[idx,:,:]
w = self.w[idx,:,:]
inputs = (inputsxu, y, z, w)
return inputs, s
class PositionalEncodingY:
def __init__(self, Y, d_model, max_len = 100,H=20):
self.d_model = d_model
self.Y = Y
self.max_len = max_len
self.H = H
@partial(jit, static_argnums=(0,))
def forward(self, x):
self.pe = np.zeros((x.shape[0], self.max_len, self.H))
T = jnp.asarray(self.Y[:,:,0:1])
position = jnp.tile(T,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
self.pe = jax.ops.index_update(self.pe, jax.ops.index[:,:,0::2], jnp.cos(position[:,:,0::2] * div_term))
self.pe = jax.ops.index_update(self.pe, jax.ops.index[:,:,1::2], jnp.sin(position[:,:,1::2] * div_term))
x = jnp.concatenate([x, self.pe],axis=-1)
return x
def scatteringTransform(sig, l=100, m=100, training_batch_size = 100):
J = 1
Q = 8
T = sig.shape[1]
scattering = Scattering1D(J, T, Q)
sig = np.asarray(sig)
sctcoef = np.zeros((training_batch_size, 1200, 1))
for i in range(0,training_batch_size):
sctcoef[i,:,:] = scattering(sig[i,:,0]).flatten()[:,None]
return sctcoef
class LpLoss(object):
def __init__(self, d=2, p=2):
super(LpLoss, self).__init__()
self.d = d
self.p = p
def rel(self, y, x):
num_examples = x.shape[0]
diff_norms = jnp.linalg.norm(y.reshape(num_examples,-1) - x.reshape(num_examples,-1), self.p, 1)
y_norms = jnp.linalg.norm(y.reshape(num_examples,-1), self.p, 1)
return jnp.mean(diff_norms/y_norms)
def __call__(self, y, x):
return self.rel(y, x)
class LOCA:
def __init__(self, g_layers, v_layers , m=100, P=100, jac_det=None):
# Network initialization and evaluation functions
seed = np.random.randint(10000)
self.v_init, self.v_apply = self.init_NN(v_layers, activation=Gelu)
self.in_shape = (-1, v_layers[0])
self.out_shape, v_params = self.v_init(random.PRNGKey(seed), self.in_shape)
seed = np.random.randint(10000)
self.g_init, self.g_apply = self.init_NN(g_layers, activation=Gelu)
self.in_shape = (-1, g_layers[0])
self.out_shape, g_params = self.g_init(random.PRNGKey(seed), self.in_shape)
# RBF kernel parameters
beta = [1.]
gamma = [1.]
# Model parameters
params = (beta, gamma, g_params, v_params)
self.opt_init,self.opt_update,self.get_params = optimizers.adam(optimizers.exponential_decay(1e-3,
decay_steps=100,
decay_rate=0.99))
self.opt_state = self.opt_init(params)
self.itercount = itertools.count()
self.loss_log = []
self.l2loss = LpLoss()
self.jac_det = jac_det
self.vdistance_function = vmap(pairwise_distances(euclid_distance))
def init_NN(self, Q, activation=Gelu):
layers = []
num_layers = len(Q)
if num_layers < 2:
net_init, net_apply = stax.serial()
else:
for i in range(0, num_layers-2):
layers.append(Dense(Q[i+1]))
layers.append(activation)
layers.append(Dense(Q[-1]))
net_init, net_apply = stax.serial(*layers)
return net_init, net_apply
@partial(jax.jit, static_argnums=0)
def RBF(self, X, Y, gamma, beta):
d = self.vdistance_function(X, Y)
return beta[0]*jnp.exp(-gamma[0]*d)
@partial(jax.jit, static_argnums=0)
def Matern_32(self, X, Y, gamma=[0.5], beta=[0.5]):
d = self.vdistance_function(X, Y)
return (1 + (jnp.sqrt(3)*gamma[0])*d)*beta[0]*jnp.exp(-(jnp.sqrt(3)*gamma[0])*d)
@partial(jax.jit, static_argnums=0)
def Matern_52(self, X, Y, gamma=[0.5], beta=[0.5]):
d = self.vdistance_function(X, Y)
return (1 + (jnp.sqrt(5)*gamma[0])*d + (5/3*gamma[0]**2)*d**2)*beta[0]*jnp.exp(-(jnp.sqrt(5)*gamma[0])*d)
@partial(jax.jit, static_argnums=0)
def periodic(self, X, Y, gamma=[0.5], beta=[0.5], p=0.7):
d = self.vdistance_function(X, Y)
return beta[0]*jnp.exp(-2.0*jnp.sin(jnp.pi*d/p)**2*gamma[0])
@partial(jax.jit, static_argnums=0)
def RQK(self, X, Y, gamma, beta):
d = self.vdistance_function(X, Y)
return beta[0]*(1 + (1./(3.*0.1*2))*gamma[0]*d)**(gamma[0])
@partial(jax.jit, static_argnums=0)
def local_periodic(self, X, Y, gamma, beta):
return self.periodic(X, Y, gamma, beta)*self.RBF(X, Y, gamma, beta)
@partial(jax.jit, static_argnums=0)
def LOCA_net(self, params, inputs, ds=1):
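        # Same kernel-averaged attention as in LOCAPushforward, but this
        # variant drops the q encoder and the scattering transform (raw
        # coordinates and raw sensor values go in directly) and uses the RBF
        # kernel, which brings the architecture close to a DeepONet.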
beta, gamma, g_params, v_params = params
u, y, z, w = inputs
K = self.RBF(z, z, gamma, beta)
Kzz = jnp.sqrt(self.jac_det*jnp.einsum("ijk,ikl->ijl",K,w))
K = self.RBF(y, z, gamma, beta)
Kyz = jnp.sqrt(self.jac_det*jnp.einsum("ijk,ikl->ijl",K,w))
mean_K = jnp.matmul(Kyz, jnp.swapaxes(Kzz,1,2))
K = jnp.divide(K,mean_K)
g = self.g_apply(g_params,z)
g = self.jac_det*jnp.einsum("ijk,iklm,ik->ijlm",K,g.reshape(g.shape[0],g.shape[1], ds, int(g.shape[-1]/ds)),w[:,:,-1])
g = jax.nn.softmax(g, axis=-1)
v = self.v_apply(v_params, u.reshape(u.shape[0],1,u.shape[1]*u.shape[2]))
v = v.reshape(v.shape[0],int(v.shape[2]/ds),ds)
Guy = jnp.einsum("ijkl,ilk->ijk", g,v)
return Guy
@partial(jax.jit, static_argnums=0)
def loss(self, params, batch):
inputs, outputs = batch
y_pred = self.LOCA_net(params,inputs)
loss = np.mean((outputs.flatten() - y_pred.flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def L2error(self, params, batch):
inputs, outputs = batch
y_pred = self.LOCA_net(params,inputs)
return norm(outputs.flatten() - y_pred.flatten(), 2)/norm(outputs.flatten(),2)
@partial(jit, static_argnums=(0,))
def step(self, i, opt_state, batch):
params = self.get_params(opt_state)
g = grad(self.loss)(params, batch)
return self.opt_update(i, g, opt_state)
def train(self, train_dataset, test_dataset, nIter = 10000):
train_data = iter(train_dataset)
test_data = iter(test_dataset)
pbar = trange(nIter)
for it in pbar:
train_batch = next(train_data)
test_batch = next(test_data)
self.opt_state = self.step(next(self.itercount), self.opt_state, train_batch)
if it % 100 == 0:
params = self.get_params(self.opt_state)
loss_train = self.loss(params, train_batch)
loss_test = self.loss(params, test_batch)
errorTrain = self.L2error(params, train_batch)
errorTest = self.L2error(params, test_batch)
self.loss_log.append(loss_train)
pbar.set_postfix({'Training loss': loss_train,
'Testing loss' : loss_test,
'Test error': errorTest,
'Train error': errorTrain})
@partial(jit, static_argnums=(0,))
def predict(self, params, inputs):
s_pred = self.LOCA_net(params,inputs)
return s_pred
def count_params(self, params):
params_flat, _ = ravel_pytree(params)
print("The number of model parameters is:",params_flat.shape[0])
TRAINING_ITERATIONS = 50000
P = 300
m = 300
L = 1
T = 1
N_hat = 1
num_train = 1000
num_test = 1000
training_batch_size = 100
du = 1
dy = 1
ds = 1
n_hat = 100
l = 100
Nx = P
H = 20
# Number of GLL quadrature points, coordinates and weights
polypoints = 20
z, w = leggauss(polypoints)
lb = np.array([0.0])
ub = np.array([1.0])
# Map [-1,1] -> [0,1]
z = 0.5*(ub - lb)*(z + 1.0) + lb
jac_det = 0.5*(ub-lb)
# Reshape both weights and coordinates. We need them to have shape: (num_train, N, dy)
z = np.tile(np.expand_dims(z,0),(num_train,1))[:,:,None]
w = np.tile(np.expand_dims(w,0),(num_train,1))[:,:,None]
# Create the dataset
d = np.load("../Data/train_pushforward.npz")
U_train = d["U_train"]
x_train = d["x_train"]
y_train = d["y_train"]
s_train = d["s_train"]
d = np.load("../Data/test_pushforward.npz")
U_test = d["U_test"]
x_test = d["x_test"]
y_test = d["y_test"]
s_test = d["s_test"]
# Make all array to be jax numpy format
y_train = jnp.asarray(y_train)
s_train = jnp.asarray(s_train)
y_test = jnp.asarray(y_test)
s_test = jnp.asarray(s_test)
z = jnp.asarray(z)
w = jnp.asarray(w)
U_train = np.reshape(U_train,(num_train,m,du))
y_train = jnp.reshape(y_train,(num_train,P,dy))
s_train = jnp.reshape(s_train,(num_train,P,ds))
U_test = np.reshape(U_test,(num_test,m,du))
y_test = jnp.reshape(y_test,(num_test,P,dy))
s_test = jnp.reshape(s_test,(num_test,P,ds))
z = jnp.reshape(z,(num_train,polypoints,dy))
w = jnp.reshape(w,(num_train,polypoints,dy))
plot=False
if plot:
import matplotlib.pyplot as plt
pltN = 10
for i in range(0,pltN-1):
plt.plot(y_train[i,:,0], s_train[i,:,0], 'r-')
plt.plot(y_test[i,:,0], s_test[i,:,0], 'b-')
plt.plot(y_train[pltN,:,0], s_train[pltN,:,0], 'r-', label="Training output")
plt.plot(y_test[pltN,:,0], s_test[pltN,:,0], 'b-', label="Testing output")
plt.legend()
plt.show()
x = jnp.linspace(0,1,num=m)
pltN = 10
for i in range(0,pltN-1):
plt.plot(x, np.asarray(U_train)[i,:,0], 'y-')
plt.plot(x, np.asarray(U_test)[i,:,0], 'g-')
plt.plot(x, np.asarray(U_train)[pltN,:,0], 'y-', label="Training input")
plt.plot(x, np.asarray(U_test)[pltN,:,0], 'g-', label="Testing input")
plt.legend()
plt.show()
y_train_pos = y_train
pos_encodingy = PositionalEncodingY(y_train,int(y_train.shape[1]*y_train.shape[2]), max_len = P, H=H)
y_train = pos_encodingy.forward(y_train)
del pos_encodingy
pos_encodingy = PositionalEncodingY(z,int(z.shape[1]*z.shape[2]), max_len = polypoints, H=H)
z = pos_encodingy.forward(z)
del pos_encodingy
pos_encodingyt = PositionalEncodingY(y_test,int(y_test.shape[1]*y_test.shape[2]), max_len = P, H=H)
y_test = pos_encodingyt.forward(y_test)
del pos_encodingyt
start_time = timeit.default_timer()
inputs_trainxu = jnp.asarray(U_train)
inputs_testxu = jnp.asarray(U_test )
elapsed = timeit.default_timer() - start_time
print("The wall-clock time for for loop is seconds is equal to %f seconds"%elapsed)
print(inputs_trainxu.shape, inputs_testxu.shape)
train_dataset = DataGenerator(inputs_trainxu, inputs_trainxu, y_train, s_train, z, w, training_batch_size)
train_dataset = iter(train_dataset)
test_dataset = DataGenerator(inputs_testxu, inputs_testxu, y_test, s_test, z, w, training_batch_size)
test_dataset = iter(test_dataset)
v_layers = [m*du, 512, 512, ds*n_hat]
g_layers = [L*dy+H*dy, 512, 512, ds*n_hat]
model = LOCA(g_layers, v_layers, m=m, P=P, jac_det=jac_det)
model.count_params(model.get_params(model.opt_state))
start_time = timeit.default_timer()
model.train(train_dataset, test_dataset, nIter=TRAINING_ITERATIONS)
elapsed = timeit.default_timer() - start_time
print("The training wall-clock time is seconds is equal to %f seconds"%elapsed)
params = model.get_params(model.opt_state)
uCNN_test = model.predict(params, (inputs_testxu,y_test, z, w))
test_error_u = []
for i in range(0,s_test.shape[0]):
test_error_u.append(jnp.linalg.norm(s_test[i,:,-1] - uCNN_test[i,:,-1], 2)/jnp.linalg.norm(s_test[i,:,-1], 2))
print("The average test u error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(test_error_u),np.std(test_error_u),np.min(test_error_u),np.max(test_error_u)))
uCNN_train = model.predict(params, (inputs_trainxu, y_train, z, w))
train_error_u = []
for i in range(0,s_train.shape[0]):
train_error_u.append(jnp.linalg.norm(s_train[i,:,-1] - uCNN_train[i,:,-1], 2)/jnp.linalg.norm(s_train[i,:,-1], 2))
print("The average train u error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(train_error_u),np.std(train_error_u),np.min(train_error_u),np.max(train_error_u)))
beta, gamma, g_params, v_params = params
y = y_test
K = model.RBF(z, z, gamma, beta)
Kzz = jnp.sqrt(model.jac_det*jnp.einsum("ijk,ikl->ijl",K,w))
K = model.RBF(y, z, gamma, beta)
Kyz = jnp.sqrt(model.jac_det*jnp.einsum("ijk,ikl->ijl",K,w))
mean_K = jnp.matmul(Kyz, jnp.swapaxes(Kzz,1,2))
K = jnp.divide(K,mean_K)
g = model.g_apply(g_params,z)
g = model.jac_det*jnp.einsum("ijk,iklm,ik->ijlm",K,g.reshape(g.shape[0],g.shape[1], ds, int(g.shape[-1]/ds)),w[:,:,-1])
g = jax.nn.softmax(g, axis=-1)
def argmin_index(a):
    minpos = a.index(min(a))
    print("The minimum is at position", minpos)
    return minpos
minpos = argmin_index(train_error_u)
np.savez_compressed("eigenfunctions_KCAlocalper_closetoDON2.npz", efuncs=g[minpos,:,0,:])
# File: LOCA-main/ShallowWaters/DeepONet/DeepOnet_SW.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pathos.pools import ProcessPool
from scipy import linalg, interpolate
from sklearn import gaussian_process as gp
import argparse
from jax.experimental.stax import Dense, Gelu, Relu
from jax.experimental import stax
import os
import timeit
from jax.experimental import optimizers
from absl import app
import jax
from jax import vjp
import jax.numpy as jnp
import numpy as np
from jax.numpy.linalg import norm
from jax import random, grad, vmap, jit, pmap
from functools import partial
from torch.utils import data
from scipy import interpolate
from tqdm import trange
from math import log, sqrt, sin, cos
import itertools
import torch
def get_freer_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Used >tmp')
    memory_used = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
    return str(np.argmin(memory_used))
os.environ['CUDA_VISIBLE_DEVICES']= get_freer_gpu()
def output_construction(Ux,t_his,cx, cy, ng,P=1000, num_train=1000, ds=3, Nx=30, Ny=30, Nt=100):
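    # Randomly sample P space-time locations from the solution tensor and
    # return the sampled values together with their (t, x, y) coordinates.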
U_all = np.zeros((P,ds))
Y_all = np.zeros((P,ds))
it = np.random.randint(Nt, size=P)
x = np.random.randint(Nx, size=P)
y = np.random.randint(Ny, size=P)
T, X, Y = np.meshgrid(t_his,cx,cy,indexing="ij")
Y_all[:,:] = np.concatenate((T[it,x][range(P),y][:,None], X[it,x][range(P),y][:,None], Y[it,x][range(P),y][:,None]),axis=-1)
U_all[:,:] = Ux[it,x][range(P),y]
return U_all, Y_all
class DataGenerator(data.Dataset):
def __init__(self, u, y, s,
batch_size=100, rng_key=random.PRNGKey(1234)):
'Initialization'
self.u = u
self.y = y
self.s = s
self.N = u.shape[0]
self.batch_size = batch_size
self.key = rng_key
def __getitem__(self, index):
'Generate one batch of data'
self.key, subkey = random.split(self.key)
inputs,outputs = self.__data_generation(subkey)
return inputs, outputs
@partial(jit, static_argnums=(0,))
def __data_generation(self, key):
'Generates data containing batch_size samples'
idx = random.choice(key, self.N, (self.batch_size,), replace=False)
s = self.s[idx,:,:]
u = self.u[idx,:,:]
y = self.y[idx,:,:]
inputs = (u, y)
return inputs, s
class PositionalEncodingY:
def __init__(self, Y, d_model, max_len = 100, H=4):
self.d_model = int(np.ceil(d_model/6)*2)
self.Y = Y
self.max_len = max_len
self.H = H
def forward(self, x):
pet = np.zeros((x.shape[0], self.max_len, self.H))
pex = np.zeros((x.shape[0], self.max_len, self.H))
pey = np.zeros((x.shape[0], self.max_len, self.H))
T = jnp.take(self.Y, 0, axis=2)[:,:,None]
X = jnp.take(self.Y, 1, axis=2)[:,:,None]
Y = jnp.take(self.Y, 2, axis=2)[:,:,None]
positionT = jnp.tile(T,(1,1,self.H))
positionX = jnp.tile(X,(1,1,self.H))
positionY = jnp.tile(Y,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
pet = jax.ops.index_update(pet, jax.ops.index[:,:,0::2], jnp.cos(positionT[:,:,0::2] * div_term))
pet = jax.ops.index_update(pet, jax.ops.index[:,:,1::2], jnp.sin(positionT[:,:,1::2] * div_term))
pex = jax.ops.index_update(pex, jax.ops.index[:,:,0::2], jnp.cos(positionX[:,:,0::2] * div_term))
pex = jax.ops.index_update(pex, jax.ops.index[:,:,1::2], jnp.sin(positionX[:,:,1::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,0::2], jnp.cos(positionY[:,:,0::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,1::2], jnp.sin(positionY[:,:,1::2] * div_term))
pos_embedding = jnp.concatenate((pet,pex,pey),axis=-1)
x = jnp.concatenate([x, pos_embedding], -1)
return x
class PositionalEncodingU:
def __init__(self, Y, d_model, max_len = 100, H=4):
self.d_model = int(np.ceil(d_model/6)*2)
self.Y = Y
self.max_len = max_len
self.H = H
def forward(self, x):
pet = np.zeros((x.shape[0], self.max_len, self.H))
pex = np.zeros((x.shape[0], self.max_len, self.H))
pey = np.zeros((x.shape[0], self.max_len, self.H))
T = jnp.take(self.Y, 0, axis=2)[:,:,None]
X = jnp.take(self.Y, 1, axis=2)[:,:,None]
Y = jnp.take(self.Y, 2, axis=2)[:,:,None]
positionT = jnp.tile(T,(1,1,self.H))
positionX = jnp.tile(X,(1,1,self.H))
positionY = jnp.tile(Y,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
pet = jax.ops.index_update(pet, jax.ops.index[:,:,0::2], jnp.cos(positionT[:,:,0::2] * div_term))
pet = jax.ops.index_update(pet, jax.ops.index[:,:,1::2], jnp.sin(positionT[:,:,1::2] * div_term))
pex = jax.ops.index_update(pex, jax.ops.index[:,:,0::2], jnp.cos(positionX[:,:,0::2] * div_term))
pex = jax.ops.index_update(pex, jax.ops.index[:,:,1::2], jnp.sin(positionX[:,:,1::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,0::2], jnp.cos(positionY[:,:,0::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,1::2], jnp.sin(positionY[:,:,1::2] * div_term))
pos_embedding = jnp.concatenate((pet,pex,pey),axis=-1)
x = jnp.concatenate([x, pos_embedding], -1)
return x
class DON:
def __init__(self,branch_layers, trunk_layers , m=100, P=100, mn=None, std=None):
# Network initialization and evaluation functions
self.branch_init, self.branch_apply = self.init_NN(branch_layers, activation=Gelu)
self.in_shape = (-1, branch_layers[0])
self.out_shape, branch_params = self.branch_init(random.PRNGKey(10000), self.in_shape)
self.trunk_init, self.trunk_apply = self.init_NN(trunk_layers, activation=Gelu)
self.in_shape = (-1, trunk_layers[0])
self.out_shape, trunk_params = self.trunk_init(random.PRNGKey(10000), self.in_shape)
params = (trunk_params, branch_params)
# Use optimizers to set optimizer initialization and update functions
self.opt_init,self.opt_update,self.get_params = optimizers.adam(optimizers.exponential_decay(1e-3,
decay_steps=100,
decay_rate=0.99))
self.opt_state = self.opt_init(params)
# Logger
self.itercount = itertools.count()
self.loss_log = []
self.mean = mn
self.std = std
def init_NN(self, Q, activation=Gelu):
layers = []
num_layers = len(Q)
if num_layers < 2:
net_init, net_apply = stax.serial()
else:
for i in range(0, num_layers-2):
layers.append(Dense(Q[i+1]))
layers.append(activation)
layers.append(Dense(Q[-1]))
net_init, net_apply = stax.serial(*layers)
return net_init, net_apply
@partial(jax.jit, static_argnums=0)
def DON(self, params, inputs, ds=3):
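        # The trunk output is reshaped to (batch, P, ds, 12/ds), i.e. n_hat=4
        # basis functions for each of the ds=3 output channels (rho, u, v);
        # the hard-coded 12 equals ds*n_hat.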
trunk_params, branch_params = params
inputsxu, inputsy = inputs
t = self.trunk_apply(trunk_params, inputsy).reshape(inputsy.shape[0], inputsy.shape[1], ds, int(12/ds))
b = self.branch_apply(branch_params, inputsxu.reshape(inputsxu.shape[0],1,inputsxu.shape[1]*inputsxu.shape[2]))
b = b.reshape(b.shape[0],int(b.shape[2]/ds),ds)
Guy = jnp.einsum("ijkl,ilk->ijk", t,b)
return Guy
@partial(jax.jit, static_argnums=0)
def loss(self, params, batch):
inputs, outputs = batch
y_pred = self.DON(params,inputs)
outputs = outputs*self.std + self.mean
y_pred = y_pred*self.std + self.mean
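        # The velocity channels are weighted 100x relative to the height
        # channel so all three terms contribute comparably to the loss.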
loss = np.mean((outputs[:,:,0].flatten() - y_pred[:,:,0].flatten())**2) + 100.*np.mean((outputs[:,:,1].flatten() - y_pred[:,:,1].flatten())**2) + 100.*np.mean((outputs[:,:,2].flatten() - y_pred[:,:,2].flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def lossT(self, params, batch):
inputs, outputs = batch
y_pred = self.DON(params,inputs)
y_pred = y_pred*self.std + self.mean
loss = np.mean((outputs[:,:,0].flatten() - y_pred[:,:,0].flatten())**2) + 100.*np.mean((outputs[:,:,1].flatten() - y_pred[:,:,1].flatten())**2) + 100.*np.mean((outputs[:,:,2].flatten() - y_pred[:,:,2].flatten())**2)
return loss
@partial(jax.jit, static_argnums=0)
def L2errorT(self, params, batch):
inputs, y = batch
y_pred = self.DON(params,inputs)
y_pred = y_pred*self.std + self.mean
return norm(y.flatten() - y_pred.flatten(), 2)/norm(y.flatten(),2)
@partial(jax.jit, static_argnums=0)
def L2error(self, params, batch):
inputs, y = batch
y_pred = self.DON(params,inputs)
y = y*self.std + self.mean
y_pred = y_pred*self.std + self.mean
return norm(y.flatten() - y_pred.flatten(), 2)/norm(y.flatten(),2)
@partial(jit, static_argnums=(0,))
def step(self, i, opt_state, batch):
params = self.get_params(opt_state)
g = grad(self.loss)(params, batch)
return self.opt_update(i, g, opt_state)
def train(self, train_dataset, test_dataset, nIter = 10000):
train_data = iter(train_dataset)
test_data = iter(test_dataset)
pbar = trange(nIter)
for it in pbar:
train_batch = next(train_data)
test_batch = next(test_data)
self.opt_state = self.step(next(self.itercount), self.opt_state, train_batch)
if it % 100 == 0:
params = self.get_params(self.opt_state)
loss_train = self.loss(params, train_batch)
loss_test = self.lossT(params, test_batch)
errorTrain = self.L2error(params, train_batch)
errorTest = self.L2errorT(params, test_batch)
self.loss_log.append(loss_train)
pbar.set_postfix({'Training loss': loss_train,
'Testing loss' : loss_test,
'Test error': errorTest,
'Train error': errorTrain})
@partial(jit, static_argnums=(0,))
def predict(self, params, inputs):
s_pred = self.DON(params,inputs)
return s_pred*self.std + self.mean
@partial(jit, static_argnums=(0,))
def predictT(self, params, inputs):
s_pred = self.DON(params,inputs)
return s_pred*self.std + self.mean
def ravel_list(self, *lst):
return jnp.concatenate([jnp.ravel(elt) for elt in lst]) if lst else jnp.array([])
def ravel_pytree(self, pytree):
leaves, treedef = jax.tree_util.tree_flatten(pytree)
flat, unravel_list = vjp(self.ravel_list, *leaves)
unravel_pytree = lambda flat: jax.tree_util.tree_unflatten(treedef, unravel_list(flat))
return flat, unravel_pytree
def count_params(self, params):
trunk_params, branch_params = params
blv, _ = self.ravel_pytree(branch_params)
tlv, _ = self.ravel_pytree(trunk_params)
print("The number of model parameters is:",blv.shape[0]+tlv.shape[0])
def predict_function(U_in, y, model=None, params=None, H=None):
inputs_trainxu = jnp.asarray(U_in)
uCNN_super_all = model.predict(params, (inputs_trainxu, y))
return uCNN_super_all, y[:,:,1:2], y[:,:,0:1]
def error_full_resolution(uCNN_super_all, s_all,tag='train', num_train=1000,P=128, Nx=30, Ny=30, Nt=10, idx=None, ds=3):
print(s_all.shape)
z = uCNN_super_all.reshape(num_train,Nx*Ny*Nt,ds)
s = s_all.reshape(num_train,Nx*Ny*Nt,ds)
test_error_rho = []
for i in range(0,num_train):
test_error_rho.append(norm(s[i,:,0]- z[i,:,0], 2)/norm(s[i,:,0], 2))
print("The average "+tag+" rho error for the super resolution is %e, the standard deviation %e, the minimum error is %e and the maximum error is %e"%(np.mean(test_error_rho),np.std(test_error_rho),np.min(test_error_rho),np.max(test_error_rho)))
test_error_u = []
for i in range(0,num_train):
test_error_u.append(norm(s[i,:,1]- z[i,:,1], 2)/norm(s[i,:,1], 2))
print("The average "+tag+" u error for the super resolution is %e, the standard deviation %e, the minimum error is %e and the maximum error is %e"%(np.mean(test_error_u),np.std(test_error_u),np.min(test_error_u),np.max(test_error_u)))
test_error_v = []
for i in range(0,num_train):
test_error_v.append(norm(s[i,:,2]- z[i,:,2], 2)/norm(s[i,:,2], 2))
print("The average "+tag+" v error for the super resolution is %e, the standard deviation %e, the minimum error is %e and the maximum error is %e"%(np.mean(test_error_v),np.std(test_error_v),np.min(test_error_v),np.max(test_error_v)))
absolute_error = np.abs(z-s)
return absolute_error, np.mean(test_error_rho), np.mean(test_error_u),np.mean(test_error_v), (test_error_rho, test_error_u, test_error_v)
# if __name__ == "__main__":
TRAINING_ITERATIONS = 100000
P = 128
m = 1024
num_train = 1000
num_test = 1000
training_batch_size = 100
dx = 3
du = 3
dy = 3
ds = 3
n_hat = 4
l = 100
Nx = 32
Ny = 32
Nt = 5
Ng = 0
H_y = 2
H_u = 2
idxT = [10,15,20,25,30]
d = np.load("/scratch/gkissas/all_train_SW_Nx%d_Ny%d_numtrain%d.npz"%(Nx,Ny,1000))
u_train = d["U_train"][:,:,:,:]
S_train = d["s_train"][:,idxT,:,:,:]
T = d["T_train"][idxT]
CX = d["X_train"]
CY = d["Y_train"]
d = np.load("/scratch/gkissas/all_test_SW_Nx%d_Ny%d_numtest%d.npz"%(Nx,Ny,1000))
u_test = d["U_test"][:,:,:,:]
S_test = d["s_test"][:,idxT,:,:,:]
T = d["T_test"][idxT]
CX = d["X_test"]
CY = d["Y_test"]
s_all_test = S_test
s_all_train = S_train
s_train = np.zeros((num_train,P,ds))
y_train = np.zeros((num_train,P,dy))
U_train = np.zeros((num_train,m,du))
X_train = np.zeros((num_train,m,dx))
s_test = np.zeros((num_test,P,ds))
y_test = np.zeros((num_test,P,dy))
U_test = np.zeros((num_test,m,du))
X_test = np.zeros((num_test,m,dx))
for i in range(0,num_train):
s_train[i ,:,:], y_train[i,:,:] = output_construction(S_train[i,:,:,:,:], T, CX, CY, Ng,P=P,Nt=Nt)
U_train[i,:,:] = u_train[i,:,:,:].reshape(Nx*Ny,du)
for i in range(num_test):
s_test[i,:,:], y_test[i,:,:] = output_construction(S_test[i,:,:,:,:], T, CX, CY, Ng,P=P,Nt=Nt)
U_test[i,:,:] = u_test[i,:,:,:].reshape(Nx*Ny,du)
U_train = jnp.asarray(U_train)
y_train = jnp.asarray(y_train)
s_train = jnp.asarray(s_train)
U_test = jnp.asarray(U_test)
y_test = jnp.asarray(y_test)
s_test = jnp.asarray(s_test)
X_train = jnp.reshape(X_train,(num_train,m,dx))
U_train = jnp.reshape(U_train,(num_train,m,du))
y_train = jnp.reshape(y_train,(num_train,P,dy))
s_train = jnp.reshape(s_train,(num_train,P,ds))
X_test = jnp.reshape(X_test,(num_test,m,dx))
U_test = jnp.reshape(U_test,(num_test,m,du))
y_test = jnp.reshape(y_test,(num_test,P,dy))
s_test = jnp.reshape(s_test,(num_test,P,ds))
s_train_mean = jnp.mean(s_train,axis=0)
s_train_std = jnp.std(s_train,axis=0)
s_train = (s_train - s_train_mean)/s_train_std
pos_encodingy = PositionalEncodingY(y_train,int(y_train.shape[1]*y_train.shape[2]), max_len = P, H=H_y)
y_train = pos_encodingy.forward(y_train)
del pos_encodingy
pos_encodingyt = PositionalEncodingY(y_test,int(y_test.shape[1]*y_test.shape[2]), max_len = P, H=H_y)
y_test = pos_encodingyt.forward(y_test)
del pos_encodingyt
pos_encodingy = PositionalEncodingU(U_train,int(U_train.shape[1]*U_train.shape[2]), max_len = m, H=H_u)
U_train = pos_encodingy.forward(U_train)
del pos_encodingy
pos_encodingyt = PositionalEncodingU(U_test,int(U_test.shape[1]*U_test.shape[2]), max_len = m, H=H_u)
U_test = pos_encodingyt.forward(U_test)
del pos_encodingyt
train_dataset = DataGenerator(U_train, y_train, s_train, training_batch_size)
train_dataset = iter(train_dataset)
test_dataset = DataGenerator(U_test, y_test, s_test, training_batch_size)
test_dataset = iter(test_dataset)
branch_layers = [m*(du*H_u+du), 100, 100, 100, ds*n_hat]
trunk_layers = [H_y*dy + dy , 100, 100, 100, ds*n_hat]
model = DON(branch_layers, trunk_layers, m=m, P=P, mn=s_train_mean, std=s_train_std)
model.count_params(model.get_params(model.opt_state))
start_time = timeit.default_timer()
model.train(train_dataset, test_dataset, nIter=TRAINING_ITERATIONS)
elapsed = timeit.default_timer() - start_time
print("The training wall-clock time is seconds is equal to %f seconds"%elapsed)
params = model.get_params(model.opt_state)
uCNN_test = model.predict(params, (U_test, y_test))
test_error_u = []
for i in range(0,num_test):
test_error_u.append(norm(s_test[i,:,0]- uCNN_test[i,:,0],2)/norm(s_test[i,:,0],2))
print("The average test u error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(test_error_u),np.std(test_error_u),np.min(test_error_u),np.max(test_error_u)))
uCNN_train = model.predict(params, (U_train, y_train))
train_error_u = []
for i in range(0,num_train):
train_error_u.append(norm(s_train[i,:,0]- uCNN_train[i,:,0],2)/norm(s_train[i,:,0],2))
print("The average train u error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(train_error_u),np.std(train_error_u),np.min(train_error_u),np.max(train_error_u)))
TT, XX, YY = np.meshgrid(T, CX, CY, indexing="ij")
TT = np.expand_dims(TT,axis=0)
XX = np.expand_dims(XX,axis=0)
YY = np.expand_dims(YY,axis=0)
TT = np.tile(TT,(num_test,1,1)).reshape(num_test,Nx*Ny*Nt,1)
XX = np.tile(XX,(num_test,1,1)).reshape(num_test,Nx*Ny*Nt,1)
YY = np.tile(YY,(num_test,1,1)).reshape(num_test,Nx*Ny*Nt,1)
Y_test_in = np.concatenate((TT, XX, YY),axis=-1)
Y_train_in = np.concatenate((TT, XX, YY),axis=-1)
pos_encodingy = PositionalEncodingY(Y_train_in,int(Y_train_in.shape[1]*Y_train_in.shape[2]), max_len = Y_train_in.shape[1], H=H_y)
Y_train_in = pos_encodingy.forward(Y_train_in)
del pos_encodingy
pos_encodingy = PositionalEncodingY(Y_test_in,int(Y_test_in.shape[1]*Y_test_in.shape[2]), max_len = Y_test_in.shape[1], H=H_y)
Y_test_in = pos_encodingy.forward(Y_test_in)
del pos_encodingy
print("Predicting the solution for the full resolution")
uCNN_super_all_test = np.zeros_like(s_all_test).reshape(num_test, Nx*Ny*Nt, ds)
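# Evaluate the full space-time grid in chunks of P query points to keep the
# per-call memory bounded (this assumes Nx*Ny*Nt is divisible by P, which
# holds here: 32*32*5 = 5120 = 40*128).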
for i in range(0, Nx*Ny*Nt, P):
idx = i + np.arange(0,P)
uCNN_super_all_test[:,idx,:], _, _ = predict_function(U_test , Y_test_in[:,idx,:], model=model, params=params, H=H_y)
uCNN_super_all_train = np.zeros_like(s_all_train).reshape(num_train, Nx*Ny*Nt, ds)
for i in range(0, Nx*Ny*Nt, P):
idx = i + np.arange(0,P)
uCNN_super_all_train[:,idx,:], _, _ = predict_function(U_train , Y_train_in[:,idx,:], model=model, params=params, H=H_y)
absolute_error_train, mean_train_error_rho, mean_train_error_u, mean_train_error_v, train_error = error_full_resolution(uCNN_super_all_train,s_all_train,tag='train',P=P,Nx=Nx, Ny=Ny, Nt=Nt, idx = None, num_train=num_train)
absolute_error_test, mean_test_error_rho, mean_test_error_u, mean_test_error_v, test_error = error_full_resolution(uCNN_super_all_test,s_all_test,tag='test',P=P,Nx=Nx, Ny=Ny, Nt=Nt, idx = None, num_train=num_test)
# File: LOCA-main/ShallowWaters/FNO/Adam.py
import math
import torch
from torch import Tensor
from typing import List, Optional
from torch.optim.optimizer import Optimizer
import os
os.environ['CUDA_VISIBLE_DEVICES']="3"
def adam(params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
max_exp_avg_sqs: List[Tensor],
state_steps: List[int],
*,
amsgrad: bool,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float):
r"""Functional API that performs Adam algorithm computation.
See :class:`~torch.optim.Adam` for details.
"""
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step = state_steps[i]
bias_correction1 = 1 - beta1 ** step
bias_correction2 = 1 - beta2 ** step
if weight_decay != 0:
grad = grad.add(param, alpha=weight_decay)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sqs[i].sqrt() / math.sqrt(bias_correction2)).add_(eps)
else:
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
step_size = lr / bias_correction1
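        # Parameter update: theta <- theta - step_size * exp_avg / denom,
        # i.e. Adam's bias-corrected first moment divided by the corrected
        # second-moment root, applied in place via addcdiv_.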
param.addcdiv_(exp_avg, denom, value=-step_size)
class Adam(Optimizer):
r"""Implements Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
The implementation of the L2 penalty follows changes proposed in
`Decoupled Weight Decay Regularization`_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(Adam, self).__init__(params, defaults)
def __setstate__(self, state):
super(Adam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
max_exp_avg_sqs = []
state_steps = []
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is not None:
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
grads.append(p.grad)
state = self.state[p]
# Lazy state initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['amsgrad']:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avgs.append(state['exp_avg'])
exp_avg_sqs.append(state['exp_avg_sq'])
if group['amsgrad']:
max_exp_avg_sqs.append(state['max_exp_avg_sq'])
# update the steps for each param group update
state['step'] += 1
# record the step after step update
state_steps.append(state['step'])
adam(params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=group['amsgrad'],
beta1=beta1,
beta2=beta2,
lr=group['lr'],
weight_decay=group['weight_decay'],
eps=group['eps'])
return loss | 6,612 | 39.078788 | 120 | py |
null | LOCA-main/ShallowWaters/FNO/FNOSW.py | """
@author: Zongyi Li
This file is the Fourier Neural Operator for 3D problems such as the Navier-Stokes equation discussed in Section 5.3 of the [paper](https://arxiv.org/pdf/2010.08895.pdf),
which treats the 2D spatial + 1D temporal equation directly as a 3D problem.
"""
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from utilities3 import *
from timeit import default_timer
import timeit
################################################################
# 3d fourier layers
################################################################
class SpectralConv3d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2, modes3):
super(SpectralConv3d, self).__init__()
"""
3D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1 #Number of Fourier modes to multiply, at most floor(N/2) + 1
self.modes2 = modes2
self.modes3 = modes3
self.scale = (1 / (in_channels * out_channels))
self.weights1 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3, dtype=torch.cfloat))
self.weights2 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3, dtype=torch.cfloat))
self.weights3 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3, dtype=torch.cfloat))
self.weights4 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3, dtype=torch.cfloat))
# Complex multiplication
def compl_mul3d(self, input, weights):
# (batch, in_channel, x,y,t ), (in_channel, out_channel, x,y,t) -> (batch, out_channel, x,y,t)
return torch.einsum("bixyz,ioxyz->boxyz", input, weights)
def forward(self, x):
batchsize = x.shape[0]
        #Compute Fourier coefficients up to factor of e^(- something constant)
x_ft = torch.fft.rfftn(x, dim=[-3,-2,-1])
# Multiply relevant Fourier modes
out_ft = torch.zeros(batchsize, self.out_channels, x.size(-3), x.size(-2), x.size(-1)//2 + 1, dtype=torch.cfloat, device=x.device)
out_ft[:, :, :self.modes1, :self.modes2, :self.modes3] = \
self.compl_mul3d(x_ft[:, :, :self.modes1, :self.modes2, :self.modes3], self.weights1)
out_ft[:, :, -self.modes1:, :self.modes2, :self.modes3] = \
self.compl_mul3d(x_ft[:, :, -self.modes1:, :self.modes2, :self.modes3], self.weights2)
out_ft[:, :, :self.modes1, -self.modes2:, :self.modes3] = \
self.compl_mul3d(x_ft[:, :, :self.modes1, -self.modes2:, :self.modes3], self.weights3)
out_ft[:, :, -self.modes1:, -self.modes2:, :self.modes3] = \
self.compl_mul3d(x_ft[:, :, -self.modes1:, -self.modes2:,:self.modes3], self.weights4)
#Return to physical space
x = torch.fft.irfftn(out_ft, s=(x.size(-3), x.size(-2), x.size(-1)))
return x
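# A minimal shape check for the layer above (the `_demo_spectral_conv3d`
# helper and its sizes are hypothetical, for illustration): the spectral
# layer keeps the (x, y, t) grid and only changes the channel dimension.
def _demo_spectral_conv3d():
    layer = SpectralConv3d(in_channels=2, out_channels=4, modes1=3, modes2=3, modes3=3)
    x = torch.randn(1, 2, 8, 8, 8)  # (batch, channels, x, y, t)
    assert layer(x).shape == (1, 4, 8, 8, 8)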
class FNO3d(nn.Module):
def __init__(self, modes1, modes2, modes3, width):
super(FNO3d, self).__init__()
"""
The overall network. It contains 4 layers of the Fourier layer.
1. Lift the input to the desire channel dimension by self.fc0 .
2. 4 layers of the integral operators u' = (W + K)(u).
W defined by self.w; K defined by self.conv .
3. Project from the channel space to the output space by self.fc1 and self.fc2 .
        input: the 3 initial fields tiled over time (rho(x, y), u(x, y), v(x, y)); the grid (x, y, t) is concatenated inside forward
        input shape: (batchsize, x=32, y=32, t=5, c=3)
        output: the (rho, u, v) solution at the queried timesteps
        output shape: (batchsize, x=32, y=32, t=5, c=3)
"""
self.modes1 = modes1
self.modes2 = modes2
self.modes3 = modes3
self.width = width
self.padding = 12 # pad the domain if input is non-periodic
self.fc0 = nn.Linear(6, self.width)
        # input channel is 6: the 3 input fields (rho, u, v) + 3 grid locations (x, y, t)
self.conv0 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3)
self.conv1 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3)
self.conv2 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3)
self.conv3 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3)
self.w0 = nn.Conv3d(self.width, self.width, 1)
self.w1 = nn.Conv3d(self.width, self.width, 1)
self.w2 = nn.Conv3d(self.width, self.width, 1)
self.w3 = nn.Conv3d(self.width, self.width, 1)
self.bn0 = torch.nn.BatchNorm3d(self.width)
self.bn1 = torch.nn.BatchNorm3d(self.width)
self.bn2 = torch.nn.BatchNorm3d(self.width)
self.bn3 = torch.nn.BatchNorm3d(self.width)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, 3)
def forward(self, x):
grid = self.get_grid(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
x = self.fc0(x)
x = x.permute(0, 4, 1, 2, 3)
x = F.pad(x, [0,self.padding]) # pad the domain if input is non-periodic
x1 = self.conv0(x)
x2 = self.w0(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv1(x)
x2 = self.w1(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv2(x)
x2 = self.w2(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv3(x)
x2 = self.w3(x)
x = x1 + x2
        x = x[..., :-self.padding]  # remove the temporal padding added above
        x = x.permute(0, 2, 3, 4, 1)  # back to (batch, x, y, t, channel)
x = self.fc1(x)
x = F.gelu(x)
x = self.fc2(x)
return x
def get_grid(self, shape, device):
batchsize, size_x, size_y, size_z = shape[0], shape[1], shape[2], shape[3]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1, 1, 1).repeat([batchsize, 1, size_y, size_z, 1])
gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
gridy = gridy.reshape(1, 1, size_y, 1, 1).repeat([batchsize, size_x, 1, size_z, 1])
gridz = torch.tensor(np.linspace(0, 1, size_z), dtype=torch.float)
gridz = gridz.reshape(1, 1, 1, size_z, 1).repeat([batchsize, size_x, size_y, 1, 1])
return torch.cat((gridx, gridy, gridz), dim=-1).to(device)
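# A minimal end-to-end shape sketch (the `_demo_fno3d` helper and its sizes
# are hypothetical, for illustration): the network maps the tiled initial
# fields to the 3-channel solution on the same (x, y, t) grid.
def _demo_fno3d():
    net = FNO3d(modes1=4, modes2=4, modes3=4, width=8)
    a = torch.randn(2, 16, 16, 5, 3)  # (batch, x, y, t, channels)
    assert net(a).shape == (2, 16, 16, 5, 3)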
################################################################
# configs
################################################################
ntrain = 1000
ntest = 1000
modes = 8
width = 25
batch_size = 100
batch_size2 = batch_size
epochs = 400
learning_rate = 0.001
scheduler_step = 100
scheduler_gamma = 0.5
print(epochs, learning_rate, scheduler_step, scheduler_gamma)
runtime = np.zeros(2,)
t1 = default_timer()
sub = 1
S = 32 // sub
T_in = 1
T = 5
par = 3
P = 128
################################################################
# load data
################################################################
idxT = [10,15,20,25,30]
d = np.load("/scratch/gkissas/all_train_SW_Nx32_Ny32_numtrain1000.npz")
U_train = d["U_train"][:,:,:,:]
S_train = np.swapaxes(d["s_train"][:,idxT,:,:,None,:],4,1)[:,-1,:,:,:,:]
TT = d["T_train"][idxT]
CX = d["X_train"]
CY = d["Y_train"]
X_sim_train = d["XX_train"]
Y_sim_train = d["YY_train"]
d = np.load("/scratch/gkissas/all_test_SW_Nx32_Ny32_numtest1000.npz")
U_test = d["U_test"][:,:,:,:]
S_test = np.swapaxes(d["s_test"][:,idxT,:,:,None,:],4,1)[:,-1,:,:,:,:]
TT = d["T_test"][idxT]
CX = d["X_test"]
CY = d["Y_test"]
X_sim_test = d["XX_test"]
Y_sim_test = d["YY_test"]
dtype_double = torch.FloatTensor
cdtype_double = torch.cuda.DoubleTensor
train_a = torch.from_numpy(np.asarray(U_train)).type(dtype_double)
train_u = torch.from_numpy(np.asarray(S_train)).type(dtype_double)
test_a = torch.from_numpy(np.asarray(U_test)).type(dtype_double)
test_u = torch.from_numpy(np.asarray(S_test)).type(dtype_double)
print(train_u.shape, train_a.shape)
print(test_u.shape, test_a.shape)
assert (S == train_u.shape[-3])
assert (T == train_u.shape[-2])
assert (par == train_u.shape[-1])
train_a = train_a.reshape(ntrain,S,S,1,par).repeat([1,1,1,T,1])
test_a = test_a.reshape(ntest,S,S,1,par).repeat([1,1,1,T,1])
ind_train = torch.randint(S*S*T, (ntrain, P))
ind_test = torch.randint(S*S*T, (ntest, P))
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(train_a, train_u, ind_train), batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(test_a, test_u, ind_test), batch_size=batch_size, shuffle=False)
t2 = default_timer()
print('preprocessing finished, time used:', t2-t1)
device = torch.device('cuda')
################################################################
# training and evaluation
################################################################
batch_ind = torch.arange(batch_size).reshape(-1, 1).repeat(1, P)
model = FNO3d(modes, modes, modes, width).cuda()
print(count_params(model))
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4) # the weight decay is 1e-4 originally
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=scheduler_step, gamma=scheduler_gamma)
myloss = LpLoss(size_average=False)
start_time = timeit.default_timer()
for ep in range(epochs):
model.train()
t1 = default_timer()
train_mse = 0
train_l2 = 0
for x, y, idx in train_loader:
x, y = x.cuda(), y.cuda()
optimizer.zero_grad()
out = model(x).view(batch_size, S*S*T, par)
y = y.reshape(batch_size, S*S*T, par)
y = y[batch_ind, idx]
out = out[batch_ind, idx]
mse = F.mse_loss(out, y, reduction='mean')
l2 = myloss(out.view(batch_size, -1), y.view(batch_size, -1))
l2.backward()
optimizer.step()
train_mse += mse.item()
train_l2 += l2.item()
scheduler.step()
model.eval()
test_l2 = 0.0
with torch.no_grad():
for x, y, idx in test_loader:
x, y = x.cuda(), y.cuda()
out = model(x).view(batch_size, S*S*T, par)
y = y.reshape(batch_size, S*S*T,par)
y = y[batch_ind, idx]
out = out[batch_ind, idx]
test_l2 += myloss(out.view(batch_size, -1), y.view(batch_size, -1)).item()
train_mse /= len(train_loader)
train_l2 /= ntrain
test_l2 /= ntest
t2 = default_timer()
print(ep, t2-t1, train_mse, train_l2, test_l2)
elapsed = timeit.default_timer() - start_time
print("The training wall-clock time is seconds is equal to %f seconds"%elapsed)
pred = torch.zeros(test_u.shape)
index = 0
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(test_a, test_u), batch_size=1, shuffle=False)
test_error_u = []
test_error_rho_np = []
test_error_u_np = []
test_error_v_np = []
with torch.no_grad():
for x, y in test_loader:
test_l2 = 0
x, y = x.cuda(), y.cuda()
out = model(x).view(S, S, T, par)
pred[index,:,:,:] = out
test_l2 += myloss(out.view(1, -1), y.view(1, -1)).item()
test_error_u.append(test_l2)
test_error_rho_np.append(np.linalg.norm(y.cpu().numpy().reshape(test_u.shape[2]*test_u.shape[2]*T,par)[:,0]- out.cpu().numpy().reshape(test_u.shape[2]*test_u.shape[2]*T,par)[:,0],2)/np.linalg.norm(y.cpu().numpy().reshape(test_u.shape[2]*test_u.shape[2]*T,par)[:,0],2))
test_error_u_np.append(np.linalg.norm(y.cpu().numpy().reshape(test_u.shape[2]*test_u.shape[2]*T,par)[:,1]- out.cpu().numpy().reshape(test_u.shape[2]*test_u.shape[2]*T,par)[:,1],2)/np.linalg.norm(y.cpu().numpy().reshape(test_u.shape[2]*test_u.shape[2]*T,par)[:,1],2))
test_error_v_np.append(np.linalg.norm(y.cpu().numpy().reshape(test_u.shape[2]*test_u.shape[2]*T,par)[:,2]- out.cpu().numpy().reshape(test_u.shape[2]*test_u.shape[2]*T,par)[:,2],2)/np.linalg.norm(y.cpu().numpy().reshape(test_u.shape[2]*test_u.shape[2]*T,par)[:,2],2))
index = index + 1
print("The average test rho error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(test_error_rho_np),np.std(test_error_rho_np),np.min(test_error_rho_np),np.max(test_error_rho_np)))
print("The average test u error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(test_error_u_np),np.std(test_error_u_np),np.min(test_error_u_np),np.max(test_error_u_np)))
print("The average test v error is %e the standard deviation is %e the min error is %e and the max error is %e"%(np.mean(test_error_v_np),np.std(test_error_v_np),np.min(test_error_v_np),np.max(test_error_v_np))) | 12,999 | 40.800643 | 276 | py |
null | LOCA-main/ShallowWaters/LOCA/LOCAShallowWater.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from jax.core import as_named_shape
from pathos.pools import ProcessPool
from scipy import linalg, interpolate
from sklearn import gaussian_process as gp
import argparse
from jax.experimental.stax import Dense, Gelu, Relu
from jax.experimental import stax
import os
import timeit
from jax.experimental import optimizers
from absl import app
import jax
from jax import vjp
import jax.numpy as jnp
import numpy as np
from jax.numpy.linalg import norm
from jax import random, grad, vmap, jit, pmap
from functools import partial
from torch.utils import data
from scipy import interpolate
from tqdm import trange
from math import log, sqrt, sin, cos
import itertools
import torch
from kymatio.numpy import Scattering2D
def get_freer_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Used >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
return str(np.argmin(memory_available))
os.environ['CUDA_VISIBLE_DEVICES']= get_freer_gpu()
def output_construction(Ux,t_his,cx, cy, ng,P=1000, num_train=1000, ds=3, Nx=30, Ny=30, Nt=100):
U_all = np.zeros((P,ds))
Y_all = np.zeros((P,ds))
it = np.random.randint(Nt, size=P)
x = np.random.randint(Nx, size=P)
y = np.random.randint(Ny, size=P)
T, X, Y = np.meshgrid(t_his,cx,cy,indexing="ij")
Y_all[:,:] = np.concatenate((T[it,x][range(P),y][:,None], X[it,x][range(P),y][:,None], Y[it,x][range(P),y][:,None]),axis=-1)
U_all[:,:] = Ux[it,x][range(P),y]
return U_all, Y_all
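# A minimal shape sketch (the `_demo_output_construction` helper and its
# sizes are hypothetical, for illustration): P random space-time queries
# (t, x, y) and the matching solution values are drawn from a field of
# shape (Nt, Nx, Ny, ds).
def _demo_output_construction():
    Nt_, Nx_, Ny_ = 5, 8, 8
    Ux = np.random.randn(Nt_, Nx_, Ny_, 3)
    U_all, Y_all = output_construction(Ux, np.linspace(0, 1, Nt_),
                                       np.linspace(0, 1, Nx_),
                                       np.linspace(0, 1, Ny_), 0,
                                       P=16, Nx=Nx_, Ny=Ny_, Nt=Nt_)
    assert U_all.shape == (16, 3) and Y_all.shape == (16, 3)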
def pairwise_distances(dist,**arg):
return jit(vmap(vmap(partial(dist,**arg),in_axes=(None,0)),in_axes=(0,None)))
def euclid_distance(x,y,square=True):
    # Returns the squared Euclidean distance ||x - y||^2; the `square` flag is
    # kept for API compatibility, but the square root is never taken here.
    XX=jnp.dot(x,x)
    YY=jnp.dot(y,y)
    XY=jnp.dot(x,y)
    return XX+YY-2*XY
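# A minimal sketch (the `_demo_pairwise_distances` helper is hypothetical,
# for illustration): the vmapped map above returns the full matrix of
# squared Euclidean distances between two point sets.
def _demo_pairwise_distances():
    dist_fn = pairwise_distances(euclid_distance)
    x = jnp.array([[0.0, 0.0], [1.0, 0.0]])
    D = dist_fn(x, x)  # D[i, j] = ||x_i - x_j||^2
    assert D.shape == (2, 2)
    assert jnp.allclose(D, jnp.array([[0.0, 1.0], [1.0, 0.0]]))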
class DataGenerator(data.Dataset):
def __init__(self, u, y, s,
batch_size=100, rng_key=random.PRNGKey(1234)):
'Initialization'
self.u = u
self.y = y
self.s = s
self.N = u.shape[0]
self.batch_size = batch_size
self.key = rng_key
def __getitem__(self, index):
'Generate one batch of data'
self.key, subkey = random.split(self.key)
inputs,outputs = self.__data_generation(subkey)
return inputs, outputs
@partial(jit, static_argnums=(0,))
def __data_generation(self, key):
'Generates data containing batch_size samples'
idx = random.choice(key, self.N, (self.batch_size,), replace=False)
s = self.s[idx,:,:]
inputsxu = self.u[idx,:,:]
y = self.y[idx,:,:]
inputs = (inputsxu, y)
return inputs, s
class PositionalEncodingY:
def __init__(self, Y, d_model, max_len = 100, H=4):
self.d_model = int(np.ceil(d_model/6)*2)
self.Y = Y
self.max_len = max_len
self.H = H
def forward(self, x):
pet = np.zeros((x.shape[0], self.max_len, self.H))
pex = np.zeros((x.shape[0], self.max_len, self.H))
pey = np.zeros((x.shape[0], self.max_len, self.H))
T = jnp.take(self.Y, 0, axis=2)[:,:,None]
X = jnp.take(self.Y, 1, axis=2)[:,:,None]
Y = jnp.take(self.Y, 2, axis=2)[:,:,None]
positionT = jnp.tile(T,(1,1,self.H))
positionX = jnp.tile(X,(1,1,self.H))
positionY = jnp.tile(Y,(1,1,self.H))
div_term = 2**jnp.arange(0,int(self.H/2),1)*jnp.pi
pet = jax.ops.index_update(pet, jax.ops.index[:,:,0::2], jnp.cos(positionT[:,:,0::2] * div_term))
pet = jax.ops.index_update(pet, jax.ops.index[:,:,1::2], jnp.sin(positionT[:,:,1::2] * div_term))
pex = jax.ops.index_update(pex, jax.ops.index[:,:,0::2], jnp.cos(positionX[:,:,0::2] * div_term))
pex = jax.ops.index_update(pex, jax.ops.index[:,:,1::2], jnp.sin(positionX[:,:,1::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,0::2], jnp.cos(positionY[:,:,0::2] * div_term))
pey = jax.ops.index_update(pey, jax.ops.index[:,:,1::2], jnp.sin(positionY[:,:,1::2] * div_term))
pos_embedding = jnp.concatenate((pet,pex,pey),axis=-1)
x = jnp.concatenate([x, pos_embedding], -1)
return x
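# A minimal shape sketch (the `_demo_positional_encoding_y` helper and its
# sizes are hypothetical, for illustration): H sin/cos features are appended
# per coordinate (t, x, y), so the last dimension grows from 3 to 3 + 3 * H.
def _demo_positional_encoding_y():
    y = jnp.zeros((2, 7, 3))  # (batch, P, (t, x, y))
    enc = PositionalEncodingY(y, d_model=21, max_len=7, H=4)
    assert enc.forward(y).shape == (2, 7, 3 + 3 * 4)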
def scatteringTransform(sig, l=100, m=100, training_batch_size = 100):
scattering = Scattering2D(J=1, L=3, max_order=2, shape=(32, 32))
cwtmatr = np.zeros((training_batch_size, 768, 1))
sig = np.array(sig)
for i in range(0,training_batch_size):
scatteringCoeffs = scattering(sig[i,:,:].reshape(32,32))
cwtmatr[i,:,:] = scatteringCoeffs[:,:,:].flatten()[:,None]
return cwtmatr
class LOCA:
def __init__(self, q_layers, g_layers, v_layers , m=100, P=100, H=100):
# Network initialization and evaluation functions
self.encoder_init2, self.encoder_apply2 = self.init_NN(q_layers, activation=Gelu)
self.in_shape = (-1, q_layers[0])
self.out_shape, encoder_params2 = self.encoder_init2(random.PRNGKey(10000), self.in_shape)
self.encoder_apply2 = self.encoder_apply2
self.v_init, self.v_apply = self.init_NN(v_layers, activation=Gelu)
self.in_shape = (-1, v_layers[0])
self.out_shape, v_params = self.v_init(random.PRNGKey(10000), self.in_shape)
self.v_apply = self.v_apply
self.g_init, self.g_apply = self.init_NN(g_layers, activation=Gelu)
self.in_shape = (-1, g_layers[0])
self.out_shape, g_params = self.g_init(random.PRNGKey(10000), self.in_shape)
self.g_apply = self.g_apply
beta = [1.]
gamma = [1.]
params = (beta,gamma,encoder_params2, g_params, v_params)
# Use optimizers to set optimizer initialization and update functions
self.opt_init,self.opt_update,self.get_params = optimizers.adam(optimizers.exponential_decay(1e-3,
decay_steps=100,
decay_rate=0.99))
self.opt_state = self.opt_init(params)
# Logger
self.itercount = itertools.count()
self.loss_log = []
self.vdistance_function = vmap(pairwise_distances(euclid_distance))
print("Model initialized")
def init_NN(self, Q, activation=Gelu):
layers = []
num_layers = len(Q)
if num_layers < 2:
net_init, net_apply = stax.serial()
else:
for i in range(0, num_layers-2):
layers.append(Dense(Q[i+1]))
layers.append(activation)
layers.append(Dense(Q[-1]))
net_init, net_apply = stax.serial(*layers)
return net_init, net_apply
    def LOCA_net(self, params, inputs, ds=3):
        beta, gamma, q_params, g_params, v_params = params
        inputsxu, inputsy = inputs
        # Encode the query coordinates, then build an RBF kernel between the
        # encoded queries: K[i, j] = beta * exp(-gamma * d(y_i, y_j)).
        inputsy = self.encoder_apply2(q_params,inputsy)
        d = self.vdistance_function(inputsy, inputsy)
        K = beta[0]*jnp.exp(-gamma[0]*d)
        # Symmetrically normalize the kernel by its row/column mass.
        Kzz = jnp.sqrt((1./K.shape[1])*jnp.sum(K ,axis=1,keepdims=True))
        Kyz = jnp.sqrt((1./K.shape[1])*jnp.sum(K ,axis=-1,keepdims=True))
        mean_K = jnp.matmul(Kyz, Kzz)
        K = jnp.divide(K,mean_K)
        # Kernel-coupled attention weights: average the score heads over the
        # query locations with the normalized kernel, then softmax over n_hat.
        g = self.g_apply(g_params, inputsy)
        g = (1./K.shape[1])*jnp.einsum("ijk,ikml->ijml",K,g.reshape(inputsy.shape[0], inputsy.shape[1], ds, int(g.shape[2]/ds)))
        g = jax.nn.softmax(g, axis=-1)
        # Values are computed from the scattering features of the input field.
        value_heads = self.v_apply(v_params, inputsxu.reshape(inputsxu.shape[0],1,inputsxu.shape[1]*inputsxu.shape[2]))
        value_heads = value_heads.reshape(value_heads.shape[0],int(value_heads.shape[2]/ds),ds)
        # Contract the attention weights with the values: ds outputs per query.
        attn_vec = jnp.einsum("ijkl,ilk->ijk", g,value_heads)
        return attn_vec
@partial(jax.jit, static_argnums=0)
def loss(self, params, batch):
inputs, outputs = batch
y_pred = self.LOCA_net(params,inputs)
loss = np.mean((outputs- y_pred)**2)
return loss
@partial(jax.jit, static_argnums=0)
def lossT(self, params, batch):
inputs, outputs = batch
y_pred = self.LOCA_net(params,inputs)
loss = np.mean((outputs - y_pred)**2)
return loss
@partial(jax.jit, static_argnums=0)
def L2errorT(self, params, batch):
inputs, y = batch
y_pred = self.LOCA_net(params,inputs)
return norm(y.flatten() - y_pred.flatten(), 2)/norm(y.flatten(),2)
@partial(jax.jit, static_argnums=0)
def L2error(self, params, batch):
inputs, y = batch
y_pred = self.LOCA_net(params,inputs)
return norm(y.flatten() - y_pred.flatten(), 2)/norm(y.flatten(),2)
@partial(jit, static_argnums=(0,))
def step(self, i, opt_state, batch):
params = self.get_params(opt_state)
g = grad(self.loss)(params, batch)
return self.opt_update(i, g, opt_state)
def train(self, train_dataset, test_dataset, nIter = 10000):
train_data = iter(train_dataset)
print("Inputs made iterable")
if test_dataset is not None:
test_data = iter(test_dataset)
pbar = trange(nIter)
for it in pbar:
train_batch = next(train_data)
test_batch = next(test_data)
self.opt_state = self.step(next(self.itercount), self.opt_state, train_batch)
if it % 100 == 0:
params = self.get_params(self.opt_state)
loss_train = self.loss(params, train_batch)
loss_test = self.lossT(params, test_batch)
errorTrain = self.L2error(params, train_batch)
errorTest = self.L2errorT(params, test_batch)
self.loss_log.append(loss_train)
pbar.set_postfix({'Training loss': loss_train,
'Testing loss' : loss_test,
'Test error': errorTest,
'Train error': errorTrain})
else:
pbar = trange(nIter)
for it in pbar:
train_batch = next(train_data)
self.opt_state = self.step(next(self.itercount), self.opt_state, train_batch)
if it % 100 == 0:
params = self.get_params(self.opt_state)
loss_train = self.loss(params, train_batch)
errorTrain = self.L2error(params, train_batch)
self.loss_log.append(loss_train)
pbar.set_postfix({'Training loss': loss_train,
'Train error': errorTrain})
@partial(jit, static_argnums=(0,))
def predict(self, params, inputs):
s_pred = self.LOCA_net(params,inputs)
return s_pred
@partial(jit, static_argnums=(0,))
def predictT(self, params, inputs):
s_pred = self.LOCA_net(params,inputs)
return s_pred
def ravel_list(self, *lst):
return jnp.concatenate([jnp.ravel(elt) for elt in lst]) if lst else jnp.array([])
def ravel_pytree(self, pytree):
leaves, treedef = jax.tree_util.tree_flatten(pytree)
flat, unravel_list = vjp(self.ravel_list, *leaves)
unravel_pytree = lambda flat: jax.tree_util.tree_unflatten(treedef, unravel_list(flat))
return flat, unravel_pytree
def count_params(self, params):
beta, gamma,q_params, g_params, v_params = params
qlv, _ = self.ravel_pytree(q_params)
vlv, _ = self.ravel_pytree(v_params)
glv, _ = self.ravel_pytree(g_params)
print("The number of model parameters is:",qlv.shape[0]+vlv.shape[0]+glv.shape[0])
def predict_function(inputs_trainxu,y, model=None,params= None, H=None):
uCNN_super_all = model.predict(params, (inputs_trainxu, y))
return uCNN_super_all, y[:,:,0:1], y[:,:,1:2], y[:,:,2:3]
def error_full_resolution(uCNN_super_all, s_all,tag='train', num_train=1000,P=128, Nx=30, Ny=30, Nt=10, idx=None, ds=3):
print(s_all.shape)
z = uCNN_super_all.reshape(num_train,Nx*Ny*Nt,ds)
s = s_all.reshape(num_train,Nx*Ny*Nt,ds)
test_error_rho = []
for i in range(0,num_train):
test_error_rho.append(norm(s[i,:,0]- z[i,:,0], 2)/norm(s[i,:,0], 2))
print("The average "+tag+" rho error for the super resolution is %e, the standard deviation %e, the minimum error is %e and the maximum error is %e"%(np.mean(test_error_rho),np.std(test_error_rho),np.min(test_error_rho),np.max(test_error_rho)))
test_error_u = []
for i in range(0,num_train):
test_error_u.append(norm(s[i,:,1]- z[i,:,1], 2)/norm(s[i,:,1], 2))
print("The average "+tag+" u error for the super resolution is %e, the standard deviation %e, the minimum error is %e and the maximum error is %e"%(np.mean(test_error_u),np.std(test_error_u),np.min(test_error_u),np.max(test_error_u)))
test_error_v = []
for i in range(0,num_train):
test_error_v.append(norm(s[i,:,2]- z[i,:,2], 2)/norm(s[i,:,2], 2))
print("The average "+tag+" v error for the super resolution is %e, the standard deviation %e, the minimum error is %e and the maximum error is %e"%(np.mean(test_error_v),np.std(test_error_v),np.min(test_error_v),np.max(test_error_v)))
absolute_error = np.abs(z-s)
return absolute_error, np.mean(test_error_rho), np.mean(test_error_u),np.mean(test_error_v), (test_error_rho, test_error_u, test_error_v)
def minmax(a, n, mean):
minpos = a.index(min(a))
maxpos = a.index(max(a))
meanpos = min(range(len(a)), key=lambda i: abs(a[i]-mean))
print("The maximum is at position", maxpos)
print("The minimum is at position", minpos)
print("The mean is at position", meanpos)
return minpos,maxpos,meanpos
TRAINING_ITERATIONS = 80000
P = 128
m = 1024
L = 1
T = 1
num_train = 1000
num_test = 1000
casenum_train = 2
casenum_test = 2
training_batch_size = 100
dx = 3
du = 3
dy = 3
ds = 3
n_hat = 480
l = 100
Nx = 32
Ny = 32
Nt = 5
Ng = 0
H = 2
idxT = [10,15,20,25,30]
d = np.load("../Data/train_SW.npz")
u_train = d["U_train"][:,:,:,:]
S_train = d["s_train"][:,idxT,:,:,:]
T = d["T_train"][idxT]
CX = d["X_train"]
CY = d["Y_train"]
d = np.load("../Data/test_SW.npz")
u_test = d["U_test"][:,:,:,:]
S_test = d["s_test"][:,idxT,:,:,:]
T = d["T_test"][idxT]
CX = d["X_test"]
CY = d["Y_test"]
s_train = np.zeros((num_train,P,ds))
y_train = np.zeros((num_train,P,dy))
U_train = np.zeros((num_train,m,du))
X_train = np.zeros((num_train,m,dx))
s_test = np.zeros((num_test,P,ds))
y_test = np.zeros((num_test,P,dy))
U_test = np.zeros((num_test,m,du))
X_test = np.zeros((num_test,m,dx))
for i in range(0,num_train):
s_train[i,:,:], y_train[i,:,:] = output_construction(S_train[i,:,:,:,:], T, CX, CY, Ng,P=P,Nt=Nt)
U_train[i,:,:] = u_train[i,:,:,:].reshape(Nx*Ny,du)
for i in range(num_test):
s_test[i,:,:], y_test[i,:,:] = output_construction(S_test[i,:,:,:,:], T, CX, CY, Ng,P=P,Nt=Nt)
U_test[i,:,:] = u_test[i,:,:,:].reshape(Nx*Ny,du)
print("Dataset created")
X_train = jnp.asarray(X_train)
U_train = np.asarray(U_train)
y_train = jnp.asarray(y_train)
s_train = jnp.asarray(s_train)
X_test = jnp.asarray(X_test)
U_test = np.asarray(U_test)
y_test = jnp.asarray(y_test)
s_test = jnp.asarray(s_test)
X_train = jnp.reshape(X_train,(num_train,m,dx))
U_train = np.reshape(U_train,(num_train,m,du))
y_train = jnp.reshape(y_train,(num_train,P,dy))
s_train = jnp.reshape(s_train,(num_train,P,ds))
X_test = jnp.reshape(X_test,(num_test,m,dx))
U_test = np.reshape(U_test,(num_test,m,du))
y_test = jnp.reshape(y_test,(num_test,P,dy))
s_test = jnp.reshape(s_test,(num_test,P,ds))
inputs_trainxu = np.zeros((num_train,768,3))
inputs_trainxu[:,:,0:1] = jnp.asarray(scatteringTransform(U_train[:,:,0:1], l=L, m=m, training_batch_size=num_train))
inputs_trainxu[:,:,1:2] = jnp.asarray(scatteringTransform(U_train[:,:,1:2], l=L, m=m, training_batch_size=num_train))
inputs_trainxu[:,:,2:3] = jnp.asarray(scatteringTransform(U_train[:,:,2:3], l=L, m=m, training_batch_size=num_train))
inputs_trainxu = jnp.array(inputs_trainxu)
inputs_testxu = np.zeros((num_test,768,3))
inputs_testxu[:,:,0:1] = jnp.asarray(scatteringTransform(U_test[:,:,0:1], l=L, m=m, training_batch_size=num_test))
inputs_testxu[:,:,1:2] = jnp.asarray(scatteringTransform(U_test[:,:,1:2], l=L, m=m, training_batch_size=num_test))
inputs_testxu[:,:,2:3] = jnp.asarray(scatteringTransform(U_test[:,:,2:3], l=L, m=m, training_batch_size=num_test))
inputs_testxu = jnp.array(inputs_testxu)
print("Model inputs created")
pos_encodingy = PositionalEncodingY(y_train,int(y_train.shape[1]*y_train.shape[2]), max_len = P, H=H)
y_train = pos_encodingy.forward(y_train)
del pos_encodingy
pos_encodingyt = PositionalEncodingY(y_test,int(y_test.shape[1]*y_test.shape[2]), max_len = P,H=H)
y_test = pos_encodingyt.forward(y_test)
del pos_encodingyt
train_dataset = DataGenerator(inputs_trainxu, y_train, s_train, training_batch_size)
train_dataset = iter(train_dataset)
test_dataset = DataGenerator(inputs_testxu, y_test, s_test, training_batch_size)
test_dataset = iter(test_dataset)
q_layers = [L*dy+H*dy, 1024, l]
v_layers = [768*du, 1024, ds*n_hat]
g_layers = [l, 1024, ds*n_hat]
print("DataGenerator defined")
model = LOCA(q_layers, g_layers, v_layers, m=m, P=P, H=H)
model.count_params(model.get_params(model.opt_state))
start_time = timeit.default_timer()
model.train(train_dataset, None, nIter=TRAINING_ITERATIONS)
elapsed = timeit.default_timer() - start_time
print("The training wall-clock time is seconds is equal to %f seconds"%elapsed)
params = model.get_params(model.opt_state)
TT, XX, YY = np.meshgrid(T, CX, CY, indexing="ij")
TT = np.expand_dims(TT,axis=0)
XX = np.expand_dims(XX,axis=0)
YY = np.expand_dims(YY,axis=0)
TT = np.tile(TT,(num_test,1,1)).reshape(num_test,Nx*Ny*Nt,1)
XX = np.tile(XX,(num_test,1,1)).reshape(num_test,Nx*Ny*Nt,1)
YY = np.tile(YY,(num_test,1,1)).reshape(num_test,Nx*Ny*Nt,1)
Y_test_in = np.concatenate((TT, XX, YY),axis=-1)
Y_train_in = np.concatenate((TT, XX, YY),axis=-1)
pos_encodingy = PositionalEncodingY(Y_train_in,int(Y_train_in.shape[1]*Y_train_in.shape[2]), max_len = Y_train_in.shape[1], H=H)
Y_train_in = pos_encodingy.forward(Y_train_in)
del pos_encodingy
pos_encodingy = PositionalEncodingY(Y_test_in,int(Y_test_in.shape[1]*Y_test_in.shape[2]), max_len = Y_test_in.shape[1], H=H)
Y_test_in = pos_encodingy.forward(Y_test_in)
del pos_encodingy
print("Predicting the solution for the full resolution")
uCNN_super_all_test = np.zeros_like(S_test).reshape(num_test, Nx*Ny*Nt, ds)
for i in range(0, Nx*Ny*Nt, P):
idx = i + np.arange(0,P)
uCNN_super_all_test[:,idx,:], T_out, X, Y = predict_function(inputs_testxu , Y_test_in[:,idx,:], model=model, params=params, H=H)
uCNN_super_all_train = np.zeros_like(S_train).reshape(num_train, Nx*Ny*Nt, ds)
for i in range(0, Nx*Ny*Nt, P):
idx = i + np.arange(0,P)
uCNN_super_all_train[:,idx,:], T, X, Y = predict_function(inputs_trainxu , Y_train_in[:,idx,:], model=model, params=params, H=H)
absolute_error_train, mean_train_error_rho, mean_train_error_u, mean_train_error_v, train_error = error_full_resolution(uCNN_super_all_train,S_train,tag='train',P=P,Nx=Nx, Ny=Ny, Nt=Nt, idx = None, num_train=num_train)
absolute_error_test, mean_test_error_rho, mean_test_error_u, mean_test_error_v, test_error = error_full_resolution(uCNN_super_all_test,S_test,tag='test',P=P,Nx=Nx, Ny=Ny, Nt=Nt, idx = None, num_train=num_test)
| 19,553 | 37.644269 | 248 | py |
signal_transformer | signal_transformer-master/README.md | # Signal Transformer
Transformer-based model for generating efficient force/torque signal representations for haptic localization of a legged robot.
## Architecture

## Credits:
- putpy_tf package by MSc. Jakub Bednarek (https://github.com/jbed94)
- https://github.com/omoindrot/tensorflow-triplet-loss
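## Usage
A minimal sketch of producing embeddings, assuming the `model/` directory is on `PYTHONPATH` (the modules import each other by bare name); the constructor arguments mirror the `__main__` example in `model/signal_transformer.py`:

```python
import tensorflow as tf
from signal_transformer import SignalTransformer

model = SignalTransformer(num_signals=6, num_layers=1, d_model=16,
                          num_heads=2, dff=8, latent_vector_size=256,
                          input_signal_length=160)
model.warmup()
# One batch of force/torque windows: (batch, signal_length, num_signals).
embeddings = model(tf.zeros([1, 160, 6]), tf.constant(False))
```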
| 366 | 35.7 | 130 | md |
signal_transformer | signal_transformer-master/loss/__init__.py | 0 | 0 | 0 | py |
|
signal_transformer | signal_transformer-master/loss/triplet_loss.py | import numpy as np
import tensorflow as tf
def pairwise_distances(vector):
return tf.sqrt(tf.reduce_sum((tf.expand_dims(vector, 1) - tf.expand_dims(vector, 0)) ** 2, 2))
def batch_all_triplet_loss(positions, embeddings, margin=0.1, dist_threshold=0.25):
pairwise_dist_emb = pairwise_distances(embeddings)
pairwise_dist_pos = pairwise_distances(positions)
anchor_positive_dist = tf.expand_dims(pairwise_dist_emb, 2)
anchor_positive_dist_pos = tf.expand_dims(pairwise_dist_pos, 2)
anchor_negative_dist = tf.expand_dims(pairwise_dist_emb, 1)
anchor_negative_dist_pos = tf.expand_dims(pairwise_dist_pos, 1)
triplet_loss = anchor_positive_dist - anchor_negative_dist + margin
positive_shape_multpl = tf.constant([1, 1, tf.shape(anchor_negative_dist_pos)[0].numpy()])
negative_shape_multpl = tf.constant([1, tf.shape(anchor_negative_dist_pos)[0].numpy(), 1])
anchor_positive_dist_pos_broadcast = tf.tile(anchor_positive_dist_pos, positive_shape_multpl)
anchor_negative_dist_pos_broadcast = tf.tile(anchor_negative_dist_pos, negative_shape_multpl)
positives_mask = anchor_positive_dist_pos_broadcast <= dist_threshold
negatives_mask = anchor_negative_dist_pos_broadcast > dist_threshold
indices_equal = tf.cast(tf.eye(tf.shape(positions)[0]), tf.bool)
indices_not_equal = tf.logical_not(indices_equal)
i_not_equal_j = tf.expand_dims(indices_not_equal, 2)
i_not_equal_k = tf.expand_dims(indices_not_equal, 1)
j_not_equal_k = tf.expand_dims(indices_not_equal, 0)
distinct_indices = tf.logical_and(tf.logical_and(i_not_equal_j, i_not_equal_k), j_not_equal_k)
    # tf.logical_and is binary, so the three masks are combined pairwise: a
    # triplet is valid when its indices are distinct, the anchor-positive pair
    # lies within dist_threshold, and the anchor-negative pair lies beyond it.
    mask = tf.logical_and(tf.logical_and(distinct_indices, positives_mask), negatives_mask)
mask = tf.cast(mask, tf.float32)
triplet_loss = tf.multiply(mask, triplet_loss)
triplet_loss = tf.maximum(triplet_loss, 0.0)
valid_triplets = tf.cast(tf.greater(triplet_loss, 1e-16), tf.float32)
num_positive_triplets = tf.reduce_sum(valid_triplets)
num_valid_triplets = tf.reduce_sum(mask)
fraction_positive_triplets = num_positive_triplets / (num_valid_triplets + 1e-16)
triplet_loss = tf.reduce_sum(triplet_loss) / (num_positive_triplets + 1e-16)
return triplet_loss, fraction_positive_triplets
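# A minimal usage sketch (the `_demo_batch_all_triplet_loss` helper and its
# data are hypothetical, for illustration): two pairs of nearby robot
# positions, so pairs closer than dist_threshold act as positives and the
# far pairs as negatives.
def _demo_batch_all_triplet_loss():
    positions = tf.constant([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
    embeddings = tf.random.normal([4, 8])
    loss, frac = batch_all_triplet_loss(positions, embeddings,
                                        margin=0.1, dist_threshold=0.25)
    assert loss.shape == () and frac.shape == ()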
| 2,254 | 43.215686 | 98 | py |
signal_transformer | signal_transformer-master/model/__init__.py | 0 | 0 | 0 | py |
|
signal_transformer | signal_transformer-master/model/encoder.py | import tensorflow as tf
from positional_encoding import PositionalEmbedding
from encoder_layer import EncoderLayer
class Encoder(tf.keras.layers.Layer):
def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size, maximum_position_encoding, rate=0.1):
super(Encoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
        self.pos_encoding = PositionalEmbedding(input_vocab_size, maximum_position_encoding, self.d_model)
self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate) for _ in range(num_layers)]
self.dropout = tf.keras.layers.Dropout(rate)
def call(self, x, training, mask):
x = self.pos_encoding(x)
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.enc_layers[i](x, training, mask)
return x # (batch_size, input_seq_len, d_model) | 906 | 33.884615 | 115 | py |
signal_transformer | signal_transformer-master/model/encoder_layer.py | import tensorflow as tf
from multi_head_attention import MultiHeadAttention
class EncoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff, rate=0.1):
super(EncoderLayer, self).__init__()
self.mha = MultiHeadAttention(d_model, num_heads)
self.ffn = tf.keras.Sequential([
tf.keras.layers.Dense(dff, activation='relu'),
tf.keras.layers.Dense(d_model)
])
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
def call(self, x, training, mask):
attn_output, _ = self.mha(x, x, x, mask) # (batch_size, input_seq_len, d_model)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, d_model)
ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(out1 + ffn_output) # (batch_size, input_seq_len, d_model)
return out2 | 1,237 | 38.935484 | 89 | py |
signal_transformer | signal_transformer-master/model/multi_head_attention.py | import tensorflow as tf
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.wq = tf.keras.layers.Dense(d_model)
self.wk = tf.keras.layers.Dense(d_model)
self.wv = tf.keras.layers.Dense(d_model)
self.dense = tf.keras.layers.Dense(d_model)
def split_heads(self, x, batch_size):
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, v, k, q, mask):
batch_size = tf.shape(q)[0]
q = self.wq(q) # (batch_size, seq_len, d_model)
k = self.wk(k) # (batch_size, seq_len, d_model)
v = self.wv(v) # (batch_size, seq_len, d_model)
q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)
k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)
v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)
# scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
# attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
scaled_attention, attention_weights = scaled_dot_product_attention(q, k, v, mask)
scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth)
concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model)
output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)
return output, attention_weights
def scaled_dot_product_attention(q, k, v, mask):
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# add the mask to the scaled tensor.
if mask is not None:
scaled_attention_logits += (mask * -1e9)
# softmax is normalized on the last axis (seq_len_k) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights
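# A minimal shape sketch (the `_demo_multi_head_attention` helper and its
# sizes are hypothetical, for illustration): self-attention over a 5-step
# sequence with d_model=16 split across 2 heads.
def _demo_multi_head_attention():
    mha = MultiHeadAttention(d_model=16, num_heads=2)
    x = tf.random.normal([1, 5, 16])
    out, attn = mha(x, x, x, None)
    assert out.shape == (1, 5, 16)     # (batch, seq_len_q, d_model)
    assert attn.shape == (1, 2, 5, 5)  # (batch, num_heads, seq_len_q, seq_len_k)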
| 2,522 | 39.047619 | 123 | py |
signal_transformer | signal_transformer-master/model/positional_encoding.py | import numpy as np
import tensorflow as tf
class PositionalEmbedding(tf.keras.Model):
def __init__(self, dict_size, max_sequence_size, embedding_size):
super(PositionalEmbedding, self).__init__(name='positional_embedding')
self.embedding_size = embedding_size
self.embedding = tf.keras.layers.Embedding(dict_size, embedding_size)
self.embedding_positional = positional_encoding(max_sequence_size, embedding_size)
self.supports_masking = True
def call(self, inputs, **kwargs):
in_shape = tf.shape(inputs)
positions = tf.range(0, in_shape[1])[tf.newaxis]
positions = tf.tile(positions, (in_shape[0], 1))
dict_emb = self.embedding(positions)
dict_emb *= tf.math.sqrt(tf.cast(self.embedding_size, tf.float32))
return dict_emb + inputs
    def warmup(self):
        with tf.name_scope(self.name):
            # Only the learned embedding has weights to build; the sinusoidal
            # table from positional_encoding() is a constant tensor, so calling
            # build() on it would raise an AttributeError.
            self.embedding.build(None)
            self.built = True
def positional_encoding(position, d_model):
angle_rads = get_angles(np.arange(position)[:, np.newaxis], np.arange(d_model)[np.newaxis, :], d_model)
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
return tf.cast(angle_rads, dtype=tf.float32)
def get_angles(pos, i, d_model):
angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
return pos * angle_rates
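# A minimal sketch (the `_demo_positional_encoding_table` helper and its
# sizes are hypothetical, for illustration): the sinusoidal table has one
# row per position, with sin on even and cos on odd embedding indices.
def _demo_positional_encoding_table():
    table = positional_encoding(position=160, d_model=16)
    assert table.shape == (160, 16)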
| 1,466 | 33.928571 | 107 | py |
signal_transformer | signal_transformer-master/model/signal_transformer.py | import tensorflow as tf
from encoder import Encoder
class SignalTransformer(tf.keras.Model):
def __init__(self, num_signals, num_layers, d_model, num_heads, dff, latent_vector_size, input_signal_length,
rate=0.1):
super(SignalTransformer, self).__init__(name='signal_transformer')
self.projection = tf.keras.Sequential([
tf.keras.layers.Dense(d_model, activation='relu'),
tf.keras.layers.LayerNormalization()
])
self.encoder = Encoder(num_layers, d_model, num_heads, dff, input_signal_length, input_signal_length, rate)
self.pooling = tf.keras.layers.AveragePooling1D(d_model, data_format='channels_first')
self.embedding_gen = tf.keras.Sequential([
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(latent_vector_size)
])
self.call = self.call.get_concrete_function(
inputs=tf.TensorSpec([None, input_signal_length, num_signals], tf.float32),
training=tf.TensorSpec([], tf.bool)
)
@tf.function
def call(self, inputs, training):
projection_output = self.projection(inputs)
enc_output = self.encoder(projection_output, training, mask=None) # (batch_size, inp_seq_len, d_model)
pooling_out = self.pooling(enc_output)
pooling_out = tf.squeeze(pooling_out, axis=-1)
embeddings = self.embedding_gen(pooling_out) # (batch_size, tar_seq_len, target_vocab_size)
return embeddings
def warmup(self):
self(tf.zeros([1, 160, 6], tf.float32), tf.constant(False))
if __name__=="__main__":
model = SignalTransformer(num_signals=6,
num_layers=1,
d_model=16,
num_heads=2,
dff=8,
latent_vector_size=256,
input_signal_length=160)
model.warmup()
model.summary() | 2,032 | 37.358491 | 115 | py |
signal_transformer | signal_transformer-master/training/__init__.py | 0 | 0 | 0 | py |