prompt (string, lengths 15-655k) | completion (string, lengths 3-32.4k) | api (string, lengths 8-52)
---|---|---|
# pip install panel pandas matplotlib
import numpy as np
import pandas as pd
from matplotlib.figure import Figure
data_url = "https://cdn.jsdelivr.net/gh/holoviz/panel@master/examples/assets/occupancy.csv"
data = pd.read_csv(data_url, parse_dates=["date"]).set_index("date")
primary_color = "#0072B5"
secondary_color = "#94EA84"
def mpl_plot(avg, highlight):
fig = Figure(figsize=(10,5))
ax = fig.add_subplot()
avg.plot(ax=ax, c=primary_color)
if len(highlight):
highlight.plot(style="o", ax=ax, c=secondary_color)
return fig
def find_outliers(variable="Temperature", window=20, sigma=10, view_fn=mpl_plot):
avg = data[variable].rolling(window=window).mean()
residual = data[variable] - avg
std = residual.rolling(window=window).std()
outliers =
| np.abs(residual) | numpy.abs |
"""
This code is attributed to <NAME> (@YingtongDou) and UIC BDSC Lab
DGFraud (A Deep Graph-based Toolbox for Fraud Detection in TensorFlow 2.X)
https://github.com/safe-graph/DGFraud-TF2
"""
from typing import Tuple, Union
import scipy.sparse as sp
import numpy as np
def sparse_to_tuple(sparse_mx: sp.coo_matrix) -> Tuple[np.array, np.array,
np.array]:
"""
Convert sparse matrix to tuple representation.
:param sparse_mx: the graph adjacency matrix in scipy sparse matrix format
"""
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
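# Hedged usage sketch (added for illustration; not part of the original DGFraud file).
# It shows the (coords, values, shape) tuple produced by sparse_to_tuple for a tiny
# matrix; the helper name _demo_sparse_to_tuple is introduced here only as an example.
def _demo_sparse_to_tuple():
    demo = sp.coo_matrix(np.array([[0, 2], [3, 0]]))
    coords, values, shape = sparse_to_tuple(demo)
    # coords holds the (row, col) of each stored entry, values the entries themselves
    assert shape == (2, 2)
    assert values.tolist() == [2, 3]
    return coords, values, shape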
def normalize_adj(adj: np.array) -> sp.coo_matrix:
"""
Symmetrically normalize adjacency matrix
Parts of this code file were originally forked from
https://github.com/tkipf/gcn
:param adj: the graph adjacency matrix
"""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
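# Hedged sketch (added for illustration; not part of the original file): normalize_adj
# computes the symmetric normalization D^(-1/2) A D^(-1/2). For a single edge between
# two degree-1 nodes the normalized weight is 1/sqrt(1*1) = 1, so the matrix is unchanged.
def _demo_normalize_adj():
    adj = np.array([[0., 1.], [1., 0.]])
    adj_norm = normalize_adj(adj).toarray()
    assert np.allclose(adj_norm, adj)
    return adj_norm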
def preprocess_adj(adj: np.array, to_tuple: bool = True) -> \
Union[Tuple[np.array, np.array, np.array], sp.coo_matrix]:
"""
Preprocessing of adjacency matrix for simple GCN model
and conversion to tuple representation.
Parts of this code file were originally forked from
https://github.com/tkipf/gcn
:param adj: the graph adjacency matrix
"""
adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))
if to_tuple:
return sparse_to_tuple(adj_normalized)
else:
return adj_normalized
def preprocess_feature(features: np.array, to_tuple: bool = True) -> \
Union[Tuple[np.array, np.array, np.array], sp.csr_matrix]:
"""
Row-normalize feature matrix and convert to tuple representation
Parts of this code file were originally forked from
https://github.com/tkipf/gcn
:param features: the node feature matrix
:param to_tuple: whether to cast the feature matrix to a scipy sparse tuple
"""
features = sp.lil_matrix(features)
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
if to_tuple:
return sparse_to_tuple(features)
else:
return features
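# Hedged sketch (added for illustration; not part of the original file): after
# preprocess_feature, every non-empty row of the feature matrix sums to 1.
def _demo_preprocess_feature():
    feats = np.array([[1., 3.], [2., 2.]])
    normalized = preprocess_feature(feats, to_tuple=False).toarray()
    assert np.allclose(normalized.sum(axis=1), 1.0)
    return normalized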
def sample_mask(idx: np.array, n_class: int) -> np.array:
"""
Create mask for GCN.
Parts of this code file were originally forked from
https://github.com/tkipf/gcn
:param idx: the train/val/test indices
:param n_class: the number of classes for the data
"""
mask = np.zeros(n_class)
mask[idx] = 1
return np.array(mask, dtype=bool)
def pad_adjlist(x_data):
# Get lengths of each row of data
lens = np.array([len(x_data[i]) for i in range(len(x_data))])
# Mask of valid places in each row
mask = np.arange(lens.max()) < lens[:, None]
# Setup output array and put elements from data into masked positions
padded = np.zeros(mask.shape)
for i in range(mask.shape[0]):
padded[i] = np.random.choice(x_data[i], mask.shape[1])
padded[mask] = np.hstack((x_data[:]))
return padded
def matrix_to_adjlist(M, pad=True):
adjlist = []
for i in range(len(M)):
adjline = [i]
for j in range(len(M[i])):
if M[i][j] == 1:
adjline.append(j)
adjlist.append(adjline)
if pad:
adjlist = pad_adjlist(adjlist)
return adjlist
def pairs_to_matrix(pairs, nodes):
M = np.zeros((nodes, nodes))
for i, j in pairs:
M[i][j] = 1
return M
# Random walk on graph
def generate_random_walk(adjlist, start, walklength):
t = 1
walk_path = np.array([start])
while t <= walklength:
neighbors = adjlist[start]
current = np.random.choice(neighbors)
walk_path = np.append(walk_path, current)
start = current
t += 1
return walk_path
# sample multiple times for each node
def random_walks(adjlist, numerate, walklength):
nodes = range(0, len(adjlist)) # node index starts from zero
walks = []
for n in range(numerate):
for node in nodes:
walks.append(generate_random_walk(adjlist, node, walklength))
pairs = []
for i in range(len(walks)):
for j in range(1, len(walks[i])):
pair = [walks[i][0], walks[i][j]]
pairs.append(pair)
return pairs
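# Hedged sketch (added for illustration; not part of the original file): on a two-node
# cycle the random walks are deterministic, so the (start, visited) pairs produced by
# random_walks can be checked by hand.
def _demo_random_walks():
    adjlist = [[1], [0]]  # node 0 <-> node 1
    pairs = random_walks(adjlist, numerate=1, walklength=2)
    # walk from 0 is [0, 1, 0] -> pairs (0, 1), (0, 0); walk from 1 is [1, 0, 1] -> (1, 0), (1, 1)
    assert [[int(u), int(v)] for u, v in pairs] == [[0, 1], [0, 0], [1, 0], [1, 1]]
    return pairs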
def negative_sampling(adj_nodelist):
degree = [len(neighbors) for neighbors in adj_nodelist]
node_negative_distribution = np.power(np.array(degree, dtype=np.float32),
0.75)
node_negative_distribution /= np.sum(node_negative_distribution)
node_sampling = AliasSampling(prob=node_negative_distribution)
return node_negative_distribution, node_sampling
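# (Comment added for clarity; not part of the original file.) Raising node degrees to
# the 0.75 power and renormalising gives the smoothed "unigram" negative-sampling
# distribution used by word2vec-style models: high-degree nodes are drawn more often,
# but not in direct proportion to their degree.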
def get_negative_sampling(pairs, adj_nodelist, Q=3, node_sampling='atlas'):
num_of_nodes = len(adj_nodelist) # 8
u_i = []
u_j = []
graph_label = []
node_negative_distribution, nodesampling = negative_sampling(adj_nodelist)
for index in range(0, num_of_nodes):
u_i.append(pairs[index][0])
u_j.append(pairs[index][1])
graph_label.append(1)
for i in range(Q):
while True:
if node_sampling == 'numpy':
negative_node = np.random.choice(num_of_nodes, p=node_negative_distribution)
if negative_node not in adj_nodelist[pairs[index][0]]:
break
elif node_sampling == 'atlas':
negative_node = nodesampling.sampling()
if negative_node not in adj_nodelist[pairs[index][0]]:
break
elif node_sampling == 'uniform':
negative_node =
| np.random.randint(0, num_of_nodes) | numpy.random.randint |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 21 14:36:49 2018
@author: Zymieth
"""
import numpy as np
def merge_sort(num_list):
'''
Implementation of vanilla merge sort, O(nlogn)
'''
if len(num_list) == 1:
return num_list
l = len(num_list)//2
l1 = merge_sort(num_list[:l])
l2 = merge_sort(num_list[l:])
v = []
i = 0
j = 0
# merging subroutine, break from loop when one of the pointers falls off
for k in range(len(l1)+len(l2)):
if j == len(l2) or i == len(l1): break
if l1[i] <= l2[j]:
v.append(l1[i])
i += 1
else:
v.append(l2[j])
j += 1
# handling final remaining elements in either l1 or l2
while(i < len(l1)):
v.append(l1[i])
i += 1
while(j < len(l2)):
v.append(l2[j])
j += 1
return v
def count_inversions(num_list, count = 0):
'''
Counts number of inversions in array of numbers num_list
'''
if len(num_list) == 1:
return num_list, count
l = len(num_list)//2
l1, count = count_inversions(num_list[:l], count)
l2, count = count_inversions(num_list[l:], count)
v = []
i = 0
j = 0
# merging subroutine, break from loop when one of the pointers falls off
for k in range(len(l1)+len(l2)):
if j == len(l2) or i == len(l1): break
if l1[i] <= l2[j]:
v.append(l1[i])
i += 1
else:
v.append(l2[j])
j += 1
count += len(l1)-i
# handling final remaining elements in either l1 or l2
while(i < len(l1)):
v.append(l1[i])
i += 1
while(j < len(l2)):
v.append(l2[j])
j += 1
return v, count
###################################################### TESTS: merge_sort
n = np.array([2,5,1,3,4])
n = merge_sort(n)
assert np.all([n[i] <= n[i+1] for i in range(len(n)-1)])
n = np.array([6,3,6,8,3,2,-40,20,4.2,4,0,3,215])
n = merge_sort(n)
assert np.all([n[i] <= n[i+1] for i in range(len(n)-1)])
n = np.random.rand(100)
n = merge_sort(n)
assert np.all([n[i] <= n[i+1] for i in range(len(n)-1)])
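# Hedged addition (not part of the original test script): a quick sanity check for
# count_inversions. [1, 3, 5, 2, 4, 6] contains exactly three inversions:
# (3, 2), (5, 2) and (5, 4).
sorted_list, n_inv = count_inversions([1, 3, 5, 2, 4, 6])
assert sorted_list == [1, 2, 3, 4, 5, 6]
assert n_inv == 3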
n = np.loadtxt(r"L:\Algorithms\tests-merge.txt")
n = merge_sort(
| np.array(n) | numpy.array |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 4 10:33:40 2014
Copyright (c) 2013-2017, CEA/DSV/I2BM/Neurospin. All rights reserved.
@author: <NAME>
@email: <EMAIL>
@license: BSD 3-clause.
"""
from nose.tools import assert_less, assert_equal, assert_almost_equal
import numpy as np
import parsimony.utils.consts as consts
try:
from .tests import TestCase # When imported as a package.
except:
from tests import TestCase # When run as a program.
# TODO: Test penalty_start.
# TODO: Test total variation.
class TestLogisticRegression(TestCase):
def test_logistic_regression(self):
# Spams: http://spams-devel.gforge.inria.fr/doc-python/html/doc_spams006.html#toc23
import numpy as np
import parsimony.functions.losses as losses
import parsimony.functions.nesterov.tv as tv
import parsimony.algorithms.gradient as gradient
import parsimony.algorithms.proximal as proximal
import parsimony.utils.weights as weights
import parsimony.utils.maths as maths
import parsimony.estimators as estimators
np.random.seed(42)
start_vector = weights.RandomUniformWeights(normalise=True)
px = 4
py = 4
pz = 4
shape = (pz, py, px)
n, p = 50, np.prod(shape)
A = tv.linear_operator_from_shape(shape)
alpha = 0.9
Sigma = alpha * np.eye(p, p) \
+ (1.0 - alpha) * np.random.randn(p, p)
mean = np.zeros(p)
X = np.random.multivariate_normal(mean, Sigma, n)
y = np.array(np.random.randint(0, 2, (n, 1)), dtype=X.dtype)
eps = 1e-8
max_iter = 2500
gd = gradient.GradientDescent(eps=eps, max_iter=max_iter)
lr = losses.LogisticRegression(X, y, mean=True)
beta_start = start_vector.get_weights(p)
beta = gd.run(lr, beta_start)
try:
import spams
params = {
"loss": "logistic",
"regul": "l2",
"lambda1": 0.0,
"max_it": max_iter,
"tol": eps,
"ista": True,
"numThreads": -1,
"intercept": False,
}
y_ = y.copy()
y_[y_ == 0.0] = -1.0
beta_spams, optim_info = \
spams.fistaFlat(Y=np.asfortranarray(y_),
X=np.asfortranarray(X),
W0=np.asfortranarray(beta_start),
return_optim_info=True,
**params)
# print beta_spams
except ImportError:
beta_spams = np.asarray(
[[0.52689775], [-2.21446548], [-1.68294898], [-1.22239288],
[0.47106769], [-0.10104761], [0.54922885], [-0.50684862],
[0.01819947], [-0.41118406], [-0.01530228], [0.64481785],
[3.5877543], [0.50909281], [0.52942673], [1.11978225],
[-1.58908044], [-1.19893318], [0.14065587], [0.82819336],
[0.3968046], [0.26822936], [0.25214453], [1.84717067],
[1.66235707], [0.38522443], [0.63089985], [-1.25171818],
[0.17358699], [-0.47456136], [-1.89701774], [1.06870497],
[-0.44173062], [-0.67056484], [-1.89417281], [1.61253148],
[1.509571], [-0.38479991], [-0.7179952], [-2.62763962],
[-1.27630807], [0.63975966], [1.42323595], [1.1770713],
[-2.69662968], [1.05365595], [0.90026447], [-0.68251909],
[0.01953592], [-0.55014376], [1.26436814], [0.04729847],
[0.85184395], [0.85604811], [1.76795929], [1.08078563],
[-0.13504478], [-0.36605844], [-0.40414262],
[-2.38631966], [-1.94121299], [0.23513673], [1.17573164],
[1.69009136]])
mu = None
logreg_est = estimators.LogisticRegressionL1L2TV(0.0, 0.0, 0.0,
A=A, mu=mu,
class_weight=None,
algorithm=proximal.ISTA(),
algorithm_params=dict(eps=eps,
max_iter=max_iter),
mean=True)
logreg_est.fit(X, y)
re = maths.norm(beta - beta_spams) / maths.norm(beta_spams)
# print "re:", re
assert_almost_equal(re, 0.058501,
msg="The found regression vector is not correct.",
places=5)
re = maths.norm(logreg_est.beta - beta_spams) / maths.norm(beta_spams)
# print "re:", re
assert_almost_equal(re, 0.090917,
msg="The found regression vector is not correct.",
places=5)
f_spams = lr.f(beta_spams)
f_parsimony = lr.f(beta)
if abs(f_spams) > consts.TOLERANCE:
err = abs(f_parsimony - f_spams) / f_spams
else:
err = abs(f_parsimony - f_spams)
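# (Comment added for clarity; not part of the original test.) The error is measured
# relative to f_spams when that value is non-negligible, and as an absolute
# difference otherwise, so the tolerance below is meaningful in both regimes.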
# print "err:", err
assert_almost_equal(err, 0.265056,
msg="The found regression vector does not give " \
"the correct function value.",
places=5)
f_logreg = lr.f(logreg_est.beta)
if abs(f_spams) > consts.TOLERANCE:
err = abs(f_logreg - f_spams) / f_spams
else:
err = abs(f_logreg - f_spams)
# print "err:", err
assert_almost_equal(err, 0.265555,
msg="The found regression vector does not give " \
"the correct function value.",
places=5)
def test_l1(self):
# Spams: http://spams-devel.gforge.inria.fr/doc-python/html/doc_spams006.html#toc23
from parsimony.functions import CombinedFunction
import parsimony.functions.losses as losses
import parsimony.functions.penalties as penalties
import parsimony.algorithms.proximal as proximal
import parsimony.utils.weights as weights
import parsimony.utils.maths as maths
import parsimony.estimators as estimators
import parsimony.functions.nesterov.tv as tv
np.random.seed(42)
start_vector = weights.RandomUniformWeights(normalise=True)
px = 4
py = 4
pz = 4
shape = (pz, py, px)
n, p = 50, np.prod(shape)
A = tv.linear_operator_from_shape(shape)
alpha = 0.9
Sigma = alpha * np.eye(p, p) \
+ (1.0 - alpha) * np.random.randn(p, p)
mean = np.zeros(p)
X = np.random.multivariate_normal(mean, Sigma, n)
y = np.array(np.random.randint(0, 2, (n, 1)), dtype=X.dtype)
eps = 1e-8
max_iter = 5000
l = 0.001
k = 0.0
g = 0.0
algorithm = proximal.ISTA(eps=eps, max_iter=max_iter)
function = CombinedFunction()
function.add_loss(losses.LogisticRegression(X, y, mean=True))
function.add_prox(penalties.L1(l))
beta_start = start_vector.get_weights(p)
beta = algorithm.run(function, beta_start)
try:
import spams
params = {"loss": "logistic",
"regul": "l1",
"lambda1": l,
"max_it": max_iter,
"tol": eps,
"ista": True,
"numThreads": -1,
"intercept": False,
}
y_ = y.copy()
y_[y_ == 0.0] = -1.0
beta_spams, optim_info = \
spams.fistaFlat(Y=np.asfortranarray(y_),
X=np.asfortranarray(X),
W0=np.asfortranarray(beta_start),
return_optim_info=True,
**params)
# print beta_spams
except ImportError:
beta_spams = np.asarray(
[[0.], [-2.88026664], [-1.75569266], [-0.10270371], [0.],
[0.], [0.80004525], [0.], [0.], [-0.53624278], [0.], [0.],
[3.43963221], [0.], [0.], [0.13833778], [-1.08009022],
[-0.12296525], [0.], [0.79860615], [0.], [0.], [0.],
[0.99982627], [0.79121183], [0.], [0.23196695], [0.],
[0.], [0.], [-1.83907578], [0.08524181], [0.],
[-0.34237679], [-1.47977854], [2.04629155], [0.12090069],
[0.], [-0.05009145], [-1.89909595], [-1.62591414], [0.],
[0.61486582], [0.], [-2.26199047], [0.57935073], [0.],
[0.], [0.], [-0.23379695], [0.67479097], [0.], [0.], [0.],
[1.03600365], [0.4471462], [0.0916708], [0.], [0.],
[-1.97299116], [-2.17942795], [0.], [0.10224431],
[0.15781433]])
mu = None
logreg_est = estimators.LogisticRegressionL1L2TV(l, k, g,
A=A, mu=mu,
algorithm=proximal.ISTA(),
algorithm_params=dict(eps=eps,
max_iter=max_iter),
class_weight=None)
logreg_est.fit(X, y)
re = maths.norm(beta - beta_spams) / maths.norm(beta_spams)
# print "re:", re
assert_less(re, 0.05,
msg="The found regression vector is not correct.")
re = maths.norm(logreg_est.beta - beta_spams) / maths.norm(beta_spams)
# print "re:", re
assert_less(re, 0.04,
msg="The found regression vector is not correct.")
f_spams = function.f(beta_spams)
f_parsimony = function.f(beta)
if abs(f_spams) > consts.TOLERANCE:
err = abs(f_parsimony - f_spams) / f_spams
else:
err = abs(f_parsimony - f_spams)
# print "err:", err
assert_less(err, 0.002,
msg="The found regression vector does not give "
"the correct function value.")
f_logreg = function.f(logreg_est.beta)
if abs(f_spams) > consts.TOLERANCE:
err = abs(f_logreg - f_spams) / f_spams
else:
err = abs(f_logreg - f_spams)
# print "err:", err
assert_less(err, 0.002,
msg="The found regression vector does not give "
"the correct function value.")
def test_l1_intercept(self):
# Spams: http://spams-devel.gforge.inria.fr/doc-python/html/doc_spams006.html#toc23
from parsimony.functions import CombinedFunction
import parsimony.functions.losses as losses
import parsimony.functions.penalties as penalties
import parsimony.algorithms.proximal as proximal
import parsimony.utils.weights as weights
import parsimony.utils.maths as maths
import parsimony.estimators as estimators
import parsimony.functions.nesterov.tv as tv
np.random.seed(42)
start_vector = weights.RandomUniformWeights(normalise=True)
px = 4
py = 4
pz = 4
shape = (pz, py, px)
n, p = 50, np.prod(shape) + 1
A = tv.linear_operator_from_shape(shape)
alpha = 0.9
Sigma = alpha * np.eye(p - 1, p - 1) \
+ (1.0 - alpha) * np.random.randn(p - 1, p - 1)
mean = np.zeros(p - 1)
X0 = np.random.multivariate_normal(mean, Sigma, n)
X_parsimony = np.hstack((np.ones((n, 1)), X0))
X_spams = np.hstack((X0, np.ones((n, 1))))
y = np.array(np.random.randint(0, 2, (n, 1)), dtype=X0.dtype)
eps = 1e-8
max_iter = 5000
l = 0.001
k = 0.0
g = 0.0
algorithm = proximal.ISTA(eps=eps, max_iter=max_iter)
function = CombinedFunction()
function.add_loss(losses.LogisticRegression(X_parsimony, y,
mean=True))
function.add_prox(penalties.L1(l, penalty_start=1))
beta_start = start_vector.get_weights(p)
beta = algorithm.run(function, beta_start)
try:
import spams
params = {"loss": "logistic",
"regul": "l1",
"lambda1": l,
"max_it": max_iter,
"tol": eps,
"ista": True,
"numThreads": -1,
"intercept": True,
}
y_ = y.copy()
y_[y_ == 0.0] = -1.0
beta_spams, optim_info = \
spams.fistaFlat(Y=np.asfortranarray(y_),
X=np.asfortranarray(X_spams),
W0=np.asfortranarray(beta_start),
return_optim_info=True,
**params)
# print beta_spams
except ImportError:
beta_spams = np.asarray(
[[0.], [-2.84363846], [-1.76319723], [-0.08899283], [0.],
[0.], [0.82070549], [0.], [0.], [-0.55865068], [0.], [0.],
[3.42071574], [0.], [0.], [0.16652413], [-1.0945443],
[-0.10645896], [0.], [0.81766639], [0.], [0.], [0.],
[0.98030827], [0.79143542], [0.], [0.24412592], [0.],
[0.], [0.], [-1.82650966], [0.06380246], [0.],
[-0.33460657], [-1.45350214], [2.04841906], [0.09839289],
[0.], [-0.04710919], [-1.89327998], [-1.6531038], [0.],
[0.59239045], [0.], [-2.29161034], [0.57808221], [0.],
[0.], [0.], [-0.24979285], [0.668358], [0.], [0.], [0.],
[1.00250306], [0.44168083], [0.09592583], [0.], [0.],
[-1.97492771], [-2.21610942], [0.], [0.10819641],
[0.17640387], [0.0920676]])
beta_spams = np.vstack((beta_spams[p - 1, :],
beta_spams[0:p - 1, :]))
mu = None
logreg_est = estimators.LogisticRegressionL1L2TV(l, k, g,
A=A, mu=mu,
algorithm=proximal.ISTA(),
algorithm_params=dict(eps=eps,
max_iter=max_iter),
penalty_start=1,
class_weight=None)
logreg_est.fit(X_parsimony, y)
re = maths.norm(beta - beta_spams) / maths.norm(beta_spams)
# print "re:", re
assert_less(re, 0.039952,
msg="The found regression vector is not correct.")
re = maths.norm(logreg_est.beta - beta_spams) / maths.norm(beta_spams)
# print "re:", re
assert_less(re, 0.040015,
msg="The found regression vector is not correct.")
f_spams = function.f(beta_spams)
f_parsimony = function.f(beta)
if abs(f_spams) > consts.TOLERANCE:
err = abs(f_parsimony - f_spams) / f_spams
else:
err = abs(f_parsimony - f_spams)
# print "err:", err
assert_less(err, 5e-3, msg="The found regression vector does not " \
"give the correct function value.")
f_logreg = function.f(logreg_est.beta)
if abs(f_spams) > consts.TOLERANCE:
err = abs(f_logreg - f_spams) / f_spams
else:
err = abs(f_logreg - f_spams)
# print "err:", err
assert_less(err, 5e-3, msg="The found regression vector does not " \
"give the correct function value.")
def test_l2(self):
# Spams: http://spams-devel.gforge.inria.fr/doc-python/html/doc_spams006.html#toc23
from parsimony.functions import CombinedFunction
import parsimony.functions.losses as losses
import parsimony.functions.penalties as penalties
import parsimony.algorithms.proximal as proximal
import parsimony.algorithms.gradient as gradient
import parsimony.utils.weights as weights
import parsimony.utils.maths as maths
import parsimony.estimators as estimators
import parsimony.functions.nesterov.tv as tv
np.random.seed(42)
start_vector = weights.RandomUniformWeights(normalise=True)
px = 4
py = 4
pz = 4
shape = (pz, py, px)
n, p = 50, np.prod(shape)
A = tv.linear_operator_from_shape(shape)
alpha = 0.9
Sigma = alpha * np.eye(p, p) \
+ (1.0 - alpha) * np.random.randn(p, p)
mean = np.zeros(p)
X = np.random.multivariate_normal(mean, Sigma, n)
y = np.array(np.random.randint(0, 2, (n, 1)), dtype=X.dtype)
eps = 1e-8
max_iter = 1000
l = 0.0
k = 0.618
g = 0.0
gd = gradient.GradientDescent(eps=eps, max_iter=max_iter)
function = CombinedFunction()
function.add_loss(losses.LogisticRegression(X, y, mean=True))
function.add_penalty(penalties.L2Squared(k))
beta_start = start_vector.get_weights(p)
beta = gd.run(function, beta_start)
try:
import spams
params = {"loss": "logistic",
"regul": "l2",
"lambda1": k,
"max_it": max_iter,
"tol": eps,
"ista": True,
"numThreads": -1,
"intercept": False,
}
y_ = y.copy()
y_[y_ == 0.0] = -1.0
beta_spams, optim_info = \
spams.fistaFlat(Y=np.asfortranarray(y_),
X=np.asfortranarray(X),
W0=np.asfortranarray(beta_start),
return_optim_info=True,
**params)
# print beta_spams
except ImportError:
beta_spams = np.asarray(
[[5.33853917e-02], [-1.42699512e-01], [-8.72668527e-02],
[-3.65487726e-02], [2.83354831e-02], [1.13264613e-02],
[8.15039993e-03], [-2.37846195e-02], [-2.19065128e-03],
[-5.16555341e-02], [-3.15120681e-02], [-4.22206985e-02],
[1.34004557e-01], [8.44413972e-02], [1.69872397e-02],
[7.28223134e-02], [-1.37888694e-01], [-8.35291457e-02],
[5.83353207e-02], [5.89209520e-02], [3.30824577e-02],
[-1.73109060e-05], [1.48936475e-02], [8.74385474e-02],
[1.00948985e-01], [1.08614513e-02], [6.51250680e-03],
[-1.13890284e-01], [5.54004534e-02], [-9.89017587e-02],
[-5.43921421e-02], [5.83618885e-02], [8.52661577e-03],
[-3.61046922e-02], [-1.22802849e-01], [9.65240799e-02],
[6.63903145e-02], [-7.17642493e-02], [-1.04853964e-02],
[-1.23097313e-01], [-6.13912331e-02], [8.97501765e-03],
[6.78529451e-02], [4.33676933e-02], [-1.06618077e-01],
[3.40561568e-02], [2.59810765e-02], [1.66312745e-02],
[-1.60401993e-02], [-3.82916547e-02], [1.59030182e-02],
[4.43776091e-02], [-2.76431899e-02], [3.59701032e-03],
[7.27998486e-02], [1.41382762e-02], [-1.63489132e-02],
[1.24814735e-02], [-3.02671096e-02], [-1.92387219e-01],
[-9.46001894e-02], [-2.06080852e-02], [6.72162798e-02],
[5.40284401e-02]])
mu = None
logreg_est = estimators.LogisticRegressionL1L2TV(l, k, g,
A=A, mu=mu,
algorithm=proximal.ISTA(),
algorithm_params=dict(eps=eps,
max_iter=max_iter),
class_weight=None)
logreg_est.fit(X, y)
re = maths.norm(beta - beta_spams) / maths.norm(beta_spams)
# print "re:", re
assert_almost_equal(re, 1.188998e-08,
msg="The found regression vector is not correct.",
places=5)
re = maths.norm(logreg_est.beta - beta_spams) / maths.norm(beta_spams)
# print "re:", re
assert_almost_equal(re, 3.738028e-08,
msg="The found regression vector is not correct.",
places=5)
f_spams = function.f(beta_spams)
f_parsimony = function.f(beta)
if abs(f_spams) > consts.TOLERANCE:
err = abs(f_parsimony - f_spams) / f_spams
else:
err = abs(f_parsimony - f_spams)
# print "err:", err
assert_almost_equal(err, 2.046041e-16,
msg="The found regression vector does not give " \
"the correct function value.",
places=5)
f_logreg = function.f(logreg_est.beta)
if abs(f_spams) > consts.TOLERANCE:
err = abs(f_logreg - f_spams) / f_spams
else:
err = abs(f_logreg - f_spams)
# print "err:", err
assert_almost_equal(err, 2.046041e-16,
msg="The found regression vector does not give " \
"the correct function value.",
places=5)
# Compare functions
import parsimony.functions as functions
from parsimony.utils import class_weight_to_sample_weight
sample_weight = class_weight_to_sample_weight(None, y)
l = 10000.0
function_1 = losses.RidgeLogisticRegression(X, y, l,
weights=sample_weight,
penalty_start=0,
mean=True)
function_2 = functions.CombinedFunction()
function_2.add_loss(losses.LogisticRegression(X, y, mean=True))
function_2.add_penalty(penalties.L2Squared(l, penalty_start=0))
beta = start_vector.get_weights(p)
assert abs(function_1.f(beta) - function_2.f(beta)) < consts.TOLERANCE
assert maths.norm(function_1.grad(beta) - function_2.grad(beta)) \
< consts.TOLERANCE
def test_l2_intercept(self):
# Spams: http://spams-devel.gforge.inria.fr/doc-python/html/doc_spams006.html#toc23
from parsimony.functions import CombinedFunction
import parsimony.functions.losses as losses
import parsimony.functions.penalties as penalties
import parsimony.algorithms.gradient as gradient
import parsimony.algorithms.proximal as proximal
import parsimony.utils.weights as weights
import parsimony.utils.maths as maths
import parsimony.estimators as estimators
import parsimony.functions.nesterov.tv as tv
np.random.seed(42)
start_vector = weights.RandomUniformWeights(normalise=True)
px = 4
py = 4
pz = 4
shape = (pz, py, px)
n, p = 50, np.prod(shape) + 1
A = tv.linear_operator_from_shape(shape)
alpha = 0.9
Sigma = alpha * np.eye(p - 1, p - 1) \
+ (1.0 - alpha) * np.random.randn(p - 1, p - 1)
mean = np.zeros(p - 1)
X0 = np.random.multivariate_normal(mean, Sigma, n)
X_parsimony = np.hstack((np.ones((n, 1)), X0))
X_spams = np.hstack((X0, np.ones((n, 1))))
y = np.array(np.random.randint(0, 2, (n, 1)), dtype=X0.dtype)
eps = 1e-8
max_iter = 60
l = 0.0
k = 0.618
g = 0.0
gd = gradient.GradientDescent(eps=eps, max_iter=max_iter)
function = CombinedFunction()
function.add_loss(losses.LogisticRegression(X_parsimony, y,
mean=True))
function.add_penalty(penalties.L2Squared(k, penalty_start=1))
beta_start = start_vector.get_weights(p)
beta = gd.run(function, beta_start)
try:
import spams
params = {"loss": "logistic",
"regul": "l2",
"lambda1": k,
"max_it": max_iter,
"tol": eps,
"ista": True,
"numThreads": -1,
"intercept": True,
}
y_ = y.copy()
y_[y_ == 0.0] = -1.0
beta_spams, optim_info = \
spams.fistaFlat(Y=np.asfortranarray(y_),
X=np.asfortranarray(X_spams),
W0=np.asfortranarray(beta_start),
return_optim_info=True,
**params)
# print beta_spams
except ImportError:
beta_spams = np.asarray(
[[0.05313997], [-0.14296077], [-0.08703832], [-0.03643685],
[0.028458], [0.01129562], [0.00812442], [-0.02348346],
[-0.00195203], [-0.05122321], [-0.03192026],
[-0.04222126], [0.13433481], [0.08448324], [0.01667175],
[0.07278472], [-0.1378397], [-0.08352936], [0.05828094],
[0.0585371], [0.0332106], [-0.00051077], [0.01486762],
[0.08740097], [0.10075053], [0.0109332], [0.00625134],
[-0.11434899], [0.05559258], [-0.09866443], [-0.05440752],
[0.05850469], [0.00810353], [-0.03600913], [-0.12275238],
[0.09644776], [0.06654187], [-0.07197764], [-0.01066],
[-0.12312596], [-0.06133673], [0.0088412], [0.06797135],
[0.0432135], [-0.1066665], [0.03402393], [0.02572417],
[0.01659111], [-0.01602115], [-0.03806548], [0.01591459],
[0.04462776], [-0.02769855], [0.00410674], [0.07298038],
[0.01383948], [-0.01658243], [0.01240699], [-0.03036137],
[-0.19220114], [-0.09440627], [-0.02093642], [0.06733479],
[0.05368342], [-0.00686121]])
beta_spams = np.vstack((beta_spams[p - 1, :],
beta_spams[0:p - 1, :]))
mu = None
logreg_est = estimators.LogisticRegressionL1L2TV(l, k, g,
A=A, mu=mu,
algorithm=proximal.ISTA(),
algorithm_params=dict(eps=eps,
max_iter=max_iter),
penalty_start=1,
class_weight=None)
logreg_est.fit(X_parsimony, y)
re = maths.norm(beta - beta_spams) / maths.norm(beta_spams)
# print "re:", re
assert_less(re, 5e-3,
msg="The found regression vector is not correct.")
re = maths.norm(logreg_est.beta - beta_spams) / maths.norm(beta_spams)
# print "re:", re
assert_less(re, 5e-3,
msg="The found regression vector is not correct.")
f_spams = function.f(beta_spams)
f_parsimony = function.f(beta)
if abs(f_spams) > consts.TOLERANCE:
err = abs(f_parsimony - f_spams) / f_spams
else:
err = abs(f_parsimony - f_spams)
# print "err:", err
assert_less(err, 5e-6, msg="The found regression vector does not " \
"give the correct function value.")
f_logreg = function.f(logreg_est.beta)
if abs(f_spams) > consts.TOLERANCE:
err = abs(f_logreg - f_spams) / f_spams
else:
err = abs(f_logreg - f_spams)
# print "err:", err
assert_less(err, 5e-6, msg="The found regression vector does not " \
"give the correct function value.")
def test_gl(self):
# Spams: http://spams-devel.gforge.inria.fr/doc-python/html/doc_spams006.html#toc23
from parsimony.functions import CombinedFunction
import parsimony.functions.losses as losses
import parsimony.functions.penalties as penalties
import parsimony.algorithms.proximal as proximal
import parsimony.utils.weights as weights
import parsimony.utils.maths as maths
import parsimony.functions.nesterov.gl as gl
import parsimony.estimators as estimators
np.random.seed(42)
start_vector = weights.RandomUniformWeights(normalise=True)
# Note that p must be even!
n, p = 50, 100
groups = [list(range(0, int(p / 2))), list(range(int(p / 2), p))]
# weights = [1.5, 0.5]
A = gl.linear_operator_from_groups(p, groups=groups) # , weights=weights)
alpha = 0.9
V = np.random.randn(p, p)
Sigma = alpha * np.eye(p, p) \
+ (1.0 - alpha) * np.dot(V.T, V)
mean = np.zeros(p)
X = np.random.multivariate_normal(mean, Sigma, n)
y = np.array(np.random.randint(0, 2, (n, 1)), dtype=X.dtype)
eps = 1e-8
max_iter = 10000
l = 0.0
k = 0.0
g = 0.001
mu = 5e-4
algorithm = proximal.ISTA(eps=eps, max_iter=max_iter)
function = CombinedFunction()
function.add_loss(losses.LogisticRegression(X, y, mean=True))
function.add_penalty(gl.GroupLassoOverlap(l=g, A=A, mu=mu,
penalty_start=0))
beta_start = start_vector.get_weights(p)
beta = algorithm.run(function, beta_start)
try:
import spams
params = {"loss": "logistic",
"regul": "group-lasso-l2",
"groups": np.array([1] * int(p / 2) + [2] * int(p / 2),
dtype=np.int32),
"lambda1": g,
"max_it": max_iter,
"tol": eps,
"ista": True,
"numThreads": -1,
"intercept": False,
}
y_ = y.copy()
y_[y_ == 0.0] = -1.0
beta_spams, optim_info = \
spams.fistaFlat(Y=np.asfortranarray(y_),
X=np.asfortranarray(X),
W0=np.asfortranarray(beta_start),
return_optim_info=True,
**params)
except ImportError:
beta_spams = np.asarray(
[[4.69125211e-04], [-5.76698788e-02], [-2.40078974e-01],
[-6.61532107e-03], [-3.03512327e-01], [-1.83545174e-01],
[-2.86425232e-01], [9.25436278e-02], [-3.69882368e-02],
[-2.58152199e-01], [-1.57006492e-01], [-2.12059086e-01],
[-3.64822932e-01], [-1.77213770e-02], [1.37712226e-01],
[1.36983267e-01], [1.21019611e-01], [-1.14300309e-01],
[-1.07108453e-01], [2.94683117e-01], [4.62945669e-02],
[2.04873107e-01], [1.14232456e-01], [-1.02701573e-01],
[-1.66498758e-01], [-3.40062598e-01], [5.78832448e-02],
[-3.17271478e-02], [-2.17243625e-01], [7.18038071e-02],
[-2.67045631e-01], [-2.09562234e-01], [1.79610439e-01],
[-5.40938258e-01], [-5.36039494e-01], [-2.89187125e-02],
[4.33817576e-01], [2.67831633e-01], [-1.63875210e-01],
[-4.31756685e-01], [2.24698003e-01], [3.49821459e-01],
[2.31160454e-01], [-7.42394377e-02], [1.13454429e-01],
[2.86104705e-01], [3.23831912e-01], [7.53906314e-02],
[2.92770430e-01], [-7.43106086e-02], [3.48688828e-01],
[-9.88751796e-02], [3.50475276e-02], [-1.00405317e-01],
[-4.16408430e-01], [4.55376777e-02], [2.01379801e-01],
[2.05662044e-01], [2.78957686e-01], [-2.66772715e-02],
[-5.66780405e-02], [6.13880915e-02], [3.53253584e-02],
[2.83592934e-01], [-2.01475234e-01], [7.37472943e-02],
[3.38869207e-02], [4.57371333e-01], [2.33202529e-01],
[8.48612914e-02], [-1.53078084e-01], [-4.68795061e-02],
[2.60334837e-01], [5.34128752e-01], [3.09231961e-01],
[6.75427437e-02], [-3.70493876e-01], [-3.85837135e-02],
[-1.32100270e-01], [-2.41449544e-01], [1.12424646e-01],
[4.00124617e-01], [2.69803273e-01], [1.75762562e-01],
[1.24632543e-01], [2.61731447e-01], [2.66625353e-01],
[3.10319953e-01], [-2.33788511e-01], [-3.89499749e-01],
[-8.00569373e-02], [4.50647251e-01], [3.38820788e-01],
[-6.44928333e-02], [2.23326668e-01], [3.05168971e-01],
[2.92517617e-01], [-3.49537305e-01], [2.57928416e-02],
[-1.42370130e-01]])
re = maths.norm(beta - beta_spams) / maths.norm(beta_spams)
# print "re:", re
assert_less(re, 0.25, "The found regression vector is not correct.")
# mu = None
logreg_est = estimators.LogisticRegressionL1L2GL(l, k, g,
A=A, mu=mu,
algorithm=proximal.ISTA(),
algorithm_params=dict(eps=eps,
max_iter=max_iter),
penalty_start=0,
mean=True,
class_weight=None)
logreg_est.fit(X, y)
re = maths.norm(logreg_est.beta - beta_spams) / maths.norm(beta_spams)
# print "re:", res
assert_less(re, 0.27, "The found regression vector is not correct.")
f_parsimony = function.f(beta)
f_spams = function.f(beta_spams)
if abs(f_spams) > consts.TOLERANCE:
err = abs(f_parsimony - f_spams) / f_spams
else:
err = abs(f_parsimony - f_spams)
# print "err:", err
assert_less(re, 0.27, "The found regression vector does not give "
"the correct function value.")
f_logreg = function.f(logreg_est.beta)
if abs(f_spams) > consts.TOLERANCE:
err = abs(f_logreg - f_spams) / f_spams
else:
err = abs(f_logreg - f_spams)
# print "err:", err
assert_less(err, 0.018, "The found regression vector does not give "
"the correct function value.")
def test_l1_l2(self):
# Spams: http://spams-devel.gforge.inria.fr/doc-python/html/doc_spams006.html#toc23
from parsimony.functions import CombinedFunction
import parsimony.functions.losses as losses
import parsimony.functions.penalties as penalties
import parsimony.algorithms.proximal as proximal
import parsimony.utils.weights as weights
import parsimony.utils.maths as maths
import parsimony.estimators as estimators
import parsimony.functions.nesterov.tv as tv
np.random.seed(42)
start_vector = weights.RandomUniformWeights(normalise=True)
px = 4
py = 4
pz = 4
shape = (pz, py, px)
n, p = 50, np.prod(shape)
A = tv.linear_operator_from_shape(shape)
alpha = 0.9
Sigma = alpha * np.eye(p, p) \
+ (1.0 - alpha) * np.random.randn(p, p)
mean = np.zeros(p)
X = np.random.multivariate_normal(mean, Sigma, n)
y = np.array(np.random.randint(0, 2, (n, 1)), dtype=X.dtype)
eps = 1e-8
max_iter = 1000
l = 0.0318
k = 1.0 - l
g = 0.0
algorithm = proximal.ISTA(eps=eps, max_iter=max_iter)
function = CombinedFunction()
function.add_loss(losses.LogisticRegression(X, y, mean=True))
function.add_penalty(penalties.L2Squared(k))
function.add_prox(penalties.L1(l))
beta_start = start_vector.get_weights(p)
beta = algorithm.run(function, beta_start)
try:
import spams
params = {"loss": "logistic",
"regul": "elastic-net",
"lambda1": l,
"lambda2": k,
"max_it": max_iter,
"tol": eps,
"ista": True,
"numThreads": -1,
"intercept": False,
}
y_ = y.copy()
y_[y_ == 0.0] = -1.0
beta_spams, optim_info = \
spams.fistaFlat(Y=np.asfortranarray(y_),
X=np.asfortranarray(X),
W0=np.asfortranarray(beta_start),
return_optim_info=True,
**params)
# print beta_spams
except ImportError:
beta_spams = np.asarray(
[[0.01865551], [-0.08688886], [-0.03926606], [0.], [0.],
[0.], [0.], [0.], [0.], [-0.01936916], [-0.00304969],
[-0.01971763], [0.06632631], [0.04543627], [0.],
[0.02784156], [-0.08828684], [-0.03966364], [0.01372838],
[0.02745133], [0.], [0.], [0.], [0.04458341],
[0.05834843], [0.], [0.], [-0.06292223], [0.02541458],
[-0.05460034], [-0.0122713], [0.01416604], [0.], [0.],
[-0.06551936], [0.04436878], [0.02159705], [-0.0397886],
[0.], [-0.06515573], [-0.01723167], [0.], [0.01591231],
[0.00780168], [-0.04363237], [0.], [0.], [0.], [0.],
[-0.00113133], [0.], [0.01304487], [-0.01113588], [0.],
[0.03037163], [0.], [0.], [0.], [0.], [-0.12029642],
[-0.03927743], [0.], [0.01994069], [0.00128412]])
mu = None
logreg_est = estimators.LogisticRegressionL1L2TV(l, k, g,
A=A, mu=mu,
algorithm=proximal.ISTA(),
algorithm_params=dict(eps=eps,
max_iter=max_iter),
class_weight=None)
logreg_est.fit(X, y)
re = maths.norm(beta - beta_spams) / maths.norm(beta_spams)
# print "re:", re
assert_almost_equal(re, 1.129260e-09,
msg="The found regression vector is not correct.",
places=5)
re = maths.norm(logreg_est.beta - beta_spams) / maths.norm(beta_spams)
# print "re:", re
assert_almost_equal(re, 9.893653e-09,
msg="The found regression vector is not correct.",
places=5)
f_spams = function.f(beta_spams)
f_parsimony = function.f(beta)
if abs(f_spams) > consts.TOLERANCE:
err = abs(f_parsimony - f_spams) / f_spams
else:
err = abs(f_parsimony - f_spams)
# print "err:", err
assert_almost_equal(err, 1.737077e-16,
msg="The found regression vector does not give " \
"the correct function value.",
places=5)
f_logreg = function.f(logreg_est.beta)
if abs(f_spams) > consts.TOLERANCE:
err = abs(f_logreg - f_spams) / f_spams
else:
err = abs(f_logreg - f_spams)
# print "err:", err
assert_almost_equal(err, 1.737077e-16,
msg="The found regression vector does not give " \
"the correct function value.",
places=5)
def test_l1_l2_intercept(self):
# Spams: http://spams-devel.gforge.inria.fr/doc-python/html/doc_spams006.html#toc23
from parsimony.functions import CombinedFunction
import parsimony.functions.losses as losses
import parsimony.functions.penalties as penalties
import parsimony.algorithms.proximal as proximal
import parsimony.utils.weights as weights
import parsimony.utils.maths as maths
import parsimony.estimators as estimators
import parsimony.functions.nesterov.tv as tv
| np.random.seed(42) | numpy.random.seed |
"""
Script to compare the indices retained by the fitted PCA.
The script trains a norm-base model under the following three conditions:
- human
- monkey
- human + monkey
and compares the most important features kept by the PCA.
run: python -m tests.NormBase.t05_compare_retained_PCA_index.py
"""
import numpy as np
import tensorflow as tf
from utils.load_config import load_config
from utils.load_data import load_data
from models.NormBase import NormBase
np.set_printoptions(precision=3, linewidth=250, suppress=True)
# small testing examples
test = np.arange(36)
n_feature_map = 4
feature_map_size = 3
t = np.reshape(test, (feature_map_size, feature_map_size, n_feature_map))
print(t[:, :, 0])
print(t[:, :, 1])
print(t[:, :, 2])
print(t[:, :, 3])
x, y, z = np.unravel_index(17, (feature_map_size,feature_map_size,n_feature_map))
#x, y, z = get_feature_map_index(17, n_feature_map, feature_map_size)
print("({}, {}, {})".format(x, y, z))
# load configuration
config_name = "norm_base_investigate_PCA_m0001.json"
config = load_config(config_name)
# fit models with each condition
avatars = ["human_orig", "monkey_orig", "all_orig"]
# avatars = ["human_orig"]
indexes = []
# pca_threshold = [300, 300, 1500]
pca_threshold = [600, 600, 2000]
for i, avatar in enumerate(avatars):
# modify condition according to loop
config['train_avatar'] = avatar
# define and train norm base model
norm_base = NormBase(config, input_shape=(224, 224, 3))
norm_base.pca.var_threshold = pca_threshold[i]
norm_base.fit(load_data(config, train=True), fit_dim_red=True, fit_ref=False, fit_tun=False)
# get index from the feature having the most variance
predict_v4 = norm_base.v4_predict
var_predict = np.std(predict_v4, axis=0)
index = np.flip(np.argsort(var_predict))[:config['PCA']]
# save index
indexes.append(np.array(index))
indexes = np.array(indexes)
# get position within feature maps
v4_shape = norm_base.shape_v4
print("v4_shape", v4_shape)
n_feature_map = v4_shape[-1]
feature_map_size = v4_shape[1]
print("feature map size: ({}, {}, {})".format(feature_map_size, feature_map_size, n_feature_map))
print()
positions = []
for a, avatar in enumerate(avatars):
avatar_positions = []
for i in range(len(indexes[a])):
index = indexes[a, i]
x,y,f = np.unravel_index(index, v4_shape[1:])
#(x, y, f) = get_feature_map_index(index, n_feature_map, feature_map_size)
avatar_positions.append((x, y, f))
positions.append(np.array(avatar_positions))
positions = np.array(positions)
print("indexes:")
print(indexes)
# print positions
for i in range(np.shape(positions)[1]):
print("h: ({}) m: ({}) h+m ({})".format(positions[0, i], positions[1, i], positions[2, i]))
# print only features maps index
f_map_h = np.sort(positions[0, :, 2])
f_map_m =
| np.sort(positions[1, :, 2]) | numpy.sort |
import sys, os
Your_path = '/data/project/yinhuapark/ssl/'
sys.path.append(Your_path+'ssl_make_graphs')
from PygDocsGraphDataset import PygDocsGraphDataset as PDGD
from torch_geometric.data import DataLoader
import torch
import numpy as np
import random
import argparse
from gensim.models import Word2Vec
import pandas as pd
from tqdm import tqdm
def show_statisctic(train_set, test_set):
# min_len = 10000
# aver_len = 0
# max_len = 0
training_sent_num = []
training_vocab = set()
training_words = []
training_jisjoint_words = []
for data in tqdm(train_set):
training_sent_num.append(data.batch_n[-1].item() + 1)
training_words.append(data.x_p.size(0))
training_jisjoint_words.append(data.x_n.size(0))
for word in data.x_n_id.data.numpy().tolist():
training_vocab.add(word)
train_cs = pd.DataFrame(train_set.data.y_p.tolist())[0].value_counts().values.tolist()
train_p = train_cs[-1] / train_cs[0]
test_vocab = set()
test_sent_num = []
intersected_vocab = set()
test_words = []
test_disjoint_words = []
for data in tqdm(test_set):
test_sent_num.append(data.batch_n[-1].item()+1)
test_words.append(data.x_p.size(0))
test_disjoint_words.append(data.x_n.size(0))
for word in data.x_n_id.data.numpy().tolist():
test_vocab.add(word)
if word in training_vocab:
intersected_vocab.add(word)
test_cs = pd.DataFrame(test_set.data.y_p.tolist())[0].value_counts().values.tolist()
test_p = test_cs[-1] / test_cs[0]
avg_trianing_sent_num = np.array(training_sent_num).mean()
avg_test_sent_num= np.array(test_sent_num).mean()
avg_sent_num = np.array(training_sent_num+test_sent_num).mean()
avg_training_words = np.array(training_words).mean()
avg_training_disjoint_words =
| np.array(training_jisjoint_words) | numpy.array |
from __future__ import division
from scipy import signal, stats, ndimage
from datetime import datetime
import os
import re
from glob import glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from matplotlib.patches import Rectangle
import matplotlib.transforms as transforms
from matplotlib.collections import LineCollection
from mpl_toolkits.axes_grid1 import make_axes_locatable
import warnings
from ephysiopy.dacq2py import axonaIO
from ephysiopy.dacq2py.tetrode_dict import TetrodeDict
from ephysiopy.common import binning
from ephysiopy.common.ephys_generic import FieldCalcs
from ephysiopy.dacq2py.spikecalcs import SpikeCalcs
from ephysiopy.common.eegcalcs import EEGCalcs
from ephysiopy.dacq2py.cluster import Kluster
from ephysiopy.dacq2py import tintcolours as tcols
from ephysiopy.common.gridcell import SAC
from itertools import combinations
from mpl_toolkits.axes_grid1 import ImageGrid
import skimage, skimage.morphology
from skimage import feature
from collections import OrderedDict
warnings.filterwarnings("ignore",
message="divide by zero encountered in int_scalars")
warnings.filterwarnings("ignore",
message="divide by zero encountered in divide")
warnings.filterwarnings("ignore",
message="invalid value encountered in divide")
warnings.filterwarnings("ignore",
message="Casting complex values to real discards the imaginary part")
class Trial(axonaIO.IO, SAC, dict):
"""
Provides methods to plot electrophysiology data acquired using the Axona DACQ recording system
and methods to extract some measures from that data
The actual loading of the data is done lazily i.e. only when you ask for
position data (say plotting the path the animal took in the trial) is the
position data actually loaded. The class also uses as attibutes several
instances of subpackages (binning.Ratemap for example) so that the code
could be made more modular.
Parameters
----------
filename_root : str
Absolute location on the filesystem of the set of files without a suffix
Attributes
----------
filename_root : str
Absolute location on the filesystem of the set of files without a suffix
basename : str
Basename of the set of files without a suffix (everything after the last trailing slash)
EEG : dacq2py.axonaIO.EEG
Containing data from .eeg file
EGF : dacq2py.axonaIO.EEG
Containing data from .egf file
STM : dacq2py.axonaIO.Stim
Contains stimulation data (timestamps mostly) and header, plus some additional processing done below
POS : dacq2py.axonaIO.Pos
Contains raw and post-processed position data (xy, dir, speed etc) & header
TETRODE : extension of Python's dict
Each value is an instance of dacq2py.axonaIO.Tetrode. Contains methods to get cluster spike times, cluster indices etc
posFilter : dict
Keys are things like 'speed', 'time'; values are n x 2 arrays of range of values *to keep*
setheader : dict
Corresponds to the .set file for the file set. Keys/ values are all strings
_available_files : list
All files matching the filename_root + any valid suffix
metadata : collections.OrderedDict
Some basic info if the file is an "rh" one (see _parseMetaData)
ratemap : dacq2py.ephys_generic.binning.Ratemap class instance
See Also
--------
ephysiopy.common.binning : Basic binning of data, calculation of bin sizes etc
ephysiopy.common.eegcalcs : Contains filters, eeg power spectra methods
ephysiopy.common.spikecalcs : Temporal measures of spike trains and extracting parameters from waveforms and clusters
ephysiopy.common.fieldcalcs : Methods for extracting information from 2D ratemaps
Examples
--------
>>> from dacq2py.dacq2py_util import Trial
>>> T = Trial(r'/media/robin/data/Dropbox/Science/Recordings/M851/M851_140908t1rh')
"""
def __init__(self, filename_root, **kwargs):
# try and intelligently get full filename from just the root
filename_root = self.getFullFile(filename_root)
self.basename = os.path.basename(filename_root)
self.filename_root = filename_root
self._EEG = None
self._EGF = None
self._STM = None
self._POS = None
if 'volts' in kwargs:
useVolts = kwargs['volts']
self.TETRODE = TetrodeDict(filename_root, volts=useVolts) # see TETRODE class above
else:
self.TETRODE = TetrodeDict(filename_root)
self._posFilter = None # a dict used to filter pos
self._setheader = None
self.ratemap = None #becomes binning.RateMap instance - see POS getter property below
self.spikecalcs = SpikeCalcs()
self.fieldcalcs = FieldCalcs()
self._isinteractive = 1
self._figNum = 1
self._min_spks = 1
self._available_files = None
self._getAvailableFiles()
self.metadata = OrderedDict()
self.tetrodes = None
self.clusters = None
self.pos_weights = None
if 'cm' in kwargs:
self.useCm = kwargs['cm']
else:
self.useCm = False
try:
self._parseMetaData()
except:
self.metadata = {'Contents': 'Not an rhayman file'}
try:
self.getTsAndCs()
except:
pass
self.eeg_file = 1
def __repr__(self):
return '{self.__class__.__name__}({self.filename_root})'.format(self=self)
def hasFiles(self):
"""
Checks for some automated yaml processing
"""
for i in self.axona_files:
if os.path.isfile(self.filename_root + i):
self['has_' + i[1:]] = True
else:
self['has_' + i[1:]] = False
def getFullFile(self, filename):
"""
Used to construct filename_root in __init__
Parameters
----------
filename : str
The absolute path the files being analysed here without any suffix
"""
if os.path.isdir(r'/home/robin/Dropbox/Science/Recordings'):
pname, _ = os.path.split(filename)
if len(pname) == 0:
defaultDir = r'/home/robin/Dropbox/Science/Recordings'
animal = filename.split('_')[0]
filename = os.path.join(defaultDir, animal, filename)
return filename
@property
def setheader(self):
"""
Returns
----------
dict : setheader
Matches contents of .set file with keys and values all mapped as strings
"""
if self._setheader is None:
try:
self._setheader = self.getHeader(self.filename_root + '.set')
except IOError:
self._setheader = None
return self._setheader
@setheader.setter
def setheader(self, value):
self._setheader = value
@property
def ppm(self):
return self.__ppm
@ppm.setter
def ppm(self, value):
self.__ppm = value
# Update POS
self.POS.ppm = value
# Update Ratemap
self.ratemap = binning.RateMap(self.POS.xy, self.POS.dir, self.POS.speed, self.pos_weights, self.POS.ppm, self.useCm)
@property
def POS(self):
"""
Returns
-------
ephysiopy.dacq2py.axonaIO.POS:
Contains raw and post-processed position data
"""
if self._POS is None:
try:
self._POS = axonaIO.Pos(self.filename_root, cm=self.useCm)
self._POS.postprocesspos()
self._xlims = (int(self.POS.xy[0,:].min()),
int(self.POS.xy[0,:].max()))
self._ylims = (int(self.POS.xy[1,:].min()),
int(self.POS.xy[1,:].max()))
self.pos_weights = np.ravel(np.ones((1, self.POS.npos), dtype=float) / self.POS.pos_sample_rate)
self.ratemap = binning.RateMap(self.POS.xy, self.POS.dir, self.POS.speed, self.pos_weights, self.POS.ppm, self.useCm)
except IOError:
self._POS = None
return self._POS
@POS.setter
def POS(self, value):
self._POS = value
@property
def EEG(self):
"""
Returns
-------
ephysiopy.dacq2py.axonaIO.EEG:
eeg data and header
"""
if self._EEG is None:
try:
self._EEG = axonaIO.EEG(self.filename_root, eeg_file=self.eeg_file)
self.pos2eegScale = int(self.EEG.sample_rate /
self.POS.pos_sample_rate)
except IOError:
self._EEG = None
return self._EEG
@EEG.setter
def EEG(self, value):
self._EEG = value
@property
def EGF(self):
"""
Returns
-------
ephysiopy.dacq2py.axonaIO.EGF:
eeg data and header from .egf file
"""
if self._EGF is None:
try:
self._EGF = axonaIO.EEG(self.filename_root, eeg_file=self.eeg_file, egf=1)
self.pos2egfScale = int(self.EGF.sample_rate /
self.POS.pos_sample_rate)
except IOError:
self._EGF = None
return self._EGF
@EGF.setter
def EGF(self, value):
self._EGF = value
@property
def STM(self):
"""
Returns
-------
ephysiopy.dacq2py.axonaIO.Stim:
Stimulation data and header + some extras parsed from pos, eeg and set files
"""
if self._STM is None:
try:
self._STM = axonaIO.Stim(self.filename_root)
"""
update the STM dict with some relevant values from the .set file and the headers
of the eeg and pos files
"""
posHdr = self.getHeader(self.filename_root + '.pos')
eegHdr = self.getHeader(self.filename_root + '.eeg')
self._STM['posSampRate'] = self.getHeaderVal(posHdr, 'sample_rate')
self._STM['eegSampRate'] = self.getHeaderVal(eegHdr, 'sample_rate')
try:
egfHdr = self.getHeader(self.filename_root + '.egf')
self._STM['egfSampRate'] = self.getHeaderVal(egfHdr, 'sample_rate')
except:
pass
stim_pwidth = int(self.setheader['stim_pwidth']) / int(1000) # get into ms
self._STM['off'] = self._STM['on'] + int(stim_pwidth)
"""
There are a set of key / value pairs in the set file that
correspond to the patterns/ protocols specified in the
Stimulator menu in DACQ. Extract those items now...
There are five possible "patterns" that can be used in a trial. Those patterns
consist of either "Pause (no stimulation)" or some user-defined stimulation pattern.
Whether or not one of the five was used is specified in "stim_patternmask_n" where n
is 1-5. Confusingly, in dacqUSB these 5 things are called "Protocols", accessed from
the menu Stimulator/Protocols... within that window they are actually called "Phase 1",
"Phase 2" etc. To keep everything in order it's best to iterate through with a for loop,
as a plain dict is not guaranteed to be ordered and I can't be bothered to use an OrderedDict.
In dacqUSB nomenclature the pattern is actually the stimulation you
want to apply, i.e. a 10 ms pulse every 150 ms or whatever. The "pattern" is what is applied
within every Phase.
"""
# phase_info : a dict for each phase that is active
phase_info = {'startTime': None, 'duration': None, 'name': None, 'pulseWidth': None, 'pulsePause': None}
stim_dict = {}
stim_patt_dict = {}
for k,v in self.setheader.items():
if k.startswith("stim_patternmask_"):
if (int(v) == 1):
# get the number of the phase
phase_num = k[-1]
stim_dict['Phase_' + phase_num] = phase_info.copy()
if k.startswith("stim_patt_"):
stim_patt_dict[k] = v
self.patt_dict = stim_patt_dict
for k,v in stim_dict.items():
phase_num = k[-1]
stim_dict[k]['duration'] = int(self.setheader['stim_patterntimes_' + phase_num])
phase_name = self.setheader['stim_patternnames_' + phase_num]
stim_dict[k]['name'] = phase_name
if not (phase_name.startswith("Pause")):
# find the matching string in the stim_patt_dict
for kk,vv in stim_patt_dict.items():
split_str = vv.split('"')
patt_name = split_str[1]
if (patt_name == phase_name):
ss = split_str[2].split()
stim_dict[k]['pulseWidth'] = int(ss[0])
stim_dict[k]['pulsePause'] = int(ss[2])
# make the dict ordered by Phase number
self.STM['stim_params'] = OrderedDict(sorted(stim_dict.items()))
except IOError:
self._STM = None
return self._STM
@STM.setter
def STM(self, value):
self._STM = value
@property
def posFilter(self):
"""
self.posFilter : dict
Keys are strings such as 'speed', 'time' etc. Values are n x 2 arrays of values *to keep*
"""
return self._posFilter
@posFilter.setter
def posFilter(self, value):
"""Filters data depending on the filter specified in the dictionary value
Parameters
----------
value : dict
Filter dict. Legal keys include: 'time', 'dir', 'speed', 'xrange',
'yrange'. If key is 'time', values must be a n x 2 numpy array that
specifies the times to keep in SECONDS. If key is 'dir' values must
be a two element list/ array that specifies the directions to keep
in DEGREES NB the values can be singular strings of either 'w',
'e', 'n' or 's' which filters for a +/-45 degree range around that
cardinal direction. If key is 'speed' values are a 2 element list/
array to keep specified in m/s. If key is 'xrange' or 'yrange'
values are a two element list/ array that specify the x or y values
to keep in PIXELS.
Returns
-------
dacq2py_util.Trial : object
The Trial object is modified in place and all the relevant
variables are filtered and changed to numpy masked arrays
Examples
--------
>>> import numpy as np
>>> T = dacq2py_util.Trial(r'D:\M851\M851_140908t1rh')
>>> T.posFilter = {'time': np.array([600,1200])}
"""
# If masked, remove all masks on all aspects of data
if np.ma.is_masked(self.POS.speed):
self.POS.speed.mask = np.ma.nomask
if np.ma.is_masked(self.POS.dir):
self.POS.dir.mask = np.ma.nomask
if np.ma.is_masked(self.POS.xy):
self.POS.xy.mask = np.ma.nomask
if np.ma.is_masked(self.EEG.eeg):
self.EEG.eeg.mask = np.ma.nomask
if np.ma.is_masked(self.EGF.eeg):
self.EGF.eeg.mask = np.ma.nomask
if np.any(self.EEG.EEGphase):
if np.ma.is_masked(self.EEG.EEGphase):
self.EEG.EEGphase.mask = np.ma.nomask
if self.TETRODE:#true if TETRODE dict has entries
for tet in self.TETRODE.keys():
if np.ma.is_masked(self.TETRODE[tet].waveforms):
self.TETRODE[tet].waveforms.mask = np.ma.nomask
self.TETRODE[tet].spk_ts.mask = np.ma.nomask
if value is None:
return
idx = self.POS.filterPos(value)
if self.TETRODE:
for tet in self.TETRODE.keys():
posSamps = self.TETRODE[tet].getPosSamples()
common = np.in1d(posSamps, np.nonzero(idx)[1])
# Mask timestamps first as this is a vector, then expand
# out the mask array (common)
self.TETRODE[tet].spk_ts = np.ma.masked_where(common, self.TETRODE[tet].spk_ts)
common = common[:, None, None]
common = np.repeat(np.repeat(common, 4, axis=1), 50, axis=-1)
self.TETRODE[tet].waveforms = np.ma.masked_where(common, self.TETRODE[tet].waveforms)
self.POS.speed = np.squeeze(np.ma.masked_where(idx, np.expand_dims(self.POS.speed,0)))
self.POS.dir = np.squeeze(np.ma.masked_where(idx, np.expand_dims(self.POS.dir,0)))
posMask = np.squeeze(idx)
posMask = np.vstack((posMask, posMask))
self.POS.xy = np.ma.masked_where(posMask, self.POS.xy)
self.EEG.eeg = np.ma.masked_where(np.repeat(np.squeeze(idx), self.pos2eegScale), self.EEG.eeg)
if self.EGF:
self.EGF.eeg = np.ma.masked_where(np.repeat(np.squeeze(idx), self.pos2egfScale), self.EGF.eeg)
if np.any(self.EEG.EEGphase):
self.EEG.EEGphase = np.ma.masked_where(np.repeat(np.squeeze(idx), self.pos2eegScale), self.EEG.EEGphase)
self._posFilter = value
def print_stim_dict(self):
"""
Prints out keys/ values of STM dict
"""
for k,v in self.STM.items():
print(k, v)
def _filterForStm(self, laser=None):
"""
Cycles through the STM dict and fiters for laser on / off periods and
applies the filter to the pos and eeg data NB tetrode data not dealt with
yet
Parameters
----------
laser : bool
Whether to filter for laser stimulation events
"""
if laser is not None:
times = [0]
phaseType = []
for k, d in self.STM['stim_params'].items():
for kk, v in d.items():
if 'duration' in kk:
times.append(v)
if 'name' in kk:
phaseType.append(v)
periods = np.cumsum(times)
period_bounds = dict.fromkeys(set(phaseType), [])
for pk in period_bounds.keys():
bounds = []
for k, d in self.STM['stim_params'].items():
if pk == d['name']:
idx = int(k.split('_')[1])
bounds.append(periods[idx-1:idx+1])
period_bounds[pk] = bounds
for k, v in period_bounds.items():
if laser == 0:
if 'Pause' in k:
self.posFilter = {'time': np.array(v)}
elif laser == 1:
if 'Pause' not in k:
self.posFilter = {'time': np.array(v)}
def _getAvailableFiles(self):
self._available_files = glob(self.filename_root + '*')
def _getMap(self, tetrode=None, cluster=None, var2bin='pos', binsize=3,
smooth_sz=5, smooth=True, **kwargs):
"""
Returns the ratemap (smoothed or unsmoothed) for a given tetrode and
cluster
Parameters
----------
tetrode : int
the tetrode you want to look at
cluster : int, 1xn array/ list
a single number or list (or 1xn array) of the clusters to plot
binsize : int, optional
size of bins. Defaults to 3
smooth_sz : int
the width of the smoothing kernel (see **kwargs for more)
var2bin : str
(Optional) Defaults to 'pos'. Which variable to bin. Can be either
'pos', 'dir' or 'speed'. Works with masked arrays
smooth : bool, optional.
Defaults to true. Whether to smooth the data or not
**kwargs : extra arguments include:
'gaussian' - the smoothing kernel used is gaussian in shape
not the default boxcar
'after' - smoothing of the pos and spike maps is done after
spikes are divided by pos
'shuffle' - the time in ms by how much to shift the spikes
by. Used for generated distributions for null hypothesis
testing
Returns
-------
rmap : np.array
The data binned up as requested
"""
if 'pos' in var2bin:
varType = 'xy'
else:
varType = var2bin
if tetrode is None:
idx = np.arange(0, self.POS.npos)
mapType = 'pos'
else:
idx = self.TETRODE[tetrode].getClustIdx(cluster)
mapType = 'rate'
spk_weights = np.bincount(idx, minlength=self.POS.npos)
if 'shuffle' in kwargs.keys():
spk_weights = np.roll(spk_weights, int(kwargs['shuffle']) * 50) # * 50 to go from seconds into pos_samples
if np.ma.is_masked(self.POS.xy):
mask = ~
|
np.ma.getmask(self.POS.xy[0])
|
numpy.ma.getmask
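# A minimal, self-contained sketch of the numpy.ma.getmask call completed above
# (illustrative only; the small array here is made up, not taken from the recording class):
import numpy as np
xy = np.ma.masked_where(np.array([[True, False, True],
                                  [True, False, True]]),
                        np.arange(6).reshape(2, 3))
valid = ~np.ma.getmask(xy[0])   # invert the row mask to select the unmasked samples
print(valid)                    # [False  True False]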
|
# Originated from https://github.com/ellisdg/3DUnetCNN/blob/master/unet3d/utils/patches.py
import numpy as np
def compute_patch_indices(image_shape, patch_size, overlap, start=None):
if isinstance(overlap, int):
overlap = np.asarray([overlap] * len(image_shape))
if start is None:
n_patches = np.ceil(image_shape / (patch_size - overlap))
overflow = (patch_size - overlap) * n_patches - image_shape + overlap
start = -np.ceil(overflow/2)
elif isinstance(start, int):
start = np.asarray([start] * len(image_shape))
stop = image_shape + start
step = patch_size - overlap
return get_set_of_patch_indices(start, stop, step)
def get_set_of_patch_indices(start, stop, step):
return np.asarray(np.mgrid[start[0]:stop[0]:step[0], start[1]:stop[1]:step[1],
start[2]:stop[2]:step[2]].reshape(3, -1).T, dtype=int)  # np.int was removed from NumPy; the builtin int is equivalent here
def get_random_patch_index(image_shape, patch_shape):
"""
Returns a random corner index for a patch. If this is used during training, the middle pixels will be seen by
the model way more often than the edge pixels (which is probably a bad thing).
:param image_shape: Shape of the image
:param patch_shape: Shape of the patch
:return: a tuple containing the corner index which can be used to get a patch from an image
"""
return get_random_nd_index(np.subtract(image_shape, patch_shape))
def get_random_nd_index(index_max):
return tuple([np.random.choice(index_max[index] + 1) for index in range(len(index_max))])
def get_patch_from_3d_data(data, patch_shape, patch_index):
"""
Returns a patch from a numpy array.
:param data: numpy array from which to get the patch.
:param patch_shape: shape/size of the patch.
:param patch_index: corner index of the patch.
:return: numpy array take from the data with the patch shape specified.
"""
patch_index = np.asarray(patch_index, dtype=np.int16)
patch_shape = np.asarray(patch_shape)
image_shape = data.shape[-3:]
if np.any(patch_index < 0) or np.any((patch_index + patch_shape) > image_shape):
data, patch_index = fix_out_of_bound_patch_attempt(data, patch_shape, patch_index)
return data[..., patch_index[0]:patch_index[0]+patch_shape[0], patch_index[1]:patch_index[1]+patch_shape[1],
patch_index[2]:patch_index[2]+patch_shape[2]]
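# A small usage sketch, not part of the original module: extract one random patch from a
# synthetic volume with the helpers above (shapes are arbitrary example values).
def _example_patch_extraction():
    data = np.random.rand(1, 64, 64, 64)               # e.g. (channels, x, y, z)
    index = get_random_patch_index(data.shape[-3:], (32, 32, 32))
    patch = get_patch_from_3d_data(data, patch_shape=(32, 32, 32), patch_index=index)
    assert patch.shape[-3:] == (32, 32, 32)
    return patch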
def fix_out_of_bound_patch_attempt(data, patch_shape, patch_index, ndim=3):
"""
Pads the data and alters the patch index so that a patch will be correct.
:param data:
:param patch_shape:
:param patch_index:
:return: padded data, fixed patch index
"""
image_shape = data.shape[-ndim:]
pad_before = np.abs((patch_index < 0) * patch_index)
pad_after = np.abs(((patch_index + patch_shape) > image_shape) * ((patch_index + patch_shape) - image_shape))
pad_args = np.stack([pad_before, pad_after], axis=1)
if pad_args.shape[0] < len(data.shape):
pad_args = [[0, 0]] * (len(data.shape) - pad_args.shape[0]) + pad_args.tolist()
data = np.pad(data, pad_args, mode="edge")
patch_index += pad_before
return data, patch_index
def reconstruct_from_patches(patches, patch_indices, data_shape, default_value=0):
"""
Reconstructs an array of the original shape from the lists of patches and corresponding patch indices. Overlapping
patches are averaged.
:param patches: List of numpy array patches.
:param patch_indices: List of indices that corresponds to the list of patches.
:param data_shape: Shape of the array from which the patches were extracted.
:param default_value: The default value of the resulting data. if the patch coverage is complete, this value will
be overwritten.
:return: numpy array containing the data reconstructed by the patches.
"""
data = np.ones(data_shape) * default_value
image_shape = data_shape[-3:]
count = np.zeros(data_shape, dtype=int)
for patch, index in zip(patches, patch_indices):
image_patch_shape = patch.shape[-3:]
if np.any(index < 0):
fix_patch = np.asarray((index < 0) * np.abs(index), dtype=int)
patch = patch[..., fix_patch[0]:, fix_patch[1]:, fix_patch[2]:]
index[index < 0] = 0
if np.any((index + image_patch_shape) >= image_shape):
fix_patch = np.asarray(image_patch_shape - (((index + image_patch_shape) >= image_shape)
* ((index + image_patch_shape) - image_shape)), dtype=int)
patch = patch[..., :fix_patch[0], :fix_patch[1], :fix_patch[2]]
patch_index = np.zeros(data_shape, dtype=bool)
patch_index[...,
index[0]:index[0]+patch.shape[-3],
index[1]:index[1]+patch.shape[-2],
index[2]:index[2]+patch.shape[-1]] = True
patch_data = np.zeros(data_shape)
patch_data[patch_index] = patch.flatten()
new_data_index = np.logical_and(patch_index,
|
np.logical_not(count > 0)
|
numpy.logical_not
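# Illustrative sketch of the completed call above: np.logical_not(count > 0) marks voxels
# that no earlier patch has filled yet, so logical_and with patch_index selects first-time coverage.
import numpy as np
count = np.array([0, 1, 0, 2])
patch_index = np.array([True, True, False, True])
first_time = np.logical_and(patch_index, np.logical_not(count > 0))
print(first_time)   # [ True False False False]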
|
from __future__ import division, absolute_import, print_function
try:
# Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import functools
import ctypes
import os
import gc
import weakref
import pytest
from contextlib import contextmanager
from numpy.core.numeric import pickle
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import strchar, unicode
import numpy.core._multiarray_tests as _multiarray_tests
from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
assert_array_equal, assert_raises_regex, assert_array_almost_equal,
assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
temppath, suppress_warnings
)
from numpy.core.tests._locales import CommaDecimalPointLocale
# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
# https://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""
Allocate a new ndarray with aligned memory.
The ndarray is guaranteed *not* aligned to twice the requested alignment.
Eg, if align=4, guarantees it is not aligned to 8. If align=None uses
dtype.alignment."""
dtype = np.dtype(dtype)
if dtype == np.dtype(object):
# Can't do this, fall back to standard allocation (which
# should always be sufficiently aligned)
if align is not None:
raise ValueError("object array alignment not supported")
return np.zeros(shape, dtype=dtype, order=order)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + 2*align + 1, np.uint8)
ptr = buf.__array_interface__['data'][0]
offset = ptr % align
if offset != 0:
offset = align - offset
if (ptr % (2*align)) == 0:
offset += align
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
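# Quick illustrative check (not part of the original test suite): the helper above should
# return a zero-filled array whose data pointer honours the requested alignment.
def _example_aligned_zeros():
    arr = _aligned_zeros((4, 4), dtype=np.float64, align=32)
    ptr = arr.__array_interface__['data'][0]
    assert ptr % 32 == 0    # data pointer is aligned as requested
    assert not arr.any()    # buffer really is zero-filled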
class TestFlags(object):
def setup(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_writeable_from_readonly(self):
# gh-9440 - make sure fromstring, from buffer on readonly buffers
# set writeable False
data = b'\x00' * 100
vals = np.frombuffer(data, 'B')
assert_raises(ValueError, vals.setflags, write=True)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_raises(ValueError, vals.setflags, write=True)
def test_writeable_from_buffer(self):
data = bytearray(b'\x00' * 100)
vals = np.frombuffer(data, 'B')
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 always copies")
def test_writeable_pickle(self):
import pickle
# Small arrays will be copied without setting base.
# See condition for using PyArray_SetBaseObject in
# array_setstate.
a = np.arange(1000)
for v in range(pickle.HIGHEST_PROTOCOL):
vals = pickle.loads(pickle.dumps(a, v))
assert_(vals.flags.writeable)
assert_(isinstance(vals.base, bytes))
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags['C'], True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
with assert_warns(DeprecationWarning):
assert_equal(self.a.flags.updateifcopy, False)
with assert_warns(DeprecationWarning):
assert_equal(self.a.flags['U'], False)
assert_equal(self.a.flags['UPDATEIFCOPY'], False)
assert_equal(self.a.flags.writebackifcopy, False)
assert_equal(self.a.flags['X'], False)
assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed byte-wise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(object):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(object):
def setup(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
assert_(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not
# fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core._multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except Exception as e:
raise RuntimeError(e)
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
assert_raises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(object):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(object):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
def test_unicode_assignment(self):
# gh-5049
from numpy.core.numeric import set_string_function
@contextmanager
def inject_str(s):
""" replace ndarray.__str__ temporarily """
set_string_function(lambda x: s, repr=False)
try:
yield
finally:
set_string_function(None, repr=False)
a1d = np.array([u'test'])
a0d = np.array(u'done')
with inject_str(u'bad'):
a1d[0] = a0d # previously this would invoke __str__
assert_equal(a1d[0], u'done')
# this would crash for the same reason
np.array([np.array(u'\xe5\xe4\xf6')])
def test_stringlike_empty_list(self):
# gh-8902
u = np.array([u'done'])
b = np.array([b'done'])
class bad_sequence(object):
def __getitem__(self): pass
def __len__(self): raise RuntimeError
assert_raises(ValueError, operator.setitem, u, 0, [])
assert_raises(ValueError, operator.setitem, b, 0, [])
assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
def test_longdouble_assignment(self):
# only relevant if longdouble is larger than float
# we're looking for loss of precision
for dtype in (np.longdouble, np.longcomplex):
# gh-8902
tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
# construction
tiny1d = np.array([tinya])
assert_equal(tiny1d[0], tinya)
# scalar = scalar
tiny1d[0] = tinyb
assert_equal(tiny1d[0], tinyb)
# 0d = scalar
tiny1d[0, ...] = tinya
assert_equal(tiny1d[0], tinya)
# 0d = 0d
tiny1d[0, ...] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
# scalar = 0d
tiny1d[0] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
arr = np.array([np.array(tinya)])
assert_equal(arr[0], tinya)
def test_cast_to_string(self):
# cast to str should do "str(scalar)", not "str(scalar.item())"
# Example: In python2, str(float) is truncated, so we want to avoid
# str(np.float64(...).item()) as this would incorrectly truncate.
a = np.zeros(1, dtype='S20')
a[:] = np.array(['1.12345678901234567890'], dtype='f8')
assert_equal(a[0], b"1.1234567890123457")
class TestDtypedescr(object):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
assert_(np.dtype('<i4') != np.dtype('>i4'))
assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
def test_structured_non_void(self):
fields = [('a', '<i2'), ('b', '<i2')]
dt_int = np.dtype(('i4', fields))
assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])")
# gh-9821
arr_int = np.zeros(4, dt_int)
assert_equal(repr(arr_int),
"array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))")
class TestZeroRank(object):
def setup(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
assert_equal(a[...], 0)
assert_equal(b[...], 'x')
assert_(a[...].base is a) # `a[...] is a` in numpy <1.9.
assert_(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
assert_equal(a[()], 0)
assert_equal(b[()], 'x')
assert_(type(a[()]) is a.dtype.type)
assert_(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[0], b)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
assert_equal(a, 42)
b[...] = ''
assert_equal(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
assert_equal(a, 42)
b[()] = ''
assert_equal(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
assert_raises(IndexError, assign, a, 0, 42)
assert_raises(IndexError, assign, b, 0, '')
assert_raises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
assert_equal(x[()], 5)
y =
|
np.ndarray((), buffer=x)
|
numpy.ndarray
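# Illustrative sketch of the completed constructor above: a 0-d ndarray built over another
# array's buffer shares its memory, so an update made through x is visible through the view y.
import numpy as np
x = np.ndarray(())
x[()] = 5
y = np.ndarray((), buffer=x)   # zero-dimensional view over x's storage
x[()] = 6
print(y[()])                   # 6.0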
|
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for telluride_decoding.infer_decoder."""
import io
import os
import sys
from absl import flags
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import matplotlib
# pylint: disable=g-import-not-at-top
matplotlib.use('Agg') # Needed for plotting to a file, before the next import
import matplotlib.pyplot as plt
import mock
import numpy as np
from telluride_decoding import brain_data
from telluride_decoding import infer_decoder
from telluride_decoding import ingest
import tensorflow.compat.v2 as tf
flags.DEFINE_string(
'tmp_dir', os.environ.get('TMPDIR') or '/tmp',
'Temporary directory location.')
FLAGS = flags.FLAGS
@tf.function
def _linear_model(input_dict):
"""The simplest possible linear model for testing.
Args:
input_dict: A TF dataset, only one field needed (input_1) containing the
EEG data from which we predict intensity.
Returns:
The predicted intensity
"""
eeg = input_dict['input_1']
return _eeg_to_intensity(eeg)
@tf.function
def _cca_model(input_dict, cca_dims=2):
"""The simplest possible CCA model for testing.
Args:
input_dict: A TF dataset with two fields that are rotated via CCA.
cca_dims: How many CCA dimensions to compute.
Returns:
A concatenated pair of arrays with the best correlation.
"""
return tf.concat((input_dict['input_1'][:, 0:cca_dims], # EEG data
input_dict['input_2'][:, 0:cca_dims]), # Intensity data
axis=1)
def _eeg_to_intensity(eeg):
"""Intensity is uniform random between [0, 1], eeg is [-1, 1]."""
return eeg/2.0 + 0.5
def _intensity_to_eeg(intensity):
"""Intensity is uniform random between [0, 1], eeg is [-1, 1]."""
return (intensity - 0.5)*2.0
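# Small illustrative check (not part of the original tests): the two helpers above are exact
# inverses on the [0, 1] intensity range used throughout this file.
def _example_intensity_eeg_round_trip():
    intensity = np.linspace(0.0, 1.0, 5)
    recovered = _eeg_to_intensity(_intensity_to_eeg(intensity))
    np.testing.assert_allclose(recovered, intensity, rtol=1e-12)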
_NUM_TEST_POINTS = 1000 # Arbitrary for testing.
class InferDecoderTest(parameterized.TestCase):
def setUp(self):
"""Stores and prepares tf.dataset tests with three kinds of test data.
These data are:
Plain training data,
More training data, but with input and output mixed up for null test,
Test data which switches attention periodically.
"""
super(InferDecoderTest, self).setUp()
params = self.get_default_params()
attended_speaker = 'intensity1'
self._train_filename = self.create_sample_data_file(with_noise=False,
with_switches=False)
self._train_data = infer_decoder.create_dataset(self._train_filename,
params,
attended_speaker)
self._mixed_data = infer_decoder.create_dataset(self._train_filename,
params,
attended_speaker,
mixup_batch=True)
self._test_filename = self.create_sample_data_file(with_noise=False,
with_switches=True)
self._test_data = infer_decoder.create_dataset(self._test_filename, params,
attended_speaker)
def create_sample_data_file(self, test_name='test',
num_dimensions=4,
with_switches=False, with_noise=False):
"""Create a TFRecord data file with two intensity profiles and EEG data."""
intensity1 = np.random.rand(_NUM_TEST_POINTS, num_dimensions)
intensity2 = np.random.rand(_NUM_TEST_POINTS, num_dimensions)
speaker_flag = np.zeros((_NUM_TEST_POINTS), dtype=np.int32)
if with_switches:
# Switch to speaker 2 for second half
speaker_flag[_NUM_TEST_POINTS//2:] = 1
eeg = np.zeros((_NUM_TEST_POINTS, num_dimensions))
eeg[speaker_flag == 0, :] = _intensity_to_eeg(
intensity1[speaker_flag == 0, :])
eeg[speaker_flag == 1, :] = _intensity_to_eeg(
intensity2[speaker_flag == 1, :])
if with_noise:
for i in range(num_dimensions):
frac = i/float(num_dimensions)
eeg[:, i] = (1-frac)*eeg[:, i] + frac*np.random.rand(_NUM_TEST_POINTS,)
data_dict = {'intensity1': intensity1,
'intensity2': intensity2,
'attended_speaker': speaker_flag.astype(np.float32),
'eeg': eeg,
}
brain_trial = ingest.BrainTrial(test_name)
brain_trial.model_features = data_dict
data_dir = self.create_tempdir().full_path
brain_trial.write_data_as_tfrecords(data_dir)
return os.path.join(data_dir, test_name + '.tfrecords')
def get_default_params(self):
return {'input_field': ['eeg'],
'pre_context': 0,
'post_context': 0,
'input2_pre_context': 0,
'input2_post_context': 0,
}
def test_sample_data_file(self):
"""Basic test to make sure we can create the data file and it has data."""
num_dimensions = 4
features = brain_data.discover_feature_shapes(self._train_filename)
print('sample_data_file features are:', features)
self.assertEqual(features['eeg'].shape, [num_dimensions])
self.assertEqual(features['intensity1'].shape, [num_dimensions])
self.assertEqual(features['intensity2'].shape, [num_dimensions])
count, error = brain_data.count_tfrecords(self._train_filename)
self.assertEqual(count, _NUM_TEST_POINTS)
self.assertFalse(error)
def test_conversions(self):
"""Makes sure that the model mapping is invertable."""
data = np.random.rand(1000)
converted = _eeg_to_intensity(_intensity_to_eeg(data))
np.testing.assert_allclose(data, converted, rtol=1e-5)
def test_create_dataset(self):
"""Test to see if we can create the right data file for testing a model."""
num_batches = 0
for input_data, output_data in self._test_data.take(1):
predicted_intensity = _eeg_to_intensity(input_data['input_1'].numpy())
print('Types:', predicted_intensity.dtype, output_data.numpy().dtype)
print('Shapes:', predicted_intensity.shape, output_data.numpy().shape)
np.testing.assert_allclose(predicted_intensity,
output_data.numpy(), atol=1e-7, rtol=1e-4)
num_batches += 1
self.assertGreater(num_batches, 0)
def test_correlation_calculation(self):
num_batches = 50 # Arbitrary
batch_size = 3400 # Arbitrary
total_points = num_batches * batch_size
x = np.random.randn(total_points, 3) + 1.2
y = x*3 + 3.1
decoder = infer_decoder.LinearRegressionDecoder(_linear_model)
for i in range(num_batches):
s = i*batch_size
e = s + batch_size
decoder.add_data_correlator(x[s:e, :], y[s:e, :])
r = decoder.compute_correlation(x, y)
np.testing.assert_allclose(np.mean(r), 1, rtol=1e-5)
def test_correlation_save_model(self):
num_batches = 50 # Arbitrary
batch_size = 340 # Arbitrary
total_points = num_batches * batch_size
x = np.random.randn(total_points, 3) + 1.2
y = x*3 + 3.1
decoder = infer_decoder.LinearRegressionDecoder(_linear_model)
decoder.add_data_correlator(x, y)
x_new = np.random.randn(total_points, 3) + 1.2
y_new = x_new*3 + 3.1
r = decoder.compute_correlation(x_new, y_new)
tmp_dir = flags.FLAGS.test_tmpdir or '/tmp'
corr_save_dir = os.path.join(tmp_dir, 'corr_params.json')
decoder.save_parameters(corr_save_dir)
decoder_loaded = infer_decoder.LinearRegressionDecoder(_linear_model)
decoder_loaded.restore_parameters(corr_save_dir)
r_loaded = decoder_loaded.compute_correlation(x_new, y_new)
np.testing.assert_equal(r_loaded, r)
def test_linear_model(self):
"""Makes sure our sample TF model performs as expected."""
intensity = np.arange(10) - 5.1 # Arbitrary non-integer values spanning negative and positive
eeg = _intensity_to_eeg(intensity)
prediction = _linear_model({'input_1': eeg})
np.testing.assert_allclose(intensity, prediction)
def test_cca_data(self):
"""Checks the data is being loaded into the input_dict correctly for CCA."""
def pearson_correlation(x, y):
"""Computes the Pearson correlation coefficient between tensors of data.
This routine computes a vector correlation (ala cosine distance).
Args:
x: one of two input arrays.
y: second of two input arrays.
Returns:
scalar correlation coefficient.
"""
# From: https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
x_m = x - tf.math.reduce_mean(x, axis=0)
y_m = y - tf.math.reduce_mean(y, axis=0)
return tf.divide(
tf.math.reduce_sum(tf.multiply(x_m, y_m), axis=0),
tf.multiply(tf.math.sqrt(tf.math.reduce_sum(tf.math.square(x_m),
axis=0)),
tf.math.sqrt(tf.math.reduce_sum(tf.math.square(y_m),
axis=0))))
for input_dict, _ in self._test_data.take(1):
self.assertGreater(np.mean(np.abs(input_dict['input_1'] -
input_dict['input_2'])), 0.1)
r = pearson_correlation(input_dict['input_1'], input_dict['input_2'])
np.testing.assert_allclose(r, 1.0, rtol=1e-5)
@parameterized.named_parameters(
('lda', 'lda'),
('first', 'first'),
('mean', 'mean'),
('mean-squared', 'mean-squared'),
)
def test_inference(self, reduction):
"""Tests the training and inference stages with a linear model."""
# Create the basic decoder class, with a simple TF model.
decoder = infer_decoder.LinearRegressionDecoder(_linear_model,
reduction=reduction)
decoder.train(self._mixed_data, self._train_data)
speaker, labels = decoder.test_all(self._test_data)
plt.clf()
plt.plot(labels)
plt.plot(speaker)
plt.savefig(os.path.join(os.environ.get('TMPDIR') or '/tmp',
'inference_%s.png' % reduction))
print('test_inference_%s:' % reduction, speaker.shape, labels.shape)
self.assertGreater(np.mean(speaker[labels == 0]), 0.5)
self.assertLess(np.mean(speaker[labels == 1]), 0.5)
@parameterized.named_parameters(
('lda', 'lda', 0.85),
('first', 'first', 0.6),
('mean', 'mean', 0.85),
('mean-squared', 'mean-squared', 0.85),
)
def test_windowed_inference(self, reduction, expected_mean):
"""Tests the training and inference stages with a linear model."""
# Create the basic decoder class, with a simple TF model.
decoder = infer_decoder.LinearRegressionDecoder(_linear_model,
reduction=reduction)
decoder.train(self._mixed_data, self._train_data)
speaker, _ = decoder.test_all(self._test_data)
window_sizes = [1, 2, 4, 8, 16, 32, 64, 128, 256]
windowed_means = np.zeros(len(window_sizes))
windowed_stds = np.zeros(len(window_sizes))
for i, window_size in enumerate(window_sizes):
results = []
# Evaluate performance on first half of the training data
for window_start in range(0, _NUM_TEST_POINTS//2,
window_size):
window_end = window_start + window_size
results.append(np.mean(speaker[window_start:window_end] > 0.5))
windowed_means[i] = np.mean(results)
windowed_stds[i] = np.std(results)
plt.clf()
plt.errorbar(window_sizes, windowed_means, windowed_stds)
plt.gca().set_xscale('log')
plt.title('Test_windowed_inference with %s' % reduction)
plt.savefig(os.path.join(os.environ.get('TMPDIR') or '/tmp',
'windowed_inference_%s.png' % reduction))
plt.xlabel('Window size (frames)')
self.assertAlmostEqual(np.mean(windowed_means), expected_mean, delta=0.05)
def test_one_window(self):
"""Tests the training and inference stages with a linear model."""
# Create the basic decoder class, with a simple TF model.
decoder = infer_decoder.LinearRegressionDecoder(_linear_model)
decoder.train(self._mixed_data, self._train_data)
batch_size = 101
for speaker, label in decoder.test_by_window(self._test_data, batch_size):
self.assertEqual(speaker.shape, (batch_size, 1))
self.assertEqual(label.shape, (batch_size, 1))
def test_train_no_switches(self):
"""Tests the training and inference stages with a linear model."""
# Create the basic decoder class, with a simple TF model.
decoder = infer_decoder.LinearRegressionDecoder(_linear_model)
empty_dataset = tf.data.Dataset.from_tensor_slices(({'input_1': [],
'input_2': []},
[]))
with self.assertRaisesRegex(ValueError, 'No data for class 0'):
decoder.train(empty_dataset, self._mixed_data)
with self.assertRaisesRegex(ValueError, 'No data for class 1'):
decoder.train(self._mixed_data, empty_dataset)
def test_windowing(self):
data = np.reshape(np.arange(12), (6, 2))
ave = infer_decoder.average_data(data, window_size=3)
expected = [[2, 3], [8, 9]]
np.testing.assert_array_equal(ave, expected)
@parameterized.named_parameters(
('linear_first', 'linear', 'first', 0.1, 1),
('linear_lda', 'linear', 'lda', 0.1, 1),
('linear_mean_squared', 'linear', 'mean-squared', 0.1, 1),
('CCA_first', 'CCA', 'first', 0.1, 1),
('CCA_lda', 'CCA', 'lda', 0.16, 1),
('CCA_mean_squared', 'CCA', 'mean-squared', 0.1, 1),
('linear_first-100', 'linear', 'first', 0.15, 100),
('linear_lda-100', 'linear', 'lda', 0.1, 100),
('linear_mean_squared-100', 'linear', 'mean-squared', 0.1, 100),
('CCA_first-100', 'CCA', 'first', 0.1, 100),
('CCA_lda-100', 'CCA', 'lda', 0.16, 100),
('CCA_mean_squared-100', 'CCA', 'mean-squared', 0.1, 100),
)
def test_training_and_inference(self, regressor_name, reduction,
tolerance=0.1,
window_size=1):
"""Tests the training and inference stages with a linear model."""
print('Training the %s regressor.' % regressor_name)
# Create the desired decoder class.
if regressor_name == 'linear':
decoder = infer_decoder.LinearRegressionDecoder(_linear_model,
reduction=reduction)
elif regressor_name == 'CCA':
decoder = infer_decoder.CCADecoder(_cca_model, reduction=reduction)
else:
raise ValueError('Unknown decoder name: %s' % regressor_name)
dprime = decoder.train(self._mixed_data, self._train_data,
window_size=window_size)
logging.info('Infer training of %s data via %s gave a dprime of %g.',
regressor_name, reduction, dprime)
speaker, _ = decoder.test_all(self._test_data)
plt.clf()
plt.plot(speaker)
plt.savefig(os.path.join(os.environ.get('TMPDIR') or '/tmp',
'inference_train_%s_%s.png' % (regressor_name,
reduction)))
self.assertGreater(np.mean(speaker[:(_NUM_TEST_POINTS//2)]),
1.0 - tolerance)
self.assertLess(np.mean(speaker[(_NUM_TEST_POINTS//2):]),
tolerance)
# Make sure we can retrieve and save parameters (without errors)
decoder.decoding_model_params = decoder.decoding_model_params
def test_two_dimensional_data(self):
"""A copy of the easiest test from scaled_lda_test. Just to verify function.
"""
num_dims = 2
mean_vectors = np.array([[-2, 12], [2, -1]])
d1 = np.matmul(np.random.randn(_NUM_TEST_POINTS, num_dims),
[[2, 0], [0, 0.5]]) + mean_vectors[0, :]
d2 = np.matmul(np.random.randn(_NUM_TEST_POINTS, num_dims),
[[2, 0], [0, 0.5]]) + mean_vectors[1, :]
# Plot the original data.
plt.clf()
plt.subplot(2, 1, 1)
plt.plot(d1[:, 0], d1[:, 1], 'rx')
plt.plot(d2[:, 0], d2[:, 1], 'bo')
plt.title('Original Data')
x = np.concatenate((d1, d2), axis=0)
labels = [42, -12]
y = np.concatenate((np.ones(d1.shape[0])*labels[0],
np.ones(d2.shape[0])*labels[1]))
decoder = infer_decoder.Decoder(lambda x: x) # Dummy model for testing
dprime = decoder.compute_lda_model(d1, d2)
logging.info('test_two_dimensional_data dprime is: %g', dprime)
self.assertAlmostEqual(dprime, 26.3253, delta=2.0)
x_lda = decoder.reduce_with_lda(x)
# Plot the transformed data.
plt.subplot(2, 1, 2)
plt.plot(x_lda[y == labels[0], 0], x_lda[y == labels[0], 1], 'rx')
plt.plot(x_lda[y == labels[1], 0], x_lda[y == labels[1], 1], 'bo')
plt.title('Transformed Data')
plt.savefig(os.path.join(os.environ.get('TMPDIR') or '/tmp',
'scaled_lda.png'))
# Make sure the transformed centers are symmetric on the first (x) axis.
centers = decoder.reduce_with_lda(mean_vectors)
logging.info('Transformed centers are: %s', (centers,))
self.assertAlmostEqual(centers[0, 0], 0., delta=0.1)
self.assertAlmostEqual(centers[1, 0], 1., delta=0.1)
def generate_dprime_data(self):
dims = 10
# Create two datasets, with coupled dimensions (decreasing with dim. index)
d1 = np.random.randn(_NUM_TEST_POINTS, dims)
d2 = np.random.randn(_NUM_TEST_POINTS, dims)
for i in range(dims):
p = 2**(-i)
d2[:, i] = p*d1[:, i] + (1-p)*d2[:, i]
d2 += np.ones(d2.shape)
return d1, d2
def test_lda(self):
d1, d2 = self.generate_dprime_data()
# Build and transform the sample data.
decoder = infer_decoder.Decoder(lambda x: x) # Dummy model for testing
with self.assertRaisesRegex(
ValueError, 'Must compute the LDA model before reducing data.'):
decoder.reduce_with_lda(24)
dprime = decoder.compute_lda_model(d1, d2)
self.assertAlmostEqual(dprime, 3.31, delta=.1)
all_data = np.concatenate((d1, d2), axis=0)
with self.assertRaisesRegex(
TypeError, 'Input data must be an numpy array, not'):
decoder.reduce_with_lda(24)
transformed_data = decoder.reduce_with_lda(all_data)
self.assertEqual(transformed_data.shape, (2*_NUM_TEST_POINTS,
2))
dprime = infer_decoder.calculate_dprime(decoder.reduce_with_lda(d1)[:, 0],
decoder.reduce_with_lda(d2)[:, 0])
self.assertAlmostEqual(dprime, 3.28, delta=.1)
def test_lda_save_model(self):
d1, d2 = self.generate_dprime_data()
# Build and transform the sample data.
decoder = infer_decoder.Decoder(lambda x: x) # Dummy model for testing
_ = decoder.compute_lda_model(d1, d2)
all_data = np.concatenate((d1, d2), axis=0)
transformed_data = decoder.reduce_with_lda(all_data)
dprime = infer_decoder.calculate_dprime(decoder.reduce_with_lda(d1)[:, 0],
decoder.reduce_with_lda(d2)[:, 0])
print(decoder.model_params)
tmp_dir = flags.FLAGS.test_tmpdir or '/tmp'
save_lda_dir = os.path.join(tmp_dir, 'lda_params.json')
decoder.save_parameters(save_lda_dir)
decoder_loaded = infer_decoder.Decoder(lambda x: x)
decoder_loaded.restore_parameters(save_lda_dir)
transformed_data_loaded = decoder_loaded.reduce_with_lda(all_data)
dprime_loaded = infer_decoder.calculate_dprime(
decoder_loaded.reduce_with_lda(d1)[:, 0],
decoder_loaded.reduce_with_lda(d2)[:, 0])
np.testing.assert_array_equal(transformed_data, transformed_data_loaded)
np.testing.assert_array_equal(dprime, dprime_loaded)
def test_dprime(self):
"""Makes sure our d' calculation is correct."""
num = 1000
|
np.random.seed(0)
|
numpy.random.seed
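# Illustrative sketch of the completed call above: seeding NumPy's global RNG makes the
# synthetic draws used by these tests reproducible from run to run.
import numpy as np
np.random.seed(0)
a = np.random.randn(3)
np.random.seed(0)
b = np.random.randn(3)
print(np.array_equal(a, b))   # True -- identical draws after re-seeding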
|
#!/usr/bin/env python3
"""
usage: put under the source folder; required files: evolving_state.txt, calib_state.txt, state.txt
After the first run, integration_states.txt and vio_states.txt are generated and figures are saved in the current dir.
You can move the figures and state.txt, integration_states.txt, vio_states.txt into a folder.
Rerun to generate graphs more efficiently by specifying the folder name that contains the above three files.
"""
import json
import os
from os import path as osp
import matplotlib.pyplot as plt
import numpy as np
import progressbar
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import interp1d
from scipy.spatial.transform import Rotation
from utils.logging import logging
from utils.math_utils import *
color_vio = "C2"
color_ronin = "C3"
color_filter = "C0"
def set_axes_equal(ax):
"""Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
"""
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5 * max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
def imu_integrate(gravity, last_R, last_v, last_p, accel, omega, dt):
"""
Given compensated IMU data and corresponding dt, propagate the previous state in the world frame
"""
dR = mat_exp(omega * dt)
new_R = last_R.dot(dR)
new_v = last_v + gravity * dt + last_R.dot(accel * dt)
new_p = (
last_p
+ last_v * dt
+ 0.5 * gravity * dt * dt
+ 0.5 * last_R.dot(accel * dt * dt)
)
return new_R, new_v, new_p
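# Illustrative usage sketch with made-up values (assumes mat_exp from utils.math_utils maps a
# zero rotation vector to the identity): one 1 ms step from rest, with the body-frame specific
# force exactly cancelling gravity, should leave the state essentially unchanged.
def _example_imu_integrate():
    gravity = np.array([0.0, 0.0, -9.81])
    last_R = np.eye(3)
    last_v = np.zeros(3)
    last_p = np.zeros(3)
    accel = np.array([0.0, 0.0, 9.81])   # cancels gravity once rotated into the world frame
    omega = np.zeros(3)
    return imu_integrate(gravity, last_R, last_v, last_p, accel, omega, dt=1e-3)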
def load_aekf_rotation(attitude_filter_path):
# load aekf rotation
aekf_data = np.loadtxt(attitude_filter_path, delimiter=",", skiprows=3)
# load attitude filter rotation
ts_aekf = aekf_data[:, 0] * 1e-6
aekf_r = Rotation.from_quat(
np.concatenate(
[aekf_data[:, 2:5], np.expand_dims(aekf_data[:, 1], axis=1)], axis=1
)
)
R_aekf = aekf_r.as_matrix()
# aekf results with y pointing down, convert them
R_wf = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
R_aekf = Rotation.from_matrix(np.matmul(R_wf, R_aekf))
return ts_aekf, R_aekf
def load_aekf_calibration(attitude_filter_path):
with open(attitude_filter_path, "r") as f:
line = f.readline()
line = f.readline()
init_calib = np.fromstring(line, sep=",")
init_accelScaleInv = init_calib[1:10].reshape((3, 3))
init_gyroScaleInv = init_calib[10:19].reshape((3, 3))
init_gyroGSense = init_calib[19:28].reshape((3, 3))
init_accelBias = init_calib[28:31].reshape((3, 1))
init_gyroBias = init_calib[31:34].reshape((3, 1))
return (
init_gyroScaleInv,
init_gyroBias,
init_gyroGSense,
init_accelScaleInv,
init_accelBias,
)
def plot_state_euclidean(
name_plot, name_ref, components_name, ts, state, sigma=None, **kwargs
):
assert state.shape[0] == ts.shape[0]
assert state.shape[1] == len(components_name)
assert sigma.shape == state.shape if sigma is not None else True
fig = plt.figure(name_plot)
for i, name in enumerate(components_name):
plt.subplot(len(components_name), 1, i + 1, label=name_plot + " " + name)
plt.plot(ts, state[:, i], label=name_ref, **kwargs)
if sigma is not None:
plt.plot(ts, state[:, i] + 3 * sigma[:, i], "-r", linewidth=0.5, **kwargs)
plt.plot(ts, state[:, i] - 3 * sigma[:, i], "-r", linewidth=0.5, **kwargs)
plt.ylabel(name)
plt.grid(True)
plt.title(name_plot)
plt.xlabel("t (s)")
plt.legend(loc="upper center")
return fig
def plot_error_euclidean(
name_plot, name_ref, components_name, ts, err, sigma, **kwargs
):
assert err.shape[0] == ts.shape[0]
assert err.shape[1] == len(components_name)
assert sigma.shape == err.shape
fig = plt.figure(name_plot)
for i, name in enumerate(components_name):
plt.subplot(len(components_name), 1, i + 1, label=name_plot + " " + name)
plt.plot(ts, err[:, i], label=name_ref, **kwargs)
plt.plot(ts, +3 * sigma[:, i], "-r", linewidth=0.5, **kwargs)
plt.plot(ts, -3 * sigma[:, i], "-r", linewidth=0.5, **kwargs)
plt.ylabel(name)
plt.grid(True)
plt.title(name_plot)
plt.xlabel("t (s)")
plt.legend(loc="upper center")
return fig
# get RPE
def compute_rpe(rpe_ns, ps, ps_gt, yaw, yaw_gt):
ns = ps_gt.shape[0]
print("liger")
print(ns)
print(rpe_ns)
print(ns - rpe_ns)
assert ns - rpe_ns > 100
assert ps.shape == ps_gt.shape
assert yaw.shape == yaw_gt.shape
rpes = []
relative_yaw_errors = []
for i in range(0, ns - rpe_ns, 100):
chunk = ps[i : i + rpe_ns, :]
chunk_gt = ps_gt[i : i + rpe_ns, :]
chunk_yaw = yaw[i : i + rpe_ns, :]
chunk_yaw_gt = yaw_gt[i : i + rpe_ns, :]
initial_error_yaw = wrap_rpy(chunk_yaw[0, :] - chunk_yaw_gt[0, :])
final_error_p_relative = Rotation.from_euler(
"z", initial_error_yaw, degrees=True
).as_matrix().dot((chunk[[-1], :] - chunk[[0], :]).T)[0, :, :].T - (
chunk_gt[[-1], :] - chunk_gt[[0], :]
)
final_error_yaw = wrap_rpy(chunk_yaw[[-1], :] - chunk_yaw_gt[[-1], :])
rpes.append(final_error_p_relative)
relative_yaw_errors.append(wrap_rpy(final_error_yaw - initial_error_yaw))
rpes = np.concatenate(rpes, axis=0)
relative_yaw_errors = np.concatenate(relative_yaw_errors, axis=0)
plt.figure("relative yaw error")
plt.plot(relative_yaw_errors)
plt.figure("rpes list")
plt.plot(rpes)
## compute statistics over z separately
rpe_rmse = np.sqrt(np.mean(np.sum(rpes ** 2, axis=1)))
rpe_rmse_z = np.sqrt(np.mean(rpes[:, 2] ** 2))
relative_yaw_rmse = np.sqrt(np.mean(relative_yaw_errors ** 2))
return rpe_rmse, rpe_rmse_z, relative_yaw_rmse
def compute_rpe_distance(rpe_ns, ps, ps_gt, yaw, yaw_gt):
ns = ps_gt.shape[0]
print("liger")
print(ns)
assert ns - rpe_ns > 100
assert ps.shape == ps_gt.shape
assert yaw.shape == yaw_gt.shape
rpes = []
relative_yaw_errors = []
k = 0
distance_traveled = 0
for i in range(ns):
if i == 0:
continue
distance_traveled += np.linalg.norm(ps[i-1,:] - ps[i,:])
# print(f"Dist: {ps[i,:]}, {ps[k,:]}")
# print(f"Distance Traveled: {distance_traveled}")
if(distance_traveled) >= 5:
chunk = ps[k : i, :]
chunk_gt = ps_gt[k : i, :]
chunk_yaw = yaw[k : i, :]
chunk_yaw_gt = yaw_gt[k : i, :]
initial_error_yaw = wrap_rpy(chunk_yaw[0, :] - chunk_yaw_gt[0, :])
final_error_p_relative = Rotation.from_euler(
"z", initial_error_yaw, degrees=True
).as_matrix().dot((chunk[[-1], :] - chunk[[0], :]).T)[0, :, :].T - (
chunk_gt[[-1], :] - chunk_gt[[0], :]
)
final_error_yaw = wrap_rpy(chunk_yaw[[-1], :] - chunk_yaw_gt[[-1], :])
rpes.append(final_error_p_relative)
relative_yaw_errors.append(wrap_rpy(final_error_yaw - initial_error_yaw))
k += 1
distance_traveled -= np.linalg.norm(ps[k-1,:] - ps[k,:])
if distance_traveled < 0:
distance_traveled = 0
rpes = np.concatenate(rpes, axis=0)
relative_yaw_errors = np.concatenate(relative_yaw_errors, axis=0)
plt.figure("relative yaw error")
plt.plot(relative_yaw_errors)
plt.figure("rpes list")
plt.plot(rpes)
## compute statistics over z separately
rpe_rmse = np.sqrt(np.mean(np.sum(rpes ** 2, axis=1)))
rpe_rmse_z = np.sqrt(np.mean(rpes[:, 2] ** 2))
relative_yaw_rmse = np.sqrt(np.mean(relative_yaw_errors ** 2))
return rpe_rmse, rpe_rmse_z, relative_yaw_rmse
def run(args, dataset):
plt.close("all")
if args.dir is not None:
results_folder = args.dir
else:
results_folder = os.path.join(args.log_dir, dataset)
try:
print("here wtf")
print(os.path.join(results_folder, args.log_filename + ".npy"))
states = np.load(os.path.join(results_folder, args.log_filename + ".npy"))
save_vio_states = True # traj is done
except:
logging.warning(
"Relying on .txt file because .npy was not found. Surely means filter did not finish"
)
states = np.loadtxt(
os.path.join(results_folder, args.log_filename), delimiter=","
) # traj is still processing
save_vio_states = False # traj is done
R_init = states[0, :9].reshape(-1, 3, 3)
r_init = Rotation.from_matrix(R_init)
Rs = states[:, :9].reshape(-1, 3, 3)
rs = Rotation.from_matrix(Rs)
euls = rs.as_euler("xyz", degrees=True)
vs = states[:, 9:12]
ps = states[:, 12:15]
ps_dr = np.cumsum(
states[:, 9:12] * np.diff(states[:, 27:28], prepend=states[0, 27], axis=0),
axis=0,
)
ba = states[:, 15:18]
bg = states[:, 18:21]
accs = states[:, 21:24] # offline calib compensated, scale+bias
gyrs = states[:, 24:27] # offline calib compensated, scale+bias
ts = states[:, 27]
sigma_r = np.sqrt(states[:, 28:31]) * 180.0 / np.pi
sigma_v = np.sqrt(states[:, 31:34])
sigma_p = np.sqrt(states[:, 34:37])
sigma_bg = np.sqrt(states[:, 37:40])
sigma_ba = np.sqrt(states[:, 40:43])
innos = states[:, 43:46]
meas = states[:, 46:49]
pred = states[:, 49:52]
meas_sigma = states[:, 52:55]
inno_sigma = states[:, 55:58]
nobs_sigma = states[:, 58 : 58 + 16]
N = ts.shape[0]
# get RoNIN concatenation results
if args.ronin_dir is not None:
try:
ronin = np.loadtxt(
osp.join(args.ronin_dir, dataset + ".txt"), delimiter=","
)
logging.info(
f"Reading ronin data from {osp.join(args.ronin_dir, dataset)}.txt"
)
except:
ronin = np.loadtxt(
osp.join(args.ronin_dir, dataset, "trajectory.txt"), delimiter=","
)
logging.info(
f"Reading ronin data from {osp.join(args.ronin_dir, dataset, 'trajectory.txt')}"
)
ronin_ts = ronin[:, 0]
ronin_p = ronin[:, 1:4]
if ronin_ts[0] > ts[0]:
ronin_ts = np.insert(ronin_ts, 0, ts[0])
ronin_p = np.concatenate([ronin_p[0].reshape(1, 3), ronin_p], axis=0)
if ronin_ts[-1] < ts[-1]:
ronin_ts = np.insert(ronin_ts, -1, ts[-1])
ronin_p = np.concatenate([ronin_p, ronin_p[-1].reshape(1, 3)], axis=0)
ronin_p = interp1d(ronin_ts, ronin_p, axis=0)(ts)
# get vio states
if args.plot_sim is False:
if os.path.exists(os.path.join(results_folder, "vio_states.npy")):
vio_states = np.load(os.path.join(results_folder, "vio_states.npy"))
vio_euls = vio_states[:, :3]
vio_p = vio_states[:, 3:6]
vio_v = vio_states[:, 6:9]
vio_ba = vio_states[:, 9:12]
vio_bg = vio_states[:, 12:15]
vio_disp = vio_states[:, 15:18]
vio_ba_b = vio_states[:, 18:21]
vio_bg_b = vio_states[:, 21:24]
vio_accelScaleInv_flat = vio_states[:, 24:33]
vio_gyroScaleInv_flat = vio_states[:, 33:42]
vio_accelScaleInv = vio_accelScaleInv_flat.reshape((-1, 3, 3))
vio_gyroScaleInv = vio_gyroScaleInv_flat.reshape((-1, 3, 3))
else:
vio_states = np.loadtxt(
os.path.join(args.root_dir, dataset, "evolving_state.txt"),
delimiter=",",
)
vio_calibs = np.loadtxt(
os.path.join(args.root_dir, dataset, "calib_state.txt"), delimiter=","
)
vio_ts = vio_states[:, 0] * 1e-6
vio_rq = vio_states[:, 1:5]
vio_p = vio_states[:, 5:8]
vio_v = vio_states[:, 8:11]
vio_r = Rotation.from_quat(
np.concatenate(
[vio_rq[:, 1:4], np.expand_dims(vio_rq[:, 0], axis=1)], axis=1
)
)
vio_euls = vio_r.as_euler("xyz", degrees=True)
vio_calib_ts = vio_calibs[:, 0] * 1e-6
vio_accelScaleInv = vio_calibs[:, 1:10].reshape((-1, 3, 3))
vio_gyroScaleInv = vio_calibs[:, 10:19].reshape((-1, 3, 3))
vio_gyroGSense = vio_calibs[:, 19:28].reshape((-1, 3, 3))
vio_ba = vio_calibs[:, 28:31]
vio_bg = vio_calibs[:, 31:34]
vio_pj_idx = np.searchsorted(vio_ts, ts) - 1
vio_pi_idx = np.searchsorted(vio_ts, ts - args.displacement_time) - 1
vio_pj = vio_p[vio_pj_idx, :]
vio_pi = vio_p[vio_pi_idx, :]
vio_disp = vio_pj - vio_pi
vio_Ri = vio_r[vio_pi_idx].as_matrix()
ri_z = Rotation.from_matrix(vio_Ri).as_euler("xyz")[:, 2]
vio_Riz = Rotation.from_euler("z", ri_z).as_matrix()
vio_Rizt = np.transpose(vio_Riz, (0, 2, 1))
vio_disp = np.squeeze(
np.matmul(vio_Rizt, np.expand_dims(vio_disp, axis=-1))
)
vio_uw_euls = unwrap_rpy(vio_euls)
if vio_ts[0] > ts[0]:
vio_ts = np.insert(vio_ts, 0, ts[0])
vio_uw_euls = np.concatenate(
[vio_uw_euls[0].reshape(1, 3), vio_uw_euls], axis=0
)
vio_p = np.concatenate([vio_p[0].reshape(1, 3), vio_p], axis=0)
vio_v = np.concatenate([vio_v[0].reshape(1, 3), vio_v], axis=0)
if vio_ts[-1] < ts[-1]:
vio_ts = np.insert(vio_ts, -1, ts[-1])
vio_uw_euls = np.concatenate(
[vio_uw_euls, vio_uw_euls[-1].reshape(1, 3)], axis=0
)
vio_p = np.concatenate([vio_p, vio_p[-1].reshape(1, 3)], axis=0)
vio_v = np.concatenate([vio_v, vio_v[-1].reshape(1, 3)], axis=0)
vio_uw_euls_interp = interp1d(vio_ts, vio_uw_euls, axis=0)(ts)
vio_euls = wrap_rpy(vio_uw_euls_interp)
vio_p = interp1d(vio_ts, vio_p, axis=0)(ts)
vio_v = interp1d(vio_ts, vio_v, axis=0)(ts)
# This computes the bias in the non-scaled sensor frame (I think)
vio_accelScaleInv_flat = vio_accelScaleInv.reshape((-1, 9))
vio_gyroScaleInv_flat = vio_gyroScaleInv.reshape((-1, 9))
if vio_calib_ts[0] > ts[0]:
vio_calib_ts = np.insert(vio_calib_ts, 0, ts[0])
vio_ba = np.concatenate([vio_ba[0].reshape(1, 3), vio_ba], axis=0)
vio_bg = np.concatenate([vio_bg[0].reshape(1, 3), vio_bg], axis=0)
vio_accelScaleInv_flat = np.concatenate(
[vio_accelScaleInv_flat[0].reshape(1, 9), vio_accelScaleInv_flat],
axis=0,
)
vio_gyroScaleInv_flat = np.concatenate(
[vio_gyroScaleInv_flat[0].reshape(1, 9), vio_gyroScaleInv_flat],
axis=0,
)
if vio_calib_ts[-1] < ts[-1]:
vio_calib_ts = np.insert(vio_calib_ts, -1, ts[-1])
vio_ba = np.concatenate([vio_ba, vio_ba[-1].reshape(1, 3)], axis=0)
vio_bg = np.concatenate([vio_bg, vio_bg[-1].reshape(1, 3)], axis=0)
vio_accelScaleInv_flat = np.concatenate(
[vio_accelScaleInv_flat, vio_accelScaleInv_flat[-1].reshape(1, 9)],
axis=0,
)
vio_gyroScaleInv_flat = np.concatenate(
[vio_gyroScaleInv_flat, vio_gyroScaleInv_flat[-1].reshape(1, 9)],
axis=0,
)
vio_ba = interp1d(vio_calib_ts, vio_ba, axis=0)(ts)
vio_bg = interp1d(vio_calib_ts, vio_bg, axis=0)(ts)
vio_accelScaleInv_flat = interp1d(
vio_calib_ts, vio_accelScaleInv_flat, axis=0
)(ts)
vio_gyroScaleInv_flat = interp1d(
vio_calib_ts, vio_gyroScaleInv_flat, axis=0
)(ts)
vio_accelScaleInv = vio_accelScaleInv_flat.reshape((-1, 3, 3))
vio_gyroScaleInv = vio_gyroScaleInv_flat.reshape((-1, 3, 3))
vio_ba_temp = np.expand_dims(vio_ba, axis=-1)
vio_bg_temp = np.expand_dims(vio_bg, axis=-1)
vio_ba_b = np.squeeze(
np.matmul(np.linalg.inv(vio_accelScaleInv), vio_ba_temp)
)
vio_bg_b = np.squeeze(
np.matmul(np.linalg.inv(vio_gyroScaleInv), vio_bg_temp)
)
if save_vio_states:
vio_states = np.concatenate(
[
vio_euls,
vio_p,
vio_v,
vio_ba,
vio_bg,
vio_disp,
vio_ba_b,
vio_bg_b,
vio_accelScaleInv_flat,
vio_gyroScaleInv_flat,
],
axis=1,
)
np.save(os.path.join(results_folder, "vio_states.npy"), vio_states)
else:
logging.warning(
"Not saving vio_states.npy because traj is still processing"
)
# get simulation states
if args.plot_sim:
sim_data = np.loadtxt(
args.sim_data_path,
delimiter=",",
usecols=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16),
)
sim_ts = sim_data[:, 0]
sim_p = sim_data[:, 1:4]
sim_rq = sim_data[:, 4:8]
sim_v = sim_data[:, 11:14]
# acc_all = sim_data[:,8:11]
# gyr_all = sim_data[:,14:17]
sim_r = Rotation.from_quat(sim_rq)
sim_euls = sim_r.as_euler("xyz", degrees=True)
sim_pj_idx = np.searchsorted(sim_ts, ts) - 1
sim_pi_idx = np.searchsorted(sim_ts, ts - args.displacement_time) - 1
sim_pj = sim_p[sim_pj_idx, :]
sim_pi = sim_p[sim_pi_idx, :]
sim_disp = sim_pj - sim_pi
ri_z = sim_r[sim_pi_idx].as_euler("xyz")[:, 2]  # as_matrix() returns a plain ndarray; call as_euler() on the Rotation directly
sim_Riz = Rotation.from_euler("z", ri_z).as_matrix()
sim_Rizt = np.transpose(sim_Riz, (0, 2, 1))
sim_disp = np.squeeze(np.matmul(sim_Rizt, np.expand_dims(sim_disp, axis=-1)))
sim_uw_euls = unwrap_rpy(sim_euls)
if sim_ts[0] > ts[0]:
sim_ts = np.insert(sim_ts, 0, ts[0])
sim_uw_euls = np.concatenate(
[sim_uw_euls[0].reshape(1, 3), sim_uw_euls], axis=0
)
sim_p = np.concatenate([sim_p[0].reshape(1, 3), sim_p], axis=0)
sim_v = np.concatenate([sim_v[0].reshape(1, 3), sim_v], axis=0)
if sim_ts[-1] < ts[-1]:
sim_ts = np.insert(sim_ts, -1, ts[-1])
sim_uw_euls = np.concatenate(
[sim_uw_euls, sim_uw_euls[-1].reshape(1, 3)], axis=0
)
sim_p = np.concatenate([sim_p, sim_p[-1].reshape(1, 3)], axis=0)
sim_v = np.concatenate([sim_v, sim_v[-1].reshape(1, 3)], axis=0)
sim_uw_euls_interp = interp1d(sim_ts, sim_uw_euls, axis=0)(ts)
sim_euls = wrap_rpy(sim_uw_euls_interp)
sim_p = interp1d(sim_ts, sim_p, axis=0)(ts)
sim_v = interp1d(sim_ts, sim_v, axis=0)(ts)
sim_ba = np.zeros((ts.shape[0], 3)) + np.array([0.3, -0.2, 0.4])
sim_bg = np.zeros((ts.shape[0], 3)) + np.array([0.0005, 0.002, -0.001])
if args.plot_sim:
ref_type = "sim"
ref_p = sim_p
ref_v = sim_v
ref_bg = sim_bg
ref_ba = sim_ba
ref_euls = sim_euls
ref_disp = sim_disp
else:
ref_type = "vio"
ref_p = vio_p
ref_v = vio_v
ref_bg = vio_bg
ref_ba = vio_ba
ref_euls = vio_euls
ref_disp = vio_disp
# obtain biases in the body frame in the same unit
attitude_filter_path = osp.join(args.root_dir, dataset, "calib_state.txt")
(
init_gyroScaleInv,
init_gyroBias,
init_gyroGSense,
init_accelScaleInv,
init_accelBias,
) = load_aekf_calibration(attitude_filter_path)
if args.body_bias:
ba_temp = np.expand_dims(ba, axis=-1)
bg_temp = np.expand_dims(bg, axis=-1)
ba_b = np.squeeze(np.matmul(np.linalg.inv(init_accelScaleInv), ba_temp))
bg_b = np.squeeze(np.matmul(np.linalg.inv(init_gyroScaleInv), bg_temp))
ba = ba_b
bg = bg_b
# load aekf rotation
aekf_ts, aekf_R = load_aekf_rotation(attitude_filter_path)
aekf_euls = unwrap_rpy(aekf_R.as_euler("xyz", degrees=True))
# plotting
N = ts.shape[0]
start_idx = 2000 # 2 s
end_idx = N - 1
start_ts = ts[start_idx]
end_ts = ts[end_idx]
# align diverse trajectory sources
ts = ts - start_ts
aekf_ts = aekf_ts - start_ts
try:
# align at start_ts
aekf_euls[:, 2] -= (
interp1d(aekf_ts, aekf_euls[:, 2])(ts[start_idx]) - ref_euls[start_idx, 2]
)
    except ValueError:
        # fall back: align at the first aekf timestamp instead
        aekf_euls[:, 2] -= aekf_euls[0, 2] - interp1d(ts, ref_euls[:, 2])(aekf_ts[0])
    aekf_euls_time_aligned = interp1d(
        aekf_ts, aekf_euls, axis=0, bounds_error=False, fill_value=np.nan
    )(ts)
aekf_euls_time_aligned = wrap_rpy(aekf_euls_time_aligned)
if args.ronin_dir is not None:
ronin_p = ronin_p - (ronin_p[start_idx, :] - ref_p[start_idx, :])
ps = ps - (ps[start_idx, :] - ref_p[start_idx, :])
euls[:, 2] = euls[:, 2] - (euls[start_idx, 2] - ref_euls[start_idx, 2])
euls = wrap_rpy(euls)
ps_gt = ref_p[start_idx:end_idx, :]
euls_gt = ref_euls[start_idx:end_idx, :]
euls_aekf = aekf_euls_time_aligned[start_idx:end_idx, :]
# metrics computation
metric_map = {"filter": {}, "ronin": {}}
    # align the filter trajectory to the ground truth at the first evaluated sample
ps_filter = ps[start_idx:end_idx, :]
euls_filter = euls[start_idx:end_idx, :]
ps_filter = ps_filter - (ps_filter[0, :] - ps_gt[0, :])
# get drift and ATE
ps_diff = ps_gt[1:, :] - ps_gt[:-1, :]
traj_length = np.sum(np.linalg.norm(ps_diff, axis=1))
drift_filter = np.linalg.norm(ps_filter[-1, :] - ps_gt[-1, :])
angular_drift_filter = np.linalg.norm(euls[-1, 2] - ref_euls[-1, 2])
filter_heading_error = wrap_rpy(euls - ref_euls)[:, 2]
ate_filter = np.sqrt(np.mean(np.linalg.norm(ps_filter - ps_gt, axis=1) ** 2))
metric_map["filter"]["drift_ratio"] = drift_filter / traj_length
metric_map["filter"]["ate"] = ate_filter
metric_map["filter"]["mhe"] = np.sqrt(
np.nansum(filter_heading_error ** 2)
/ np.count_nonzero(~(np.isnan(filter_heading_error)))
)
metric_map["filter"]["angular_drift_deg_hour"] = (
angular_drift_filter / ts.max() * 3600
)
logging.info(f"drift of filter {metric_map['filter']['drift_ratio']}")
logging.info(f"ATE of filter {metric_map['filter']['ate']}")
logging.info(f"Mean Heading error of filter {metric_map['filter']['mhe']}")
def compute_rpe_filter(ns_rpe):
rpe_rmse, rpe_rmse_z, relative_yaw_rmse = compute_rpe_distance(
ns_rpe, ps_filter, ps_gt, euls_filter[:, [2]], euls_gt[:, [2]]
)
logging.info(f"RPE RMSE of filter over {1e-3*ns_rpe}s: {rpe_rmse}")
logging.info(f"RPE RMSE Z of filter over {1e-3*ns_rpe}s: {rpe_rmse_z}")
logging.info(f"RPE RMSE Yaw of filter over {1e-3*ns_rpe}s: {relative_yaw_rmse}")
metric_map["filter"]["rpe_rmse_" + str(ns_rpe)] = rpe_rmse
metric_map["filter"]["rpe_rmse_z_" + str(ns_rpe)] = rpe_rmse_z
metric_map["filter"]["relative_yaw_rmse_" + str(ns_rpe)] = relative_yaw_rmse
if args.rpe_1 is True:
compute_rpe_filter(1000) # 1 s
if args.rpe_10 is True:
compute_rpe_filter(10000) # 10 s
if args.rpe_100 is True:
compute_rpe_filter(100000) # 100 s
if args.ronin_dir is not None:
ps_ronin = ronin_p[start_idx:end_idx, :]
ps_ronin = ps_ronin - (ps_ronin[0, :] - ps_gt[0, :])
drift_ronin = np.linalg.norm(ps_ronin[-1, :] - ps_gt[-1, :])
ate_ronin = np.sqrt(np.mean(np.linalg.norm(ps_ronin - ps_gt, axis=1) ** 2))
angular_drift_ronin = np.linalg.norm(
aekf_euls_time_aligned[-1, 2] - ref_euls[-1, 2]
)
heading_error_ronin = wrap_rpy(aekf_euls_time_aligned - ref_euls)[:, 2]
metric_map["ronin"]["drift_ratio"] = drift_ronin / traj_length
metric_map["ronin"]["ate"] = ate_ronin
metric_map["ronin"]["mhe"] = np.sqrt(
np.nansum(heading_error_ronin ** 2)
/ np.count_nonzero(~(np.isnan(heading_error_ronin)))
)
metric_map["ronin"]["angular_drift_deg_hour"] = (
angular_drift_ronin / ts.max() * 3600
)
logging.info(f"drift of ronin {metric_map['ronin']['drift_ratio']}")
logging.info(f"ATE of ronin {metric_map['ronin']['ate']}")
logging.info(f"Mean Heading error of ronin {metric_map['ronin']['mhe']}")
def compute_rpe_ronin(ns_rpe):
rpe_rmse, rpe_rmse_z, relative_yaw_rmse = compute_rpe(
ns_rpe, ps_ronin, ps_gt, euls_aekf[:, [2]], euls_gt[:, [2]]
)
logging.info(f"RPE RMSE of ronin over 1s: {rpe_rmse}")
logging.info(f"RPE RMSE Z of ronin over 1s: {rpe_rmse_z}")
logging.info(f"RPE RMSE Yaw of ronin over 1s: {relative_yaw_rmse}")
metric_map["ronin"]["rpe_rmse_" + str(ns_rpe)] = rpe_rmse
metric_map["ronin"]["rpe_rmse_z_" + str(ns_rpe)] = rpe_rmse_z
metric_map["ronin"]["relative_yaw_rmse_" + str(ns_rpe)] = relative_yaw_rmse
if args.rpe_1 is True:
compute_rpe_ronin(1000)
if args.rpe_10 is True:
compute_rpe_ronin(10000)
if args.rpe_100 is True:
compute_rpe_ronin(100000)
if args.make_plots is False:
return metric_map
# Plot results
idxs = slice(start_idx, end_idx)
if not args.plot_sim:
plt.figure("Calibration accelerometer vio vs init")
plt.plot(ts, vio_accelScaleInv[:, 0, 0], label="1")
plt.plot(ts, vio_accelScaleInv[:, 1, 1], label="2")
plt.plot(ts, vio_accelScaleInv[:, 2, 2], label="3")
plt.plot(ts, vio_accelScaleInv[:, 0, 1], label="12")
plt.plot(ts, vio_accelScaleInv[:, 0, 2], label="13")
plt.plot(ts, vio_accelScaleInv[:, 1, 2], label="23")
plt.plot(
ts,
np.ones_like(vio_accelScaleInv[:, 0, 0]) * init_accelScaleInv[0, 0],
label="1",
)
plt.plot(
ts,
np.ones_like(vio_accelScaleInv[:, 1, 1]) * init_accelScaleInv[1, 1],
label="2",
)
plt.plot(
ts,
np.ones_like(vio_accelScaleInv[:, 2, 2]) * init_accelScaleInv[2, 2],
label="2",
)
plt.plot(
ts,
np.ones_like(vio_accelScaleInv[:, 0, 1]) * init_accelScaleInv[0, 1],
label="12",
)
plt.plot(
ts,
|
np.ones_like(vio_accelScaleInv[:, 0, 2])
|
numpy.ones_like
|
"""
Project: Visual Odometry
Name : Heru-05 | M09158023
Date
"""
import os
import numpy as np
import cv2
import random
import string
# draw a list of points with different random colors on an input image
def draw_points(img, pts, radius=5):
if img.ndim < 3:
img = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
for pt in pts:
color = tuple(np.random.randint(0,255,3).tolist())
img = cv2.circle(img,tuple(pt),radius,color,-1)
return img
# draw corresponding points with the same random color on two separate images
def draw_points2(img1, img2, pts1, pts2, radius=5):
if img1.ndim < 3:
img1 = cv2.cvtColor(img1,cv2.COLOR_GRAY2BGR)
if img2.ndim < 3:
img2 = cv2.cvtColor(img2,cv2.COLOR_GRAY2BGR)
for pt1,pt2 in zip(pts1,pts2):
color = tuple(np.random.randint(0,255,3).tolist())
img1 = cv2.circle(img1,tuple(pt1),radius,color,-1)
img2 = cv2.circle(img2,tuple(pt2),radius,color,-1)
return img1,img2
# draw lines on an image; line_edges is assumed to be a list of ((x0,y0),(x1,y1)) endpoint pairs
def draw_lines(img, line_edges, pts=None, radius=5):
pt = None
for i,l in enumerate(line_edges):
color = tuple(np.random.randint(0,255,3).tolist())
x0,y0 = l[0]
x1,y1 = l[1]
img = cv2.line(img, (int(x0),int(y0)), (int(x1),int(y1)), color,1)
if pts is not None:
pt = pts[i]
img = cv2.circle(img,tuple(pt),radius,color,-1)
return img
# combine two images horizontally
def combine_images_horizontally(img1, img2):
if img1.ndim<=2:
img1 = cv2.cvtColor(img1,cv2.COLOR_GRAY2RGB)
if img2.ndim<=2:
img2 = cv2.cvtColor(img2,cv2.COLOR_GRAY2RGB)
h1, w1 = img1.shape[:2]
h2, w2 = img2.shape[:2]
img3 = np.zeros((max(h1, h2), w1+w2,3), np.uint8)
img3[:h1, :w1,:3] = img1
img3[:h2, w1:w1+w2,:3] = img2
return img3
# combine two images vertically
def combine_images_vertically(img1, img2):
if img1.ndim<=2:
img1 = cv2.cvtColor(img1,cv2.COLOR_GRAY2RGB)
if img2.ndim<=2:
img2 = cv2.cvtColor(img2,cv2.COLOR_GRAY2RGB)
h1, w1 = img1.shape[:2]
h2, w2 = img2.shape[:2]
img3 = np.zeros((h1+h2, max(w1, w2),3), np.uint8)
img3[:h1, :w1,:3] = img1
img3[h1:h1+h2,:w2,:3] = img2
return img3
# draw feature matches (images are combined horizontally)
# input:
# - kps1 = [Nx2] array of keypoint coordinates
# - kps2 = [Nx2] array of keypoint coordinates
# - kps1_sizes = [Nx1] array of keypoint sizes
# - kps2_sizes = [Nx1] array of keypoint sizes
# output: drawn image
def draw_feature_matches_horizontally(img1, img2, kps1, kps2, kps1_sizes=None, kps2_sizes=None):
img3 = combine_images_horizontally(img1,img2)
h1,w1 = img1.shape[:2]
N = len(kps1)
default_size = 2
if kps1_sizes is None:
kps1_sizes = np.ones(N,dtype=np.int32)*default_size
if kps2_sizes is None:
kps2_sizes = np.ones(N,dtype=np.int32)*default_size
for i,pts in enumerate(zip(kps1, kps2)):
p1, p2 = np.rint(pts).astype(int)
a,b = p1.ravel()
c,d = p2.ravel()
size1 = kps1_sizes[i]
size2 = kps2_sizes[i]
color = tuple(np.random.randint(0,255,3).tolist())
#cv2.line(img3, (a,b),(c,d), color, 1) # optic flow style
        cv2.line(img3, (a,b),(c+w1,d), color, 1) # join corresponding points
cv2.circle(img3,(a,b),2, color,-1)
cv2.circle(img3,(a,b), color=(0, 255, 0), radius=int(size1), thickness=1) # draw keypoint size as a circle
cv2.circle(img3,(c+w1,d),2, color,-1)
cv2.circle(img3,(c+w1,d), color=(0, 255, 0), radius=int(size2), thickness=1) # draw keypoint size as a circle
return img3
# draw feature matches (images are combined vertically)
# input:
# - kps1 = [Nx2] array of keypoint coordinates
# - kps2 = [Nx2] array of keypoint coordinates
# - kps1_sizes = [Nx1] array of keypoint sizes
# - kps2_sizes = [Nx1] array of keypoint sizes
# output: drawn image
def draw_feature_matches_vertically(img1, img2, kps1, kps2, kps1_sizes=None, kps2_sizes=None):
img3 = combine_images_vertically(img1,img2)
h1,w1 = img1.shape[:2]
N = len(kps1)
default_size = 2
if kps1_sizes is None:
kps1_sizes = np.ones(N,dtype=np.int32)*default_size
if kps2_sizes is None:
kps2_sizes = np.ones(N,dtype=np.int32)*default_size
for i,pts in enumerate(zip(kps1, kps2)):
p1, p2 = np.rint(pts).astype(int)
a,b = p1.ravel()
c,d = p2.ravel()
size1 = kps1_sizes[i]
size2 = kps2_sizes[i]
color = tuple(np.random.randint(0,255,3).tolist())
#cv2.line(img3, (a,b),(c,d), color, 1) # optic flow style
        cv2.line(img3, (a,b),(c,d+h1), color, 1) # join corresponding points
cv2.circle(img3,(a,b),2, color,-1)
cv2.circle(img3,(a,b), color=(0, 255, 0), radius=int(size1), thickness=1) # draw keypoint size as a circle
cv2.circle(img3,(c,d+h1),2, color,-1)
cv2.circle(img3,(c,d+h1), color=(0, 255, 0), radius=int(size2), thickness=1) # draw keypoint size as a circle
return img3
# draw feature matches (images are combined horizontally or vertically depending on the flag)
# input:
# - kps1 = [Nx2] array of keypoint coordinates
# - kps2 = [Nx2] array of keypoint coordinates
# - kps1_sizes = [Nx1] array of keypoint sizes
# - kps2_sizes = [Nx1] array of keypoint sizes
# output: drawn image
def draw_feature_matches(img1, img2, kps1, kps2, kps1_sizes=None, kps2_sizes=None, horizontal=True):
if horizontal:
return draw_feature_matches_horizontally(img1, img2, kps1, kps2, kps1_sizes, kps2_sizes)
else:
return draw_feature_matches_vertically(img1, img2, kps1, kps2, kps1_sizes, kps2_sizes)
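# Hedged usage sketch (synthetic frames and keypoints, not from the original project):
# exercise draw_feature_matches on two blank images; the output file name is arbitrary.
def _demo_draw_feature_matches():
    img1 = np.zeros((480, 640, 3), np.uint8)
    img2 = np.zeros((480, 640, 3), np.uint8)
    kps1 = np.random.randint(50, 430, (40, 2))
    kps2 = kps1 + np.random.randint(-5, 6, (40, 2))
    vis = draw_feature_matches(img1, img2, kps1, kps2, horizontal=True)
    cv2.imwrite("matches_demo.png", vis)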
def draw_random_lines(img,N=200):
lineType = 8
(h, w) = img.shape[:2]
for i in range(N):
pt1x, pt2x = np.random.randint( -0.5*w, w*1.5, 2)
pt1y, pt2y = np.random.randint( -0.5*h, h*1.5, 2)
color = tuple(np.random.randint(0,255,3).tolist())
thickness = np.random.randint(1, 10)
cv2.line(img, (pt1x,pt1y), (pt2x,pt2y), color, thickness, lineType)
def draw_random_rects(img,N=100):
lineType = 8
(h, w) = img.shape[:2]
for i in range(N):
pt1x, pt2x = np.random.randint( 0, w, 2)
pt1y, pt2y = np.random.randint( 0, h, 2)
color = tuple(np.random.randint(0,255,3).tolist())
thickness = max(np.random.randint(-3, 10),-1)
cv2.rectangle(img, (pt1x,pt1y), (pt2x,pt2y), color, thickness, lineType)
def draw_random_ellipses(img, N=100):
lineType = 8
(h, w) = img.shape[:2]
axis_ext = w*0.1
for i in range(N):
cx = np.random.randint( 0, w )
cy = np.random.randint( 0, h )
width, height = np.random.randint(0, axis_ext, 2)
angle = np.random.randint(0, 180)
color = tuple(
|
np.random.randint(0,255,3)
|
numpy.random.randint
|
# get scores for F1 Threshold(rho) experiments for APIP flavors
import re
import os
import sys
import random
import argparse
import json
from datetime import datetime
from collections import OrderedDict
import msgpack, time
from tqdm import tqdm
import numpy as np
import torch
from torch.autograd import Variable
from apip import utils
from apip.model import DocReaderModel
parser = argparse.ArgumentParser(
description='Train a Document Reader model.'
)
parser = utils.add_arguments(parser)
args = parser.parse_args()
if not args.drop_nn:
args.dropout_rate = 0.
# set model dir
model_dir = args.model_dir
model_dir = os.path.abspath(model_dir)
torch.set_printoptions(precision=10)
# save model configuration
s = "\nParameters:\n"
for k in sorted(args.__dict__):
s += "{} = {} \n".format(k, args.__dict__[k])
print(s)
# set random seed
seed = args.seed if args.seed >= 0 else int(random.random()*1000)
print ('seed:', seed)
random.seed(seed)
torch.manual_seed(seed)
if args.cuda:
torch.cuda.manual_seed(seed)
def accuracies_on_ds(data_file, inputs, model, n_ans):
train, dev, dev_y, train_y, embedding, opt, q_labels, ql_mask = inputs
model.opt['interpret'] = False
batches = utils.BatchGen(dev, batch_size=args.batch_size, evaluation=True, gpu=args.cuda)
predictions = []
pred_answers = {}
for i, batch in enumerate(batches):
pred = model.predict(batch)[0]
predictions.extend(pred)
em, f1 = utils.score(predictions, dev_y)
print("[EM: {0:.2f} F1: {1:.2f}] on {2}".format(em, f1, data_file))
batches = utils.BatchGen(dev, batch_size=args.batch_size, evaluation=True, gpu=args.cuda, shuffle=True)
model.opt['interpret'] = True
t_a, t_total_a = {0.1:0, 0.2:0, 0.3:0, 0.4:0, 0.5:0, 0.6:0, 0.7:0, 0.8:0, 0.9:0}, 0
f1s_a = []; ovs_a = []
# evaluate the model for all interpretations and all answers
# if f1 score for all GT answers is > p then count answer as correct
for i, batch in tqdm(enumerate(batches)):
i_predictions = []
truth = np.take(dev_y, batches.indices[i], 0)
if args.n_actions>0:
for a in range(args.n_actions):
latent_a = Variable(torch.ones(batch[0].size(0))*a).long().cuda()
pred = model.predict_inter(batch, latent_a=latent_a)
i_predictions.append(pred[0])
else:
i_predictions = model.predict(batch)[0]
for b in range(batch[0].size(0)):
f1s = []
for ta in truth[b]:
f1_v = []
for a in range(args.n_actions):
_, f1_a = utils.score_test_alli([i_predictions[a][b]], [[ta]])
f1_v += [f1_a]
if args.n_actions>0:
f1s += [max(f1_v)]
else:
_, f1_v = utils.score_test_alli([i_predictions[b]], [[ta]])
f1s += [f1_v]
f1s = np.array(f1s)
for p in t_a.keys():
t_a[p] = t_a[p] + int((f1s>p).sum() == n_ans)
f1_i = []; ov_i = []
for a in range(args.n_actions):
_, f1_a = utils.score_test_alli([i_predictions[a][b]], [truth[b]])
ov_a = utils.overlap([i_predictions[a][b]], [truth[b]])
f1_i += [f1_a]; ov_i += [ov_a]
if args.n_actions == 0:
_, f1_i = utils.score_test_alli([i_predictions[b]], [truth[b]])
ov_i = utils.overlap([i_predictions[b]], [truth[b]])
f1s_a += [f1_i]; ovs_a += [ov_i]
t_total_a += batch[0].size(0)
f1s_a = np.array(f1s_a); ovs_a = np.array(ovs_a)
return t_total_a, f1s_a, ovs_a, t_a
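# Illustrative sketch (not part of the original experiment code): how the rho-threshold
# counting inside accuracies_on_ds works on toy F1 values; the numbers are made up.
def _demo_threshold_counting():
    f1s = np.array([0.35, 0.62, 0.90])  # best F1 against each of n_ans = 3 GT answers
    thresholds = [0.1, 0.3, 0.5, 0.7]
    # an example counts as correct at rho only if every GT answer is matched with F1 > rho
    return {p: int((f1s > p).sum() == len(f1s)) for p in thresholds}
    # -> {0.1: 1, 0.3: 1, 0.5: 0, 0.7: 0}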
def main():
print('[program starts.]')
args.data_file = 'SQuAD/data_a2.msgpack'
train, dev, dev_y, train_y, embedding, opt, q_labels, ql_mask = utils.load_data(vars(args), args)
if args.resume:
print('[loading previous model...]')
checkpoint = torch.load(os.path.join(model_dir, args.restore_dir, args.resume))
if args.resume_options:
opt = checkpoint['config']
state_dict = checkpoint['state_dict']
model = DocReaderModel(opt, embedding, state_dict)
else:
raise RuntimeError('Include checkpoint of the trained model')
if args.cuda:
model.cuda()
inputs = [train, dev, dev_y, train_y, embedding, opt, q_labels, ql_mask]
t_total_a2, f1s_a2, ovs_a2, t_a2 = accuracies_on_ds('SQuAD/data_a2.msgpack', inputs, model, 2)
args.data_file = 'SQuAD/data_a3.msgpack'
train, dev, dev_y, train_y, embedding, opt, q_labels, ql_mask = utils.load_data(vars(args), args)
inputs = [train, dev, dev_y, train_y, embedding, opt, q_labels, ql_mask]
t_total_a3, f1s_a3, ovs_a3, t_a3 = accuracies_on_ds('SQuAD/data_a3.msgpack', inputs, model, 3)
args.data_file = 'SQuAD/data_a1.msgpack'
train, dev, dev_y, train_y, embedding, opt, q_labels, ql_mask = utils.load_data(vars(args), args)
inputs = [train, dev, dev_y, train_y, embedding, opt, q_labels, ql_mask]
t_total_a1, f1s_a1, ovs_a1, t_a1 = accuracies_on_ds('SQuAD/data_a1.msgpack', inputs, model, 1)
def toscore(score, total):
d = {}
for p,s in score.items():
d[p] = round(100.*s/total, 2)
td = OrderedDict(sorted(d.items(), key=lambda t: t[0]))
return td
print("ratio |a|=1: ", toscore(t_a1, t_total_a1), t_total_a1)
print("ratio |a|=2: ", toscore(t_a2, t_total_a2), t_total_a2)
print("ratio |a|=3: ", toscore(t_a3, t_total_a3), t_total_a3)
def toscore2(score):
return round(100. * score.sum() / len(score), 2)
axis = 1
if args.n_actions > 0:
print("[max F1_a1: {} F1_a2: {} F1_a3: {}]".format(json.dumps(toscore2(np.max(f1s_a1, axis)),toscore2(
|
np.max(f1s_a2, axis)
|
numpy.max
|
import math
from collections import defaultdict
import cv2
import time
import argparse
import torch
from sklearn import preprocessing
import numpy as np
import posenet
from scipy.spatial.distance import cosine, cdist
from fastdtw import fastdtw
import re
EPS=0.000001
PART_NAMES = [
"leftShoulder", "rightShoulder", "leftElbow", "rightElbow", "leftWrist", "rightWrist",
"leftHip", "rightHip", "leftKnee", "rightKnee", "leftAnkle", "rightAnkle"
]
def normalize(pose_scores, keypoint_scores, keypoint_coords, thresh=0.1):
keypoint_scores = keypoint_scores.reshape((17, -1))
keypoint_coords = keypoint_coords.reshape((17, 2))
# Step 1: filter out bad scores
mask = (keypoint_scores.ravel() > thresh)
if not np.any(mask):
return {}
# Step 2: Crop
min_x = np.min(keypoint_coords[mask, 0])
min_y = np.min(keypoint_coords[mask, 1])
keypoint_coords[:, 0] -= min_x
keypoint_coords[:, 1] -= min_y
# Step 3: Normalize
normalized_coords = preprocessing.normalize(keypoint_coords, norm='l2')
# Step 4: Convert to dict
output = {}
for i in range(17):
if mask[i]:
output[posenet.PART_NAMES[i]] = (normalized_coords[i, 0], normalized_coords[i, 1])
return output
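# Hedged usage sketch (synthetic scores and coordinates, not real PoseNet output):
# shapes follow the reshape calls above, i.e. a single pose with 17 keypoints.
def _demo_normalize():
    pose_scores = np.ones(1)
    keypoint_scores = np.random.uniform(0.0, 1.0, (1, 17))
    keypoint_coords = np.random.uniform(0.0, 300.0, (1, 17, 2))
    # returns a dict mapping posenet.PART_NAMES entries to L2-normalized coordinates
    return normalize(pose_scores, keypoint_scores, keypoint_coords, thresh=0.1)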
def to_timeseries(dictionaries):
# combines list of dictionaries
ts = defaultdict(list)
# Iterate every part name and combine part values
for mini_dict in dictionaries:
for k, v in mini_dict.items():
ts[k].append(v)
return ts
def crop_dict(pose_dict):
if pose_dict is None:
return None
min_x = 10**6
min_y = 10**6
for v in pose_dict.values():
if v[0] <= min_x:
min_x = v[0]
if v[1] <= min_y:
min_y = v[1]
for k in pose_dict.keys():
pose_dict[k][0] -= min_x
pose_dict[k][1] -= min_y
return pose_dict
def DTW(dict1, dict2, normalize_user=False):
# computes the DTW between two dictionaries & values
# outputs distances to dictionary distances
distances = {}
for key in dict1:
if key in posenet.PART_NAMES:
if key == 'leftEye' or key == 'rightEye':
continue
if dict1[key] and dict2[key]:
x =
|
np.array(dict1[key])
|
numpy.array
|
"""
RocketLogger data file import tests.
Copyright (c) 2016-2020, ETH Zurich, Computer Engineering Group
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from rocketlogger.data import (
RocketLoggerData,
RocketLoggerDataError,
RocketLoggerDataWarning,
RocketLoggerFileError,
_ROCKETLOGGER_ADC_CLOCK_SCALE,
)
import os.path
from unittest import TestCase
import unittest
import numpy as np
import rocketlogger.data as rld
import os
if os.environ.get("MATPLOTLIB_AVAILABLE") == "true":
import matplotlib.pyplot as plt
if os.environ.get("PANDAS_AVAILABLE") == "true":
import pandas as pd
_TEST_FILE_DIR = "data"
_FULL_TEST_FILE = os.path.join(_TEST_FILE_DIR, "test_full.rld")
_SINGLE_BLOCK_FILE = os.path.join(_TEST_FILE_DIR, "test_single_block.rld")
_MIN_BLOCK_SIZE_FILE = os.path.join(_TEST_FILE_DIR, "test_min_block_size.rld")
_ANALOG_TEST_FILE = os.path.join(_TEST_FILE_DIR, "test_analog_only.rld")
_HIGH_CURRENT_TEST_FILE = os.path.join(_TEST_FILE_DIR, "test_high_current.rld")
_STEPS_TEST_FILE = os.path.join(_TEST_FILE_DIR, "test_steps.rld")
_INCOMPATIBLE_TEST_FILE = os.path.join(_TEST_FILE_DIR, "test_unsupported.rld")
_INEXISTENT_TEST_FILE = os.path.join(_TEST_FILE_DIR, "test_inexistent.rld")
_SINGLE_TEST_FILE = os.path.join(_TEST_FILE_DIR, "test_v3_only.rld")
_HEADER_UNALIGNED_TEST_FILE = os.path.join(_TEST_FILE_DIR, "test_header_unaligned.rld")
_TRUNCATED_TEST_FILE = os.path.join(_TEST_FILE_DIR, "test_truncated.rld")
_SPLIT_TEST_FILE = os.path.join(_TEST_FILE_DIR, "test_split.rld")
_SPLIT_TRUNCATED_TEST_FILE = os.path.join(_TEST_FILE_DIR, "test_split_truncated.rld")
_TEMP_FILE = os.path.join(_TEST_FILE_DIR, "temp_data.rld")
def _file_copy_byte_flipped(file_in, file_out, offset, mask=0xA5):
data = np.fromfile(file_in, np.uint8)
data[offset] = data[offset] ^ mask
data.tofile(
file_out,
)
class TestDecimation(TestCase):
def test_binary_decimation(self):
data_in = np.ones((100))
data_in[0:2:100] = 0
data_ref = np.ones((10))
data_out = rld._decimate_binary(data_in, 10)
self.assertAlmostEqual(sum(abs(data_out - data_ref)), 0)
def test_min_decimation(self):
data_in = np.ones((100))
data_in[0:100:10] = 0
data_ref = np.zeros((10))
data_out = rld._decimate_min(data_in, 10)
self.assertEqual(sum(abs(data_out - data_ref)), 0)
def test_max_decimation(self):
data_in = np.zeros((100))
data_in[0:100:10] = 1
data_ref = np.ones((10))
data_out = rld._decimate_max(data_in, 10)
self.assertAlmostEqual(sum(abs(data_out - data_ref)), 0)
def test_mean_decimation(self):
data_in = np.arange(0, 100)
data_ref = 4.5 + np.arange(0, 100, 10)
data_out = rld._decimate_mean(data_in, 10)
self.assertAlmostEqual(sum(abs(data_out - data_ref)), 0)
class TestFileImport(TestCase):
def test_normal(self):
data = RocketLoggerData(_FULL_TEST_FILE)
self.assertEqual(data.get_data().shape, (5000, 16))
def test_no_file(self):
with self.assertRaisesRegex(NotImplementedError, "data file creation"):
RocketLoggerData()
def test_inexistent_file(self):
with self.assertRaises(FileNotFoundError):
RocketLoggerData(_INEXISTENT_TEST_FILE)
def test_overload_existing(self):
data = RocketLoggerData(_FULL_TEST_FILE)
with self.assertRaisesRegex(RocketLoggerDataError, "file is already loaded"):
data.load_file(_FULL_TEST_FILE)
def test_invalid_header_magic(self):
_file_copy_byte_flipped(_FULL_TEST_FILE, _TEMP_FILE, 0x00)
with self.assertRaisesRegex(RocketLoggerFileError, "file magic"):
RocketLoggerData(_TEMP_FILE)
def test_invalid_header_version(self):
_file_copy_byte_flipped(_FULL_TEST_FILE, _TEMP_FILE, 0x04)
with self.assertRaisesRegex(RocketLoggerFileError, "data file version"):
RocketLoggerData(_TEMP_FILE)
def test_invalid_header_length(self):
_file_copy_byte_flipped(_FULL_TEST_FILE, _TEMP_FILE, 0x06)
with self.assertRaisesRegex(RocketLoggerFileError, "header size"):
RocketLoggerData(_TEMP_FILE)
def test_invalid_header_data_block_size(self):
_file_copy_byte_flipped(_FULL_TEST_FILE, _TEMP_FILE, 0x08)
with self.assertRaisesRegex(RocketLoggerFileError, "number of samples"):
RocketLoggerData(_TEMP_FILE)
def test_invalid_header_data_block_count(self):
_file_copy_byte_flipped(_FULL_TEST_FILE, _TEMP_FILE, 0x0C)
with self.assertRaisesRegex(RocketLoggerFileError, "number of samples"):
RocketLoggerData(_TEMP_FILE)
def test_invalid_header_data_sample_count(self):
_file_copy_byte_flipped(_FULL_TEST_FILE, _TEMP_FILE, 0x10)
with self.assertRaisesRegex(RocketLoggerDataError, "corrupt data: file size"):
with self.assertWarnsRegex(
RocketLoggerDataWarning, "Skipping incomplete data"
):
RocketLoggerData(_TEMP_FILE)
def test_invalid_header_sample_count_inconsistent(self):
_file_copy_byte_flipped(_FULL_TEST_FILE, _TEMP_FILE, 0x10, 0x04)
with self.assertRaisesRegex(RocketLoggerFileError, "number of samples"):
RocketLoggerData(_TEMP_FILE)
def test_invalid_header_comment_length(self):
with self.assertWarnsRegex(RocketLoggerDataWarning, "comment length unaligned"):
RocketLoggerData(_HEADER_UNALIGNED_TEST_FILE)
def test_header_dict(self):
data = RocketLoggerData(_FULL_TEST_FILE)
header = {
"data_block_count": 5,
"data_block_size": 1000,
"file_version": 2,
"mac_address": "12:34:56:78:90:ab",
"sample_count": 5000,
"sample_rate": 1000,
"start_time": np.datetime64("2017-05-10T09:05:17.438817080"),
}
self.assertDictEqual(data.get_header(), header)
def test_header_dict_with_decimation(self):
data = RocketLoggerData(_FULL_TEST_FILE, decimation_factor=10)
header = {
"data_block_count": 5,
"data_block_size": 100,
"file_version": 2,
"mac_address": "12:34:56:78:90:ab",
"sample_count": 500,
"sample_rate": 100,
"start_time": np.datetime64("2017-05-10T09:05:17.438817080"),
}
self.assertDictEqual(data.get_header(), header)
def test_with_decimation(self):
data = RocketLoggerData(_FULL_TEST_FILE, decimation_factor=10)
self.assertEqual(data._header["data_block_size"], 100)
def test_with_invalid_decimation(self):
with self.assertRaisesRegex(ValueError, "Decimation factor"):
RocketLoggerData(_FULL_TEST_FILE, decimation_factor=3)
def test_direct_import(self):
data = RocketLoggerData(_FULL_TEST_FILE, memory_mapped=False)
self.assertEqual(data.get_data("V1").shape, (5000, 1))
def test_direct_import_with_decimation(self):
data = RocketLoggerData(
_FULL_TEST_FILE, memory_mapped=False, decimation_factor=10
)
self.assertEqual(data._header["data_block_size"], 100)
self.assertEqual(data._header["sample_count"], 500)
self.assertEqual(data._header["sample_rate"], 100)
self.assertEqual(data.get_data("V1").shape, (500, 1))
def test_direct_vs_memory_mapped(self):
data_mm = RocketLoggerData(_FULL_TEST_FILE, memory_mapped=True)
data_ff = RocketLoggerData(_FULL_TEST_FILE, memory_mapped=False)
self.assertEqual(data_mm._header, data_ff._header)
self.assertEqual(len(data_mm._data), len(data_ff._data))
for i in range(len(data_mm._data)):
arrays_equal = np.array_equal(data_mm._data[i], data_ff._data[i])
self.assertTrue(arrays_equal)
def test_single_block_import(self):
data = RocketLoggerData(_SINGLE_BLOCK_FILE, memory_mapped=False)
self.assertEqual(data._header["data_block_count"], 1)
self.assertEqual(data.get_data("V1").shape, (1000, 1))
def test_min_block_size_import(self):
data = RocketLoggerData(_MIN_BLOCK_SIZE_FILE, memory_mapped=False)
self.assertEqual(data._header["data_block_size"], 1)
self.assertEqual(data.get_data("V1").shape, (5, 1))
@classmethod
def tearDownClass(cls):
try:
os.remove(_TEMP_FILE)
except FileNotFoundError:
pass
class TestFullFile(TestCase):
def setUp(self):
self.data = RocketLoggerData(_FULL_TEST_FILE)
def tearDown(self):
del self.data
def test_load(self):
self.assertIsInstance(self.data, RocketLoggerData)
def test_header_field_count(self):
self.assertEqual(len(self.data._header), 14)
def test_header_channel_count(self):
self.assertEqual(len(self.data._header["channels"]), 16)
def test_data_size(self):
self.assertEqual(self.data.get_data().shape, (5000, 16))
def test_channel_names(self):
self.assertEqual(
self.data.get_channel_names(),
sorted(
[
"DI1",
"DI2",
"DI3",
"DI4",
"DI5",
"DI6",
"I1L_valid",
"I2L_valid",
"V1",
"V2",
"V3",
"V4",
"I1L",
"I1H",
"I2L",
"I2H",
]
),
)
class TestSingleChannelFile(TestCase):
def setUp(self):
self.data = RocketLoggerData(_SINGLE_TEST_FILE)
def tearDown(self):
del self.data
def test_load(self):
self.assertIsInstance(self.data, RocketLoggerData)
def test_header_field_count(self):
self.assertEqual(len(self.data._header), 14)
def test_header_channel_count(self):
self.assertEqual(len(self.data._header["channels"]), 1)
def test_data_size(self):
self.assertEqual(self.data.get_data().shape, (5000, 1))
def test_channel_names(self):
self.assertEqual(self.data.get_channel_names(), ["V3"])
class TestJoinFile(TestCase):
def setUp(self):
self.data = RocketLoggerData(_SPLIT_TEST_FILE)
def tearDown(self):
del self.data
def test_load(self):
self.assertIsInstance(self.data, RocketLoggerData)
def test_header_field_count(self):
self.assertEqual(len(self.data._header), 14)
def test_header_channel_count(self):
self.assertEqual(len(self.data._header["channels"]), 16)
def test_data_size(self):
self.assertEqual(self.data.get_data().shape, (3 * 128000, 16))
def test_channel_names(self):
self.assertEqual(
self.data.get_channel_names(),
sorted(
[
"DI1",
"DI2",
"DI3",
"DI4",
"DI5",
"DI6",
"I1L_valid",
"I2L_valid",
"V1",
"V2",
"V3",
"V4",
"I1L",
"I1H",
"I2L",
"I2H",
]
),
)
class TestJoinExclude(TestCase):
def test_exclude_all(self):
with self.assertRaisesRegex(RocketLoggerDataError, "Could not load valid data"):
RocketLoggerData(_SPLIT_TEST_FILE, exclude_part=np.arange(0, 3))
def test_exclude_non_join(self):
with self.assertRaisesRegex(ValueError, "exclude_part"):
RocketLoggerData(
_SPLIT_TEST_FILE, join_files=False, exclude_part=np.arange(0, 3)
)
class TestJoinExcludeFirst(TestCase):
def setUp(self):
self.full_reference = RocketLoggerData(_SPLIT_TEST_FILE)
self.data = RocketLoggerData(_SPLIT_TEST_FILE, exclude_part=0)
def tearDown(self):
del self.full_reference
del self.data
def test_load(self):
self.assertIsInstance(self.data, RocketLoggerData)
def test_data_values(self):
reference_range = np.arange(128000, 3 * 128000)
reference = self.full_reference.get_data()[reference_range]
self.assertTrue(np.array_equal(self.data.get_data(), reference))
def test_data_timestamp_monotonic(self):
reference_range = np.arange(2, 3 * 2)
reference = self.full_reference._timestamps_monotonic[reference_range]
self.assertTrue(np.array_equal(self.data._timestamps_monotonic, reference))
def test_data_timestamp_realtime(self):
reference_range = np.arange(2, 3 * 2)
reference = self.full_reference._timestamps_realtime[reference_range]
self.assertTrue(np.array_equal(self.data._timestamps_realtime, reference))
class TestJoinExcludeLast(TestCase):
def setUp(self):
self.full_reference = RocketLoggerData(_SPLIT_TEST_FILE)
self.data = RocketLoggerData(_SPLIT_TEST_FILE, exclude_part=2)
def tearDown(self):
del self.full_reference
del self.data
def test_load(self):
self.assertIsInstance(self.data, RocketLoggerData)
def test_data_values(self):
reference_range = np.arange(2 * 128000)
reference = self.full_reference.get_data()[reference_range]
self.assertTrue(np.array_equal(self.data.get_data(), reference))
def test_data_timestamp_monotonic(self):
reference_range = np.arange(2 * 2)
reference = self.full_reference._timestamps_monotonic[reference_range]
self.assertTrue(np.array_equal(self.data._timestamps_monotonic, reference))
def test_data_timestamp_realtime(self):
reference_range = np.arange(2 * 2)
reference = self.full_reference._timestamps_realtime[reference_range]
self.assertTrue(np.array_equal(self.data._timestamps_realtime, reference))
class TestJoinExcludeMultiple(TestCase):
def setUp(self):
self.full_reference = RocketLoggerData(_SPLIT_TEST_FILE)
self.data = RocketLoggerData(_SPLIT_TEST_FILE, exclude_part=[0, 2])
def tearDown(self):
del self.full_reference
del self.data
def test_load(self):
self.assertIsInstance(self.data, RocketLoggerData)
def test_data_values(self):
reference_range = np.arange(128000, 2 * 128000)
reference = self.full_reference.get_data()[reference_range]
self.assertTrue(np.array_equal(self.data.get_data(), reference))
def test_data_timestamp_monotonic(self):
reference_range =
|
np.arange(2, 2 * 2)
|
numpy.arange
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Tests of linear transforms."""
import os
import pytest
import numpy as np
from subprocess import check_call
import shutil
import h5py
import nibabel as nb
from nibabel.eulerangles import euler2mat
from nibabel.affines import from_matvec
from .. import linear as nitl
from .utils import assert_affines_by_filename
RMSE_TOL = 0.1
APPLY_LINEAR_CMD = {
"fsl": """\
flirt -setbackground 0 -interp nearestneighbour -in {moving} -ref {reference} \
-applyxfm -init {transform} -out {resampled}\
""".format,
"itk": """\
antsApplyTransforms -d 3 -r {reference} -i {moving} \
-o {resampled} -n NearestNeighbor -t {transform} --float\
""".format,
"afni": """\
3dAllineate -base {reference} -input {moving} \
-prefix {resampled} -1Dmatrix_apply {transform} -final NN\
""".format,
"fs": """\
mri_vol2vol --mov {moving} --targ {reference} --lta {transform} \
--o {resampled} --nearest""".format,
}
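# Hedged illustration (file names are placeholders): each entry above is a str.format
# callable, so building a shell command only needs the four paths.
_EXAMPLE_FSL_CMD = APPLY_LINEAR_CMD["fsl"](
    moving="moving.nii.gz",
    reference="reference.nii.gz",
    transform="xfm.mat",
    resampled="resampled.nii.gz",
)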
@pytest.mark.parametrize("matrix", [[0.0], np.ones((3, 3, 3)), np.ones((3, 4)),])
def test_linear_typeerrors1(matrix):
"""Exercise errors in Affine creation."""
with pytest.raises(TypeError):
nitl.Affine(matrix)
def test_linear_typeerrors2(data_path):
"""Exercise errors in Affine creation."""
with pytest.raises(TypeError):
nitl.Affine.from_filename(data_path / "itktflist.tfm", fmt="itk")
def test_linear_valueerror():
"""Exercise errors in Affine creation."""
with pytest.raises(ValueError):
nitl.Affine(np.ones((4, 4)))
def test_loadsave_itk(tmp_path, data_path, testdata_path):
"""Test idempotency."""
ref_file = testdata_path / "someones_anatomy.nii.gz"
xfm = nitl.load(data_path / "itktflist2.tfm", fmt="itk")
assert isinstance(xfm, nitl.LinearTransformsMapping)
xfm.reference = ref_file
xfm.to_filename(tmp_path / "transform-mapping.tfm", fmt="itk")
assert (data_path / "itktflist2.tfm").read_text() == (
tmp_path / "transform-mapping.tfm"
).read_text()
single_xfm = nitl.load(data_path / "affine-LAS.itk.tfm", fmt="itk")
assert isinstance(single_xfm, nitl.Affine)
assert single_xfm == nitl.Affine.from_filename(
data_path / "affine-LAS.itk.tfm", fmt="itk"
)
@pytest.mark.parametrize("fmt", ["itk", "fsl", "afni", "lta"])
def test_loadsave(tmp_path, data_path, testdata_path, fmt):
"""Test idempotency."""
ref_file = testdata_path / "someones_anatomy.nii.gz"
xfm = nitl.load(data_path / "itktflist2.tfm", fmt="itk")
xfm.reference = ref_file
fname = tmp_path / ".".join(("transform-mapping", fmt))
xfm.to_filename(fname, fmt=fmt)
if fmt == "fsl":
# FSL should not read a transform without reference
with pytest.raises(ValueError):
nitl.load(fname, fmt=fmt)
nitl.load(fname, fmt=fmt, moving=ref_file)
with pytest.warns(UserWarning):
assert np.allclose(
xfm.matrix,
nitl.load(fname, fmt=fmt, reference=ref_file).matrix,
)
assert np.allclose(
xfm.matrix,
nitl.load(fname, fmt=fmt, reference=ref_file, moving=ref_file).matrix,
)
else:
assert xfm == nitl.load(fname, fmt=fmt, reference=ref_file)
xfm.to_filename(fname, fmt=fmt, moving=ref_file)
if fmt == "fsl":
assert np.allclose(
xfm.matrix,
nitl.load(fname, fmt=fmt, reference=ref_file, moving=ref_file).matrix,
rtol=1e-2, # FSL incurs into large errors due to rounding
)
else:
assert xfm == nitl.load(fname, fmt=fmt, reference=ref_file)
ref_file = testdata_path / "someones_anatomy.nii.gz"
xfm = nitl.load(data_path / "affine-LAS.itk.tfm", fmt="itk")
xfm.reference = ref_file
fname = tmp_path / ".".join(("single-transform", fmt))
xfm.to_filename(fname, fmt=fmt)
if fmt == "fsl":
assert np.allclose(
xfm.matrix,
nitl.load(fname, fmt=fmt, reference=ref_file, moving=ref_file).matrix,
rtol=1e-2, # FSL incurs into large errors due to rounding
)
else:
assert xfm == nitl.load(fname, fmt=fmt, reference=ref_file)
xfm.to_filename(fname, fmt=fmt, moving=ref_file)
if fmt == "fsl":
assert np.allclose(
xfm.matrix,
nitl.load(fname, fmt=fmt, reference=ref_file, moving=ref_file).matrix,
rtol=1e-2, # FSL incurs into large errors due to rounding
)
else:
assert xfm == nitl.load(fname, fmt=fmt, reference=ref_file)
@pytest.mark.parametrize("image_orientation", ["RAS", "LAS", "LPS", "oblique"])
@pytest.mark.parametrize("sw_tool", ["itk", "fsl", "afni", "fs"])
def test_linear_save(tmpdir, data_path, get_testdata, image_orientation, sw_tool):
"""Check implementation of exporting affines to formats."""
if (image_orientation, sw_tool) == ("oblique", "afni"):
pytest.skip("AFNI Deoblique unsupported.")
tmpdir.chdir()
img = get_testdata[image_orientation]
# Generate test transform
T = from_matvec(euler2mat(x=0.9, y=0.001, z=0.001), [4.0, 2.0, -1.0])
if sw_tool == "fs":
# Account for the fact that FS defines LTA transforms reversed
T = np.linalg.inv(T)
xfm = nitl.Affine(T)
xfm.reference = img
ext = ""
if sw_tool == "itk":
ext = ".tfm"
elif sw_tool == "fs":
ext = ".lta"
xfm_fname1 = "M.%s%s" % (sw_tool, ext)
xfm.to_filename(xfm_fname1, fmt=sw_tool)
xfm_fname2 = str(data_path / "affine-%s.%s%s") % (image_orientation, sw_tool, ext)
assert_affines_by_filename(xfm_fname1, xfm_fname2)
@pytest.mark.parametrize("image_orientation", ["RAS", "LAS", "LPS", ]) # 'oblique',
@pytest.mark.parametrize("sw_tool", ["itk", "fsl", "afni", "fs"])
def test_apply_linear_transform(tmpdir, get_testdata, get_testmask, image_orientation, sw_tool):
"""Check implementation of exporting affines to formats."""
tmpdir.chdir()
img = get_testdata[image_orientation]
msk = get_testmask[image_orientation]
# Generate test transform
T = from_matvec(euler2mat(x=0.9, y=0.001, z=0.001), [4.0, 2.0, -1.0])
xfm = nitl.Affine(T)
xfm.reference = img
ext = ""
if sw_tool == "itk":
ext = ".tfm"
elif sw_tool == "fs":
ext = ".lta"
img.to_filename("img.nii.gz")
msk.to_filename("mask.nii.gz")
# Write out transform file (software-dependent)
xfm_fname = "M.%s%s" % (sw_tool, ext)
xfm.to_filename(xfm_fname, fmt=sw_tool)
cmd = APPLY_LINEAR_CMD[sw_tool](
transform=os.path.abspath(xfm_fname),
reference=os.path.abspath("mask.nii.gz"),
moving=os.path.abspath("mask.nii.gz"),
resampled=os.path.abspath("resampled_brainmask.nii.gz"),
)
# skip test if command is not available on host
exe = cmd.split(" ", 1)[0]
if not shutil.which(exe):
pytest.skip("Command {} not found on host".format(exe))
# resample mask
exit_code = check_call([cmd], shell=True)
assert exit_code == 0
sw_moved_mask = nb.load("resampled_brainmask.nii.gz")
nt_moved_mask = xfm.apply(msk, order=0)
nt_moved_mask.set_data_dtype(msk.get_data_dtype())
diff = np.asanyarray(sw_moved_mask.dataobj) - np.asanyarray(nt_moved_mask.dataobj)
assert np.sqrt((diff ** 2).mean()) < RMSE_TOL
cmd = APPLY_LINEAR_CMD[sw_tool](
transform=os.path.abspath(xfm_fname),
reference=os.path.abspath("img.nii.gz"),
moving=os.path.abspath("img.nii.gz"),
resampled=os.path.abspath("resampled.nii.gz"),
)
brainmask = np.asanyarray(nt_moved_mask.dataobj, dtype=bool)
exit_code = check_call([cmd], shell=True)
assert exit_code == 0
sw_moved = nb.load("resampled.nii.gz")
sw_moved.set_data_dtype(img.get_data_dtype())
nt_moved = xfm.apply(img, order=0)
diff = (sw_moved.get_fdata() - nt_moved.get_fdata())
diff[~brainmask] = 0.0
diff[
|
np.abs(diff)
|
numpy.abs
|
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import precision_recall_fscore_support as prf, accuracy_score, roc_auc_score, auc, precision_recall_curve
from tqdm import tqdm
from imblearn.over_sampling import SMOTE
from cuml.cluster import KMeans as cuKMeans
from sklearn.cluster import KMeans
from prettytable import PrettyTable
# define compared algorithms from the Experiments section
evaluated_algorithms = [
'WO_TTA_Baseline',
'Gaussian_TTA_Baseline',
'Euclidean_SMOTE_TTA',
'Siamese_SMOTE_TTA',
'Euclidean_Kmeans_TTA',
'Siamese_Kmeans_TTA'
]
# define anomaly detection estimators from the Experiments section
evaluated_estimators = [
'Autoencoder',
'Isolation Forest',
'One-Class SVM',
'Local Outlier Factor'
]
def test(X, folded_test_datasets_list, trained_estimators_list, trained_siamese_network, euclidean_nn_model, siamese_nn_model, args):
"""
Performing test phase on the test set with all of the compared algorithms described in the Experiments section
Parameters
----------
X: ndarray of shape (#num_samples, #features). The dataset's features
folded_test_datasets_list: list. The test set of each split in the k-fold
    trained_estimators_list: list. The trained estimator of each split in the k-fold
    trained_siamese_network: TF's Model. The trained siamese internal model, used to obtain the embedding of each test instance
    euclidean_nn_model: trained Nearest Neighbors model with euclidean distance metric
    siamese_nn_model: trained Nearest Neighbors model with Siamese distance metric
args: argparse args. The args given to the program
"""
# define k-fold split's metrics dict
# algorithm:estimator:metric:fold
algorithms_folds_metrics_dict = {algorithm: {estimator: [] for estimator in evaluated_estimators} for algorithm in evaluated_algorithms}
for split_index in range(args.n_folds):
# test set-up
test_ds = folded_test_datasets_list[split_index]
trained_estimator = trained_estimators_list[split_index]
ae_test_step_func = ae_test_step()
print(f"--- Testing k-fold split index: {split_index+1} ---")
# testing current k-fold split
current_split_algorithms_metrics = test_loop(X, test_ds, trained_estimator, euclidean_nn_model, siamese_nn_model, trained_siamese_network, ae_test_step_func, args)
# update the folds metrics dictionary
for algorithm, estimator_metrics in current_split_algorithms_metrics.items():
for estimator, metrics in estimator_metrics.items():
algorithms_folds_metrics_dict[algorithm][estimator].append(metrics)
for algorithm, estimator_folds_metrics in algorithms_folds_metrics_dict.items():
for estimator, folds_metrics in estimator_folds_metrics.items():
algorithms_folds_metrics_dict[algorithm][estimator] = np.array(folds_metrics)
# presenting results
print_test_results(algorithms_folds_metrics_dict, args)
def test_loop(X, test_ds, trained_estimator, euclidean_nn_model, siamese_nn_model, trained_siamese_network, ae_test_step_func, args):
"""
Performing the test loop with every evaluated algorithm.
Parameters
----------
X: numpy ndarray of shape (#num_samples, #features). The dataset's features
test_ds: TF's Dataset. The test set
trained_estimator: A trained anomaly detector.
    euclidean_nn_model: trained Nearest Neighbors model with euclidean distance metric
    siamese_nn_model: trained Nearest Neighbors model with Siamese distance metric
trained_siamese_network: TF's Model. The trained siamese model used for calculating two samples' distance
ae_test_step_func: function. The function that is used for performing single test step with AE anomaly detection estimator
args: argparse args. The args given to the program
"""
    # loss function - reduction set to `NONE` in order to get the loss of every test example
loss_func = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)
# extract estimators
trained_encoder, trained_decoder = trained_estimator['ae'][0], trained_estimator['ae'][1]
trained_if = trained_estimator['if']
trained_ocs = trained_estimator['ocs']
trained_lof = trained_estimator['lof']
num_neighbors = args.num_neighbors
num_augmentations = args.num_augmentations
algorithms_test_loss = {algorithm: {estimator: [] for estimator in evaluated_estimators} for algorithm in evaluated_algorithms}
algorithms_metrics = {algorithm: {estimator: None for estimator in evaluated_estimators} for algorithm in evaluated_algorithms}
test_labels = []
tqdm_total_bar = test_ds.cardinality().numpy()
for step, (x_batch_test, y_batch_test) in tqdm(enumerate(test_ds), total=tqdm_total_bar):
# predicting with each estimator without TTA (first Baseline)
ae_reconstruction_loss = ae_test_step_func(x_batch_test, trained_encoder, trained_decoder, loss_func).numpy()
if_anomaly_score = if_test_step(x_batch_test, trained_if)
ocs_anomaly_score = ocs_test_step(x_batch_test, trained_ocs)
lof_anomaly_score = lof_test_step(x_batch_test, trained_lof)
# saving first Baseline results for each estimator
algorithms_test_loss['WO_TTA_Baseline']['Autoencoder'].append(ae_reconstruction_loss)
algorithms_test_loss['WO_TTA_Baseline']['Isolation Forest'].append(if_anomaly_score)
algorithms_test_loss['WO_TTA_Baseline']['One-Class SVM'].append(ocs_anomaly_score)
algorithms_test_loss['WO_TTA_Baseline']['Local Outlier Factor'].append(lof_anomaly_score)
test_labels.append(y_batch_test.numpy())
# calculate euclidean nn indices
euclidean_nn_batch_neighbors_indices = euclidean_nn_model.kneighbors(X=x_batch_test.numpy(), n_neighbors=num_neighbors, return_distance=False)
# calculate siamese nn indices
test_batch_latent_features = trained_siamese_network(x_batch_test).numpy()
siamese_nn_batch_neighbors_indices = siamese_nn_model.kneighbors(X=test_batch_latent_features, n_neighbors=num_neighbors, return_distance=False)
euclidean_nn_batch_neighbors_features = X[euclidean_nn_batch_neighbors_indices]
siamese_nn_batch_neighbors_features = X[siamese_nn_batch_neighbors_indices]
algorithms_tta_samples_dict = {
'Gaussian_TTA_Baseline': generate_random_noise_tta_samples(x_batch_test.numpy(), num_augmentations=num_augmentations),
'Euclidean_SMOTE_TTA': generate_oversampling_tta_samples(euclidean_nn_batch_neighbors_features, oversampling_method=SMOTE, num_neighbors=num_neighbors, num_augmentations=num_augmentations),
'Siamese_SMOTE_TTA': generate_oversampling_tta_samples(siamese_nn_batch_neighbors_features, oversampling_method=SMOTE, num_neighbors=num_neighbors, num_augmentations=num_augmentations),
'Euclidean_Kmeans_TTA': generate_kmeans_tta_samples(euclidean_nn_batch_neighbors_features, args.with_cuml, num_augmentations=num_augmentations),
'Siamese_Kmeans_TTA': generate_kmeans_tta_samples(siamese_nn_batch_neighbors_features, args.with_cuml, num_augmentations=num_augmentations)
}
# making prediction (with the anomaly detection estimator) for every tta sample
algorithms_tta_predictions_dict = {
algorithm: {
'Autoencoder': ae_test_step_func(tta_samples, trained_encoder, trained_decoder, loss_func).numpy(),
'Isolation Forest': if_test_step(tta_samples, trained_if),
'One-Class SVM': ocs_test_step(tta_samples, trained_ocs),
'Local Outlier Factor': lof_test_step(tta_samples, trained_lof)
}
for algorithm, tta_samples in algorithms_tta_samples_dict.items()
}
# merging given test sample's prediction with its tta predictions
for algorithm, estimators_predictions in algorithms_tta_predictions_dict.items():
for estimator, tta_preds in estimators_predictions.items():
wo_tta_pred = algorithms_test_loss['WO_TTA_Baseline'][estimator][step]
                # combine the original test sample's prediction with its TTA samples' predictions
for wo_tta_single_pred, tta_single_pred in list(zip(wo_tta_pred, tta_preds)):
combined_tta_loss = np.concatenate([[wo_tta_single_pred], tta_single_pred])
algorithms_test_loss[algorithm][estimator].append(np.mean(combined_tta_loss))
# flatten w/o tta baseline test loss and the test_labels vectors
# algorithms_test_loss['WO_TTA_Baseline'] = np.concatenate(algorithms_test_loss['WO_TTA_Baseline'], axis=0)
for estimator, estimator_test_loss in algorithms_test_loss['WO_TTA_Baseline'].items():
algorithms_test_loss['WO_TTA_Baseline'][estimator] = np.concatenate(estimator_test_loss, axis=0)
test_labels = np.concatenate(test_labels, axis=0)
y_true = np.asarray(test_labels).astype(int)
# calculating AUC
for algorithm, estimator_final_preds in algorithms_metrics.items():
for estimator in estimator_final_preds.keys():
algorithms_metrics[algorithm][estimator] = (roc_auc_score(y_true, algorithms_test_loss[algorithm][estimator]))
return algorithms_metrics
def ae_test_step():
# @tf.function
def test_one_step(inputs, encoder, decoder, loss_func):
latent_var = encoder(inputs)
reconstructed = decoder(latent_var)
reconstruction_loss = loss_func(inputs, reconstructed)
return reconstruction_loss
return test_one_step
def if_test_step(test_X, trained_if):
"""
A test phase with Isolation Forest on the given test set
Parameters
----------
test_X: numpy ndarray of shape (batch_size, num_augmentations, dataset's features dim) or (batch_size, dataset's features dim). The batch test set
trained_if: scikit-learn's IsolationForest. The trained Isolation Forest as anomaly detection estimator
"""
if len(test_X.shape) == 3:
batch_anomaly_score = []
for one_test_tta_samples in test_X:
anomaly_score = -1 * trained_if.score_samples(one_test_tta_samples)
batch_anomaly_score.append(anomaly_score)
anomaly_score = np.array(batch_anomaly_score)
else:
anomaly_score = -1 * trained_if.score_samples(test_X)
return anomaly_score
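# Hedged usage sketch (toy data, hypothetical estimator): the *_test_step helpers accept
# either a (batch, features) array or a (batch, num_augmentations, features) TTA array.
def _demo_if_test_step():
    from sklearn.ensemble import IsolationForest
    rng = np.random.RandomState(0)
    trained_if = IsolationForest(random_state=0).fit(rng.randn(100, 10))
    scores_plain = if_test_step(rng.randn(8, 10), trained_if)   # shape (8,)
    scores_tta = if_test_step(rng.randn(8, 5, 10), trained_if)  # shape (8, 5)
    return scores_plain, scores_tta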
def lof_test_step(test_X, trained_lof):
"""
A test phase with Local Outlier Factor on the given test set
Parameters
----------
test_X: numpy ndarray of shape (batch_size, dataset's features dim). The batch test set
trained_lof: scikit-learn's LocalOutlierFactor. The trained Local Outlier Factor as anomaly detection estimator
"""
if len(test_X.shape) == 3:
batch_anomaly_score = []
for one_test_tta_samples in test_X:
anomaly_score = -1 * trained_lof.score_samples(one_test_tta_samples)
batch_anomaly_score.append(anomaly_score)
anomaly_score = np.array(batch_anomaly_score)
else:
anomaly_score = -1 * trained_lof.score_samples(test_X)
return anomaly_score
def ocs_test_step(test_X, trained_ocs):
"""
A test phase with One-Class SVM on the given test set
Parameters
----------
test_X: numpy ndarray of shape (batch_size, dataset's features dim). The batch test set
trained_ocs: scikit-learn's OneClassSVM. The trained One-Class SVM as anomaly detection estimator
"""
if len(test_X.shape) == 3:
batch_anomaly_score = []
for one_test_tta_samples in test_X:
anomaly_score = -1 * trained_ocs.score_samples(one_test_tta_samples)
batch_anomaly_score.append(anomaly_score)
anomaly_score = np.array(batch_anomaly_score)
else:
anomaly_score = -1 * trained_ocs.score_samples(test_X)
return anomaly_score
def generate_random_noise_tta_samples(x_batch_test, num_augmentations):
"""
Generating TTA with random Gaussian noise
Parameters
----------
x_batch_test: ndarray of shape (batch_size, #features). The features of each test sample in the batch
    num_augmentations: int. The number of augmentations to produce
"""
# scale = 0.2
random_noise = np.random.normal(size=(x_batch_test.shape[0], num_augmentations, x_batch_test.shape[1]))
# adding the noise to the original batch test samples. expanding the middle dim of x_batch_test to make it (batch_size, 1, dataset_features_dim)
gaussian_tta_samples = np.expand_dims(x_batch_test, axis=1) + random_noise
return gaussian_tta_samples
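# Hedged shape check (toy numbers, illustrative only): a batch of 4 test samples with
# 10 features and 5 augmentations yields a Gaussian TTA tensor of shape (4, 5, 10).
def _demo_gaussian_tta_shapes():
    x_batch = np.random.randn(4, 10).astype(np.float32)
    tta = generate_random_noise_tta_samples(x_batch, num_augmentations=5)
    assert tta.shape == (4, 5, 10)
    return tta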
def generate_kmeans_tta_samples(batch_neighbors_features, with_cuML, num_augmentations):
"""
Generating TTA with trained k-means
Parameters
----------
batch_neighbors_features: numpy ndarray of shape (batch_size, num_neighbors, #features). The features of each neighbor of each test sample that is in the batch
with_cuML: bool. If True, then using cuML's k-Means model otherwise using scikit-learn's k-Means model
num_augmentations: int. The number of augmentations to produce
"""
batch_tta_samples = []
for neighbors_features in batch_neighbors_features:
if with_cuML:
kmeans_model = cuKMeans(n_clusters=num_augmentations, random_state=1234)
else:
            kmeans_model = KMeans(n_clusters=num_augmentations, random_state=1234)
neighbors_features = neighbors_features.astype(np.float32)
kmeans_model.fit(X=neighbors_features)
tta_samples = kmeans_model.cluster_centers_
# appending to the batch tta samples
batch_tta_samples.append(tta_samples)
return np.array(batch_tta_samples)
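# Hedged usage sketch (toy arrays, illustrative only): with_cuML=False clusters each test
# sample's 20 neighbors into 5 centroids with scikit-learn's KMeans and uses them as TTA.
def _demo_kmeans_tta():
    neighbors = np.random.randn(4, 20, 10).astype(np.float32)  # (batch, neighbors, features)
    tta = generate_kmeans_tta_samples(neighbors, with_cuML=False, num_augmentations=5)
    assert tta.shape == (4, 5, 10)
    return tta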
def generate_oversampling_tta_samples(oversampling_batch_neighbors_features, num_neighbors, num_augmentations, oversampling_method):
"""
Generating TTA with oversampling method (SMOTE)
Parameters
----------
oversampling_batch_neighbors_features: numpy ndarray of shape (batch_size, num_neighbors, #features). The features of each neighbor of each test sample that is in the batch
num_neighbors: int. The number of neighbor each test sample in the batch has
num_augmentations: int. The number of augmentations to produce
    oversampling_method: function. SMOTE function
"""
batch_size, features_dim = oversampling_batch_neighbors_features.shape[0], oversampling_batch_neighbors_features.shape[-1]
oversampling_batch_tta_samples =
|
np.zeros((batch_size, num_augmentations, features_dim))
|
numpy.zeros
|
from IB import *
import matplotlib.pyplot as plt
import os
import numpy as np
import math
def gen_easytest(plot=True):
# set name
name = "easytest"
n = 10
# set generative parameters
mu1 = np.array([0,0])
sig1 = np.eye(2)
n1 = n
mu2 = np.array([math.sqrt(75),5])
sig2 = np.eye(2)
n2 = n
mu3 = np.array([0,10])
sig3 = np.eye(2)
n3 = n
param = {'mu1': mu1, 'sig1': sig1, 'n1': n1,
'mu2': mu2, 'sig2': sig2, 'n2': n2,
'mu3': mu3, 'sig3': sig3, 'n3': n3}
# make labels
labels = np.array([0]*n1+[1]*n2+[2]*n3)
# make coordinates
coord = np.concatenate((np.random.multivariate_normal(mu1,sig1,n1),
np.random.multivariate_normal(mu2,sig2,n2),
np.random.multivariate_normal(mu3,sig3,n3)))
# make dataset
ds = dataset(coord = coord, labels = labels, gen_param = param, name = name,smoothing_type='u',smoothing_center='d')
# plot coordinates
if plot: ds.plot_coord()
# normalize
ds.normalize_coord()
if plot: ds.plot_coord()
return ds
def gen_blob(plot=True):
# set name
name = "blob"
# set generative parameters
mu1 = np.array([0,0])
sig1 = np.eye(2)
n1 = 90
param = {'mu1': mu1, 'sig1': sig1, 'n1': n1}
# make labels
labels = np.array([0]*n1)
# make coordinates
coord = np.random.multivariate_normal(mu1,sig1,n1)
# make dataset
ds = dataset(coord = coord, labels = labels, gen_param = param, name = name)
# plot coordinates
if plot: ds.plot_coord()
# normalize
ds.normalize_coord()
if plot: ds.plot_coord()
return ds
def gen_3sph_evensamp_evenspacing(plot=True):
# set name
name = "3sph_evensamp_evenspacing"
# set generative parameters
mu1 = np.array([0,0])
sig1 = np.eye(2)
n1 = 30
mu2 = np.array([math.sqrt(75),5])
sig2 = np.eye(2)
n2 = 30
mu3 = np.array([0,10])
sig3 = np.eye(2)
n3 = 30
param = {'mu1': mu1, 'sig1': sig1, 'n1': n1,
'mu2': mu2, 'sig2': sig2, 'n2': n2,
'mu3': mu3, 'sig3': sig3, 'n3': n3}
# make labels
labels = np.array([0]*n1+[1]*n2+[2]*n3)
# make coordinates
coord = np.concatenate((np.random.multivariate_normal(mu1,sig1,n1),
np.random.multivariate_normal(mu2,sig2,n2),
np.random.multivariate_normal(mu3,sig3,n3)))
# make dataset
ds = dataset(coord = coord, labels = labels, gen_param = param, name = name)
# plot coordinates
if plot: ds.plot_coord()
# normalize
ds.normalize_coord()
if plot: ds.plot_coord()
return ds
def gen_3sph_unevensamp_evenspacing(plot=True):
# set name
name = "3sph_unevensamp_evenspacing"
# set generative parameters
mu1 = np.array([0,0])
sig1 = np.eye(2)
n1 = 10
mu2 = np.array([math.sqrt(75),5])
sig2 = np.eye(2)
n2 = 30
mu3 = np.array([0,10])
sig3 = np.eye(2)
n3 = 60
param = {'mu1': mu1, 'sig1': sig1, 'n1': n1,
'mu2': mu2, 'sig2': sig2, 'n2': n2,
'mu3': mu3, 'sig3': sig3, 'n3': n3}
# make labels
labels = np.array([0]*n1+[1]*n2+[2]*n3)
# make coordinates
coord = np.concatenate((np.random.multivariate_normal(mu1,sig1,n1),
np.random.multivariate_normal(mu2,sig2,n2),
np.random.multivariate_normal(mu3,sig3,n3)))
# make dataset
ds = dataset(coord = coord, labels = labels, gen_param = param, name = name)
# plot coordinates
if plot: ds.plot_coord()
# normalize
ds.normalize_coord()
if plot: ds.plot_coord()
return ds
def gen_3sph_evensamp_unevenspacing(plot=True):
# set name
name = "3sph_evensamp_unevenspacing"
# set generative parameters
mu1 = np.array([0,2.5])
sig1 = np.eye(2)
n1 = 30
mu2 = np.array([0,-2.5])
sig2 = np.eye(2)
n2 = 30
mu3 = np.array([15,0])
sig3 = np.eye(2)
n3 = 30
param = {'mu1': mu1, 'sig1': sig1, 'n1': n1,
'mu2': mu2, 'sig2': sig2, 'n2': n2,
'mu3': mu3, 'sig3': sig3, 'n3': n3}
# make labels
labels = np.array([0]*n1+[1]*n2+[2]*n3)
# make coordinates
coord = np.concatenate((np.random.multivariate_normal(mu1,sig1,n1),
np.random.multivariate_normal(mu2,sig2,n2),
np.random.multivariate_normal(mu3,sig3,n3)))
# make dataset
ds = dataset(coord = coord, labels = labels, gen_param = param, name = name)
# plot coordinates
if plot: ds.plot_coord()
# normalize
ds.normalize_coord()
if plot: ds.plot_coord()
return ds
def make_circle(radius,num_points):
count = 0
points = np.zeros((num_points,2))
while count<num_points:
x1 = 2*radius*np.random.rand()-radius
x2 = 2*radius*np.random.rand()-radius
x = np.array([x1,x2])
if np.linalg.norm(x)<radius:
points[count,:] = x
count += 1
return points
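# --- Added illustrative sketch (not part of the original script) ---
# make_circle() draws points uniformly from the bounding square and keeps only those
# that fall inside the disk (rejection sampling), so on average ~4/pi draws are needed
# per accepted point. A small self-check using a hypothetical helper name:
def _check_make_circle(radius=2.0, num_points=100):
    pts = make_circle(radius, num_points)
    # every accepted point must lie strictly inside the requested radius
    return bool(np.all(np.linalg.norm(pts, axis=1) < radius))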
def gen_mouse(plot=True):
# set name
name = "mouse"
# set generative parameters
mu1 = np.array([0,0])
rad1 = 4
n1 = 180
mu2 = np.array([-3.5,5])
rad2 = 1.4
n2 = 25
mu3 = np.array([3.5,5])
rad3 = 1.4
n3 = 25
param = {'mu1': mu1, 'rad1': rad1, 'n1': n1,
'mu2': mu2, 'rad2': rad2, 'n2': n2,
'mu3': mu3, 'rad3': rad3, 'n3': n3}
# make labels
labels = np.array([0]*n1+[1]*n2+[2]*n3)
# make coordinates
coord = np.concatenate((make_circle(rad1,n1)+mu1,
make_circle(rad2,n2)+mu2,
make_circle(rad3,n3)+mu3))
# make dataset
ds = dataset(coord = coord, labels = labels, gen_param = param, name = name)
# plot coordinates
if plot: ds.plot_coord()
# normalize
ds.normalize_coord()
if plot: ds.plot_coord()
return ds
def gen_circleandcigar(plot=True):
# set name
name = "circleandcigar"
# set generative parameters
mu1 = np.array([5,0])
sig1 = np.eye(2)
n1 = 50
mu2 = np.array([-5,0])
sig2 = np.array([[1,0],[0,25]])
n2 = 50
param = {'mu1': mu1, 'sig1': sig1, 'n1': n1,
'mu2': mu2, 'sig2': sig2, 'n2': n2}
# make labels
labels = np.array([0]*n1+[1]*n2)
# make coordinates
coord = np.concatenate((np.random.multivariate_normal(mu1,sig1,n1),
np.random.multivariate_normal(mu2,sig2,n2)))
# make dataset
ds = dataset(coord = coord, labels = labels, gen_param = param, name = name)
# plot coordinates
if plot: ds.plot_coord()
# normalize
ds.normalize_coord()
if plot: ds.plot_coord()
return ds
def gen_2cigars(plot=True):
# set name
name = "2cigars"
# set generative parameters
mu1 = np.array([0,-4])
sig1 = np.array([[25,0],[0,1]])
n1 = 50
mu2 = np.array([0,4])
sig2 = np.array([[25,0],[0,1]])
n2 = 50
param = {'mu1': mu1, 'sig1': sig1, 'n1': n1,
'mu2': mu2, 'sig2': sig2, 'n2': n2}
# make labels
labels = np.array([0]*n1+[1]*n2)
# make coordinates
coord = np.concatenate((np.random.multivariate_normal(mu1,sig1,n1),
np.random.multivariate_normal(mu2,sig2,n2)))
# make dataset
ds = dataset(coord = coord, labels = labels, gen_param = param, name = name)
# plot coordinates
if plot: ds.plot_coord()
# normalize
ds.normalize_coord()
if plot: ds.plot_coord()
return ds
def gen_2over3(plot=True):
# set name
name = "2over3"
# set generative parameters
sig = .75
mu1 = np.array([0,0])
sig1 = (sig**2)*np.eye(2)
n1 = 20
mu2 = np.array([-4,0])
sig2 = (sig**2)*np.eye(2)
n2 = 20
mu3 = np.array([4,0])
sig3 = (sig**2)*np.eye(2)
n3 = 20
mu4 = np.array([-2,12])
sig4 = (sig**2)*np.eye(2)
n4 = 20
mu5 = np.array([2,12])
sig5 = (sig**2)*np.eye(2)
n5 = 20
param = {'mu1': mu1, 'sig1': sig1, 'n1': n1,
'mu2': mu2, 'sig2': sig2, 'n2': n2,
'mu3': mu3, 'sig3': sig3, 'n3': n3,
'mu4': mu4, 'sig4': sig4, 'n4': n4,
'mu5': mu5, 'sig5': sig5, 'n5': n5}
# make labels
labels = np.array([0]*n1+[1]*n2+[2]*n3+[3]*n4+[4]*n5)
# make coordinates
coord = np.concatenate((np.random.multivariate_normal(mu1,sig1,n1),
np.random.multivariate_normal(mu2,sig2,n2),
np.random.multivariate_normal(mu3,sig3,n3),
np.random.multivariate_normal(mu4,sig4,n4),
np.random.multivariate_normal(mu5,sig5,n5)))
# make dataset
ds = dataset(coord = coord, labels = labels, gen_param = param, name = name)
# plot coordinates
if plot: ds.plot_coord()
# normalize
ds.normalize_coord()
if plot: ds.plot_coord()
return ds
def gen_halfconcentric(plot=True):
# set name
name = "halfconcentric"
# set generative parameters
nt = 80 # number of thetas
nd = 1 # number of samples per theta
no = nd*nt # number of samples for outer circle
ni = 20 # number of samples for inner circle
r = 5 # radius of outer loop
so = .25 # gaussian noise variance of outer circle
si = .25 # gaussian noise variance of inner circle
thetas = -np.linspace(0,math.pi,nt)
x = [r*math.cos(theta) for theta in thetas]
y = [r*math.sin(theta) for theta in thetas]
param = {'nt': nt, 'nd': nd, 'no': no, 'ni': ni, 'r': r, 'so': so, 'si': si}
# make labels
labels = np.array([0]*ni+[1]*no)
# make coordinates
coord = np.random.multivariate_normal(np.array([0,0]),si*np.eye(2),ni)
for i in range(len(x)):
coord = np.concatenate((coord,np.random.multivariate_normal(np.array([x[i],y[i]]),so*np.eye(2),nd)))
# make dataset
ds = dataset(coord = coord, labels = labels, gen_param = param, name = name)
# plot coordinates
if plot: ds.plot_coord()
# normalize
ds.normalize_coord()
if plot: ds.plot_coord()
return ds
def gen_concentric(plot=True):
# set name
name = "concentric"
# set generative parameters
nt = 80 # number of thetas
nd = 1 # number of samples per theta
no = nd*nt # number of samples for outer circle
ni = 20 # number of samples for inner circle
r = 8 # radius of outer loop
so = .25 # gaussian noise variance of outer circle
si = .25 # gaussian noise variance of inner circle
thetas = -np.linspace(0,2*math.pi,nt)
x = [r*math.cos(theta) for theta in thetas]
y = [r*math.sin(theta) for theta in thetas]
param = {'nt': nt, 'nd': nd, 'no': no, 'ni': ni, 'r': r, 'so': so, 'si': si}
# make labels
labels = np.array([0]*ni+[1]*no)
# make coordinates
coord = np.random.multivariate_normal(np.array([0,0]),si*
|
np.eye(2)
|
numpy.eye
|
import numpy as np
from scipy import signal
from scipy.interpolate import splev, splrep
class Record:
sr = 20000 # Sample Rate - 20 kHz
def __init__(self,array):
self.array = array
## Instance Variables
# Control System Features
self.dom_pp = []
self.rec_pp = []
self.dom_bp = []
self.rec_bp = []
self.dom_pt = []
self.rec_pt = []
self.dom_ssv = []
self.rec_ssv = []
self.dom_sse = []
self.rec_sse = []
self.dom_po = []
self.rec_po = []
self.dom_st_s = []
self.rec_st_s = []
self.dom_rt_s = []
self.rec_rt_s = []
self.dom_dt_s = []
self.rec_dt_s = []
# Spectral Analysis Features
self.dom_pulse_data = []
self.rec_pulse_data = []
self.dom_sd = []
self.rec_sd = []
self.dom_snr = []
self.rec_snr = []
self.dom_mdfr = []
self.rec_mdfr = []
self.dom_mnfr = []
self.rec_mnfr = []
# Outlier Check Results
self.outlier_count = []
## Instance Methods
# Control System Processing
self.PeakDetection()
self.PeakTime()
self.SteadyStateValErr()
self.PercentOvershoot()
self.SettlingTime()
self.RiseTime()
self.DelayTime()
# Spectral Analysis Processing
self.RunSpectralAnalysis()
self.total_rec = np.min((len(self.dom_sd), len(self.rec_sd)))
# # Outlier Detection and Removal
# self.OutlierCount()
# self.RemoveOutliers()
# # Build Feature Datastructure
self.features = self.GenerateFeatures()
self.headers = []
self.headers.append('Dom_Peak_Time')
self.headers.append('Dom_Steady_State_Value')
self.headers.append('Dom_Steady_State_Error')
self.headers.append('Dom_Percent_Overshoot')
self.headers.append('Dom_Settling_Time')
self.headers.append('Dom_Rise_Time')
self.headers.append('Dom_Delay_Time')
for i in np.arange(len(self.dom_sd[0])):
self.headers.append('Dom_Spectral_Bin_%d' % i)
self.headers.append('Dom_SNR')
self.headers.append('Dom_Mean_Freq')
self.headers.append('Dom_Median_Freq')
self.headers.append('Rec_Peak_Time')
self.headers.append('Rec_Steady_State_Value')
self.headers.append('Rec_Steady_State_Error')
self.headers.append('Rec_Percent_Overshoot')
self.headers.append('Rec_Settling_Time')
self.headers.append('Rec_Rise_Time')
self.headers.append('Rec_Delay_Time')
for i in np.arange(len(self.rec_sd[0])):
self.headers.append('Rec_Spectral_Bin_%d' % i)
self.headers.append('Rec_SNR')
self.headers.append('Rec_Mean_Freq')
self.headers.append('Rec_Median_Freq')
def PeakDetection(self):
##### PeakDetection
# Input: array - raw signal data for record
# Output: dom_pp - dominant Pulse Peak index
# dom_bp - dominant Before Pulse peak index
# rec_pp - recessive Pulse Peak index
# rec_bp - recessive Before Pulse peak index
### Pulse Peak Detection ###
# Calculate difference array
arr_diff = np.diff(self.array, prepend=self.array[0])
# Perform moving average filter, width=3, x2
w = 3
arr_diff = np.convolve(arr_diff, np.ones(w), 'valid') / w
arr_diff = np.convolve(arr_diff, np.ones(w), 'valid') / w
# Prepend zeros to offset processing delay
arr_diff = np.insert(arr_diff, 0, np.zeros((w-1)*2), axis=0)
# Crossing filter to detect dominant and recessive leading edge zones
dom_pp_ts = (arr_diff > 0.2).astype(float)
rec_pp_ts = (arr_diff < -0.2).astype(float)
# Find peak for each zone (dominant)
a = np.where(dom_pp_ts == 1)[0].astype(float)
b = np.diff(a, prepend=0)
c = np.where(b > 1)[0]
dom_pp = a[c].astype(int)
# Remove errant peaks (dominant)
corr_idx = np.concatenate((np.diff(dom_pp),[np.average(np.diff(dom_pp))]))
if np.min(np.diff(corr_idx)) < 100:
corr_idx = np.where(corr_idx > np.average(corr_idx/4))[0]
dom_pp = dom_pp[corr_idx]
# Find peak for each zone (recessive)
a = np.where(rec_pp_ts == 1)[0].astype(float)
b = np.diff(a, prepend=0)
c = np.where(b > 1)[0]
rec_pp = a[c].astype(int)
# Remove errant peaks (recessive)
corr_idx = np.concatenate((np.diff(rec_pp),[np.average(np.diff(rec_pp))]))
if np.min(np.diff(corr_idx)) < 15:
corr_idx = np.where(corr_idx > np.average(corr_idx/4))[0]
rec_pp = rec_pp[corr_idx]
# Pair dom and rec indices
dom_len = len(dom_pp)
rec_len = len(rec_pp)
dom_is_larger = []
if dom_len > rec_len + 1:
dom_is_larger = 1
elif rec_len > dom_len + 1:
dom_is_larger = 0
if not dom_is_larger == []:
len_min = np.min((dom_len, rec_len))
len_dif = np.abs(dom_len - rec_len) + 1
dif_amt = []
for i in np.arange(len_dif):
                if dom_is_larger:
                    # slide the longer dominant index list over the recessive one
                    temp = dom_pp[i:len_min+i] - rec_pp[0:len_min]
                else:
                    # slide the longer recessive index list over the dominant one
                    temp = dom_pp[0:len_min] - rec_pp[i:len_min+i]
temp = np.abs(temp)
temp = np.sum(temp)
dif_amt.append(temp)
dif_loc = np.where(np.min(dif_amt) == dif_amt)[0]
if dom_is_larger:
dom_pp = dom_pp[dif_loc[0]:rec_len+dif_loc[0]+1]
else:
rec_pp = rec_pp[dif_loc[0]:dom_len+dif_loc[0]+1]
# Create timestamps using indices
dom_pp_ts = np.zeros(dom_pp_ts.size)
dom_pp_ts[dom_pp] = 1
self.dom_pp = np.where(dom_pp_ts == 1)[0]
rec_pp_ts = np.zeros(rec_pp_ts.size)
rec_pp_ts[rec_pp] = 1
self.rec_pp = np.where(rec_pp_ts == 1)[0]
### Pre-Peak Detection ###
# Crossing filter to detect pre-dominant steady state (Before Leading-edge)
dom_bp_ts = np.abs(np.diff(self.array - 2.5, prepend = self.array[0]))
w = 5
dom_bp_ts = np.convolve(dom_bp_ts, np.ones(w), 'valid') / w
dom_bp_ts = np.insert(dom_bp_ts, 0, np.zeros(w-1), axis=0)
dom_bp_ts = 1-(dom_bp_ts > 0.05).astype(float)
# Crossing filter to detect pre-recessive steady state (Before Leading-edge)
rec_bp_ts = np.abs(np.diff(3.5 - self.array, prepend = self.array[0]))
w = 5
rec_bp_ts = np.convolve(rec_bp_ts, np.ones(w), 'valid') / w
rec_bp_ts = np.insert(rec_bp_ts, 0, np.zeros(w-1), axis=0)
rec_bp_ts = 1-(rec_bp_ts > 0.05).astype(float)
## Find the last instance of steady state prior to dominant peaks
jj = np.zeros(dom_pp.size).astype(int)
for k in np.arange(0,dom_pp.size):
# "Dominant-low steady state" indices before peak
j = np.where(dom_bp_ts[0:dom_pp[k]] == 1)
j = j[0]
# Find nearest index before dominant peak
min_idx = j-dom_pp[k]
min_idx = min_idx[np.where(np.min(np.abs(min_idx)) == np.abs(min_idx))[0]]
jj[k] = ((min_idx + dom_pp[k])[0])
# Dominant prior-to-peak steady-state indices
dom_bp_ts2 = np.zeros(dom_bp_ts.size, dtype=int)
dom_bp_ts2[jj] = 1
self.dom_bp = jj
## Find the last instance of steady state prior to recessive peaks
jj = np.zeros(rec_pp.size).astype(int)
for k in np.arange(0,rec_pp.size):
# "Recesive-low steady state" indices before peak
j = np.where(rec_bp_ts[0:rec_pp[k]] == 1)
j = j[0]
# Find nearest index before recessive peak
min_idx = j-rec_pp[k]
min_idx = min_idx[np.where(np.min(np.abs(min_idx)) == np.abs(min_idx))[0]]
jj[k] = ((min_idx + rec_pp[k])[0])
# Recessive prior-to-peak steady-state indices
rec_bp_ts2 = np.zeros(rec_bp_ts.size, dtype=int)
rec_bp_ts2[jj] = 1
self.rec_bp = jj
def PeakTime(self):
##### PeakTime
# Input: dom_pp - dominant Pulse Peak index
# dom_bp - dominant Before Pulse peak index
# rec_pp - recessive Pulse Peak index
# rec_bp - recessive Before Pulse peak index
# sr - sample rate of the raw data
# Output: dom_pt - dominant Peak Time
# rec_pt - recessive Peak Time
self.dom_pt = (self.dom_pp-self.dom_bp)/Record.sr
self.rec_pt = (self.rec_pp-self.rec_bp)/Record.sr
def SteadyStateValErr(self):
##### Steady State Value and Error
# Input: array - raw signal data for record
# dom_bp - dominant Before Pulse peak index
# rec_bp - recessive Before Pulse peak index
# Output: dom_ssv - dominant Steady State Value
# rec_ssv - recessive Steady State Value
# dom_sse - dominant Steady State Error
# rec_sse - recessive Steady State Error
# Perform moving average filter, width=19
w = 19
arr_avg = np.convolve(self.array, np.ones(w), 'valid') / w
arr_avg = np.insert(arr_avg, 0, arr_avg[0]*np.ones(w-1), axis=0)
# Extract Steady State Value from previous Steady State Index
dom_ssv_idx = self.rec_bp
rec_ssv_idx = self.dom_bp
self.dom_ssv = arr_avg[dom_ssv_idx]
self.rec_ssv = arr_avg[rec_ssv_idx]
# Calculate Steady State Error
self.dom_sse = arr_avg[dom_ssv_idx] - 3.5
self.rec_sse = arr_avg[rec_ssv_idx] - 2.5
def PercentOvershoot(self):
##### Percent Overshoot
# Input: array - raw signal data for record
# dom_pp - dominant Before Pulse peak index
# rec_pp - recessive Before Pulse peak index
# dom_ssv - dominant Steady State Value
# rec_ssv - recessive Steady State Value
# Output: dom_po - dominant Percent Overshoot
# rec_po - recessive Percent Overshoot
dom_pv = self.array[self.dom_pp]
rec_pv = self.array[self.rec_pp]
try:
self.dom_po = 100 * (dom_pv - self.dom_ssv) / self.dom_ssv
self.rec_po = 100 * (self.rec_ssv - rec_pv) / self.rec_ssv
except:
self.dom_po = 100 * (dom_pv - np.average(self.dom_ssv)) / np.average(self.dom_ssv)
self.rec_po = 100 * (np.average(self.rec_ssv) - rec_pv) / np.average(self.rec_ssv)
def SettlingTime(self):
##### Settling Time
# Input: array - raw signal data for record
# dom_pp - dominant Before Pulse peak index
# rec_pp - recessive Before Pulse peak index
# dom_ssv - dominant Steady State Value
# rec_ssv - recessive Steady State Value
# sr - sample rate of the raw data
# Output: dom_st_s - dominant Settling Time (s)
# rec_st_s - recessive Settling Time (s)
ss_rng = 0.05 # 5% Steady State Range of 1V Vpp design
# Find index and time of settling point (dominant)
w = 3
arr_avg1 = np.convolve(np.abs(self.array-np.average(self.dom_ssv)), np.ones(w), 'valid') / w
arr_avg1 = np.insert(arr_avg1, 0, arr_avg1[0]*np.ones(w-1), axis=0)
arr_avg11 = np.abs(np.round(arr_avg1,decimals=2))
dom_st_idx = np.where(arr_avg11 <= ss_rng)[0]
dom_st = np.zeros(self.dom_pp.size)
if dom_st_idx.size != 0:
for i in np.arange(self.dom_pp.size):
dom_st_idx[dom_st_idx <= self.dom_pp[i]] = -self.array.size
j = np.where(
np.min(np.abs(dom_st_idx - self.dom_pp[i]))
== np.abs(dom_st_idx - self.dom_pp[i])
)[0][-1]
dom_st[i] = dom_st_idx[j]
dom_st = dom_st.astype(int)
else:
            dom_st = np.concatenate((self.dom_pp[1:],[self.array.size]))
self.dom_st_s = (dom_st - self.dom_pp)/Record.sr
        # Find index and time of settling point (recessive)
w = 3
arr_avg2 = np.convolve(np.average(self.dom_ssv)-self.array, np.ones(w), 'valid') / w
arr_avg2 = np.insert(arr_avg2, 0, arr_avg2[0]*np.ones(w-1), axis=0)
arr_avg22 = np.abs(np.round(arr_avg2,decimals=2))
rec_st_idx = np.where(arr_avg22 <= ss_rng)[0]
rec_st = np.zeros(self.rec_pp.size)
for i in np.arange(self.rec_pp.size):
rec_st_idx[rec_st_idx <= self.rec_pp[i]] = -self.array.size
j = np.where(
np.min(np.abs(rec_st_idx - self.rec_pp[i]))
== np.abs(rec_st_idx - self.rec_pp[i])
)[0][-1]
rec_st[i] = rec_st_idx[j]
rec_st = rec_st.astype(int)
self.rec_st_s = (rec_st - self.rec_pp)/Record.sr
def RiseTime(self):
##### Rise Time
# Input: array - raw signal data for record
# dom_pp - dominant Pulse Peak index
# rec_pp - recessive Pulse Peak index
# dom_bp - dominant Before Pulse peak index
# rec_bp - recessive Before Pulse peak index
# dom_ssv - dominant Steady State Value
# rec_ssv - recessive Steady State Value
# sr - sample rate of the raw data
# Output: dom_rt_s - dominant Settling Time (s)
# rec_rt_s - recessive Settling Time (s)
# Find index and time of rise point (dominant)
dom_rt_ts = (self.array.copy() - np.average(self.rec_ssv) <= 1).astype(int)
dom_rt_idx = np.where(dom_rt_ts == 1)[0]
dom_rt = np.zeros(self.dom_pp.size)
for i in np.arange(self.dom_pp.size):
j = np.where(np.min(np.abs(dom_rt_idx - self.dom_pp[i]))
== np.abs(dom_rt_idx - self.dom_pp[i]))[0][-1]
dom_rt[i] = dom_rt_idx[j]
dom_rt = dom_rt.astype(int)
self.dom_rt_s = (dom_rt - self.dom_bp)/Record.sr
# Find index and time of rise point (recessive)
rec_rt_ts = (-self.array.copy() + np.average(self.dom_ssv) <= 1).astype(int)
rec_rt_idx = np.where(rec_rt_ts == 1)[0]
rec_rt = np.zeros(self.rec_pp.size)
for i in np.arange(self.rec_pp.size):
j = np.where(np.min(np.abs(rec_rt_idx - self.rec_pp[i]))
== np.abs(rec_rt_idx - self.rec_pp[i]))[0][-1]
rec_rt[i] = rec_rt_idx[j]
rec_rt = rec_rt.astype(int)
self.rec_rt_s = (rec_rt - self.rec_bp)/Record.sr
def DelayTime(self):
##### Delay Time
# Input: array - raw signal data for record
# dom_pp - dominant Pulse Peak index
# rec_pp - recessive Pulse Peak index
# dom_bp - dominant Before Pulse peak index
# rec_bp - recessive Before Pulse peak index
# dom_ssv - dominant Steady State Value
# rec_ssv - recessive Steady State Value
# sr - sample rate of the raw data
# Output: dom_rt_s - dominant Settling Time (s)
# rec_rt_s - recessive Settling Time (s)
# Find index and time of delay point (dominant)
dom_dt_ts = (self.array.copy() - np.average(self.rec_ssv) <= 0.5).astype(int)
dom_dt_idx = np.where(dom_dt_ts == 1)[0]
dom_dt = np.zeros(self.dom_pp.size)
for i in np.arange(self.dom_pp.size):
j = np.where(np.min(np.abs(dom_dt_idx - self.dom_pp[i]))
== np.abs(dom_dt_idx - self.dom_pp[i]))[0][-1]
dom_dt[i] = dom_dt_idx[j]
dom_dt = dom_dt.astype(int)
self.dom_dt_s = (dom_dt - self.dom_bp)/Record.sr
# Find index and time of delay point (recessive)
rec_dt_ts = (-self.array.copy() + np.average(self.dom_ssv) <= 0.5).astype(int)
rec_dt_idx = np.where(rec_dt_ts == 1)[0]
rec_dt = np.zeros(self.rec_pp.size)
for i in np.arange(self.rec_pp.size):
j = np.where(np.min(np.abs(rec_dt_idx - self.rec_pp[i]))
== np.abs(rec_dt_idx - self.rec_pp[i]))[0][-1]
rec_dt[i] = rec_dt_idx[j]
rec_dt = rec_dt.astype(int)
self.rec_dt_s = (rec_dt - self.rec_bp)/Record.sr
def RunSpectralAnalysis(self):
##### Spectral Analysis
# Run the following methods:
#
# + Spectral Density Binning
# + Signal-to-Noise Ratio
# + Median Frequency
# + Mean Frequency
#
# Features will be processed for both
# Dominant and Recessive CAN High bits
self.SpectralDensityBinning()
self.SignalToNoiseRatio()
self.MeanMedianFrequency()
def SpectralDensityBinning(self):
##### Bin Spectral Density
index_shift = -5 # Include some steady state info from prev pulse
dom_pp_sd = self.dom_pp.copy() + index_shift
rec_pp_sd = self.rec_pp.copy() + index_shift
# Find the start/end pulse indices
if self.dom_pp[0] <= self.rec_pp[0]:
if len(self.dom_pp) > len(self.rec_pp):
dom_pp_sd = dom_pp_sd[0:-1]
idx_dom_se = np.array([dom_pp_sd,rec_pp_sd])
idx_rec_se = np.array([rec_pp_sd[0:-1],dom_pp_sd[1:]])
else:
if len(self.rec_pp) > len(self.dom_pp):
rec_pp_sd = rec_pp_sd[0:-1]
idx_rec_se = np.array([rec_pp_sd,dom_pp_sd])
idx_dom_se = np.array([dom_pp_sd[0:-1],rec_pp_sd[1:]])
# Remove pulses that don't provide enough steady-state information from the prev pulse
if idx_dom_se[0][0] < -index_shift:
idx_dom_se = np.array([idx_dom_se[0][1:],idx_dom_se[1][1:]])
if idx_rec_se[0][0] < -index_shift:
idx_rec_se = np.array([idx_rec_se[0][1:],idx_rec_se[1][1:]])
        # Check for out-of-order index error
if idx_dom_se[0][0] > idx_dom_se[1][0]:
temp1 = np.array([idx_dom_se[1],idx_dom_se[0]])
temp2 = np.array([idx_dom_se[0],idx_rec_se[1]])
idx_dom_se = temp2
idx_rec_se = temp1
# Save dom pulse info to parent method variable dom_pulse_data
for i in np.arange(idx_dom_se.shape[1]):
self.dom_pulse_data.append(self.array[idx_dom_se[0][i]:idx_dom_se[1][i]])
# Save dom pulse info to parent method variable rec_pulse_data
for i in np.arange(idx_rec_se.shape[1]):
self.rec_pulse_data.append(self.array[idx_rec_se[0][i]:idx_rec_se[1][i]])
# Reset indices
idx_dom_se = idx_dom_se - index_shift
idx_rec_se = idx_rec_se - index_shift
# Bin power densities
def binned_sd(Pxx_den, nbins):
bs = Pxx_den.size/nbins
bs = round(bs)
Pxx_hist = []
for i in np.arange(nbins):
idx_s = i*bs
idx_e = (i+1)*bs
if idx_e >= Pxx_den.size:
idx_e = Pxx_den.size - 1
Pxx_hist.append(np.average(Pxx_den[idx_s:idx_e]))
Pxx_hist = np.nan_to_num(Pxx_hist)
return Pxx_hist
# Select bin sizes
bin_sel = 2
dom_nbin = [15,13,10] # Bin size limited by pulse length
# Perform binning of spectral density
self.dom_sd = []
for i in np.arange(len(self.dom_pulse_data)):
f, pd = signal.welch(self.dom_pulse_data[i], Record.sr, nperseg=len(self.dom_pulse_data[i]));
self.dom_sd.append(binned_sd(pd, dom_nbin[bin_sel]))
rec_nbin = [10, 8, 5] # Bin size limited by pulse length
self.rec_sd = []
for i in np.arange(len(self.rec_pulse_data)):
f, pd = signal.welch(self.rec_pulse_data[i], Record.sr, nperseg=len(self.rec_pulse_data[i]));
self.rec_sd.append(binned_sd(pd, rec_nbin[bin_sel]))
def SignalToNoiseRatio(self):
index_shift = -5
self.dom_snr = []
for i in np.arange(len(self.dom_pulse_data)):
cur_array = self.dom_pulse_data[i]
signl = (np.arange(len(cur_array)) > -index_shift-1).astype(float)*np.average(self.dom_ssv) + \
(np.arange(len(cur_array)) <= -index_shift-1).astype(float)*np.average(self.rec_ssv)
noise = signl - cur_array
f, s_pd = signal.welch(signl, Record.sr, nperseg=len(signl));
f, n_pd = signal.welch(noise, Record.sr, nperseg=len(noise));
Ps = sum(s_pd)
Pn = sum(n_pd)
if Pn == 0:
                self.dom_snr.append(np.nan)
continue
self.dom_snr.append(10*np.log10(Ps/Pn))
self.rec_snr = []
for i in np.arange(len(self.rec_pulse_data)):
cur_array = self.rec_pulse_data[i]
signl = (np.arange(len(cur_array)) > -index_shift-2).astype(float)*np.average(self.rec_ssv) + \
(np.arange(len(cur_array)) <= -index_shift-2).astype(float)*np.average(self.dom_ssv)
noise = signl - cur_array
f, s_pd = signal.welch(signl, Record.sr, nperseg=len(signl))
f, n_pd = signal.welch(noise, Record.sr, nperseg=len(noise))
Ps = sum(s_pd)
Pn = sum(n_pd)
if Pn == 0:
self.rec_snr.append(np.nan)
continue
self.rec_snr.append(10*np.log10(Ps/Pn))
def MeanMedianFrequency(self):
self.dom_mdfr = []
self.rec_mdfr = []
self.dom_mnfr = []
self.rec_mnfr = []
self.dom_mnfr = []
self.dom_mdfr = []
for i in np.arange(len(self.dom_pulse_data)):
cur_pulse = self.dom_pulse_data[i]
f, pd = signal.welch(cur_pulse, Record.sr, nperseg=len(cur_pulse))
spl = splrep(f, pd, k=1)
x2 = np.arange(f[0], f[-1],0.01)
y2 = splev(x2, spl)
y21 = y2/np.sum(y2) # Normalize spectra
            y22 = np.cumsum(y21) # Cumulative sum (CDF for SPD)
y23 = y22-0.5 # Subtract 50% of energy
y24 = abs(y23) # Abs value to create a minima
y25 = np.where(np.min(y24) == y24)[0][-1] # Locate minima index
self.dom_mdfr.append(x2[y25]) # Retrieve minima frequency
self.dom_mnfr.append(np.sum(pd*f)/np.sum(pd))
self.rec_mnfr = []
self.rec_mdfr = []
for i in np.arange(len(self.rec_pulse_data)):
cur_pulse = self.rec_pulse_data[i]
f, pd = signal.welch(cur_pulse, Record.sr, nperseg=len(cur_pulse))
spl = splrep(f, pd, k=1)
x2 = np.arange(f[0], f[-1],0.01)
y2 = splev(x2, spl)
y21 = y2/np.sum(y2) # Normalize spectra
            y22 = np.cumsum(y21) # Cumulative sum (CDF for SPD)
y23 = y22-0.5 # Subtract 50% of energy
y24 = abs(y23) # Abs value to create a minima
y25 = np.where(np.min(y24) == y24)[0][-1] # Locate minima index
self.rec_mdfr.append(x2[y25]) # Retrieve minima frequency
self.rec_mnfr.append(np.sum(pd*f)/np.sum(pd))
def OutlierCount(self):
##### Outlier Count
# Calculates the standard deviation for each feature and creates a binary
# mask of pulses that exceed the standard deviation threshold
# Binary masks are added to determine total number of deviations per pulse
# across all features
std = 1.5 # Threshold
def fix_size_disparity(in1, in2):
if in1.size > in2.size:
in2 = np.concatenate((in2,np.zeros(in1.size - in2.size))).astype(int)
elif in2.size > in1.size:
in1 = np.concatenate((in1,np.zeros(in2.size - in1.size))).astype(int)
return in1, in2
# Outlier check and size correction
self.dom_pp, self.rec_pp = fix_size_disparity(self.dom_pp, self.rec_pp)
self.dom_bp, self.rec_bp = fix_size_disparity(self.dom_bp, self.rec_bp)
self.dom_pt, self.rec_pt = fix_size_disparity(self.dom_pt, self.rec_pt)
dom_pt_out = (np.abs(self.dom_pt-np.average(self.dom_pt)) >
std*np.std(self.dom_pt)).astype(int)
rec_pt_out = (np.abs(self.rec_pt-np.average(self.rec_pt)) >
std*np.std(self.rec_pt)).astype(int)
pt_out = dom_pt_out + rec_pt_out
self.dom_ssv, self.rec_ssv = fix_size_disparity(self.dom_ssv, self.rec_ssv)
dom_ssv_out = (np.abs(self.dom_ssv-np.average(self.dom_ssv)) >
std*np.std(self.dom_ssv)).astype(int)
rec_ssv_out = (np.abs(self.rec_ssv-np.average(self.rec_ssv)) >
std*np.std(self.rec_ssv)).astype(int)
ssv_out = dom_ssv_out + rec_ssv_out
self.dom_sse, self.rec_sse = fix_size_disparity(self.dom_sse, self.rec_sse)
dom_sse_out = (np.abs(self.dom_sse-np.average(self.dom_sse)) >
std*np.std(self.dom_sse)).astype(int)
rec_sse_out = (np.abs(self.rec_sse-np.average(self.rec_sse)) >
std*np.std(self.rec_sse)).astype(int)
sse_out = dom_sse_out + rec_sse_out
self.dom_po, self.rec_po = fix_size_disparity(self.dom_po, self.rec_po)
dom_po_out = (np.abs(self.dom_po-np.average(self.dom_po)) >
std*np.std(self.dom_po)).astype(int)
rec_po_out = (np.abs(self.rec_po-np.average(self.rec_po)) >
std*np.std(self.rec_po)).astype(int)
po_out = dom_po_out + rec_po_out
self.dom_st_s, self.rec_st_s = fix_size_disparity(self.dom_st_s, self.rec_st_s)
dom_st_s_out = (np.abs(self.dom_st_s-np.average(self.dom_st_s)) >
std*np.std(self.dom_st_s)).astype(int)
rec_st_s_out = (np.abs(self.rec_st_s-np.average(self.rec_st_s)) >
std*np.std(self.rec_st_s)).astype(int)
st_s_out = dom_st_s_out + rec_st_s_out
self.dom_rt_s, self.rec_rt_s = fix_size_disparity(self.dom_rt_s, self.rec_rt_s)
dom_rt_s_out = (np.abs(self.dom_rt_s-np.average(self.dom_rt_s)) >
std*np.std(self.dom_rt_s)).astype(int)
rec_rt_s_out = (np.abs(self.rec_rt_s-np.average(self.rec_rt_s)) >
std*np.std(self.rec_rt_s)).astype(int)
rt_s_out = dom_rt_s_out + rec_rt_s_out
self.dom_dt_s, self.rec_dt_s = fix_size_disparity(self.dom_dt_s, self.rec_dt_s)
dom_dt_s_out = (np.abs(self.dom_dt_s-np.average(self.dom_dt_s)) >
std*np.std(self.dom_dt_s)).astype(int)
rec_dt_s_out = (np.abs(self.rec_dt_s-np.average(self.rec_dt_s)) >
std*np.std(self.rec_dt_s)).astype(int)
dt_s_out = dom_dt_s_out + rec_dt_s_out
self.outlier_count = pt_out + ssv_out + sse_out + \
po_out + st_s_out + rt_s_out + dt_s_out
return self.outlier_count
def RemoveOutliers(self):
##### Remove Outlier Pulses
# Checks outlier count for each pulse and removes pulses that exceed
# the deviation threshold
dev = 6
noutlier_idx = np.where(self.outlier_count < dev + 1)[0]
self.dom_pp = self.dom_pp[noutlier_idx]
self.rec_pp = self.rec_pp[noutlier_idx]
self.dom_bp = self.dom_bp[noutlier_idx]
self.rec_bp = self.rec_bp[noutlier_idx]
self.dom_pt = self.dom_pt[noutlier_idx]
self.rec_pt = self.rec_pt[noutlier_idx]
self.dom_ssv = self.dom_ssv[noutlier_idx]
self.rec_ssv = self.rec_ssv[noutlier_idx]
self.dom_sse = self.dom_sse[noutlier_idx]
self.rec_sse = self.rec_sse[noutlier_idx]
self.dom_po = self.dom_po[noutlier_idx]
self.rec_po = self.rec_po[noutlier_idx]
self.dom_st_s = self.dom_st_s[noutlier_idx]
self.rec_st_s = self.rec_st_s[noutlier_idx]
self.dom_rt_s = self.dom_rt_s[noutlier_idx]
self.rec_rt_s = self.rec_rt_s[noutlier_idx]
self.dom_dt_s = self.dom_dt_s[noutlier_idx]
self.rec_dt_s = self.rec_dt_s[noutlier_idx]
self.OutlierCount()
def summary(self):
print('Peak Time (s):')
print(' dom: ', self.dom_pt)
print(' avg: ', np.average(self.dom_pt))
print(' std: ', np.std(self.dom_pt))
print(' dev: ', np.abs(self.dom_pt-np.average(self.dom_pt)))
# print(' out: ', dom_pt_out)
print(' rec: ', self.rec_pt)
print(' avg: ', np.average(self.rec_pt))
print(' std: ',
|
np.std(self.rec_pt)
|
numpy.std
|
import numpy as np
from numpy import random
from scipy.interpolate import interp1d
import pandas as pd
msun = 1.9891e30
rsun = 695500000.0
G = 6.67384e-11
AU = 149597870700.0
def component_noise(tessmag, readmod=1, zodimod=1):
sys = 59.785
star_mag_level, star_noise_level = np.array(
[
[4.3885191347753745, 12.090570910640581],
[12.023294509151416, 467.96434635620614],
[17.753743760399338, 7779.603209291808],
]
).T
star_pars = np.polyfit(star_mag_level, np.log10(star_noise_level), 1)
zodi_mag_level, zodi_noise_level = np.array(
[
[8.686356073211314, 18.112513551189224],
[13.08901830282862, 688.2812796087189],
[16.68801996672213, 19493.670323892282],
]
).T
zodi_pars = np.polyfit(zodi_mag_level, np.log10(zodi_noise_level), 1)
read_mag_level, read_noise_level = np.array(
[
[8.476705490848586, 12.31474807751376],
[13.019134775374376, 522.4985702369348],
[17.841098169717142, 46226.777232915076],
]
).T
read_pars = np.polyfit(read_mag_level, np.log10(read_noise_level), 1)
c1, c2, c3, c4 = (
10 ** (tessmag * star_pars[0] + star_pars[1]),
10 ** (tessmag * zodi_pars[0] + zodi_pars[1]),
10 ** (tessmag * read_pars[0] + read_pars[1]),
sys,
)
return np.sqrt(
c1 ** 2 + (readmod * c2) ** 2 + (zodimod * c3) ** 2 + c4 ** 2
)
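# --- Added illustrative sketch (not part of the original module) ---
# component_noise() adds the star, zodiacal, read, and systematic noise terms in
# quadrature, each interpolated in log space from three anchor magnitudes, so the
# returned noise should grow with TESS magnitude. Hypothetical helper for illustration:
def _example_component_noise():
    bright = component_noise(8.0)   # brighter target
    faint = component_noise(14.0)   # fainter target
    return bright < faint           # expected to be True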
def rndm(a, b, g, size=1):
"""Power-law gen for pdf(x)\propto x^{g-1} for a<=x<=b"""
r = np.random.random(size=size)
ag, bg = a ** g, b ** g
return (ag + (bg - ag) * r) ** (1.0 / g)
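# --- Added illustrative sketch (not part of the original module) ---
# rndm() is inverse-transform sampling of a bounded power law; with g = -1.7 (the
# exponent used for giant-planet radii below) small values dominate the draws.
# Hypothetical helper for illustration:
def _example_rndm(n=1000):
    radii = rndm(6, 22, -1.7, size=n)
    # all draws stay inside [6, 22]
    return float(radii.min()), float(radii.max())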
def Fressin13_select_extrap(nselect=1):
    # create a pot for Fressin occurrence-rate numbers (balls)
balls = np.array([])
    # pot 1 contains rp=0.8-1.25, p=0.8-2
p1 = np.zeros(180) + 1
# pot 2 contains rp=1.25-2.0, p=0.8-2
p2 = np.zeros(170) + 2
# pot 3 contains rp=2-4, p=0.8-2
p3 = np.zeros(35) + 3
# pot 4 contains rp=4-6, p=0.8-2
p4 = np.zeros(4) + 4
# pot 5 contains rp=6-22, p=0.8-2
p5 = np.zeros(15) + 5
    # pot 6 contains rp=0.8-1.25, p=2-3.4
p6 = np.zeros(610) + 6
# pot 7 contains rp=1.25-2.0, p=2-3.4
p7 = np.zeros(740) + 7
# pot 8 contains rp=2-4, p=2-3.4
p8 = np.zeros(180) + 8
# pot 9 contains rp=4-6, p=2-3.4
p9 = np.zeros(6) + 9
# pot 10 contains rp=6-22, p=2-3.4
p10 = np.zeros(67) + 10
    # pot 11 contains rp=0.8-1.25, p=3.4-5.9
p11 = np.zeros(1720) + 11
# pot 12 contains rp=1.25-2.0, p=3.4-5.9
p12 = np.zeros(1490) + 12
# pot 13 contains rp=2-4, p=3.4-5.9
p13 = np.zeros(730) + 13
# pot 14 contains rp=4-6, p=3.4-5.9
p14 = np.zeros(110) + 14
# pot 15 contains rp=6-22, p=3.4-5.9
p15 = np.zeros(170) + 15
    # pot 16 contains rp=0.8-1.25, p=5.9-10
p16 = np.zeros(2700) + 16
# pot 17 contains rp=1.25-2.0, p=5.9-10
p17 = np.zeros(2900) + 17
# pot 18 contains rp=2-4, p=5.9-10
p18 = np.zeros(1930) + 18
# pot 19 contains rp=4-6, p=5.9-10
p19 = np.zeros(91) + 19
# pot 20 contains rp=6-22, p=5.9-10
p20 = np.zeros(180) + 20
    # pot 21 contains rp=0.8-1.25, p=10-17
p21 = np.zeros(2700) + 21
# pot 22 contains rp=1.25-2.0, p=10-17
p22 = np.zeros(4300) + 22
# pot 23 contains rp=2-4, p=10-17
p23 = np.zeros(3670) + 23
# pot 24 contains rp=4-6, p=10-17
p24 = np.zeros(290) + 24
# pot 25 contains rp=6-22, p=10-17
p25 = np.zeros(270) + 25
    # pot 26 contains rp=0.8-1.25, p=17-29
p26 = np.zeros(2930) + 26
# pot 27 contains rp=1.25-2.0, p=17-29
p27 = np.zeros(4490) + 27
# pot 28 contains rp=2-4, p=17-29
p28 = np.zeros(5290) + 28
# pot 29 contains rp=4-6, p=17-29
p29 = np.zeros(320) + 29
# pot 30 contains rp=6-22, p=17-29
p30 = np.zeros(230) + 30
    # pot 31 contains rp=0.8-1.25, p=29-50
p31 = np.zeros(4080) + 31
# pot 32 contains rp=1.25-2.0, p=29-50
p32 = np.zeros(5290) + 32
# pot 33 contains rp=2-4, p=29-50
p33 = np.zeros(6450) + 33
# pot 34 contains rp=4-6, p=29-50
p34 = np.zeros(490) + 34
# pot 35 contains rp=6-22, p=29-50
p35 = np.zeros(350) + 35
    # pot 36 contains rp=0.8-1.25, p=50-85
p36 = np.zeros(3460) + 36
# pot 37 contains rp=1.25-2.0, p=50-85
p37 = np.zeros(3660) + 37
# pot 38 contains rp=2-4, p=50-85
p38 = np.zeros(5250) + 38
# pot 39 contains rp=4-6, p=50-85
p39 = np.zeros(660) + 39
# pot 40 contains rp=6-22, p=50-85
p40 = np.zeros(710) + 40
    # pot 41 contains rp=0.8-1.25, p=50-150
    p41 = np.zeros(3460) + 41
    # pot 42 contains rp=1.25-2.0, p=50-150
    p42 = np.zeros(3660) + 42
    # pot 43 contains rp=2-4, p=50-150
    p43 = np.zeros(5250) + 43
    # pot 44 contains rp=4-6, p=50-150
    p44 = np.zeros(660) + 44
    # pot 45 contains rp=6-22, p=50-150
    p45 = np.zeros(710) + 45
    # pot 46 contains rp=0.8-1.25, p=150-270
    p46 = np.zeros(3460) + 46
    # pot 47 contains rp=1.25-2.0, p=150-270
    p47 = np.zeros(3660) + 47
    # pot 48 contains rp=2-4, p=150-270
    p48 = np.zeros(5250) + 48
    # pot 49 contains rp=4-6, p=150-270
    p49 = np.zeros(660) + 49
    # pot 50 contains rp=6-22, p=150-270
    p50 = np.zeros(710) + 50
    # pot 51 contains rp=0.8-1.25, p=270-480
    p51 = np.zeros(3460) + 51
    # pot 52 contains rp=1.25-2.0, p=270-480
    p52 = np.zeros(3660) + 52
    # pot 53 contains rp=2-4, p=270-480
    p53 = np.zeros(5250) + 53
    # pot 54 contains rp=4-6, p=270-480
    p54 = np.zeros(660) + 54
    # pot 55 contains rp=6-22, p=270-480
    p55 = np.zeros(710) + 55
balls = np.r_[
balls,
p1,
p2,
p3,
p4,
p5,
p6,
p7,
p8,
p9,
p10,
p11,
p12,
p13,
p14,
p15,
p16,
p17,
p18,
p19,
p20,
p21,
p22,
p23,
p24,
p25,
p26,
p27,
p28,
p29,
p30,
p31,
p32,
p33,
p34,
p35,
p36,
p37,
p38,
p39,
p40,
p41,
p42,
p43,
p44,
p45,
p46,
p47,
p48,
p49,
p50,
p51,
p52,
p53,
p54,
p55,
]
# lookup for what the balls mean
# outputs radlow, radhigh, Plow, Phigh
ball_lookup = {
0: [0.0, 0.0, 0.0, 0.0],
1: [0.8, 1.25, 0.8, 2.0],
2: [1.25, 2.0, 0.8, 2.0],
3: [2.0, 4.0, 0.8, 2.0],
4: [4.0, 6.0, 0.8, 2.0],
5: [6.0, 22.0, 0.8, 2.0],
6: [0.8, 1.25, 2.0, 3.4],
7: [1.25, 2.0, 2.0, 3.4],
8: [2.0, 4.0, 2.0, 3.4],
9: [4.0, 6.0, 2.0, 3.4],
10: [6.0, 22.0, 2.0, 3.4],
11: [0.8, 1.25, 3.4, 5.9],
12: [1.25, 2.0, 3.4, 5.9],
13: [2.0, 4.0, 3.4, 5.9],
14: [4.0, 6.0, 3.4, 5.9],
15: [6.0, 22.0, 3.4, 5.9],
16: [0.8, 1.25, 5.9, 10.0],
17: [1.25, 2.0, 5.9, 10.0],
18: [2.0, 4.0, 5.9, 10.0],
19: [4.0, 6.0, 5.9, 10.0],
20: [6.0, 22.0, 5.9, 10.0],
21: [0.8, 1.25, 10.0, 17.0],
22: [1.25, 2.0, 10.0, 17.0],
23: [2.0, 4.0, 10.0, 17.0],
24: [4.0, 6.0, 10.0, 17.0],
25: [6.0, 22.0, 10.0, 17.0],
26: [0.8, 1.25, 17.0, 29.0],
27: [1.25, 2.0, 17.0, 29.0],
28: [2.0, 4.0, 17.0, 29.0],
29: [4.0, 6.0, 17.0, 29.0],
30: [6.0, 22.0, 17.0, 29.0],
31: [0.8, 1.25, 29.0, 50.0],
32: [1.25, 2.0, 29.0, 50.0],
33: [2.0, 4.0, 29.0, 50.0],
34: [4.0, 6.0, 29.0, 50.0],
35: [6.0, 22.0, 29.0, 50.0],
36: [0.8, 1.25, 50.0, 85.0],
37: [1.25, 2.0, 50.0, 85.0],
38: [2.0, 4.0, 50.0, 85.0],
39: [4.0, 6.0, 50.0, 85.0],
40: [6.0, 22.0, 50.0, 85.0],
41: [0.8, 1.25, 50.0, 150.0],
42: [1.25, 2.0, 50.0, 150.0],
43: [2.0, 4.0, 50.0, 150.0],
44: [4.0, 6.0, 50.0, 150.0],
45: [6.0, 22.0, 50.0, 150.0],
46: [0.8, 1.25, 150.0, 270.0],
47: [1.25, 2.0, 150.0, 270.0],
48: [2.0, 4.0, 150.0, 270.0],
49: [4.0, 6.0, 150.0, 270.0],
50: [6.0, 22.0, 150.0, 270.0],
51: [0.8, 1.25, 270.0, 480.0],
52: [1.25, 2.0, 270.0, 480.0],
53: [2.0, 4.0, 270.0, 480.0],
54: [4.0, 6.0, 270.0, 480.0],
55: [6.0, 22.0, 270.0, 480.0],
}
rsamps = random.choice(balls, size=nselect)
radius = np.zeros(nselect)
period = np.zeros(nselect)
for i, samp in enumerate(rsamps):
rl, rh, pl, ph = ball_lookup[samp]
if samp in [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55]:
# check for giant planets
# if a giant planet than draw power law
radius[i] = rndm(6, 22, -1.7)
else:
radius[i] = random.uniform(low=rl, high=rh)
period[i] = random.uniform(low=pl, high=ph)
return radius, period
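# --- Added illustrative usage sketch (not part of the original module) ---
# The "balls in pots" scheme above is a discretised occurrence-rate table: each pot is
# one (radius bin, period bin) cell weighted by its planet count, and a drawn ball
# becomes a planet by sampling uniformly inside the cell (or via a power law for the
# giant-planet bins). Hypothetical helper for illustration:
def _example_fressin_draw(n=5):
    radius, period = Fressin13_select_extrap(nselect=n)
    return list(zip(radius, period))  # (radius [R_earth], period [days]) pairs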
def Dressing15_select_extrap(nselect=1):
"""
period bins = 0.5, 0.91, 1.66, 3.02, 5.49, 10.0, 18.2, 33.1, 60.3, 110., 200.
"""
# create a pot for dressing numbers (balls)
balls = np.array([])
# pot 1 contains rp=0.5-1.0, p=0.5-0.91
p1 = np.zeros(400) + 1
# pot 2 contains rp=1.0-1.5, p=0.5-0.91
p2 = np.zeros(460) + 2
# pot 3 contains rp=1.5-2.0, p=0.5-0.91
p3 = np.zeros(61) + 3
# pot 4 contains rp=2.0-2.5, p=0.5-0.91
p4 = np.zeros(2) + 4
# pot 5 contains rp=2.5-3.0, p=0.5-0.91
p5 = np.zeros(0) + 5
# pot 6 contains rp=3.0-3.5, p=0.5-0.91
p6 = np.zeros(0) + 6
# pot 7 contains rp=3.5-4.0, p=0.5-0.91
p7 = np.zeros(0) + 7
# pot 1 contains rp=0.5-1.0, p=0.91, 1.66
p8 = np.zeros(1500) + 8
# pot 2 contains rp=1.0-1.5, p=0.91, 1.66
p9 = np.zeros(1400) + 9
# pot 3 contains rp=1.5-2.0, p=0.91, 1.66
p10 = np.zeros(270) + 10
# pot 4 contains rp=2.0-2.5, p=0.91, 1.66
p11 = np.zeros(9) + 11
# pot 5 contains rp=2.5-3.0, p=0.91, 1.66
p12 = np.zeros(4) + 12
# pot 6 contains rp=3.0-3.5, p=0.91, 1.66
p13 = np.zeros(6) + 13
# pot 7 contains rp=3.5-4.0, p=0.91, 1.66
p14 = np.zeros(8) + 14
# pot 1 contains rp=0.5-1.0, p=1.66, 3.02
p15 = np.zeros(4400) + 15
# pot 2 contains rp=1.0-1.5, p=1.66, 3.02
p16 = np.zeros(3500) + 16
# pot 3 contains rp=1.5-2.0, p=1.66, 3.02
p17 = np.zeros(1200) + 17
# pot 4 contains rp=2.0-2.5, p=1.66, 3.02
p18 = np.zeros(420) + 18
# pot 5 contains rp=2.5-3.0, p=1.66, 3.02
p19 = np.zeros(230) + 19
# pot 6 contains rp=3.0-3.5, p=1.66, 3.02
p20 = np.zeros(170) + 20
# pot 7 contains rp=3.5-4.0, p=1.66, 3.02
p21 = np.zeros(180) + 21
# pot 1 contains rp=0.5-1.0, p=3.02, 5.49
p22 = np.zeros(5500) + 22
# pot 2 contains rp=1.0-1.5, p=3.02, 5.49
p23 = np.zeros(5700) + 23
# pot 3 contains rp=1.5-2.0, p=3.02, 5.49
p24 = np.zeros(2500) + 24
# pot 4 contains rp=2.0-2.5, p=3.02, 5.49
p25 = np.zeros(1800) + 25
# pot 5 contains rp=2.5-3.0, p=3.02, 5.49
p26 = np.zeros(960) + 26
# pot 6 contains rp=3.0-3.5, p=3.02, 5.49
p27 = np.zeros(420) + 27
# pot 7 contains rp=3.5-4.0, p=3.02, 5.49
p28 = np.zeros(180) + 28
# pot 1 contains rp=0.5-1.0, p=5.49, 10.0
p29 = np.zeros(10000) + 29
# pot 2 contains rp=1.0-1.5, p=5.49, 10.0
p30 = np.zeros(10000) + 30
# pot 3 contains rp=1.5-2.0, p=5.49, 10.0
p31 = np.zeros(6700) + 31
# pot 4 contains rp=2.0-2.5, p=5.49, 10.0
p32 = np.zeros(6400) + 32
# pot 5 contains rp=2.5-3.0, p=5.49, 10.0
p33 = np.zeros(2700) + 33
# pot 6 contains rp=3.0-3.5, p=5.49, 10.0
p34 = np.zeros(1100) + 34
# pot 7 contains rp=3.5-4.0, p=5.49, 10.0
p35 = np.zeros(360) + 35
# pot 1 contains rp=0.5-1.0, p=10.0, 18.2
p36 = np.zeros(12000) + 36
# pot 2 contains rp=1.0-1.5, p=10.0, 18.2
p37 = np.zeros(13000) + 37
# pot 3 contains rp=1.5-2.0, p=10.0, 18.2
p38 = np.zeros(13000) + 38
# pot 4 contains rp=2.0-2.5, p=10.0, 18.2
p39 = np.zeros(9300) + 39
# pot 5 contains rp=2.5-3.0, p=10.0, 18.2
p40 = np.zeros(3800) + 40
# pot 6 contains rp=3.0-3.5, p=10.0, 18.2
p41 = np.zeros(1400) + 41
# pot 7 contains rp=3.5-4.0, p=10.0, 18.2
p42 = np.zeros(510) + 42
# pot 1 contains rp=0.5-1.0, p=18.2, 33.1
p43 = np.zeros(11000) + 43
# pot 2 contains rp=1.0-1.5, p=18.2, 33.1
p44 = np.zeros(16000) + 44
# pot 3 contains rp=1.5-2.0, p=18.2, 33.1
p45 = np.zeros(14000) + 45
# pot 4 contains rp=2.0-2.5, p=18.2, 33.1
p46 = np.zeros(10000) + 46
# pot 5 contains rp=2.5-3.0, p=18.2, 33.1
p47 = np.zeros(4600) + 47
# pot 6 contains rp=3.0-3.5, p=18.2, 33.1
p48 = np.zeros(810) + 48
# pot 7 contains rp=3.5-4.0, p=18.2, 33.1
p49 = np.zeros(320) + 49
# pot 1 contains rp=0.5-1.0, p=33.1, 60.3
p50 = np.zeros(6400) + 50
# pot 2 contains rp=1.0-1.5, p=33.1, 60.3
p51 = np.zeros(6400) + 51
# pot 3 contains rp=1.5-2.0, p=33.1, 60.3
p52 = np.zeros(12000) + 52
# pot 4 contains rp=2.0-2.5, p=33.1, 60.3
p53 = np.zeros(12000) + 53
# pot 5 contains rp=2.5-3.0, p=33.1, 60.3
p54 = np.zeros(5800) + 54
# pot 6 contains rp=3.0-3.5, p=33.1, 60.3
p55 = np.zeros(1600) + 55
# pot 7 contains rp=3.5-4.0, p=33.1, 60.3
p56 = np.zeros(210) + 56
# pot 1 contains rp=0.5-1.0, p=60.3, 110.
p57 = np.zeros(10000) + 57
# pot 2 contains rp=1.0-1.5, p=60.3, 110.
p58 = np.zeros(10000) + 58
# pot 3 contains rp=1.5-2.0, p=60.3, 110.
p59 = np.zeros(8300) + 59
# pot 4 contains rp=2.0-2.5, p=60.3, 110.
p60 = np.zeros(9600) + 60
# pot 5 contains rp=2.5-3.0, p=60.3, 110.
p61 = np.zeros(4200) + 61
# pot 6 contains rp=3.0-3.5, p=60.3, 110.
p62 = np.zeros(1700) + 62
# pot 7 contains rp=3.5-4.0, p=60.3, 110.
p63 = np.zeros(420) + 63
# pot 1 contains rp=0.5-1.0, p=110., 200.
p64 = np.zeros(19000) + 64
# pot 2 contains rp=1.0-1.5, p=110., 200.
p65 = np.zeros(19000) + 65
# pot 3 contains rp=1.5-2.0, p=110., 200.
p66 = np.zeros(10000) + 66
# pot 4 contains rp=2.0-2.5, p=110., 200.
p67 = np.zeros(4500) + 67
# pot 5 contains rp=2.5-3.0, p=110., 200.
p68 = np.zeros(1100) + 68
# pot 6 contains rp=3.0-3.5, p=110., 200.
p69 = np.zeros(160) + 69
# pot 7 contains rp=3.5-4.0, p=110., 200.
p70 = np.zeros(80) + 70
    # pot 1 contains rp=0.5-1.0, p=200., 365.
    p71 = np.zeros(19000) + 71
    # pot 2 contains rp=1.0-1.5, p=200., 365.
    p72 = np.zeros(19000) + 72
    # pot 3 contains rp=1.5-2.0, p=200., 365.
    p73 = np.zeros(10000) + 73
    # pot 4 contains rp=2.0-2.5, p=200., 365.
    p74 = np.zeros(4500) + 74
    # pot 5 contains rp=2.5-3.0, p=200., 365.
    p75 = np.zeros(1100) + 75
    # pot 6 contains rp=3.0-3.5, p=200., 365.
    p76 = np.zeros(160) + 76
    # pot 7 contains rp=3.5-4.0, p=200., 365.
    p77 = np.zeros(80) + 77
balls = np.r_[
balls,
p1,
p2,
p3,
p4,
p5,
p6,
p7,
p8,
p9,
p10,
p11,
p12,
p13,
p14,
p15,
p16,
p17,
p18,
p19,
p20,
p21,
p22,
p23,
p24,
p25,
p26,
p27,
p28,
p29,
p30,
p31,
p32,
p33,
p34,
p35,
p36,
p37,
p38,
p39,
p40,
p41,
p42,
p43,
p44,
p45,
p46,
p47,
p48,
p49,
p50,
p51,
p52,
p53,
p54,
p55,
p56,
p57,
p58,
p59,
p60,
p61,
p62,
p63,
p64,
p65,
p66,
p67,
p68,
p69,
p70,
p71,
p72,
p73,
p74,
p75,
p76,
p77,
]
# lookup for what the balls mean
# outputs radlow, radhigh, Plow, Phigh
# 0.5, 0.91, 1.66, 3.02, 5.49, 10.0, 18.2, 33.1, 60.3, 110., 200.
ball_lookup = {
1: [0.5, 1.0, 0.5, 0.91],
2: [1.0, 1.5, 0.5, 0.91],
3: [1.5, 2.0, 0.5, 0.91],
4: [2.0, 2.5, 0.5, 0.91],
5: [2.5, 3.0, 0.5, 0.91],
6: [3.0, 3.5, 0.5, 0.91],
7: [3.5, 4.0, 0.5, 0.91],
8: [0.5, 1.0, 0.91, 1.66],
9: [1.0, 1.5, 0.91, 1.66],
10: [1.5, 2.0, 0.91, 1.66],
11: [2.0, 2.5, 0.91, 1.66],
12: [2.5, 3.0, 0.91, 1.66],
13: [3.0, 3.5, 0.91, 1.66],
14: [3.5, 4.0, 0.91, 1.66],
15: [0.5, 1.0, 1.66, 3.02],
16: [1.0, 1.5, 1.66, 3.02],
17: [1.5, 2.0, 1.66, 3.02],
18: [2.0, 2.5, 1.66, 3.02],
19: [2.5, 3.0, 1.66, 3.02],
20: [3.0, 3.5, 1.66, 3.02],
21: [3.5, 4.0, 1.66, 3.02],
22: [0.5, 1.0, 3.02, 5.49],
23: [1.0, 1.5, 3.02, 5.49],
24: [1.5, 2.0, 3.02, 5.49],
25: [2.0, 2.5, 3.02, 5.49],
26: [2.5, 3.0, 3.02, 5.49],
27: [3.0, 3.5, 3.02, 5.49],
28: [3.5, 4.0, 3.02, 5.49],
29: [0.5, 1.0, 5.49, 10.0],
30: [1.0, 1.5, 5.49, 10.0],
31: [1.5, 2.0, 5.49, 10.0],
32: [2.0, 2.5, 5.49, 10.0],
33: [2.5, 3.0, 5.49, 10.0],
34: [3.0, 3.5, 5.49, 10.0],
35: [3.5, 4.0, 5.49, 10.0],
36: [0.5, 1.0, 10.0, 18.2],
37: [1.0, 1.5, 10.0, 18.2],
38: [1.5, 2.0, 10.0, 18.2],
39: [2.0, 2.5, 10.0, 18.2],
40: [2.5, 3.0, 10.0, 18.2],
41: [3.0, 3.5, 10.0, 18.2],
42: [3.5, 4.0, 10.0, 18.2],
43: [0.5, 1.0, 18.2, 33.1],
44: [1.0, 1.5, 18.2, 33.1],
45: [1.5, 2.0, 18.2, 33.1],
46: [2.0, 2.5, 18.2, 33.1],
47: [2.5, 3.0, 18.2, 33.1],
48: [3.0, 3.5, 18.2, 33.1],
49: [3.5, 4.0, 18.2, 33.1],
50: [0.5, 1.0, 33.1, 60.3],
51: [1.0, 1.5, 33.1, 60.3],
52: [1.5, 2.0, 33.1, 60.3],
53: [2.0, 2.5, 33.1, 60.3],
54: [2.5, 3.0, 33.1, 60.3],
55: [3.0, 3.5, 33.1, 60.3],
56: [3.5, 4.0, 33.1, 60.3],
57: [0.5, 1.0, 60.3, 110.0],
58: [1.0, 1.5, 60.3, 110.0],
59: [1.5, 2.0, 60.3, 110.0],
60: [2.0, 2.5, 60.3, 110.0],
61: [2.5, 3.0, 60.3, 110.0],
62: [3.0, 3.5, 60.3, 110.0],
63: [3.5, 4.0, 60.3, 110.0],
64: [0.5, 1.0, 110.0, 200.0],
65: [1.0, 1.5, 110.0, 200.0],
66: [1.5, 2.0, 110.0, 200.0],
67: [2.0, 2.5, 110.0, 200.0],
68: [2.5, 3.0, 110.0, 200.0],
69: [3.0, 3.5, 110.0, 200.0],
70: [3.5, 4.0, 110.0, 200.0],
71: [0.5, 1.0, 200.0, 365.0],
72: [1.0, 1.5, 200.0, 365.0],
73: [1.5, 2.0, 200.0, 365.0],
74: [2.0, 2.5, 200.0, 365.0],
75: [2.5, 3.0, 200.0, 365.0],
76: [3.0, 3.5, 200.0, 365.0],
77: [3.5, 4.0, 200.0, 365.0],
}
rsamps = random.choice(balls, size=nselect)
radius = np.zeros(nselect)
period = np.zeros(nselect)
for i, samp in enumerate(rsamps):
rl, rh, pl, ph = ball_lookup[samp]
radius[i] = random.uniform(low=rl, high=rh)
period[i] = random.uniform(low=pl, high=ph)
return radius, period
def Petigura18_select(nselect=1):
    # create a pot for Petigura numbers (balls)
balls = np.array([])
p1 = np.zeros(2) + 1
p2 = np.zeros(8) + 2
p3 = np.zeros(21) + 3
p4 = np.zeros(8) + 4
p5 = np.zeros(24) + 5
p6 = np.zeros(52) + 6
p7 = np.zeros(77) + 7
p8 = np.zeros(5) + 8
p9 = np.zeros(26) + 9
p10 = np.zeros(24) + 10
p11 = np.zeros(145) + 11
p12 = np.zeros(259) + 12
p13 = np.zeros(5) + 13
p14 = np.zeros(12) + 14
p15 = np.zeros(18) + 15
p16 = np.zeros(17) + 16
p17 = np.zeros(38) + 17
p18 = np.zeros(168) + 18
p19 = np.zeros(12) + 19
p20 = np.zeros(8) + 20
p21 = np.zeros(25) + 21
p22 = np.zeros(56) + 22
p23 = np.zeros(53) + 23
p24 = np.zeros(78) + 24
p25 = np.zeros(84) + 25
p26 = np.zeros(78) + 26
p27 = np.zeros(6) + 27
p28 = np.zeros(8) + 28
p29 = np.zeros(94) + 29
p30 = np.zeros(180) + 30
p31 = np.zeros(185) + 31
p32 = np.zeros(258) + 32
p33 = np.zeros(275) + 33
p34 = np.zeros(312) + 34
p35 = np.zeros(225) + 35
p36 = np.zeros(8) + 36
p37 = np.zeros(77) + 37
p38 = np.zeros(138) + 38
p39 = np.zeros(423) + 39
p40 = np.zeros(497) + 40
p41 = np.zeros(667) + 41
p42 = np.zeros(475) + 42
p43 = np.zeros(270) + 43
p44 = np.zeros(147) + 44
p45 = np.zeros(8) + 45
p46 = np.zeros(34) + 46
p47 = np.zeros(125) + 47
p48 = np.zeros(202) + 48
p49 = np.zeros(279) + 49
p50 = np.zeros(261) + 50
p51 = np.zeros(251) + 51
p52 = np.zeros(186) + 52
p53 = np.zeros(360) + 53
p54 = np.zeros(393) + 54
p55 = np.zeros(12) + 55
p56 = np.zeros(36) + 56
p57 = np.zeros(141) + 57
p58 = np.zeros(263) + 58
p59 = np.zeros(450) + 59
p60 = np.zeros(350) + 60
p61 = np.zeros(287) + 61
p62 = np.zeros(249) + 62
p63 = np.zeros(12) + 63
p64 = np.zeros(52) + 64
p65 = np.zeros(128) + 65
p66 = np.zeros(315) + 66
p67 = np.zeros(205) + 67
p68 = np.zeros(447) + 68
p69 = np.zeros(8) + 69
p70 = np.zeros(50) + 70
balls = np.r_[
balls,
p1,
p2,
p3,
p4,
p5,
p6,
p7,
p8,
p9,
p10,
p11,
p12,
p13,
p14,
p15,
p16,
p17,
p18,
p19,
p20,
p21,
p22,
p23,
p24,
p25,
p26,
p27,
p28,
p29,
p30,
p31,
p32,
p33,
p34,
p35,
p36,
p37,
p38,
p39,
p40,
p41,
p42,
p43,
p44,
p45,
p46,
p47,
p48,
p49,
p50,
p51,
p52,
p53,
p54,
p55,
p56,
p57,
p58,
p59,
p60,
p61,
p62,
p63,
p64,
p65,
p66,
p67,
p68,
p69,
p70,
]
ball_lookup = {
0: [0.0, 0.0, 0.0, 0.0],
1: [11.31, 16.00, 1.00, 1.78],
2: [11.31, 16.00, 1.78, 3.16],
3: [11.31, 16.00, 3.16, 5.62],
4: [11.31, 16.00, 5.62, 10.00],
5: [11.31, 16.00, 31.62, 56.23],
6: [11.31, 16.00, 100.00, 177.83],
7: [11.31, 16.00, 177.83, 316.23],
8: [8.00, 11.31, 3.16, 5.62],
9: [8.00, 11.31, 17.78, 31.62],
10: [8.00, 11.31, 31.62, 56.23],
11: [8.00, 11.31, 100.00, 177.83],
12: [8.00, 11.31, 177.83, 316.23],
13: [5.66, 8.00, 3.16, 5.62],
14: [5.66, 8.00, 5.62, 10.00],
15: [5.66, 8.00, 10.00, 17.78],
16: [5.66, 8.00, 17.78, 31.62],
17: [5.66, 8.00, 31.62, 56.23],
18: [5.66, 8.00, 177.83, 316.23],
19: [4.00, 5.66, 3.16, 5.62],
20: [4.00, 5.66, 5.62, 10.00],
21: [4.00, 5.66, 10.00, 17.78],
22: [4.00, 5.66, 17.78, 31.62],
23: [4.00, 5.66, 31.62, 56.23],
24: [4.00, 5.66, 56.23, 100.00],
25: [4.00, 5.66, 100.00, 177.83],
26: [4.00, 5.66, 177.83, 316.23],
27: [2.83, 4.00, 1.78, 3.16],
28: [2.83, 4.00, 3.16, 5.62],
29: [2.83, 4.00, 5.62, 10.00],
30: [2.83, 4.00, 10.00, 17.78],
31: [2.83, 4.00, 17.78, 31.62],
32: [2.83, 4.00, 31.62, 56.23],
33: [2.83, 4.00, 56.23, 100.00],
34: [2.83, 4.00, 100.00, 177.83],
35: [2.83, 4.00, 177.83, 316.23],
36: [2.00, 2.83, 1.78, 3.16],
37: [2.00, 2.83, 3.16, 5.62],
38: [2.00, 2.83, 5.62, 10.00],
39: [2.00, 2.83, 10.00, 17.78],
40: [2.00, 2.83, 17.78, 31.62],
41: [2.00, 2.83, 31.62, 56.23],
42: [2.00, 2.83, 56.23, 100.00],
43: [2.00, 2.83, 100.00, 177.83],
44: [2.00, 2.83, 177.83, 316.23],
45: [1.41, 2.00, 1.00, 1.78],
46: [1.41, 2.00, 1.78, 3.16],
47: [1.41, 2.00, 3.16, 5.62],
48: [1.41, 2.00, 5.62, 10.00],
49: [1.41, 2.00, 10.00, 17.78],
50: [1.41, 2.00, 17.78, 31.62],
51: [1.41, 2.00, 31.62, 56.23],
52: [1.41, 2.00, 56.23, 100.00],
53: [1.41, 2.00, 100.00, 177.83],
54: [1.41, 2.00, 177.83, 316.23],
55: [1.00, 1.41, 1.00, 1.78],
56: [1.00, 1.41, 1.78, 3.16],
57: [1.00, 1.41, 3.16, 5.62],
58: [1.00, 1.41, 5.62, 10.00],
59: [1.00, 1.41, 10.00, 17.78],
60: [1.00, 1.41, 17.78, 31.62],
61: [1.00, 1.41, 31.62, 56.23],
62: [1.00, 1.41, 56.23, 100.00],
63: [0.71, 1.00, 1.00, 1.78],
64: [0.71, 1.00, 1.78, 3.16],
65: [0.71, 1.00, 3.16, 5.62],
66: [0.71, 1.00, 5.62, 10.00],
67: [0.71, 1.00, 10.00, 17.78],
68: [0.71, 1.00, 17.78, 31.62],
69: [0.50, 0.71, 1.00, 1.78],
70: [0.50, 0.71, 1.78, 3.16],
}
rsamps = random.choice(balls, size=nselect)
radius = np.zeros(nselect)
period = np.zeros(nselect)
for i, samp in enumerate(rsamps):
rl, rh, pl, ph = ball_lookup[samp]
if samp in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]:
# check for giant planets
# if a giant planet than draw power law
radius[i] = rndm(8, 16, -1.7)
else:
radius[i] = random.uniform(low=rl, high=rh)
period[i] = random.uniform(low=pl, high=ph)
return radius, period
def per2ars(per, mstar, rstar):
per_SI = per * 86400.0
mass_SI = mstar * msun
a3 = per_SI ** 2 * G * mass_SI / (4 * np.pi ** 2)
return a3 ** (1.0 / 3.0) / (rstar * rsun)
def get_duration(per, ars, cosi=0.0, b=0, rprs=0.0):
"""
returns the transit duration in days
"""
part1 = per / np.pi
part2 = 1.0 / ars
part3 = np.sqrt((1 + rprs) ** 2 - b ** 2)
part4 = np.sqrt(1 - cosi ** 2)
duration = part1 * np.arcsin(part2 * part3 / part4)
return duration
def get_transit_depth(Prad, rstar_solar):
"""
returns transit depth in ppm
"""
tdep = (Prad * 0.009155 / rstar_solar) ** 2 * 1.0e6 # ppm
return tdep
def get_rprs(Prad, rstar_solar):
return (Prad * 0.009155) / rstar_solar
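# --- Added illustrative worked example (not part of the original module) ---
# Chaining the helpers above: convert an orbital period to a/R*, then estimate the
# transit duration and depth. For an Earth-size planet on a one-year orbit around a
# Sun-like star this gives roughly half a day and a depth near 84 ppm.
def _example_transit_observables(per=365.25, mstar=1.0, rstar=1.0, prad=1.0):
    ars = per2ars(per, mstar, rstar)                     # semi-major axis in stellar radii
    rprs = get_rprs(prad, rstar)                         # planet-to-star radius ratio
    duration = get_duration(per, ars, b=0.0, rprs=rprs)  # days
    depth = get_transit_depth(prad, rstar)               # ppm
    return duration, depth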
def make_allplanets_df_vec_extrap(df, starid_zp):
    # let's refactor the above code into array operations
totalRows = df.loc[:, "Nplanets"].sum()
df.loc[:, "planetRadius"] = pd.Series()
df.loc[:, "planetPeriod"] = pd.Series()
df.loc[:, "starID"] = pd.Series()
radper_m = Dressing15_select_extrap(totalRows)
radper_fgk = Petigura18_select(totalRows)
# we need an array of indices
rowIdx = np.repeat(np.arange(df.shape[0]),
|
np.array(df.Nplanets.values)
|
numpy.array
|
from scipy.optimize import linear_sum_assignment
import numpy as np
import copy
import math
import random
class RuleFoundation():
def __init__(self, n_agent, n_thread, space, mcv):
self.n_thread = n_thread
self.n_agent = n_agent
self.handler = [None for _ in range(self.n_thread)]
assert n_agent == 10
def interact_with_env(self, team_intel):
info = team_intel['Latest-Team-Info']
done = team_intel['Env-Suffered-Reset']
step_cnt = team_intel['Current-Obs-Step']
action_list = []
for thread in range(self.n_thread):
act_dict = {'detector_act':None, 'fighter_act':None}
if done[thread]:
self.handler[thread] = RuleAgent()
self.handler[thread].set_map_info(1000, 1000, 0, 10)
act_dict['detector_act'], act_dict['fighter_act'] = self.handler[thread].get_action(obs_dict=info[thread], step_cnt=step_cnt[thread])
action_list.append(act_dict)
pass
return action_list, None
class RuleAgent():
    def __init__(self): # initialization interface
        self.obs_ind = 'raw' # format of the observation information
self.tar = 0
self.N = 0
self.angle=0
self.color_flag=True
self.formation_flag=4
self.star_back=True
self.missile_long = [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]]
self.missile_short = [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]]
        # missile_long[i][0] records the number of long-range shells currently fired at enemy unit i+1
        # missile_long[i][k] records the timer of the k-th long-range shell fired at enemy unit i+1
self.beyond_flag = False
self.leader_id = 4
self.tar_pos = np.full((4,2,2), 0)
# red
self.tar_pos[0][0][0] = 36
self.tar_pos[0][1][0] = 400
self.tar_pos[1][0][0] = 100
self.tar_pos[1][1][0] = 400
self.tar_pos[2][0][0]= 700
self.tar_pos[2][1][0]= 500
self.tar_pos[3][0][0] = 500
self.tar_pos[3][1][0] = 700
# blue
self.tar_pos[0][0][1] = 964
self.tar_pos[0][1][1] = 400
self.tar_pos[1][0][1] = 900
self.tar_pos[1][1][1] = 400
self.tar_pos[2][0][1]= 300
self.tar_pos[2][1][1]= 500
self.tar_pos[3][0][1] = 500
self.tar_pos[3][1][1] = 700
        # type_data = (attack range, offset added to the enemy index when attacking, index of this shell type in self_info[j,?])
self.long_data = (120, 1, 1)
self.short_data = (50, 11, 2)
def init_param(self):
self.tar = 0
self.N = 0
self.angle=0
self.color_flag=True
self.formation_flag=4
self.star_back=True
self.missile_long = [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]]
self.missile_short = [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]]
        # missile_long[i][0] records the number of long-range shells currently fired at enemy unit i+1
        # missile_long[i][k] records the timer of the k-th long-range shell fired at enemy unit i+1
self.beyond_flag = False
self.leader_id = 4
def dist(self, obs_dict, i, j):
adv_obs = obs_dict['fighter'][0]['adv_obs']
        # compute the distance
x = adv_obs[i][2 * j]
y = adv_obs[i][2 * j + 1]
distance = x ** 2 + y ** 2
distance = math.sqrt(distance)
return distance
    def set_map_info(self, size_x, size_y, detector_num, fighter_num): # read map information
self.size_x = size_x
self.size_y = size_y
self.detector_num = detector_num
        self.fighter_num = fighter_num # choose the implementation form as needed
self.leader_id = 4
def _bipartite_min_dists(self, dists):
ri, ci = linear_sum_assignment(dists)
return ri, ci
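    # Added note (illustration, not original code): this wrapper uses the Hungarian
    # algorithm (scipy.optimize.linear_sum_assignment) to match fighters to formation
    # slots with minimum total distance, e.g. for dists = [[4, 1], [2, 3]] it returns
    # ri = [0, 1], ci = [1, 0], i.e. fighter 0 -> slot 1, fighter 1 -> slot 0 (cost 3).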
    def tar_judge(self,adv_obs):
        tar_exist = False
        for i in range(self.fighter_num):
            for j in range(self.fighter_num):
                if adv_obs[i][j*2]!=-1000 and adv_obs[i][j*2+1]!=-1000:
                    tar_exist = True
                    break
            if tar_exist:
                break
        return tar_exist
def sum_alive(self,alive_status):
alive_num = 0
for i in range(self.fighter_num):
if alive_status[i]:
alive_num+=1
return alive_num
def tar_assign(self,alive_status,adv_obs):
fighter_action = np.full((self.fighter_num,4),0)
for i in range(self.fighter_num):
            # check whether this attack unit is alive
if not alive_status[i]:
continue
min_dis = 1000 ** 2 + 1000 ** 2
            for j in range(self.fighter_num): # record the id and position of the enemy unit closest to our unit
x = adv_obs[i][2 * j]
y = adv_obs[i][2 * j + 1]
dis = x ** 2 + y ** 2
if dis < min_dis:
min_dis = dis
min_id = j
theta_start = np.arctan2(adv_obs[i][2*min_id+1], adv_obs[i][2*min_id])
if theta_start < 0:
theta_start += 2 * np.pi
course = (int)((theta_start / (2 * np.pi)) * 360)
fighter_action[i][0] = course
return fighter_action
    def formation(self,alive_status,self_pos,self_info,step_cnt,formation_flag): # formation control
fighter_action = np.full((self.fighter_num, 4), 0)
if self.color == 'red':
if step_cnt % 8 == 0 or step_cnt % 9 == 0:
for i in range(self.fighter_num):
fighter_action[i][0] = (self_info[i][0] + 120) % 360
return fighter_action
else:
if step_cnt % 8 == 0 or step_cnt % 9 == 0:
for i in range(self.fighter_num):
if self_info[i][0]-120<0:
fighter_action[i][0] = self_info[i][0] -120 + 360
else:
fighter_action[i][0] = self_info[i][0] - 120
return fighter_action
        # select the leader
        if not alive_status[self.leader_id]: # the leader has died
for i in range(self.fighter_num):
                # pick a surviving unit as the new leader
if alive_status[i]:
self.leader_id = i
break
        # set the default course
if self.color == 'red':
default_course = 0
else:
default_course = 180
        start_offset = 100 # size of the semicircle
        # determine the leader's track
for y in range(self.fighter_num):
if not alive_status[y]:
continue
if y == self.leader_id:
if self.star_back:
if self.color == 'red':
if self_pos[self.leader_id][0] > 50 :
fighter_action[self.leader_id][0] = default_course+180
else :
fighter_action[self.leader_id][0] = default_course
self.star_back=False
else :
if self_pos[self.leader_id][0] < 950:
fighter_action[self.leader_id][0] = default_course - 180
else:
self.star_back = False
fighter_action[self.leader_id][0] = default_course
else :
if self.color=='red' :
                        # the leader has reached the target position
if self_pos[self.leader_id][0] == self.tar_pos[self.tar][0][0] and self_pos[self.leader_id][1] == self.tar_pos[self.tar][1][0]:
self.tar = self.tar + 1
else:
theta_leader = np.arctan2(self.tar_pos[self.tar][1][0] - self_pos[self.leader_id][1],self.tar_pos[self.tar][0][0] - self_pos[self.leader_id][0])
if theta_leader < 0:
theta_leader += 2 * np.pi
course = (theta_leader / (2 * np.pi)) * 360
if 90 < course < 180 or 270 < course < 360:
course = math.floor(course)
else:
course = math.ceil(course)
fighter_action[self.leader_id][0] = course
if self.tar == 4:
self.tar = 0
else :
if self_pos[self.leader_id][0] == self.tar_pos[self.tar][0][1] and self_pos[self.leader_id][1] == self.tar_pos[self.tar][1][1]:
self.tar = self.tar + 1
else:
theta_leader = np.arctan2(self.tar_pos[self.tar][1][1] - self_pos[self.leader_id][1],self.tar_pos[self.tar][0][1] - self_pos[self.leader_id][0])
if theta_leader < 0:
theta_leader += 2 * np.pi
course = (theta_leader / (2 * np.pi)) * 360
if 90 < course < 180 or 270 < course < 360:
course = math.floor(course)
else:
course = math.ceil(course)
fighter_action[self.leader_id][0] = course
if self.tar == 4 :
self.tar = 0
#print(course)
            # determine the followers' tracks
else:
                if formation_flag == 1: ## circular formation
fighter_live_num_list = []
for fighter_live_num in range(self.fighter_num):
if alive_status[fighter_live_num]:
fighter_live_num_list.append(fighter_live_num)
angle = (int)(360 / (len(fighter_live_num_list) - 1))
expected_poses_patrol = []
leader_position_patrol = np.array([self_pos[self.leader_id][0], self_pos[self.leader_id][1]]) # 领航者的位置
for i in range(len(fighter_live_num_list)):
if fighter_live_num_list[i] != self.leader_id:
if fighter_live_num_list[i] > self.leader_id:
expected_poses_patrol.append(np.array([leader_position_patrol + start_offset * np.array([np.cos(angle * (i - 1) * np.pi / 180),np.sin(angle * (i - 1) * np.pi / 180)])]))
else:
expected_poses_patrol.append([leader_position_patrol + start_offset * np.array([np.cos(angle * i * np.pi / 180), np.sin(angle * i * np.pi / 180)])])
dists_patrol = np.array([[np.linalg.norm(np.array([self_pos[i][0], self_pos[i][1]]) - pos) for pos in expected_poses_patrol] for i in range(len(fighter_live_num_list)) if fighter_live_num_list[i] != self.leader_id])
ri, ci = self._bipartite_min_dists(dists_patrol)
for i in range(len(fighter_live_num_list)):
if y == fighter_live_num_list[i]:
if y > self.leader_id:
expected_poses_for_it = expected_poses_patrol[ci[i - 1]]
else:
expected_poses_for_it = expected_poses_patrol[ci[i]]
break
relative_value_patrol = expected_poses_for_it - np.array([self_pos[y][0], self_pos[y][1]])
theta_patrol = np.arctan2(relative_value_patrol[0][1], relative_value_patrol[0][0])
if theta_patrol < 0:
theta_patrol += 2 * np.pi
course = (int)((theta_patrol / (2 * np.pi)) * 360)
fighter_action[y][0] = course
elif formation_flag == 2: # semicircular formation
y_width = 60.0
y_offset = 120
if self.color == 'red':
x_offset = -120.0
else:
x_offset = 120.0
# compute the expected slot positions (this is the key step)
expected_poses = []
leader_position = np.array([self_pos[self.leader_id][0],self_pos[self.leader_id][1]])
for i in range(self.fighter_num - 1):
if i == 0:
temp_position = [leader_position + np.array([0.0, y_width])]
expected_poses.append(temp_position)
elif i == 1:
temp_position = [leader_position + np.array([0.0, 2 * y_width])]
expected_poses.append(temp_position)
elif i == 2:
temp_position = [leader_position + np.array([0.0, -y_width])]
expected_poses.append(temp_position)
elif i == 3:
temp_position = [leader_position + np.array([0.0, -2 * y_width])]
expected_poses.append(temp_position)
elif i == 4:
temp_position = [leader_position + np.array(
[x_offset * np.cos(60 * np.pi / 180), -y_offset * np.sin(60 * np.pi / 180)])]
expected_poses.append(temp_position)
elif i == 5:
temp_position = [leader_position + np.array(
[x_offset * np.cos(30 * np.pi / 180), -y_offset * np.sin(30 * np.pi / 180)])]
expected_poses.append(temp_position)
elif i == 6:
temp_position = [leader_position + np.array(
[x_offset * np.cos(0 * np.pi / 180), y_offset * np.sin(0 * np.pi / 180)])]
expected_poses.append(temp_position)
elif i == 7:
temp_position = [leader_position + np.array(
[x_offset * np.cos(30 * np.pi / 180), y_offset * np.sin(30 * np.pi / 180)])]
expected_poses.append(temp_position)
elif i == 8:
temp_position = [leader_position + np.array(
[x_offset * np.cos(60 * np.pi / 180), y_offset * np.sin(60 * np.pi / 180)])]
expected_poses.append(temp_position)
dists = np.array([[np.linalg.norm(np.array([self_pos[i][0],self_pos[i][1]]) - pos) for pos in expected_poses] for i in range(self.fighter_num) if i != self.leader_id])
ri, ci = self._bipartite_min_dists(dists)
if y <= self.leader_id:
ci_v1 = y
else:
ci_v1 = y - 1
relative_value = expected_poses[ci[ci_v1]] - np.array([self_pos[y][0],self_pos[y][1]])
theta_start =
|
np.arctan2(relative_value[0][1], relative_value[0][0])
|
numpy.arctan2
|
# -*- coding: utf-8 -*-
"""
cante100 Loader
.. admonition:: Dataset Info
:class: dropdown
The cante100 dataset contains 100 tracks taken from the COFLA corpus. We defined 10 style
families, with 10 tracks included from each. Apart from the style family, we manually
annotated the sections of the track in which the vocals are present. In addition, we
provide a number of low-level descriptors and the fundamental frequency corresponding to
the predominant melody for each track. The meta-information includes editorial meta-data
and the musicBrainz ID.
Total tracks: 100
cante100 audio is only available upon request. To download the audio, request access at
this link: https://zenodo.org/record/1324183. Then
unzip the audio into the general cante100 dataset folder, alongside the rest of the
annotations and files.
Audio specifications:
- Sampling frequency: 44.1 kHz
- Bit-depth: 16 bit
- Audio format: .mp3
The cante100 dataset provides spectrograms in csv format. The spectrograms can be downloaded
without a request, so by default the cante100 loader uses the spectrograms of the tracks.
The available annotations are:
- F0 (predominant melody)
- Automatic transcription of notes (of singing voice)
CANTE100 LICENSE (COPIED FROM ZENODO PAGE)
.. code-block:: latex
The provided datasets are offered free of charge for internal non-commercial use.
We do not grant any rights for redistribution or modification. All data collections were gathered
by the COFLA team.
© COFLA 2015. All rights reserved.
For more details, please visit: http://www.cofla-project.com/?page_id=134
"""
import csv
import os
import logging
import xml.etree.ElementTree as ET
import librosa
import numpy as np
from mirdata import download_utils
from mirdata import jams_utils
from mirdata import core
from mirdata import annotations
BIBTEX = """@dataset{nadine_kroher_2018_1322542,
author = {<NAME> and
<NAME> and
<NAME> and
<NAME>},
title = {cante100 Metadata},
month = jul,
year = 2018,
publisher = {Zenodo},
version = {1.0},
doi = {10.5281/zenodo.1322542},
url = {https://doi.org/10.5281/zenodo.1322542}
},
@dataset{nadine_kroher_2018_1324183,
author = {<NAME> and
<NAME> and
<NAME> and
<NAME>},
title = {cante100 Audio},
month = jul,
year = 2018,
publisher = {Zenodo},
version = {1.0},
doi = {10.5281/zenodo.1324183},
url = {https://doi.org/10.5281/zenodo.1324183}
}
"""
REMOTES = {
"spectrogram": download_utils.RemoteFileMetadata(
filename="cante100_spectrum.zip",
url="https://zenodo.org/record/1322542/files/cante100_spectrum.zip?download=1",
checksum="0b81fe0fd7ab2c1adc1ad789edb12981", # the md5 checksum
destination_dir="cante100_spectrum", # relative path for where to unzip the data, or None
),
"melody": download_utils.RemoteFileMetadata(
filename="cante100midi_f0.zip",
url="https://zenodo.org/record/1322542/files/cante100midi_f0.zip?download=1",
checksum="cce543b5125eda5a984347b55fdcd5e8", # the md5 checksum
destination_dir="cante100midi_f0", # relative path for where to unzip the data, or None
),
"notes": download_utils.RemoteFileMetadata(
filename="cante100_automaticTranscription.zip",
url="https://zenodo.org/record/1322542/files/cante100_automaticTranscription.zip?download=1",
checksum="47fea64c744f9fe678ae5642a8f0ee8e", # the md5 checksum
destination_dir="cante100_automaticTranscription", # relative path for where to unzip the data, or None
),
"metadata": download_utils.RemoteFileMetadata(
filename="cante100Meta.xml",
url="https://zenodo.org/record/1322542/files/cante100Meta.xml?download=1",
checksum="6cce186ce77a06541cdb9f0a671afb46", # the md5 checksum
destination_dir=None, # relative path for where to unzip the data, or None
),
"README": download_utils.RemoteFileMetadata(
filename="cante100_README.txt",
url="https://zenodo.org/record/1322542/files/cante100_README.txt?download=1",
checksum="184209b7e7d816fa603f0c7f481c0aae", # the md5 checksum
destination_dir=None, # relative path for where to unzip the data, or None
),
}
DOWNLOAD_INFO = """
This loader is designed to load the spectrum, as it is freely available for download.
However, the loader supports audio as well. Unfortunately the audio files of the
cante100 dataset are not available for free download, only upon request. You can
request the audio at either of these links:
==> http://www.cofla-project.com/?page_id=208
==> https://zenodo.org/record/1324183
Then, place the downloaded cante100audio folder like this:
> cante100/
> cante100_spectrum/
... (rest of the annotation folders)
> cante100audio/
Remember to locate the cante100 folder at {}
"""
LICENSE_INFO = """
The provided datasets are offered free of charge for internal non-commercial use.
We do not grant any rights for redistribution or modification. All data collections
were gathered by the COFLA team. © COFLA 2015. All rights reserved.
"""
def _load_metadata(data_home):
metadata_path = os.path.join(data_home, "cante100Meta.xml")
if not os.path.exists(metadata_path):
logging.info(
"Metadata file {} not found.".format(metadata_path)
+ "You can download the metadata file for cante100 "
+ "by running cante100.download()"
)
return None
tree = ET.parse(metadata_path)
root = tree.getroot()
# ids
indexes = []
for child in root:
index = child.attrib.get("id")
# zero-pad the id to three characters (e.g. "1" -> "001")
indexes.append(index.zfill(3))
# musicBrainzID
identifiers = []
for ident in root.iter("musicBrainzID"):
identifiers.append(ident.text)
# artist
artists = []
for artist in root.iter("artist"):
artists.append(artist.text)
# titles
titles = []
for title in root.iter("title"):
titles.append(title.text)
# releases
releases = []
for release in root.iter("anthology"):
releases.append(release.text)
# duration
durations = []
minutes = []
for minute in root.iter("duration_m"):
minutes.append(float(minute.text) * 60)
seconds = []
for second in root.iter("duration_s"):
seconds.append(float(second.text))
for i in np.arange(len(minutes)):
durations.append(minutes[i] + seconds[i])
metadata = dict()
metadata["data_home"] = data_home
for i, j in zip(indexes, range(len(artists))):
metadata[i] = {
"musicBrainzID": identifiers[j],
"artist": artists[j],
"title": titles[j],
"release": releases[j],
"duration": durations[j],
}
return metadata
DATA = core.LargeData("cante100_index.json", _load_metadata)
class Track(core.Track):
"""cante100 track class
Args:
track_id (str): track id of the track
data_home (str): Local path where the dataset is stored.
If `None`, looks for the data in the default directory, `~/mir_datasets/cante100`
Attributes:
track_id (str): track id
identifier (str): musicbrainz id of the track
artist (str): performing artists
title (str): title of the track song
release (str): release where the track can be found
duration (str): duration in seconds of the track
Cached Properties:
melody (F0Data): annotated melody
notes (NoteData): annotated notes
"""
def __init__(self, track_id, data_home):
if track_id not in DATA.index["tracks"]:
raise ValueError("{} is not a valid track ID in Example".format(track_id))
self.track_id = track_id
self._data_home = data_home
self._track_paths = DATA.index["tracks"][track_id]
self.audio_path = os.path.join(self._data_home, self._track_paths["audio"][0])
self.spectrogram_path = os.path.join(
self._data_home, self._track_paths["spectrum"][0]
)
self.f0_path = os.path.join(self._data_home, self._track_paths["f0"][0])
self.notes_path = os.path.join(self._data_home, self._track_paths["notes"][0])
metadata = DATA.metadata(data_home=data_home)
if metadata is not None and track_id in metadata:
self._track_metadata = metadata[track_id]
else:
self._track_metadata = {
"musicBrainzID": None,
"artist": None,
"title": None,
"release": None,
"duration": None,
}
self.identifier = self._track_metadata["musicBrainzID"]
self.artist = self._track_metadata["artist"]
self.title = self._track_metadata["title"]
self.release = self._track_metadata["release"]
self.duration = self._track_metadata["duration"]
@property
def audio(self):
"""The track's audio
Returns:
* np.ndarray - audio signal
* float - sample rate
"""
return load_audio(self.audio_path)
@property
def spectrogram(self):
"""spectrogram of The track's audio
Returns:
(np.ndarray): spectrogram
"""
return load_spectrogram(self.spectrogram_path)
@core.cached_property
def melody(self):
return load_melody(self.f0_path)
@core.cached_property
def notes(self):
return load_notes(self.notes_path)
def to_jams(self):
"""Get the track's data in jams format
Returns:
jams.JAMS: the track's data in jams format
"""
return jams_utils.jams_converter(
audio_path=self.audio_path,
spectrogram_path=self.spectrogram_path,
f0_data=[(self.melody, "pitch_contour")],
note_data=[(self.notes, "note_hz")],
metadata=self._track_metadata,
)
def load_spectrogram(spectrogram_path):
"""Load a cante100 dataset spectrogram file.
Args:
spectrogram_path (str): path to spectrogram file
Returns:
np.ndarray: spectrogram
"""
if not os.path.exists(spectrogram_path):
raise IOError("spectrogram_path {} does not exist".format(spectrogram_path))
parsed_spectrogram =
|
np.genfromtxt(spectrogram_path, delimiter=" ")
|
numpy.genfromtxt
|
import numpy as np
from scipy.integrate import quad
from scipy.linalg import toeplitz
from bayes_drt.utils import rel_round, is_loguniform
def get_basis_func(basis):
"Generate basis function"
# y = ln (tau/tau_m)
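# Each phi(y, epsilon) below is evaluated at y = ln(tau/tau_m), i.e. it is centred on
# tau_m in log-tau space; epsilon is a shape parameter (an inverse width for the
# Gaussian basis, unused for the 'Zic' basis).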
if basis == 'gaussian':
def phi(y, epsilon):
return np.exp(-(epsilon * y) ** 2)
elif basis == 'Cole-Cole':
def phi(y, epsilon):
return (1 / (2 * np.pi)) * np.sin((1 - epsilon) * np.pi) / (
np.cosh(epsilon * y) - np.cos((1 - epsilon) * np.pi))
elif basis == 'Zic':
def phi(y, epsilon):
# epsilon unused, included only for compatibility
return 2 *
|
np.exp(y)
|
numpy.exp
|
"""Helper classes and functions with RTOG studies.
"""
import random
import pandas as pd
import numpy as np
import pickle
from collections import Counter
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from tqdm import tqdm
import pint
# Constants defining variable and file parsing
from rtog_constants import gcp_baseline_paths, rtog_endpoints, rtog_binary_mapping, rtog_unknown_class_X
from rtog_constants import rtog_default_class_y, rtog_text_fields, rtog_field_mapping, rtog_categorical_fields
# Functions allowing RTOG data manipulation
from rtog_constants import is_categorical, merge, serum_values_to_ng_dl
def rtog_from_study_number(study_number, create_endpoints=True, standardize=False):
"""Helper function. Loads an RTOG object given the study number (str)."""
study_path = gcp_baseline_paths[study_number]
rtog = RTOG(filename=study_path, study_number=study_number, file_type='excel', create_endpoints=create_endpoints)
if standardize:
rtog.standardize_rx()
rtog.standardize_race()
rtog.standardize_gleason_scores()
rtog.standardize_tstage()
rtog.standardize_pelvic_rt()
rtog.standardize_prostate_dose()
rtog.standardize_rt_complete()
rtog.standardize_biochemical_failure()
rtog.standardize_disease_specific_survival()
rtog.cause_of_death()
# rtog_object.standardize_baseline_serum() # Note: this line takes a long time to run, due to unit conversions. Also Osama said the data is too noisy to use.
rtog.standardize_unknown_values_in_predictor_variables() # note: this must be done after standardize_rt_complete, bc that re-sets some unknown vars. This replaces the 'unknown' classes with nans, so that boosting can intelligently impute.
print("Loaded RTOG {}, Standardized={}".format(study_number, standardize))
return rtog
class RTOG(object):
def __init__(self, filename=None, study_number=None, file_type="excel", create_endpoints=True):
self.filename = filename
self.df = None
self.study_number = study_number
# Load Endpoints, Default Classes (for y), and Unknown Classes (for X).
if self.study_number in rtog_endpoints:
self.endpoints = rtog_endpoints[study_number]
if self.study_number in rtog_default_class_y:
self.default_class_y = rtog_default_class_y[study_number]
if self.study_number in rtog_unknown_class_X:
self.unknown_class_X = rtog_unknown_class_X[study_number]
# Load Data.
if self.filename is not None:
if file_type == "excel":
self.df = pd.read_excel(filename)
elif file_type == "csv":
self.df = pd.read_csv(filename, index_col=0)
self._field_fix()
self.table_sort()
# Study-specific additional derived endpoints get hardcoded here
if study_number == '9202':
# Add Radiotherapy info
gcp_path = "/export/medical_ai/ucsf/box_data/Aperio Images of NRG GU H&E Slides/NRG Statistics/RTOG 9202/All_RT_Data_9202.xlsx"
self.df_rt = pd.read_excel(gcp_path)
self.df_rt.columns = self.df_rt.columns.str.lower()
self.df_rt.rename({'pelvis_does' : 'pelvis_dose'}, axis='columns', inplace=True)
elif study_number == '9413': #note: data lacks disease specific survival
pass
elif study_number == '9408':
pass
elif study_number == '9910':
# Add Radiotherapy info
gcp_path = "/export/medical_ai/ucsf/box_data/Aperio Images of NRG GU H&E Slides/NRG Statistics/RTOG 9910/Radiation_treatment_9910.xlsx"
self.df_rt = pd.read_excel(gcp_path)
self.df_rt.columns = self.df_rt.columns.str.lower()
elif study_number == "0126":
# Add Serum info
gcp_path = "/export/medical_ai/ucsf/box_data/Aperio Images of NRG GU H&E Slides/NRG Statistics/RTOG 0126/All_serum_testosteron_0126.xlsx"
self.df_serum = pd.read_excel(gcp_path)
self.df_serum.columns = self.df_serum.columns.str.lower()
else:
pass
# Replace nans with defaults in endpoint fields
self.df = self.df.fillna(self.default_class_y)
if create_endpoints:
for timeframe in [5,10,15,25]:
self.add_distant_met_Nyr_endpoint(timeframe)
self.add_biochemical_failure_Nyr_endpoint(timeframe)
self.add_disease_specific_survival_Nyr_endpoint(timeframe)
self.add_survival_Nyr_endpoint(timeframe)
def _repr_html_(self):
return self.df._repr_html_()
def __getitem__(self, columns):
if type(columns) == str:
columns = [columns]
new_rtog = self.copy()
new_rtog.df = new_rtog.df[columns]
return new_rtog
def _field_fix(self):
"""Fixes field names for uniformity and typos. Determined in rtog_constants.py
"""
self.df = self.df.rename(columns=str.lower)
self.df = self.df.rename(rtog_field_mapping, axis='columns')
def table_sort(self):
"""Sorts rows and columns in ascending order.
"""
self.df = self.df.sort_index()
self.df = self.df.sort_index(axis=1)
def add_biochemical_failure_Nyr_endpoint(self, years):
"""Adds column 'biochemical_failure_Nyr' to self.df
Indicates if the cancer metastasized within N years.
Args:
years(int): the years.
Column values:
0: Censored
1: Failure within given years
2: Competing event (death without failure)
"""
field_name = 'biochemical_failure'
if self.study_number == '9202':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
new_field = field_name + "_{}year".format(years)
elif self.study_number == '9408':
failure_outside_timeframe_value = 0 # Does not have a 'competing events' class.
new_field = field_name + "_{}year".format(years)
elif self.study_number == '9413':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
new_field = field_name + "_{}year".format(years)
field_name = 'phoenix_biochemical_failure'
elif self.study_number == '9910':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
new_field = field_name + "_{}year".format(years)
field_name = 'phoenix_biochemical_failure'
elif self.study_number == "0126":
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
new_field = field_name + "_{}year".format(years)
field_name = 'phoenix_biochemical_failure'
else:
raise ValueError("The failure value for biochemical_failure is not set for this study: {}".format(self.study_number))
field_name_years = field_name + "_years"
assert field_name in self.endpoint_fields(), "{} not in endpoint fields".format(field_name)
assert field_name_years in self.endpoint_fields() , "{} not in endpoint fields".format(field_name_years)
# Insert new field. If it exists already, re-compute it.
if new_field in self.df.columns:
self.df = self.df.drop(columns=[new_field])
idx = self.df.columns.get_loc(field_name) + 1
new_column_vals = []
for f, fy in zip(self.df[field_name], self.df[field_name_years]):
if f == 0: # Default class for biochemical_failure is 0. Same for biochemical_failure_5yr.
new_column_vals.append(0)
if f == 2:
new_column_vals.append(2)
if f == 1:
assert ~np.isnan(fy), "Found biochemical_failure=1, with biochemical_failure_years=nan. Impossible. See rtog {}".format(
self.study_number)
if fy <= years:
new_column_vals.append(1)
else:
new_column_vals.append(failure_outside_timeframe_value)
self.df.insert(loc=idx, column=new_field, value=list(map(int, new_column_vals)))
self.table_sort()
# Update endpoint fields
self._add_endpoint_field(new_field, 0)
def add_disease_specific_survival_Nyr_endpoint(self, years):
"""Adds column 'disease_specific_survival_Nyr' to self.df
Indicates if the patient has lived free of prostate cancer within N years.
Note: Contrast this with disease_free_survival, which means the patient has lived free of any disease.
Args:
years(int): the years.
Column values:
0: Censored
1: Failure within given years
2: Competing event (death from something other than prostate cancer.)
"""
field_name = 'disease_specific_survival'
if self.study_number == '9202':
failure_outside_timeframe_value = 2
# field_name_years = "survival_years" # Stephanie confirmed we can use this value.
elif self.study_number == '9408':
failure_outside_timeframe_value = 2
# field_name_years = "dsm_years" # Osama confirmed we can use this value.
elif self.study_number == '9413':
failure_outside_timeframe_value = 2
elif self.study_number == '9910':
failure_outside_timeframe_value = 2
elif self.study_number == '0126':
failure_outside_timeframe_value = 2
else:
raise ValueError("The failure_outside_timeframe_value for disease specific survival is not set for this study: {}".format(
self.study_number))
field_name_years = field_name + "_years"
assert field_name in self.endpoint_fields(), "{} not in endpoint fields".format(field_name)
assert field_name_years in self.endpoint_fields() , "{} not in endpoint fields".format(field_name_years)
# Insert new field. If it exists already, re-compute it.
new_field = field_name + "_{}year".format(years)
if new_field in self.df.columns:
self.df = self.df.drop(columns=[new_field])
idx = self.df.columns.get_loc(field_name) + 1
new_column_vals = []
for dss, dfsy in zip(self.df[field_name], self.df[field_name_years]):
if dss == 0: # Default class for distant_met is 0. Same for distant_met_5yr.
new_column_vals.append(0)
if dss == 2:
new_column_vals.append(2)
if dss == 1:
if dfsy <= years:
new_column_vals.append(1)
else:
new_column_vals.append(failure_outside_timeframe_value)
try:
self.df.insert(loc=idx, column=new_field, value=list(map(int, new_column_vals)))
except:
import IPython
IPython.embed()
# self.df.insert(loc=idx, column=new_field, value=list(map(int, new_column_vals)))
self.table_sort()
# Update endpoint fields
self._add_endpoint_field(new_field, 0)
def add_survival_Nyr_endpoint(self, years):
"""Adds column 'survival_Nyr' to self.df. Refers to overall survival.
Args:
years(int): the years.
Column values:
0: Alive, within given years.
1: Death, within given years.
"""
field_name = 'survival'
field_name_years = "survival_years" # Note, that for disease_specific_survival=1, we can take the time period from disease_free_surival_years.
assert field_name in self.endpoint_fields(), "{} not in endpoint fields".format(field_name)
assert field_name_years in self.endpoint_fields() , "{} not in endpoint fields".format(field_name_years)
# Insert new field. If it exists already, re-compute it.
new_field = field_name + "_{}year".format(years)
if new_field in self.df.columns:
self.df = self.df.drop(columns=[new_field])
idx = self.df.columns.get_loc(field_name) + 1
new_column_vals = []
for fn, fny in zip(self.df[field_name], self.df[field_name_years]):
if fn == 0: # Default class for distant_met is 0. Same for distant_met_5yr.
new_column_vals.append(0)
if fn == 1:
if fny <= years:
new_column_vals.append(1)
else:
new_column_vals.append(0)
self.df.insert(loc=idx, column=new_field, value=list(map(int, new_column_vals)))
self.table_sort()
# Update endpoint fields
self._add_endpoint_field(new_field, 0)
def add_distant_met_Nyr_endpoint(self, years):
"""Adds column 'distant_met_Nyr' to self.df
Indicates if the cancer metastasized within N years.
Args:
years(int): the years.
Column values:
0: Censored
1: Failure within given years (metastatic prostate cancer)
2: Competing event (death from something other than prostate cancer.)
"""
field_name = 'distant_met'
field_name_years = field_name + "_years"
if self.study_number == '9202':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
elif self.study_number == '9408':
failure_outside_timeframe_value = 0 # Does not have a 'competing events' class.
elif self.study_number == '9413':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
elif self.study_number == '9910':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
elif self.study_number == '0126':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
else:
raise ValueError("The failure_outside_timeframe_value for disease specific survival is not set for this study: {}".format(self.study_number))
assert field_name in self.endpoint_fields(), "{} not in endpoint fields".format(field_name)
assert field_name_years in self.endpoint_fields() , "{} not in endpoint fields".format(field_name_years)
# Insert new field. If it exists already, re-compute it.
new_field = field_name + "_{}year".format(years)
if new_field in self.df.columns:
self.df = self.df.drop(columns=[new_field])
idx = self.df.columns.get_loc(field_name) + 1
new_column_vals = []
for dm, dmy in zip(self.df[field_name], self.df[field_name_years]):
if dm == 0: # Default class for distant_met is 0. Same for distant_met_5yr.
new_column_vals.append(0)
if dm == 2:
new_column_vals.append(2)
if dm == 1:
assert ~np.isnan(dmy), "Found distant_met=1, with distant_met_years=nan. Impossible. See rtog {}".format(self.study_number)
if dmy <= years:
new_column_vals.append(1)
else:
new_column_vals.append(failure_outside_timeframe_value)
self.df.insert(loc=idx, column=new_field, value=list(map(int, new_column_vals)))
self.table_sort()
# Update endpoint fields
self._add_endpoint_field(new_field, 0)
def _add_endpoint_field(self, endpoint_field, default_class_y):
if endpoint_field in self.endpoints:
if self.default_class_y[endpoint_field] != default_class_y:
raise ValueError("Endpoint already listed, with different default class: {}. New attempt: {}".format(
self.default_class_y[endpoint_field], default_class_y
))
return
self.endpoints.append(endpoint_field)
self.default_class_y[endpoint_field] = default_class_y
def printc(self):
prev = pd.options.display.max_columns
prev_r = pd.options.display.max_rows
pd.options.display.max_columns = None
pd.options.display.max_rows = 90
display(self.df)
pd.options.display.max_columns = prev
pd.options.display.max_rows = prev_r
def get_fields(self):
return self.df.columns
def set_study_number(self, number):
if number not in rtog_endpoints:
raise ValueError('Study number not available: {}. Options: {}'.format(number, rtog_endpoints.keys()))
self.study_number = number
self.endpoints = rtog_endpoints[number]
self.default_class_y = rtog_default_class_y[number]
def copy(self):
new_rtog = RTOG()
new_rtog.df = self.df.copy(deep=True)
new_rtog.filename = self.filename
new_rtog.study_number = self.study_number
new_rtog.endpoints = self.endpoints
new_rtog.default_class_y = self.default_class_y
new_rtog.unknown_class_X = self.unknown_class_X
return new_rtog
def drop(self, columns=''):
new_rtog = self.copy()
new_rtog.df = self.df.drop(columns=columns)
return new_rtog
def clear_columns(self, columns=[""]):
"""Sets the specified column values to empty.
Args:
columns(list): the names of the columns to replace.
"""
N = len(self.df)
new_rtog = self.copy()
null_columns = {c : [''] * N for c in columns}
for c, l in null_columns.items():
new_rtog.df[c] = l
return new_rtog
def endpoint_fields(self):
if not self.study_number:
raise ValueError("Study number not set. Cannot select endpoint fields")
return self.endpoints
def text_fields(self):
if not self.study_number:
raise ValueError("Study number not set. Cannot select text fields")
return rtog_text_fields[self.study_number]
def get_Xy(self, y_var=None, make_binary=False):
"""Returns training/testing data, properly formatted.
For each study, see the RTOG XXXX Variable Listings documents for reference.
Args:
y_var(str): the column of self.df to use as the prediction variable. E.g. y_var='cod'
Any rows with nans are removed.
make_binary(bool): if True, it returns a binary vector (0,1), using the class mapping
defined above, rtog_binary_mapping.
"""
# Set X. Don't impute. Boosting methods do this better than you can.
rtog_X = self.drop(columns=self.endpoint_fields() + self.text_fields())
rtog_X = rtog_X.copy()
rtog_meta = self.copy()
rtog_meta.df = rtog_meta.df[self.endpoint_fields()]
# Set y. Impute to default class.
rtog_y = self.copy()
rtog_y = rtog_y[rtog_y.endpoint_fields()]
if y_var:
default_class_y = self.default_class_y[y_var]
rtog_y = rtog_y[y_var]
rtog_y.df = rtog_y.df.fillna(default_class_y)
if make_binary: # Forces y to be binary, using a pre-specified mapping in the parent class.
for c in rtog_y.df.columns:
mapping = rtog_binary_mapping[self.study_number][c]
rtog_y.df[c] = rtog_y.df[c].replace(mapping)
return rtog_X, rtog_y, rtog_meta
def generate_test_set(self, size=100, seed=None, field_to_balance=""):
"""Samples a test set, printing the class statistics of each.
Args:
size(int): the number of entries to sample
seed(int): Optional. Random seed for reproducibility.
field_to_balance(str): Optional. If set, function tries to return an equal class
balance in this field. E.g. disease_free_survival
Returns:
RTOG object - the sampled test set.
"""
if seed is not None:
random.seed(seed)
df = self.df.copy(deep=True)
if field_to_balance:
classes = df[field_to_balance].unique()
indices = {}
for c in classes:
sub_df = df[df[field_to_balance] == c]
indices[c] = list(sub_df.index)
m = min([len(v) for _, v in indices.items()])
for c, l in indices.items():
if len(l) > m:
random.shuffle(l)
indices[c] = l[:m]
idx = [elem for _, l in indices.items() for elem in l]
else:
idx = list(range(len(df)))
random.shuffle(idx)
idx = idx[:size]
new_rtog = self.copy()
new_rtog.df = df
new_rtog.df = new_rtog.df.loc[idx]
return new_rtog
def to_csv(self, filename):
self.df.to_csv(filename)
def standardize_disease_specific_survival(self, drop_prior_columns=True):
self.standardize_disease_specific_survival_events(drop_prior_columns=drop_prior_columns)
self.standardize_disease_specific_survival_years(drop_prior_columns=drop_prior_columns)
# If DSS-years unspecified but DSS censored, set DSS-years to 25 (assume long time).
isnan = self.df['disease_specific_survival_years'].isnull().values
iszero = (self.df['disease_specific_survival'] == 0).values
self.df.loc[np.logical_and(isnan, iszero), 'disease_specific_survival_years'] = 25
def standardize_disease_specific_survival_events(self, drop_prior_columns=True):
"""Merges variants of DSS, prioritizing phoenix, and naming everything disease_specific_survival
Args:
drop_prior_columns(bool): If True, drops the original columns.
"""
bcr_fields = [f for f in self.df.columns if 'disease_specific_survival' in f]
e_bcr_fields = np.array([f for f in bcr_fields if 'year' not in f])
idx_sort = []
idx_sort.append(np.where(['phoenix' in e for e in e_bcr_fields])[0])
idx_sort.append(np.where(['disease_specific_survival' == e for e in e_bcr_fields])[0])
idx_sort = np.array([i[0] for i in idx_sort if len(i) > 0])
e_bcr = self.df[e_bcr_fields[idx_sort]]
new_values = e_bcr[e_bcr.columns[0]]
for i in range(1,len(e_bcr.columns)):
next_best = e_bcr[e_bcr.columns[i]][new_values.isnull()].values.copy()
new_values = new_values.fillna(pd.Series(next_best))
self.df = self.df.assign(disease_specific_survival=new_values)
def standardize_disease_specific_survival_years(self, drop_prior_columns=True):
"""Merges variants of BCR, prioritizing phoenix, and naming everything disease_specific_survival
Args:
drop_prior_columns(bool): If True, drops the original columns.
"""
bcr_fields = [f for f in self.df.columns if 'disease_specific_survival' in f]
e_bcr_fields = np.array([f for f in bcr_fields if 'years' in f])
idx_sort = []
idx_sort.append(np.where(['phoenix' in e for e in e_bcr_fields])[0])
idx_sort.append(np.where(['disease_specific_survival_years' == e for e in e_bcr_fields])[0])
idx_sort = np.array([i[0] for i in idx_sort if len(i) > 0])
e_bcr = self.df[e_bcr_fields[idx_sort]]
new_values = e_bcr[e_bcr.columns[0]]
for i in range(1,len(e_bcr.columns)):
next_best = e_bcr[e_bcr.columns[i]][new_values.isnull()].values.copy()
new_values = new_values.fillna(pd.Series(next_best))
self.df = self.df.assign(disease_specific_survival_years=new_values)
def standardize_biochemical_failure(self, drop_prior_columns=True):
self.standardize_biochemical_failure_events(drop_prior_columns=drop_prior_columns)
self.standardize_biochemical_failure_years(drop_prior_columns=drop_prior_columns)
def standardize_biochemical_failure_events(self, drop_prior_columns=True):
"""Merges variants of BCR, prioritizing phoenix, and naming everything biochemical_failure
Args:
drop_prior_columns(bool): If True, drops the original columns.
"""
bcr_fields = [f for f in self.df.columns if 'biochemical' in f]
e_bcr_fields = np.array([f for f in bcr_fields if 'year' not in f])
idx_sort = []
idx_sort.append(np.where(['phoenix' in e for e in e_bcr_fields])[0])
idx_sort.append(np.where(['biochemical_failure' == e for e in e_bcr_fields])[0])
idx_sort.append(np.where(['astro' in e for e in e_bcr_fields])[0])
idx_sort = np.array([i[0] for i in idx_sort if len(i) > 0])
e_bcr = self.df[e_bcr_fields[idx_sort]]
new_values = e_bcr[e_bcr.columns[0]]
for i in range(1,len(e_bcr.columns)):
next_best = e_bcr[e_bcr.columns[i]][new_values.isnull()].values.copy()
new_values = new_values.fillna(pd.Series(next_best))
self.df = self.df.assign(biochemical_failure=new_values)
def standardize_biochemical_failure_years(self, drop_prior_columns=True):
"""Merges variants of BCR, prioritizing phoenix, and naming everything biochemical_failure
Args:
drop_prior_columns(bool): If True, drops the original columns.
"""
bcr_fields = [f for f in self.df.columns if 'biochemical' in f]
e_bcr_fields = np.array([f for f in bcr_fields if 'years' in f])
idx_sort = []
idx_sort.append(np.where(['phoenix' in e for e in e_bcr_fields])[0])
idx_sort.append(np.where(['biochemical_failure_years' == e for e in e_bcr_fields])[0])
idx_sort.append(np.where(['astro' in e for e in e_bcr_fields])[0])
idx_sort = np.array([i[0] for i in idx_sort if len(i) > 0])
e_bcr = self.df[e_bcr_fields[idx_sort]]
new_values = e_bcr[e_bcr.columns[0]]
for i in range(1,len(e_bcr.columns)):
next_best = e_bcr[e_bcr.columns[i]][new_values.isnull()].values.copy()
new_values = new_values.fillna(pd.Series(next_best))
self.df = self.df.assign(biochemical_failure_years=new_values)
def standardize_baseline_psa(self, drop_prior_columns=True):
"""Merges variants of 'baseline_psa' together across studies.
Args:
drop_prior_columns(bool): If True, drops the original columns.
"""
if self.study_number == '0126':
self.df['baseline_psa'] = self.df['psa']
if drop_prior_columns:
self.df.drop(columns='psa')
def standardize_baseline_serum(self, drop_prior_columns=True):
"""Merges baseline_serum* values into a single, column: baseline_serum_ng_dl, deleting the original columns.
Args:
drop_prior_columns(bool): If True, drops the original baseline_serum and baseline_serum_unit (or equivalent) columns.
"""
baseline_serum_ngdl = []
if self.study_number == "9202":
# Has two columns: baseline_serum, and baseline_serum_nmol_l, which are all mixed up
# Per Osama:
# if the value >100, it's in ng/dl, and belongs to baseline_serum
# if the value <100, it's in nmol_l, and belongs to baseline_serum_nmol_l
# After running the code below:
# import matplotlib.pyplot as plt
# v = list(r9202.df['baseline_serum_nmol_l'].values) + list(r9202.df['baseline_serum'])
# v = [val for val in v if not np.isnan(val)]
# plt.hist(v, bins='auto')
# it is evident that 75 is a better cutoff
cutoff = 75
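# e.g. a raw value of 20 (< 75) is treated as nmol/L and converted via
# serum_values_to_ng_dl; a raw value of 350 (>= 75) is kept as ng/dL as-is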
for index, row in tqdm(self.df.iterrows()):
# If there's a conflict between baseline_serum and baseline_serum_nmol_l, we set the value to NaN
if not (np.isnan(row['baseline_serum']) or np.isnan(row['baseline_serum_nmol_l'])):
print("9202: serum conflict, setting to Nan: index={}, baseline_serum={}, baseline_serum_nmol_l={}".format(
index, row['baseline_serum'], row['baseline_serum_nmol_l']
))
baseline_serum_ngdl.append(np.nan)
continue
# Grab the row's serum value. One column has a nan, the other has a number.
if np.isnan(row['baseline_serum']):
rowval = row['baseline_serum_nmol_l']
else:
rowval = row['baseline_serum']
if rowval < cutoff:
baseline_serum_ngdl.append(serum_values_to_ng_dl(rowval, 'nmol/l'))
else:
baseline_serum_ngdl.append(rowval)
if drop_prior_columns:
self.df.drop(columns=['baseline_serum', 'baseline_serum_nmol_l'], inplace=True)
elif self.study_number == "9408":
# Conversion: 1= NMOL/L 2 = NG/DL 3 = NG/ML 4= Unit/NOS
for index, row in tqdm(self.df.iterrows()):
value = row['baseline_serum']
unit = row['baseline_serum_unit']
if
|
np.isnan(value)
|
numpy.isnan
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 11 17:56:24 2017
@author: sarah
"""
import numpy as np
from misc import *
import world
import environment as env
import agent as agt
import perception as prc
import action_selection as asl
import itertools
import matplotlib.pylab as plt
from multiprocessing import Pool
from matplotlib.colors import LinearSegmentedColormap
import jsonpickle as pickle
import jsonpickle.ext.numpy as jsonpickle_numpy
import json
import seaborn as sns
import os
import pandas as pd
import gc
import pickle
np.set_printoptions(threshold = 100000, precision = 5)
plt.style.use('seaborn-whitegrid')
# always pass a list of classes
def save_data(file_name, objects):
with open(file_name, 'wb') as output_file:
pickle.dump(objects, output_file)
def load_data(file_name):
with open(file_name, 'rb') as file:
objects = pickle.load(file)
return objects
def extract_object(obj):
keys = []
obj_dict = obj.__dict__
for key in obj_dict:
keys.append(key)
return keys, obj_dict
"""
run function
"""
save = True
data_folder = os.path.join('C:\\Users\\admin\\Desktop\\project\\BalancingControl','data')
const = 0#1e-10
trials = 200 #number of trials
T = 5 #number of time steps in each trial
Lx = 4 #grid length
Ly = 5
no = Lx*Ly #number of observations
ns = Lx*Ly #number of states
na = 3 #number of actions
npi = na**(T-1)
nr = 2
nc = ns
actions = np.array([[0,-1], [1,0], [0,1]])
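# the three actions add (dx, dy) offsets to the grid coordinates used in the
# transition loop below: [0,-1] decrements y, [1,0] increments x, [0,1] increments y
# (there is no action that decrements x)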
g1 = 14
g2 = 10
start = 2
print("start", start)
print("g2", g2)
print("g1", g1)
print("nc", nc)
print("nr", nr)
print("npi", npi)
print("na", na)
print("ns", ns)
print("no", no)
print("trials", trials)
print("data_folder", data_folder)
print("save", save)
print('\n\nrunning simulations\n\n')
print('-------------------------')
def run_agent(par_list, trials=trials, T=T, Lx = Lx, Ly = Ly, ns=ns, na=na,var=0.1,run=0,\
sample_post = False, sample_other = False, prior_start = True):
#set parameters:
#obs_unc: observation uncertainty condition
#state_unc: state transition uncertainty condition
#goal_pol: evaluate only policies that lead to the goal
#utility: goal prior, preference p(o)
# over_actions -> ddm uses prior and likelihood over actions or policies
obs_unc, state_unc, goal_pol, selector, context, utility, over_actions, h, q = par_list
print("q", q)
print("h", h)
name_str = selector + '_s'+ str(var)+'_context_' + str(context) + '_over-actions_'+ str(over_actions)+'_h'+str(h) + '_'+str(run)
"""
create matrices
"""
vals = np.array([1., 2/3., 1/2., 1./2.])
#generating probability of observations in each state
A = np.eye(ns) + const
np.fill_diagonal(A, 1-(ns-1)*const)
#state transition generative probability (matrix)
B = np.zeros((ns, ns, na)) + const
cert_arr = np.zeros(ns)
for s in range(ns):
x = s//Ly
y = s%Ly
#state uncertainty condition
if state_unc:
if (x==0) or (y==3):
c = vals[0]
elif (x==1) or (y==2):
c = vals[1]
elif (x==2) or (y==1):
c = vals[2]
else:
c = vals[3]
condition = 'state'
else:
c = 1.
cert_arr[s] = c
for u in range(na):
x = s//Ly+actions[u][0]
y = s%Ly+actions[u][1]
#check if state goes over boundary
if x < 0:
x = 0
elif x == Lx:
x = Lx-1
if y < 0:
y = 0
elif y == Ly:
y = Ly-1
s_new = Ly*x + y
if s_new == s:
B[s, s, u] = 1 - (ns-1)*const
else:
B[s, s, u] = 1-c + const
B[s_new, s, u] = c - (ns-1)*const
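# each column B[:, s, u] sums to 1: a move off the current cell reaches s_new with
# probability ~c and stays in s otherwise; `const` is a small probability floor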
B_c = np.broadcast_to(B[:,:,:,np.newaxis], (ns, ns, na, nc))
"""
create environment (grid world)
"""
Rho = np.zeros((nr,ns)) + const
Rho[0,:] = 1 - (nr-1)*const
Rho[:,np.argmax(utility)] = [0+const, 1-(nr-1)*const]
util = np.array([1-np.amax(utility),
|
np.amax(utility)
|
numpy.amax
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from gluonts.dataset.artificial import recipe as rcp
from sktime.forecasting.model_selection import temporal_train_test_split
class SyntheticSeries:
def __init__(self, length, test_size, shortname=None):
"""
TODO function description
"""
self.length = length
self.test_size = test_size
self.target = self.shortname = shortname
self.components = self.generate_components()
if shortname is None: self.target = self.shortname = self.components.columns[-1]
def generate_components(self):
"""
TODO function description
"""
raise NotImplementedError
def get_data(self, generate_new=True, include_oracle=True):
"""
TODO function description
"""
if generate_new:
self.components = self.generate_components()
y_train, y_test = temporal_train_test_split(self.components[self.target], test_size=self.test_size)
if include_oracle:
return y_train, y_test, self.components['oracle']
else:
return y_train, y_test
def plot_timeseries(self, **kwargs):
"""
TODO function description
"""
ax = self.components['oracle'].plot(marker="o", markersize=5, **kwargs)
self.components[self.target].plot(ax=ax, linewidth=2)
return ax
def plot_components(self,
oracle_on_target=True,
bins_size=20,
histogram_components=['target', 'noise'],
scatter_components=['target', 'noise'],
show_title=False):
"""
TODO function description
"""
fig, axes = plt.subplots(nrows=self.components.shape[1], squeeze=False,
figsize=(20, 3*self.components.shape[1]))
if show_title:
fig.suptitle(self.target, fontsize=14)
histogram_components = [self.target if c=='target' else c for c in histogram_components]
scatter_components = [self.target if c=='target' else c for c in scatter_components]
for ax, label in zip(axes[:, 0], self.components):
if label in histogram_components:
ax.hist(self.components[label], bins=self.length//bins_size,
orientation="horizontal", color='#3333FF55')
if label in scatter_components:
ax.scatter(x=range(self.components.shape[0]),
y=self.components[label], s=5)
else:
self.components[label].plot(ax=ax, linewidth=2)
ax.set_xlim(xmin=0)
ax.grid(False)
ax.set_ylabel(label)
if oracle_on_target:
self.components.oracle.plot(ax=axes[-1, 0], linewidth=1)
return axes
class GluonSyntheticSeries(SyntheticSeries):
def __init__(self, length, test_size, **recipe):
"""
TODO function description
"""
self.recipe = recipe
self.gluonts_recipe = [(label, recipe[label]) for label in recipe]
super().__init__(length, test_size, self.gluonts_recipe[-1][0])
def generate_components(self):
"""
TODO function description
"""
# generates a dataframe, with timeseries components
# the last two columns should be the oracle and the final timeseries
components = rcp.evaluate(self.gluonts_recipe, self.length)
for label in components:
if components[label].shape[0] == 1:
components[label] = np.repeat(components[label], self.length)
return pd.DataFrame(components, dtype=float)
gaussian = GluonSyntheticSeries(
length = 1000,
test_size = 300,
oracle = rcp.RandomUniform(-100, 100, shape=1),
Gaussian = rcp.RandomGaussian(rcp.RandomUniform(0.01, 10, shape=1)) + 'oracle'
)
uniform = GluonSyntheticSeries(
length = 1000,
test_size = 300,
range = rcp.RandomUniform(0.01, 10, shape=1),
oracle = rcp.RandomUniform(-100, 100, shape=1),
Uniform = rcp.RandomUniform(rcp.Ref('oracle')-rcp.Ref('range'),
rcp.Ref('oracle')+rcp.Ref('range'))
)
symdirichlet = GluonSyntheticSeries(
length = 1000,
test_size = 300,
intercept = rcp.RandomUniform(-100, 100, shape=1),
alpha = rcp.RandomUniform(0.01, 100, shape=1),
oracle = rcp.Add([1/300, 'intercept']), # 1/lengh
SymmetricDirichlet = rcp.RandomSymmetricDirichlet('alpha') + 'intercept'
)
class GeometricBrownianMotion(SyntheticSeries):
def __init__(self, length, test_size):
super().__init__(length, test_size, 'Brownian Motion')
def generate_components(self):
# define initial value of time series
initial = np.random.exponential(1000)
# define mean and std of underlying normal distributiion
mean = np.random.exponential(0.0005)
std = np.random.uniform(0.01, 0.03)
# x[t] = x[t-1] * lognormal(mean, std)
# returns are said to be log-normally distributed.
# this means that the latent variable (also known as noise during simulation)
# is normally distributed, but has an exp() transformation applied to it.
#factor = np.exp(np.cumsum(np.log(np.random.lognormal(mean, std, self.length))))
#factor = np.exp(np.cumsum(np.log(np.exp(np.random.normal(mean, std, self.length)))))
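# equivalently: x[t] = initial * exp(sum_{i<=t} eps_i) with eps_i ~ N(mean, std);
# the oracle below is the noise-free drift path initial * exp(mean * t)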
series = initial * np.exp(np.cumsum(np.random.normal(mean, std, self.length)))
#oracle = np.empty(shape=self.length)
#oracle[1:] = series[:-1] * np.exp(mean)
#oracle[0] = np.nan
oracle = initial * np.exp(np.arange(self.length) * mean)
return pd.DataFrame({'oracle': oracle, self.shortname: series})
brownian = GeometricBrownianMotion(length=1000, test_size=300)
trend_clean = GluonSyntheticSeries(
length = 1000,
test_size = 300,
oracle = rcp.LinearTrend(slope=rcp.RandomUniform(2, 10, shape=1)),
Trend = 'oracle'
)
trend_small_noise = GluonSyntheticSeries(
length = 1000,
test_size = 300,
noise = rcp.RandomGaussian(stddev=rcp.RandomUniform(0.5, 1, shape=1)),
oracle = rcp.LinearTrend(slope=rcp.RandomUniform(3, 10, shape=1)),
Trend = rcp.Add(['oracle','noise'])
)
trend_big_noise = GluonSyntheticSeries(
length = 1000,
test_size = 300,
noise = rcp.RandomGaussian(stddev=rcp.RandomUniform(3, 5, shape=1)),
oracle = rcp.LinearTrend(slope=rcp.RandomUniform(3, 10, shape=1)),
Trend = rcp.Add(['oracle','noise'])
)
seasonality = GluonSyntheticSeries(
length = 1000,
test_size = 300,
oracle = rcp.SmoothSeasonality(period=rcp.RandomUniform(20, 100, shape=1), phase=0),
Seasonality = 'oracle'
)
two_seasonalities = GluonSyntheticSeries(
length = 1000,
test_size = 300,
seasonality1 = rcp.SmoothSeasonality(period=rcp.RandomUniform(20, 100, shape=1), phase=0),
seasonality2 = rcp.SmoothSeasonality(period=rcp.RandomUniform(100, 200, shape=1), phase=0),
oracle = rcp.Add(['seasonality1','seasonality2']),
TwoSeasonalities = 'oracle'
)
seasonality_noisy = GluonSyntheticSeries(
length = 1000,
test_size = 300,
oracle = rcp.SmoothSeasonality(period=rcp.RandomUniform(20, 200, shape=1), phase=0),
noise = rcp.RandomGaussian(stddev=rcp.RandomUniform(0.1, 0.8, shape=1)),
SeasonalityNoisy = rcp.Add(['oracle', 'noise'])
)
two_seasonalities_noisy = GluonSyntheticSeries(
length = 1000,
test_size = 300,
seasonality1 = rcp.SmoothSeasonality(period=rcp.RandomUniform(20, 100, shape=1), phase=0),
seasonality2 = rcp.SmoothSeasonality(period=rcp.RandomUniform(100, 200, shape=1), phase=0),
oracle = rcp.Add(['seasonality1','seasonality2']),
noise = rcp.RandomGaussian(stddev=rcp.RandomUniform(0.1, 0.8, shape=1)),
TwoSeasonalitiesNoisy = rcp.Add(['oracle', 'noise'])
)
trend_seasonality = GluonSyntheticSeries(
length = 1000,
test_size = 300,
trend = rcp.LinearTrend(slope=rcp.RandomUniform(1, 10, shape=1)),
seasonality = rcp.SmoothSeasonality(period=rcp.RandomUniform(10, 200, shape=1), phase=0),
oracle = rcp.Add(['trend', 'seasonality']),
TrendSeasonality = 'oracle'
)
class WeeklySyntheticSeries(SyntheticSeries):
def __init__(self, length, test_size):
super().__init__(length, test_size, 'Weekly Patterns')
def generate_components(self):
# define mean and std to reasonable values
mean1 =
|
np.random.exponential(10)
|
numpy.random.exponential
|
import random as rand
import numpy as np
import multiprocessing as mp
from scipy.spatial import HalfspaceIntersection, ConvexHull
from scipy.spatial.qhull import QhullError
from dataclasses import dataclass
from operator import mul, add
from functools import reduce
from collections import namedtuple
from kaa.lputil import minLinProg, maxLinProg, LPUtil
from settings import KaaSettings
from kaa.trajectory import Traj, TrajCollection
from settings import KaaSettings
from kaa.log import Output
@dataclass
class VolumeData:
ConvHullVol: float
EnvelopBoxVol: float
class ChebyCenter:
def __init__(self, center, radius):
self.center = center
self.radius = radius
class LinearSystem:
def __init__(self, model, A, b, constr_mat=None):
self.A = A
self.b = b
self.model = model
self.vars = model.vars
self.dim = model.dim
self.constr_mat = constr_mat # Pointer to total constraint mat for LPUtil purposes.
self.randgen = rand.Random(KaaSettings.RandSeed)
"""
Computes and returns the Chebyshev center of parallelotope.
@returns self.dim point marking the Chebyshev center.
"""
@property
def chebyshev_center(self):
'Initialize objective function for Chebyshev intersection LP routine.'
c = [0 for _ in range(self.dim + 1)]
c[-1] = 1
row_norm = np.reshape(np.linalg.norm(self.A, axis=1), (self.A.shape[0], 1))
center_A = np.hstack((self.A, row_norm))
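# Chebyshev-center LP: maximize the radius r subject to A x + ||a_i||_2 * r <= b;
# the optimal (x, r) is the center and radius of the largest inscribed ball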
center_pt = maxLinProg(self.model, c, center_A, self.b).x
return ChebyCenter(center_pt[:-1], center_pt[-1])
"""
Volume estimation of system by sampling points and taking ratio.
@params samples: number of samples used to estimate volume
@returns estimated volume of linear system stored in VolDataTuple
"""
@property
def volume(self):
envelop_box_vol = self.calc_vol_envelop_box()
conv_hull_vol = self.calc_vol_conv_hull() if self.dim < 4 else None
return VolumeData(conv_hull_vol, envelop_box_vol)
"""
Find vertices of this linear system.
"""
@property
def vertices(self):
phase_intersect = np.hstack((self.A, - np.asarray([self.b]).T))
'Run scipy.spatial.HalfspaceIntersection.'
try:
center_pt = np.asarray(self.chebyshev_center.center)
hs = HalfspaceIntersection(phase_intersect, center_pt)
except QhullError:
feasible_pt = self.feasible_point()
hs = HalfspaceIntersection(phase_intersect, feasible_pt)
vertices = np.asarray(hs.intersections)
return vertices
def feasible_point(self, use_interior_point=True, num_samples=5):
if use_interior_point:
return minLinProg(self, np.zeros(self.dim), self.A, self.b, method='Interior').x
else:
sample_mat =
|
np.random.randn(num_samples, self.dim)
|
numpy.random.randn
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The basic photometry class for the TASOC Photometry pipeline.
All other specific photometric algorithms will inherit from BasePhotometry.
.. codeauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import h5py
import sqlite3
import logging
import datetime
import os.path
import glob
import contextlib
import warnings
from copy import deepcopy
from astropy._erfa.core import ErfaWarning
from astropy.io import fits
from astropy.table import Table, Column
from astropy import units
import astropy.coordinates as coord
from astropy.time import Time
from astropy.wcs import WCS
import enum
from bottleneck import nanmedian, nanvar, nanstd, allnan
from .image_motion import ImageMovementKernel
from .quality import TESSQualityFlags, PixelQualityFlags, CorrectorQualityFlags
from .utilities import (find_tpf_files, find_hdf5_files, find_catalog_files, rms_timescale,
find_nearest, ListHandler)
from .catalog import catalog_sqlite_search_footprint
from .plots import plot_image, plt, save_figure
from .spice import TESS_SPICE
from .version import get_version
from . import fixes
# Filter out annoying warnings:
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=ErfaWarning, module="astropy")
__version__ = get_version()
__docformat__ = 'restructuredtext'
hdf5_cache = {}
#--------------------------------------------------------------------------------------------------
@enum.unique
class STATUS(enum.Enum):
"""
Status indicator of the status of the photometry.
"""
UNKNOWN = 0 #: The status is unknown. The actual calculation has not started yet.
STARTED = 6 #: The calculation has started, but not yet finished.
OK = 1 #: Everything has gone well.
ERROR = 2 #: Encountered a catastrophic error that I could not recover from.
WARNING = 3 #: Something is a bit fishy. Maybe we should try again with a different algorithm?
ABORT = 4 #: The calculation was aborted.
SKIPPED = 5 #: The target was skipped because the algorithm found that to be the best solution.
#--------------------------------------------------------------------------------------------------
class BasePhotometry(object):
"""
The basic photometry class for the TASOC Photometry pipeline.
All other specific photometric algorithms will inherit from this.
Attributes:
starid (int): TIC number of star being processed.
input_folder (str): Root directory where files are loaded from.
output_folder (str): Root directory where output files are saved.
plot (bool): Indicates whether plots should be created as part of the output.
plot_folder (str): Directory where plots are saved to.
method (str): String indication the method of photometry.
sector (int): TESS observing sector.
camera (int): TESS camera (1-4).
ccd (int): TESS CCD (1-4).
data_rel (int): Data release number.
n_readout (int): Number of frames co-added in each timestamp.
header (dict-like): Primary header, either from TPF or HDF5 files.
target (dict): Catalog information about the main target.
target_mag (float): TESS magnitude of the main target.
target_pos_ra (float): Right ascension of the main target at time of observation.
target_pos_dec (float): Declination of the main target at time of observation.
target_pos_ra_J2000 (float): Right ascension of the main target at J2000.
target_pos_dec_J2000 (float): Declination of the main target at J2000.
target_pos_column (float): Main target CCD column position.
target_pos_row (float): Main target CCD row position.
target_pos_column_stamp (float): Main target CCD column position in stamp.
target_pos_row_stamp (float): Main target CCD row position in stamp.
wcs (:class:`astropy.wcs.WCS`): World Coordinate system solution.
lightcurve (:class:`astropy.table.Table`): Table to be filled with an extracted lightcurve.
final_phot_mask (numpy.ndarray): Mask indicating which pixels were used in extraction of
lightcurve. ``True`` if used, ``False`` otherwise.
final_position_mask (numpy.ndarray): Mask indicating which pixels were used in extraction
of positions. ``True`` if used, ``False`` otherwise.
additional_headers (dict): Additional headers to be included in FITS files.
.. codeauthor:: <NAME> <<EMAIL>>
"""
#----------------------------------------------------------------------------------------------
def __init__(self, starid, input_folder, output_folder, datasource='ffi',
sector=None, camera=None, ccd=None, plot=False, cache='basic', version=5):
"""
Initialize the photometry object.
Parameters:
starid (int): TIC number of star to be processed.
input_folder (string): Root directory where files are loaded from.
output_folder (string): Root directory where output files are saved.
datasource (string, optional): Source of the data. Options are ``'ffi'`` or ``'tpf'``.
Default is ``'ffi'``.
plot (boolean, optional): Create plots as part of the output. Default is ``False``.
camera (integer, optional): TESS camera (1-4) to load target from (Only used for FFIs).
ccd (integer, optional): TESS CCD (1-4) to load target from (Only used for FFIs).
cache (string, optional): Optional values are ``'none'``, ``'full'``
or ``'basic'`` (Default).
version (integer): Data release number to be added to headers. Default=5.
Raises:
Exception: If starid could not be found in catalog.
FileNotFoundError: If input file (HDF5, TPF, Catalog) could not be found.
ValueError: On invalid datasource.
ValueError: If ``camera`` and ``ccd`` is not provided together with ``datasource='ffi'``.
"""
logger = logging.getLogger(__name__)
if datasource != 'ffi' and not datasource.startswith('tpf'):
raise ValueError("Invalid datasource: '%s'" % datasource)
if cache not in ('basic', 'none', 'full'):
raise ValueError("Invalid cache: '%s'" % cache)
# Store the input:
self.starid = starid
self.input_folder = os.path.abspath(input_folder)
self.output_folder_base = os.path.abspath(output_folder)
self.plot = plot
self.datasource = datasource
self.version = version
# Further checks of inputs:
if os.path.isfile(self.input_folder):
self.input_folder = os.path.dirname(self.input_folder)
if not os.path.isdir(self.input_folder):
raise FileNotFoundError("Not a valid input directory: '%s'" % self.input_folder)
# Extract which photometric method that is being used by checking the
# name of the class that is running:
self.method = {
'BasePhotometry': 'base',
'AperturePhotometry': 'aperture',
'PSFPhotometry': 'psf',
'LinPSFPhotometry': 'linpsf',
'HaloPhotometry': 'halo'
}.get(self.__class__.__name__, None)
logger.info('STARID = %d, DATASOURCE = %s, METHOD = %s',
self.starid, self.datasource, self.method)
self._status = STATUS.UNKNOWN
self._details = {}
self.tpf = None
self.hdf = None
self._MovementKernel = None
self._images_cube_full = None
self._images_err_cube_full = None
self._backgrounds_cube_full = None
self._pixelflags_cube_full = None
self._sumimage_full = None
# Add a ListHandler to the logging of the corrections module.
# This is needed to catch any errors and warnings made by the correctors
# for ultimately storing them in the TODO-file.
# https://stackoverflow.com/questions/36408496/python-logging-handler-to-append-to-list
self.message_queue = []
handler = ListHandler(message_queue=self.message_queue, level=logging.WARNING)
formatter = logging.Formatter('%(levelname)s: %(message)s')
handler.setFormatter(formatter)
logging.getLogger('photometry').addHandler(handler)
# Directory where output files will be saved:
self.output_folder = os.path.join(
self.output_folder_base,
self.datasource[:3], # Only three first characters for cases with "tpf:XXXXXX"
'{0:011d}'.format(self.starid)[:5]
)
# Set directory where diagnostics plots should be saved to:
self.plot_folder = None
if self.plot:
self.plot_folder = os.path.join(self.output_folder, 'plots', '{0:011d}'.format(self.starid))
os.makedirs(self.plot_folder, exist_ok=True)
# Init table that will be filled with lightcurve stuff:
self.lightcurve = Table()
if self.datasource == 'ffi':
# The camera and CCD should also come as input
# They will be needed to find the correct input files
if sector is None or camera is None or ccd is None:
raise ValueError("SECTOR, CAMERA and CCD keywords must be provided for FFI targets.")
self.sector = sector # TESS observing sector.
self.camera = camera # TESS camera.
self.ccd = ccd # TESS CCD.
logger.debug('SECTOR = %s', self.sector)
logger.debug('CAMERA = %s', self.camera)
logger.debug('CCD = %s', self.ccd)
# Load stuff from the common HDF5 file:
filepath_hdf5 = find_hdf5_files(self.input_folder, sector=self.sector, camera=self.camera, ccd=self.ccd)
if len(filepath_hdf5) != 1:
raise FileNotFoundError("HDF5 File not found. SECTOR=%d, CAMERA=%d, CCD=%d" % (self.sector, self.camera, self.ccd))
filepath_hdf5 = filepath_hdf5[0]
self.filepath_hdf5 = filepath_hdf5
logger.debug("CACHE = %s", cache)
load_into_cache = False
if cache == 'none':
load_into_cache = True
else:
global hdf5_cache
if filepath_hdf5 not in hdf5_cache:
hdf5_cache[filepath_hdf5] = {}
load_into_cache = True
elif cache == 'full' and hdf5_cache[filepath_hdf5].get('_images_cube_full') is None:
load_into_cache = True
# Open the HDF5 file for reading if we are not holding everything in memory:
if load_into_cache or cache != 'full':
self.hdf = h5py.File(filepath_hdf5, 'r')
if load_into_cache:
logger.debug('Loading basic data into cache...')
attrs = {}
# Just a shorthand for the attributes we use as "headers":
hdr = dict(self.hdf['images'].attrs)
attrs['header'] = hdr
attrs['data_rel'] = hdr['DATA_REL'] # Data release number
# Start filling out the basic vectors:
self.lightcurve['time'] = Column(self.hdf['time'], description='Time', dtype='float64', unit='TBJD')
N = len(self.lightcurve['time'])
self.lightcurve['cadenceno'] = Column(self.hdf['cadenceno'], description='Cadence number', dtype='int32')
self.lightcurve['quality'] = Column(self.hdf['quality'], description='Quality flags', dtype='int32')
if 'timecorr' in self.hdf:
self.lightcurve['timecorr'] = Column(self.hdf['timecorr'], description='Barycentric time correction', unit='days', dtype='float32')
else:
self.lightcurve['timecorr'] = Column(np.zeros(N, dtype='float32'), description='Barycentric time correction', unit='days', dtype='float32')
# Correct timestamp offset that was in early data releases:
self.lightcurve['time'] = fixes.time_offset(self.lightcurve['time'], hdr, datatype='ffi')
attrs['lightcurve'] = self.lightcurve
# World Coordinate System solution:
if isinstance(self.hdf['wcs'], h5py.Group):
refindx = self.hdf['wcs'].attrs['ref_frame']
hdr_string = self.hdf['wcs']['%04d' % refindx][0]
else:
hdr_string = self.hdf['wcs'][0]
if not isinstance(hdr_string, str): hdr_string = hdr_string.decode("utf-8") # For Python 3
self.wcs = WCS(header=fits.Header().fromstring(hdr_string), relax=True) # World Coordinate system solution.
attrs['wcs'] = self.wcs
# Get shape of sumimage from hdf5 file:
attrs['_max_stamp'] = (0, self.hdf['sumimage'].shape[0], 0, self.hdf['sumimage'].shape[1])
attrs['pixel_offset_row'] = hdr.get('PIXEL_OFFSET_ROW', 0)
attrs['pixel_offset_col'] = hdr.get('PIXEL_OFFSET_COLUMN', 44) # Default for TESS data
# Get info for psf fit Gaussian statistic:
attrs['readnoise'] = hdr.get('READNOIS', 10)
attrs['gain'] = hdr.get('GAIN', 100)
attrs['num_frm'] = hdr.get('NUM_FRM', 900) # Number of frames co-added in each timestamp (Default=TESS).
attrs['n_readout'] = hdr.get('NREADOUT', int(attrs['num_frm']*(1-2/hdr.get('CRBLKSZ', np.inf)))) # Number of readouts
# Load MovementKernel into memory:
attrs['_MovementKernel'] = self.MovementKernel
# The full sum-image:
attrs['_sumimage_full'] = np.asarray(self.hdf['sumimage'])
# Store attr in global variable:
hdf5_cache[filepath_hdf5] = deepcopy(attrs)
# If we are doing a full cache (everything in memory) load the image cubes as well.
# Note that this will take up A LOT of memory!
if cache == 'full':
logger.warning('Loading full image cubes into cache...')
hdf5_cache[filepath_hdf5]['_images_cube_full'] = np.empty((attrs['_max_stamp'][1], attrs['_max_stamp'][3], N), dtype='float32')
hdf5_cache[filepath_hdf5]['_images_err_cube_full'] = np.empty((attrs['_max_stamp'][1], attrs['_max_stamp'][3], N), dtype='float32')
hdf5_cache[filepath_hdf5]['_backgrounds_cube_full'] =
|
np.empty((attrs['_max_stamp'][1], attrs['_max_stamp'][3], N), dtype='float32')
|
numpy.empty
|
import numpy as np
import networkx as nx
from astar import AStar
'''
to install astar:
git clone https://github.com/jrialland/python-astar.git
cd python-astar
python3 setup.py install
'''
def discretize(x, discretization_bins=20, unif_range=(-1, 1)):
try:
assert type(x) == np.ndarray
assert (unif_range[0] < x).all() and (x < unif_range[1]).all()
except AssertionError:
import ipdb
ipdb.set_trace()
bins = np.linspace(unif_range[0], unif_range[1], num=discretization_bins)
return np.digitize(x, bins)
def undiscretize(x, discretization_bins=20, unif_range=(-1, 1)):
try:
assert type(x) == np.ndarray
assert (0 < x).all() and (x < discretization_bins).all()
except AssertionError:
import ipdb
ipdb.set_trace()
bins = np.linspace(unif_range[0], unif_range[1], num=discretization_bins)
return 0.5 * (bins[x] + bins[x - 1])
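# Illustrative sketch (not part of the original module): a round-trip through
# discretize/undiscretize. With the default 20 bins on (-1, 1) the bin width
# is 2/19, so the reconstruction error is at most half a bin width. The input
# values below are arbitrary example data.
def _example_discretize_roundtrip():
    x = np.array([-0.73, 0.01, 0.68])
    idx = discretize(x, discretization_bins=20)        # integer bin indices in [1, 19]
    x_hat = undiscretize(idx, discretization_bins=20)  # mid-points of adjacent bin edges
    assert np.all(np.abs(x - x_hat) <= (2.0 / 19) / 2 + 1e-9)
    return x_hat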
class StateObsTuple():
def __init__(self, state, obs):
self.state = state.astype(int)
self.obs = obs
def __eq__(self, other):
return np.array_equal(self.state, other.state)
def __hash__(self):
return hash(tuple(self.state.tolist()))
def unpack(self):
return (self.state, self.obs)
def plan_traj(trans_prob, start_obs, goal_obs, posterior_function):
weights = -np.log(trans_prob + 1e-8)
# apply cutoff to very low probabilities
cutoff = 3
weights[weights > cutoff] = 0
G = nx.DiGraph(weights)
c_start = posterior_function(start_obs)
c_goal = posterior_function(goal_obs)
try:
c_traj = nx.shortest_path(G, source=c_start, target=c_goal, weight='weight')
    except (nx.NetworkXNoPath, nx.NodeNotFound):
c_traj = [] # no trajectory found
return c_traj
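# Illustrative sketch (not part of the original module): plan over a toy
# 3-state chain. The "posterior" here simply returns the observation itself
# as a discrete state index; the transition matrix is made-up example data.
def _example_plan_traj():
    trans_prob = np.array([[0.8, 0.2, 0.0],
                           [0.1, 0.8, 0.1],
                           [0.0, 0.2, 0.8]])
    identity_posterior = lambda obs: obs
    return plan_traj(trans_prob, start_obs=0, goal_obs=2,
                     posterior_function=identity_posterior)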
class SolverNoPruning(AStar):
"""
Use astar algorithm.
Node is a tuple: (binary state representation of size c_dim, observation vector)
transition function : map current state to a sample of next state
posterior_function : map observation to state
discriminator_function : map two observations to confidence score (that they are from the data)
generator_function : map current state, next state, and current observation to a sample of next observation
"""
def __init__(self,
transition_function,
generator_function,
discriminator_function=None,
relaxation=10.0,
mc_samples=100):
self.transition = transition_function
self.generator = generator_function
self.mc_samples = mc_samples
self.relaxation = relaxation # astar relaxed heuristic
self.n_expanded = 0
def heuristic_cost_estimate(self, n1, n2):
"""Euclidean heuristic"""
return self.relaxation * np.linalg.norm(n1.state - n2.state)
def distance_between(self, n1, n2):
"""minimize euclidean distance"""
return np.linalg.norm(n1.state - n2.state)
def is_goal_reached(self, current, goal):
""" returns true when we can consider that 'current' is the goal"""
return np.array_equal(current.state, goal.state)
def neighbors(self, node):
"""
Sample next states from current state, and generate corresponding observations.
Use discriminator to prune impossible observation transitions
"""
self.n_expanded += 1
# if self.n_expanded %1 ==0:
# print("\tExpanded %d nodes" % self.n_expanded)
state, observation = node.unpack()
observations =
|
np.tile(observation, (self.mc_samples, 1))
|
numpy.tile
|
"""
Beta python scripts for paper
"Reaction-drift-diffusion models from master equations: application to material defects"
(c) <NAME>, CNRS / CINaM, 2021
tomswinburne.github.io
"""
import numpy as np
import scipy.linalg as spla
import matplotlib.pyplot as plt
import msmtools.analysis as mana
def ordev(M,vec=False,pi=None):
"""
Wrapper to order and normalize scipy.linalg.eig results
M : numpy matrix
Square matrix to be diagonalized
"""
if pi is None:
_nu,_w,_v = spla.eig(M,left=True)
_nu = -_nu.real
_w = _w[:,_nu.argsort()]
_v = _v[:,_nu.argsort()]
_nu = _nu[_nu.argsort()]
_w = [email protected](1.0/np.diag(_w.T@_v)) * _v[:,0].sum()
_v /= _v[:,0].sum()
if not vec:
return _nu
else:
return _nu,_w,_v
else:
assert pi.size==M.shape[0],"Dimension Mismatch!"
rPi = np.diag(np.sqrt(pi))
riPi = np.diag(1.0/np.sqrt(pi))
nu,tw = np.linalg.eigh(-riPi@M@rPi)
tw = tw[:,nu.argsort()]
nu = nu[nu.argsort()]
if not vec:
return nu
w = riPi@tw
v = rPi@tw
return nu,w,v
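def _example_ordev():
    """
    Illustrative sketch (not part of the original script): build a small
    detailed-balance rate matrix with stationary distribution pi and check
    that ordev() returns its relaxation rates in ascending order, with the
    zero (stationary) mode first. All numbers are made-up example data.
    """
    pi = np.array([0.5, 0.3, 0.2])
    S = np.array([[0.0, 1.0, 2.0],
                  [1.0, 0.0, 1.5],
                  [2.0, 1.5, 0.0]])      # symmetric coupling, zero diagonal
    M = S / pi[None, :]                  # off-diagonal rates obey M[i,j]*pi[j] = M[j,i]*pi[i]
    np.fill_diagonal(M, -M.sum(axis=0))  # columns sum to zero
    assert np.allclose(M@pi, 0.0)        # pi is stationary
    nu = ordev(M, pi=pi)
    assert abs(nu[0]) < 1e-10 and np.all(np.diff(nu) >= 0.0)
    return nu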
def generate_random_blocks( N=10,
mean_bar = [0.1,1.0],
                            std_bar = 0.3, # std dev for barrier energies
                            std_min = 0.0, # std dev for minima energies
                            gen = np.random.uniform, # barrier distribution law
                            seed = 123, # random number seed
connectivity=3, # average number of connections
sub_basin=False, # sub-super-basins in cell?
sub_basin_barrier = 4.0, # inter-sub-basin transitions
sub_basin_migration = 0.0, # self migration barrier
sub_basin_height = 0.0, # relative heights
sub_basin_size=2, # size of "high" basin
sub_basin_aniso=False # sub-basin-dependent migration
):
"""
Helper function to generate random periodic transition rate matricies
N : int
number of states
mean_bar : list, size dim+1
mean barrier for Q(0), Q(x), Q(y)...
std_bar : float
std dev for barrier energies
std_min : float
std dev for minima energies
gen : numpy.random distribution
barrier distribution law
seed : int
random number seed
sub_basin : bool
If true, split cell into two sub-super-basins.
To produce multiple coarse-grained states
"""
np.random.seed(seed)
"""
Detailed balance:
C@Pi = (C@Pi).T
L@Pi = (R@Pi).T
"""
dim = len(mean_bar)-1
pi = np.exp(-gen(size=N)*std_min)
if sub_basin:
pi[-sub_basin_size:] *= np.exp(-sub_basin_height) # scale is small
pi /= pi.max()
Pi = np.diag(pi)
iPi = np.diag(1.0/pi)
# intercell
CPi = gen(size=(N,N))*std_bar
CPi -= CPi.mean()
CPi = np.exp(-CPi-mean_bar[0]) + np.exp(-CPi-mean_bar[0]).T
if sub_basin:
CPi[-sub_basin_size:,:][:,:sub_basin_size] *= np.exp(-sub_basin_barrier)
CPi[:sub_basin_size,:][:,-sub_basin_size:] *= np.exp(-sub_basin_barrier)
C = CPi@iPi
kt = C.sum(axis=0)
L,R = [],[]
for _dim,mu in enumerate(mean_bar[1:]):
_L = gen(size=(N,N))*std_bar
_L -= _L.mean()
if connectivity<N:
_L = np.triu(np.exp(-_L-mu),max(0,N-connectivity))
else:
_L = np.exp(-_L-mu)
if sub_basin_aniso:
if _dim==0:
ll = _L[-sub_basin_size:,:][:,-sub_basin_size:].shape
sub_basin_k = np.exp(-0.1*
|
np.random.uniform(size=ll)
|
numpy.random.uniform
|
"""
Developer: vujadeyoon
E-mail: <EMAIL>
Github: https://github.com/vujadeyoon/vujade
Title: vujade_videocv.py
Description: A module for video processing with computer vision.
"""
import os
import numpy as np
import math
import cv2
import ffmpeg
import shlex
import subprocess
import json
from vujade import vujade_utils as utils_
from vujade.utils.SceneChangeDetection.InteractiveProcessing import scd as scd_ip_
from vujade.utils.SceneChangeDetection.BatchProcessing import scd as scd_bp_
def encode_vid2vid(_path_video_src, _path_video_dst):
os.system('ffmpeg -i {} {}'.format(_path_video_src, _path_video_dst))
class VideoReaderFFmpeg:
def __init__(self, _path_video, _channel=3, _pix_fmt='bgr24'):
self.path_video = _path_video
video_info = self._get_info()
self.height = video_info['height']
self.width = video_info['width']
self.channel = _channel
self.fps = eval(video_info['avg_frame_rate'])
self.time = eval(video_info['duration'])
self.num_frames = math.ceil(self.fps * self.time)
self.pix_fmt = _pix_fmt
self.idx_frame_curr = -1
self.num_frames_remain = self.num_frames
self.cap = (
ffmpeg
.input(self.path_video)
.output('pipe:', format='rawvideo', pix_fmt=self.pix_fmt)
.run_async(pipe_stdout=True)
)
def imread(self, _num_batch_frames=1, _trans=(0, 3, 1, 2)):
if self.num_frames_remain < _num_batch_frames:
            _num_batch_frames = self.num_frames_remain # clamp to the number of remaining frames
in_bytes = self.cap.stdout.read((self.width * self.height * self.channel) * _num_batch_frames)
if not in_bytes:
return None
frames = (
np
.frombuffer(in_bytes, np.uint8)
.reshape([-1, self.height, self.width, self.channel])
)
if _trans is not None:
frames = frames.transpose(_trans)
self.idx_frame_curr += _num_batch_frames
self.num_frames_remain -= _num_batch_frames
self._cal_eof()
return frames
def _cal_eof(self):
self.is_eof = (self.num_frames_remain == 0)
def _get_info(self):
probe = ffmpeg.probe(self.path_video)
return next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
class VideoWriterFFmpeg:
def __init__(self, _path_video, _resolution=(1080, 1920), _fps=30.0, _qp_val=0, _pix_fmt='bgr24', _codec='libx264'):
if _path_video is None:
raise ValueError('The parameter, _path_video, should be assigned.')
self.path_video = _path_video
self.height = int(_resolution[0])
self.width = int(_resolution[1])
self.fps = float(_fps)
self.qp_val = _qp_val
self.pix_fmt = _pix_fmt
self.codec = _codec
self.wri = (
ffmpeg
.input('pipe:', format='rawvideo', pix_fmt=self.pix_fmt, s='{}x{}'.format(self.width, self.height))
.filter('fps', fps=self.fps, round='up')
.output(self.path_video, pix_fmt='yuv420p', **{'c:v': self.codec}, **{'qscale:v': self.qp_val})
.overwrite_output()
.run_async(pipe_stdin=True)
)
def imwrite(self, _list_img):
for idx, img in enumerate(_list_img):
self.wri.stdin.write(img)
def close(self):
self.wri.stdin.close()
self.wri.wait()
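# Illustrative pairing of the two FFmpeg wrappers above (not part of the
# original module; file paths and the batch size are hypothetical). The reader
# yields raw bgr24 frames of shape (N, H, W, C), which the writer re-encodes;
# any remainder frames beyond the last full batch are ignored in this sketch.
#   reader = VideoReaderFFmpeg(_path_video='./in.mp4')
#   writer = VideoWriterFFmpeg(_path_video='./out.mp4', _resolution=(reader.height, reader.width), _fps=reader.fps)
#   for _ in range(reader.num_frames // 8):
#       frames = reader.imread(_num_batch_frames=8, _trans=None)
#       writer.imwrite(list(frames))
#   writer.close()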
class VideoReaderCV:
def __init__(self, _path_video: str, _sec_start: int = None, _sec_end: int = None) -> None:
if _path_video is None:
raise ValueError('The parameter, _path_video, should be assigned.')
if (_sec_start is not None) and (isinstance(_sec_start, int) is False):
raise ValueError('The parameter, _sec_start, should be None or integer.')
if (_sec_end is not None) and (isinstance(_sec_end, int) is False):
raise ValueError('The parameter, _sec_end, should be None or integer.')
if (isinstance(_sec_start, int) is True) and (isinstance(_sec_end, int) is True) and (_sec_start >= _sec_end):
raise ValueError('The parameter _sec_start should be lower than the parameter _sec_end.')
self.path_video = _path_video
self._open()
self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.channel = 3
self.fps = float(self.cap.get(cv2.CAP_PROP_FPS))
self.num_frames_ori = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
self.length_ori = int(self.num_frames_ori / self.fps)
self.orientation = self._get_orientation()
if (_sec_end is not None) and (self.length_ori <= _sec_end):
_sec_end = None
if (_sec_start is None) or ((_sec_start is not None) and (_sec_start < 0)):
self.frame_start = 0
self.sec_start = 0
else:
self.sec_start = _sec_start
self.frame_start = int(self.sec_start * self.fps)
if (_sec_end is None) or ((_sec_end is not None) and (_sec_end < 0)):
self.frame_end = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1
self.sec_end = int(self.frame_end / self.fps)
else:
self.sec_end = _sec_end
self.frame_end = int(self.sec_end * self.fps)
self.num_frames = self.frame_end - self.frame_start + 1
self.idx_frame_curr = (self.frame_start - 1)
self.num_frames_remain = self.frame_end - self.frame_start + 1
self.frame_timestamps = []
self.is_eof = False
self._set(_idx_frame=self.frame_start)
def _is_open(self) -> bool:
return self.cap.isOpened()
def _open(self) -> None:
self.cap = cv2.VideoCapture(self.path_video)
if self._is_open() is False:
raise ValueError('The video capture is not opened.')
def _cal_eof(self) -> None:
self.is_eof = (self.frame_end <= self.idx_frame_curr)
def _set(self, _idx_frame: int) -> None:
'''
        :param _idx_frame: Interval: [0, self.frame_end]
'''
if self.frame_end < _idx_frame:
raise ValueError('The parameter, _idx_frame, should be lower than self.frame_end.')
if self.idx_frame_curr <= _idx_frame:
for idx in range((_idx_frame - self.idx_frame_curr) - 1):
if self.is_eof is True:
break
self._read(_is_record_timestamp=False)
else:
self.cap.set(cv2.CAP_PROP_POS_FRAMES, _idx_frame)
self.idx_frame_curr = (_idx_frame - 1)
        self.num_frames_remain = self._update_num_frames_remain()
self._cal_eof()
def _read(self, _is_record_timestamp: bool = True) -> np.ndarray:
ret, frame = self.cap.read()
if _is_record_timestamp is True:
self._timestamps()
self.idx_frame_curr += 1
        self.num_frames_remain = self._update_num_frames_remain()
self._cal_eof()
return frame
    def _update_num_frames_remain(self) -> int:
return self.frame_end - self.idx_frame_curr
def _timestamps(self) -> None:
self.frame_timestamps.append(self.cap.get(cv2.CAP_PROP_POS_MSEC))
def _get_orientation(self) -> str:
"""
Function to get the rotation of the input video file.
        Adapted from gist.github.com/oldo/dc7ee7f28851922cca09/revisions using the ffprobe command by <NAME> from
        stackoverflow.com/questions/5287603/how-to-extract-orientation-information-from-videos?noredirect=1&lq=1
        Reads the rotation tag (0, 90, 180 or 270) and returns the orientation, 'horizontal' or 'vertical'.
"""
cmd = "ffprobe -loglevel error -select_streams v:0 -show_entries stream_tags=rotate -of default=nw=1:nk=1"
args = shlex.split(cmd)
args.append(self.path_video)
ffprobe_output = subprocess.check_output(args).decode('utf-8')
        if len(ffprobe_output) > 0: # Output of cmd is empty when no rotation tag is present (i.e. rotation 0)
ffprobe_output = json.loads(ffprobe_output)
rotation = ffprobe_output
else:
rotation = 0
if (rotation == 0) or (rotation == 180):
orientation = 'horizontal'
else: # (rotation == 90) or (rotation == 270):
orientation = 'vertical'
return orientation
def imread(self, _num_batch_frames: int = 1, _trans: tuple = None, _set_idx_frame: int = None, _dsize: tuple = None, _color_code: int = None, _interpolation: int = cv2.INTER_LINEAR) -> np.ndarray:
if (_dsize is None) or (_dsize == (self.width, self.height)):
is_resize = False
width = self.width
height = self.height
else:
is_resize = True
width = _dsize[0]
height = _dsize[1]
if _color_code is None:
channel = self.channel
elif _color_code == cv2.COLOR_BGR2GRAY:
channel = 1
else:
raise NotImplementedError
if (_set_idx_frame is not None) and (0 <= _set_idx_frame):
self._set(_set_idx_frame)
if _num_batch_frames <= self.num_frames_remain:
frames = np.zeros(shape=(_num_batch_frames, height, width, channel), dtype=np.uint8)
num_batch_frames = _num_batch_frames
else:
frames = np.zeros(shape=(self.num_frames_remain, height, width, channel), dtype=np.uint8)
num_batch_frames = self.num_frames_remain
for idx in range(num_batch_frames):
if self.is_eof is True:
break
if is_resize is False:
temp = self._read(_is_record_timestamp=True)
else:
temp = cv2.resize(src=self._read(_is_record_timestamp=True), dsize=(width, height), interpolation=_interpolation)
if _color_code is None:
frames[idx, :, :, :] = temp
elif channel == 1:
frames[idx, :, :, 0] = cv2.cvtColor(temp, code=_color_code)
else:
raise NotImplementedError
if _trans is not None:
frames = frames.transpose(_trans)
return frames
def get_timestamp(self) -> list:
while (self.is_eof is False):
self.imread(_num_batch_frames=1, _trans=None)
return self.frame_timestamps
def close(self) -> None:
self.cap.release()
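# Illustrative usage of VideoReaderCV (not part of the original module; the
# file path and batch size are hypothetical): read the first ten seconds in
# batches of 32 frames, transposed to (N, C, H, W).
#   reader = VideoReaderCV(_path_video='./video.mp4', _sec_start=0, _sec_end=10)
#   while reader.is_eof is False:
#       batch = reader.imread(_num_batch_frames=32, _trans=(0, 3, 1, 2))
#   reader.close()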
class VideoWriterCV:
def __init__(self, _path_video, _resolution=(1920, 1080), _fps=30.0, _fourcc=cv2.VideoWriter_fourcc(*'MJPG')):
if _path_video is None:
raise ValueError('The variable, _path_video, should be assigned.')
self.path_video = _path_video
self.width = int(_resolution[0])
self.height = int(_resolution[1])
self.fps = float(_fps)
self.fourcc = _fourcc
self.wri = self._open()
def imwrite(self, _list_img):
for idx, img in enumerate(_list_img):
self.wri.write(image=img)
def _open(self):
return cv2.VideoWriter(self.path_video, self.fourcc, self.fps, (self.width, self.height))
def close(self):
self.wri.release()
class SceneChangeDetectorFFmpeg:
"""This class is intended to detect scene change for the given video.
The reference is as follows: https://rusty.today/posts/ffmpeg-scene-change-detector.
The corresponding FFmpeg is as below.
i) ffmpeg -i _path_video -filter:v "select='gt(scene, 0.4)', showinfo" -f null - 2> ffout.log
ii) grep showinfo ffout.log | grep pts_time:[0-9.]* -o | grep [0-9.]* -o > ffout_scene_change_detection.log
"""
def __init__(self, _threshold: float = 0.4):
self.threshold = _threshold
self.cmd = None
self.offset = 9
def get_frame_index(self, _path_video: str) -> list:
vid_src = VideoReaderCV(_path_video=_path_video)
vid_src_timestamp = self._convert(_list=vid_src.get_timestamp(), _unit=1000, _decimals=4)
command = self._get_command(_path_video=_path_video)
str_stdout, str_stderr = utils_.run_command_stdout(_command=command)
idx_start = utils_.find_substr(_str_src=str_stderr, _str_sub='pts_time:')
idx_end = utils_.find_substr(_str_src=str_stderr, _str_sub=' pos:')
scd_timestamp = []
for idx, (_idx_start, _idx_end) in enumerate(zip(idx_start, idx_end)):
scd_timestamp.append(float(str_stderr[_idx_start + self.offset:_idx_end]))
res = utils_.list_matching_idx(_list_1=self._convert(_list=scd_timestamp, _unit=1.0, _decimals=4), _list_2=vid_src_timestamp)
return res
def _get_command(self, _path_video: str) -> list:
return ["ffmpeg", "-i", _path_video, "-filter:v", "select='gt(scene, {})', showinfo".format(self.threshold), "-f", "null", "pipe:1"]
def _convert(self, _list, _unit=1.0, _decimals=4):
return list(np.round(np.array(_list) / _unit, _decimals))
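# Illustrative usage (not part of the original module; the file path is
# hypothetical):
#   scd = SceneChangeDetectorFFmpeg(_threshold=0.4)
#   idx_scene_change = scd.get_frame_index(_path_video='./video.mp4')
# idx_scene_change lists the frame indices whose FFmpeg scene score exceeds 0.4.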
class SceneChangeDetectorFFmpegInteractiveProcessing:
"""This class is intended to detect scene change for the given video.
The reference is as follows: https://rusty.today/posts/ffmpeg-scene-change-detector.
The corresponding FFmpeg is as below.
i) ffmpeg -i _path_video -filter:v "select='gt(scene, 0.4)', showinfo" -f null - 2> ffout.log
ii) grep showinfo ffout.log | grep pts_time:[0-9.]* -o | grep [0-9.]* -o > ffout_scene_change_detection.log
"""
def __init__(self, _dsize: tuple = None, _threshold: float = 0.4, _is_cython: bool = True):
"""
:param tuple _dsize: An image size for computation
        :param float _threshold: A threshold value to determine whether a scene change occurs
:param bool _is_cython: A boolean variable to decide whether to use cython
"""
if _dsize is None:
raise ValueError('The argument should be tuple, not None.')
self.dsize = _dsize
self.threshold = _threshold
self.is_cython = _is_cython
self.width = _dsize[0]
self.height = _dsize[1]
self.nb_sad = 3 * self.height * self.width
self.cnt_call = 0
self.mafd_prev = None
self.mafd_curr = None
self.diff_curr = None
self.scence_change_val = None
self.res = None
self.ndarr_frame_curr = None
self.ndarr_frame_ref = None
def get_frame_index(self, _path_video: str) -> list:
"""
:param str _path_video: A path for the given video file
:returns: A list containing the frame index information where the scene change occurs
"""
res = []
vid_src = VideoReaderCV(_path_video=_path_video)
for idx in range(int(vid_src.num_frames)):
ndarr_frame = np.squeeze(vid_src.imread(_num_batch_frames=1, _trans=None, _dsize=self.dsize))
is_scene_change = self.run(_ndarr_frame=ndarr_frame, _be_float32=True)
if is_scene_change is True:
res.append(idx)
return res
def run(self, _ndarr_frame: np.ndarray, _be_float32: bool = True) -> bool:
if _ndarr_frame is None:
raise ValueError('The given ndarr_frame should be assigned.')
if _be_float32 is True:
self.ndarr_frame_curr = _ndarr_frame.astype(np.float32)
else:
self.ndarr_frame_curr = _ndarr_frame
self._get_scene_change_score()
self._detect_scene_change()
return self.res
def _get_scene_change_score(self) -> None:
if self.cnt_call == 0:
pass
elif self.cnt_call == 1:
self._check_dimension()
self._get_mafd()
else:
self._check_dimension()
self._get_mafd()
self._get_diff()
self.scence_change_val = self._calculate_scene_change_value()
self._update()
def _detect_scene_change(self) -> None:
if self.scence_change_val is None:
self.res = None # Pending
else:
if self.threshold <= self.scence_change_val:
self.res = True # Scene change
else:
self.res = False # No scene change
def _check_dimension(self) -> None:
if self.is_cython is True:
scd_ip_.check_dimension(_ndarr_1=self.ndarr_frame_curr, _ndarr_2=self.ndarr_frame_ref)
else:
if self.ndarr_frame_curr.shape != self.ndarr_frame_ref.shape:
raise ValueError('The given both frames should have equal shape.')
def _get_mafd(self) -> None:
if self.is_cython is True:
self.mafd_curr = scd_ip_.mafd(_ndarr_1=self.ndarr_frame_curr, _ndarr_2=self.ndarr_frame_ref, _nb_sad=self.nb_sad)
else:
sad = self._get_sad()
if self.nb_sad == 0:
self.mafd_curr = 0.0
else:
self.mafd_curr = sad / self.nb_sad
def _get_diff(self) -> None:
if self.is_cython is True:
self.diff_curr = scd_ip_.diff(_val_1=self.mafd_curr, _val_2=self.mafd_prev)
else:
self.diff_curr = abs(self.mafd_curr - self.mafd_prev)
def _calculate_scene_change_value(self) -> float:
if self.is_cython is True:
res = scd_ip_.calculate_scene_change_value(_mafd=self.mafd_curr, _diff=self.diff_curr, _min=0.0, _max=1.0)
else:
res = self._clip(_val=min(self.mafd_curr, self.diff_curr) / 100.0, _min=0.0, _max=1.0)
return res
def _get_sad(self) -> np.ndarray:
return np.sum(np.fabs(self.ndarr_frame_curr - self.ndarr_frame_ref))
def _clip(self, _val, _min=0.0, _max=1.0) -> float:
if _val <= _min:
_val = _min
if _max <= _val:
_val = _max
return _val
def _update(self) -> None:
self.ndarr_frame_ref = self.ndarr_frame_curr
self.mafd_prev = self.mafd_curr
self.cnt_call += 1
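# Summary of the per-frame score computed by the pure-Python path above
# (illustrative restatement, not new behaviour):
#   mafd_t  = SAD(frame_t, frame_{t-1}) / (3 * H * W)
#   diff_t  = |mafd_t - mafd_{t-1}|
#   score_t = clip(min(mafd_t, diff_t) / 100, 0, 1)
# A scene change is reported for frame t when threshold <= score_t.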
class SceneChangeDetectorFFmpegBatchProcessing:
"""This class is intended to detect scene change for the given video.
The reference is as follows: https://rusty.today/posts/ffmpeg-scene-change-detector.
The corresponding FFmpeg is as below.
i) ffmpeg -i _path_video -filter:v "select='gt(scene, 0.4)', showinfo" -f null - 2> ffout.log
ii) grep showinfo ffout.log | grep pts_time:[0-9.]* -o | grep [0-9.]* -o > ffout_scene_change_detection.log
"""
def __init__(self, _dsize: tuple = None, _threshold: float = 0.4, _is_gray: bool = True, _unit_computation: int = 1800, _is_cython: bool = True):
"""
:param tuple _dsize: An image size for computation
        :param float _threshold: A threshold value to determine whether a scene change occurs
        :param bool _is_gray: A boolean variable to decide whether to operate on grayscale frames
        :param int _unit_computation: The number of frames to process per batch
:param bool _is_cython: A boolean variable to decide whether to use cython
"""
if _dsize is None:
raise ValueError('The argument should be tuple, not None.')
self.dsize = _dsize
self.threshold = _threshold
self.is_gray = _is_gray
self.unit_computation = _unit_computation
self.is_cython = _is_cython
self.width = _dsize[0]
self.height = _dsize[1]
if self.is_gray is True:
self.channel = 1
self.color_code = cv2.COLOR_BGR2GRAY
else:
self.channel = 3
self.color_code = None
self.nb_sad = self.channel * self.height * self.width
if self.nb_sad == 0:
raise ValueError('The self.nb_sad should be positive.')
def get_frame_index(self, _path_video: str) -> list:
"""
:param str _path_video: A path for the given video file
:returns: A list containing the frame index information where the scene change occurs
"""
res = []
vid_src = VideoReaderCV(_path_video=_path_video)
while (vid_src.is_eof is False):
offset = (vid_src.idx_frame_curr - 1)
if offset < 0:
offset = 0
ndarr_frames = vid_src.imread(_num_batch_frames=self.unit_computation, _trans=(0, 3, 1, 2),
_set_idx_frame=(vid_src.idx_frame_curr - 1),
_dsize=(self.width, self.height),
_color_code=self.color_code)
mafd = self._get_mafd(_ndarr_frames=ndarr_frames)
diff = self._get_diff(_mafd=mafd)
scene_change_val = self._calculate_scene_change_value(_mafd=mafd, _diff=diff)
idx_sc = self._get_idx_sc(_scene_change_val=scene_change_val, _threshold=self.threshold, _offset=offset)
res.extend(list(idx_sc))
return res
def _get_mafd(self, _ndarr_frames) -> np.ndarray:
if self.is_cython is True:
res = scd_bp_.mafd(_ndarr_1=_ndarr_frames[1:, :, :, :].astype(np.int16), _ndarr_2=_ndarr_frames[:-1, :, :, :].astype(np.int16), _nb_sad=self.nb_sad)
else:
res = (np.sum(np.abs(_ndarr_frames[1:, :, :, :].astype(np.int16) - _ndarr_frames[:-1, :, :, :].astype(np.int16)), axis=(1, 2, 3))) / self.nb_sad
return res
def _get_diff(self, _mafd) -> np.ndarray:
if self.is_cython is True:
res = scd_bp_.diff(_mafd_1=_mafd[1:], _mafd_2=_mafd[:-1])
else:
res = np.abs(_mafd[1:] - _mafd[:-1])
return res
def _calculate_scene_change_value(self, _mafd, _diff) -> np.ndarray:
if self.is_cython is True:
res = scd_bp_.calculate_scene_change_value(_mafd=_mafd[1:], _diff=_diff, _min=0.0, _max=1.0)
else:
res = np.clip(
|
np.minimum(_mafd[1:], _diff)
|
numpy.minimum
|
# Copyright 2021 Population Health Sciences and Image Analysis, German Center for Neurodegenerative Diseases(DZNE)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
sys.path.append('/../../')
sys.path.append('../../../')
import numpy as np
import os
#os.environ['MPLCONFIGDIR'] = "./tmp"
#nilearn-0.7.1
def get_cmap():
import matplotlib.colors
colors=[[0,0,1],[1,0,0]]
cmap = matplotlib.colors.ListedColormap(colors)
return cmap
def plot_coronal_predictions(images_batch=None,pred_batch=None,img_per_row=4):
from skimage import color
import matplotlib.pyplot as plt
import torch
from torchvision import utils
plt.ioff()
DEFAULT_COLORS= [(0,0,255),(255,0,0)] # 1: blue 2 : red
FIGSIZE = 2
FIGDPI = 100
ncols=2
nrows=1
fig, ax = plt.subplots(nrows,ncols)
grid_size=(images_batch.shape[0]/img_per_row,img_per_row)
# adjust layout
fig.set_size_inches([FIGSIZE * ncols * grid_size[1] , FIGSIZE * nrows * grid_size[0]])
fig.set_dpi(FIGDPI)
fig.set_facecolor('black')
fig.set_tight_layout({'pad': 0})
fig.subplots_adjust(wspace=0,hspace=0)
pos=0
images = torch.from_numpy(images_batch.copy())
images = torch.unsqueeze(images,1)
grid = utils.make_grid(images.cpu(), nrow=img_per_row,normalize=True)
#ax[pos].imshow(grid.numpy().transpose(1, 2, 0), cmap='gray',origin='lower')
ax[pos].imshow(np.fliplr(grid.numpy().transpose(1, 2, 0)), cmap='gray', origin='lower')
ax[pos].set_axis_off()
ax[pos].set_aspect('equal')
ax[pos].margins(0, 0)
ax[pos].set_title('T2 input image (1 to N)',color='white')
pos += 1
pred=torch.from_numpy(pred_batch.copy())
pred = torch.unsqueeze(pred, 1)
pred_grid = utils.make_grid(pred.cpu(), nrow=img_per_row)[0] #dont take the channels axis from grid
#pred_grid=color.label2rgb(pred_grid.numpy(),grid.numpy().transpose(1, 2, 0),alpha=0.6,bg_label=0,colors=DEFAULT_COLORS)
pred_grid = color.label2rgb(pred_grid.numpy(), grid.numpy().transpose(1, 2, 0), alpha=0.6, bg_label=0,bg_color=None,
colors=DEFAULT_COLORS)
#ax[pos].imshow(pred_grid,origin='lower')
ax[pos].imshow(np.fliplr(pred_grid), origin='lower')
ax[pos].set_axis_off()
ax[pos].set_aspect('equal')
ax[pos].margins(0, 0)
ax[pos].set_title('Predictions (1 to N). Left OB (blue); Right OB (Red)',color='white')
ax[pos].margins(0, 0)
return fig
def plot_qc_images(save_dir,image,prediction,padd=30):
from scipy import ndimage
from nilearn import plotting
from utils import image_utils
qc_dir=os.path.join(save_dir,'QC')
plotting.plot_roi(bg_img=image, roi_img=prediction,
display_mode='ortho', output_file=os.path.join(qc_dir,'overall_screenshot.png'), draw_cross=False,
cmap=get_cmap())
plane = 'coronal'
mod_image = image_utils.plane_swap(image.get_fdata(), plane)
mod_pred = image_utils.plane_swap(prediction.get_fdata(), plane)
idx = np.where(mod_pred > 0)
idx =
|
np.unique(idx[0])
|
numpy.unique
|
#
# BSD 3-Clause License
#
# Copyright (c) 2020, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import numpy as np
import pandas as pd
from sklearn.utils.validation import check_random_state
from scipy.special import gammainc
def hyperBall(n, d, radius=1.0, center=[], random_state=None):
"""
Generates a sample from a uniform distribution on the hyperball
Parameters
----------
n: int
Number of data points.
d: int
Dimension of the hyperball
radius: float
Radius of the hyperball
center: list, tuple, np.array
Center of the hyperball
random_state: int, np.random.RandomState instance
Random number generator
Returns
-------
data: np.array, (npoints x ndim)
Generated data
"""
random_state_ = check_random_state(random_state)
if center == []:
center = np.array([0] * d)
r = radius
x = random_state_.normal(size=(n, d))
ssq = np.sum(x ** 2, axis=1)
fr = r * gammainc(d / 2, ssq / 2) ** (1 / d) / np.sqrt(ssq)
frtiled = np.tile(fr.reshape(n, 1), (1, d))
p = center + np.multiply(x, frtiled)
return p
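def _example_hyperBall():
    # Illustrative sketch (not part of the original module): draw 1000 points
    # uniformly from a 3D ball of radius 2 and check that they all lie inside it.
    pts = hyperBall(n=1000, d=3, radius=2.0, random_state=0)
    assert pts.shape == (1000, 3)
    assert np.all(np.linalg.norm(pts, axis=1) <= 2.0 + 1e-12)
    return pts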
def hyperSphere(n, d, random_state=None):
"""
Generates a sample from a uniform distribution on the hypersphere
Parameters
----------
n: int
Number of data points.
d: int
Dimension of the hypersphere
center: list, tuple, np.array
Center of the hypersphere
random_state: int, np.random.RandomState instance
Random number generator
Returns
-------
data: np.array, (npoints x ndim)
Generated data
"""
random_state = check_random_state(random_state)
vec = random_state.randn(n, d)
vec /= np.linalg.norm(vec, axis=1)[:, None]
return vec
def hyperTwinPeaks(n, d=2, height=1.0, random_state=None):
"""
Generates a sample from a plane with protruding peaks. Translated from <NAME>'s R package intrinsicDimension
Parameters
----------
n: int
Number of data points.
d: int
Dimension of the dataset
height: float
Height of the peaks
random_state: int, np.random.RandomState instance
Random number generator
Returns
-------
data: np.array, (npoints x ndim)
Generated data
"""
random_state = check_random_state(random_state)
base_coord = random_state.uniform(size=(n, d))
_height = height * np.prod(np.sin(2 * np.pi * base_coord), axis=1, keepdims=1)
return np.hstack((base_coord, _height))
def lineDiskBall(n, random_state=None):
"""
Generates a sample from a uniform distribution on a line, an oblong disk and an oblong ball
Translated from ldbl function in Hideitsu Hino's package
Parameters
----------
n: int
Number of data points.
random_state: int, np.random.RandomState instance
Random number generator
Returns
-------
data: np.array, (npoints x ndim)
Generated data
"""
random_state = check_random_state(random_state)
line = np.hstack(
(
np.repeat(0, 5 * n)[:, None],
np.repeat(0, 5 * n)[:, None],
random_state.uniform(-0.5, 0, size=5 * n)[:, None],
)
)
disc = np.hstack(
(random_state.uniform(-1, 1, (13 * n, 2)), np.zeros(13 * n)[:, None],)
)
disc = disc[~(np.sqrt(np.sum(disc ** 2, axis=1)) > 1), :]
disc = disc[:, [0, 2, 1]]
disc[:, 2] = disc[:, 2] - min(disc[:, 2]) + max(line[:, 2])
fb = random_state.uniform(-0.5, 0.5, size=(n * 100, 3))
rmID = np.where(np.sqrt(np.sum(fb ** 2, axis=1)) > 0.5)[0]
if len(rmID) > 0:
fb = fb[~(np.sqrt(np.sum(fb ** 2, axis=1)) > 0.5), :]
fb = np.hstack((fb[:, :2], fb[:, [2]] + 0.5))
fb[:, 2] = fb[:, 2] - min(fb[:, 2]) + max(disc[:, 2])
# if _sorted:
# fb = fb[order(fb[:, 2]),:]
line2 = np.hstack(
(
np.repeat(0, 5 * n)[:, None],
np.repeat(0, 5 * n)[:, None],
random_state.uniform(-0.5, 0, size=5 * n)[:, None],
)
)
line2[:, 2] = line2[:, 2] - min(line2[:, 2]) + max(fb[:, 2])
lineID = np.repeat(1, len(line))
discID = np.repeat(2, len(disc))
fbID = np.repeat(3, len(fb))
line2ID = np.repeat(1, len(line2))
x = np.vstack((line, disc, fb, line2))
useID = np.sort(random_state.choice(len(x), n, replace=False))
x = x[useID, :]
return x, np.concatenate((lineID, discID, fbID, line2ID), axis=0)[useID]
def swissRoll3Sph(n_swiss, n_sphere, a=1, b=2, nturn=1.5, h=4, random_state=None):
"""
Generates a sample from a uniform distribution on a Swiss roll-surface,
possibly together with a sample from a uniform distribution on a 3-sphere
inside the Swiss roll. Translated from <NAME>'s R package intrinsicDimension
Parameters
----------
n_swiss: int
Number of data points on the Swiss roll.
n_sphere: int
Number of data points on the 3-sphere.
a: int or float, default=1
Minimal radius of Swiss roll and radius of 3-sphere.
b: int or float, default=2
Maximal radius of Swiss roll.
nturn: int or float, default=1.5
Number of turns of the surface.
h: int or float, default=4
Height of Swiss roll.
Returns
-------
data: np.array, (npoints x ndim)
Generated data
"""
random_state = check_random_state(random_state)
if n_swiss > 0:
omega = 2 * np.pi * nturn
def dl(r):
return np.sqrt(b ** 2 + omega ** 2 * (a + b * r) ** 2)
ok = np.zeros(1)
while sum(ok) < n_swiss:
r_samp = random_state.uniform(size=3 * n_swiss)
ok = random_state.uniform(size=3 * n_swiss) < dl(r_samp) / dl(1)
r_samp = r_samp[ok][:n_swiss]
x = (a + b * r_samp) * np.cos(omega * r_samp)
y = (a + b * r_samp) * np.sin(omega * r_samp)
z = random_state.uniform(-h, h, size=n_swiss)
w = np.zeros(n_swiss)
else:
x = y = z = w = np.array([])
if n_sphere > 0:
sph = hyperSphere(n_sphere, 4, random_state=random_state) * a
x = np.concatenate((x, sph[:, 0]))
y = np.concatenate((y, sph[:, 1]))
z = np.concatenate((z, sph[:, 2]))
w = np.concatenate((w, sph[:, 3]))
return np.hstack((x[:, None], y[:, None], z[:, None], w[:, None]))
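def _example_swissRoll3Sph():
    # Illustrative sketch (not part of the original module): 500 points on the
    # Swiss roll plus 100 points on the embedded 3-sphere give a (600, 4) array
    # (the fourth coordinate is zero for the roll points).
    data = swissRoll3Sph(n_swiss=500, n_sphere=100, random_state=0)
    assert data.shape == (600, 4)
    return data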
class BenchmarkManifolds:
"""
Generates a commonly used benchmark set of synthetic manifolds with known intrinsic dimension described by Hein et al. and Campadelli et al. [Campadelli2015]_
Parameters
----------
    noise_type : str, 'uniform' or 'normal'
Type of noise to generate
"""
# class modified and adapted from https://github.com/stat-ml/GeoMLE
# Original licence citation:
# MIT License
#
# Copyright (c) 2019 <NAME>, <NAME>, <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def __init__(self, random_state: int = None, noise_type: str = "uniform"):
self.random_state = check_random_state(random_state)
self.noise_type = noise_type
self._dict_truth = {
"M1_Sphere": (10, 11, "10D sphere linearly embedded"),
"M2_Affine_3to5": (3, 5, "Affine space"),
"M3_Nonlinear_4to6": (
4,
6,
"Concentrated figure, mistakable with a 3D one",
),
"M4_Nonlinear": (4, 8, "Nonlinear manifold"),
"M5a_Helix1d": (1, 3, "1D helix"),
"M5b_Helix2d": (2, 3, "2D helix"),
"M6_Nonlinear": (6, 36, "Nonlinear manifold"),
"M7_Roll": (2, 3, "Swiss Roll"),
"M8_Nonlinear": (12, 72, "Nonlinear (highly curved) manifold"),
"M9_Affine": (20, 20, "Affine space"),
"M10a_Cubic": (10, 11, "10D hypercube"),
"M10b_Cubic": (17, 18, "17D hypercube"),
"M10c_Cubic": (24, 25, "24D hypercube"),
"M10d_Cubic": (70, 71, "70D hypercube"),
"M11_Moebius": (2, 3, "Möebius band 10-times twisted"),
"M12_Norm": (20, 20, "Isotropic multivariate Gaussian"),
"M13a_Scurve": (2, 3, "2D S-curve"),
"M13b_Spiral": (1, 13, "1D helix curve"),
"Mbeta": (
10,
40,
"Manifold generated with a smooth nonuniform pdf (see paper for description)",
),
"Mn1_Nonlinear": (
18,
72,
"Nonlinearly embedded manifold of high ID (see paper for description)",
),
"Mn2_Nonlinear": (
24,
96,
"Nonlinearly embedded manifold of high ID (see paper for description)",
),
"Mp1_Paraboloid": (
3,
12,
"3D paraboloid, nonlinearly embedded in (3(3+1))D space, according to a multivariate Burr distribution (alpha=1)",
),
"Mp2_Paraboloid": (
6,
21,
"6D paraboloid, nonlinearly embedded in (3*(6+1))D space, according to a multivariate Burr distribution (alpha=1)",
),
"Mp3_Paraboloid": (
9,
30,
"9D paraboloid, nonlinearly embedded in (3*(9+1))D space, according to a multivariate Burr distribution (alpha=1)",
),
}
self.truth = pd.DataFrame(
self._dict_truth,
index=["Intrinsic Dimension", "Number of variables", "Description"],
).T
self.dict_gen = {
# synthetic data
"M1_Sphere": self._gen_sphere_data,
"M2_Affine_3to5": self._gen_affine3_5_data,
"M3_Nonlinear_4to6": self._gen_nonlinear4_6_data,
"M4_Nonlinear": self._gen_nonlinear_data,
"M5a_Helix1d": self._gen_helix1_data,
"M5b_Helix2d": self._gen_helix2_data,
"M6_Nonlinear": self._gen_nonlinear_data,
"M7_Roll": self._gen_roll_data,
"M8_Nonlinear": self._gen_nonlinear_data,
"M9_Affine": self._gen_affine_data,
"M10a_Cubic": self._gen_cubic_data,
"M10b_Cubic": self._gen_cubic_data,
"M10c_Cubic": self._gen_cubic_data,
"M10d_Cubic": self._gen_cubic_data,
"M11_Moebius": self._gen_moebius_data,
"M12_Norm": self._gen_norm_data,
"M13a_Scurve": self._gen_scurve_data,
"M13b_Spiral": self._gen_spiral_data,
"Mbeta": self._gen_campadelli_beta_data,
"Mn1_Nonlinear": self._gen_campadelli_n_data,
"Mn2_Nonlinear": self._gen_campadelli_n_data,
"Mp1_Paraboloid": self._gen_paraboloid_data,
"Mp2_Paraboloid": self._gen_paraboloid_data,
"Mp3_Paraboloid": self._gen_paraboloid_data,
}
def generate(
self,
name: str = "all",
n: int = 2500,
dim: int = None,
d: int = None,
noise: float = 0.0,
):
"""
Generates all datasets. A ground truth dict of intrinsic dimension and embedding dimension is in BenchmarkManifolds.dict_truth.keys()
Parameters
----------
n: int
The number of sample points
dim: int
If generating a single dataset, choose the embedding dimension. Note that some datasets have restrictions on the chosen embedding dimension
d: int
If generating a single dataset, choose the intrinsic dimension. Note that some datasets have restrictions on the chosen intrinsic dimension
noise: float, optional(default=0.0)
The value of noise in data
Returns
-------
data: a dict of np.arrays or a single np.array with shape (n, dim)
Generated data
"""
if self.noise_type == "normal":
self.gen_noise = (
lambda n, dim, noise: (self.random_state.randn(n, dim)) * noise
)
if self.noise_type == "uniform":
self.gen_noise = (
lambda n, dim, noise: (self.random_state.rand(n, dim) - 0.5) * noise
)
dict_data = {}
if name == "all":
for k, (d, dim, _) in self._dict_truth.items():
data = self.dict_gen[k](n=n, dim=dim, d=d)
dict_data[k] = data + self.gen_noise(n, dim, noise)
return dict_data
elif name in self._dict_truth.keys():
if dim is None and d is None:
data = self.dict_gen[name](n=n)
elif dim is None:
data = self.dict_gen[name](n=n, d=d)
elif d is None:
data = self.dict_gen[name](n=n, dim=dim)
            else:
                data = self.dict_gen[name](n=n, dim=dim, d=d)
return data + self.gen_noise(n, dim, noise)
def _gen_spiral_data(self, n, dim=3, d=1):
assert d < dim
assert d == 1
assert dim >= 3
t = 10 * np.pi * self.random_state.rand(n)
data = np.vstack(
[100 * np.cos(t), 100 * np.sin(t), t, np.zeros((dim - 3, n))]
).T
assert data.shape == (n, dim)
return data
def _gen_helix1_data(self, n, dim=3, d=1):
assert d < dim
assert d == 1
assert dim >= 3
t = 2 * np.pi / n + self.random_state.rand(n) * 2 * np.pi
data = np.vstack(
[
(2 + np.cos(8 * t)) * np.cos(t),
(2 + np.cos(8 * t)) * np.sin(t),
np.sin(8 * t),
np.zeros((dim - 3, n)),
]
).T
assert data.shape == (n, dim)
return data
def _gen_helix2_data(self, n, dim=3, d=2):
assert d < dim
assert d == 2
assert dim >= 3
r = 10 * np.pi * self.random_state.rand(n)
p = 10 * np.pi * self.random_state.rand(n)
data = np.vstack(
[r * np.cos(p), r * np.sin(p), 0.5 * p, np.zeros((dim - 3, n))]
).T
assert data.shape == (n, dim)
return data
def _gen_helicoid_data(self, n, dim=3, d=2):
assert d <= dim
assert d == 2
assert dim >= 3
u = 2 * np.pi / n + self.random_state.rand(n) * 2 * np.pi
v = 5 * np.pi * self.random_state.rand(n)
data = np.vstack(
[np.cos(v), np.sin(v) * np.cos(v), u, np.zeros((dim - 3, n))]
).T
assert data.shape == (n, dim)
return data
def _gen_roll_data(self, n, dim=3, d=2):
assert d < dim
assert dim >= 3
assert d == 2
t = 1.5 * np.pi * (1 + 2 * self.random_state.rand(n))
p = 21 * self.random_state.rand(n)
data = np.vstack(
[t * np.cos(t), p, t * np.sin(t), np.zeros((dim - d - 1, n))]
).T
assert data.shape == (n, dim)
return data
def _gen_scurve_data(self, n, dim=3, d=2):
assert d < dim
assert dim >= 3
assert d == 2
t = 3 * np.pi * (self.random_state.rand(n) - 0.5)
p = 2.0 * self.random_state.rand(n)
data = np.vstack(
[np.sin(t), p, np.sign(t) * (np.cos(t) - 1), np.zeros((dim - d - 1, n)),]
).T
assert data.shape == (n, dim)
return data
def _gen_sphere_data(self, n, dim, d):
assert d < dim
V = self.random_state.randn(n, d + 1)
data = np.hstack(
[V / np.sqrt((V ** 2).sum(axis=1))[:, None], np.zeros((n, dim - d - 1)),]
)
assert data.shape == (n, dim)
return data
def _gen_norm_data(self, n, dim, d):
assert d <= dim
norm_xyz = self.random_state.multivariate_normal(np.zeros(d), np.identity(d), n)
data = np.hstack([norm_xyz, np.zeros((n, dim - d))])
assert data.shape == (n, dim)
return data
def _gen_uniform_data(self, n, dim, d):
assert d <= dim
uniform_xyz = self.random_state.uniform(size=(n, d))
data = np.hstack([uniform_xyz, np.zeros((n, dim - d))])
assert data.shape == (n, dim)
return data
def _gen_cubic_data(self, n, dim, d):
assert d < dim
cubic_data = np.array([[]] * (d + 1))
for i in range(d + 1):
n_once = int(n / (2 * (d + 1)) + 1)
# 1st side
data_once = self.random_state.rand(d + 1, n_once)
data_once[i] = 0
cubic_data = np.hstack([cubic_data, data_once])
# 2nd side
data_once = self.random_state.rand(d + 1, n_once)
data_once[i] = 1
cubic_data = np.hstack([cubic_data, data_once])
cubic_data = cubic_data.T[:n]
data = np.hstack([cubic_data, np.zeros((n, dim - d - 1))])
assert data.shape == (n, dim)
return data
def _gen_moebius_data(self, n, dim=3, d=2):
assert dim == 3
assert d == 2
phi = self.random_state.rand(n) * 2 * np.pi
rad = self.random_state.rand(n) * 2 - 1
data = np.vstack(
[
(1 + 0.5 * rad * np.cos(5.0 * phi)) * np.cos(phi),
(1 + 0.5 * rad * np.cos(5.0 * phi)) * np.sin(phi),
0.5 * rad * np.sin(5.0 * phi),
]
).T
assert data.shape == (n, dim)
return data
def _gen_affine_data(self, n, dim, d):
assert dim >= d
p = self.random_state.rand(d, n) * 5 - 2.5
v = np.eye(dim, d)
# v = np.random.randint(0, 10, (dim, d))
data = v.dot(p).T
assert data.shape == (n, dim)
return data
def _gen_affine3_5_data(self, n, dim=5, d=3):
assert dim == 5
assert d == 3
p = 4 * self.random_state.rand(d, n)
A = np.array(
[
[1.2, -0.5, 0],
[0.5, 0.9, 0],
[-0.5, -0.2, 1],
[0.4, -0.9, -0.1],
[1.1, -0.3, 0],
]
)
b = np.array([[3, -1, 0, 0, 8]]).T
data = A.dot(p) + b
data = data.T
assert data.shape == (n, dim)
return data
def _gen_nonlinear4_6_data(self, n, dim=6, d=4):
assert dim == 6
assert d == 4
p0, p1, p2, p3 = self.random_state.rand(d, n)
data = np.vstack(
[
p1 ** 2 * np.cos(2 * np.pi * p0),
p2 ** 2 * np.sin(2 * np.pi * p0),
p1 + p2 + (p1 - p3) ** 2,
p1 - 2 * p2 + (p0 - p3) ** 2,
-p1 - 2 * p2 + (p2 - p3) ** 2,
p0 ** 2 - p1 ** 2 + p2 ** 2 - p3 ** 2,
]
).T
assert data.shape == (n, dim)
return data
def _gen_nonlinear_data(self, n, dim, d):
assert dim >= d
m = int(dim / (2 * d))
assert dim == 2 * m * d
p = self.random_state.rand(d, n)
F = np.zeros((2 * d, n))
F[0::2, :] = np.cos(2 * np.pi * p)
F[1::2, :] = np.sin(2 * np.pi * p)
R = np.zeros((2 * d, n))
R[0::2, :] = np.vstack([p[1:], p[0]])
R[1::2, :] = np.vstack([p[1:], p[0]])
D = (R * F).T
data =
|
np.hstack([D] * m)
|
numpy.hstack
|
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for `policy_gradients.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
from jax.tree_util import tree_map
import numpy as np
from rlax._src import policy_gradients
class DpgLossTest(parameterized.TestCase):
def setUp(self):
super(DpgLossTest, self).setUp()
self.s_t = np.array([[0, 1, 0], [1, 1, 2]], dtype=np.float32) # [B, T]
self.w_s = np.ones([3, 2], dtype=np.float32)
self.b_s = np.zeros([2], dtype=np.float32)
self.w = np.ones([2, 1], dtype=np.float32)
self.b = np.zeros([1], dtype=np.float32)
self.expected = np.array([0.5, 0.5], dtype=np.float32)
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_dpg_loss(self, compile_fn, place_fn):
"""Tests for a single element."""
# Optionally compile.
dpg = compile_fn(policy_gradients.dpg_loss)
# Actor and critic function approximators.
actor = lambda s_t: jnp.matmul(s_t, self.w_s) + self.b_s
critic = lambda a_t: jnp.squeeze(jnp.matmul(a_t, self.w) + self.b)
# For each element in the batch.
for s_t, expected in zip(self.s_t, self.expected):
# Compute loss.
a_t = actor(s_t)
dqda = jax.grad(critic)(a_t)
# Optionally convert to device array.
a_t, dqda = tree_map(place_fn, (a_t, dqda))
# Test outputs.
actual = np.sum(dpg(a_t, dqda, dqda_clipping=1.))
np.testing.assert_allclose(actual, expected, atol=1e-4)
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_dpg_loss_batch(self, compile_fn, place_fn):
"""Tests for a full batch."""
# Vmap and optionally compile.
dpg = compile_fn(jax.vmap(policy_gradients.dpg_loss, in_axes=(0, 0, None)))
# Actor and critic function approximators.
actor = lambda s_t: jnp.matmul(s_t, self.w_s) + self.b_s
critic = lambda a_t: jnp.squeeze(jnp.matmul(a_t, self.w) + self.b)
# Compute loss.
a_t = actor(self.s_t)
dqda = jax.vmap(jax.grad(critic))(a_t)
# Optionally convert to device array.
a_t, dqda = tree_map(place_fn, (a_t, dqda))
# Test outputs.
actual = np.sum(dpg(a_t, dqda, 1.), axis=1)
|
np.testing.assert_allclose(actual, self.expected, atol=1e-4)
|
numpy.testing.assert_allclose
|
# -*- coding: utf-8 -*-
__author__ = "<NAME>", "<NAME>"
__date__ = "08 Mar 2016"
import os, sys; sys.path.append(os.path.join('..', '..', '..')) # analysis:ignore
import numpy as np
import xrt.backends.raycing as raycing
import xrt.backends.raycing.sources as rs
#import xrt.backends.raycing.apertures as ra
import xrt.backends.raycing.oes as roe
import xrt.backends.raycing.run as rr
import xrt.backends.raycing.materials as rm
import xrt.plotter as xrtp
import xrt.runner as xrtr
import xrt.backends.raycing.screens as rsc
showIn3D = False
mGold = rm.Material('Au', rho=19.3)
E0 = 9000.
L = 1200.
W = 10.
gap = 0.2
pitchVFM = 4e-3
pVFM = 20000.
qVFM = 400000000.
pitchHFM = 4e-3
pHFM = 20000.
qHFM = 400000000.
p = pVFM
q = 2000
#Select a case:
#case = 'parabolic'
case = 'elliptical'
def build_beamline(nrays=raycing.nrays):
beamLine = raycing.BeamLine(height=0)
rs.GeometricSource(
beamLine, 'GeometricSource', (0, 0, 0),
nrays=nrays, dx=0., dz=0., dxprime=2e-4, dzprime=1e-4,
distE='lines', energies=(E0,), polarization='horizontal')
if case == 'parabolic':
mirrorVFM = roe.BentFlatMirror
mirrorHFM = roe.BentFlatMirror
RVFM = 2 * p /
|
np.sin(pitchVFM)
|
numpy.sin
|
#coverage:ignore
""" Determine costs for sparse decomposition in QC
Note this is WIP
"""
from typing import Tuple
import numpy as np
from numpy.lib.scimath import arccos, arcsin # has analytc continuation to cplx
from openfermion.resource_estimates.utils import QI, power_two
def cost_sparse(n: int, lam: float, d: int, dE: float, chi: int,
stps: int) -> Tuple[int, int, int]:
""" Determine fault-tolerant costs using sparse decomposition in quantum
chemistry
Args:
n (int) - the number of spin-orbitals
lam (float) - the lambda-value for the Hamiltonian
dE (float) - allowable error in phase estimation
L (int) - the rank of the first decomposition
Lxi (int) - the total number of eigenvectors
chi (int) - equivalent to aleph_1 and aleph_2 in the document, the
number of bits for the representation of the coefficients
beta (int) - equivalent to beth in the document, the number of bits
for the rotations
stps (int) - an approximate number of steps to choose the precision
of single qubit rotations in preparation of the equal superposition
state
Returns:
step_cost (int) - Toffolis per step
total_cost (int) - Total number of Toffolis
ancilla_cost (int) - Total ancilla cost
"""
    # I think there is a bug in the Mathematica notebook: it does not check
    # whether 2 is a factor first, which it should, cf. the similar function
    # in costingdf.nb. Below is correct, using the power_two() function to
    # give the power of 2 that is a factor of d.
eta = power_two(d)
nN = np.ceil(np.log2(n // 2))
m = chi + 8 * nN + 4 # Eq (A13)
oh = [0] * 20
nM = (np.ceil(np.log2(d)) - eta) / 2
for p in range(2, 22):
# JJG note: arccos arg may be > 1
v = np.round(np.power(2,p+1) / (2 * np.pi) * arccos(
|
np.power(2,nM)
|
numpy.power
|
#
# Copyright (c) 2019 Nordic Semiconductor ASA
#
# SPDX-License-Identifier: LicenseRef-BSD-5-Clause-Nordic
import time
import pyaudio
import numpy as np
import colorsys
import wave
import threading
import queue
from modules.led_stream import MS_PER_SEC
from modules.led_stream import Step
from modules.led_stream import validate_params
from modules.led_stream import fetch_free_steps_buffer_info, led_send_single_step
class MusicLedStream():
def __init__(self, dev, DEVICE_CONFIG, led_id, freq, filename):
if not validate_params(DEVICE_CONFIG, freq, led_id):
raise ValueError("Invalid music LED stream parameters")
success, (ready, free) = fetch_free_steps_buffer_info(dev, led_id,
DEVICE_CONFIG['stream_led_cnt'])
if not success:
raise Exception("Device communication problem occurred")
if not ready:
raise Exception("LEDs are not ready")
try:
self.file = wave.open(filename, 'rb')
except (wave.Error, FileNotFoundError) as e:
raise e
self.dev_params = {
'max_free' : free,
'dev' : dev,
'stream_led_cnt' : DEVICE_CONFIG['stream_led_cnt'],
'led_id' : led_id,
}
self.send_error_event = threading.Event()
self.queue = queue.Queue()
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format=self.p.get_format_from_width(self.file.getsampwidth()),
channels=self.file.getnchannels(),
rate=self.file.getframerate(),
output=True,
input=False,
stream_callback=self.music_callback,
frames_per_buffer=self.file.getframerate() // freq,
start = False)
self.led_effects = {
'BASE_DURATION' : 1 / freq,
'SUBSTEP_CNT' : 10,
'sent_cnt' : 0,
'reminder' : 0,
'duration_increment' : 0,
'start_time' : None,
'out_latency' : self.stream.get_output_latency(),
}
@staticmethod
def peak_to_hue(peak):
assert peak >= 0
assert peak <= 1
G = 120
B = 240
HUE_MAX = 360
hue = (B - peak * (B - G))
hue /= HUE_MAX
return hue
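    # Illustration: peak = 0 maps to hue 240/360 (blue) and peak = 1 maps to
    # hue 120/360 (green); the normalized hue is presumably fed to
    # colorsys.hsv_to_rgb (imported above) when colouring the LED steps.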
@staticmethod
def gen_led_color(data):
# Cast to prevent overflow
peak = min(1, np.abs(np.int32(np.max(data)) - np.min(data)) /
|
np.iinfo(np.int16)
|
numpy.iinfo
|
""" .. _Segments-api:
**Segments** --- Manages groups of line segments.
-------------------------------------------------
This module defines the Segments class.
"""
# system imports
import numpy as np
import copy
# ADMIT imports
from admit.util.AdmitLogging import AdmitLogging as logging
class Segments(object):
""" Class to hold segments and convert them between different types.
Segments are defined by a beginning and ending channel (both
inclusive). ADMIT gives special meaning to a segment, for example a
line can be found within that segment, or a continuum has to be fitted
in that segment (or group of segments).
Parameters
----------
st : array like
Array like object containing either the full segment list (array of
two element arrays containing the start and end channel numbers for
each segment), or an array of starting channel numbers.
Default: None.
en : array like
An array of the ending channel number corresponding to the starting
channel numbers given in st. Leave as None if st contains the full
listing.
Default: None.
nchan : int
The number of channels in the spectrum that the segments refer to.
This is used to construct the bit mask for tracking all segments.
If left as None, no bitmask will be made.
Default: None.
startchan : int
The starting channel number of the spectrum that the segments refer
to. Must be >= 0.
Default: 0.
"""
def __init__(self, st=None, en=None, nchan=None, startchan=0):
# initialize everything
self._segments = []
self._nchan = nchan
        # error check the starting channel number
self._startchan = int(startchan)
if self._startchan < 0:
raise Exception("Start channel must be 0 or greater.")
# if nchan was specified create the bitmask
if nchan:
self._chans = np.array([0] * nchan)
# determine the maxchan
self._maxchan = nchan - 1 + self._startchan
else:
self._chans = None
self._maxchan = 0
if st is None:
return
if type(st) != type(en) and en is not None:
raise Exception("Channel start and end points must be the same type.")
# build the list of segments
# if en is not given
peak = 0
if en is None:
# st must be array like
if not hasattr(st, '__iter__'):
raise Exception("A list must be given for parameter st.")
for seg in st:
# each one must have length of 2
if len(seg) != 2:
raise Exception("Each segment must have a size of 2.")
# make sure they are in the right order
tempseg = [int(seg[0]), int(seg[1])]
tempseg.sort()
peak = max(peak, tempseg[1])
self._segments.append(tempseg)
else:
# if both en and st are given and are ints
if not hasattr(st, '__iter__'):
tempseg = [int(st), int(en)]
tempseg.sort()
peak = max(peak, tempseg[1])
self._segments.append(tempseg)
else:
                # if both en and st are given and both are array like
# create iterators
stit = iter(st)
enit = iter(en)
if len(st) != len(en):
logging.warning("Starting and ending channel ranges do not have the same length, truncating the longer.")
# iterate through the lists
while True:
try:
                        tempseg = [int(next(stit)), int(next(enit))]
tempseg.sort()
peak = max(peak, tempseg[1])
self._segments.append(tempseg)
except StopIteration:
break
if self._chans is None:
self._chans = np.array([0] * (peak + 1))
# determine the maxchan
self._maxchan = peak + self._startchan
# build the bit mask
for seg in self._segments:
if seg[1] > self._maxchan or seg[0] < self._startchan:
raise Exception("All or part of a segment is beyond the given spectrum. Segment: %s, bounds: [%i, %i]" %
(seg, self._startchan, self._maxchan))
self._chans[seg[0] - self._startchan: seg[1] - self._startchan + 1] = 1
def __len__(self):
""" Returns the number of segments
Parameters
----------
None
Returns
-------
int containing the number of segments in the class
"""
return len(self._segments)
def __iter__(self):
""" Rurns an iterator to the list of segments
Parameters
----------
None
Returns
-------
Iterator to the list of segments
"""
return iter(self._segments)
def __getitem__(self, index):
""" Returns the segment at the given index
Parameters
----------
index : int
The index of the segment to return
Returns
-------
Two element list (segment) of the starting and ending channel numbers
"""
if index >= len(self._segments):
raise Exception("Index %i is beyond the range of indices (%i)" % (index, len(self._segments)))
return self._segments[index]
def __add__(self, other):
""" Method to add two Segments classes together, without merging he semgents. The bitmask
is recalculated.
Parameters
----------
other : Segments class instance or array like
If a Segments instance is given then the internal segments are added to the current
segment list. If an array is given then the items of the array are added to the
current segment list.
Returns
-------
The instance of this class with the new segments incorporated
"""
new = copy.deepcopy(self)
if type(other) == type(new):
if new._startchan != other._startchan:
raise Exception("Starting channels do not match.")
if len(new._chans) != len(other._chans):
raise Exception("Number of channels do not match.")
for seg in other:
new._segments.append(seg)
new.recalcmask()
elif hasattr(other, "__iter__"):
for seg in other:
tempseg = [int(seg[0]), int(seg[1])]
if tempseg[1] > new._maxchan or tempseg[0] < new._startchan:
raise Exception("All or part of a segment is beyond the given spectrum. Segment: %s, bounds: [%i, %i]" %
(seg, self._startchan, self._maxchan))
new._segments.append(tempseg)
new.recalcmask()
return new
def __setitem__(self, index, item):
""" Method to set the segment at index to a new value
Parameters
----------
index : int
The location in the segment array to replace
item : two element array
The new segment to replace the indicated one with
Returns
-------
None
"""
if not hasattr(item, "__iter__"):
raise Exception("Segments must be ginven as an iteratable object (list, np.array, etc.")
if len(item) != 2:
raise Exception("Segments must have length 2.")
tempseg = [int(item[0]), int(item[1])]
tempseg.sort()
if tempseg[1] > self._maxchan or tempseg[0] < self._startchan:
raise Exception("All or part of a segment is beyond the given spectrum. Segment: %s, bounds: [%i, %i]" %
(tempseg, self._startchan, self._maxchan))
self._segments[index] = tempseg
self._chans[tempseg[0] - self._startchan: tempseg[1] - self._startchan + 1] = 1
def __contains__(self, chan):
""" Method to determine if a given channel is in a segment. This requires the bit mask to
be available
Parameters
----------
chan : int
The channel number to test
Returns
-------
bool, True if the channel is in a segment, False otherwise
"""
if self._chans is None:
raise Exception("No bitmask has been built, call setnchan to build it.")
return bool(self._chans[chan - self._startchan])
def append(self, item):
""" Method to append a new segment to the current list
Parameters
----------
item : two element array
The new segment to append to the list
Returns
-------
None
"""
if not hasattr(item, '__iter__'):
raise Exception("Segments must be ginven as an iteratable object (list, np.array, etc.")
else:
if len(item) != 2:
raise Exception("Segments must have length 2.")
tempseg = [int(item[0]), int(item[1])]
tempseg.sort()
if tempseg[0] < self._startchan or tempseg[1] > self._maxchan:
raise Exception("All or part of a segment is beyond the given spectrum. Segment: %s, bounds: [%i, %i]" %
(tempseg, self._startchan, self._maxchan))
self._segments.append(tempseg)
self._chans[tempseg[0] - self._startchan: tempseg[1] - self._startchan + 1] = 1
def getmask(self):
""" Method to return the current bitmask
Parameters
----------
None
Returns
-------
        Array like object containing the current bit mask, 1 = in segment, 0 = not in segment
"""
return self._chans
def getchannels(self, invert=False):
""" Method to return the current list of channel numbers in the bitmask
Parameters
----------
invert : boolean
Return the list of channels outside the bitmask instead
Returns
-------
Array like object containing the (zero based) channel numbers that are in the segments
"""
if invert:
return (np.where(self._chans == 0)[0] + self._startchan).tolist()
else:
return (np.where(self._chans == 1)[0] + self._startchan).tolist()
def remove(self, index):
""" Method to remove a segment from the segment list
Parameters
----------
index : int
The location of the segment to remove
Returns
-------
None
"""
del self._segments[index]
self.recalcmask()
def pop(self):
""" Method to pop, or remove and return, the last segment in the list
Parameters
----------
None
Returns
-------
Array like 2 element list of the segment starting and ending channels of the last
segment in the list
"""
seg = self._segments.pop()
self.recalcmask()
return seg
def limits(self):
""" Method to return the channel range of the internal channel bit mask
Parameters
----------
None
Returns
-------
Array like 2 element list of the segment starting and ending channels
"""
return [self._startchan, self._maxchan]
def recalcmask(self, test=False):
""" Method to recalculate the bit mask based on the current segment list. 1 = in segment
0 = not in segment
Parameters
----------
test : bool
If True then test each segment to be sure it is in the current allowed channel range.
            If False then do not test.
Returns
-------
None
"""
self._chans = np.array([0] * (self._maxchan + 1 - self._startchan))
for seg in self._segments:
if test and (seg[0] < self._startchan or seg[1] >= self._maxchan):
raise Exception("All or part of a segment is beyond the given spectrum. Segment: %s, bounds: [%i, %i]" %
(seg, self._startchan, self._maxchan))
self._chans[seg[0] - self._startchan: seg[1] - self._startchan + 1] = 1
def setnchan(self, nchan):
""" Method to set the number of channels in the internal channel bit mask
Parameters
----------
nchan : int
The number of channels in the bit mask
Returns
-------
None
"""
        if nchan == self._nchan:
return
self._nchan = nchan
self._maxchan = self._startchan + nchan - 1
self.recalcmask(True)
def getnchan(self):
""" Method to return the number of channels in the current bit mask
Parameters
----------
None
Returns
-------
int giving the number of channels
"""
return self._maxchan - self._startchan + 1
def setstartchan(self, chan):
""" Method to set the starting channels number for the internal bit mask
Parameters
----------
chan : int
The starting channel number
Returns
-------
None
"""
if chan == self._startchan:
return
self._startchan = chan
self._maxchan = self._startchan + self._nchan - 1
self.recalcmask(True)
def getstartchan(self):
""" Method to get the starting channel number of the current bit mask
Parameters
----------
None
Returns
-------
int containing the starting channel number
"""
return self._startchan
def chans(self, invert=False):
""" Method to convert the bit mask into a string of channel ranges in CASA format. e.g.
[3,10],[25,50] => "3~10;25~50"
Parameters
----------
        invert : boolean
            If True, format the channel ranges outside the segments instead.
            Default: False.
Returns
-------
string containing the formatted channel ranges
"""
output = ""
if invert:
basechan = np.append(1-self._chans, 0)
shiftchan =
|
np.insert(1-self._chans, 0, 0)
|
numpy.insert
|
import pandas as pd
import numpy as np
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from collections import defaultdict
import warnings
warnings.filterwarnings("ignore")
def get_sentence_vectors(df):
"""
A method to get convert sentences into bag of words vectors
:param df: Dataframe
sentences, sense, and labels
:return: Array
"""
df["sentence"] = df.apply(lambda row: remove_mask(row["sentence"], row["form"]), axis=1)
df = df[df["sentence"]!=0]
all_data = df[["sentence", "form"]].values
sentences, Y = all_data[:, 0], np.array(all_data[:, 1], dtype='int64')
vectorizor = CountVectorizer(min_df=0.01, ngram_range=(1, 2))
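    # min_df=0.01 drops terms that appear in fewer than 1% of the sentences;
    # ngram_range=(1, 2) keeps both unigrams and bigrams as features.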
X_fitted = vectorizor.fit_transform(sentences)
X = X_fitted.toarray()
return X, Y
def get_single_feature_vectors(df, feature):
df["context"] = df.apply(lambda row: find_context(row['sentence'], row['form']), axis=1)
df[feature] = df.apply(lambda row: feature_calc(row["context"], row["word"], feature), axis=1)
df = df[df[feature] != 0]
all_data = df[[feature, "form"]].values
X, Y = all_data[:, 0],
|
np.array(all_data[:, 1], dtype='int64')
|
numpy.array
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
import torch
from MOC_utils.model import create_inference_model, load_inference_model, convert2flow
from MOC_utils.data_parallel import DataParallel
from .decode import moc_decode
from MOC_utils.utils import flip_tensor
class MOCDetector(object):
def __init__(self, opt):
if opt.gpus[0] >= 0:
opt.device = torch.device('cuda')
else:
opt.device = torch.device('cpu')
self.rgb_model_backbone, self.rgb_model_branch = None, None
self.flow_model_backbone, self.flow_model_branch = None, None
self.num_classes = opt.num_classes
self.opt = opt
def load_backbone(self):
opt = self.opt
if opt.rgb_model != '':
print('create rgb model')
self.rgb_model_backbone, self.rgb_model_branch = create_inference_model(opt.arch, opt.branch_info, opt.head_conv, opt.K, flip_test=opt.flip_test)
self.rgb_model_backbone, self.rgb_model_branch = load_inference_model(self.rgb_model_backbone, self.rgb_model_branch, opt.rgb_model)
self.rgb_model_backbone = DataParallel(
self.rgb_model_backbone, device_ids=opt.gpus,
chunk_sizes=opt.chunk_sizes).to(opt.device)
self.rgb_model_backbone.eval()
if opt.flow_model != '':
print('create flow model')
self.flow_model_backbone, self.flow_model_branch = create_inference_model(opt.arch, opt.branch_info, opt.head_conv, opt.K, flip_test=opt.flip_test)
self.flow_model_backbone = convert2flow(opt.ninput, self.flow_model_backbone)
self.flow_model_backbone, self.flow_model_branch = load_inference_model(self.flow_model_backbone, self.flow_model_branch, opt.flow_model)
self.flow_model_backbone = DataParallel(
self.flow_model_backbone, device_ids=opt.gpus,
chunk_sizes=opt.chunk_sizes).to(opt.device)
self.flow_model_backbone.eval()
def load_branch(self):
opt = self.opt
if opt.rgb_model != '':
print('create rgb model')
self.rgb_model_backbone, self.rgb_model_branch = create_inference_model(opt.arch, opt.branch_info, opt.head_conv, opt.K, flip_test=opt.flip_test)
self.rgb_model_backbone, self.rgb_model_branch = load_inference_model(self.rgb_model_backbone, self.rgb_model_branch, opt.rgb_model)
self.rgb_model_branch = DataParallel(
self.rgb_model_branch, device_ids=opt.gpus,
chunk_sizes=opt.chunk_sizes).to(opt.device)
self.rgb_model_branch.eval()
if opt.flow_model != '':
print('create flow model')
self.flow_model_backbone, self.flow_model_branch = create_inference_model(opt.arch, opt.branch_info, opt.head_conv, opt.K, flip_test=opt.flip_test)
self.flow_model_backbone = convert2flow(opt.ninput, self.flow_model_backbone)
self.flow_model_backbone, self.flow_model_branch = load_inference_model(self.flow_model_backbone, self.flow_model_branch, opt.flow_model)
self.flow_model_branch = DataParallel(
self.flow_model_branch, device_ids=opt.gpus,
chunk_sizes=opt.chunk_sizes).to(opt.device)
self.flow_model_branch.eval()
def pre_process(self, images, is_flow=False, ninput=1):
images = [cv2.resize(im, (self.opt.resize_height, self.opt.resize_width), interpolation=cv2.INTER_LINEAR) for im in images]
if self.opt.flip_test:
data = [np.empty((3 * ninput, self.opt.resize_height, self.opt.resize_width), dtype=np.float32) for i in range(2)]
else:
data = [np.empty((3 * ninput, self.opt.resize_height, self.opt.resize_width), dtype=np.float32)]
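        # With flip_test enabled two buffers are allocated, presumably one for
        # the original clip and one for its horizontally flipped copy, so that
        # predictions can be averaged at test time.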
mean = np.tile(
|
np.array(self.opt.mean, dtype=np.float32)
|
numpy.array
|
import warnings
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
#warnings.filterwarnings("ignore")
if rank!=0: warnings.filterwarnings("ignore")
import numpy
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as interpolate
from scipy.interpolate import interp1d
from cosmo4d.lab import (UseComplexSpaceOptimizer,
NBodyModel, LPTModel, ZAModel,
LBFGS, ParticleMesh)
#from cosmo4d.lab import mapbias as map
from cosmo4d import lab
from cosmo4d.lab import report, dg, objectives, mapnoise
from abopt.algs.lbfgs import scalar as scalar_diag
from nbodykit.cosmology import Planck15, EHPower, Cosmology
from nbodykit.algorithms.fof import FOF
from nbodykit.lab import FFTPower, BigFileMesh, FieldMesh, BigFileCatalog, ArrayCatalog
import sys, os, json, yaml
from solve import solve
from getbiasparams import getbias, eval_bfit
sys.path.append('../')
sys.path.append('../utils/')
import HImodels
# initiate
klin, plin = numpy.loadtxt('../../data/pklin_1.0000.txt', unpack = True)
ipk = interpolate(klin, plin)
#cosmo = Planck15.clone(Omega_cdm = 0.2685, h = 0.6711, Omega_b = 0.049)
cosmodef = {'omegam':0.309167, 'h':0.677, 'omegab':0.048}
cosmo = Cosmology.from_dict(cosmodef)
#########################################
#Set parameters here
##
cfname = sys.argv[1]
with open(cfname, 'r') as ymlfile: cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
for i in cfg['basep'].keys(): locals()[i] = cfg['basep'][i]
kmin, angle = cfg['mods']['kmin'], cfg['mods']['angle']
h1model = HImodels.ModelA(aa)
zz = 1/aa-1
if angle is None:
angle = numpy.round(mapnoise.wedge(zz, att=cfg['mods']['wopt'], angle=True), 0)
if rank == 0:
print(angle)
try: spread
except : spread = 1.
truth_pm = ParticleMesh(BoxSize=bs, Nmesh=(nc, nc, nc), dtype='f8')
comm = truth_pm.comm
rank = comm.rank
nc2 = nc*2
if numd <= 0: num = -1
else: num = int(bs**3 * numd)
if rank == 0: print('Number of objects : ', num)
objfunc = getattr(objectives, cfg['mods']['objective'])
map = getattr(lab, cfg['mods']['map'])
#
proj = '/project/projectdirs/m3058/chmodi/m3127/'
#proj = '/project/projectdirs/cosmosim/lbl/chmodi/cosmo4d/'
if ray: dfolder = '/global/cscratch1/sd/chmodi/m3127/cm_lowres/%dstepT-B%d/%d-%d-9100/'%(nsteps, B, bs, nc)
else: dfolder = '/global/cscratch1/sd/chmodi/m3127/cm_lowres/%dstepT-B%d/%d-%d-9100-fixed/'%(nsteps, B, bs, nc)
ofolder = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/reconlsstv2/fastpm_%0.4f/wedge_kmin%.2f_%s/L%04d-N%04d/'%(aa, kmin, cfg['mods']['wopt'], bs, nc)
if ray: ofolder = ofolder[:-1]+'-R/'
if stage2 is not None:
ofolder += 'thermal-%s/'%stage2
if hex: ofolder = ofolder[:-1] + '-hex/'
if spread != 1: ofolder = ofolder[:-1] + '-sp%.1f/'%spread
if hirax :
ofolder = ofolder[:-1] + '-hirax/'
Ndish = 32
else: Ndish = 256
#Dynamics config
if pmdisp:
ofolder += 'T%02d-B%01d/'%(nsteps, B)
else: ofolder += 'ZA/'
if lsstwt: prefix += '_ln%04d'%(lsstnumd*1e4)
if rsdpos: prefix += "_rsdpos"
initseed = 777
fname = 's%d_h1massD%s'%(initseed, "_"+prefix)
optfolder = ofolder + 'opt_%s/'%fname
#ofolder = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/reconlsst/fastpm_0.2000/wedge_kmin0.03_pess/L1024-N0256-R/'
if truth_pm.comm.rank == 0: print('Output Folder is %s'%optfolder)
###
for folder in [ofolder, optfolder]:
try: os.makedirs(folder)
except:pass
####################################
new_pm = ParticleMesh(BoxSize=truth_pm.BoxSize, Nmesh=truth_pm.Nmesh*2, dtype='f8')
lsstnum = int(lsstnumd * bs**3)
if rank == 0: print("Lsst number of obj : ", lsstnum)
#####
#Data
if rsdpos :
pp = proj + '/HV10240-R/fastpm_%0.4f/Header/attr-v2'%aa
with open(pp) as ff:
for line in ff.readlines():
if 'RSDFactor' in line: rsdfaccat = float(line.split()[-2])
else: rsdfaccat = 0.
rsdfac = rsdfaccat * 100./aa ## Ad hoc factor due to incorrect velocity dimensions in nbody.py
if rank == 0: print('RSD factor for catalog is : ', rsdfaccat)
if rank == 0: print('RSD factor is : ', rsdfac)
noise = None
if rank == 0 : print('Noise : ', noise)
stages = numpy.linspace(0.01, aa, nsteps, endpoint=True)
if pmdisp: dynamic_model = NBodyModel(cosmo, new_pm, B=B, steps=stages)
else: dynamic_model = ZAModel(cosmo, new_pm, B=B, steps=stages)
if rank == 0: print(dynamic_model)
#noise
if stage2 is not None: truth_noise_model = mapnoise.ThermalNoise(new_pm, seed=100, aa=aa, att=stage2,spread=spread, hex=hex, limk=2, Ns=Ndish, checkbase=True)
else: truth_noise_model = mapnoise.ThermalNoise(new_pm, seed=None, aa=aa, att=stage2,spread=spread, hex=hex, Ns=Ndish)
wedge_noise_model = mapnoise.WedgeNoiseModel(pm=new_pm, power=1, seed=100, kmin=kmin, angle=angle)
#Create and save data if not found
#################
###
######
###
###if rsdpos:
### if ray: hmesh = BigFileMesh(proj + '/HV%d-R/fastpm_%0.4f/HImeshz-N%04d/'%(bs*10, aa, nc2), 'ModelD').paint()
### else: hmesh = BigFileMesh(proj + '/HV%d-F/fastpm_%0.4f/HImeshz-N%04d/'%(bs*10, aa, nc2), 'ModelD').paint()
###else:
### if ray: hmesh = BigFileMesh(proj + '/HV%d-R/fastpm_%0.4f/HImesh-N%04d/'%(bs*10, aa, nc2), 'ModelD').paint()
### else: hmesh = BigFileMesh(proj + '/HV%d-F/fastpm_%0.4f/HImesh-N%04d/'%(bs*10, aa, nc2), 'ModelD').paint()
###
###hmesh /= hmesh.cmean()
###hmesh -= 1.
###
###if ray: dnewfolder = '/global/cscratch1/sd/chmodi/m3127/cm_lowres/%dstepT-B%d/%d-%d-9100/'%(nsteps, B, bs, nc*2)
###else: dnewfolder = '/global/cscratch1/sd/chmodi/m3127/cm_lowres/%dstepT-B%d/%d-%d-9100-fixed/'%(nsteps, B, bs, nc*2)
####dnewfolder = '/global/cscratch1/sd/chmodi/m3127/cm_lowres/%dstepT-B%d/%d-%d-9100-fixed/'%(nsteps, B, bs, nc*2)
###s_truth = BigFileMesh(dnewfolder + 'linear', 'LinearDensityK').paint()
###dyn = BigFileCatalog(dnewfolder + 'fastpm_%0.4f/1'%aa)
###dlayout = new_pm.decompose(dyn['Position'])
###d_truth = new_pm.paint(dyn['Position'], layout=dlayout)
###
###
###
try:
data_p = map.Observable.load(optfolder+'/datap_up')
s_truth = data_p.s
d_truth = data_p.d
hmesh = data_p.mapp
lmesh = data_p.mapp2
except Exception as e:
#data_p = map.Observable(hmesh, d_truth, s_truth)
#data_p.save(optfolder+'datap_up/')
print(e)
cat = BigFileCatalog(proj + '/HV%d-R/fastpm_%0.4f/LL-M10p5/'%(bs*10, aa))
cat = cat.sort('Length', reverse=False)
lsstcat = cat.gslice(start = cat.csize - lsstnum - 1, stop = cat.csize-1)
if rank == 0: print("csize : ", lsstcat.csize)
lsstmasswt = lsstcat['Mass'].copy().flatten()
if not lsstmass: lsstmasswt = lsstmasswt*0 + 1.
lsstposition = lsstcat['Position'] + lsstcat['Velocity']*np.array([0, 0, 1])*rsdfaccat
llayout = new_pm.decompose(lsstposition)
lmesh = new_pm.paint(lsstposition, mass=lsstmasswt, layout=llayout)
lmesh /= lmesh.cmean()
lmesh -= 1
#llayout = new_pm.decompose(lsstcat['Position'])
#lmesh = new_pm.paint(lsstcat['Position'] + lsstcat['Velocity']*np.array([0, 0, 1])*rsdfac, mass=lsstmasswt, layout=llayout)
#lmesh /= lmesh.cmean()
#lmesh -= 1
if rsdpos:
if ray: hmesh = BigFileMesh(proj + '/HV%d-R/fastpm_%0.4f/HImeshz-N%04d/'%(bs*10, aa, nc2), 'ModelD').paint()
else: hmesh = BigFileMesh(proj + '/HV%d-F/fastpm_%0.4f/HImeshz-N%04d/'%(bs*10, aa, nc2), 'ModelD').paint()
else:
if ray: hmesh = BigFileMesh(proj + '/HV%d-R/fastpm_%0.4f/HImesh-N%04d/'%(bs*10, aa, nc2), 'ModelD').paint()
else: hmesh = BigFileMesh(proj + '/HV%d-F/fastpm_%0.4f/HImesh-N%04d/'%(bs*10, aa, nc2), 'ModelD').paint()
hmesh /= hmesh.cmean()
hmesh -= 1.
if ray: dnewfolder = '/global/cscratch1/sd/chmodi/m3127/cm_lowres/%dstepT-B%d/%d-%d-9100/'%(nsteps, B, bs, nc*2)
else: dnewfolder = '/global/cscratch1/sd/chmodi/m3127/cm_lowres/%dstepT-B%d/%d-%d-9100-fixed/'%(nsteps, B, bs, nc*2)
s_truth = BigFileMesh(dnewfolder + 'linear', 'LinearDensityK').paint()
s_truth = new_pm.create(mode='real', value=s_truth[...])
dyn = BigFileCatalog(dnewfolder + 'fastpm_%0.4f/1'%aa)
dlayout = new_pm.decompose(dyn['Position'])
d_truth = new_pm.paint(dyn['Position'], layout=dlayout)
hmesh = new_pm.create(mode='real', value=hmesh[...])
data_p = map.Observable(hmesh, d_truth, s_truth, lmesh)
data_p.save(optfolder+'datap_up/')
try:
data_n = map.Observable.load(optfolder+'/datan_up')
except:
data_n = truth_noise_model.add_noise(data_p)
data_n.save(optfolder+'datan_up/')
try: data_w = map.Observable.load(optfolder+'/dataw_up')
except:
data_w = wedge_noise_model.add_noise(data_n)
data_w.save(optfolder+'dataw_up/')
##
#Model
title = None
paramsfile = '/paramsup%s.txt'
try:
params = numpy.loadtxt(optfolder + paramsfile%'')
params_lsst = numpy.loadtxt(optfolder + paramsfile%'_lsst')
#mock_model = map.MockModel(dynamic_model, params=params, rsdpos=rsdpos, rsdfac=rsdfac)
mock_model = map.MockModel(dynamic_model, params=params, params2 = params_lsst, rsdpos=rsdpos, rsdfac=rsdfac)
fit_p = map.Observable.load(optfolder+'/fitp_up')
#
ivarmesh = BigFileMesh(optfolder + 'ivarmesh_up', 'ivar').paint()
kerror, perror = numpy.loadtxt(optfolder + '/error_psnup.txt', unpack=True)
ipkerror = interp1d(kerror, perror, bounds_error=False, fill_value=(perror[0], perror[-1]))
kerror, perror = numpy.loadtxt(optfolder + '/error_psup.txt', unpack=True)
ipkmodel = interp1d(kerror, perror, bounds_error=False, fill_value=(perror[0], perror[-1]))
kerror_lsst, perror_lsst = numpy.loadtxt(optfolder + '/error_psup_lsst.txt', unpack=True)
ipkerror_lsst = interp1d(kerror_lsst, perror_lsst, bounds_error=False, fill_value=(perror_lsst[0], perror_lsst[-1]))
except Exception as e:
print('Exception occured : ', e)
mock_model_setup = map.MockModel(dynamic_model, rsdpos=rsdpos, rsdfac=rsdfac)
fpos, linear, linearsq, shear = mock_model_setup.get_code().compute(['xp', 'linear', 'linearsq', 'shear'], init={'parameters': s_truth})
grid = new_pm.generate_uniform_particle_grid(shift=0.0, dtype='f8')
#For LSST
params_lsst, bmod = getbias(new_pm, lmesh, [linear, linearsq, shear], fpos, grid, fitb2=True)
if rank ==0: numpy.savetxt(optfolder + paramsfile%'_lsst', params_lsst, header='b1, b2, bsq')
#For HI
params, bmod = getbias(new_pm, hmesh, [linear, linearsq, shear], fpos, grid, fitb2=True)
#params, bmod = getbias(new_pm, hmesh, [linear, linearsq, shear], fpos, grid)
if rank ==0: numpy.savetxt(optfolder + paramsfile%'', params, header='b1, b2, bsq')
#Create model and save
mock_model = map.MockModel(dynamic_model, params=params, params2 = params_lsst, rsdpos=rsdpos, rsdfac=rsdfac)
fit_p = mock_model.make_observable(s_truth)
fit_p.save(optfolder+'fitp_up/')
#Quantify error
kerror, perror = eval_bfit(data_n.mapp, fit_p.mapp, optfolder, noise=noise, title=title, fsize=15, suff='-noiseup')
ipkerror = interp1d(kerror, perror, bounds_error=False, fill_value=(perror[0], perror[-1]))
if rank ==0: numpy.savetxt(optfolder + '/error_psnup.txt',
|
numpy.array([kerror, perror])
|
numpy.array
|
import logging
import sys
from functools import partial
from pathlib import Path
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import matplotlib.colors as mcolors
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
import pandas as pd
from tqdm import trange, tqdm
import tensorflow as tf
import utils
import vis
import data_utils
import preprocessing
import backend
from synthetic import SyntheticPSF
from wavefront import Wavefront
from zernike import Zernike
logging.basicConfig(
stream=sys.stdout,
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
tf.get_logger().setLevel(logging.ERROR)
def iterative_eval(
model: tf.keras.Model,
inputs: np.array,
outputs: np.array,
batch_size: int,
psfargs: dict,
iterations: int = 15,
desc: str = '',
tolerance: float = 1e-2,
patience: int = 5,
):
predictions = {}
corrections = {}
gen = SyntheticPSF(**psfargs)
errors = pd.DataFrame.from_dict({
'niter': np.zeros(outputs.shape[0], dtype=int),
'residuals': utils.peak2peak(outputs)
})
means = None
i = 1
converged = False
while not converged:
corrections[i] = np.squeeze(inputs[0], axis=-1)
predictions[i], stdev = backend.bootstrap_predict(
model,
inputs,
batch_size=batch_size,
n_samples=1,
desc=desc
)
y_pred = pd.DataFrame([utils.peak_aberration(k) for k in predictions[i]], columns=['residuals'])
y_true = pd.DataFrame([utils.peak_aberration(k) for k in outputs], columns=['residuals'])
err = np.abs(y_true - y_pred)
err['niter'] = i
errors = pd.concat([errors, err], ignore_index=True)
means = errors.groupby(['niter']).mean().reset_index()
# check if converged
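        # Convergence here means any of: the iteration budget is spent, the
        # latest mean residual stopped improving by more than `tolerance`, or
        # the last `patience` means are all within tolerance of the latest one.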
if (i >= 1) and (
i >= iterations or means['residuals'].iloc[-1] + tolerance > means['residuals'].iloc[-2] or np.allclose(
means['residuals'].tail(patience).values, means['residuals'].iloc[-1], rtol=tolerance, atol=tolerance
)):
converged = True
# setup next iter
res = outputs - predictions[i]
g = partial(
gen.single_psf,
zplanes=0,
normed=True,
noise=True,
augmentation=True,
meta=False
)
inputs = np.expand_dims(np.stack(gen.batch(g, res), axis=0), -1)
outputs = res
i += 1
# psf_cmap = 'Spectral_r'
# fig, axes = plt.subplots(len(predictions), 4, figsize=(8, 11))
# for i, vol in corrections.items():
# axes[i - 1, 0].bar(range(len(predictions[i][0])), height=predictions[i][0], color='dimgrey')
# m = axes[i - 1, 1].imshow(np.max(vol, axis=0) if vol.shape[0] > 3 else vol[0], cmap=psf_cmap)
# axes[i - 1, 2].imshow(np.max(vol, axis=1) if vol.shape[0] > 3 else vol[1], cmap=psf_cmap)
# axes[i - 1, 3].imshow(np.max(vol, axis=2) if vol.shape[0] > 3 else vol[2], cmap=psf_cmap)
# cax = inset_axes(axes[i - 1, 3], width="10%", height="100%", loc='center right', borderpad=-2)
# cb = plt.colorbar(m, cax=cax)
# cax.yaxis.set_label_position("right")
# plt.show()
return means
def evaluate_psnrs(
model: Path,
wavelength: float,
x_voxel_size: float,
y_voxel_size: float,
z_voxel_size: float,
n_samples: int,
batch_size: int,
cpu_workers: int,
plot=True,
metric='peak2peak',
):
n_batches = n_samples // batch_size
save_path = model / "eval"
save_path.mkdir(exist_ok=True, parents=True)
model = backend.load(model)
model.summary()
error = {}
peak2peak = []
scales = sorted(set([int(t) for t in np.logspace(1, 3, num=20)]))
logger.info(f"PSNRs: {scales}")
for snr in scales:
psfargs = dict(
lam_detection=wavelength,
amplitude_ranges=(-.2, .2),
psf_shape=model.layers[0].input_shape[0][1:-1],
x_voxel_size=x_voxel_size,
y_voxel_size=y_voxel_size,
z_voxel_size=z_voxel_size,
batch_size=batch_size,
snr=snr,
max_jitter=1,
cpu_workers=cpu_workers,
)
z = Zernike(0)
cols = [f"(n={z.ansi_to_nm(j)[0]}, m={z.ansi_to_nm(j)[1]})" for j in range(model.layers[-1].output_shape[-1])]
y_pred = pd.DataFrame([], columns=cols)
y_true = pd.DataFrame([], columns=cols)
target_psnr = None
for i, (psfs, ys, psnrs, zplanes, maxcounts) in zip(range(n_batches),
SyntheticPSF(**psfargs).generator(debug=True)):
psnr_pct = np.ceil(np.nanquantile(psnrs, .75))
if target_psnr is None:
target_psnr = int(psnr_pct)
else:
target_psnr = int(np.mean([psnr_pct, target_psnr]))
preds, stdev = backend.bootstrap_predict(
model,
psfs,
batch_size=batch_size,
desc=f"Predictions for PSNR({int(target_psnr)})"
)
if plot:
dir = save_path / f"psnr_{target_psnr}"
dir.mkdir(exist_ok=True, parents=True)
paths = [f"{dir}/{(i * batch_size) + n}" for n in range(batch_size)]
utils.multiprocess(
partial(utils.eval, psfargs=psfargs),
list(zip(psfs, ys, preds, paths, psnrs, zplanes, maxcounts)),
desc=f"Plotting PSNR({int(target_psnr)})"
)
y_pred = y_pred.append(pd.DataFrame(utils.peak2peak(preds), columns=['sample']), ignore_index=True)
y_true = y_true.append(pd.DataFrame(utils.peak2peak(ys), columns=['sample']), ignore_index=True)
peak2peak.extend(list(y_true['sample'].values))
df = np.abs(y_true - y_pred)
df = pd.DataFrame(df, columns=['sample'])
error[target_psnr] = df['sample']
df.to_csv(f"{save_path}/psnr_{target_psnr}.csv")
error = pd.DataFrame.from_dict(error)
error = error.reindex(sorted(error.columns), axis=1)
logger.info(error)
vis.plot_residuals(
error,
wavelength=wavelength,
nsamples=n_samples,
save_path=f"{save_path}/psnr_{metric}",
label=r'Peak signal-to-noise ratio'
)
plt.figure(figsize=(6, 6))
plt.hist(peak2peak, bins=100)
plt.grid()
plt.xlabel(
'Peak-to-peak aberration $|P_{95} - P_{5}|$'
rf'($\lambda = {int(wavelength*1000)}~nm$)'
)
plt.ylabel(rf'Number of samples')
plt.savefig(f'{save_path}/dist_peak2peak.png', dpi=300, bbox_inches='tight', pad_inches=.25)
def evaluate(
model: Path,
target: str,
wavelength: float,
x_voxel_size: float,
y_voxel_size: float,
z_voxel_size: float,
batch_size: int,
cpu_workers: int,
n_samples: int = 100,
dist: str = 'mixed',
metric='peak2peak',
plot=False,
dominant_modes=None
):
means, stds = {}, {}
if plot:
save_path = model / "eval"
save_path.mkdir(exist_ok=True, parents=True)
for p in sorted(save_path.glob(f'{target}*.csv')):
df = pd.read_csv(p, header=0, index_col=0)
bits = p.stem.split('_')
try:
var, snr = round(float(bits[2]), 3), int(bits[4])
except ValueError:
var, snr = round(float(bits[3]), 3), int(bits[5])
logger.info(f"PSNR: {snr}, {target}: {var}")
if means.get(snr) is None:
means[snr] = {var: df[metric].mean()}
stds[snr] = {var: df[metric].std()}
else:
means[snr].update({var: df[metric].mean()})
stds[snr].update({var: df[metric].std()})
else:
n_batches = n_samples // batch_size
save_path = model / "eval"
save_path.mkdir(exist_ok=True, parents=True)
model = backend.load(model)
model.summary()
psnrs = sorted(set([int(t) for t in np.linspace(1, 100, num=10).round(0)]))
pconfigs = dict(
amplitude_ranges=[
dict(
snr=p,
amplitude_ranges=a,
distribution=dist,
lam_detection=wavelength,
batch_size=batch_size,
x_voxel_size=x_voxel_size,
y_voxel_size=y_voxel_size,
z_voxel_size=z_voxel_size,
max_jitter=0,
cpu_workers=cpu_workers,
n_modes=model.output_shape[-1],
psf_shape=tuple(3 * [model.input_shape[-2]]),
)
for p in psnrs for a in np.linspace(0.01, .3, num=7).round(3)
],
max_jitter=[
dict(
snr=p,
max_jitter=j,
distribution=dist,
lam_detection=wavelength,
batch_size=batch_size,
x_voxel_size=x_voxel_size,
y_voxel_size=y_voxel_size,
z_voxel_size=z_voxel_size,
amplitude_ranges=(-.3, .3),
cpu_workers=cpu_workers,
n_modes=model.output_shape[-1],
psf_shape=tuple(3 * [model.input_shape[-2]]),
)
for p in psnrs for j in np.linspace(0, 2, num=7).round(2)
],
z_voxel_size=[
dict(
snr=p,
distribution=dist,
lam_detection=wavelength,
batch_size=batch_size,
x_voxel_size=x_voxel_size,
y_voxel_size=y_voxel_size,
z_voxel_size=s,
amplitude_ranges=(-.3, .3),
max_jitter=1,
cpu_workers=cpu_workers,
n_modes=model.output_shape[-1],
psf_shape=tuple(3 * [model.input_shape[-2]]),
)
for p in psnrs for s in np.linspace(.1, 1, num=7).round(2)
],
)
for psfargs in pconfigs[target]:
logger.info(psfargs)
y_pred = pd.DataFrame([], columns=['sample'])
y_true = pd.DataFrame([], columns=['sample'])
for i, (inputs, ys, snrs, zplanes, maxcounts) in zip(
range(n_batches), SyntheticPSF(**psfargs).generator(debug=True, otf=False)
):
if dominant_modes is not None:
# normalized contribution
df = pd.DataFrame(np.abs(ys))
df = df.div(df.sum(axis=1), axis=0)
# dominant modes
dmodes = (df[df > .05]).count(axis=1)
dmodes = dmodes >= dominant_modes
toi = df[dmodes].index
inputs = inputs[toi]
ys = ys[toi]
if model.name.lower() != 'phasenet':
inputs = np.stack(utils.multiprocess(
func=partial(preprocessing.embedding, ishape=model.input_shape[-2]),
jobs=inputs,
desc='Preprocessing',
cores=cpu_workers
),
axis=0
)
# vol = np.squeeze(inputs[0], axis=-1)
# fig, axes = plt.subplots(1, 3, figsize=(6, 6))
# for i in range(3):
# m = axes[i].imshow(vol[i], cmap='Spectral_r', vmin=0, vmax=1)
# plt.show()
preds, stdev = backend.bootstrap_predict(
model,
inputs,
batch_size=batch_size,
n_samples=1,
desc=f"Predictions for ({(psfargs[target], psfargs['snr'])})"
)
y_pred = y_pred.append(pd.DataFrame(utils.peak2peak(preds), columns=['sample']), ignore_index=True)
y_true = y_true.append(pd.DataFrame(utils.peak2peak(ys), columns=['sample']), ignore_index=True)
error = np.abs(y_true - y_pred)
error = pd.DataFrame(error, columns=['sample'])
error.to_csv(f"{save_path}/{target}_{psfargs[target]}_snr_{psfargs['snr']}.csv")
if target == 'amplitude_ranges':
bins = np.arange(0, y_true['sample'].max() + .25, .25)
df = pd.DataFrame(zip(y_true['sample'], error['sample']), columns=['aberration', 'error'])
df['bins'] = pd.cut(df['aberration'], bins, labels=bins[1:], include_lowest=True)
if means.get(psfargs['snr']) is None:
means[psfargs['snr']] = df
else:
means[psfargs['snr']] = means[psfargs['snr']].append(df, ignore_index=True)
else:
if means.get(psfargs['snr']) is None:
means[psfargs['snr']] = {psfargs[target]: error['sample'].mean()}
else:
means[psfargs['snr']].update({psfargs[target]: error['sample'].mean()})
if target == 'amplitude_ranges':
for k, df in means.items():
means[k] = df.groupby('bins').mean()
means[k] = means[k]['error'].to_dict()
means = pd.DataFrame.from_dict(means)
means = means.reindex(sorted(means.columns), axis=1)
means = means.sort_index().interpolate()
logger.info(means)
vis.plot_eval(
means,
wavelength=wavelength,
nsamples=n_samples,
save_path=f"{save_path}/{target}_{dist}_{metric}_dmodes{dominant_modes}",
label=target
)
def compare_models(
modelsdir: Path,
wavelength: float,
x_voxel_size: float,
y_voxel_size: float,
z_voxel_size: float,
psf_shape: tuple,
n_samples: int,
batch_size: int,
cpu_workers: int,
metric='peak2peak',
iterations: int = 10,
):
n_batches = n_samples // batch_size
models = [p for p in modelsdir.iterdir() if p.is_dir()]
errors = [{m.stem: {} for m in models} for _ in range(iterations)]
peak2peak = []
scales = sorted(set([int(t) for t in np.logspace(1, 2, num=3)]))
logger.info(f"Models: {models}")
logger.info(f"PSNRs: {scales}")
modes = backend.load(models[0]).layers[-1].output_shape[-1]
eval_distribution = 'mixed'
for snr in scales:
psfargs = dict(
n_modes=modes,
distribution=eval_distribution,
lam_detection=wavelength,
amplitude_ranges=(-.3, .3),
psf_shape=psf_shape,
x_voxel_size=x_voxel_size,
y_voxel_size=y_voxel_size,
z_voxel_size=z_voxel_size,
batch_size=batch_size,
snr=snr,
max_jitter=1,
cpu_workers=cpu_workers,
)
target_psnr = None
for _, (psfs, ys, psnrs, zplanes, maxcounts) in zip(range(n_batches),
SyntheticPSF(**psfargs).generator(debug=True)):
psnr_pct = np.ceil(np.nanquantile(psnrs, .75))
if target_psnr is None:
target_psnr = int(psnr_pct)
else:
target_psnr = int(np.mean([psnr_pct, target_psnr]))
for m in models:
model = backend.load(m)
preds = iterative_eval(
model,
inputs=psfs,
outputs=ys,
batch_size=batch_size,
psfargs=psfargs,
desc=f"{m.stem}, PSNR({int(target_psnr)})",
iterations=iterations,
)
for i in range(iterations):
ps = preds[0] if i == 0 else np.sum([preds[k] for k in range(i + 1)], axis=0)
y_pred = pd.DataFrame(utils.peak2peak(ps), columns=['sample'])
y_true = pd.DataFrame(utils.peak2peak(ys), columns=['sample'])
# drop aberrations below diffraction limit
idx = y_true.index[y_true['sample'] >= .5]
y_true = y_true.loc[idx]
y_pred = y_pred.loc[idx]
peak2peak.extend(list(y_true['sample'].values))
if errors[i][m.stem].get(target_psnr) is not None:
errors[i][m.stem][target_psnr] = np.mean([
errors[i][m.stem][target_psnr], np.nanmean(np.abs(y_true - y_pred))
])
else:
errors[i][m.stem][target_psnr] = np.nanmean(np.abs(y_true - y_pred))
for i in range(iterations):
error = pd.DataFrame(errors[i])
error = error.reindex(sorted(error.columns), axis=1)
logger.info(error)
vis.plot_models(
error,
wavelength=wavelength,
nsamples=n_samples,
save_path=f"{modelsdir}/{eval_distribution}_psnr_{metric}_iter{i + 1}",
label=r'Peak signal-to-noise ratio'
)
plt.figure(figsize=(6, 6))
plt.hist(peak2peak, bins=100)
plt.grid()
plt.xlabel(
'Peak-to-peak aberration $|P_{95} - P_{5}|$'
rf'($\lambda = {int(wavelength*1000)}~nm$)'
)
plt.ylabel(rf'Number of samples')
plt.savefig(f'{modelsdir}/{eval_distribution}_peak2peak.png', dpi=300, bbox_inches='tight', pad_inches=.25)
def compare_models_and_modes(
modelsdir: Path,
wavelength: float,
x_voxel_size: float,
y_voxel_size: float,
z_voxel_size: float,
psf_shape: tuple,
n_samples: int,
batch_size: int,
cpu_workers: int,
iterations: int = 5,
psnr: int = 30
):
n_batches = n_samples // batch_size
models = [p for p in modelsdir.iterdir() if p.is_dir()]
errors = [{m.stem: {} for m in models} for _ in range(iterations)]
logger.info(f"Models: {models}")
modes = backend.load(models[0]).layers[-1].output_shape[-1]
eval_distribution = 'mixed'
psfargs = dict(
n_modes=modes,
distribution=eval_distribution,
lam_detection=wavelength,
amplitude_ranges=.3,
psf_shape=psf_shape,
x_voxel_size=x_voxel_size,
y_voxel_size=y_voxel_size,
z_voxel_size=z_voxel_size,
batch_size=batch_size,
snr=psnr,
max_jitter=1,
cpu_workers=cpu_workers,
)
for _, (psfs, ys, psnrs, zplanes, maxcounts) in zip(
range(n_batches), SyntheticPSF(**psfargs).generator(debug=True)
):
for m in models:
model = backend.load(m)
preds = iterative_eval(
model,
inputs=psfs,
outputs=ys,
batch_size=batch_size,
psfargs=psfargs,
desc=f"{m.stem}, PSNR({int(psnr)})",
iterations=iterations,
)
for i in range(iterations):
ps = preds[0] if i == 0 else np.sum([preds[k] for k in range(i + 1)], axis=0)
y_pred = pd.DataFrame(utils.microns2waves(ps, wavelength),
columns=[f'Z{i}' for i in range(1, modes + 1)])
y_true = pd.DataFrame(utils.microns2waves(ys, wavelength),
columns=[f'Z{i}' for i in range(1, modes + 1)])
residuals = np.abs(y_true - y_pred)
residuals['model'] = m.stem
errors[i][m.stem] = residuals
if errors[i].get(m.stem) is not None:
errors[i][m.stem] = pd.concat([errors[i][m.stem], residuals], ignore_index=True)
else:
errors[i][m.stem] = residuals
for i in range(iterations):
res = pd.concat(errors[i], ignore_index=True)
logger.info(res)
vis.plot_residuals_per_mode(
res,
wavelength=wavelength,
nsamples=n_samples,
save_path=f"{modelsdir}/{eval_distribution}_modes_iter{i + 1}",
)
def synthatic_convergence(
modelsdir: Path,
wavelength: float,
x_voxel_size: float,
y_voxel_size: float,
z_voxel_size: float,
psf_shape: tuple,
n_samples: int,
batch_size: int,
cpu_workers: int,
psnr: int = 30,
tolerance: float = 1e-2,
patience: int = 5,
max_iters: int = 15,
eval_distribution: str = 'zipf',
):
n_batches = n_samples // batch_size
models = [p for p in modelsdir.iterdir() if p.is_dir()]
logger.info(f"Models: {models}")
modes = backend.load(models[0]).layers[-1].output_shape[-1]
errors = pd.DataFrame([], columns=['niter', 'model', 'residuals'])
avgs = None
psfargs = dict(
n_modes=modes,
distribution=eval_distribution,
lam_detection=wavelength,
amplitude_ranges=(0, .3),
psf_shape=psf_shape,
x_voxel_size=x_voxel_size,
y_voxel_size=y_voxel_size,
z_voxel_size=z_voxel_size,
batch_size=batch_size,
snr=psnr,
max_jitter=1,
cpu_workers=cpu_workers,
)
gen = SyntheticPSF(**psfargs)
for _, (psfs, ys, psnrs, zplanes, maxcounts) in zip(range(n_batches), gen.generator(debug=True)):
for m in models:
model = backend.load(m)
niter = 0
inputs = psfs
outputs = ys
predictions = {}
converged = False
while not converged:
if model.name.lower() != 'phasenet':
inputs = np.stack(utils.multiprocess(
func=partial(preprocessing.embedding, ishape=model.input_shape[-2]),
jobs=inputs,
desc='Preprocessing',
cores=cpu_workers
),
axis=0
)
predictions[niter], stdev = backend.bootstrap_predict(
model,
inputs,
batch_size=batch_size,
desc=f"iter {niter} - {m.stem}, PSNR({int(psnr)})"
)
# compute error
ps = predictions[0] if niter == 0 else np.sum([predictions[k] for k in range(niter + 1)], axis=0)
y_pred = pd.DataFrame(utils.peak2peak(ps), columns=['residuals'])
y_true = pd.DataFrame(utils.peak2peak(ys), columns=['residuals'])
# drop aberrations below diffraction limit
idx = y_true.index[y_true['residuals'] >= .25]
y_true = y_true.loc[idx]
y_pred = y_pred.loc[idx]
err = np.abs(y_true - y_pred)
err['model'] = m.stem
err['niter'] = niter
current_mean = err['residuals'].mean()
errors = pd.concat([errors, err], ignore_index=True)
avgs = errors.groupby(['niter', 'model']).mean().reset_index()
# check if converged
if niter >= max_iters \
or current_mean <= .05 \
or avgs[avgs['model'] == m.stem].shape[0] > 1 \
and np.allclose(avgs[avgs['model'] == m.stem]['residuals'].tail(patience).values,
current_mean, rtol=tolerance, atol=tolerance):
converged = True
# setup next iter
res = outputs - predictions[niter]
g = partial(
gen.single_psf,
zplanes=0,
normed=True,
noise=True,
)
inputs = np.expand_dims(np.stack(gen.batch(g, res), axis=0), -1)
outputs = res
niter += 1
logger.info(avgs)
errors.to_csv(f"{modelsdir}/{eval_distribution}_iters_residuals.csv", index=False)
vis.plot_convergence(
avgs,
wavelength=wavelength,
nsamples=n_samples,
save_path=f"{modelsdir}/{eval_distribution}_iters",
)
def convergence(
modelsdir: Path,
datadir: Path,
wavelength: float,
n_samples: int,
batch_size: int,
cpu_workers: int,
psf_shape: tuple,
psnr: int = 50,
amplitude: int = 2,
max_iters: int = 10,
x_voxel_size: float = .15,
y_voxel_size: float = .15,
z_voxel_size: float = .6,
):
classes = sorted([
c for c in Path(datadir).rglob('*/')
if c.is_dir()
and len(list(c.glob('*.tif'))) > 0
and f'psnr{psnr - 9}-{psnr}' in str(c)
and f'p{amplitude - 1}9-p{amplitude}' in str(c)
])
models = [p for p in modelsdir.iterdir() if p.is_dir()]
logger.info(f"Models: {models}")
modes = backend.load(models[0]).layers[-1].output_shape[-1]
errors = pd.DataFrame([], columns=['niter', 'model', 'residuals'])
psfargs = dict(
n_modes=modes,
lam_detection=wavelength,
amplitude_ranges=(0, .3),
psf_shape=psf_shape,
x_voxel_size=x_voxel_size,
y_voxel_size=y_voxel_size,
z_voxel_size=z_voxel_size,
batch_size=batch_size,
snr=(psnr - 9, psnr),
max_jitter=0,
cpu_workers=cpu_workers,
)
for m in tqdm(models, desc='Evaluating', unit='model'):
model = backend.load(m)
for c in tqdm(classes, unit='class'):
val = data_utils.load_dataset(c, samplelimit=n_samples)
val = val.map(lambda x: tf.py_function(data_utils.get_sample, [x], [tf.float32, tf.float32]))
for inputs, ys in val.batch(n_samples):
predictions = iterative_eval(
model,
inputs=inputs,
outputs=ys.numpy(),
batch_size=n_samples,
iterations=max_iters,
psfargs=psfargs,
desc=f"Predictions for [{m.stem}] ({c})"
)
y_pred = pd.DataFrame(predictions, columns=['niter', 'residuals'])
y_pred['model'] = m.stem
y_pred['class'] = c
errors = pd.concat([errors, y_pred], ignore_index=True)
logger.info(errors)
avgs = errors.groupby(['niter', 'model']).mean().reset_index()
logger.info(avgs)
errors.to_csv(f"{modelsdir}/iters_residuals_psnr{psnr - 9}-{psnr}_p{amplitude - 1}9-p{amplitude}.csv", index=False)
vis.plot_convergence(
avgs,
wavelength=wavelength,
nsamples=n_samples * len(classes),
psnr=f'{psnr - 9}-{psnr}',
save_path=f"{modelsdir}/iters_psnr{psnr - 9}-{psnr}_p{amplitude - 1}9-p{amplitude}",
)
def eval_mode(phi, model, psfargs):
gen = SyntheticPSF(**psfargs)
model = backend.load(model)
input_shape = model.layers[0].output_shape[0][1:-1]
w = Wavefront(phi, order='ansi')
abr = 0 if np.count_nonzero(phi) == 0 else round(utils.peak_aberration(phi))
if input_shape[0] == 3:
inputs = gen.single_otf(
w, zplanes=0, normed=True, noise=True, midslice=True, na_mask=True, ratio=True, augmentation=True
)
else:
inputs = gen.single_psf(w, zplanes=0, normed=True, noise=True, augmentation=True)
# fig, axes = plt.subplots(1, 3)
# img = inputs
# m = axes[0].imshow(np.max(img, axis=0), cmap='Spectral_r', vmin=0, vmax=1)
# axes[1].imshow(np.max(img, axis=1), cmap='Spectral_r', vmin=0, vmax=1)
# axes[2].imshow(np.max(img, axis=2), cmap='Spectral_r', vmin=0, vmax=1)
# cax = inset_axes(axes[2], width="10%", height="100%", loc='center right', borderpad=-3)
# cb = plt.colorbar(m, cax=cax)
# cax.yaxis.set_label_position("right")
# plt.show()
inputs = np.expand_dims(np.stack(inputs, axis=0), 0)
inputs = np.expand_dims(np.stack(inputs, axis=0), -1)
pred, stdev = backend.bootstrap_predict(
model,
inputs,
batch_size=1,
n_samples=1,
desc=f"P2P({abr}), PSNR({int(psfargs['snr'])})"
)
phi = utils.peak_aberration(phi)
pred = utils.peak_aberration(pred)
residuals = np.abs(phi - pred)
return residuals
def evaluate_modes(
model,
wavelength=.605,
n_modes=60,
psf_shape=64,
psnr=30,
x_voxel_size=.15,
y_voxel_size=.15,
z_voxel_size=.6,
):
gen = dict(
amplitude_ranges=(-1, 1),
n_modes=n_modes,
lam_detection=wavelength,
psf_shape=tuple(3 * [psf_shape]),
x_voxel_size=x_voxel_size,
y_voxel_size=y_voxel_size,
z_voxel_size=z_voxel_size,
snr=psnr,
max_jitter=0,
cpu_workers=-1,
)
residuals = {}
waves = np.arange(0, .5, step=.05)
for i in range(5, n_modes):
residuals[i] = {}
jobs = np.zeros((len(waves), n_modes))
jobs[:, i] = waves
res = utils.multiprocess(
partial(eval_mode, model=model, psfargs=gen),
jobs=list(jobs),
cores=1
)
residuals[i] = {round(utils.peak_aberration(jobs[k, :])): res[k] for k in range(len(waves))}
df = pd.DataFrame.from_dict(residuals[i], orient="index")
logger.info(df)
vis.plot_mode(
f'{model}/res_mode_{i}.png',
df,
mode_index=i,
n_modes=n_modes,
wavelength=wavelength
)
def eval_bin(datapath, modelpath):
model = backend.load(modelpath)
val = data_utils.load_dataset(datapath)
val = val.map(lambda x: tf.py_function(data_utils.get_sample, [x], [tf.float32, tf.float32]))
y_pred = pd.DataFrame([], columns=['sample'])
y_true = pd.DataFrame([], columns=['sample'])
for inputs, ys in val.batch(100):
preds, stdev = backend.bootstrap_predict(
model,
inputs,
batch_size=100,
n_samples=1,
desc=f"Predictions for ({datapath})"
)
p = pd.DataFrame([utils.peak_aberration(i) for i in preds], columns=['sample'])
y_pred = y_pred.append(p, ignore_index=True)
y = pd.DataFrame([utils.peak_aberration(i) for i in ys.numpy()], columns=['sample'])
y['snr'] = int(np.mean(list(map(int, datapath.parent.stem.lstrip('psnr_').split('-')))))
y_true = y_true.append(y, ignore_index=True)
return (y_pred, y_true)
def evalheatmap(modelpath: Path, datadir: Path, distribution: str = '/'):
plt.rcParams.update({
'font.size': 10,
'axes.titlesize': 12,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
'xtick.major.pad': 10
})
classes = sorted([
c for c in Path(datadir).rglob('*/')
if c.is_dir() and len(list(c.glob('*.tif'))) > 0 and distribution in str(c)
])
job = partial(eval_bin, modelpath=modelpath)
preds, ys = zip(*utils.multiprocess(job, classes))
y_true = pd.DataFrame([], columns=['sample']).append(ys, ignore_index=True)
y_pred = pd.DataFrame([], columns=['sample']).append(preds, ignore_index=True)
error =
|
np.abs(y_true - y_pred)
|
numpy.abs
|
#!/usr/bin/env python
# coding: utf-8
import os
import sys
import subprocess
from pathlib import Path
import csv
import numpy as np
import itk
from itk import TubeTK as tube
def scv_is_bundled():
return getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS')
def scv_get_perfusion_toolbox_path():
if scv_is_bundled():
return os.path.join(sys._MEIPASS, 'StroCoVess', 'perfusion_toolbox')
return os.path.join(os.path.dirname(os.path.realpath(__file__)),
'perfusion_toolbox')
#################
#################
#################
#################
#################
def scv_convert_ctp_to_cta(filenames,
report_progress=print,
debug=False,
output_dirname="."):
filenames.sort()
num_images = len(filenames)
base_im = itk.imread(filenames[num_images//2],itk.F)
base_spacing = base_im.GetSpacing()
progress_percent = 10
report_progress("Reading images",progress_percent)
Dimension = 3
PixelType = itk.ctype('float')
ImageType = itk.Image[PixelType,Dimension]
imdatamax = itk.GetArrayFromImage(base_im)
imdatamin = imdatamax
if output_dirname!=None and not os.path.exists(output_dirname):
os.mkdir(output_dirname)
progress_percent = 20
progress_per_file = 70/num_images
for imNum in range(num_images):
imMoving = itk.imread(filenames[imNum],itk.F)
if imMoving.shape != base_im.shape:
resample = tube.ResampleImage.New(Input=imMoving)
resample.SetMatchImage(base_im)
resample.Update()
imMovingIso = resample.GetOutput()
progress_label = "Resampling "+str(imNum)+" of "+str(num_images)
report_progress(progress_label,progress_percent)
else:
imMovingIso = imMoving
imdataTmp = itk.GetArrayFromImage(imMovingIso)
imdatamax = np.maximum(imdatamax,imdataTmp)
imdataTmp = np.where(imdataTmp==-1024,imdatamin,imdataTmp)
imdatamin = np.minimum(imdatamin,imdataTmp)
progress_percent += progress_per_file
progress_label = "Integrating "+str(imNum)+" of "+str(num_images)
report_progress(progress_label,progress_percent)
report_progress("Generating CT, CTA, and CTP",90)
ct = itk.GetImageFromArray(imdatamin)
ct.CopyInformation(base_im)
cta = itk.GetImageFromArray(imdatamax)
cta.CopyInformation(base_im)
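    # The voxel-wise minimum over the time series approximates a non-contrast
    # CT, the maximum approximates the CTA, and their difference below acts as
    # a DSA-like map; the 4-voxel border is zeroed, presumably to suppress
    # resampling artifacts at the volume edges.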
diff = imdatamax-imdatamin
diff[:4,:,:] = 0
diff[-4:,:,:] = 0
diff[:,:4,:] = 0
diff[:,-4:,:] = 0
diff[:,:,:4] = 0
diff[:,:,-4:] = 0
dsa = itk.GetImageFromArray(diff)
dsa.CopyInformation(base_im)
report_progress("Done",100)
return ct,cta,dsa
#################
#################
#################
#################
#################
def scv_segment_brain_from_ct(ct_image,
report_progress=print,
debug=False):
ImageType = itk.Image[itk.F,3]
LabelMapType = itk.Image[itk.UC,3]
report_progress("Threshold",5)
thresh = tube.ImageMath.New(Input=ct_image)
thresh.IntensityWindow(-50,6000,0,6000)
imgt = thresh.GetOutput()
thresh.ReplaceValuesOutsideMaskRange(imgt,1,6000,0)
thresh.ReplaceValuesOutsideMaskRange(imgt,0,700,1)
tmpimg = thresh.GetOutput()
thresh.ReplaceValuesOutsideMaskRange(tmpimg,0,1,2)
ct_tmp = thresh.GetOutput()
report_progress("Initial Mask",10)
maskMath = tube.ImageMath.New(Input=ct_tmp)
# remove skin
maskMath.Dilate(15,0,1)
# shrink brain
maskMath.Erode(6,2,0)
# restore skull
maskMath.ReplaceValueWithinMaskRange(ct_tmp,1,1,0,1)
# shrink skull
maskMath.Dilate(3,1,0)
maskMath.Erode(1,1,0)
comboSeed = maskMath.GetOutputUChar()
report_progress("Connected Component",20)
segmenter = tube.SegmentConnectedComponentsUsingParzenPDFs[ImageType,
LabelMapType].New()
segmenter.SetFeatureImage( ct_image )
segmenter.SetInputLabelMap( comboSeed )
segmenter.SetObjectId( 2 )
segmenter.AddObjectId( 1 )
segmenter.SetVoidId( 0 )
segmenter.SetErodeDilateRadius( 20 )
segmenter.SetHoleFillIterations( 40 )
segmenter.Update()
segmenter.ClassifyImages()
brainMaskRaw = segmenter.GetOutputLabelMap()
report_progress("Masking",60)
maskMath = itk.CastImageFilter[LabelMapType, ImageType].New()
maskMath.SetInput(brainMaskRaw)
maskMath.Update()
brainMaskF = maskMath.GetOutput()
maskMath = tube.ImageMath.New(Input = brainMaskF)
maskMath.Threshold(2,2,1,0)
maskMath.Dilate(2,1,0)
maskMath.Erode(3,1,0)
brainMaskRaw2 = maskMath.GetOutputUChar()
connComp = tube.SegmentConnectedComponents.New(Input=brainMaskRaw2)
connComp.SetKeepOnlyLargestComponent(True)
connComp.Update()
brainMask = connComp.GetOutput()
report_progress("Finishing",90)
cast = itk.CastImageFilter[LabelMapType,ImageType].New()
cast.SetInput(brainMask)
cast.Update()
brainMaskF = cast.GetOutput()
brainMath = tube.ImageMath[ImageType].New(Input=ct_image)
brainMath.ReplaceValuesOutsideMaskRange( brainMaskF,1,1,-1024)
ct_brain_image = brainMath.GetOutput()
report_progress("Done",100)
return ct_brain_image, brainMask
#################
#################
#################
#################
#################
def scv_enhance_vessels_in_cta(cta_image,
cta_roi_image,
report_progress=print,
debug=False ):
ImageType = itk.Image[itk.F,3]
LabelMapType = itk.Image[itk.UC,3]
report_progress("Masking",5)
imMath = tube.ImageMath.New(Input=cta_roi_image)
imMath.Threshold( 0.00001,4000,1,0)
imMath.Erode(10,1,0)
imBrainMaskErode = imMath.GetOutput()
imMath.SetInput(cta_roi_image)
imMath.IntensityWindow(0,300,0,300)
imMath.ReplaceValuesOutsideMaskRange(imBrainMaskErode,0.5,1.5,0)
imBrainErode = imMath.GetOutput()
spacing = cta_image.GetSpacing()[0]
report_progress("Blurring",10)
imMath = tube.ImageMath[ImageType].New()
imMath.SetInput(imBrainErode)
imMath.Blur(1.5*spacing)
imBlur = imMath.GetOutput()
imBlurArray = itk.GetArrayViewFromImage(imBlur)
report_progress("Generating Seeds",20)
numSeeds = 15
seedCoverage = 20
seedCoord = np.zeros([numSeeds,3])
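    # Seeds are picked greedily: each pass takes the brightest remaining voxel
    # of the blurred image and then zeroes a (2*seedCoverage)-wide neighbourhood
    # around it, so later seeds land in different regions of the brain.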
for i in range(numSeeds):
seedCoord[i] = np.unravel_index(np.argmax(imBlurArray,
axis=None),imBlurArray.shape)
indx = [int(seedCoord[i][0]),int(seedCoord[i][1]),
int(seedCoord[i][2])]
        minX = max(indx[0]-seedCoverage,0)
        maxX = min(indx[0]+seedCoverage,imBlurArray.shape[0])
        minY = max(indx[1]-seedCoverage,0)
        maxY = min(indx[1]+seedCoverage,imBlurArray.shape[1])
        minZ = max(indx[2]-seedCoverage,0)
        maxZ = min(indx[2]+seedCoverage,imBlurArray.shape[2])
imBlurArray[minX:maxX,minY:maxY,minZ:maxZ]=0
indx.reverse()
seedCoord[:][i] = cta_roi_image.TransformIndexToPhysicalPoint(indx)
report_progress("Segmenting Initial Vessels",30)
vSeg = tube.SegmentTubes.New(Input=cta_roi_image)
vSeg.SetVerbose(debug)
vSeg.SetMinRoundness(0.4)
vSeg.SetMinCurvature(0.002)
vSeg.SetRadiusInObjectSpace( 1 )
for i in range(numSeeds):
progress_label = "Vessel "+str(i)+" of "+str(numSeeds)
progress_percent = i/numSeeds*20+30
report_progress(progress_label,progress_percent)
vSeg.ExtractTubeInObjectSpace( seedCoord[i],i )
tubeMaskImage = vSeg.GetTubeMaskImage()
imMath.SetInput(tubeMaskImage)
imMath.AddImages(cta_roi_image,200,1)
blendIm = imMath.GetOutput()
report_progress("Computing Training Mask",50)
trMask = tube.ComputeTrainingMask[ImageType,LabelMapType].New()
trMask.SetInput( tubeMaskImage )
trMask.SetGap( 4 )
trMask.SetObjectWidth( 1 )
trMask.SetNotObjectWidth( 1 )
trMask.Update()
fgMask = trMask.GetOutput()
report_progress("Enhancing Image",70)
enhancer = tube.EnhanceTubesUsingDiscriminantAnalysis[ImageType,
LabelMapType].New()
enhancer.AddInput( cta_image )
enhancer.SetLabelMap( fgMask )
enhancer.SetRidgeId( 255 )
enhancer.SetBackgroundId( 128 )
enhancer.SetUnknownId( 0 )
enhancer.SetTrainClassifier(True)
enhancer.SetUseIntensityOnly(True)
enhancer.SetScales([0.75*spacing,2*spacing,6*spacing])
enhancer.Update()
enhancer.ClassifyImages()
report_progress("Finalizing",90)
imMath = tube.ImageMath[ImageType].New()
imMath.SetInput(enhancer.GetClassProbabilityImage(0))
imMath.Blur(0.5*spacing)
prob0 = imMath.GetOutput()
imMath.SetInput(enhancer.GetClassProbabilityImage(1))
imMath.Blur(0.5*spacing)
prob1 = imMath.GetOutput()
cta_vess = itk.SubtractImageFilter(Input1=prob0, Input2=prob1)
imMath.SetInput(cta_roi_image)
imMath.Threshold(0.0000001,2000,1,0)
imMath.Erode(2,1,0)
imBrainE = imMath.GetOutput()
imMath.SetInput(cta_vess)
imMath.ReplaceValuesOutsideMaskRange(imBrainE,1,1,-0.001)
cta_roi_vess = imMath.GetOutput()
report_progress("Done",100)
return cta_vess,cta_roi_vess
#################
#################
#################
#################
#################
def scv_extract_vessels_from_cta(cta_image,
cta_roi_vessels_image,
report_progress=print,
debug=False,
output_dirname="."):
if output_dirname!=None and not os.path.exists(output_dirname):
os.mkdir(output_dirname)
spacing = cta_image.GetSpacing()[0]
report_progress("Thresholding",5)
imMath = tube.ImageMath.New(cta_roi_vessels_image)
imMath.MedianFilter(1)
imMath.Threshold(0.00000001,9999,1,0)
vess_mask_im = imMath.GetOutputShort()
if debug and output_dirname!=None:
itk.imwrite(vess_mask_im,
output_dirname+"/extract_vessels_mask.mha",
compression=True)
report_progress("Connecting",10)
ccSeg = tube.SegmentConnectedComponents.New(vess_mask_im)
ccSeg.SetMinimumVolume(50)
ccSeg.Update()
vess_mask_cc_im = ccSeg.GetOutput()
if debug and output_dirname!=None:
itk.imwrite(vess_mask_cc_im,
output_dirname+"/extract_vessels_mask_cc.mha",
compression=True)
imMathSS = tube.ImageMath.New(vess_mask_cc_im)
imMathSS.Threshold(0,0,1,0)
vess_mask_inv_im = imMathSS.GetOutputFloat()
report_progress("Filling in",20)
distFilter = itk.DanielssonDistanceMapImageFilter.New(vess_mask_inv_im)
distFilter.Update()
dist_map_im = distFilter.GetOutput()
report_progress("Generating seeds",30)
imMath.SetInput(dist_map_im)
imMath.Blur(0.5*spacing)
tmp = imMath.GetOutput()
# Distance map's distances are in index units, not spacing
imMath.ReplaceValuesOutsideMaskRange(tmp,0.333,10,0)
initial_radius_im = imMath.GetOutput()
if debug and output_dirname!=None:
itk.imwrite(initial_radius_im,
output_dirname+"/vessel_extraction_initial_radius.mha",
compression=True)
report_progress("Generating input",30)
imMath.SetInput(cta_image)
imMath.ReplaceValuesOutsideMaskRange(cta_roi_vessels_image,0,1000,0)
imMath.Blur(0.4*spacing)
imMath.NormalizeMeanStdDev()
imMath.IntensityWindow(-4,4,0,1000)
input_im = imMath.GetOutput()
if debug and output_dirname!=None:
itk.imwrite(input_im,
output_dirname+"/vessel_extraction_input.mha",
compression=True)
report_progress("Extracting vessels",40)
vSeg = tube.SegmentTubes.New(Input=input_im)
vSeg.SetVerbose(debug)
vSeg.SetMinCurvature(0.0001) #0
vSeg.SetMinRoundness(0.02)
vSeg.SetMinRidgeness(0.5)
vSeg.SetMinLevelness(0.0)
vSeg.SetRadiusInObjectSpace( 0.8*spacing )
vSeg.SetBorderInIndexSpace(3)
vSeg.SetSeedMask( initial_radius_im )
#vSeg.SetSeedRadiusMask( initial_radius_im )
vSeg.SetOptimizeRadius(True)
vSeg.SetUseSeedMaskAsProbabilities(True)
vSeg.SetSeedExtractionMinimumProbability(0.95) #0.99
vSeg.ProcessSeeds()
report_progress("Finalizing",90)
tubeMaskImage = vSeg.GetTubeMaskImage()
if debug and output_dirname!=None:
itk.imwrite(tubeMaskImage,
output_dirname+"/vessel_extraction_output.mha",
compression=True)
report_progress("Done",100)
return tubeMaskImage,vSeg.GetTubeGroup()
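# --- Example (not part of the original code): a hypothetical sketch chaining the
# two helpers above, assuming a CTA volume and a brain-ROI CTA volume on disk:
# cta_im = itk.imread("cta.mha", itk.F)
# cta_roi_im = itk.imread("cta_brain_roi.mha", itk.F)
# cta_vess, cta_roi_vess = scv_enhance_vessels_in_cta(cta_im, cta_roi_im)
# vess_mask, vess_group = scv_extract_vessels_from_cta(cta_im, cta_roi_vess)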
#################
#################
#################
#################
#################
def scv_register_ctp_images(fixed_image_filename,
moving_image_filenames,
output_dirname,
report_progress=print,
debug=False):
ImageType = itk.Image[itk.F,3]
num_images = len(moving_image_filenames)
progress_percent = 10
progress_per_file = 70/num_images
fixed_im = itk.imread(fixed_image_filename,itk.F)
fixed_im_spacing = fixed_im.GetSpacing()
if fixed_im_spacing[0] != fixed_im_spacing[1] or \
fixed_im_spacing[1] != fixed_im_spacing[2]:
report_progress("Resampling",progress_percent)
resample = tube.ResampleImage.New(Input=fixed_im)
resample.SetMakeIsotropic(True)
resample.Update()
fixed_im = resample.GetOutput()
if debug:
progress_label = "DEBUG: Resampling to "+str(
fixed_im.GetSpacing())
report_progress(progress_label,progress_percent)
imMath = tube.ImageMath.New(fixed_im)
imMath.Threshold(150,800,1,0)
imMath.Dilate(10,1,0)
mask_im = imMath.GetOutputUChar()
mask_array = itk.GetArrayViewFromImage(mask_im)
mask_array[:4,:,:] = 0
mask_array[-4:,:,:] = 0
mask_obj = itk.ImageMaskSpatialObject[3].New()
mask_obj.SetImage(mask_im)
mask_obj.Update()
for imNum in range(num_images):
progress_percent += progress_per_file
progress_label = "Registering "+str(imNum)+" of "+str(num_images)
report_progress(progress_label,progress_percent)
if moving_image_filenames[imNum] != fixed_image_filename:
moving_im = itk.imread(moving_image_filenames[imNum],itk.F)
imreg = tube.RegisterImages[ImageType].New()
imreg.SetFixedImage(fixed_im)
imreg.SetMovingImage(moving_im)
imreg.SetRigidMaxIterations(100)
imreg.SetRegistration("RIGID")
imreg.SetExpectedOffsetMagnitude(5)
imreg.SetExpectedRotationMagnitude(0.05)
imreg.SetFixedImageMaskObject(mask_obj)
imreg.SetUseEvolutionaryOptimization(False)
if debug:
imreg.SetReportProgress(True)
imreg.Update()
tfm = imreg.GetCurrentMatrixTransform()
moving_reg_im = imreg.ResampleImage("SINC_INTERPOLATION",
moving_im,tfm,-1024)
if output_dirname!=None:
pname,fname = os.path.split(moving_image_filenames[imNum])
rename_file_fname = os.path.splitext(fname)
new_fname = str(rename_file_fname[0])+"_reg.nii"
new_filename = os.path.join(output_dirname,new_fname)
itk.imwrite(moving_reg_im,new_filename,compression=True)
elif output_dirname!=None:
pname,fname = os.path.split(moving_image_filenames[imNum])
rename_file_fname = os.path.splitext(fname)
new_fname = str(rename_file_fname[0])+"_reg.nii"
new_filename = os.path.join(output_dirname,new_fname)
itk.imwrite(fixed_im,new_filename,compression=True)
report_progress("Done",100)
#################
#################
#################
#################
#################
def scv_register_atlas_to_image(atlas_im, atlas_mask_im, in_im):
ImageType = itk.Image[itk.F,3]
regAtlasToIn = tube.RegisterImages[ImageType].New(FixedImage=in_im,
MovingImage=atlas_im)
regAtlasToIn.SetReportProgress(True)
regAtlasToIn.SetRegistration("PIPELINE_AFFINE")
regAtlasToIn.SetMetric("MATTES_MI_METRIC")
regAtlasToIn.SetInitialMethodEnum("INIT_WITH_IMAGE_CENTERS")
regAtlasToIn.Update()
atlas_reg_im = regAtlasToIn.ResampleImage()
atlas_mask_reg_im = regAtlasToIn.ResampleImage("NEAREST_NEIGHBOR",
atlas_mask_im)
return atlas_reg_im,atlas_mask_reg_im
#################
#################
#################
#################
#################
def scv_compute_atlas_region_stats(atlas_im,
time_im,
vess_im,
number_of_time_bins=100,
report_progress=print,
debug=False):
atlas_arr = itk.GetArrayFromImage(atlas_im)
time_arr = itk.GetArrayFromImage(time_im)
vess_arr = itk.GetArrayFromImage(vess_im)
num_regions = int(atlas_arr.max())
time_max = float(time_arr.max())
time_min = float(time_arr.min())
nbins = int(number_of_time_bins)
time_factor = (time_max-time_min)/(nbins+1)
print("Time range =",time_min,"-",time_max)
bin_value = np.zeros([num_regions,nbins])
bin_count = np.zeros([num_regions,nbins])
for atlas_region in range(num_regions):
report_progress("Masking",(atlas_region+1)*(100/num_regions))
indx_arr = np.where(atlas_arr==atlas_region)
indx_list = list(zip(indx_arr[0],indx_arr[1],indx_arr[2]))
for indx in indx_list:
time_bin = int((time_arr[indx]-time_min)/time_factor)
time_bin = min(max(0,time_bin),nbins-1)
if
|
np.isnan(vess_arr[indx])
|
numpy.isnan
|
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from scipy.sparse import rand
from scipy.sparse.linalg import lsqr
from pylops.utils import dottest
from pylops.basicoperators import Regression, LinearRegression, MatrixMult, \
Identity, Zero, Flip, Symmetrize, Roll, Sum, Real, Imag, Conj
par1 = {'ny': 11, 'nx': 11, 'imag': 0,
'dtype':'float64'} # square real
par2 = {'ny': 21, 'nx': 11, 'imag': 0,
'dtype':'float64'} # overdetermined real
par1j = {'ny': 11, 'nx': 11, 'imag': 1j,
'dtype':'complex128'} # square complex
par2j = {'ny': 21, 'nx': 11, 'imag': 1j,
'dtype':'complex128'} # overdetermined complex
par3 = {'ny': 11, 'nx': 21, 'imag': 0,
'dtype':'float64'} # underdetermined real
np.random.seed(10)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_Regression(par):
"""Dot-test, inversion and apply for Regression operator
"""
np.random.seed(10)
order = 4
t = np.arange(par['ny'], dtype=np.float32)
LRop = Regression(t, order=order, dtype=par['dtype'])
assert dottest(LRop, par['ny'], order+1)
x = np.array([1., 2., 0., 3., -1.], dtype=np.float32)
xlsqr = lsqr(LRop, LRop*x, damp=1e-10, iter_lim=300, show=0)[0]
assert_array_almost_equal(x, xlsqr, decimal=3)
y = LRop * x
y1 = LRop.apply(t, x)
assert_array_almost_equal(y, y1, decimal=3)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_LinearRegression(par):
"""Dot-test and inversion for LinearRegression operator
"""
np.random.seed(10)
t = np.arange(par['ny'], dtype=np.float32)
LRop = LinearRegression(t, dtype=par['dtype'])
assert dottest(LRop, par['ny'], 2)
x = np.array([1., 2.], dtype=np.float32)
xlsqr = lsqr(LRop, LRop*x, damp=1e-10, iter_lim=300, show=0)[0]
assert_array_almost_equal(x, xlsqr, decimal=3)
y = LRop * x
y1 = LRop.apply(t, x)
assert_array_almost_equal(y, y1, decimal=3)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_MatrixMult(par):
"""Dot-test and inversion for MatrixMult operator
"""
|
np.random.seed(10)
|
numpy.random.seed
|
from matplotlib import rcParams
rcParams["font.family"] = "sans-serif"
rcParams["font.sans-serif"] = ["Arial"]
rcParams.update({'font.size': 12})
# import matplotlib
# matplotlib.use('Agg')
from basic.binning import binning, scatter_hist
from basic.math_fn import to_1darray, oneD_gaussian, ln_oneD_gaussian, exp_survival, ln_exp_pdf, ln_gau_exp_pdf, exp_gauss_2d
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from basic.file_io import save_img
from lifelines import KaplanMeierFitter
import pandas as pd
import random
### data: (n,1)-array
class EM:
def __init__(self, data, dim=1):
self.data = data.reshape(-1, dim)
self.s_lower = 1
def skGMM(self, n_components, tolerance=10e-5):
self.n_components = n_components
data = self.data
n_sample = len(data)
gmm = GaussianMixture(n_components=n_components, tol=tolerance).fit(data)
labels = gmm.predict(data)
data_cluster = [data[labels == i] for i in range(n_components)]
p = gmm.predict_proba(data).T
f = np.sum(p, axis=1) / n_sample
m = np.matmul(p, data).ravel() / np.sum(p, axis=1)
s = np.sqrt(np.matmul(p, data ** 2).ravel() / (np.sum(p, axis=1)) - m ** 2)
self.para_progress = [f, m, s]
self.para_final = [f[-1], m[-1], s[-1]]
return f, m, s, labels, data_cluster
def GMM(self, n_components, tolerance=1e-2, rand_init=False):
"""EM algorithm with pdf=Gaussian (GMM)
Parameters
----------
n_components : int
Number of components.
tolerance : float
Convergence criteria
data : array (n_samples,1)
Returns
-------
"""
## (f,m,s) are growing array
data = self.data
self.n_components = n_components
self.tolerance = tolerance
## initialize EM parameters
f, m, s, loop, improvement = self.__init_GMM(data, n_components=n_components, rand_init=rand_init)
converged = improvement < tolerance
while (loop < 20 or ~converged) and loop < 500:
prior_prob = self.__weighting(f, m, s, function=ln_oneD_gaussian)
f, m, s = self.__update_f_m_s(data, prior_prob, f, m, s)
improvement = self.__cal_improvement(f, m, s)
converged = improvement < tolerance
loop += 1
f, m, s = self.__reshape_all(f, m, s, n_rows=loop+1, n_cols=n_components)
self.para_progress = [f, m, s]
m_f, f_f, s_f = self.__sort_according(m[-1], f[-1], s[-1])
self.para_final = [f_f, m_f, s_f]
para = self.para_final
self.__cal_LLE(data, function=ln_oneD_gaussian, para=para)
converged = np.array([converged] * n_components)
self.converged = converged
# labels, data_cluster = self.predict(data, ln_oneD_gaussian, paras=[f.ravel(), m.ravel(), s.ravel()])
return f_f, m_f, s_f, converged
def PEM(self, n_components, tolerance=1e-2, rand_init=False):
data = self.data
self.n_components = n_components
self.tolerance = tolerance
f, tau, s, loop, improvement = self.__init_PEM(data, n_components=n_components, rand_init=rand_init)
converged = improvement < tolerance
while (loop < 20 or ~converged) and loop < 500:
prior_prob = self.__weighting(f, tau, function=ln_exp_pdf)
f, tau, s = self.__update_f_m_s(data, prior_prob, f, tau, s)
improvement = self.__cal_improvement(f, tau)
converged = improvement < tolerance
loop += 1
f, tau, s = self.__reshape_all(f, tau, s, n_rows=loop+1, n_cols=n_components)
self.para_progress = [f, tau, s]
tau_f, f_f, s_f = self.__sort_according(tau[-1], f[-1], s[-1])
self.para_final = [f_f, tau_f]
para = self.para_final
ln_likelihood = self.__cal_LLE(data, function=ln_exp_pdf, para=para)
converged = np.array([converged] * n_components)
self.converged = converged
# labels, data_cluster = self.predict(data, ln_exp_pdf, paras=[f.ravel(), tau.ravel()])
return f_f, tau_f, s_f, converged, ln_likelihood
def GPEM(self, n_components, tolerance=1e-2, rand_init=False):
data = self.data ## (n_samples, 2)
x = data[:, 0] ## Gaussian R.V.
y = data[:, 1] ## Poisson R.V.
self.tolerance = tolerance
## initialize EM parameters
f1, m, s1, loop, improvement = self.__init_GMM(data[:,0], n_components=n_components, rand_init=rand_init)
f2, tau, s2, loop, improvement = self.__init_PEM(data[:,1], n_components=n_components, rand_init=rand_init)
converged = improvement < tolerance
while (loop < 20 or ~converged) and loop < 500:
prior_prob = self.__weighting(f1, m, s1, tau, function=ln_gau_exp_pdf)
f1, m, s1 = self.__update_f_m_s(data[:,0].reshape(-1,1), prior_prob, f1, m, s1)
f2, tau, s2 = self.__update_f_m_s(data[:,1].reshape(-1,1), prior_prob, f2, tau, s2)
improvement = self.__cal_improvement(f1, m, s1, tau)
converged = improvement < tolerance
loop += 1
f1, m, s1, tau = self.__reshape_all(f1, m, s1, tau, n_rows=loop+1, n_cols=n_components)
self.para_progress = [f1, m, s1, tau]
m_f, f_f, s_f, tau_f = self.__sort_according(m[-1], f1[-1], s1[-1], tau[-1])
self.para_final = [f_f, m_f, s_f, tau_f]
para = self.para_final
ln_likelihood = self.__cal_LLE(data, function=ln_gau_exp_pdf, para=para)
converged = np.array([converged] * n_components)
self.converged = converged
# labels, data_cluster = self.predict(data, function=ln_gau_exp_pdf, paras=para)
return f_f, m_f, s_f, tau_f, converged, ln_likelihood
## set given m
def GPEM_set(self, n_components, m_set, tolerance=1e-2, rand_init=False):
data = self.data ## (n_samples, 2)
x = data[:, 0] ## Gaussian R.V.
y = data[:, 1] ## Poisson R.V.
self.tolerance = tolerance
## initialize EM parameters
m_fix = m_set.copy()
f1, m, s1, loop, improvement = self.__init_GMM(data[:,0], n_components=n_components, rand_init=rand_init)
f2, tau, s2, loop, improvement = self.__init_PEM(data[:,1], n_components=n_components, rand_init=rand_init)
converged = improvement < tolerance
while (loop < 20 or ~converged) and loop < 500:
prior_prob = self.__weighting(f1, m_fix, s1, tau, function=ln_gau_exp_pdf)
m_fix = np.append(m_fix, m_set)
f1, m1_notuse, s1 = self.__update_f_m_s(data[:,0].reshape(-1,1), prior_prob, f1, m_fix, s1)
f2, tau, s2 = self.__update_f_m_s(data[:,1].reshape(-1,1), prior_prob, f2, tau, s2)
improvement = self.__cal_improvement(f1, m_fix, s1, tau)
converged = improvement < tolerance
loop += 1
f1, m_fix, s1, tau = self.__reshape_all(f1, m_fix, s1, tau, n_rows=loop+1, n_cols=n_components)
self.para_progress = [f1, m_fix, s1, tau]
m_fix_f, f_f, s_f, tau_f = self.__sort_according(m_fix[-1], f1[-1], s1[-1], tau[-1])
self.para_final = [f_f, m_fix_f, s_f, tau_f]
para = self.para_final
ln_likelihood = self.__cal_LLE(data, function=ln_gau_exp_pdf, para=para)
converged = np.array([converged] * n_components)
self.converged = converged
# labels, data_cluster = self.predict(data, function=ln_gau_exp_pdf, paras=para)
return f_f, m_fix_f, s_f, tau_f, converged, ln_likelihood
## iteratively find lowest BIC or AIC value
def opt_components_iter(self, iteration=10, tolerance=1e-2, mode='GMM', criteria='BIC', figure=False, figsize=(10, 10)):
n_all, c_all = [], []
for i in range(iteration):
n = self.opt_components(tolerance=tolerance, mode=mode, criteria=criteria, figure=figure, figsize=figsize)
if criteria == 'AIC':
c = self.AIC_owns[n - 1]
else:
c = self.BIC_owns[n - 1]
n_all = np.append(n_all, n)
c_all = np.append(c_all, c)
index = np.argmin(c_all)
return int(n_all[index])
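# --- Example (not part of the original code): hypothetical use of the iterative
# model selection above, assuming 1-D data drawn from two Gaussian components:
# em = EM(np.concatenate([np.random.normal(0, 1, 300), np.random.normal(5, 1, 300)]))
# best_n = em.opt_components_iter(iteration=5, mode='GMM', criteria='BIC')
# f, m, s, converged = em.GMM(best_n)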
def opt_components(self, tolerance=1e-2, mode='GMM', criteria='BIC', figure=False, figsize=(10,10)):
self.mode = mode
## find best n_conponents
data = self.data
BICs, AICs = [], []
BIC_owns, AIC_owns = [], []
LLE = []
n_clusters =
|
np.arange(1, 6)
|
numpy.arange
|
# coding=utf-8
import torch
from math import log10
from torch.nn import Parameter, Module, Linear, MSELoss
from torch.autograd import Variable, grad
import numpy as np
from PIL.ImageEnhance import *
def gradPenalty(D_net, real, fake, LAMBDA=10, input=None):
use_gpu = real.is_cuda and fake.is_cuda  # is_cuda is a property, not a method
batch_size = real.size()[0]
# Calculate interpolation
alpha = torch.rand(batch_size, 1, 1, 1)
alpha = alpha.expand_as(real)
alpha = alpha.cuda() if use_gpu else alpha
interpolates = alpha * real + ((1 - alpha) * fake)
if use_gpu:
interpolates = interpolates.cuda()
interpolates = Variable(interpolates, requires_grad=True)
if input is not None:
disc_interpolates = D_net(interpolates, input)
else:
disc_interpolates = D_net(interpolates)
gradients = grad(
outputs=disc_interpolates,
inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).cuda()
if use_gpu else torch.ones(disc_interpolates.size()),
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
return gradient_penalty
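# --- Example (not part of the original code): a minimal sketch of the gradient
# penalty in a WGAN-GP critic step. The critic architecture and tensor shapes
# below are hypothetical (assumes torch.nn.Flatten is available).
_example_critic = torch.nn.Sequential(torch.nn.Flatten(), Linear(3 * 8 * 8, 1))
_example_real = torch.rand(4, 3, 8, 8)
_example_fake = torch.rand(4, 3, 8, 8)
_example_gp = gradPenalty(_example_critic, _example_real, _example_fake, LAMBDA=10)
# Critic loss: Wasserstein estimate plus the gradient penalty term
_example_d_loss = _example_critic(_example_fake).mean() - _example_critic(_example_real).mean() + _example_gp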
def jcbClamp(G_net, z, lmbda_max=20, lmbda_min=1, ep=1, use_gpu=False):
""" implement of jacobin climping.
'Is Generator Conditioning Causally Related to GAN Performance?'
:param G_net: generate model
:param z: input
:param lmbda_max: default 20
:param lmbda_min: default 1
:param ep: default 1
:param use_gpu: default False
:return:
"""
lmbda_max = lmbda_max * torch.ones(1)
lmbda_min = lmbda_min * torch.ones(1)
sigma = torch.randn(z.size())
if use_gpu:
lmbda_max = lmbda_max.cuda()
lmbda_min = lmbda_min.cuda()
sigma = sigma.cuda()
sigma = sigma / torch.norm(sigma, 2) * ep
z_ = z + sigma
Q = torch.norm(G_net(z) - G_net(z_), 2) / torch.norm(z - z_, 2)
l_max = (torch.max(Q, lmbda_max) - lmbda_max) ** 2
l_min = (torch.min(Q, lmbda_min) - lmbda_min) ** 2
return (l_max + l_min).mean()
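# --- Example (not part of the original code): a minimal sketch of Jacobian
# clamping applied to a toy generator; the generator and latent size are
# hypothetical.
_example_G = Linear(16, 32)
_example_z = torch.randn(8, 16)
_example_jc = jcbClamp(_example_G, _example_z, lmbda_max=20, lmbda_min=1)
# _example_jc would be added to the generator loss as a regularizer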
def VBD(x, z, beta, ic):
x_ = 0.5 * (x + z)
KL = torch.nn.KLDivLoss()
KL = KL(x_, z)
vdb = beta * KL - ic
return vdb
def get_SNR(fake, real):
fake_sq = (fake ** 2).sum()
mse = ((fake - real) ** 2).sum()
if mse < 1e-8:
mse = 1e-8
snr = 10 * log10(fake_sq / mse)
return snr
def get_PSNR(fake, real):
fake_sq = (fake ** 2).mean()
snr = get_SNR(fake, real)
psnr = snr + 10 * log10(255 ** 2 / fake_sq)
return psnr
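# --- Example (not part of the original code): SNR/PSNR for a hypothetical
# reconstruction of an image tensor scaled to [0, 255].
_example_img_real = torch.rand(1, 3, 16, 16) * 255
_example_img_fake = _example_img_real + torch.randn(1, 3, 16, 16)
_example_snr = get_SNR(_example_img_fake, _example_img_real)
_example_psnr = get_PSNR(_example_img_fake, _example_img_real)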
class Cutout(object):
"""Randomly mask out one or more patches from an image.
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
"""
def __init__(self, n_holes, length, mask_numb=.0):
self.n_holes = n_holes
self.length = length
self.mask_numb = mask_numb
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image with n_holes of dimension length x length cut out of it.
"""
h = img.size(1)
w = img.size(2)
mask = np.ones((h, w), np.float32)
for n in range(self.n_holes):
y = np.random.randint(h)
x =
|
np.random.randint(w)
|
numpy.random.randint
|
#
# <EMAIL>
# 2017-10-10
# Code that performs a simple regression and learns embeddings
#
# the idea here is the following:
# - load the MovieLens data
# - initialize the embeddings randomly
# - find the movie and user embeddings that yield the lowest possible error
# t8: removes the per-movie and per-user biases and replaces them with a single global bias
# t9b: multilayer nn
from __future__ import division
from __future__ import print_function
from time import gmtime, strftime, localtime
import math
import time
import sys
import os
#from pylab import *
from scipy import sparse
import numpy as np
import pandas as pd
import tensorflow as tf
import random
from tensorflow.python import debug as tf_debug
NUM_USERS = 247754
NUM_MOVIES = 151712
SECOND_FEATURES = 32
batch_size = 99999
num_steps = 2000001
base_alpha = 0.0003
count =1
# Regularization
lbda = 0.002
decay = 0.9999
num_ratings = 0
TRAIN_INPUT_FILE="train-ratings-3.csv"
TEST_INPUT_FILE="validation-ratings-3.csv"
use_bias = True
use_square = True
use_second_layer = True
tf.set_random_seed(1)
round_ranking = 0
t0 = time.perf_counter()
#import tracemalloc
#tracemalloc.start()
if sys.argv[1].isdigit():
NUM_FEATURES = int(sys.argv[1])
else:
raise Exception("parameter NUM_FEATURES is required")
if len(sys.argv) < 3:
raise Exception("parameter round_ranking is required (y, Y, s, S, 1, T, t means should round down. Anything else means it shouldn't")
if sys.argv[2] in ("y", "Y", "s", "S", "1", "T", "t"):
round_ranking = 1
else:
round_ranking = 0
if len(sys.argv) < 4:
use_activation = 'linear'
# raise Exception('parameter activation is required. It can be "linear", "sigmoid" or "relu"')
else:
if sys.argv[3] in ("sigmoid" , "linear", "relu"):
use_activation = sys.argv[3]
if use_activation == 'sigmoid':
scale = 6.0
else:
scale = 1.0
def loga(msg):
now = time.perf_counter()
print("%6.2f: %s" % (now - t0, msg))
def load_data(train_fname, test_fname):
global NUM_USERS
global NUM_MOVIES
global round_ranking
global num_ratings
print("Loading data from {} and {}".format(train_fname, test_fname))
full_train_data = pd.read_csv(train_fname, sep=",").sample(frac=1)
full_test_data = pd.read_csv(test_fname, sep=",").sample(frac=1)
train_data =
|
np.array(full_train_data[["userId", "movieId"]])
|
numpy.array
|
import bezier
import csv  # load bezier.py and the csv module
import numpy as np  # import numpy under the name np
import scipy as sp  # import scipy under the name sp
from scipy import optimize  # load the optimize module from scipy
from scipy import integrate  # load the integrate module from scipy
filename = 'input1'  # name of the input file holding the control point coordinates
reader = csv.reader(open(filename + '.csv', 'r'))  # read the input file
next(reader)  # skip the header row
row = next(reader)[0:2]
n, m = int(row[0]), int(row[1])  # number of control points in the u and v directions
next(reader)  # skip one row
cp = np.zeros([n, m, 3])
for i in range(n):
for j in range(m):
cpi = next(reader)[0:3]
cp[i, j, :] = float(cpi[0]), float(cpi[1]), float(cpi[2])
limit = [
|
np.min(cp)
|
numpy.min
|
import numpy as np
from free_energy_clustering.GMM import GaussianMixture
from free_energy_clustering.GMM_free_energy import FreeEnergyClustering
class GMM2D(GaussianMixture):
def __init__(self):
GaussianMixture.__init__(self, n_components=9)
self.n_dims_ = 2
self._set_parameters()
self.name = 'GMM_2D'
return
def _set_cov(self, x11,x12,x22):
tmp_cov = np.zeros((self.n_dims_, self.n_dims_))
tmp_cov[0, 0] = x11
tmp_cov[0, 1] = x12
tmp_cov[1, 0] = x12
tmp_cov[1, 1] = x22
return tmp_cov
def _set_parameters(self):
self.means_ = np.asarray([ np.asarray([0.8,0.35]), np.asarray([0.45,0.52]), np.asarray([0.2,0.6]),
np.asarray([0.05,0.8]), np.asarray([0.5,0.25]), np.asarray([0.5,0.25]),
np.asarray([0.5, 0.25]), np.asarray([0.4, 0.34]), np.asarray([0.8,0.5])])
covs = [
|
np.zeros((2,2))
|
numpy.zeros
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 12:13:33 2018
@author: <NAME> (<EMAIL> / <EMAIL>)
"""
# Python dependencies
from __future__ import division
import pandas as pd
import numpy as np
import mpmath as mp
import matplotlib as mpl
import seaborn as sns
from scipy.constants import codata
from pylab import *
from lmfit import minimize, report_fit
# from scipy.optimize import leastsq
pd.options.mode.chained_assignment = None
# Plotting
mpl.rc("mathtext", fontset="stixsans", default="regular")
mpl.rcParams.update({"axes.labelsize": 22})
mpl.rc("xtick", labelsize=16)
mpl.rc("ytick", labelsize=16)
mpl.rc("legend", fontsize=14)
F = codata.physical_constants["Faraday constant"][0]
Rg = codata.physical_constants["molar gas constant"][0]
### Importing PyEIS add-ons
from .PyEIS_Data_extraction import *
from .PyEIS_Lin_KK import *
from .PyEIS_Advanced_tools import *
### Frequency generator
##
#
def freq_gen(f_start, f_stop, pts_decade=7):
"""
Frequency generator with log-spaced frequencies
Inputs
----------
f_start = frequency start [Hz]
f_stop = frequency stop [Hz]
pts_decade = Points/decade, default 7 [-]
Output
----------
[0] = frequency range [Hz]
[1] = Angular frequency range [1/s]
"""
f_decades = np.log10(f_start) - np.log10(f_stop)
f_range = np.logspace(
np.log10(f_start),
np.log10(f_stop),
num=np.around(pts_decade * f_decades).astype(int),
endpoint=True,
)
w_range = 2 * np.pi * f_range
return f_range, w_range
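# --- Example (not part of the original code): a minimal usage sketch, sweeping
# from 100 kHz down to 0.1 Hz with the default 7 points per decade.
_example_f_range, _example_w_range = freq_gen(f_start=1e5, f_stop=0.1)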
### Simulation Element Functions
##
#
def elem_L(w, L):
"""
Simulation Function: -L-
Returns the impedance of an inductor
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Inductance [ohm * s]
"""
return 1j * w * L
def elem_C(w, C):
"""
Simulation Function: -C-
Inputs
----------
w = Angular frequency [1/s]
C = Capacitance [F]
"""
return 1 / (C * (w * 1j))
def elem_Q(w, Q, n):
"""
Simulation Function: -Q-
Inputs
----------
w = Angular frequency [1/s]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
"""
return 1 / (Q * (w * 1j) ** n)
### Simulation Circuit Functions
##
#
def cir_RsC(w, Rs, C):
"""
Simulation Function: -Rs-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
C = Capacitance [F]
"""
return Rs + 1 / (C * (w * 1j))
def cir_RsQ(w, Rs, Q, n):
"""
Simulation Function: -Rs-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
"""
return Rs + 1 / (Q * (w * 1j) ** n)
def cir_RQ(w, R="none", Q="none", n="none", fs="none"):
"""
Simulation Function: -RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
"""
if R == "none":
R = 1 / (Q * (2 * np.pi * fs) ** n)
elif Q == "none":
Q = 1 / (R * (2 * np.pi * fs) ** n)
elif n == "none":
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
return R / (1 + R * Q * (w * 1j) ** n)
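# --- Example (not part of the original code): cir_RQ can be given (R, Q, n)
# directly, or (R, n, fs) so that Q is derived from the summit frequency fs.
# The values are hypothetical; both calls describe the same -RQ- element.
_example_Z_direct = cir_RQ(_example_w_range, R=100, Q=1e-5, n=0.9)
_example_Z_from_fs = cir_RQ(_example_w_range, R=100, n=0.9, fs=1/(2*np.pi*(100*1e-5)**(1/0.9)))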
def cir_RsRQ(w, Rs="none", R="none", Q="none", n="none", fs="none"):
"""
Simulation Function: -Rs-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
"""
if R == "none":
R = 1 / (Q * (2 * np.pi * fs) ** n)
elif Q == "none":
Q = 1 / (R * (2 * np.pi * fs) ** n)
elif n == "none":
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
return Rs + (R / (1 + R * Q * (w * 1j) ** n))
def cir_RC(w, C="none", R="none", fs="none"):
"""
Simulation Function: -RC-
Returns the impedance of an RC circuit, using the RQ definition with n=1; see cir_RQ() for details
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
C = Capacitance [F]
fs = Summit frequency of RC circuit [Hz]
"""
return cir_RQ(w, R=R, Q=C, n=1, fs=fs)
def cir_RsRQRQ(
w,
Rs,
R="none",
Q="none",
n="none",
fs="none",
R2="none",
Q2="none",
n2="none",
fs2="none",
):
"""
Simulation Function: -Rs-RQ-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
R2 = Resistance [Ohm]
Q2 = Constant phase element [s^n/ohm]
n2 = Constant phase element exponent [-]
fs2 = Summit frequency of RQ circuit [Hz]
"""
if R == "none":
R = 1 / (Q * (2 * np.pi * fs) ** n)
elif Q == "none":
Q = 1 / (R * (2 * np.pi * fs) ** n)
elif n == "none":
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
if R2 == "none":
R2 = 1 / (Q2 * (2 * np.pi * fs2) ** n2)
elif Q2 == "none":
Q2 = 1 / (R2 * (2 * np.pi * fs2) ** n2)
elif n2 == "none":
n2 = np.log(Q2 * R2) / np.log(1 / (2 * np.pi * fs2))
return (
Rs + (R / (1 + R * Q * (w * 1j) ** n)) + (R2 / (1 + R2 * Q2 * (w * 1j) ** n2))
)
def cir_RsRQQ(w, Rs, Q, n, R1="none", Q1="none", n1="none", fs1="none"):
"""
Simulation Function: -Rs-RQ-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
n1 = Constant phase element exponent in (RQ) circuit [-]
fs1 = Summit frequency of RQ circuit [Hz]
Q = Constant phase element of series Q [s^n/ohm]
n = Constant phase element exponent of series Q [-]
"""
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_Q(w, Q, n)
def cir_RsRQC(w, Rs, C, R1="none", Q1="none", n1="none", fs1="none"):
"""
Simulation Function: -Rs-RQ-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
n1 = Constant phase element exponent in (RQ) circuit [-]
fs1 = summit frequency of RQ circuit [Hz]
C = Constant phase element of series Q [s^n/ohm]
"""
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_C(w, C=C)
def cir_RsRCC(w, Rs, R1, C1, C):
"""
Simulation Function: -Rs-RC-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
C1 = Constant phase element in (RQ) circuit [s^n/ohm]
C = Capacitance of series C [s^n/ohm]
"""
return Rs + cir_RC(w, C=C1, R=R1, fs="none") + elem_C(w, C=C)
def cir_RsRCQ(w, Rs, R1, C1, Q, n):
"""
Simulation Function: -Rs-RC-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
C1 = Constant phase element in (RQ) circuit [s^n/ohm]
Q = Constant phase element of series Q [s^n/ohm]
n = Constant phase element exponent of series Q [-]
"""
return Rs + cir_RC(w, C=C1, R=R1, fs="none") + elem_Q(w, Q, n)
def Randles_coeff(
w,
n_electron,
A,
E="none",
E0="none",
D_red="none",
D_ox="none",
C_red="none",
C_ox="none",
Rg=Rg,
F=F,
T=298.15,
):
"""
Returns the Randles coefficient sigma [ohm/s^1/2].
Two cases: (a) Ox and Red are both present in solution, in which case both C_red and D_red are defined; (b) initially
only the Ox species is present in solution with bulk concentration C*_ox, in which case the surface concentrations may be calculated as a function
of the electrode potential following the Nernst equation, and C_red and D_red are passed as 'none'.
Ref.:
- Lasia, A.L., ISBN: 978-1-4614-8932-0, "Electrochemical Impedance Spectroscopy and its Applications"
- <NAME>., ISBN: 0-471-04372-9, <NAME>. (2001) "Electrochemical methods: Fundamentals and applications". New York: Wiley.
<NAME> (<EMAIL> // <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
D_ox = Diffusion coefficent of oxidized specie [cm2/s]
D_red = Diffusion coefficent of reduced specie [cm2/s]
C_ox = Bulk concentration of oxidized species [mol/cm3]
C_red = Bulk concentration of reduced species [mol/cm3]
T = Temperature [K]
Rg = Gas constant [J/molK]
F = Faraday constant [C/mol]
E = Potential [V]
if reduced specie is absent == 'none'
E0 = formal potential [V]
if reduced specie is absent == 'none'
Returns
----------
Semi-infinite Warburg impedance Z_Aw built from the Randles coefficient sigma [ohm]
"""
if C_red != "none" and D_red != "none":
sigma = ((Rg * T) / ((n_electron ** 2) * A * (F ** 2) * (2 ** (1 / 2)))) * (
(1 / (D_ox ** (1 / 2) * C_ox)) + (1 / (D_red ** (1 / 2) * C_red))
)
elif C_red == "none" and D_red == "none" and E != "none" and E0 != "none":
f = F / (Rg * T)
x = (n_electron * f * (E - E0)) / 2
func_cosh2 = (np.cosh(2 * x) + 1) / 2
sigma = (
(4 * Rg * T)
/ ((n_electron ** 2) * A * (F ** 2) * C_ox * ((2 * D_ox) ** (1 / 2)))
) * func_cosh2
else:
print("define E and E0")
Z_Aw = sigma * (w ** (-0.5)) - 1j * sigma * (w ** (-0.5))
return Z_Aw
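# --- Example (not part of the original code): Warburg impedance from the Randles
# coefficient for case (a), where both Ox and Red are present in solution.
# Diffusion coefficients and concentrations are hypothetical.
_example_Z_warburg = Randles_coeff(_example_w_range, n_electron=1, A=1.0, D_red=1e-6, D_ox=1e-6, C_red=1e-5, C_ox=1e-5)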
def cir_Randles(
w,
n_electron,
D_red,
D_ox,
C_red,
C_ox,
Rs,
Rct,
n,
E,
A,
Q="none",
fs="none",
E0=0,
F=F,
Rg=Rg,
T=298.15,
):
"""
Simulation Function: Randles -Rs-(Q-(RW)-)-
Return the impedance of a Randles circuit with the full complexity of the Warburg coefficient
NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
D_ox = Diffusion coefficent of oxidized specie [cm2/s]
D_red = Diffusion coefficent of reduced specie [cm2/s]
C_ox = Concentration of oxidized species [mol/cm3]
C_red = Concentration of reduced species [mol/cm3]
T = Temperature [K]
Rg = Gas constant [J/molK]
F = Faraday constant [C/mol]
E = Potential [V]
if reduced specie is absent == 'none'
E0 = Formal potential [V]
if reduced specie is absent == 'none'
Rs = Series resistance [ohm]
Rct = charge-transfer resistance [ohm]
Q = Constant phase element used to model the double-layer capacitance [F]
n = exponent of the CPE [-]
Returns
----------
The real and imaginary impedance of a Randles circuit [ohm]
"""
Z_Rct = Rct
Z_Q = elem_Q(w, Q, n)
Z_w = Randles_coeff(
w,
n_electron=n_electron,
E=E,
E0=E0,
D_red=D_red,
D_ox=D_ox,
C_red=C_red,
C_ox=C_ox,
A=A,
T=T,
Rg=Rg,
F=F,
)
return Rs + 1 / (1 / Z_Q + 1 / (Z_Rct + Z_w))
def cir_Randles_simplified(w, Rs, R, n, sigma, Q="none", fs="none"):
"""
Simulation Function: Randles -Rs-(Q-(RW)-)-
Return the impedance of a simplified Randles circuit, where the Warburg element is given directly by its coefficient sigma
NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
"""
if R == "none":
R = 1 / (Q * (2 * np.pi * fs) ** n)
elif Q == "none":
Q = 1 / (R * (2 * np.pi * fs) ** n)
elif n == "none":
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
Z_Q = 1 / (Q * (w * 1j) ** n)
Z_R = R
Z_w = sigma * (w ** (-0.5)) - 1j * sigma * (w ** (-0.5))
return Rs + 1 / (1 / Z_Q + 1 / (Z_R + Z_w))
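# --- Example (not part of the original code): the simplified Randles circuit over
# the same hypothetical sweep; Rs, R, n, sigma and Q are placeholder values.
_example_Z_randles = cir_Randles_simplified(_example_w_range, Rs=20, R=250, n=0.9, sigma=150, Q=1e-5)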
# Polymer electrolytes
def cir_C_RC_C(w, Ce, Cb="none", Rb="none", fsb="none"):
"""
Simulation Function: -C-(RC)-C-
This circuit is often used for modeling blocking electrodes with a polymeric electrolyte, which exhibits an immobile ionic species in the bulk that adds a capacitive contribution
to the otherwise resistive electrolyte
Ref:
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London, Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Ce = Interfacial capacitance [F]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = summit frequency of bulk (RC) circuit [Hz]
"""
Z_C = elem_C(w, C=Ce)
Z_RC = cir_RC(w, C=Cb, R=Rb, fs=fsb)
return Z_C + Z_RC
def cir_Q_RQ_Q(w, Qe, ne, Qb="none", Rb="none", fsb="none", nb="none"):
"""
Simulation Function: -Q-(RQ)-Q-
Modified cir_C_RC_C() circuit that can be used when the electrodes and the bulk do not behave like ideal capacitors
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Qe = Interfacial capacitance modeled with a CPE [F]
ne = Interfacial constant phase element exponent [-]
Rb = Bulk/series resistance [Ohm]
Qb = Bulk capacitance modeled with a CPE [s^n/ohm]
nb = Bulk constant phase element exponent [-]
fsb = summit frequency of bulk (RQ) circuit [Hz]
"""
Z_Q = elem_Q(w, Q=Qe, n=ne)
Z_RQ = cir_RQ(w, Q=Qb, R=Rb, fs=fsb, n=nb)
return Z_Q + Z_RQ
def tanh(x):
"""
As numpy overflows when the intermediate exponentials become very large (above ~10^250), this function is used instead of np.tanh
"""
return (1 - np.exp(-2 * x)) / (1 + np.exp(-2 * x))
def cir_RCRCZD(
w,
L,
D_s,
u1,
u2,
Cb="none",
Rb="none",
fsb="none",
Ce="none",
Re="none",
fse="none",
):
"""
Simulation Function: -RC_b-RC_e-Z_D
This circuit has been used to study non-blocking electrodes with an ionically conducting electrolyte containing a mobile and an immobile ionic species in the bulk, mixed with an
ionically conducting salt. This behavior yields an impedance response that consists of the interfacial impedance -(RC_e)-, the ionically conducting polymer bulk -(RC_b)-,
and the diffusional impedance of the dissolved salt.
Refs.:
- <NAME>. and <NAME>., Electrochimica Acta, 27, 1671-1675, 1982, "Conductivity, Charge Transfer and Transport number - An AC-Investigation
of the Polymer Electrolyte LiSCN-Poly(ethyleneoxide)"
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London
Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Thickness of electrode [cm]
D_s = Diffusion coefficient of dissolved salt [cm2/s]
u1 = Mobility of the ion reacting at the electrode interface
u2 = Mobility of other ion
Re = Interfacial resistance [Ohm]
Ce = Interfacial capacitance [F]
fse = Summit frequency of the interfacial (RC) circuit [Hz]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = Summit frequency of the bulk (RC) circuit [Hz]
"""
Z_RCb = cir_RC(w, C=Cb, R=Rb, fs=fsb)
Z_RCe = cir_RC(w, C=Ce, R=Re, fs=fse)
alpha = ((w * 1j * L ** 2) / D_s) ** (1 / 2)
Z_D = Rb * (u2 / u1) * (tanh(x=alpha) / alpha)
return Z_RCb + Z_RCe + Z_D
# Transmission lines
def cir_RsTLsQ(w, Rs, L, Ri, Q="none", n="none"):
"""
Simulation Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = exponent for the interfacial capacitance [-]
"""
Phi = 1 / (Q * (w * 1j) ** n)
X1 = Ri # ohm/cm
Lam = (Phi / X1) ** (1 / 2) # np.sqrt(Phi/X1)
x = L / Lam
x_mp = mp.matrix(x) # x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
) # Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_TLsQ
def cir_RsRQTLsQ(w, Rs, R1, fs1, n1, L, Ri, Q, n, Q1="none"):
"""
Simulation Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance(Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/ohm]
L = Length/Thickness of porous electrode [cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = Exponent for the interfacial capacitance [-]
Output
-----------
Impedance of Rs-(RQ)1-TLsQ
"""
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = 1 / (Q * (w * 1j) ** n)
X1 = Ri
Lam = (Phi / X1) ** (1 / 2)
x = L / Lam
x_mp = mp.matrix(x) # x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j)
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs(w, Rs, L, Ri, R="none", Q="none", n="none", fs="none"):
"""
Simulation Function: -Rs-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
R = Interfacial Charge transfer resistance [ohm*cm]
fs = Summit frequency of interfacial RQ circuit [Hz]
n = Exponent for interfacial RQ circuit [-]
Q = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-TLs(RQ)
"""
Phi = cir_RQ(w, R, Q, n, fs)
X1 = Ri
Lam = (Phi / X1) ** (1 / 2)
x = L / Lam
x_mp = mp.matrix(x) # x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
) # Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs(w, Rs, L, Ri, R1, n1, fs1, R2, n2, fs2, Q1="none", Q2="none"):
"""
Simulation Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- B<NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/(ohm * cm)]
L = Length/Thickness of porous electrode [cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
R2 = Interfacial Charge transfer resistance [ohm*cm]
fs2 = Summit frequency of interfacial RQ circuit [Hz]
n2 = Exponent for interfacial RQ circuit [-]
Q2 = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-(RQ)1-TLs(RQ)2
"""
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = cir_RQ(w=w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
Lam = (Phi / X1) ** (1 / 2)
x = L / Lam
x_mp = mp.matrix(x) # x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
) # Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
### Support function
def sinh(x):
"""
As numpy overflows when sinh becomes very large (above ~10^250), this function is used instead of np/mp.sinh()
"""
return (1 - np.exp(-2 * x)) / (2 * np.exp(-x))
def coth(x):
"""
As numpy overflows when the argument becomes very large (above ~10^250), this function is used instead of np/mp.coth()
"""
return (1 + np.exp(-2 * x)) / (1 - np.exp(-2 * x))
###
def cir_RsTLQ(w, L, Rs, Q, n, Rel, Ri):
"""
Simulation Function: -R-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
n = exponent for interfacial RQ element [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
"""
# The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi / (X1 + X2)) ** (1 / 2)
x = L / Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * Lam) / sinh(x))) + Lam * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_TL
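# --- Example (not part of the original code): a blocking-electrode transmission
# line over the same hypothetical sweep; electrode thickness and resistances are
# placeholder values.
_example_Z_tlq = cir_RsTLQ(_example_w_range, L=0.01, Rs=10, Q=1e-4, n=0.95, Rel=50, Ri=100)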
def cir_RsRQTLQ(w, L, Rs, Q, n, Rel, Ri, R1, n1, fs1, Q1="none"):
"""
Simulation Function: -R-RQ-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
n = exponent for interfacial RQ element [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
"""
# The impedance of the series resistance
Z_Rs = Rs
# The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi / (X1 + X2)) ** (1 / 2)
x = L / Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * Lam) / sinh(x))) + Lam * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL(w, L, Rs, R, fs, n, Rel, Ri, Q="none"):
"""
Simulation Function: -R-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = Interfacial charge transfer resistance [ohm * cm]
fs = Summit frequency for the interfacial RQ element [Hz]
n = Exponent for interfacial RQ element [-]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
Rel = Electronic resistance of electrode [ohm/cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = Thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
"""
# The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R, Q=Q, n=n, fs=fs)
X1 = Ri
X2 = Rel
Lam = (Phi / (X1 + X2)) ** (1 / 2)
x = L / Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * Lam) / sinh(x))) + Lam * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL(w, L, Rs, R1, fs1, n1, R2, fs2, n2, Rel, Ri, Q1="none", Q2="none"):
"""
Simulation Function: -R-RQ-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
R2 = interfacial charge transfer resistance [ohm * cm]
fs2 = Summit frequency for the interfacial RQ element [Hz]
n2 = exponent for interfacial RQ element [-]
Q2 = Constant phase element for the interfacial capacitance [s^n/ohm]
Rel = electronic resistance of electrode [ohm/cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
"""
# The impedance of the series resistance
Z_Rs = Rs
# The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
X2 = Rel
Lam = (Phi / (X1 + X2)) ** (1 / 2)
x = L / Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * Lam) / sinh(x))) + Lam * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
# Transmission lines with solid-state transport
def cir_RsTL_1Dsolid(w, L, D, radius, Rs, R, Q, n, R_w, n_w, Rel, Ri):
"""
Simulation Function: -R-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Illig, J., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = particle charge transfer resistance [ohm*cm^2]
Q = Constant phase element of the internal RQ element in the modified Randles element of a particle [s^n/ohm]
n = exponent for the internal RQ element in the modified Randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
--------------
Impedance of Rs-TL(Q(RW))
"""
# The impedance of the series resistance
Z_Rs = Rs
# The impedance of a 1D Warburg Element
time_const = (radius ** 2) / D
x = (time_const * w * 1j) ** n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
)
Z_w = R_w * np.array(warburg_coth_mp) / x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R
Z_Q = elem_Q(w, Q=Q, n=n)
Z_Randles = 1 / (1 / Z_Q + 1 / (Z_Rct + Z_w)) # Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles / (Rel + Ri)) ** (1 / 2)
x = L / lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * lamb) / sinh(x))) + lamb * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_1Dsolid(
w, L, D, radius, Rs, R1, fs1, n1, R2, Q2, n2, R_w, n_w, Rel, Ri, Q1="none"
):
"""
Simulation Function: -R-RQ-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- <NAME>., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = charge transfer resistance of the interfacial RQ element [ohm*cm^2]
    fs1 = summit frequency of the interfacial RQ element [Hz]
    n1 = exponent for the interfacial RQ element [-]
    R2 = particle charge transfer resistance [ohm*cm^2]
    Q2 = constant phase element of the internal RQ element in the modified Randles element of a particle [s^n/ohm]
    n2 = exponent for the internal RQ element in the modified Randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
------------------
Impedance of R-RQ-TL(Q(RW))
"""
# The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The impedance of a 1D Warburg Element
time_const = (radius ** 2) / D
x = (time_const * w * 1j) ** n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
)
Z_w = R_w * np.array(warburg_coth_mp) / x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w, Q=Q2, n=n2)
Z_Randles = 1 / (1 / Z_Q + 1 / (Z_Rct + Z_w)) # Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles / (Rel + Ri)) ** (1 / 2)
x = L / lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * lamb) / sinh(x))) + lamb * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_RQ + Z_TL
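# Minimal simulation sketch for the transmission-line functions above (all parameter
# values below are made-up illustrations, not recommended defaults):
#
#   w = np.logspace(6, -2, 80) * 2 * np.pi  # angular frequencies [rad/s]
#   Z = cir_RsRQTL_1Dsolid(w, L=10e-4, D=1e-12, radius=5e-5, Rs=10,
#                          R1=20, fs1=1e3, n1=0.9, R2=50, Q2=1e-4, n2=0.8,
#                          R_w=100, n_w=0.5, Rel=1, Ri=50)
#   # Z.real and -Z.imag can then be plotted as a Nyquist plot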
### Fitting Circuit Functions
##
#
def elem_C_fit(params, w):
"""
Fit Function: -C-
"""
C = params["C"]
return 1 / (C * (w * 1j))
def elem_Q_fit(params, w):
"""
Fit Function: -Q-
Constant Phase Element for Fitting
"""
Q = params["Q"]
n = params["n"]
return 1 / (Q * (w * 1j) ** n)
def cir_RsC_fit(params, w):
"""
Fit Function: -Rs-C-
"""
Rs = params["Rs"]
C = params["C"]
return Rs + 1 / (C * (w * 1j))
def cir_RsQ_fit(params, w):
"""
Fit Function: -Rs-Q-
"""
Rs = params["Rs"]
Q = params["Q"]
n = params["n"]
return Rs + 1 / (Q * (w * 1j) ** n)
def cir_RC_fit(params, w):
"""
Fit Function: -RC-
    Returns the impedance of an RC circuit, using the RQ definitions with n = 1
"""
if str(params.keys())[10:].find("R") == -1: # if R == 'none':
Q = params["C"]
fs = params["fs"]
R = 1 / (Q * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("C") == -1: # elif Q == 'none':
R = params["R"]
fs = params["fs"]
Q = 1 / (R * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("n") == -1: # elif n == 'none':
R = params["R"]
Q = params["C"]
fs = params["fs"]
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
if str(params.keys())[10:].find("fs") == -1: # elif fs == 'none':
R = params["R"]
Q = params["C"]
return cir_RQ(w, R=R, Q=C, n=1, fs=fs)
def cir_RQ_fit(params, w):
"""
Fit Function: -RQ-
Return the impedance of an RQ circuit:
    Z(w) = R / (1 + R*Q*(j*w)^n)
See Explanation of equations under cir_RQ()
    str(params.keys())[10:] finds the names of the user-defined parameters to iterate over; if .find() returns -1, that parameter was not given and is treated as 'none'
<NAME> (<EMAIL> / <EMAIL>)
"""
if str(params.keys())[10:].find("R") == -1: # if R == 'none':
Q = params["Q"]
n = params["n"]
fs = params["fs"]
R = 1 / (Q * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("Q") == -1: # elif Q == 'none':
R = params["R"]
n = params["n"]
fs = params["fs"]
Q = 1 / (R * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("n") == -1: # elif n == 'none':
R = params["R"]
Q = params["Q"]
fs = params["fs"]
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
if str(params.keys())[10:].find("fs") == -1: # elif fs == 'none':
R = params["R"]
n = params["n"]
Q = params["Q"]
return R / (1 + R * Q * (w * 1j) ** n)
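# Illustrative sketch of how the fit functions consume parameters (assuming lmfit is
# the fitting backend, as used by leastsq_errorfunc() further below). A parameter left
# out of the Parameters object is treated as 'none' and back-calculated from the rest:
#
#   from lmfit import Parameters
#   params = Parameters()
#   params.add("R", value=100)
#   params.add("n", value=0.9)
#   params.add("fs", value=1e3)  # Q omitted -> derived from R, n, and fs
#   Z_guess = cir_RQ_fit(params, w=np.logspace(6, -2, 50) * 2 * np.pi)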
def cir_RsRQ_fit(params, w):
"""
Fit Function: -Rs-RQ-
    Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
"""
if str(params.keys())[10:].find("R") == -1: # if R == 'none':
Q = params["Q"]
n = params["n"]
fs = params["fs"]
R = 1 / (Q * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("Q") == -1: # elif Q == 'none':
R = params["R"]
n = params["n"]
fs = params["fs"]
Q = 1 / (R * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("n") == -1: # elif n == 'none':
R = params["R"]
Q = params["Q"]
fs = params["fs"]
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
if str(params.keys())[10:].find("fs") == -1: # elif fs == 'none':
R = params["R"]
Q = params["Q"]
n = params["n"]
Rs = params["Rs"]
return Rs + (R / (1 + R * Q * (w * 1j) ** n))
def cir_RsRQRQ_fit(params, w):
"""
Fit Function: -Rs-RQ-RQ-
    Return the impedance of an Rs-RQ-RQ circuit. See details under cir_RsRQRQ()
<NAME> (<EMAIL> / <EMAIL>)
"""
if str(params.keys())[10:].find("'R'") == -1: # if R == 'none':
Q = params["Q"]
n = params["n"]
fs = params["fs"]
R = 1 / (Q * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("'Q'") == -1: # elif Q == 'none':
R = params["R"]
n = params["n"]
fs = params["fs"]
Q = 1 / (R * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("'n'") == -1: # elif n == 'none':
R = params["R"]
Q = params["Q"]
fs = params["fs"]
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
if str(params.keys())[10:].find("'fs'") == -1: # elif fs == 'none':
R = params["R"]
Q = params["Q"]
n = params["n"]
if str(params.keys())[10:].find("'R2'") == -1: # if R == 'none':
Q2 = params["Q2"]
n2 = params["n2"]
fs2 = params["fs2"]
R2 = 1 / (Q2 * (2 * np.pi * fs2) ** n2)
if str(params.keys())[10:].find("'Q2'") == -1: # elif Q == 'none':
R2 = params["R2"]
n2 = params["n2"]
fs2 = params["fs2"]
Q2 = 1 / (R2 * (2 * np.pi * fs2) ** n2)
if str(params.keys())[10:].find("'n2'") == -1: # elif n == 'none':
R2 = params["R2"]
Q2 = params["Q2"]
fs2 = params["fs2"]
n2 = np.log(Q2 * R2) / np.log(1 / (2 * np.pi * fs2))
if str(params.keys())[10:].find("'fs2'") == -1: # elif fs == 'none':
R2 = params["R2"]
Q2 = params["Q2"]
n2 = params["n2"]
Rs = params["Rs"]
return (
Rs + (R / (1 + R * Q * (w * 1j) ** n)) + (R2 / (1 + R2 * Q2 * (w * 1j) ** n2))
)
def cir_Randles_simplified_Fit(params, w):
"""
Fit Function: Randles simplified -Rs-(Q-(RW)-)-
Return the impedance of a Randles circuit. See more under cir_Randles_simplified()
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> || <EMAIL>)
"""
if str(params.keys())[10:].find("'R'") == -1: # if R == 'none':
Q = params["Q"]
n = params["n"]
fs = params["fs"]
R = 1 / (Q * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("'Q'") == -1: # elif Q == 'none':
R = params["R"]
n = params["n"]
fs = params["fs"]
Q = 1 / (R * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("'n'") == -1: # elif n == 'none':
R = params["R"]
Q = params["Q"]
fs = params["fs"]
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
if str(params.keys())[10:].find("'fs'") == -1: # elif fs == 'none':
R = params["R"]
Q = params["Q"]
n = params["n"]
Rs = params["Rs"]
sigma = params["sigma"]
Z_Q = 1 / (Q * (w * 1j) ** n)
Z_R = R
Z_w = sigma * (w ** (-0.5)) - 1j * sigma * (w ** (-0.5))
return Rs + 1 / (1 / Z_Q + 1 / (Z_R + Z_w))
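# Note: the Warburg term above can equivalently be written as
#   Z_w(w) = sigma * (1 - 1j) / sqrt(w)
# i.e. the classical semi-infinite (planar) Warburg impedance with
# Warburg coefficient sigma [ohm*s^(-1/2)].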
def cir_RsRQQ_fit(params, w):
"""
Fit Function: -Rs-RQ-Q-
See cir_RsRQQ() for details
"""
Rs = params["Rs"]
Q = params["Q"]
n = params["n"]
Z_Q = 1 / (Q * (w * 1j) ** n)
if str(params.keys())[10:].find("R1") == -1: # if R == 'none':
Q1 = params["Q1"]
n1 = params["n1"]
fs1 = params["fs1"]
R1 = 1 / (Q1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("Q1") == -1: # elif Q == 'none':
R1 = params["R1"]
n1 = params["n1"]
fs1 = params["fs1"]
Q1 = 1 / (R1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("n1") == -1: # elif n == 'none':
R1 = params["R1"]
Q1 = params["Q1"]
fs1 = params["fs1"]
n1 = np.log(Q1 * R1) / np.log(1 / (2 * np.pi * fs1))
if str(params.keys())[10:].find("fs1") == -1: # elif fs == 'none':
R1 = params["R1"]
n1 = params["n1"]
Q1 = params["Q1"]
Z_RQ = R1 / (1 + R1 * Q1 * (w * 1j) ** n1)
return Rs + Z_RQ + Z_Q
def cir_RsRQC_fit(params, w):
"""
Fit Function: -Rs-RQ-C-
See cir_RsRQC() for details
"""
Rs = params["Rs"]
C = params["C"]
Z_C = 1 / (C * (w * 1j))
if str(params.keys())[10:].find("R1") == -1: # if R == 'none':
Q1 = params["Q1"]
n1 = params["n1"]
fs1 = params["fs1"]
R1 = 1 / (Q1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("Q1") == -1: # elif Q == 'none':
R1 = params["R1"]
n1 = params["n1"]
fs1 = params["fs1"]
Q1 = 1 / (R1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("n1") == -1: # elif n == 'none':
R1 = params["R1"]
Q1 = params["Q1"]
fs1 = params["fs1"]
n1 = np.log(Q1 * R1) / np.log(1 / (2 * np.pi * fs1))
if str(params.keys())[10:].find("fs1") == -1: # elif fs == 'none':
R1 = params["R1"]
n1 = params["n1"]
Q1 = params["Q1"]
Z_RQ = R1 / (1 + R1 * Q1 * (w * 1j) ** n1)
return Rs + Z_RQ + Z_C
def cir_RsRCC_fit(params, w):
"""
Fit Function: -Rs-RC-C-
See cir_RsRCC() for details
"""
Rs = params["Rs"]
R1 = params["R1"]
C1 = params["C1"]
C = params["C"]
return Rs + cir_RC(w, C=C1, R=R1, fs="none") + elem_C(w, C=C)
def cir_RsRCQ_fit(params, w):
"""
Fit Function: -Rs-RC-Q-
See cir_RsRCQ() for details
"""
Rs = params["Rs"]
R1 = params["R1"]
C1 = params["C1"]
Q = params["Q"]
n = params["n"]
return Rs + cir_RC(w, C=C1, R=R1, fs="none") + elem_Q(w, Q, n)
# Polymer electrolytes
def cir_C_RC_C_fit(params, w):
"""
Fit Function: -C-(RC)-C-
See cir_C_RC_C() for details
<NAME> (<EMAIL> || <EMAIL>)
"""
# Interfacial impedance
Ce = params["Ce"]
Z_C = 1 / (Ce * (w * 1j))
    # Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: # if R == 'none':
Cb = params["Cb"]
fsb = params["fsb"]
Rb = 1 / (Cb * (2 * np.pi * fsb))
if str(params.keys())[10:].find("Cb") == -1: # elif Q == 'none':
Rb = params["Rb"]
fsb = params["fsb"]
Cb = 1 / (Rb * (2 * np.pi * fsb))
if str(params.keys())[10:].find("fsb") == -1: # elif fs == 'none':
Rb = params["Rb"]
Cb = params["Cb"]
Z_RC = Rb / (1 + Rb * Cb * (w * 1j))
return Z_C + Z_RC
def cir_Q_RQ_Q_Fit(params, w):
"""
Fit Function: -Q-(RQ)-Q-
See cir_Q_RQ_Q() for details
<NAME> (<EMAIL> || <EMAIL>)
"""
# Interfacial impedance
Qe = params["Qe"]
ne = params["ne"]
Z_Q = 1 / (Qe * (w * 1j) ** ne)
# Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: # if R == 'none':
Qb = params["Qb"]
nb = params["nb"]
fsb = params["fsb"]
Rb = 1 / (Qb * (2 * np.pi * fsb) ** nb)
if str(params.keys())[10:].find("Qb") == -1: # elif Q == 'none':
Rb = params["Rb"]
nb = params["nb"]
fsb = params["fsb"]
Qb = 1 / (Rb * (2 * np.pi * fsb) ** nb)
if str(params.keys())[10:].find("nb") == -1: # elif n == 'none':
Rb = params["Rb"]
Qb = params["Qb"]
fsb = params["fsb"]
nb = np.log(Qb * Rb) / np.log(1 / (2 * np.pi * fsb))
if str(params.keys())[10:].find("fsb") == -1: # elif fs == 'none':
Rb = params["Rb"]
nb = params["nb"]
Qb = params["Qb"]
Z_RQ = Rb / (1 + Rb * Qb * (w * 1j) ** nb)
return Z_Q + Z_RQ
def cir_RCRCZD_fit(params, w):
"""
Fit Function: -RC_b-RC_e-Z_D
See cir_RCRCZD() for details
<NAME> (<EMAIL> || <EMAIL>)
"""
    # Interfacial impedance
if str(params.keys())[10:].find("Re") == -1: # if R == 'none':
Ce = params["Ce"]
fse = params["fse"]
Re = 1 / (Ce * (2 * np.pi * fse))
if str(params.keys())[10:].find("Ce") == -1: # elif Q == 'none':
Re = params["Rb"]
fse = params["fsb"]
Ce = 1 / (Re * (2 * np.pi * fse))
if str(params.keys())[10:].find("fse") == -1: # elif fs == 'none':
Re = params["Re"]
Ce = params["Ce"]
Z_RCe = Re / (1 + Re * Ce * (w * 1j))
    # Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: # if R == 'none':
Cb = params["Cb"]
fsb = params["fsb"]
Rb = 1 / (Cb * (2 * np.pi * fsb))
if str(params.keys())[10:].find("Cb") == -1: # elif Q == 'none':
Rb = params["Rb"]
fsb = params["fsb"]
Cb = 1 / (Rb * (2 * np.pi * fsb))
if str(params.keys())[10:].find("fsb") == -1: # elif fs == 'none':
Rb = params["Rb"]
Cb = params["Cb"]
Z_RCb = Rb / (1 + Rb * Cb * (w * 1j))
    # Mass transport impedance
L = params["L"]
D_s = params["D_s"]
u1 = params["u1"]
u2 = params["u2"]
alpha = ((w * 1j * L ** 2) / D_s) ** (1 / 2)
Z_D = Rb * (u2 / u1) * (tanh(alpha) / alpha)
return Z_RCb + Z_RCe + Z_D
# Transmission lines
def cir_RsTLsQ_fit(params, w):
"""
Fit Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsTLsQ()
<NAME> (<EMAIL> / <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
Q = params["Q"]
n = params["n"]
Phi = 1 / (Q * (w * 1j) ** n)
X1 = Ri # ohm/cm
Lam = (Phi / X1) ** (1 / 2) # np.sqrt(Phi/X1)
x = L / Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
#
# Z_TLsQ = Lam * X1 * coth_mp
Z_TLsQ = Lam * X1 * coth(x)
return Rs + Z_TLsQ
def cir_RsRQTLsQ_Fit(params, w):
"""
Fit Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsRQTLsQ
<NAME> (<EMAIL> / <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
Q = params["Q"]
n = params["n"]
if str(params.keys())[10:].find("R1") == -1: # if R == 'none':
Q1 = params["Q1"]
n1 = params["n1"]
fs1 = params["fs1"]
R1 = 1 / (Q1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("Q1") == -1: # elif Q == 'none':
R1 = params["R1"]
n1 = params["n1"]
fs1 = params["fs1"]
Q1 = 1 / (R1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("n1") == -1: # elif n == 'none':
R1 = params["R1"]
Q1 = params["Q1"]
fs1 = params["fs1"]
n1 = np.log(Q1 * R1) / np.log(1 / (2 * np.pi * fs1))
if str(params.keys())[10:].find("fs1") == -1: # elif fs == 'none':
R1 = params["R1"]
n1 = params["n1"]
Q1 = params["Q1"]
Z_RQ = R1 / (1 + R1 * Q1 * (w * 1j) ** n1)
Phi = 1 / (Q * (w * 1j) ** n)
X1 = Ri
Lam = (Phi / X1) ** (1 / 2)
x = L / Lam
x_mp = mp.matrix(x) # x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
) # Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs_Fit(params, w):
"""
    Fit Function: -Rs-TLs-
    TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
    See more under cir_RsTLs()
<NAME> (<EMAIL> / <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
if str(params.keys())[10:].find("R") == -1: # if R == 'none':
Q = params["Q"]
n = params["n"]
fs = params["fs"]
R = 1 / (Q * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("Q") == -1: # elif Q == 'none':
R = params["R"]
n = params["n"]
fs = params["fs"]
Q = 1 / (R * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("n") == -1: # elif n == 'none':
R = params["R"]
Q = params["Q"]
fs = params["fs"]
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
if str(params.keys())[10:].find("fs") == -1: # elif fs == 'none':
R = params["R"]
n = params["n"]
Q = params["Q"]
Phi = R / (1 + R * Q * (w * 1j) ** n)
X1 = Ri
Lam = (Phi / X1) ** (1 / 2)
x = L / Lam
x_mp = mp.matrix(x) # x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
) # Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs_Fit(params, w):
"""
Fit Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line with a faradaic interfacial impedance (RQ)
See more under cir_RsRQTLs()
<NAME> (<EMAIL> || <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
if str(params.keys())[10:].find("R1") == -1: # if R == 'none':
Q1 = params["Q1"]
n1 = params["n1"]
fs1 = params["fs1"]
R1 = 1 / (Q1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("Q1") == -1: # elif Q == 'none':
R1 = params["R1"]
n1 = params["n1"]
fs1 = params["fs1"]
Q1 = 1 / (R1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("n1") == -1: # elif n == 'none':
R1 = params["R1"]
Q1 = params["Q1"]
fs1 = params["fs1"]
n1 = np.log(Q1 * R1) / np.log(1 / (2 * np.pi * fs1))
if str(params.keys())[10:].find("fs1") == -1: # elif fs == 'none':
R1 = params["R1"]
n1 = params["n1"]
Q1 = params["Q1"]
Z_RQ = R1 / (1 + R1 * Q1 * (w * 1j) ** n1)
if str(params.keys())[10:].find("R2") == -1: # if R == 'none':
Q2 = params["Q2"]
n2 = params["n2"]
fs2 = params["fs2"]
R2 = 1 / (Q2 * (2 * np.pi * fs2) ** n2)
if str(params.keys())[10:].find("Q2") == -1: # elif Q == 'none':
R2 = params["R2"]
n2 = params["n2"]
fs2 = params["fs2"]
        Q2 = 1 / (R2 * (2 * np.pi * fs2) ** n2)
if str(params.keys())[10:].find("n2") == -1: # elif n == 'none':
R2 = params["R2"]
Q2 = params["Q2"]
fs2 = params["fs2"]
n2 = np.log(Q2 * R2) / np.log(1 / (2 * np.pi * fs2))
if str(params.keys())[10:].find("fs2") == -1: # elif fs == 'none':
R2 = params["R2"]
n2 = params["n2"]
Q2 = params["Q2"]
Phi = R2 / (1 + R2 * Q2 * (w * 1j) ** n2)
X1 = Ri
Lam = (Phi / X1) ** (1 / 2)
x = L / Lam
x_mp = mp.matrix(x) # x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
) # Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
def cir_RsTLQ_fit(params, w):
"""
Fit Function: -R-TLQ- (interface non-reacting, i.e. blocking electrode)
    Transmission line w/ full complexity, which includes both Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
Rel = params["Rel"]
Q = params["Q"]
n = params["n"]
# The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi / (X1 + X2)) ** (1 / 2)
x = L / Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * Lam) / sinh(x))) + Lam * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTLQ_fit(params, w):
"""
Fit Function: -R-RQ-TLQ- (interface non-reacting, i.e. blocking electrode)
    Transmission line w/ full complexity, which includes both Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
Rel = params["Rel"]
Q = params["Q"]
n = params["n"]
# The impedance of the series resistance
Z_Rs = Rs
# The (RQ) circuit in series with the transmission line
if str(params.keys())[10:].find("R1") == -1: # if R == 'none':
Q1 = params["Q1"]
n1 = params["n1"]
fs1 = params["fs1"]
R1 = 1 / (Q1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("Q1") == -1: # elif Q == 'none':
R1 = params["R1"]
n1 = params["n1"]
fs1 = params["fs1"]
Q1 = 1 / (R1 * (2 * np.pi * fs1) ** n1)
if str(params.keys())[10:].find("n1") == -1: # elif n == 'none':
R1 = params["R1"]
Q1 = params["Q1"]
fs1 = params["fs1"]
n1 = np.log(Q1 * R1) / np.log(1 / (2 * np.pi * fs1))
if str(params.keys())[10:].find("fs1") == -1: # elif fs == 'none':
R1 = params["R1"]
n1 = params["n1"]
Q1 = params["Q1"]
Z_RQ1 = R1 / (1 + R1 * Q1 * (w * 1j) ** n1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi / (X1 + X2)) ** (1 / 2)
x = L / Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * Lam) / sinh(x))) + Lam * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_Fit(params, w):
"""
    Fit Function: -R-TL- (interface reacting, i.e. non-blocking)
    Transmission line w/ full complexity, which includes both Ri and Rel
See cir_RsTL() for details
<NAME> (<EMAIL> || <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
Rel = params["Rel"]
# The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R") == -1: # if R == 'none':
Q = params["Q"]
n = params["n"]
fs = params["fs"]
R = 1 / (Q * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("Q") == -1: # elif Q == 'none':
R = params["R"]
n = params["n"]
fs = params["fs"]
Q = 1 / (R * (2 * np.pi * fs) ** n)
if str(params.keys())[10:].find("n") == -1: # elif n == 'none':
R = params["R"]
Q = params["Q"]
fs = params["fs"]
n = np.log(Q * R) / np.log(1 / (2 * np.pi * fs))
if str(params.keys())[10:].find("fs") == -1: # elif fs == 'none':
R = params["R"]
n = params["n"]
Q = params["Q"]
Phi = R / (1 + R * Q * (w * 1j) ** n)
X1 = Ri
X2 = Rel
Lam = (Phi / (X1 + X2)) ** (1 / 2)
x = L / Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * Lam) / sinh(x))) + Lam * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_fit(params, w):
"""
Fit Function: -R-RQ-TL- (interface reacting, i.e. non-blocking)
    Transmission line w/ full complexity, which includes both Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
Rel = params["Rel"]
# The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: # if R == 'none':
Q1 = params["Q1"]
n1 = params["n1"]
fs1 = params["fs1"]
R1 = 1 / (Q1 * (2 * np.pi * fs1) ** n1)
elif str(params.keys())[10:].find("Q1") == -1: # elif Q == 'none':
R1 = params["R1"]
n1 = params["n1"]
fs1 = params["fs1"]
Q1 = 1 / (R1 * (2 * np.pi * fs1) ** n1)
elif str(params.keys())[10:].find("n1") == -1: # elif n == 'none':
R1 = params["R1"]
Q1 = params["Q1"]
fs1 = params["fs1"]
n1 = np.log(Q1 * R1) / np.log(1 / (2 * np.pi * fs1))
elif str(params.keys())[10:].find("fs1") == -1: # elif fs == 'none':
R1 = params["R1"]
n1 = params["n1"]
Q1 = params["Q1"]
Z_RQ1 = R1 / (1 + R1 * Q1 * (w * 1j) ** n1)
#
# # The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R2") == -1: # if R == 'none':
Q2 = params["Q2"]
n2 = params["n2"]
fs2 = params["fs2"]
R2 = 1 / (Q2 * (2 * np.pi * fs2) ** n2)
elif str(params.keys())[10:].find("Q2") == -1: # elif Q == 'none':
R2 = params["R2"]
n2 = params["n2"]
fs2 = params["fs2"]
        Q2 = 1 / (R2 * (2 * np.pi * fs2) ** n2)
elif str(params.keys())[10:].find("n2") == -1: # elif n == 'none':
R2 = params["R2"]
Q2 = params["Q2"]
fs2 = params["fs2"]
n2 = np.log(Q2 * R2) / np.log(1 / (2 * np.pi * fs2))
elif str(params.keys())[10:].find("fs2") == -1: # elif fs == 'none':
R2 = params["R2"]
n2 = params["n2"]
Q2 = params["Q2"]
Phi = R2 / (1 + R2 * Q2 * (w * 1j) ** n2)
X1 = Ri
X2 = Rel
Lam = (Phi / (X1 + X2)) ** (1 / 2)
x = L / Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float((mp.coth(x_mp[i]).imag))*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real) + float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real)*1j)
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float((mp.sinh(x_mp[i]).imag))*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * Lam) / sinh(x))) + Lam * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_1Dsolid_fit(params, w):
"""
Fit Function: -R-TL(Q(RW))-
Transmission line w/ full complexity
See cir_RsTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
radius = params["radius"]
D = params["D"]
R = params["R"]
Q = params["Q"]
n = params["n"]
R_w = params["R_w"]
n_w = params["n_w"]
Rel = params["Rel"]
Ri = params["Ri"]
# The impedance of the series resistance
Z_Rs = Rs
# The impedance of a 1D Warburg Element
time_const = (radius ** 2) / D
x = (time_const * w * 1j) ** n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
)
Z_w = R_w * np.array(warburg_coth_mp) / x
    # The interfacial impedance is given by a Randles equivalent circuit with the finite-space Warburg element in series with R
Z_Rct = R
Z_Q = elem_Q(w=w, Q=Q, n=n)
Z_Randles = 1 / (1 / Z_Q + 1 / (Z_Rct + Z_w)) # Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles / (Rel + Ri)) ** (1 / 2)
x = L / lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * lamb) / sinh(x))) + lamb * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_1Dsolid_fit(params, w):
"""
Fit Function: -R-RQ-TL(Q(RW))-
    Transmission line w/ full complexity, which includes both Ri and Rel. The Warburg element is specific for 1D solid-state diffusion
See cir_RsRQTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
"""
Rs = params["Rs"]
L = params["L"]
Ri = params["Ri"]
radius = params["radius"]
D = params["D"]
R2 = params["R2"]
Q2 = params["Q2"]
n2 = params["n2"]
R_w = params["R_w"]
n_w = params["n_w"]
Rel = params["Rel"]
Ri = params["Ri"]
# The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: # if R == 'none':
Q1 = params["Q1"]
n1 = params["n1"]
fs1 = params["fs1"]
R1 = 1 / (Q1 * (2 * np.pi * fs1) ** n1)
elif str(params.keys())[10:].find("Q1") == -1: # elif Q == 'none':
R1 = params["R1"]
n1 = params["n1"]
fs1 = params["fs1"]
Q1 = 1 / (R1 * (2 * np.pi * fs1) ** n1)
elif str(params.keys())[10:].find("n1") == -1: # elif n == 'none':
R1 = params["R1"]
Q1 = params["Q1"]
fs1 = params["fs1"]
n1 = np.log(Q1 * R1) / np.log(1 / (2 * np.pi * fs1))
elif str(params.keys())[10:].find("fs1") == -1: # elif fs == 'none':
R1 = params["R1"]
n1 = params["n1"]
Q1 = params["Q1"]
Z_RQ1 = R1 / (1 + R1 * Q1 * (w * 1j) ** n1)
# The impedance of a 1D Warburg Element
time_const = (radius ** 2) / D
x = (time_const * w * 1j) ** n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(
float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag) * 1j
)
Z_w = R_w * np.array(warburg_coth_mp) / x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w, Q=Q2, n=n2)
Z_Randles = 1 / (1 / Z_Q + 1 / (Z_Rct + Z_w)) # Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles / (Rel + Ri)) ** (1 / 2)
x = L / lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel * Ri) / (Rel + Ri)) * (L + ((2 * lamb) / sinh(x))) + lamb * (
(Rel ** 2 + Ri ** 2) / (Rel + Ri)
) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
### Least-Squares error function
def leastsq_errorfunc(params, w, re, im, circuit, weight_func):
"""
    Sum of squares error function for the complex non-linear least-squares (CNLS) fitting procedure. The fitting routine (lmfit) iterates over this function
    until the total sum of errors is minimized.
    During the minimization the fit is weighted, and currently three different weighting options are available:
    - modulus
    - unity
    - proportional
    Modulus is generally recommended, as random errors and a bias can exist in the experimental data.
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------
- params: parameters needed for CNLS
- re: real impedance
- im: Imaginary impedance
- circuit:
    The available circuits are shown below; this parameter must be given as a string.
- C
- Q
- R-C
- R-Q
- RC
- RQ
- R-RQ
- R-RQ-RQ
- R-RQ-Q
- R-(Q(RW))
- R-(Q(RM))
- R-RC-C
- R-RC-Q
- R-RQ-Q
- R-RQ-C
- RC-RC-ZD
- R-TLsQ
- R-RQ-TLsQ
- R-TLs
- R-RQ-TLs
- R-TLQ
- R-RQ-TLQ
- R-TL
- R-RQ-TL
- R-TL1Dsolid (reactive interface with 1D solid-state diffusion)
- R-RQ-TL1Dsolid
- weight_func
Weight function
- modulus
- unity
- proportional
"""
if circuit == "C":
re_fit = elem_C_fit(params, w).real
im_fit = -elem_C_fit(params, w).imag
elif circuit == "Q":
re_fit = elem_Q_fit(params, w).real
im_fit = -elem_Q_fit(params, w).imag
elif circuit == "R-C":
re_fit = cir_RsC_fit(params, w).real
im_fit = -cir_RsC_fit(params, w).imag
elif circuit == "R-Q":
re_fit = cir_RsQ_fit(params, w).real
im_fit = -cir_RsQ_fit(params, w).imag
elif circuit == "RC":
re_fit = cir_RC_fit(params, w).real
im_fit = -cir_RC_fit(params, w).imag
elif circuit == "RQ":
re_fit = cir_RQ_fit(params, w).real
im_fit = -cir_RQ_fit(params, w).imag
elif circuit == "R-RQ":
re_fit = cir_RsRQ_fit(params, w).real
im_fit = -cir_RsRQ_fit(params, w).imag
elif circuit == "R-RQ-RQ":
re_fit = cir_RsRQRQ_fit(params, w).real
im_fit = -cir_RsRQRQ_fit(params, w).imag
elif circuit == "R-RC-C":
re_fit = cir_RsRCC_fit(params, w).real
im_fit = -cir_RsRCC_fit(params, w).imag
elif circuit == "R-RC-Q":
re_fit = cir_RsRCQ_fit(params, w).real
im_fit = -cir_RsRCQ_fit(params, w).imag
elif circuit == "R-RQ-Q":
re_fit = cir_RsRQQ_fit(params, w).real
im_fit = -cir_RsRQQ_fit(params, w).imag
elif circuit == "R-RQ-C":
re_fit = cir_RsRQC_fit(params, w).real
im_fit = -cir_RsRQC_fit(params, w).imag
elif circuit == "R-(Q(RW))":
re_fit = cir_Randles_simplified_Fit(params, w).real
im_fit = -cir_Randles_simplified_Fit(params, w).imag
elif circuit == "R-(Q(RM))":
re_fit = cir_Randles_uelectrode_fit(params, w).real
im_fit = -cir_Randles_uelectrode_fit(params, w).imag
elif circuit == "C-RC-C":
re_fit = cir_C_RC_C_fit(params, w).real
im_fit = -cir_C_RC_C_fit(params, w).imag
elif circuit == "Q-RQ-Q":
re_fit = cir_Q_RQ_Q_Fit(params, w).real
im_fit = -cir_Q_RQ_Q_Fit(params, w).imag
elif circuit == "RC-RC-ZD":
re_fit = cir_RCRCZD_fit(params, w).real
im_fit = -cir_RCRCZD_fit(params, w).imag
elif circuit == "R-TLsQ":
re_fit = cir_RsTLsQ_fit(params, w).real
im_fit = -cir_RsTLsQ_fit(params, w).imag
elif circuit == "R-RQ-TLsQ":
re_fit = cir_RsRQTLsQ_Fit(params, w).real
im_fit = -cir_RsRQTLsQ_Fit(params, w).imag
elif circuit == "R-TLs":
re_fit = cir_RsTLs_Fit(params, w).real
im_fit = -cir_RsTLs_Fit(params, w).imag
elif circuit == "R-RQ-TLs":
re_fit = cir_RsRQTLs_Fit(params, w).real
im_fit = -cir_RsRQTLs_Fit(params, w).imag
elif circuit == "R-TLQ":
re_fit = cir_RsTLQ_fit(params, w).real
im_fit = -cir_RsTLQ_fit(params, w).imag
elif circuit == "R-RQ-TLQ":
re_fit = cir_RsRQTLQ_fit(params, w).real
im_fit = -cir_RsRQTLQ_fit(params, w).imag
elif circuit == "R-TL":
re_fit = cir_RsTL_Fit(params, w).real
im_fit = -cir_RsTL_Fit(params, w).imag
elif circuit == "R-RQ-TL":
re_fit = cir_RsRQTL_fit(params, w).real
im_fit = -cir_RsRQTL_fit(params, w).imag
elif circuit == "R-TL1Dsolid":
re_fit = cir_RsTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsTL_1Dsolid_fit(params, w).imag
elif circuit == "R-RQ-TL1Dsolid":
re_fit = cir_RsRQTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsRQTL_1Dsolid_fit(params, w).imag
else:
print("Circuit is not defined in leastsq_errorfunc()")
error = [(re - re_fit) ** 2, (im - im_fit) ** 2] # sum of squares
# Different Weighing options, see Lasia
if weight_func == "modulus":
weight = [
1 / ((re_fit ** 2 + im_fit ** 2) ** (1 / 2)),
1 / ((re_fit ** 2 + im_fit ** 2) ** (1 / 2)),
]
elif weight_func == "proportional":
weight = [1 / (re_fit ** 2), 1 / (im_fit ** 2)]
elif weight_func == "unity":
unity_1s = []
for k in range(len(re)):
unity_1s.append(
1
            )  # makes an array of 1's, so that the weighting equals 1 * the sum of squares.
weight = [unity_1s, unity_1s]
else:
print("weight not defined in leastsq_errorfunc()")
S = np.array(weight) * error # weighted sum of squares
return S
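# Illustrative sketch of how this error function would be handed to lmfit's minimizer
# (the initial guesses and the experimental arrays w, re, im are hypothetical placeholders):
#
#   from lmfit import Parameters, minimize
#   params = Parameters()
#   params.add("Rs", value=10, min=0)
#   params.add("R", value=100, min=0)
#   params.add("Q", value=1e-5, min=0)
#   params.add("n", value=0.9, min=0, max=1)
#   fit = minimize(leastsq_errorfunc, params, method="leastsq",
#                  args=(w, re, im, "R-RQ", "modulus"))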
### Fitting Class
class EIS_exp:
"""
This class is used to plot and/or analyze experimental impedance data. The class has three major functions:
- EIS_plot()
- Lin_KK()
- EIS_fit()
- EIS_plot() is used to plot experimental data with or without fit
- Lin_KK() performs a linear Kramers-Kronig analysis of the experimental data set.
- EIS_fit() performs complex non-linear least-squares fitting of the experimental data to an equivalent circuit
<NAME> (<EMAIL> || <EMAIL>)
Inputs
-----------
- path: path of datafile(s) as a string
    - data: datafile(s) including extension, e.g. ['EIS_data1.mpt', 'EIS_data2.mpt']
    - cycle: Specific cycle numbers can be extracted with this parameter. Default is 'off', which includes all cycle numbers.
    To extract specific cycles, insert the cycle numbers in brackets, e.g. if cycle numbers 1, 4, and 6 are wanted: cycle=[1,4,6]
    - mask: ['high frequency', 'low frequency']; if only a high- or low-frequency limit is desired, use 'none' for the other, e.g. mask=[10**4,'none']
"""
def __init__(self, path, data, cycle="off", mask=["none", "none"]):
self.df_raw0 = []
self.cycleno = []
for j, f in enumerate(data, start=0):
if f.endswith("mpt"): # file is a .mpt file
self.df_raw0.append(
extract_mpt(path=path, EIS_name=f)
) # reads all datafiles
elif f.endswith("DTA"): # file is a .dta file
self.df_raw0.append(
extract_dta(path=path, EIS_name=f)
) # reads all datafiles
elif f.endswith("z"): # file is a .z file
self.df_raw0.append(
extract_solar(path=path, EIS_name=f)
) # reads all datafiles
elif f.endswith("txt"):
self.df_raw0.append(extract_csv(path=path, EIS_name=f))
else:
print("Data file(s) could not be identified")
self.cycleno.append(self.df_raw0[j].cycle_number)
            if j > 0 and np.min(self.cycleno[j]) <= np.max(self.cycleno[j - 1]):
                # corrects cycle_number except for the first data file
                self.df_raw0[j].update(
                    {"cycle_number": self.cycleno[j] + np.max(self.cycleno[j - 1])}
                )  # corrects cycle number
        # currently need to append a cycle_number column to gamry files
# adds individual dataframes into one
        if 1 <= len(self.df_raw0) <= 15:
            self.df_raw = pd.concat(self.df_raw0, axis=0)
        else:
            print("Too many data files || 15 allowed")
self.df_raw = self.df_raw.assign(
w=2 * np.pi * self.df_raw.f
        )  # creates a new column with the angular frequency
# Masking data to each cycle
self.df_pre = []
self.df_limited = []
self.df_limited2 = []
self.df = []
if mask == ["none", "none"] and cycle == "off":
for i in range(len(self.df_raw.cycle_number.unique())): # includes all data
self.df.append(
self.df_raw[
self.df_raw.cycle_number == self.df_raw.cycle_number.unique()[i]
]
)
elif mask == ["none", "none"] and cycle != "off":
for i in range(len(cycle)):
self.df.append(
self.df_raw[self.df_raw.cycle_number == cycle[i]]
) # extracting dataframe for each cycle
elif mask[0] != "none" and mask[1] == "none" and cycle == "off":
self.df_pre = self.df_raw.mask(self.df_raw.f > mask[0])
self.df_pre.dropna(how="all", inplace=True)
for i in range(
len(self.df_pre.cycle_number.unique())
): # Appending data based on cycle number
self.df.append(
self.df_pre[
self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]
]
)
elif (
mask[0] != "none" and mask[1] == "none" and cycle != "off"
): # or [i for i, e in enumerate(mask) if e == 'none'] == [0]
self.df_limited = self.df_raw.mask(self.df_raw.f > mask[0])
for i in range(len(cycle)):
self.df.append(
self.df_limited[self.df_limited.cycle_number == cycle[i]]
)
elif mask[0] == "none" and mask[1] != "none" and cycle == "off":
self.df_pre = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_pre.dropna(how="all", inplace=True)
for i in range(len(self.df_raw.cycle_number.unique())): # includes all data
self.df.append(
self.df_pre[
self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]
]
)
elif mask[0] == "none" and mask[1] != "none" and cycle != "off":
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
for i in range(len(cycle)):
self.df.append(
self.df_limited[self.df_limited.cycle_number == cycle[i]]
)
elif mask[0] != "none" and mask[1] != "none" and cycle != "off":
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])
for i in range(len(cycle)):
self.df.append(
                    self.df_limited2[self.df_limited2.cycle_number == cycle[i]]
)
elif mask[0] != "none" and mask[1] != "none" and cycle == "off":
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])
for i in range(len(self.df_raw.cycle_number.unique())):
self.df.append(
                    self.df_limited2[
self.df_limited2.cycle_number
== self.df_raw.cycle_number.unique()[i]
]
)
else:
print("__init__ error (#2)")
def Lin_KK(
self,
num_RC="auto",
legend="on",
plot="residuals",
bode="off",
nyq_xlim="none",
nyq_ylim="none",
weight_func="Boukamp",
savefig="none",
):
"""
        Plots the Linear Kramers-Kronig (KK) Validity Test
        The script is based on Boukamp and Schönleber et al.'s papers for fitting the resistances of multiple -(RC)- circuits
        to the data. A data quality analysis can hereby be made on the basis of the relative residuals
        Ref.:
        - Schönleber, M. et al. Electrochimica Acta 131 (2014) 20-27
        - Boukamp, B.A. J. Electrochem. Soc., 142, 6, 1885-1894
        The function performs the KK analysis and by default plots the relative residuals in each subplot
        Note that weight_func should be equal to 'Boukamp'.
<NAME> (<EMAIL> || <EMAIL>)
Optional Inputs
-----------------
- num_RC:
        - 'auto' applies an automatic algorithm developed by Schönleber, M. et al. Electrochimica Acta 131 (2014) 20-27
that ensures no under- or over-fitting occurs
- can be hardwired by inserting any number (RC-elements/decade)
- plot:
        - 'residuals' = plots the relative residuals in subplots corresponding to the cycle numbers picked
        - 'w_data' = plots the relative residuals together with the experimental data, in Nyquist and Bode plots if desired, see 'bode =' in the description
        - nyq_xlim/nyq_ylim: Change the x/y-axis limits on the Nyquist plot; if not equal to 'none', state a [min,max] value
- legend:
- 'on' = displays cycle number
- 'potential' = displays average potential which the spectra was measured at
- 'off' = off
bode = Plots Bode Plot - options:
'on' = re, im vs. log(freq)
'log' = log(re, im) vs. log(freq)
're' = re vs. log(freq)
'log_re' = log(re) vs. log(freq)
'im' = im vs. log(freq)
'log_im' = log(im) vs. log(freq)
"""
if num_RC == "auto":
print("cycle || No. RC-elements || u")
self.decade = []
self.Rparam = []
self.t_const = []
self.Lin_KK_Fit = []
self.R_names = []
self.KK_R0 = []
self.KK_R = []
self.number_RC = []
self.number_RC_sort = []
self.KK_u = []
self.KK_Rgreater = []
self.KK_Rminor = []
M = 2
for i in range(len(self.df)):
self.decade.append(
np.log10(np.max(self.df[i].f)) - np.log10(np.min(self.df[i].f))
) # determine the number of RC circuits based on the number of decades measured and num_RC
self.number_RC.append(M)
self.number_RC_sort.append(M) # needed for self.KK_R
self.Rparam.append(
KK_Rnam_val(
re=self.df[i].re,
re_start=self.df[i].re.idxmin(),
num_RC=int(self.number_RC[i]),
)[0]
            )  # Creates initial guesses for R's
self.t_const.append(
KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC[i]))
) # Creates time constants values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit.append(
minimize(
KK_errorfunc,
self.Rparam[i],
method="leastsq",
args=(
self.df[i].w.values,
self.df[i].re.values,
self.df[i].im.values,
self.number_RC[i],
weight_func,
self.t_const[i],
),
)
) # maxfev=99
self.R_names.append(
KK_Rnam_val(
re=self.df[i].re,
re_start=self.df[i].re.idxmin(),
num_RC=int(self.number_RC[i]),
)[1]
) # creates R names
for j in range(len(self.R_names[i])):
self.KK_R0.append(
self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value
)
self.number_RC_sort.insert(0, 0) # needed for self.KK_R
for i in range(len(self.df)):
self.KK_R.append(
self.KK_R0[
int(np.cumsum(self.number_RC_sort)[i]) : int(
np.cumsum(self.number_RC_sort)[i + 1]
)
]
) # assigns resistances from each spectra to their respective df
self.KK_Rgreater.append(
np.where(np.array(self.KK_R)[i] >= 0, np.array(self.KK_R)[i], 0)
)
self.KK_Rminor.append(
np.where(np.array(self.KK_R)[i] < 0, np.array(self.KK_R)[i], 0)
)
self.KK_u.append(
1
- (
np.abs(np.sum(self.KK_Rminor[i]))
/ np.abs(np.sum(self.KK_Rgreater[i]))
)
)
for i in range(len(self.df)):
while self.KK_u[i] <= 0.75 or self.KK_u[i] >= 0.88:
self.number_RC_sort0 = []
self.KK_R_lim = []
self.number_RC[i] = self.number_RC[i] + 1
self.number_RC_sort0.append(self.number_RC)
self.number_RC_sort = np.insert(self.number_RC_sort0, 0, 0)
self.Rparam[i] = KK_Rnam_val(
re=self.df[i].re,
re_start=self.df[i].re.idxmin(),
num_RC=int(self.number_RC[i]),
)[
0
                ]  # Creates initial guesses for R's
self.t_const[i] = KK_timeconst(
w=self.df[i].w, num_RC=int(self.number_RC[i])
) # Creates time constants values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit[i] = minimize(
KK_errorfunc,
self.Rparam[i],
method="leastsq",
args=(
self.df[i].w.values,
self.df[i].re.values,
self.df[i].im.values,
self.number_RC[i],
weight_func,
self.t_const[i],
),
) # maxfev=99
self.R_names[i] = KK_Rnam_val(
re=self.df[i].re,
re_start=self.df[i].re.idxmin(),
num_RC=int(self.number_RC[i]),
)[
1
] # creates R names
self.KK_R0 = np.delete(
np.array(self.KK_R0), np.s_[0 : len(self.KK_R0)]
)
self.KK_R0 = []
for q in range(len(self.df)):
for j in range(len(self.R_names[q])):
self.KK_R0.append(
self.Lin_KK_Fit[q].params.get(self.R_names[q][j]).value
)
self.KK_R_lim = np.cumsum(self.number_RC_sort) # used for KK_R[i]
self.KK_R[i] = self.KK_R0[
self.KK_R_lim[i] : self.KK_R_lim[i + 1]
] # assigns resistances from each spectra to their respective df
self.KK_Rgreater[i] = np.where(
np.array(self.KK_R[i]) >= 0, np.array(self.KK_R[i]), 0
)
self.KK_Rminor[i] = np.where(
np.array(self.KK_R[i]) < 0, np.array(self.KK_R[i]), 0
)
self.KK_u[i] = 1 - (
np.abs(np.sum(self.KK_Rminor[i]))
/ np.abs(np.sum(self.KK_Rgreater[i]))
)
else:
print(
"["
+ str(i + 1)
+ "]"
+ " "
+ str(self.number_RC[i]),
" " + str(np.round(self.KK_u[i], 2)),
)
elif num_RC != "auto": # hardwired number of RC-elements/decade
print("cycle || u")
self.decade = []
self.number_RC0 = []
self.number_RC = []
self.Rparam = []
self.t_const = []
self.Lin_KK_Fit = []
self.R_names = []
self.KK_R0 = []
self.KK_R = []
for i in range(len(self.df)):
self.decade.append(
np.log10(np.max(self.df[i].f)) - np.log10(np.min(self.df[i].f))
) # determine the number of RC circuits based on the number of decades measured and num_RC
self.number_RC0.append(np.round(num_RC * self.decade[i]))
self.number_RC.append(
np.round(num_RC * self.decade[i])
            )  # Creates the number of -(RC)- circuits
self.Rparam.append(
KK_Rnam_val(
re=self.df[i].re,
re_start=self.df[i].re.idxmin(),
num_RC=int(self.number_RC0[i]),
)[0]
            )  # Creates initial guesses for R's
self.t_const.append(
KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC0[i]))
) # Creates time constants values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit.append(
minimize(
KK_errorfunc,
self.Rparam[i],
method="leastsq",
args=(
self.df[i].w.values,
self.df[i].re.values,
self.df[i].im.values,
self.number_RC0[i],
weight_func,
self.t_const[i],
),
)
) # maxfev=99
self.R_names.append(
KK_Rnam_val(
re=self.df[i].re,
re_start=self.df[i].re.idxmin(),
num_RC=int(self.number_RC0[i]),
)[1]
) # creates R names
for j in range(len(self.R_names[i])):
self.KK_R0.append(
self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value
)
self.number_RC0.insert(0, 0)
# print(report_fit(self.Lin_KK_Fit[i])) # prints fitting report
self.KK_circuit_fit = []
self.KK_rr_re = []
self.KK_rr_im = []
self.KK_Rgreater = []
self.KK_Rminor = []
self.KK_u = []
for i in range(len(self.df)):
self.KK_R.append(
self.KK_R0[
int(np.cumsum(self.number_RC0)[i]) : int(
np.cumsum(self.number_RC0)[i + 1]
)
]
) # assigns resistances from each spectra to their respective df
self.KK_Rx = np.array(self.KK_R)
self.KK_Rgreater.append(np.where(self.KK_Rx[i] >= 0, self.KK_Rx[i], 0))
self.KK_Rminor.append(np.where(self.KK_Rx[i] < 0, self.KK_Rx[i], 0))
self.KK_u.append(
1
- (
np.abs(np.sum(self.KK_Rminor[i]))
/ np.abs(np.sum(self.KK_Rgreater[i]))
)
) # currently gives incorrect values
print(
"[" + str(i + 1) + "]" + " " + str(np.round(self.KK_u[i], 2))
)
else:
print("num_RC incorrectly defined")
self.KK_circuit_fit = []
self.KK_rr_re = []
self.KK_rr_im = []
for i in range(len(self.df)):
if int(self.number_RC[i]) == 2:
self.KK_circuit_fit.append(
KK_RC2(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 3:
self.KK_circuit_fit.append(
KK_RC3(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 4:
self.KK_circuit_fit.append(
KK_RC4(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 5:
self.KK_circuit_fit.append(
KK_RC5(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 6:
self.KK_circuit_fit.append(
KK_RC6(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 7:
self.KK_circuit_fit.append(
KK_RC7(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 8:
self.KK_circuit_fit.append(
KK_RC8(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 9:
self.KK_circuit_fit.append(
KK_RC9(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 10:
self.KK_circuit_fit.append(
KK_RC10(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 11:
self.KK_circuit_fit.append(
KK_RC11(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 12:
self.KK_circuit_fit.append(
KK_RC12(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 13:
self.KK_circuit_fit.append(
KK_RC13(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 14:
self.KK_circuit_fit.append(
KK_RC14(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 15:
self.KK_circuit_fit.append(
KK_RC15(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 16:
self.KK_circuit_fit.append(
KK_RC16(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 17:
self.KK_circuit_fit.append(
KK_RC17(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 18:
self.KK_circuit_fit.append(
KK_RC18(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 19:
self.KK_circuit_fit.append(
KK_RC19(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 20:
self.KK_circuit_fit.append(
KK_RC20(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 21:
self.KK_circuit_fit.append(
KK_RC21(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 22:
self.KK_circuit_fit.append(
KK_RC22(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 23:
self.KK_circuit_fit.append(
KK_RC23(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 24:
self.KK_circuit_fit.append(
KK_RC24(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
elif int(self.number_RC[i]) == 25:
self.KK_circuit_fit.append(
KK_RC25(
w=self.df[i].w,
Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
R_values=self.KK_R[i],
t_values=self.t_const[i],
)
)
                elif 26 <= int(self.number_RC[i]) <= 80:
                    # KK_RC26() through KK_RC80() share the same call signature, so the
                    # matching simulation function is looked up by name (assumed to be
                    # available at module level, like the explicitly named circuits).
                    KK_RC_func = globals()["KK_RC" + str(int(self.number_RC[i]))]
                    self.KK_circuit_fit.append(
                        KK_RC_func(
                            w=self.df[i].w,
                            Rs=self.Lin_KK_Fit[i].params.get("Rs").value,
                            R_values=self.KK_R[i],
                            t_values=self.t_const[i],
                        )
                    )
                else:
                    print("RC simulation circuit not defined")
                    print("   Number of RC = ", self.number_RC[i])
self.KK_rr_re.append(
residual_real(
re=self.df[i].re,
fit_re=self.KK_circuit_fit[i].real,
fit_im=-self.KK_circuit_fit[i].imag,
)
) # relative residuals for the real part
self.KK_rr_im.append(
residual_imag(
im=self.df[i].im,
fit_re=self.KK_circuit_fit[i].real,
fit_im=-self.KK_circuit_fit[i].imag,
)
) # relative residuals for the imag part
### Plotting Linear_kk results
##
#
### Label functions
self.label_re_1 = []
self.label_im_1 = []
self.label_cycleno = []
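        # Legend labels: legend == 'on' numbers each cycle (#1, #2, ...), while
        # legend == 'potential' labels each cycle with its rounded mean potential (E_avg).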
if legend == "on":
for i in range(len(self.df)):
self.label_re_1.append("Z' (#" + str(i + 1) + ")")
self.label_im_1.append("Z'' (#" + str(i + 1) + ")")
self.label_cycleno.append("#" + str(i + 1))
elif legend == "potential":
for i in range(len(self.df)):
self.label_re_1.append(
"Z' (" + str(np.round(np.average(self.df[i].E_avg), 2)) + " V)"
)
self.label_im_1.append(
"Z'' (" + str(np.round(np.average(self.df[i].E_avg), 2)) + " V)"
)
self.label_cycleno.append(
str(np.round(np.average(self.df[i].E_avg), 2)) + " V"
)
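        # plot == 'w_data' draws a three-panel figure (Nyquist, Bode, and relative KK residuals);
        # plot == 'residuals' draws only the residual panels, one subplot per cycle.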
if plot == "w_data":
fig = figure(figsize=(6, 8), dpi=120, facecolor="w", edgecolor="k")
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)
ax = fig.add_subplot(311, aspect="equal")
ax1 = fig.add_subplot(312)
ax2 = fig.add_subplot(313)
colors = sns.color_palette("colorblind", n_colors=len(self.df))
colors_real = sns.color_palette("Blues", n_colors=len(self.df) + 2)
colors_imag = sns.color_palette("Oranges", n_colors=len(self.df) + 2)
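            # One qualitative color per cycle for the Nyquist traces; sequential Blues/Oranges
            # shades separate the real (Z') and imaginary (-Z'') series in the Bode and residual panels.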
### Nyquist Plot
for i in range(len(self.df)):
ax.plot(
self.df[i].re,
self.df[i].im,
marker="o",
ms=4,
lw=2,
color=colors[i],
ls="-",
alpha=0.7,
label=self.label_cycleno[i],
)
### Bode Plot
if bode == "on":
for i in range(len(self.df)):
ax1.plot(
np.log10(self.df[i].f),
self.df[i].re,
color=colors_real[i + 1],
marker="D",
ms=3,
lw=2.25,
ls="-",
alpha=0.7,
label=self.label_re_1[i],
)
ax1.plot(
np.log10(self.df[i].f),
self.df[i].im,
color=colors_imag[i + 1],
marker="s",
ms=3,
lw=2.25,
ls="-",
alpha=0.7,
label=self.label_im_1[i],
)
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z', -Z'' [$\Omega$]")
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
elif bode == "re":
for i in range(len(self.df)):
ax1.plot(
np.log10(self.df[i].f),
self.df[i].re,
color=colors_real[i + 1],
marker="D",
ms=3,
lw=2.25,
ls="-",
alpha=0.7,
label=self.label_cycleno[i],
)
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z' [$\Omega$]")
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
elif bode == "log_re":
for i in range(len(self.df)):
ax1.plot(
np.log10(self.df[i].f),
np.log10(self.df[i].re),
color=colors_real[i + 1],
marker="D",
ms=3,
lw=2.25,
ls="-",
alpha=0.7,
label=self.label_cycleno[i],
)
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z') [$\Omega$]")
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
elif bode == "im":
for i in range(len(self.df)):
ax1.plot(
np.log10(self.df[i].f),
self.df[i].im,
color=colors_imag[i + 1],
marker="s",
ms=3,
lw=2.25,
ls="-",
alpha=0.7,
label=self.label_cycleno[i],
)
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("-Z'' [$\Omega$]")
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
elif bode == "log_im":
for i in range(len(self.df)):
ax1.plot(
np.log10(self.df[i].f),
np.log10(self.df[i].im),
color=colors_imag[i + 1],
marker="s",
ms=3,
lw=2.25,
ls="-",
alpha=0.7,
label=self.label_cycleno[i],
)
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(-Z'') [$\Omega$]")
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
elif bode == "log":
for i in range(len(self.df)):
ax1.plot(
np.log10(self.df[i].f),
np.log10(self.df[i].re),
color=colors_real[i + 1],
marker="D",
ms=3,
lw=2.25,
ls="-",
alpha=0.7,
label=self.label_re_1[i],
)
ax1.plot(
np.log10(self.df[i].f),
np.log10(self.df[i].im),
color=colors_imag[i + 1],
marker="s",
ms=3,
lw=2.25,
ls="-",
alpha=0.7,
label=self.label_im_1[i],
)
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z', -Z'') [$\Omega$]")
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
### Kramers-Kronig Relative Residuals
for i in range(len(self.df)):
ax2.plot(
np.log10(self.df[i].f),
self.KK_rr_re[i] * 100,
color=colors_real[i + 1],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label=self.label_re_1[i],
)
ax2.plot(
np.log10(self.df[i].f),
self.KK_rr_im[i] * 100,
color=colors_imag[i + 1],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label=self.label_im_1[i],
)
ax2.set_xlabel("log(f) [Hz]")
ax2.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
if legend == "on" or legend == "potential":
ax2.legend(loc="best", fontsize=10, frameon=False)
ax2.axhline(0, ls="--", c="k", alpha=0.5)
            ### Setting ylims and annotating 'Lin-KK' on the residuals (RR) subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
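            # Scale the residual axis from whichever of the real/imaginary residuals reaches the
            # more negative minimum, and place the 'Lin-KK' tag near the top of the panel.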
if np.min(self.KK_rr_im_min) > np.min(self.KK_rr_re_min):
ax2.set_ylim(
np.min(self.KK_rr_re_min) * 100 * 1.5,
np.max(np.abs(self.KK_rr_re_min)) * 100 * 1.5,
)
ax2.annotate(
"Lin-KK",
xy=[
np.min(np.log10(self.df[0].f)),
np.max(self.KK_rr_re_max) * 100 * 0.9,
],
color="k",
fontweight="bold",
)
elif np.min(self.KK_rr_im_min) < np.min(self.KK_rr_re_min):
ax2.set_ylim(
np.min(self.KK_rr_im_min) * 100 * 1.5,
np.max(self.KK_rr_im_max) * 100 * 1.5,
)
ax2.annotate(
"Lin-KK",
xy=[
np.min(np.log10(self.df[0].f)),
np.max(self.KK_rr_im_max) * 100 * 0.9,
],
color="k",
fontweight="bold",
)
### Figure specifics
if legend == "on" or legend == "potential":
ax.legend(loc="best", fontsize=10, frameon=False)
ax.set_xlabel("Z' [$\Omega$]")
ax.set_ylabel("-Z'' [$\Omega$]")
if nyq_xlim != "none":
ax.set_xlim(nyq_xlim[0], nyq_xlim[1])
if nyq_ylim != "none":
ax.set_ylim(nyq_ylim[0], nyq_ylim[1])
# Save Figure
if savefig != "none":
fig.savefig(savefig)
### Illustrating residuals only
elif plot == "residuals":
colors = sns.color_palette("colorblind", n_colors=9)
colors_real = sns.color_palette("Blues", n_colors=9)
colors_imag = sns.color_palette("Oranges", n_colors=9)
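            # One residual subplot per cycle; each branch below picks a fixed subplot grid
            # for the corresponding number of cycles in self.df.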
### 1 Cycle
if len(self.df) == 1:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor="w", edgecolor="k")
fig.subplots_adjust(
left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95
)
ax = fig.add_subplot(231)
ax.plot(
np.log10(self.df[0].f),
self.KK_rr_re[0] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax.plot(
np.log10(self.df[0].f),
self.KK_rr_im[0] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax.set_xlabel("log(f) [Hz]")
ax.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
if legend == "on" or legend == "potential":
ax.legend(loc="best", fontsize=10, frameon=False)
ax.axhline(0, ls="--", c="k", alpha=0.5)
                ### Setting ylims and annotating 'Lin-KK' on the residuals (RR) subplot
self.KK_rr_im_min = np.min(self.KK_rr_im)
self.KK_rr_im_max = np.max(self.KK_rr_im)
self.KK_rr_re_min = np.min(self.KK_rr_re)
self.KK_rr_re_max = np.max(self.KK_rr_re)
if self.KK_rr_re_max > self.KK_rr_im_max:
self.KK_ymax = self.KK_rr_re_max
else:
self.KK_ymax = self.KK_rr_im_max
if self.KK_rr_re_min < self.KK_rr_im_min:
self.KK_ymin = self.KK_rr_re_min
else:
self.KK_ymin = self.KK_rr_im_min
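                # Symmetric y-limits: the extreme (min or max) with the larger magnitude sets
                # both bounds (scaled by 1.5), and a 'Lin-KK' tag is placed near the upper bound.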
if np.abs(self.KK_ymin) > self.KK_ymax:
ax.set_ylim(
self.KK_ymin * 100 * 1.5, np.abs(self.KK_ymin) * 100 * 1.5
)
if legend == "on":
ax.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin) < self.KK_ymax:
ax.set_ylim(
np.negative(self.KK_ymax) * 100 * 1.5,
np.abs(self.KK_ymax) * 100 * 1.5,
)
if legend == "on":
ax.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
self.KK_ymax * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
self.KK_ymax * 100 * 1.3,
],
color="k",
fontweight="bold",
)
# Save Figure
if savefig != "none":
fig.savefig(savefig)
### 2 Cycles
elif len(self.df) == 2:
fig = figure(figsize=(12, 5), dpi=120, facecolor="w", edgecolor="k")
fig.subplots_adjust(
left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95
)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
# cycle 1
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_re[0] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_im[0] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
ax1.axhline(0, ls="--", c="k", alpha=0.5)
# cycle 2
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_re[1] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_im[1] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax2.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax2.legend(loc="best", fontsize=10, frameon=False)
ax2.axhline(0, ls="--", c="k", alpha=0.5)
                ### Setting ylims and annotating 'Lin-KK' in the residuals (RR) subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(
self.KK_ymin[0] * 100 * 1.5, np.abs(self.KK_ymin[0]) * 100 * 1.5
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(
np.negative(self.KK_ymax[0]) * 100 * 1.5,
np.abs(self.KK_ymax[0]) * 100 * 1.5,
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymax[0]) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
self.KK_ymax[0] * 100 * 1.3,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(
self.KK_ymin[1] * 100 * 1.5, np.abs(self.KK_ymin[1]) * 100 * 1.5
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
np.max(np.abs(self.KK_ymin[1])) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
np.max(np.abs(self.KK_ymin[1])) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(
np.negative(self.KK_ymax[1]) * 100 * 1.5,
np.abs(self.KK_ymax[1]) * 100 * 1.5,
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.3,
],
color="k",
fontweight="bold",
)
# Save Figure
if savefig != "none":
fig.savefig(savefig)
### 3 Cycles
elif len(self.df) == 3:
fig = figure(figsize=(12, 5), dpi=120, facecolor="w", edgecolor="k")
fig.subplots_adjust(
left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95
)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
# cycle 1
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_re[0] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_im[0] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
ax1.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 2
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_re[1] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_im[1] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax2.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax2.legend(loc="best", fontsize=10, frameon=False)
ax2.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 3
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_re[2] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_im[2] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax3.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax3.legend(loc="best", fontsize=10, frameon=False)
ax3.axhline(0, ls="--", c="k", alpha=0.5)
                ### Setting ylims and annotating 'Lin-KK' in the residuals (RR) subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(
self.KK_ymin[0] * 100 * 1.5, np.abs(self.KK_ymin[0]) * 100 * 1.5
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(
np.negative(self.KK_ymax[0]) * 100 * 1.5,
np.abs(self.KK_ymax[0]) * 100 * 1.5,
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymax[0]) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
self.KK_ymax[0] * 100 * 1.3,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(
self.KK_ymin[1] * 100 * 1.5, np.abs(self.KK_ymin[1]) * 100 * 1.5
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
np.max(np.abs(self.KK_ymin[1])) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
np.max(np.abs(self.KK_ymin[1])) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(
np.negative(self.KK_ymax[1]) * 100 * 1.5,
np.abs(self.KK_ymax[1]) * 100 * 1.5,
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.3,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(
self.KK_ymin[2] * 100 * 1.5, np.abs(self.KK_ymin[2]) * 100 * 1.5
)
if legend == "on":
ax3.annotate(
"Lin-KK, #3",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymin[2]) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax3.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[2].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymin[2]) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(
                        np.negative(self.KK_ymax[2]) * 100 * 1.5,
np.abs(self.KK_ymax[2]) * 100 * 1.5,
)
if legend == "on":
ax3.annotate(
"Lin-KK, #3",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymax[2]) * 100 * 1.3,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax3.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[2].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[2].f)),
self.KK_ymax[2] * 100 * 1.3,
],
color="k",
fontweight="bold",
)
# Save Figure
if savefig != "none":
fig.savefig(savefig)
### 4 Cycles
elif len(self.df) == 4:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor="w", edgecolor="k")
fig.subplots_adjust(
left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95
)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
# cycle 1
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_re[0] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_im[0] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
ax1.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 2
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_re[1] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_im[1] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax2.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax2.legend(loc="best", fontsize=10, frameon=False)
ax2.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 3
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_re[2] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_im[2] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax3.set_xlabel("log(f) [Hz]")
ax3.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == "on" or legend == "potential":
ax3.legend(loc="best", fontsize=10, frameon=False)
ax3.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 4
ax4.plot(
np.log10(self.df[3].f),
self.KK_rr_re[3] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax4.plot(
np.log10(self.df[3].f),
self.KK_rr_im[3] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax4.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax4.legend(loc="best", fontsize=10, frameon=False)
ax4.axhline(0, ls="--", c="k", alpha=0.5)
                ### Setting ylims and annotating 'Lin-KK' in the residuals (RR) subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(
self.KK_ymin[0] * 100 * 1.5, np.abs(self.KK_ymin[0]) * 100 * 1.5
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(
np.negative(self.KK_ymax[0]) * 100 * 1.5,
np.abs(self.KK_ymax[0]) * 100 * 1.5,
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymax[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
self.KK_ymax[0] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(
self.KK_ymin[1] * 100 * 1.5, np.abs(self.KK_ymin[1]) * 100 * 1.5
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
np.max(np.abs(self.KK_ymin[1])) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
np.max(np.abs(self.KK_ymin[1])) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(
np.negative(self.KK_ymax[1]) * 100 * 1.5,
np.abs(self.KK_ymax[1]) * 100 * 1.5,
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(
self.KK_ymin[2] * 100 * 1.5, np.abs(self.KK_ymin[2]) * 100 * 1.5
)
if legend == "on":
ax3.annotate(
"Lin-KK, #3",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymin[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax3.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[2].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymin[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(
                        np.negative(self.KK_ymax[2]) * 100 * 1.5,
np.abs(self.KK_ymax[2]) * 100 * 1.5,
)
if legend == "on":
ax3.annotate(
"Lin-KK, #3",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymax[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax3.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[2].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[2].f)),
self.KK_ymax[2] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(
self.KK_ymin[3] * 100 * 1.5, np.abs(self.KK_ymin[3]) * 100 * 1.5
)
if legend == "on":
ax4.annotate(
"Lin-KK, #4",
xy=[
np.min(np.log10(self.df[3].f)),
np.abs(self.KK_ymin[3]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax4.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[3].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[3].f)),
np.abs(self.KK_ymin[3]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(
np.negative(self.KK_ymax[3]) * 100 * 1.5,
np.abs(self.KK_ymax[3]) * 100 * 1.5,
)
if legend == "on":
ax4.annotate(
"Lin-KK, #4",
xy=[
np.min(np.log10(self.df[3].f)),
np.abs(self.KK_ymax[3]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax4.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[3].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[3].f)),
self.KK_ymax[3] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
# Save Figure
if savefig != "none":
fig.savefig(savefig)
### 5 Cycles
elif len(self.df) == 5:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor="w", edgecolor="k")
fig.subplots_adjust(
left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95
)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
# cycle 1
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_re[0] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_im[0] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
ax1.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 2
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_re[1] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_im[1] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
if legend == "on" or legend == "potential":
ax2.legend(loc="best", fontsize=10, frameon=False)
ax2.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 3
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_re[2] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_im[2] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax3.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax3.legend(loc="best", fontsize=10, frameon=False)
ax3.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 4
ax4.plot(
np.log10(self.df[3].f),
self.KK_rr_re[3] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax4.plot(
np.log10(self.df[3].f),
self.KK_rr_im[3] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
ax4.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax4.legend(loc="best", fontsize=10, frameon=False)
ax4.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 5
ax5.plot(
np.log10(self.df[4].f),
self.KK_rr_re[4] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax5.plot(
np.log10(self.df[4].f),
self.KK_rr_im[4] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax5.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax5.legend(loc="best", fontsize=10, frameon=False)
ax5.axhline(0, ls="--", c="k", alpha=0.5)
                ### Setting ylims and annotating 'Lin-KK' in the residuals (RR) subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(
self.KK_ymin[0] * 100 * 1.5, np.abs(self.KK_ymin[0]) * 100 * 1.5
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(
np.negative(self.KK_ymax[0]) * 100 * 1.5,
np.abs(self.KK_ymax[0]) * 100 * 1.5,
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymax[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
self.KK_ymax[0] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(
self.KK_ymin[1] * 100 * 1.5, np.abs(self.KK_ymin[1]) * 100 * 1.5
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
np.max(np.abs(self.KK_ymin[1])) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
np.max(np.abs(self.KK_ymin[1])) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(
np.negative(self.KK_ymax[1]) * 100 * 1.5,
np.abs(self.KK_ymax[1]) * 100 * 1.5,
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(
self.KK_ymin[2] * 100 * 1.5, np.abs(self.KK_ymin[2]) * 100 * 1.5
)
if legend == "on":
ax3.annotate(
"Lin-KK, #3",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymin[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax3.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[2].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymin[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(
                        np.negative(self.KK_ymax[2]) * 100 * 1.5,
np.abs(self.KK_ymax[2]) * 100 * 1.5,
)
if legend == "on":
ax3.annotate(
"Lin-KK, #3",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymax[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax3.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[2].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[2].f)),
self.KK_ymax[2] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(
self.KK_ymin[3] * 100 * 1.5, np.abs(self.KK_ymin[3]) * 100 * 1.5
)
if legend == "on":
ax4.annotate(
"Lin-KK, #4",
xy=[
np.min(np.log10(self.df[3].f)),
np.abs(self.KK_ymin[3]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax4.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[3].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[3].f)),
np.abs(self.KK_ymin[3]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(
np.negative(self.KK_ymax[3]) * 100 * 1.5,
np.abs(self.KK_ymax[3]) * 100 * 1.5,
)
if legend == "on":
ax4.annotate(
"Lin-KK, #4",
xy=[
np.min(np.log10(self.df[3].f)),
np.abs(self.KK_ymax[3]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax4.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[3].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[3].f)),
self.KK_ymax[3] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(
self.KK_ymin[4] * 100 * 1.5, np.abs(self.KK_ymin[4]) * 100 * 1.5
)
if legend == "on":
ax5.annotate(
"Lin-KK, #5",
xy=[
np.min(np.log10(self.df[4].f)),
np.abs(self.KK_ymin[4]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax5.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[4].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[4].f)),
np.abs(self.KK_ymin[4]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(
np.negative(self.KK_ymax[4]) * 100 * 1.5,
np.abs(self.KK_ymax[4]) * 100 * 1.5,
)
if legend == "on":
ax5.annotate(
"Lin-KK, #5",
xy=[
np.min(np.log10(self.df[4].f)),
np.abs(self.KK_ymax[4]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax5.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[4].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[4].f)),
self.KK_ymax[4] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
# Save Figure
if savefig != "none":
fig.savefig(savefig)
### 6 Cycles
elif len(self.df) == 6:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor="w", edgecolor="k")
fig.subplots_adjust(
left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95
)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
ax6 = fig.add_subplot(236)
# cycle 1
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_re[0] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_im[0] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
ax1.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 2
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_re[1] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_im[1] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
if legend == "on" or legend == "potential":
ax2.legend(loc="best", fontsize=10, frameon=False)
ax2.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 3
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_re[2] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_im[2] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
if legend == "on" or legend == "potential":
ax3.legend(loc="best", fontsize=10, frameon=False)
ax3.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 4
ax4.plot(
np.log10(self.df[3].f),
self.KK_rr_re[3] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax4.plot(
np.log10(self.df[3].f),
self.KK_rr_im[3] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax4.set_xlabel("log(f) [Hz]")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == "on" or legend == "potential":
ax4.legend(loc="best", fontsize=10, frameon=False)
ax4.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 5
ax5.plot(
np.log10(self.df[4].f),
self.KK_rr_re[4] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax5.plot(
np.log10(self.df[4].f),
self.KK_rr_im[4] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax5.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax5.legend(loc="best", fontsize=10, frameon=False)
ax5.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 6
ax6.plot(
np.log10(self.df[5].f),
self.KK_rr_re[5] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax6.plot(
np.log10(self.df[5].f),
self.KK_rr_im[5] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax6.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax6.legend(loc="best", fontsize=10, frameon=False)
ax6.axhline(0, ls="--", c="k", alpha=0.5)
                ### Setting ylims and annotating 'Lin-KK' in the residuals (RR) subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(
self.KK_ymin[0] * 100 * 1.5, np.abs(self.KK_ymin[0]) * 100 * 1.5
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(
np.negative(self.KK_ymax[0]) * 100 * 1.5,
np.abs(self.KK_ymax[0]) * 100 * 1.5,
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymax[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
self.KK_ymax[0] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(
self.KK_ymin[1] * 100 * 1.5, np.abs(self.KK_ymin[1]) * 100 * 1.5
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
np.max(np.abs(self.KK_ymin[1])) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
np.max(np.abs(self.KK_ymin[1])) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(
np.negative(self.KK_ymax[1]) * 100 * 1.5,
np.abs(self.KK_ymax[1]) * 100 * 1.5,
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(
self.KK_ymin[2] * 100 * 1.5, np.abs(self.KK_ymin[2]) * 100 * 1.5
)
if legend == "on":
ax3.annotate(
"Lin-KK, #3",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymin[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax3.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[2].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymin[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(
                        np.negative(self.KK_ymax[2]) * 100 * 1.5,
np.abs(self.KK_ymax[2]) * 100 * 1.5,
)
if legend == "on":
ax3.annotate(
"Lin-KK, #3",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymax[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax3.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[2].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[2].f)),
self.KK_ymax[2] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(
self.KK_ymin[3] * 100 * 1.5, np.abs(self.KK_ymin[3]) * 100 * 1.5
)
if legend == "on":
ax4.annotate(
"Lin-KK, #4",
xy=[
np.min(np.log10(self.df[3].f)),
np.abs(self.KK_ymin[3]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax4.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[3].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[3].f)),
np.abs(self.KK_ymin[3]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(
np.negative(self.KK_ymax[3]) * 100 * 1.5,
np.abs(self.KK_ymax[3]) * 100 * 1.5,
)
if legend == "on":
ax4.annotate(
"Lin-KK, #4",
xy=[
np.min(np.log10(self.df[3].f)),
np.abs(self.KK_ymax[3]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax4.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[3].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[3].f)),
self.KK_ymax[3] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(
self.KK_ymin[4] * 100 * 1.5, np.abs(self.KK_ymin[4]) * 100 * 1.5
)
if legend == "on":
ax5.annotate(
"Lin-KK, #5",
xy=[
np.min(np.log10(self.df[4].f)),
np.abs(self.KK_ymin[4]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax5.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[4].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[4].f)),
np.abs(self.KK_ymin[4]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(
np.negative(self.KK_ymax[4]) * 100 * 1.5,
np.abs(self.KK_ymax[4]) * 100 * 1.5,
)
if legend == "on":
ax5.annotate(
"Lin-KK, #5",
xy=[
np.min(np.log10(self.df[4].f)),
np.abs(self.KK_ymax[4]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax5.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[4].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[4].f)),
self.KK_ymax[4] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[5]) > self.KK_ymax[5]:
ax6.set_ylim(
self.KK_ymin[5] * 100 * 1.5, np.abs(self.KK_ymin[5]) * 100 * 1.5
)
if legend == "on":
ax6.annotate(
"Lin-KK, #6",
xy=[
np.min(np.log10(self.df[5].f)),
np.abs(self.KK_ymin[5]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax6.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[5].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[5].f)),
np.abs(self.KK_ymin[5]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[5]) < self.KK_ymax[5]:
ax6.set_ylim(
np.negative(self.KK_ymax[5]) * 100 * 1.5,
np.abs(self.KK_ymax[5]) * 100 * 1.5,
)
if legend == "on":
ax6.annotate(
"Lin-KK, #6",
xy=[
np.min(np.log10(self.df[5].f)),
np.abs(self.KK_ymax[5]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax6.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[5].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[5].f)),
self.KK_ymax[5] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
# Save Figure
if savefig != "none":
fig.savefig(savefig)
### 7 Cycles
elif len(self.df) == 7:
fig = figure(figsize=(12, 5), dpi=120, facecolor="w", edgecolor="k")
fig.subplots_adjust(
left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95
)
ax1 = fig.add_subplot(331)
ax2 = fig.add_subplot(332)
ax3 = fig.add_subplot(333)
ax4 = fig.add_subplot(334)
ax5 = fig.add_subplot(335)
ax6 = fig.add_subplot(336)
ax7 = fig.add_subplot(337)
# cycle 1
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_re[0] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_im[0] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
ax1.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 2
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_re[1] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_im[1] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
if legend == "on" or legend == "potential":
ax2.legend(loc="best", fontsize=10, frameon=False)
ax2.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 3
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_re[2] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_im[2] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax3.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax3.legend(loc="best", fontsize=10, frameon=False)
ax3.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 4
ax4.plot(
np.log10(self.df[3].f),
self.KK_rr_re[3] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax4.plot(
np.log10(self.df[3].f),
self.KK_rr_im[3] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == "on" or legend == "potential":
ax4.legend(loc="best", fontsize=10, frameon=False)
ax4.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 5
ax5.plot(
np.log10(self.df[4].f),
self.KK_rr_re[4] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax5.plot(
np.log10(self.df[4].f),
self.KK_rr_im[4] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax5.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax5.legend(loc="best", fontsize=10, frameon=False)
ax5.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 6
ax6.plot(
np.log10(self.df[5].f),
self.KK_rr_re[5] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax6.plot(
np.log10(self.df[5].f),
self.KK_rr_im[5] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax6.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax6.legend(loc="best", fontsize=10, frameon=False)
ax6.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 7
ax7.plot(
np.log10(self.df[6].f),
self.KK_rr_re[6] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax7.plot(
np.log10(self.df[6].f),
self.KK_rr_im[6] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax7.set_xlabel("log(f) [Hz]")
ax7.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == "on" or legend == "potential":
ax7.legend(loc="best", fontsize=10, frameon=False)
ax7.axhline(0, ls="--", c="k", alpha=0.5)
                ### Setting ylims and annotating 'Lin-KK' in the residuals (RR) subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(
self.KK_ymin[0] * 100 * 1.5, np.abs(self.KK_ymin[0]) * 100 * 1.5
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymin[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(
np.negative(self.KK_ymax[0]) * 100 * 1.5,
np.abs(self.KK_ymax[0]) * 100 * 1.5,
)
if legend == "on":
ax1.annotate(
"Lin-KK, #1",
xy=[
np.min(np.log10(self.df[0].f)),
np.abs(self.KK_ymax[0]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax1.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[0].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[0].f)),
self.KK_ymax[0] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(
self.KK_ymin[1] * 100 * 1.5, np.abs(self.KK_ymin[1]) * 100 * 1.5
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
                                np.max(np.abs(self.KK_ymin[1])) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
np.max(np.abs(self.KK_ymin[1])) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(
np.negative(self.KK_ymax[1]) * 100 * 1.5,
np.abs(self.KK_ymax[1]) * 100 * 1.5,
)
if legend == "on":
ax2.annotate(
"Lin-KK, #2",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax2.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[1].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[1].f)),
self.KK_ymax[1] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(
self.KK_ymin[2] * 100 * 1.5, np.abs(self.KK_ymin[2]) * 100 * 1.5
)
if legend == "on":
ax3.annotate(
"Lin-KK, #3",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymin[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax3.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[2].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymin[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(
np.negative(self.KK_ymax[2]) * 100 * 1.5,
np.abs(self.KK_ymax[2]) * 100 * 1.5,
)
if legend == "on":
ax3.annotate(
"Lin-KK, #3",
xy=[
np.min(np.log10(self.df[2].f)),
np.abs(self.KK_ymax[2]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax3.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[2].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[2].f)),
self.KK_ymax[2] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(
self.KK_ymin[3] * 100 * 1.5, np.abs(self.KK_ymin[3]) * 100 * 1.5
)
if legend == "on":
ax4.annotate(
"Lin-KK, #4",
xy=[
np.min(np.log10(self.df[3].f)),
np.abs(self.KK_ymin[3]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax4.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[3].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[3].f)),
np.abs(self.KK_ymin[3]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(
np.negative(self.KK_ymax[3]) * 100 * 1.5,
np.abs(self.KK_ymax[3]) * 100 * 1.5,
)
if legend == "on":
ax4.annotate(
"Lin-KK, #4",
xy=[
np.min(np.log10(self.df[3].f)),
np.abs(self.KK_ymax[3]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax4.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[3].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[3].f)),
self.KK_ymax[3] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(
self.KK_ymin[4] * 100 * 1.5, np.abs(self.KK_ymin[4]) * 100 * 1.5
)
if legend == "on":
ax5.annotate(
"Lin-KK, #5",
xy=[
np.min(np.log10(self.df[4].f)),
np.abs(self.KK_ymin[4]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax5.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[4].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[4].f)),
np.abs(self.KK_ymin[4]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(
np.negative(self.KK_ymax[4]) * 100 * 1.5,
np.abs(self.KK_ymax[4]) * 100 * 1.5,
)
if legend == "on":
ax5.annotate(
"Lin-KK, #5",
xy=[
np.min(np.log10(self.df[4].f)),
np.abs(self.KK_ymax[4]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax5.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[4].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[4].f)),
self.KK_ymax[4] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[5]) > self.KK_ymax[5]:
ax6.set_ylim(
self.KK_ymin[5] * 100 * 1.5, np.abs(self.KK_ymin[5]) * 100 * 1.5
)
if legend == "on":
ax6.annotate(
"Lin-KK, #6",
xy=[
np.min(np.log10(self.df[5].f)),
np.abs(self.KK_ymin[5]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax6.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[5].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[5].f)),
np.abs(self.KK_ymin[5]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[5]) < self.KK_ymax[5]:
ax6.set_ylim(
np.negative(self.KK_ymax[5]) * 100 * 1.5,
np.abs(self.KK_ymax[5]) * 100 * 1.5,
)
if legend == "on":
ax6.annotate(
"Lin-KK, #6",
xy=[
np.min(np.log10(self.df[5].f)),
np.abs(self.KK_ymax[5]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax6.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[5].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[5].f)),
self.KK_ymax[5] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
if np.abs(self.KK_ymin[6]) > self.KK_ymax[6]:
ax7.set_ylim(
self.KK_ymin[6] * 100 * 1.5, np.abs(self.KK_ymin[6]) * 100 * 1.5
)
if legend == "on":
ax7.annotate(
"Lin-KK, #7",
xy=[
np.min(np.log10(self.df[6].f)),
np.abs(self.KK_ymin[6]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax7.annotate(
"Lin-KK ("
+ str(np.round(np.average(self.df[6].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[6].f)),
np.abs(self.KK_ymin[6]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif np.abs(self.KK_ymin[6]) < self.KK_ymax[6]:
ax7.set_ylim(
np.negative(self.KK_ymax[6]) * 100 * 1.5,
np.abs(self.KK_ymax[6]) * 100 * 1.5,
)
if legend == "on":
ax7.annotate(
"Lin-KK, #7",
xy=[
np.min(np.log10(self.df[6].f)),
np.abs(self.KK_ymax[6]) * 100 * 1.2,
],
color="k",
fontweight="bold",
)
elif legend == "potential":
ax7.annotate(
"Lin-KK, ("
+ str(np.round(np.average(self.df[6].E_avg), 2))
+ " V)",
xy=[
np.min(np.log10(self.df[6].f)),
self.KK_ymax[6] * 100 * 1.2,
],
color="k",
fontweight="bold",
)
# Save Figure
if savefig != "none":
fig.savefig(savefig)
### 8 Cycles
elif len(self.df) == 8:
fig = figure(figsize=(12, 5), dpi=120, facecolor="w", edgecolor="k")
fig.subplots_adjust(
left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95
)
ax1 = fig.add_subplot(331)
ax2 = fig.add_subplot(332)
ax3 = fig.add_subplot(333)
ax4 = fig.add_subplot(334)
ax5 = fig.add_subplot(335)
ax6 = fig.add_subplot(336)
ax7 = fig.add_subplot(337)
ax8 = fig.add_subplot(338)
# Cycle 1
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_re[0] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax1.plot(
np.log10(self.df[0].f),
self.KK_rr_im[0] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=14)
if legend == "on" or legend == "potential":
ax1.legend(loc="best", fontsize=10, frameon=False)
ax1.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 2
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_re[1] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax2.plot(
np.log10(self.df[1].f),
self.KK_rr_im[1] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
if legend == "on" or legend == "potential":
ax2.legend(loc="best", fontsize=10, frameon=False)
ax2.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 3
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_re[2] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax3.plot(
np.log10(self.df[2].f),
self.KK_rr_im[2] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
if legend == "on" or legend == "potential":
ax3.legend(loc="best", fontsize=10, frameon=False)
ax3.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 4
ax4.plot(
np.log10(self.df[3].f),
self.KK_rr_re[3] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax4.plot(
np.log10(self.df[3].f),
self.KK_rr_im[3] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=14)
if legend == "on" or legend == "potential":
ax4.legend(loc="best", fontsize=10, frameon=False)
ax4.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 5
ax5.plot(
np.log10(self.df[4].f),
self.KK_rr_re[4] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax5.plot(
np.log10(self.df[4].f),
self.KK_rr_im[4] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
if legend == "on" or legend == "potential":
ax5.legend(loc="best", fontsize=10, frameon=False)
ax5.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 6
ax6.plot(
np.log10(self.df[5].f),
self.KK_rr_re[5] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax6.plot(
np.log10(self.df[5].f),
self.KK_rr_im[5] * 100,
color=colors_imag[3],
marker="s",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$-Z''",
)
ax6.set_xlabel("log(f) [Hz]")
if legend == "on" or legend == "potential":
ax6.legend(loc="best", fontsize=10, frameon=False)
ax6.axhline(0, ls="--", c="k", alpha=0.5)
# Cycle 7
ax7.plot(
np.log10(self.df[6].f),
self.KK_rr_re[6] * 100,
color=colors_real[3],
marker="D",
ls="--",
ms=6,
alpha=0.7,
label="$\Delta$Z'",
)
ax7.plot(
|
np.log10(self.df[6].f)
|
numpy.log10
|
# A simple implementation of GW@DFT class; meant to be used to run GW on small molecules
# using RI/CD GW algorithm
import psi4
import numpy as np
import scipy as sp
class GW_DFT:
def __init__(self, wfn, mol, gw_par):
# wfn - Psi4 w.f. object from SCF calculation
# mol - Psi4 molecule object
# gw_par - is a dictionary with GW calculation parameters
# such as the number of states or the number of
# omega sampling points
self.scf_wfn = wfn
self.mol = mol
self.gw_par = gw_par
assert hasattr(self, 'scf_wfn')
assert hasattr(self, 'mol')
assert hasattr(self, 'gw_par')
self.nfzc = 0 if not 'nfzc' in gw_par.keys() else gw_par['nfzc'] # read this parameter early and make sure that nocc is adjusted properly
self._init_sys_params() # sets some basic system parameters
self._gen_ri_ints() # generates integrals for RI-GW and
# RI integrals are now available in self.nmR
# Generate modified Gauss-Legendre grid for the frequency integration on imaginary axis
self.gl_npoint = 100 if not 'gl_npoint' in gw_par.keys() else gw_par['gl_npoint']
self._gen_gaussleg()
# Setup exchange-correlation kernel
self._XC()
# set GW calculation parameters
# parameters of the self-energy calculation
nomega_sigma = 501 if not 'nomega_sigma' in gw_par.keys() else gw_par['nomega_sigma']
step_sigma = 0.01 if not 'step_sigma' in gw_par.keys() else gw_par['step_sigma']
# Quasi-particle states
self.no_qp = self.nocc if not 'no_qp' in gw_par.keys() else gw_par['no_qp'] # Number of hole states
self.nv_qp = 0 if not 'nv_qp' in gw_par.keys() else gw_par['nv_qp'] # Number of particle states
self.eta = 1e-3 if not 'eta' in gw_par.keys() else gw_par['eta'] # Default eta=1e-3 is recommended by Bruneval
# Algorithms
self.debug = False if not 'debug' in gw_par.keys() else gw_par['debug']
self.low_mem = True if not 'low_mem' in gw_par.keys() else gw_par['low_mem']
# Screened Coulomb interaction
self.analytic_W = False if not 'analytic_W' in gw_par.keys() else gw_par['analytic_W']
# More or less printout
self.verbose = True if not 'verbose' in gw_par.keys() else gw_par['verbose']
if self.debug:
print("Running in debug mode!")
else:
print("Running in production mode!")
# Quick sanity check
assert self.no_qp <= self.nocc and self.nv_qp <= self.nvir
# ### GW calculation starts here ###
# create an array of sampling frequencies similar to MolGW
nomega_grid = nomega_sigma // 2 # note this is a truncation (aka integer) division
omega_grid = np.array(range(-nomega_grid, nomega_grid + 1)) * step_sigma
# sampling energies for all the states so we could calculate the self-energy matrix (broadcasting)
omega_grid_all = omega_grid + self.eps[self.nocc - self.no_qp:self.nocc + self.nv_qp].reshape((-1, 1))
assert omega_grid_all.shape == (self.no_qp + self.nv_qp, 2*nomega_grid + 1)
print("Shape of the omega_grid_all is ", omega_grid_all.shape)
self.omega_grid_all = np.copy(omega_grid_all)
method = 'contour deformation'
print("Caculating GW self-energy via %s" % (method))
if self.analytic_W:
print("Analytic W has been requested; performing RPA calculation")
self._RPA(gw_par)
Sigma_c_grid = self._calculate_iGW(omega_grid_all) # self-energy matrix
print("Finished calculating self-energy")
Sigma_x = np.zeros(self.no_qp + self.nv_qp)
I = np.einsum("nmQ, mnQ->nm", self.nmR[self.nocc - self.no_qp:self.nocc + self.nv_qp, :self.nocc, :], self.nmR[:self.nocc, self.nocc - self.no_qp:self.nocc + self.nv_qp, :])
Sigma_x = -np.einsum("nm->n", I)
self.Sigma_c_grid = np.copy(Sigma_c_grid)
# Apply solvers; Similar to MolGW - linear & graphic solutions
print("Performing one-shot G0W0")
qp_molgw_lin_ = np.zeros(self.no_qp + self.nv_qp)
# Calculate pole strengths by performing numerical derivative on the omega grid
zz = np.real(Sigma_c_grid[:, nomega_grid + 1] - Sigma_c_grid[:, nomega_grid - 1]) / (omega_grid[nomega_grid + 1] - omega_grid[nomega_grid - 1])
zz = 1. / (1. - zz)
zz[zz <= 0.0] = 0.0
zz[zz >= 1.0] = 1.0
#xc_contr = (1. - self.alpha) * Sigma_x[self.nocc - self.no_qp:self.nocc + self.nv_qp] - np.diag(self.Vxc)[self.nocc - self.no_qp:self.nocc + self.nv_qp]
xc_contr = (1. - self.alpha) * Sigma_x - np.diag(self.Vxc)[self.nocc - self.no_qp:self.nocc + self.nv_qp]
print("SigX - Vxc")
print(xc_contr)
self.Sigma_x_Vxc = np.copy(xc_contr)
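# Linearized QP equation: E_qp = eps + Z * [Re Sigma_c(eps) + (1 - alpha)*Sigma_x - Vxc],
# where alpha is the fraction of exact exchange in the underlying functional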
qp_molgw_lin_ = self.eps[self.nocc - self.no_qp:self.nocc + self.nv_qp] + zz * (np.real(Sigma_c_grid[:, nomega_grid]) + xc_contr)
#print(qp_molgw_lin_.shape)
print("Perfoming graphic solution of the inverse Dyson equation")
# both rhs and lhs of the QP equation have been calculated above
qp_molgw_graph_ = np.copy(self.eps[self.nocc - self.no_qp:self.nocc + self.nv_qp])
zz_graph = np.zeros(self.no_qp + self.nv_qp)
self.graph_solver_data = {} # Format: state = [[e1, e2, ...], [z1, z2, ...]]
for state in range(self.no_qp + self.nv_qp):
#z , e = self._find_fixed_point(omega_grid_all[state], np.real(Sigma_c_grid[state, :]) + self.eps[state + self.nocc - self.no_qp] + (1. - self.alpha) * Sigma_x[state + self.nocc - self.no_qp] - np.diag(self.Vxc)[state + self.nocc - self.no_qp])
z , e = self._find_fixed_point(omega_grid_all[state], np.real(Sigma_c_grid[state, :]) + self.eps[state + self.nocc - self.no_qp] + xc_contr[state])
if z[0] < 1e-6:
print("Graphical solver failed for state %d" % (state + 1))
# Do nothing since the array cell already contains HF orbital energy
else:
qp_molgw_graph_[state] = e[0]
zz_graph[state] = z[0]
# Save all the solutions to graph_solver_data in case wrong solution
# has the largest Z
self.graph_solver_data[state] = [e, z]
self.zz = np.copy(zz)
self.qp_molgw_lin_ = np.copy(qp_molgw_lin_)
self.qp_molgw_graph_ = np.copy(qp_molgw_graph_)
print("Done!")
self.evgw_iter = 0 if not 'evgw_iter' in gw_par.keys() else gw_par['evgw_iter']
if self.evgw_iter > 0:
print("Starting evGW loop...")
print("Number of iterations is %d" % (self.evgw_iter))
self.verbose = False
# Will use QP produced by graphical solver to perform iteration;
# Should also properly take care of screening if analytic_W is set
eps0 = np.copy(self.eps[self.nocc - self.no_qp:self.nocc + self.nv_qp])
for _ in range(self.evgw_iter):
if self.analytic_W:
# update omega and xpy
self._RPA(gw_par)
omega_grid_all_ev = omega_grid + self.eps[self.nocc - self.no_qp:self.nocc + self.nv_qp].reshape((-1, 1))
Sigma_c_grid = self._calculate_iGW(omega_grid_all_ev) # self-energy matrix
qp_ev = eps0 + (np.real(Sigma_c_grid[:, nomega_grid]) + xc_contr)
print(qp_ev * psi4.constants.hartree2ev)
self.eps[self.nocc - self.no_qp:self.nocc + self.nv_qp] = np.copy(qp_ev)
print("Done with evGW!")
def print_summary(self, extended=False):
Ha2eV = psi4.constants.hartree2ev
print("E^lin, eV E^graph, eV Z ")
for i in range(self.no_qp + self.nv_qp):
print("%13.6f %13.6f %13.6f" % (self.qp_molgw_lin_[i]*Ha2eV, self.qp_molgw_graph_[i]*Ha2eV, self.zz[i]))
if extended:
print("Graphical solver printout")
for s in self.graph_solver_data:
print("State %d" % (s))
print("E_qp, eV Z")
e_vals, z_vals = self.graph_solver_data[s]
for e, z in zip(e_vals, z_vals):
print("%13.6f %13.6f" % (e * Ha2eV, z))
def int_dump(self, filename='INTDUMP'):
output = open(filename, 'w')
print("Saving inegrals and SCF data to a disk file...")
naux = self.nmR.shape[2]
output("%5d %5d" % (self.nbf, naux))
# Write orbitals
for e in self.eps:
output.write("%29.20f" % e)
for n in range(self.nbf):
for m in range(self.nbf):
for R in range(naux):
output.write("%5d %5d %5d %29.20f" % (n, m, R, self.nmR))
print("Orbital energies and RI integrals were saved to file %s" % (filename))
output.close()
def _init_sys_params(self):
self.nocc = self.scf_wfn.nalpha() - self.nfzc
self.nbf = self.scf_wfn.nmo() - self.nfzc
self.nvir = self.nbf - self.nocc
self.C = np.asarray(self.scf_wfn.Ca())
self.Cocc = np.asarray(self.scf_wfn.Ca_subset("AO", "OCC"))
self.eps = np.copy(np.asarray(self.scf_wfn.epsilon_a()))
if self.nfzc > 0:
# Reset orbital data
self.C = np.copy(self.C[:, self.nfzc:])
self.Cocc = np.copy(self.Cocc[:, self.nfzc:])
self.eps = np.copy(self.eps[self.nfzc:])
# print a quick summary
print("Number of basis functions: ", self.nbf)
print("occ/virt: %d/%d" % (self.nocc, self.nvir))
def _XC(self):
assert hasattr(self, 'scf_wfn')
# The function constructs the exchange-correlation
# potential matrix and extracts some other
# relevant data from PSI4 objects
# It is assumed that we are working with the closed-shell
# reference
Va = np.asarray(self.scf_wfn.Va())
self.Vxc = np.einsum("ia, ij, jb-> ab", self.C, Va, self.C)
self.alpha = self.scf_wfn.V_potential().functional().x_alpha() # Fraction of HF exchange
print("Fraction of HF exchange is %6.3f" % (self.alpha))
# will also need the integrals of exchange-correlation kernel
# for fully analytic calculation but that will be implemented later
def _gen_ri_ints(self):
# MO coefficients
C = np.asarray(self.C)
# Extract basis set from the wfn object
orb = self.scf_wfn.basisset()
# Determine RI parameters
ri_type = "RIFIT" if not 'ri_type' in self.gw_par.keys() else self.gw_par['ri_type']
ri_basis = str(orb.name()) if not 'ri_basis' in self.gw_par.keys() else self.gw_par['ri_basis']
# Sanity check
assert ri_type in ["RIFIT", "JKFIT"]
print("Attempting to create RI basis set for %s (%s)... " % (ri_basis, ri_type))
# Build auxiliary basis set
#aux = psi4.core.BasisSet.build(self.mol, "DF_BASIS_SCF", "", "JKFIT", orb.name())
#aux = psi4.core.BasisSet.build(self.mol, "DF_BASIS_SCF", "", "RIFIT", orb.name())
aux = psi4.core.BasisSet.build(self.mol, "DF_BASIS_SCF", "", ri_type, ri_basis)
# From Psi4 doc as of March, 2019 (http://www.psicode.org/psi4manual/1.2/psi4api.html#psi4.core.BasisSet.zero_ao_basis_set):
# Returns a BasisSet object that actually has a single s-function at
# the origin with an exponent of 0.0 and contraction of 1.0.
zero_bas = psi4.core.BasisSet.zero_ao_basis_set()
# Create a MintsHelper Instance
mints = psi4.core.MintsHelper(orb)
# Build (pq|P) raw 3-index ERIs, dimension (nbf, nbf, Naux, 1)
pqP = mints.ao_eri(orb, orb, aux, zero_bas)
# Build and invert the metric
metric = mints.ao_eri(zero_bas, aux, zero_bas, aux)
metric.power(-0.5, 1.e-14)
# Remove the dimensions of size 1
pqP = np.squeeze(pqP)
metric = np.squeeze(metric)
# Transform (pq|P) to obtain (nm|P) in molecular orbital basis
nmP = np.einsum("pn, qm, pqR-> nmR", C, C, pqP)
# Contract with the inverse square root of the metric tensor
self.nmR = np.einsum( "nmP, PR-> nmR", nmP, metric)
print("Auxiliary basis set has been generated!")
print("Number of auxiliary basis functions: ", self.nmR.shape[2])
def _gen_gaussleg(self):
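# Modified Gauss-Legendre grid: map the Legendre nodes x in (-1, 1) onto (0, inf)
# via t = (1 + x)/(1 - x); the weights pick up the Jacobian dt/dx = 2/(1 - x)**2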
x, w = np.polynomial.legendre.leggauss(self.gl_npoint)
self.gl_x = (1. + x) / (1. - x)
self.gl_w = 2. * w / (1. - x)**2
def _find_fixed_point(self, lhs, rhs):
# This function returns an array of fixed points and corresponding pole strengths
# Its application can be vectorized using standard NumPy np.vectorize
assert lhs.shape == rhs.shape
# Maximum number of fixed points (same as in MolGW)
nfp_max = 4
# Pole strength threshold
pthresh = 1e-5
# Arrays of f.p. energies and Z
zfp = np.zeros(nfp_max)
zfp[:] = -1.0
efp = np.zeros(nfp_max)
# Auxiliary index array
idx = np.arange(nfp_max)
n = len(lhs)
ifixed = 0
g = rhs - lhs
# loop over grid points excluding the last one
for i in range(n - 1):
if g[i] * g[i + 1] < 0.0:
#print("Fixed point found betwenn %13.6f and %13.6f eV! " % (lhs[i] * Ha2eV, lhs[i+1] * Ha2eV))
z_zero = 1. / ( 1. - ( g[i+1] - g[i] ) / ( lhs[i+1] - lhs[i] ) )
if z_zero < pthresh:
continue
# Do some bookkeeping; the code looks ugly but that is exactly what F.Bruneval has in MolGW package
if z_zero > zfp[-1]:
jfixed = np.min(idx[z_zero > zfp])
zfp[jfixed + 1:] = zfp[jfixed:nfp_max - 1]
efp[jfixed + 1:] = efp[jfixed:nfp_max - 1]
zfp[jfixed] = z_zero
# Perform linear interpolation to find the root
zeta = (g[i + 1] - g[i]) / (lhs[i + 1] - lhs[i])
efp[jfixed] = lhs[i] - g[i] / zeta
#print("Graphical solver concluded operation")
return (zfp, efp)
def _RPA(self, gw_par):
nocc = self.nocc
nvir = self.nvir
# Diagonal \epsilon_a - \epsilon_i
eps_diag = self.eps[nocc:].reshape(-1, 1) - self.eps[:nocc]
assert eps_diag.shape == (nvir, nocc)
# A^{+} + B^{+}
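# (A+B)_{ia,jb} = (eps_a - eps_i) * delta_ij * delta_ab + 4*(ia|jb)
# (Coulomb-only, i.e. direct RPA for a closed-shell reference; no exchange kernel)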
ApB = np.einsum("ij,ab,ai -> iajb", np.eye(nocc), np.eye(nvir), eps_diag) + 4. * np.einsum("iaQ, jbQ->iajb", self.nmR[:nocc, nocc:], self.nmR[:nocc, nocc:])
ApB = ApB.reshape((nocc*nvir, nocc*nvir))
# since nD numpy arrays have C-style memory layout the occupied orbital index changes slower than the virtual one
# Diagonal of A^{+} - B^{+}
AmB_diag = eps_diag.T.reshape((1, -1))
AmB_diag = np.diag(AmB_diag[0,:])
assert AmB_diag.shape == ApB.shape
# Form C matrix (as one usually does when solving RPA eigenvalue problem)
C_ = np.einsum("ij,jk,kl->il", np.sqrt(AmB_diag), ApB, np.sqrt(AmB_diag))
# Solve for the excitation energies and calculate X + Y eigenvectors
omega2, Z = np.linalg.eigh(C_)
self.omega_s = np.sqrt(omega2)
self.xpy = np.einsum("ij,jk,kl->il", np.sqrt(AmB_diag), Z, np.diag(1./np.sqrt(self.omega_s)))
if self.debug:
print("RPA excitation energies:")
print(self.omega_s)
def _calculate_iGW(self, omega_grid_all):
nocc = self.nocc
nvir = self.nvir
eps = self.eps
#print("eps : ", eps)
no_qp = self.no_qp
nv_qp = self.nv_qp
nbf = self.nbf
e_fermi = (eps[nocc - 1] + eps[nocc]) / 2.
ngrid = omega_grid_all.shape[1]
naux = self.nmR.shape[2]
assert omega_grid_all.shape == (no_qp + nv_qp, ngrid)
# Self-energy calculation will be performed via contour deformation
# analytic continuation can be implemented later for comparison and
# benchmarking
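# Contour deformation splits Sigma_c(omega) into an integral along the imaginary
# frequency axis (In below) plus a residue contribution (Rn below) from the poles
# of the Green's function enclosed by the deformed contour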
e_ai = eps[nocc:, np.newaxis] - eps[np.newaxis, :nocc]
im_grid = self.gl_x * 1.j
# Calculate some intermediates for the imaginary time integration
f = np.ones(nbf)
f[eps > e_fermi] = -1.
assert np.sum(f) == nocc - nvir
complex_eps = eps + 0.5j * self.eta * f
Wnm_im_grid = np.zeros((no_qp + nv_qp, nbf, len(self.gl_x)))
omega_rts = np.zeros(1)
# Calculate Wnm on the imaginary frequency grid
if self.analytic_W:
### Analytic calculation of W on imaginary frequency grid
# Omega tensors; Will be reused later to calculate the residue term
i_rtia = np.einsum("iaQ, rtQ ->rtia", self.nmR[:nocc, nocc:, :], self.nmR)
i_rtia = i_rtia.reshape((nbf, nbf, nocc*nvir))
omega_rts = np.sqrt(2.) * np.einsum("rtk, ks->rts", i_rtia, self.xpy)
if self.verbose:
print("Shape of omega tensor is ", omega_rts.shape)
Ds_p = self.omega_s.reshape((-1, 1)) + im_grid - 0.5j*self.eta
Ds_m = -self.omega_s.reshape((-1, 1)) + im_grid + 0.5j*self.eta
assert Ds_p.shape == Ds_m.shape and Ds_m.shape == (len(self.omega_s), len(self.gl_x))
Wnm_im_grid = np.einsum("nms, sg, nms -> nmg",omega_rts[nocc - no_qp:nocc + nv_qp,:,:], 1./Ds_m - 1./Ds_p, omega_rts[nocc - no_qp:nocc + nv_qp,:,:])
#print(Wnm_im_grid.shape)
#print( (no_qp + nv_qp, nbf, len(self.gl_x)) )
assert Wnm_im_grid.shape == (no_qp + nv_qp, nbf, len(self.gl_x))
else:
O_ = self.nmR[nocc-no_qp:nocc+nv_qp, :, :]
dp_ = im_grid[:,np.newaxis, np.newaxis] + e_ai[np.newaxis, :, :] - 0.5j * self.eta
dm_ = im_grid[:,np.newaxis, np.newaxis] - e_ai[np.newaxis, :, :] + 0.5j * self.eta
if self.debug:
dp_debug = np.zeros((self.gl_npoint, nvir, nocc), dtype=np.complex128)
dm_debug = np.zeros((self.gl_npoint, nvir, nocc), dtype=np.complex128)
for idx, grid_point in enumerate(im_grid):
tmp_p = grid_point + e_ai - 0.5j * self.eta
tmp_m = grid_point - e_ai + 0.5j * self.eta
dp_debug[idx, :, :] = tmp_p
dm_debug[idx, :, :] = tmp_m
assert np.allclose(dp_debug, dp_) and np.allclose(dm_debug, dm_)
id_pq = np.eye(naux)
assert id_pq.shape == (naux, naux) and np.all(np.diag(id_pq) == np.ones(naux))
#Ppq_ = np.einsum("iaP, gai, iaQ->gPQ", self.nmR[:nocc, nocc:,:], 1./dm_ - 1./dp_, self.nmR[:nocc,nocc:,:])
Ppq_ = 2. * np.einsum("iaP, gai, iaQ->gPQ", self.nmR[:nocc, nocc:,:], 1./dm_ - 1./dp_, self.nmR[:nocc,nocc:,:])
if self.debug:
Ppq_debug = np.zeros((self.gl_npoint, naux, naux), dtype=np.complex128)
tmp_O = self.nmR[:nocc,nocc:,:]
for idx, grid_point in enumerate(im_grid):
#Ppq_debug[idx, :,:] = np.einsum("iaP, ai, iaQ->PQ", tmp_O, 1./dm_[idx,:,:] - 1./dp_[idx,:,:], tmp_O)
Ppq_debug[idx, :,:] = 2. * np.einsum("iaP, ai, iaQ->PQ", tmp_O, 1./dm_[idx,:,:] - 1./dp_[idx,:,:], tmp_O)
assert np.allclose(Ppq_debug, Ppq_)
assert Ppq_.shape == (len(im_grid), naux, naux)
Wnm_im_grid = np.einsum("nmP, lPQ, nmQ -> nml", O_, (np.linalg.inv(id_pq[np.newaxis, :,:] - Ppq_) - id_pq[np.newaxis, :,:]), O_)
assert Wnm_im_grid.shape == (no_qp + nv_qp, nbf, len(self.gl_x))
# Check if matrix inverse is numerically accurate
if self.debug:
inv_thresh = 1e-12
for idx, grid_point in enumerate(im_grid):
tmp1 = id_pq - Ppq_[idx, :,:]
tmp2 = np.linalg.inv(tmp1)
tmp3 = np.dot(tmp1, tmp2)
max_err = np.max(np.abs(np.real(tmp3) - id_pq))
max_err_im = np.max(np.abs(np.imag(tmp3)))
if max_err > inv_thresh or max_err_im > inv_thresh:
print("Matrix inverse failed when calculating the integral term!")
print(max_err)
print(max_err_im)
# ### GW self-energy calculation via contour deformation
# Inform the user about the amount of memory required for the calculation
# with the current implementation (the residue term is the most expensive one)
# This excludes the amount of memory needed to store the target objects
mem_int = (nocc * nvir * (no_qp + nv_qp) + 4 * (nocc * nvir * len(self.gl_x)) + 2 * (len(im_grid) * naux * naux))
mem_res = 4. * ngrid * nbf * nocc * nvir + 2 * ngrid * nbf * naux * naux + nbf * naux + naux**2
if self.low_mem:
mem_res = 4. * ngrid * nocc * nvir + naux + naux**2 + 2 * ngrid * naux**2
if self.verbose:
print("Calculation of the integral term requires %8.3f Gb" %(mem_int * 8e-9)) # Each standard double is 8 bytes
print("Calculation of the residue term requires %8.3f Gb" %(mem_res * 8e-9))
if self.low_mem and self.verbose:
print("Using low-memory algorithm")
In = np.zeros(omega_grid_all.shape, dtype=np.complex128) # Integral term
Rn = np.zeros(omega_grid_all.shape, dtype=np.complex128) # Residue term
# Integral term
for qp in range(no_qp + nv_qp):
# Calculate GF denominators = omega + 1.j omega_prime - eps_m \pm i eta
qp_grid = np.copy(omega_grid_all[qp, :])
Dgf_p = qp_grid[:, np.newaxis, np.newaxis] - complex_eps[np.newaxis, :, np.newaxis] + im_grid[np.newaxis,np.newaxis, :]
Dgf_m = qp_grid[:, np.newaxis, np.newaxis] - complex_eps[np.newaxis, :, np.newaxis] - im_grid[np.newaxis,np.newaxis, :]
if self.debug:
# Print some diagnostic information about the denominators
thresh = 1e-10
Dgf_p_min = np.min(np.absolute(Dgf_p))
Dgf_m_min = np.min(np.absolute(Dgf_m))
if Dgf_p_min < thresh or Dgf_m_min < thresh:
print("Small denominator detected when calculating the integral term! Diagonstic info is printed below:")
print(Dgf_p_min)
print(Dgf_m_min)
assert Dgf_p.shape == (ngrid, nbf, len(self.gl_x)) and Dgf_m.shape == (ngrid, nbf, len(self.gl_x))
Wnm_tmp = np.copy(Wnm_im_grid[qp,:,:])
I_term = 1. / (2.*np.pi) * np.einsum("fmg, g->f", Wnm_tmp[np.newaxis, :,:]* (1./ Dgf_p + 1./Dgf_m), self.gl_w)
In[qp, :] = np.copy(I_term)
del Dgf_p
del Dgf_m
del Wnm_tmp
del I_term
# Residue term
# Calculate Wnm and f vector for a given quasi-particle
# Not memory efficient; just for testing
#offset_complex_eps = complex_eps[:, np.newaxis] - qp_grid
offset_complex_eps = np.abs(eps[:, np.newaxis] - qp_grid) + 0.5j * self.eta
assert offset_complex_eps.shape == (nbf, ngrid)
Wnm_4res = np.zeros((nbf, ngrid), dtype=np.complex128)
fill_factors = np.zeros((ngrid, nbf))
#mask_vir = np.logical_and(qp_grid[:,np.newaxis] > eps[np.newaxis, :], eps.reshape((1, -1)) > e_fermi)
#mask_occ = np.logical_and(qp_grid[:,np.newaxis] < eps[np.newaxis, :], eps.reshape((1, -1)) < e_fermi)
# This is still incorrect but should be a bit better
mask_vir = np.logical_and(qp_grid[:,np.newaxis] > eps[np.newaxis, :], eps.reshape((1, -1)) > e_fermi)
mask_occ = np.logical_and(qp_grid[:,np.newaxis] < eps[np.newaxis, :], eps.reshape((1, -1)) < e_fermi)
fill_factors[mask_vir] = 1.
fill_factors[mask_occ] = -1.
# Treat a special case
mask_vir_eq = np.logical_and(qp_grid[:,np.newaxis] == eps[np.newaxis, :], eps.reshape((1, -1)) > e_fermi)
mask_occ_eq = np.logical_and(qp_grid[:,np.newaxis] == eps[np.newaxis, :], eps.reshape((1, -1)) < e_fermi)
fill_factors[mask_vir_eq] = 0.5
fill_factors[mask_occ_eq] = -0.5
if self.low_mem: # Low memory algorithm; Calculates W matrix elements only for those orbitals that have non-zero fill factors
# This implementation will be a lot slower but will utilize much less memory
# Basis set size is usually much smaller than the grid size for my systems,
# yet if I compute the residue term for each basis function separately => this may reduce memory cost 20 times!
for m in range(nbf):
g_index = np.arange(ngrid)
g_res = g_index[fill_factors[:, m] != 0.]
if len(g_res) == 0:
continue
else:
#print(len(g_res))
pass
if self.analytic_W:
Ds_p__ = offset_complex_eps[m, g_res, np.newaxis] + self.omega_s[np.newaxis,:] - 0.5j*self.eta
Ds_m__ = offset_complex_eps[m, g_res, np.newaxis] - self.omega_s[np.newaxis,:] + 0.5j*self.eta
assert Ds_p__.shape == Ds_m__.shape and Ds_p__.shape == (len(g_res), len(self.omega_s))
Wnm_4res[m, g_res] = np.einsum("s, s, gs -> g",omega_rts[nocc - no_qp + qp, m,:], omega_rts[nocc - no_qp + qp,m,:], 1./Ds_m__ - 1./Ds_p__)
else:
O__ = self.nmR[nocc-no_qp + qp, m, :]
dp__ = offset_complex_eps[m, g_res, np.newaxis, np.newaxis] + e_ai[np.newaxis, :, :] - 0.501j * self.eta
dm__ = offset_complex_eps[m, g_res, np.newaxis, np.newaxis] - e_ai[np.newaxis, :, :] + 0.501j * self.eta
zero_thresh = 1e-12
assert np.min(np.abs(dp__)) > zero_thresh and np.min(np.abs(dm__)) > zero_thresh
id_pq = np.eye(naux)
#Ppq__ = np.einsum("aiP, gai, aiQ->gPQ", self.nmR[nocc:, :nocc,:], 1./dm__ - 1./dp__, self.nmR[nocc:,:nocc,:])
Ppq__ = 2. * np.einsum("aiP, gai, aiQ->gPQ", self.nmR[nocc:, :nocc,:], 1./dm__ - 1./dp__, self.nmR[nocc:,:nocc,:])
del dp__
del dm__
Wnm_4res[m, g_res] = np.einsum("P, gPQ, Q -> g", O__, (np.linalg.inv(id_pq[np.newaxis, :,:] - Ppq__) - id_pq[np.newaxis, :,:]), O__)
if self.debug:
# loop over the grid and check if matrix inverse was performed with sufficient accuracy
thresh = 1e-11
for g in range(len(g_res)):
tmp = id_pq - Ppq__[g, :, :]
tmp_1 = np.linalg.inv(tmp)
diff = np.dot(tmp, tmp_1) - id_pq
if not (np.max(np.real(diff)) < thresh and np.max(np.imag(diff)) < thresh):
print("Matrix inverse cannot be performed with sufficient accuracy!")
print("Errors (real and imaginary parts)")
print(np.max(np.real(diff)))
print(np.max(np.imag(diff)))
assert np.max(np.real(diff)) < thresh and np.max(np.imag(diff)) < thresh
else: # Simple algorithm; supposed to be fast (especially for analytic W) but not memory efficient
if self.analytic_W:
Ds_p__ = offset_complex_eps[:,:,np.newaxis] + self.omega_s[np.newaxis, np.newaxis,:] - 0.5j*self.eta
Ds_m__ = offset_complex_eps[:,:,np.newaxis] - self.omega_s[np.newaxis, np.newaxis,:] + 0.5j*self.eta
assert Ds_p__.shape == Ds_m__.shape and Ds_p__.shape == (nbf, ngrid, len(self.omega_s))
Wnm_4res = np.einsum("ms, ms, mgs -> mg",omega_rts[nocc - no_qp + qp,:,:], omega_rts[nocc - no_qp + qp,:,:], 1./Ds_m__ - 1./Ds_p__)
else:
O__ = self.nmR[nocc-no_qp + qp, :, :]
dp__ = offset_complex_eps[:, :, np.newaxis, np.newaxis] + e_ai[np.newaxis, np.newaxis, :, :] - 0.501j * self.eta
dm__ = offset_complex_eps[:, :, np.newaxis, np.newaxis] - e_ai[np.newaxis, np.newaxis, :, :] + 0.501j * self.eta
zero_thresh = 1e-10
assert np.min(np.abs(dp__)) > zero_thresh and np.min(np.abs(dm__)) > zero_thresh
id_pq = np.eye(naux)
assert id_pq.shape == (naux, naux) and np.all(np.diag(id_pq) == np.ones(naux))
#Ppq__ = np.einsum("aiP, mgai, aiQ->mgPQ", self.nmR[nocc:, :nocc,:], 1./dm__ - 1./dp__, self.nmR[nocc:,:nocc,:])
Ppq__ = 2. * np.einsum("aiP, mgai, aiQ->mgPQ", self.nmR[nocc:, :nocc,:], 1./dm__ - 1./dp__, self.nmR[nocc:,:nocc,:])
del dp__
del dm__
assert Ppq__.shape == (nbf, ngrid, naux, naux)
assert O__.shape == (nbf, naux)
Wnm_4res = np.einsum("mP, mgPQ, mQ -> mg", O__, (np.linalg.inv(id_pq[np.newaxis, np.newaxis, :,:] - Ppq__) - id_pq[np.newaxis, np.newaxis, :,:]), O__)
R_term =
|
np.einsum("gm, mg->g", fill_factors, Wnm_4res)
|
numpy.einsum
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque, defaultdict
import os
import os.path as osp
import gin
import time
import math
import dill
import numpy as np
import tensorflow as tf
from PIL import Image
from third_party.baselines import logger
from third_party.baselines.common import tf_util as U
from third_party.baselines.common import explained_variance
from third_party.baselines.common.input import observation_input
from third_party.baselines.common.runners import AbstractEnvRunner
from third_party.baselines.ppo2 import pathak_utils
from third_party.baselines.ppo2 import policies
from sprl.bonus_model import BonusModel
from sprl.utils import _flatten, _unflatten, dump_pickle, load_pickle
@gin.configurable
class Model(object):
def __init__(self, policy, ob_space, ac_space, nbatch_act, nbatch_train,
nsteps, ent_coef, vf_coef, max_grad_norm,
use_curiosity, curiosity_strength, forward_inverse_ratio,
curiosity_loss_strength, random_state_predictor, pathak_multiplier,
hidden_layer_size):
sess = tf.get_default_session()
act_model = policy(sess, ob_space, ac_space, nbatch_act, reuse=False)
train_model = policy(sess, ob_space, ac_space, nbatch_train, reuse=True)
self.nbatch_act = nbatch_act
self.nbatch_train = nbatch_train
self.action_dim = ac_space.n
if use_curiosity:
self.state_encoder_net = tf.make_template(
'state_encoder_net', pathak_utils.universeHead,
create_scope_now_=True,
trainable=(not random_state_predictor),)
self.icm_forward_net = tf.make_template(
'icm_forward', pathak_utils.icm_forward_model,
create_scope_now_=True, num_actions=ac_space.n,
hidden_layer_size=hidden_layer_size, one_hot=(ac_space.dtype!='float32'))
self.icm_forward_output = tf.make_template(
'icm_forward_output', pathak_utils.icm_forward_output,
create_scope_now_=True)
self.icm_inverse_net = tf.make_template(
'icm_inverse', pathak_utils.icm_inverse_model,
create_scope_now_=True,
hidden_layer_size=hidden_layer_size)
self.icm_inverse_output = tf.make_template(
'icm_inverse_output', pathak_utils.icm_inverse_output,
create_scope_now_=True)
else:
self.state_encoder_net = None
self.icm_forward_net = None
self.icm_forward_output = None
self.icm_inverse_net = None
self.icm_inverse_output = None
A = train_model.pdtype.sample_placeholder([None])
ADV = tf.placeholder(tf.float32, [None])
R = tf.placeholder(tf.float32, [None])
OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])
OLDVPRED = tf.placeholder(tf.float32, [None])
LR = tf.placeholder(tf.float32, [])
CLIPRANGE = tf.placeholder(tf.float32, [])
# When computing intrinsic reward a different batch size is used (number
# of parallel environments), thus we need to define separate
# placeholders for them.
X_NEXT, _ = observation_input(ob_space, nbatch_train)
X_INTRINSIC_NEXT, _ = observation_input(ob_space, None)
X_INTRINSIC_CURRENT, _ = observation_input(ob_space, None)
neglogpac = train_model.pd.neglogp(A)
entropy = tf.reduce_mean(train_model.pd.entropy())
vpred = train_model.vf
vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED,
- CLIPRANGE, CLIPRANGE)
vf_losses1 = tf.square(vpred - R)
vf_losses2 = tf.square(vpredclipped - R)
vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
ratio = tf.exp(OLDNEGLOGPAC - neglogpac)
pg_losses = -ADV * ratio
pg_losses2 = -ADV * tf.clip_by_value(ratio,
1.0 - CLIPRANGE, 1.0 + CLIPRANGE)
pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))
clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0),
CLIPRANGE)))
forward_loss, inverse_loss = self.compute_curiosity_loss(
use_curiosity, train_model.X, A, X_NEXT,
forward_inverse_ratio=forward_inverse_ratio,
curiosity_loss_strength=curiosity_loss_strength)
curiosity_loss = forward_loss + inverse_loss
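# Total objective: clipped PPO policy loss, minus the entropy bonus, plus the weighted
# value-function loss and the ICM curiosity (forward + inverse) loss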
loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef + curiosity_loss
if use_curiosity:
encoded_time_step = self.state_encoder_net(X_INTRINSIC_CURRENT)
encoded_next_time_step = self.state_encoder_net(X_INTRINSIC_NEXT)
intrinsic_reward = self.curiosity_forward_model_loss(
encoded_time_step, A, encoded_next_time_step)
with tf.variable_scope('model'):
params = tf.trainable_variables()
grads = tf.gradients(loss * pathak_multiplier, params)
if max_grad_norm is not None:
grads, _ = tf.clip_by_global_norm(grads, max_grad_norm)
grads = list(zip(grads, params))
trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
_train = trainer.apply_gradients(grads)
def getIntrinsicReward(curr, next_obs, actions):
return sess.run(intrinsic_reward, {X_INTRINSIC_CURRENT: curr,
X_INTRINSIC_NEXT: next_obs,
A: actions})
def train(lr, cliprange, obs, next_obs, returns, actions, values,
neglogpacs):
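# Standard PPO preprocessing: advantages = returns - value predictions, normalized per batch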
advs = returns - values
advs = (advs - advs.mean()) / (advs.std() + 1e-8)
# Inputs
td_map = {train_model.X: obs, A: actions, ADV: advs, R: returns, LR: lr,
CLIPRANGE: cliprange, OLDNEGLOGPAC: neglogpacs,
OLDVPRED: values, X_NEXT: next_obs}
# Output
summaries = [loss, pg_loss, vf_loss, entropy]
if use_curiosity:
summaries += [forward_loss, inverse_loss]
return sess.run(summaries + [_train], td_map)[:-1]
self.loss_names = ['loss', 'loss/policy', 'loss/value', 'policy_entropy']
if use_curiosity:
self.loss_names += ['loss/forward', 'loss/inverse']
def save(save_path):
ps = sess.run(params)
with tf.gfile.Open(save_path, 'wb') as fh:
fh.write(dill.dumps(ps))
def load(load_path):
with tf.gfile.Open(load_path, 'rb') as fh:
val = fh.read()
loaded_params = dill.loads(val)
restores = []
for p, loaded_p in zip(params, loaded_params):
restores.append(p.assign(loaded_p))
sess.run(restores)
self.getIntrinsicReward = getIntrinsicReward
self.train = train
self.train_model = train_model
self.act_model = act_model
self.step = act_model.step
self.eval_step = act_model.eval_step
self.value = act_model.value
self.save = save
self.load = load
tf.global_variables_initializer().run(session=sess) # pylint: disable=E1101
def curiosity_forward_model_loss(self, encoded_state, action,
encoded_next_state):
pred_next_state = self.icm_forward_output(encoded_state, self.icm_forward_net(encoded_state, action))
forward_loss = 0.5 * tf.reduce_mean(
tf.squared_difference(pred_next_state, encoded_next_state), axis=1)
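# NOTE: the constant 288 presumably rescales the mean per-feature error by the
# encoded-state dimensionality of the universe head (assumption, not verified here)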
forward_loss = forward_loss * 288.0
return forward_loss
def curiosity_inverse_model_loss(self, encoded_states, actions,
encoded_next_states):
pred_action_logits = self.icm_inverse_output(self.icm_inverse_net(encoded_states,
encoded_next_states), self.action_dim)
return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=pred_action_logits, labels=actions), name='invloss')
def compute_curiosity_loss(self, use_curiosity, time_steps, actions,
next_time_steps, forward_inverse_ratio,
curiosity_loss_strength):
if use_curiosity:
with tf.name_scope('curiosity_loss'):
encoded_time_steps = self.state_encoder_net(time_steps)
encoded_next_time_steps = self.state_encoder_net(next_time_steps)
inverse_loss = self.curiosity_inverse_model_loss(
encoded_time_steps, actions, encoded_next_time_steps)
forward_loss = self.curiosity_forward_model_loss(
encoded_time_steps, actions, encoded_next_time_steps)
forward_loss = tf.reduce_mean(forward_loss)
else:
forward_loss = tf.constant(0.0, dtype=tf.float32,
name='forward_loss')
inverse_loss = tf.constant(0.0, dtype=tf.float32,
name='inverse_loss')
return curiosity_loss_strength * (forward_inverse_ratio * forward_loss), curiosity_loss_strength * (1 - forward_inverse_ratio) * inverse_loss
class Runner(AbstractEnvRunner):
def __init__(self, env, model, bonus_model, rnet_trainer, rnet, nsteps, gamma, lam,
log_dir='.', eval_callback=None):
super(Runner, self).__init__(env=env, model=model, nsteps=nsteps)
self._eval_callback = eval_callback
self.lam = lam
self.gamma = gamma
self.nenvs = self.env.num_envs
self.action_dim = self.env.action_space.n
#
self.bonus_model = bonus_model
self.rnet_trainer = rnet_trainer
self.rnet = rnet
#
self._collection_iteration = 0
self.bonus_accum_dict = {
'sprl': np.zeros(self.nenvs),
'eco': np.zeros(self.nenvs),
'icm': np.zeros(self.nenvs),
'bonus': np.zeros(self.nenvs),
}
self.infos = env.get_initial_infos()
self._ep_return = np.zeros(self.nenvs)
self._ep_length = np.zeros(self.nenvs)
self.log_dir = log_dir
self.rewards = 0
def _update_accumulator(self, mb_dones, current_dict, epinfos, accum_dict):
for epinfo in epinfos:
for key in accum_dict:
epinfo[key] = 0.
count = 0
# mb_dones => [256, num_envs]
for idx, batch_done in enumerate(mb_dones): # idx = 0~255
# batch_done => [num_envs]
for key, batch_current in current_dict.items():
# batch_current : [256 x num_envs]
current_vec = batch_current[idx] # [num_envs]
if key in accum_dict:
accum_dict[key] += current_vec
for env_ind, done in enumerate(batch_done): # for each env: 0~11
if done:
for key in accum_dict:
epinfos[count][key] = accum_dict[key][env_ind].copy()
accum_dict[key][env_ind] = 0.
count += 1
return epinfos
@gin.configurable
def run(self):
if self._eval_callback:
self._eval_callback(self.model.eval_step)
self._collection_iteration += 1
mb_obs, mb_raw_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_infos = [], [], [], [], [], [], []
mb_neglogpacs, mb_next_obs = [], []
extra_logs = defaultdict(list)
init_dones = self.dones
epinfos = []
for env_step in range(self.nsteps):
# 1. Policy step (See policies.py for step() function)
obs = self.obs
actions, values, neglogpacs = self.model.step(obs,
self.dones,
self.infos)
'''
Notations: d(t), s(t), info(t) -> a(t) ~ policy -> r(t) -> d(t+1), s(t+1), info(t+1)
For mb_xxx[i], we save s(t), info(t), d(t) -> a(t) -> r(t)
mb_obs[i] : s(t)
mb_infos[i] : info(t) --> contains raw_obs(t), mask(t)
mb_dones[i] : done(t) --> if done(t) == 1: s(t) is initial state of an episode
=== self.model.step == (policy)
mb_actions[i] : a(t)
mb_neglogpacs[i]: -log(pi(a(t)|s(t)))
mb_values[i] : V(s(t))
=== self.env.step == (environment)
mb_reward[i] : r(t)
mb_next_dones[i]: d(t+1)
mb_next_obs[i] : s(t+1)
mb_next_infos[i]: info(t+1) --> contains raw_obs(t+1), mask(t+1)
'''
assert self.obs.dtype == np.uint8
assert self.infos[0]['observations'].dtype == np.uint8
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
mb_infos.append(self.infos)
mb_raw_obs.append(self.infos[0]['observations'])
# 2. Environment step
self.obs[:], rewards, self.dones, self.infos = self.env.step(actions)
self.rewards = rewards
# 3. Etc
mb_next_obs.append(self.obs.copy())
mb_rewards.append(rewards)
# Record Episode statistics
self._ep_return += rewards
self._ep_length += 1
for env_ind, (done, info) in enumerate(zip(self.dones, self.infos)): # for each env: 0~11
if done:
epinfos.append({'l':self._ep_length[env_ind], 'r':self._ep_return[env_ind]})
self._ep_length[env_ind] = 0
self._ep_return[env_ind] = 0
# 3. Post-processing
### batch of steps to batch of rollouts
preprocess_mb_raw_obs = mb_raw_obs
preprocess_mb_actions = mb_actions
mb_raw_obs = np.asarray(mb_raw_obs, dtype=np.uint8)
mb_obs = np.asarray(mb_obs, dtype=np.uint8)
mb_next_obs =
|
np.asarray(mb_next_obs, dtype=np.uint8)
|
numpy.asarray
|
from collections import OrderedDict
from functools import partial
from unittest import TestCase
import os
import os.path as osp
import numpy as np
from datumaro.components.annotation import (
AnnotationType, LabelCategories, Mask, MaskCategories,
)
from datumaro.components.dataset import Dataset
from datumaro.components.environment import Environment
from datumaro.components.extractor import DatasetItem, Extractor
from datumaro.components.media import Image
from datumaro.plugins.camvid_format import CamvidConverter, CamvidImporter
from datumaro.util.meta_file_util import parse_meta_file
from datumaro.util.test_utils import (
TestDir, check_save_and_load, compare_datasets,
)
import datumaro.plugins.camvid_format as Camvid
from .requirements import Requirements, mark_requirement
class CamvidFormatTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_write_and_parse_labelmap(self):
src_label_map = Camvid.CamvidLabelMap
with TestDir() as test_dir:
file_path = osp.join(test_dir, 'label_colors.txt')
Camvid.write_label_map(file_path, src_label_map)
dst_label_map = Camvid.parse_label_map(file_path)
self.assertEqual(src_label_map, dst_label_map)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_write_and_parse_meta_file(self):
src_label_map = Camvid.CamvidLabelMap
with TestDir() as test_dir:
source_dataset = Dataset.from_iterable([],
categories=Camvid.make_camvid_categories(src_label_map))
CamvidConverter.convert(source_dataset, test_dir,
save_dataset_meta=True)
dst_label_map = parse_meta_file(test_dir)
self.assertEqual(src_label_map, dst_label_map)
DUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'camvid_dataset')
class TestExtractorBase(Extractor):
def _label(self, camvid_label):
return self.categories()[AnnotationType.label].find(camvid_label)[0]
def categories(self):
return Camvid.make_camvid_categories()
class CamvidImportTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id='0001TP_008550', subset='test',
image=np.ones((1, 5, 3)),
annotations=[
Mask(image=np.array([[1, 1, 0, 0, 0]]), label=1),
Mask(image=np.array([[0, 0, 1, 0, 0]]), label=18),
Mask(image=np.array([[0, 0, 0, 1, 1]]), label=22),
]
),
DatasetItem(id='0001TP_008580', subset='test',
image=np.ones((1, 5, 3)),
annotations=[
Mask(image=np.array([[1, 1, 0, 0, 0]]), label=2),
Mask(image=np.array([[0, 0, 1, 0, 0]]), label=4),
Mask(image=np.array([[0, 0, 0, 1, 1]]), label=27),
]
),
DatasetItem(id='0001TP_006690', subset='train',
image=np.ones((1, 5, 3)),
annotations=[
Mask(image=np.array([[1, 1, 0, 1, 1]]), label=3),
Mask(image=np.array([[0, 0, 1, 0, 0]]), label=18),
]
),
DatasetItem(id='0016E5_07959', subset='val',
image=np.ones((1, 5, 3)),
annotations=[
Mask(image=np.array([[1, 1, 1, 0, 0]]), label=1),
Mask(image=np.array([[0, 0, 0, 1, 1]]), label=8),
]
),
], categories=Camvid.make_camvid_categories())
parsed_dataset = Dataset.import_from(DUMMY_DATASET_DIR, 'camvid')
compare_datasets(self, source_dataset, parsed_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_detect_camvid(self):
detected_formats = Environment().detect_dataset(DUMMY_DATASET_DIR)
self.assertEqual([CamvidImporter.NAME], detected_formats)
class CamvidConverterTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def _test_save_and_load(self, source_dataset, converter, test_dir,
target_dataset=None, importer_args=None, **kwargs):
return check_save_and_load(self, source_dataset, converter, test_dir,
importer='camvid',
target_dataset=target_dataset, importer_args=importer_args, **kwargs)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_camvid_segm(self):
class TestExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id='a/b/1', subset='test',
image=np.ones((1, 5, 3)), annotations=[
Mask(image=np.array([[0, 0, 0, 1, 0]]), label=0),
Mask(image=np.array([[0, 1, 1, 0, 0]]), label=3),
Mask(image=np.array([[1, 0, 0, 0, 1]]), label=4),
]),
])
with TestDir() as test_dir:
self._test_save_and_load(TestExtractor(),
partial(CamvidConverter.convert, label_map='camvid'),
test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_camvid_segm_unpainted(self):
class TestExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id=1, subset='a', image=np.ones((1, 5, 3)), annotations=[
Mask(image=np.array([[0, 0, 0, 1, 0]]), label=0),
Mask(image=np.array([[0, 1, 1, 0, 0]]), label=3),
Mask(image=np.array([[1, 0, 0, 0, 1]]), label=4),
]),
])
class DstExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id=1, subset='a', image=np.ones((1, 5, 3)), annotations=[
Mask(image=np.array([[0, 0, 0, 1, 0]]), label=0),
Mask(image=np.array([[0, 1, 1, 0, 0]]), label=3),
Mask(image=np.array([[1, 0, 0, 0, 1]]), label=4),
]),
])
with TestDir() as test_dir:
self._test_save_and_load(TestExtractor(),
partial(CamvidConverter.convert,
label_map='camvid', apply_colormap=False),
test_dir, target_dataset=DstExtractor())
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_dataset_with_no_subsets(self):
class TestExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id=1, image=np.ones((1, 5, 3)), annotations=[
Mask(image=np.array([[1, 0, 0, 1, 0]]), label=0),
Mask(image=
|
np.array([[0, 1, 1, 0, 1]])
|
numpy.array
|
'''
script to generate test nc and other data files.
'''
from shapely.geometry.point import Point
import datetime
import numpy as np
import os.path
import netCDF4 as nc
SEED = 1 #: random number seeding for missing data
OUTDIR = os.path.split(__file__)[0] #: location to write
OUTNAME = 'test_simple_spatial_01.nc' #: name of the file to write
RES = 1 #: resolution of the grid
ORIGIN = Point(-105,40) #: center coordinate of upper left cell
DIM = [4,4] #: number of cells [dimx,dimy]
## the scalar value to fill the cells. set to None for manual values. will use
## VAL_MAN variable instead.
VAL = None
VAR = ['foo','foo2'] #: name of the data variable
## any relevant variables for the time construction
TIME = {'origin':datetime.datetime(2000,1,1,0,0,0),
'end':datetime.datetime(2001,12,31,0,0,0),
'calendar':'proleptic_gregorian',
'units':'days since 2000-01-01 00:00:00',
'name':'time'}
## any relevant variables for the spatial construction
SPACE = {'row_bnds':'bounds_latitude',
'col_bnds':'bounds_longitude'}
## any relevant variables for the level construction
LEVEL = {'name':'level',
'n':2}
## variables for masked data
MASK = {'n':0,
'value':float(1e20)}
## GENERATE BASE ARRAYS ########################################################
## make time vector
delta = datetime.timedelta(1)
start = TIME['origin']
timevec = []
while start <= TIME['end']:
timevec.append(start)
start += delta
timevec = np.array(timevec)
## make the level vector
levelvec = np.arange(1,LEVEL['n']+1)*100
## make centroids
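# Longitude centroids start at ORIGIN.x and increase eastward; latitude centroids start at
# ORIGIN.y and decrease southward (ORIGIN is the upper-left cell center, spacing = RES)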
col_coords = -np.arange(abs(ORIGIN.x)-RES*(DIM[0]-1),abs(ORIGIN.x)+RES,RES)
col_coords = col_coords[::-1]
row_coords = np.arange(ORIGIN.y-RES*(DIM[1]-1),ORIGIN.y+RES,RES)
row_coords = row_coords[::-1]
col,row = np.meshgrid(col_coords,row_coords)
## using points from |coords| adjust by |res| to provide bounds
def make_bounds(coords,res):
bnds = []
for g in coords.flat:
bnds.append([g-res*0.5,g+res*0.5])
return(
|
np.array(bnds)
|
numpy.array
|
"""
Reference: <NAME> et al. "Deep Interest Network for Click-Through Rate Prediction"
(https://arxiv.org/pdf/1706.06978.pdf)
author: massquantity
"""
import os
from itertools import islice
import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
from tensorflow.keras.initializers import (
truncated_normal as tf_truncated_normal
)
from .base import Base, TfMixin
from ..evaluation.evaluate import EvalMixin
from ..utils.tf_ops import (
reg_config,
dropout_config,
dense_nn,
lr_decay_config
)
from ..data.data_generator import DataGenSequence
from ..data.sequence import user_last_interacted
from ..utils.misc import time_block, colorize
from ..utils.misc import count_params
from ..feature import (
get_predict_indices_and_values,
get_recommend_indices_and_values,
features_from_dict,
add_item_features
)
tf.disable_v2_behavior()
class DIN(Base, TfMixin, EvalMixin):
user_variables = ["user_feat"]
item_variables = ["item_feat"]
sparse_variables = ["sparse_feat"]
dense_variables = ["dense_feat"]
def __init__(
self,
task,
data_info=None,
embed_size=16,
n_epochs=20,
lr=0.001,
lr_decay=False,
reg=None,
batch_size=256,
num_neg=1,
use_bn=True,
dropout_rate=None,
hidden_units="128,64,32",
recent_num=10,
random_num=None,
use_tf_attention=False,
seed=42,
lower_upper_bound=None,
tf_sess_config=None
):
Base.__init__(self, task, data_info, lower_upper_bound)
TfMixin.__init__(self, tf_sess_config)
EvalMixin.__init__(self, task, data_info)
self.task = task
self.data_info = data_info
self.embed_size = embed_size
self.n_epochs = n_epochs
self.lr = lr
self.lr_decay = lr_decay
self.reg = reg_config(reg)
self.batch_size = batch_size
self.num_neg = num_neg
self.use_bn = use_bn
self.dropout_rate = dropout_config(dropout_rate)
self.hidden_units = list(map(int, hidden_units.split(",")))
self.n_users = data_info.n_users
self.n_items = data_info.n_items
self.use_tf_attention = use_tf_attention
(
self.interaction_mode,
self.max_seq_len
) = self._check_interaction_mode(recent_num, random_num)
self.seed = seed
self.user_consumed = data_info.user_consumed
self.sparse = self._decide_sparse_indices(data_info)
self.dense = self._decide_dense_values(data_info)
if self.sparse:
self.sparse_feature_size = self._sparse_feat_size(data_info)
self.sparse_field_size = self._sparse_field_size(data_info)
if self.dense:
self.dense_field_size = self._dense_field_size(data_info)
self.item_sparse = (
True
if data_info.item_sparse_unique is not None
else False
)
self.item_dense = (
True
if data_info.item_dense_unique is not None
else False
)
if self.item_sparse:
# item sparse col indices in all sparse cols
self.item_sparse_col_indices = data_info.item_sparse_col.index
if self.item_dense:
# item dense col indices in all dense cols
self.item_dense_col_indices = data_info.item_dense_col.index
self.user_last_interacted = None
self.last_interacted_len = None
self.all_args = locals()
def _build_model(self):
self.graph_built = True
tf.set_random_seed(self.seed)
self.concat_embed, self.item_embed, self.seq_embed = [], [], []
self._build_placeholders()
self._build_variables()
self._build_user_item()
if self.sparse:
self._build_sparse()
if self.dense:
self._build_dense()
self._build_attention()
concat_embed = tf.concat(self.concat_embed, axis=1)
mlp_layer = dense_nn(concat_embed,
self.hidden_units,
use_bn=self.use_bn,
dropout_rate=self.dropout_rate,
is_training=self.is_training,
name="mlp")
self.output = tf.reshape(
tf.layers.dense(inputs=mlp_layer, units=1), [-1])
count_params()
def _build_placeholders(self):
self.user_indices = tf.placeholder(tf.int32, shape=[None])
self.item_indices = tf.placeholder(tf.int32, shape=[None])
self.user_interacted_seq = tf.placeholder(
tf.int32, shape=[None, self.max_seq_len]) # B * seq
self.user_interacted_len = tf.placeholder(tf.float32, shape=[None])
self.labels = tf.placeholder(tf.float32, shape=[None])
self.is_training = tf.placeholder_with_default(False, shape=[])
if self.sparse:
self.sparse_indices = tf.placeholder(
tf.int32, shape=[None, self.sparse_field_size])
if self.dense:
self.dense_values = tf.placeholder(
tf.float32, shape=[None, self.dense_field_size])
def _build_variables(self):
self.user_feat = tf.get_variable(
name="user_feat",
shape=[self.n_users + 1, self.embed_size],
initializer=tf_truncated_normal(0.0, 0.01),
regularizer=self.reg)
self.item_feat = tf.get_variable(
name="item_feat",
shape=[self.n_items + 1, self.embed_size],
initializer=tf_truncated_normal(0.0, 0.01),
regularizer=self.reg)
if self.sparse:
self.sparse_feat = tf.get_variable(
name="sparse_feat",
shape=[self.sparse_feature_size, self.embed_size],
initializer=tf_truncated_normal(0.0, 0.01),
regularizer=self.reg)
if self.dense:
self.dense_feat = tf.get_variable(
name="dense_feat",
shape=[self.dense_field_size, self.embed_size],
initializer=tf_truncated_normal(0.0, 0.01),
regularizer=self.reg)
def _build_user_item(self):
user_embed = tf.nn.embedding_lookup(self.user_feat, self.user_indices)
item_embed = tf.nn.embedding_lookup(self.item_feat, self.item_indices)
self.concat_embed.extend([user_embed, item_embed])
self.item_embed.append(item_embed)
def _build_sparse(self):
sparse_embed = tf.nn.embedding_lookup(
self.sparse_feat, self.sparse_indices)
self.concat_embed.append(tf.reshape(
sparse_embed, [-1, self.sparse_field_size * self.embed_size])
)
if self.item_sparse:
item_sparse_embed = tf.layers.flatten(
tf.gather(sparse_embed, self.item_sparse_col_indices, axis=1)
)
self.item_embed.append(item_sparse_embed)
def _build_dense(self):
batch_size = tf.shape(self.dense_values)[0]
# 1 * F_dense * K
dense_embed = tf.expand_dims(self.dense_feat, axis=0)
# B * F_dense * K
dense_embed = tf.tile(dense_embed, [batch_size, 1, 1])
dense_values_reshape = tf.reshape(
self.dense_values, [-1, self.dense_field_size, 1])
dense_embed = tf.multiply(dense_embed, dense_values_reshape)
self.concat_embed.append(tf.reshape(
dense_embed, [-1, self.dense_field_size * self.embed_size])
)
if self.item_dense:
item_dense_embed = tf.layers.flatten(
tf.gather(dense_embed, self.item_dense_col_indices, axis=1)
)
self.item_embed.append(item_dense_embed)
def _build_attention(self):
# B * seq * K
seq_item_embed = tf.nn.embedding_lookup(
self.item_feat, self.user_interacted_seq)
self.seq_embed.append(seq_item_embed)
if self.item_sparse:
# contains unique field indices for each item
item_sparse_fields = tf.convert_to_tensor(
self.data_info.item_sparse_unique, dtype=tf.int64)
item_sparse_fields_num = tf.shape(item_sparse_fields)[1]
# B * seq * F_sparse
seq_sparse_fields = tf.gather(
item_sparse_fields, self.user_interacted_seq)
# B * seq * F_sparse * K
seq_sparse_embed = tf.nn.embedding_lookup(
self.sparse_feat, seq_sparse_fields)
# B * seq * FK
seq_sparse_embed = tf.reshape(
seq_sparse_embed,
[-1, self.max_seq_len, item_sparse_fields_num * self.embed_size]
)
self.seq_embed.append(seq_sparse_embed)
if self.item_dense:
# contains unique dense values for each item
item_dense_values = tf.convert_to_tensor(
self.data_info.item_dense_unique, dtype=tf.float32)
item_dense_fields_num = tf.shape(item_dense_values)[1]
# B * seq * F_dense
seq_dense_values = tf.gather(
item_dense_values, self.user_interacted_seq)
# B * seq * F_dense * 1
seq_dense_values = tf.expand_dims(seq_dense_values, axis=-1)
batch_size = tf.shape(seq_dense_values)[0]
dense_embed = tf.reshape(
self.dense_feat, [1, 1, self.dense_field_size, self.embed_size])
# B * seq * F_dense * K
# Since dense_embeddings are same for all items,
# we can simply repeat it (batch * seq) times
seq_dense_embed = tf.tile(
dense_embed, [batch_size, self.max_seq_len, 1, 1])
seq_dense_embed = tf.multiply(
seq_dense_embed, seq_dense_values)
# B * seq * FK
seq_dense_embed = tf.reshape(
seq_dense_embed,
[-1, self.max_seq_len, item_dense_fields_num * self.embed_size]
)
self.seq_embed.append(seq_dense_embed)
# B * K
item_total_embed = tf.concat(self.item_embed, axis=1)
# B * seq * K
seq_total_embed = tf.concat(self.seq_embed, axis=2)
attention_layer = self._attention_unit(
item_total_embed, seq_total_embed, self.user_interacted_len)
self.concat_embed.append(tf.layers.flatten(attention_layer))
def _attention_unit(self, queries, keys, keys_len):
if self.use_tf_attention:
query_masks = tf.cast(
tf.ones_like(tf.reshape(self.user_interacted_len, [-1, 1])),
dtype=tf.bool
)
key_masks = tf.sequence_mask(
self.user_interacted_len, self.max_seq_len
)
queries = tf.expand_dims(queries, axis=1)
attention = tf.keras.layers.Attention(use_scale=False)
pooled_outputs = attention(inputs=[queries, keys],
mask=[query_masks, key_masks])
return pooled_outputs
else:
# queries: B * K, keys: B * seq * K
queries = tf.expand_dims(queries, axis=1)
# B * seq * K
queries = tf.tile(queries, [1, self.max_seq_len, 1])
queries_keys_cross = tf.concat(
[queries, keys, queries - keys, queries * keys], axis=2)
mlp_layer = dense_nn(queries_keys_cross, (16,), use_bn=False,
activation=tf.nn.sigmoid, name="attention")
# B * seq * 1
mlp_layer = tf.layers.dense(mlp_layer, units=1, activation=None)
# attention_weights = tf.transpose(mlp_layer, [0, 2, 1])
attention_weights = tf.layers.flatten(mlp_layer)
key_masks = tf.sequence_mask(keys_len, self.max_seq_len)
paddings = tf.ones_like(attention_weights) * (-2**32 + 1)
attention_scores = tf.where(key_masks, attention_weights, paddings)
attention_scores = tf.div_no_nan(
attention_scores,
tf.sqrt(
tf.cast(keys.get_shape().as_list()[-1], tf.float32)
)
)
# B * 1 * seq
attention_scores = tf.expand_dims(
tf.nn.softmax(attention_scores), 1)
# B * 1 * K
pooled_outputs = attention_scores @ keys
return pooled_outputs
def _build_train_ops(self, **kwargs):
if self.task == "rating":
self.loss = tf.losses.mean_squared_error(labels=self.labels,
predictions=self.output)
elif self.task == "ranking":
self.loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=self.labels,
logits=self.output)
)
if self.reg is not None:
reg_keys = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = self.loss + tf.add_n(reg_keys)
else:
total_loss = self.loss
if self.lr_decay:
n_batches = int(self.data_info.data_size / self.batch_size)
self.lr, global_steps = lr_decay_config(self.lr, n_batches,
**kwargs)
else:
global_steps = None
optimizer = tf.train.AdamOptimizer(self.lr)
optimizer_op = optimizer.minimize(total_loss, global_step=global_steps)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.training_op = tf.group([optimizer_op, update_ops])
self.sess.run(tf.global_variables_initializer())
def fit(self, train_data, verbose=1, shuffle=True,
eval_data=None, metrics=None, **kwargs):
self.show_start_time()
if not self.graph_built:
self._build_model()
self._build_train_ops(**kwargs)
data_generator = DataGenSequence(train_data, self.data_info,
self.sparse, self.dense,
mode=self.interaction_mode,
num=self.max_seq_len,
padding_idx=0)
for epoch in range(1, self.n_epochs + 1):
if self.lr_decay:
print(f"With lr_decay, epoch {epoch} learning rate: "
f"{self.sess.run(self.lr)}")
with time_block(f"Epoch {epoch}", verbose):
train_total_loss = []
for (u_seq, u_len, user, item, label, sparse_idx, dense_val
) in data_generator(shuffle, self.batch_size):
feed_dict = self._get_seq_feed_dict(
u_seq, u_len, user, item, label,
sparse_idx, dense_val, True
)
train_loss, _ = self.sess.run(
[self.loss, self.training_op], feed_dict)
train_total_loss.append(train_loss)
if verbose > 1:
train_loss_str = "train_loss: " + str(
round(float(np.mean(train_total_loss)), 4)
)
print(f"\t {colorize(train_loss_str, 'green')}")
# for evaluation
self._set_last_interacted()
self.print_metrics(eval_data=eval_data, metrics=metrics,
**kwargs)
print("=" * 30)
# for prediction and recommendation
self._set_last_interacted()
self.assign_oov()
def predict(self, user, item, feats=None, cold_start="average",
inner_id=False):
user, item = self.convert_id(user, item, inner_id)
unknown_num, unknown_index, user, item = self._check_unknown(user, item)
(
user_indices,
item_indices,
sparse_indices,
dense_values
) = get_predict_indices_and_values(
self.data_info, user, item, self.n_items, self.sparse, self.dense)
if feats is not None:
assert isinstance(feats, (dict, pd.Series)), (
"feats must be dict or pandas.Series.")
assert len(user_indices) == 1, "only support single user for feats"
sparse_indices, dense_values = features_from_dict(
self.data_info, sparse_indices, dense_values, feats, "predict")
feed_dict = self._get_seq_feed_dict(self.user_last_interacted[user],
self.last_interacted_len[user],
user_indices, item_indices,
None, sparse_indices,
dense_values, False)
preds = self.sess.run(self.output, feed_dict)
if self.task == "rating":
preds = np.clip(preds, self.lower_bound, self.upper_bound)
elif self.task == "ranking":
preds = 1 / (1 + np.exp(-preds))
if unknown_num > 0 and cold_start == "popular":
preds[unknown_index] = self.default_prediction
return preds
def recommend_user(self, user, n_rec, user_feats=None, item_data=None,
cold_start="average", inner_id=False):
user_id = self._check_unknown_user(user, inner_id)
if user_id is None:
if cold_start == "average":
user_id = self.n_users
elif cold_start == "popular":
return self.data_info.popular_items[:n_rec]
else:
raise ValueError(user)
(
user_indices,
item_indices,
sparse_indices,
dense_values
) = get_recommend_indices_and_values(
self.data_info, user_id, self.n_items, self.sparse, self.dense)
if user_feats is not None:
assert isinstance(user_feats, (dict, pd.Series)), (
"feats must be dict or pandas.Series.")
sparse_indices, dense_values = features_from_dict(
self.data_info, sparse_indices, dense_values, user_feats,
"recommend")
if item_data is not None:
assert isinstance(item_data, pd.DataFrame), (
"item_data must be pandas DataFrame")
assert "item" in item_data.columns, (
"item_data must contain 'item' column")
sparse_indices, dense_values = add_item_features(
self.data_info, sparse_indices, dense_values, item_data)
u_last_interacted = np.tile(self.user_last_interacted[user_id],
(self.n_items, 1))
u_interacted_len = np.repeat(self.last_interacted_len[user_id],
self.n_items)
feed_dict = self._get_seq_feed_dict(u_last_interacted, u_interacted_len,
user_indices, item_indices, None,
sparse_indices, dense_values, False)
recos = self.sess.run(self.output, feed_dict)
if self.task == "ranking":
recos = 1 / (1 + np.exp(-recos))
consumed = set(self.user_consumed[user_id])
count = n_rec + len(consumed)
ids = np.argpartition(recos, -count)[-count:]
rank = sorted(zip(ids, recos[ids]), key=lambda x: -x[1])
recs_and_scores = islice(
(rec if inner_id else (self.data_info.id2item[rec[0]], rec[1])
for rec in rank if rec[0] not in consumed),
n_rec
)
return list(recs_and_scores)
def _set_last_interacted(self):
if (self.user_last_interacted is None
and self.last_interacted_len is None):
user_indices = np.arange(self.n_users)
(
self.user_last_interacted,
self.last_interacted_len
) = user_last_interacted(user_indices, self.user_consumed,
self.n_items, self.max_seq_len)
oov = np.full(self.max_seq_len, self.n_items, dtype=np.int32)
self.user_last_interacted = np.vstack(
[self.user_last_interacted, oov]
)
        self.last_interacted_len = np.append(self.last_interacted_len, [1])
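# Hedged illustration (not part of the model above): the non-TF branch of
# `_attention_unit` scores each historical item against the candidate item,
# masks padded positions, applies a softmax, and pools the sequence into a
# single vector. The NumPy sketch below mirrors that flow with made-up shapes;
# all names and sizes here are illustrative assumptions.
def _demo_attention_pooling(batch=2, seq=4, dim=3):
    rng = np.random.default_rng(0)
    queries = rng.normal(size=(batch, dim))             # candidate item, B * K
    keys = rng.normal(size=(batch, seq, dim))           # behavior sequence, B * seq * K
    keys_len = rng.integers(1, seq + 1, size=batch)     # valid positions per user
    # score each position (the model uses a small MLP over [q, k, q-k, q*k])
    scores = np.einsum("bd,bsd->bs", queries, keys) / np.sqrt(dim)
    mask = np.arange(seq)[None, :] < keys_len[:, None]  # B * seq boolean mask
    scores = np.where(mask, scores, -2.0 ** 32 + 1)     # padded slots get huge negative scores
    weights = np.exp(scores - scores.max(axis=1, keepdims=True))
    weights /= weights.sum(axis=1, keepdims=True)       # masked softmax over the sequence
    return np.einsum("bs,bsd->bd", weights, keys)       # weighted sum, B * K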
# coding: utf-8
from __future__ import division
import numpy as np
import scipy.spatial.distance as sd
from scipy.special import gamma
from scipy.linalg import toeplitz
from scipy.optimize import minimize
from scipy.stats import ttest_1samp as ttest
import hypertools as hyp
import pandas as pd
import warnings
from matplotlib import pyplot as plt
gaussian_params = {'var': 100}
laplace_params = {'scale': 100}
eye_params = {}
t_params = {'df': 100}
mexican_hat_params = {'sigma': 10}
uniform_params = {}
boxcar_params = {'width': 10}
def gaussian_weights(T, params=gaussian_params):
if params is None:
params = gaussian_params
c1 = np.divide(1, np.sqrt(2 * np.math.pi * params['var']))
c2 = np.divide(-1, 2 * params['var'])
sqdiffs = toeplitz(np.arange(T) ** 2)
return c1 * np.exp(c2 * sqdiffs)
def laplace_weights(T, params=laplace_params):
if params is None:
params = laplace_params
absdiffs = toeplitz(np.arange(T))
    return np.multiply(np.divide(1, 2 * params['scale']), np.exp(-np.divide(absdiffs, params['scale'])))  # Laplace kernel: exp(-|dt| / scale) / (2 * scale)
def eye_weights(T, params=eye_params):
return np.eye(T)
def uniform_weights(T, params=uniform_params):
return np.ones([T, T])
def t_weights(T, params=t_params):
if params is None:
params = t_params
c1 = np.divide(gamma((params['df'] + 1) / 2), np.sqrt(params['df'] * np.math.pi) * gamma(params['df'] / 2))
c2 = np.divide(-params['df'] + 1, 2)
sqdiffs = toeplitz(np.arange(T) ** 2)
return np.multiply(c1, np.power(1 + np.divide(sqdiffs, params['df']), c2))
def mexican_hat_weights(T, params=mexican_hat_params):
if params is None:
params = mexican_hat_params
absdiffs = toeplitz(np.arange(T))
sqdiffs = toeplitz(np.arange(T) ** 2)
a = np.divide(2, np.sqrt(3 * params['sigma']) * np.power(np.math.pi, 0.25))
b = 1 - np.power(np.divide(absdiffs, params['sigma']), 2)
c = np.exp(-np.divide(sqdiffs, 2 * np.power(params['sigma'], 2)))
return np.multiply(a, np.multiply(b, c))
def boxcar_weights(T, params=boxcar_params):
if params is None:
params = boxcar_params
return np.multiply(toeplitz(np.arange(T)) < params['width']/2., 1.)
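# Hedged usage sketch (assumed, not from the original source): each kernel
# above returns a symmetric T x T matrix whose t-th column gives the weight of
# every timepoint when estimating quantities at time t (see wcorr below). The
# demo builds two kernels and normalizes each column to sum to one.
def _demo_weight_matrices(T=50):
    gauss = gaussian_weights(T, params={'var': 25})
    lap = laplace_weights(T, params={'scale': 10})
    gauss = gauss / gauss.sum(axis=0, keepdims=True)  # column-normalize
    lap = lap / lap.sum(axis=0, keepdims=True)
    return gauss, lap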
def format_data(data):
def zero_nans(x):
x[np.isnan(x)] = 0
return x
x = hyp.tools.format_data(data, ppca=False, )
return list(map(zero_nans, x))
def _is_empty(dict):
if not bool(dict):
return True
return False
def wcorr(a, b, weights):
'''
Compute moment-by-moment correlations between sets of observations
:param a: a number-of-timepoints by number-of-features observations matrix
:param b: a number-of-timepoints by number-of-features observations matrix
:param weights: a number-of-timepoints by number-of-timepoints weights matrix
specifying the per-timepoint weights to be considered (for each timepoint)
:return: a a.shape[1] by b.shape[1] by weights.shape[0] array of per-timepoint
correlation matrices.
'''
def weighted_var_diffs(x, w):
w[np.isnan(w)] = 0
if np.sum(np.abs(w)) == 0:
weights_tiled = np.ones(x.shape)
else:
weights_tiled = np.tile(w[:, np.newaxis], [1, x.shape[1]])
mx = np.sum(np.multiply(weights_tiled, x), axis=0)[:, np.newaxis].T
diffs = x - np.tile(mx, [x.shape[0], 1])
varx = np.sum(diffs ** 2, axis=0)[:, np.newaxis].T
return varx, diffs
autocorrelation = np.isclose(a, b).all()
corrs = np.zeros([a.shape[1], b.shape[1], weights.shape[1]])
for t in np.arange(weights.shape[1]):
vara, diffs_a = weighted_var_diffs(a, weights[:, t])
if autocorrelation:
varb = vara
diffs_b = diffs_a
else:
varb, diffs_b = weighted_var_diffs(b, weights[:, t])
alpha = np.dot(diffs_a.T, diffs_b)
beta = np.sqrt(np.dot(vara.T, varb))
corrs[:, :, t] = np.divide(alpha, beta)
return corrs
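# Hedged usage sketch for wcorr: given two T x F observation matrices and a
# T x T kernel from one of the weight functions above, it returns an F x F x T
# stack of per-timepoint correlation matrices. Shapes here are made up.
def _demo_wcorr(T=30, F=5):
    rng = np.random.default_rng(0)
    a = rng.normal(size=(T, F))
    b = rng.normal(size=(T, F))
    corrs = wcorr(a, b, laplace_weights(T))
    assert corrs.shape == (F, F, T)
    return corrs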
def wisfc(data, timepoint_weights, subject_weights=None):
'''
Compute moment-by-moment correlations between sets of observations
:data: a list of number-of-timepoints by V matrices
:timepoint weights: a number-of-timepoints by number-of-timepoints weights matrix
specifying the per-timepoint weights to be considered (for each timepoint)
:subject weights: number-of-subjects by number-of-subjects weights matrix
:return: a list of number-of-timepoints by (V^2 - V)/2 + V correlation matrices
'''
if type(data) != list:
return wisfc([data], timepoint_weights, subject_weights=subject_weights)[0]
if subject_weights is None:
K = data[0].shape[1]
connectomes = np.zeros([len(data), int((K ** 2 - K) / 2)])
for s in np.arange(len(data)):
connectomes[s, :] = 1 - sd.pdist(data[s].T, metric='correlation')
subject_weights = 1 - sd.squareform(sd.pdist(connectomes, metric='correlation'))
np.fill_diagonal(subject_weights, 0)
elif np.isscalar(subject_weights):
subject_weights = subject_weights * np.ones([len(data), len(data)])
np.fill_diagonal(subject_weights, 0)
corrs = []
for s, a in enumerate(data):
b = weighted_mean(np.stack(data, axis=2), axis=2, weights=subject_weights[s, :])
wc = wcorr(a, b, timepoint_weights)
wc[np.isnan(wc)] = 0
wc[np.isinf(wc)] = 1
try:
corrs.append(mat2vec(wc))
except:
print('mystery!')
return corrs
def isfc(data, timepoint_weights):
if type(data) != list:
return isfc([data], timepoint_weights)[0]
return wisfc(data, timepoint_weights, subject_weights=1 - np.eye(len(data)))
def autofc(data, timepoint_weights):
if type(data) != list:
return autofc([data], timepoint_weights)[0]
return wisfc(data, timepoint_weights, subject_weights=np.eye(len(data)))
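# Hedged sketch of how the three wrappers differ: wisfc averages *other*
# subjects' data according to subject_weights, isfc fixes those weights to
# 1 - identity (exclude self), and autofc fixes them to the identity (self
# only). Data below are synthetic and purely illustrative; the call assumes
# the module's mat2vec helper (defined elsewhere in this file) is available.
def _demo_fc_variants(n_subjects=3, T=20, F=4):
    rng = np.random.default_rng(0)
    data = [rng.normal(size=(T, F)) for _ in range(n_subjects)]
    w = laplace_weights(T)
    across = isfc(data, w)    # each subject vs. the average of the others
    within = autofc(data, w)  # each subject vs. itself
    return across, within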
def apply_by_row(corrs, f):
'''
apply the function f to the correlation matrix specified in each row, and return a
matrix of the concatenated results
:param corrs: a matrix of vectorized correlation matrices (output of mat2vec), or a list
of such matrices
:param f: a function to apply to each vectorized correlation matrix
:return: a matrix of function outputs (for each row of the given matrices), or a list of
such matrices
'''
if type(corrs) is list:
return list(map(lambda x: apply_by_row(x, f), corrs))
corrs = vec2mat(corrs)
return np.stack(list(map(lambda x: f(np.squeeze(x)), np.split(corrs, corrs.shape[2], axis=2))), axis=0)
def corrmean_combine(corrs):
'''
Compute the mean element-wise correlation across each matrix in a list.
:param corrs: a matrix of vectorized correlation matrices (output of mat2vec), or a list
of such matrices
:return: a mean vectorized correlation matrix
'''
if not (type(corrs) == list):
return corrs
elif np.shape(corrs)[0] == 1:
return corrs
else:
return z2r(np.mean(r2z(np.stack(corrs, axis=2)), axis=2))
def mean_combine(vals):
'''
Compute the element-wise mean across each matrix in a list.
:param vals: a matrix, or a list of matrices
:return: a mean matrix
'''
if not (type(vals) == list):
return vals
else:
return np.mean(np.stack(vals, axis=2), axis=2)
def tstat_combine(corrs, return_pvals=False):
'''
Compute element-wise t-tests (comparing distribution means to 0) across each
correlation matrix in a list.
:param corrs: a matrix of vectorized correlation matrices (output of mat2vec), or a list
of such matrices
:param return_pvals: Boolean (default: False). If True, return a second matrix (or list)
of the corresponding t-tests' p-values
:return: a matrix of t-statistics of the same shape as a matrix of vectorized correlation
matrices
'''
if not (type(corrs) == list):
ts = corrs
ps = np.nan * np.zeros_like(corrs)
else:
ts, ps = ttest(r2z(np.stack(corrs, axis=2)), popmean=0, axis=2)
if return_pvals:
return ts, ps
else:
return ts
def null_combine(corrs):
'''
Placeholder function that returns the input
:param corrs: a matrix of vectorized correlation matrices (output of mat2vec), or a list
of such matrices
:return: the input
'''
return corrs
def reduce(corrs, rfun=None):
'''
:param corrs: a matrix of vectorized correlation matrices (output of mat2vec), or a list
of such matrices
:param rfun: function to use for dimensionality reduction. All hypertools and
scikit-learn functions are supported: PCA, IncrementalPCA, SparsePCA,
MiniBatchSparsePCA, KernelPCA, FastICA, FactorAnalysis, TruncatedSVD,
DictionaryLearning, MiniBatchDictionaryLearning, TSNE, Isomap,
SpectralEmbedding, LocallyLinearEmbedding, MDS, and UMAP.
Can be passed as a string, but for finer control of the model
parameters, pass as a dictionary, e.g.
        reduction={'model' : 'PCA', 'params' : {'whiten' : True}}.
See scikit-learn specific model docs for details on parameters supported
for each model.
Another option is to use graph theoretic measures computed for each node.
The following measures are supported (via the brainconn toolbox):
eigenvector_centrality, pagerank_centrality, and strength. (Each
of these must be specified as a string; dictionaries not supported.)
Default: None (no dimensionality reduction)
:return: dimensionality-reduced (or original) correlation matrices
'''
try:
import brainconn as bc
_has_brainconn = True
graph_measures = {'eigenvector_centrality': bc.centrality.eigenvector_centrality_und,
'pagerank_centrality': lambda x: bc.centrality.pagerank_centrality(x, d=0.85),
'strength': bc.degree.strengths_und}
except ImportError:
_has_brainconn = False
graph_measures = {'eigenvector_centrality': None,
'pagerank_centrality': None,
'strength': None}
if rfun is None:
return corrs
get_V = lambda x: int(np.divide(np.sqrt(8 * x + 1) - 1, 2))
if type(corrs) is list:
V = get_V(corrs[0].shape[1])
else:
V = get_V(corrs.shape[1])
if _has_brainconn and rfun in graph_measures.keys():
return apply_by_row(corrs, graph_measures[rfun])
elif not _has_brainconn and rfun in graph_measures.keys():
raise ImportError('brainconn is not installed. Please install "git+https://github.com/FIU-Neuro/brainconn#egg=brainconn"')
else:
red_corrs = hyp.reduce(corrs, reduce=rfun, ndims=V)
D = np.shape(red_corrs)[-1]
if D < V :
red_corrs = np.hstack((red_corrs, np.zeros((D, V - D))))
return red_corrs
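# Hedged note on the dimension bookkeeping in reduce(): a vectorized
# correlation matrix over V features has (V**2 - V) / 2 + V columns (upper
# triangle plus diagonal), and get_V inverts that relationship. The tiny
# check below just verifies the arithmetic for an illustrative V.
def _demo_vectorized_dims(V=10):
    n_cols = int((V ** 2 - V) / 2 + V)
    get_V = lambda x: int(np.divide(np.sqrt(8 * x + 1) - 1, 2))
    assert get_V(n_cols) == V
    return n_cols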
def smooth(w, windowsize=10, kernel_fun=laplace_weights, kernel_params=laplace_params):
if type(w) is list:
return list(map(lambda x: smooth(x, windowsize=windowsize, kernel_fun=kernel_fun, kernel_params=kernel_params), w))
assert type(windowsize) == int, 'smoothing kernel must have integer width'
k = kernel_fun(windowsize, params=kernel_params)
if iseven(windowsize):
kernel = np.divide(k[int(np.floor(windowsize/2) - 1), :] + k[int(np.ceil(windowsize/2) - 1), :], 2)
else:
kernel = k[int(np.floor(windowsize/2)), :]
kernel /= kernel.sum()
x = np.zeros_like(w)
for i in range(0, w.shape[1]):
x[:, i] = np.convolve(kernel, w[:, i], mode='same')
return x
def timepoint_decoder(data, mu=None, nfolds=2, level=0, cfun=isfc, weights_fun=laplace_weights, weights_params=laplace_params,
combine=mean_combine, rfun=None):
"""
:param data: a list of number-of-observations by number-of-features matrices
:param mu: list of floats sum to one for mixing proportions vector
:param nfolds: number of cross-validation folds (train using out-of-fold data;
test using in-fold data)
:param level: integer or list of integers for levels to be evaluated (default:0)
:param cfun: function for transforming the group data (default: isfc)
:param weights_fun: used to compute per-timepoint weights for cfun; default: laplace_weights
:param weights_params: parameters passed to weights_fun; default: laplace_params
:params combine: function for combining data within each group, or a list of such functions (default: mean_combine)
:param rfun: function for reducing output (default: None)
:return: results dictionary with the following keys:
'rank': mean percentile rank (across all timepoints and folds) in the
decoding distribution of the true timepoint
'accuracy': mean percent accuracy (across all timepoints and folds)
'error': mean estimation error (across all timepoints and folds) between
the decoded and actual window numbers, expressed as a percentage
of the total number of windows
"""
assert len(np.unique(
list(map(lambda x: x.shape[0], data)))) == 1, 'all data matrices must have the same number of timepoints'
assert len(np.unique(
list(map(lambda x: x.shape[1], data)))) == 1, 'all data matrices must have the same number of features'
group_assignments = get_xval_assignments(len(data), nfolds)
orig_level = level
orig_level = np.ravel(orig_level)
if type(level) is int:
level = np.arange(level + 1)
level = np.ravel(level)
    assert type(level) is np.ndarray, 'level needs to be an integer, list, or np.ndarray'
assert not np.any(level < 0), 'level cannot contain negative numbers'
if mu:
orig_level = level.max()
orig_level = np.ravel(orig_level)
assert np.sum(mu)==1, 'weights must sum to one'
assert np.shape(mu)[0]== level.max()+1, 'weights lengths need to be the same as number of levels'
if not np.all(np.arange(level.max()+1)==level):
level = np.arange(level.max()+1)
if callable(combine):
combine = [combine] * np.shape(level)[0]
combine = np.ravel(combine)
assert type(combine) is np.ndarray and type(combine[0]) is not np.str_, 'combine needs to be a function, list of functions, or np.ndarray of functions'
assert len(level)==len(combine), 'combine length need to be the same as level if input is type np.ndarray or list'
if callable(cfun):
cfun = [cfun] * np.shape(level)[0]
cfun = np.ravel(cfun)
    assert type(cfun) is np.ndarray and type(cfun[0]) is not np.str_, 'cfun needs to be a function, list of functions, or np.ndarray of functions'
assert len(level)==len(cfun), 'cfun length need to be the same as level if input is type np.ndarray or list'
if type(rfun) not in [list, np.ndarray]:
rfun = [rfun] * np.shape(level)[0]
p_rfun = [None] * np.shape(level)[0]
assert len(level)==len(rfun), 'parameter lengths need to be the same as level if input is ' \
'type np.ndarray or list'
results_pd = pd.DataFrame()
corrs = 0
for i in range(0, nfolds):
in_raw = []
out_raw = []
for v in level:
if v==0:
in_data = [x for x in data[group_assignments == i]]
out_data = [x for x in data[group_assignments != i]]
in_smooth, out_smooth, in_raw, out_raw = reduce_wrapper(folding_levels(in_data, out_data, level=v, cfun=None,rfun=p_rfun,
combine=combine, weights_fun=weights_fun,
weights_params=weights_params), level=v, rfun=rfun)
else:
in_smooth, out_smooth, in_raw, out_raw = reduce_wrapper(folding_levels(in_raw, out_raw, level=v, cfun=cfun,
rfun=p_rfun, combine=combine,
weights_fun=weights_fun,
weights_params=weights_params), level=v, rfun=rfun)
if mu:
next_corrs = (1 - sd.cdist(in_smooth, out_smooth, 'correlation'))
corrs += mu[v] * z2r(next_corrs)
else:
corrs = (1 - sd.cdist(in_smooth, out_smooth, 'correlation'))
if v in orig_level:
if mu:
corrs = r2z(corrs)
next_results_pd = decoder(corrs)
next_results_pd['level'] = v
next_results_pd['folds'] = i
results_pd = pd.concat([results_pd, next_results_pd])
return results_pd
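# Hedged sketch of the core decoding idea used above, stripped of the folding
# and kernel machinery: correlate every in-fold timepoint with every out-fold
# timepoint and ask whether the matching timepoint wins. All names and sizes
# are illustrative assumptions, not part of the original API.
def _demo_decoding_idea(T=25, F=6, noise=0.1):
    rng = np.random.default_rng(0)
    signal = rng.normal(size=(T, F))
    in_fold = signal + noise * rng.normal(size=(T, F))
    out_fold = signal + noise * rng.normal(size=(T, F))
    corrs = 1 - sd.cdist(in_fold, out_fold, 'correlation')  # T x T similarity
    accuracy = np.mean(np.argmax(corrs, axis=1) == np.arange(T))
    return accuracy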
def weighted_timepoint_decoder(data, nfolds=2, level=0, optimize_levels=None, cfun=isfc, weights_fun=laplace_weights,
weights_params=laplace_params, combine=mean_combine, rfun=None, opt_init=None):
"""
:param data: a list of number-of-observations by number-of-features matrices
:param nfolds: number of cross-validation folds (train using out-of-fold data;
test using in-fold data)
:param level: integer or list of integers for levels to be evaluated (default:0)
:param cfun: function for transforming the group data (default: isfc)
:param weights_fun: used to compute per-timepoint weights for cfun; default: laplace_weights
:param weights_params: parameters passed to weights_fun; default: laplace_params
:params combine: function for combining data within each group, or a list of such functions (default: mean_combine)
:param rfun: function for reducing output (default: None)
:return: results dictionary with the following keys:
'rank': mean percentile rank (across all timepoints and folds) in the
decoding distribution of the true timepoint
'accuracy': mean percent accuracy (across all timepoints and folds)
'error': mean estimation error (across all timepoints and folds) between
the decoded and actual window numbers, expressed as a percentage
of the total number of windows
"""
assert len(np.unique(
list(map(lambda x: x.shape[0], data)))) == 1, 'all data matrices must have the same number of timepoints'
assert len(np.unique(
list(map(lambda x: x.shape[1], data)))) == 1, 'all data matrices must have the same number of features'
if nfolds == 1:
sub_nfolds = 1
nfolds = 2
warnings.warn('When nfolds is set to one, the analysis will be circular.')
else:
sub_nfolds = 1
group_assignments = get_xval_assignments(len(data), nfolds)
orig_level = level
orig_level = np.ravel(orig_level)
if type(level) is int:
level = np.arange(level + 1)
level = np.ravel(level)
    assert type(level) is np.ndarray, 'level needs to be an integer, list, or np.ndarray'
assert not np.any(level < 0), 'level cannot contain negative numbers'
if not np.all(np.arange(level.max()+1)==level):
level = np.arange(level.max()+1)
if callable(combine):
combine = [combine] * np.shape(level)[0]
combine = np.ravel(combine)
assert type(combine) is np.ndarray and type(combine[0]) is not np.str_, 'combine needs to be a function, list of ' \
'functions, or np.ndarray of functions'
assert len(level)==len(combine), 'combine length need to be the same as level if input is type np.ndarray or list'
if callable(cfun):
cfun = [cfun] * np.shape(level)[0]
cfun = np.ravel(cfun)
    assert type(cfun) is np.ndarray and type(cfun[0]) is not np.str_, 'cfun needs to be a function, list of functions, ' \
                                                                      'or np.ndarray of functions'
assert len(level)==len(cfun), 'cfun length need to be the same as level if input is type np.ndarray or list'
if type(rfun) not in [list, np.ndarray]:
rfun = [rfun] * np.shape(level)[0]
p_rfun = [None] * np.shape(level)[0]
assert len(level)==len(rfun), 'parameter lengths need to be the same as level if input is ' \
'type np.ndarray or list'
results_pd = pd.DataFrame()
for i in range(0, nfolds):
in_raw = []
out_raw = []
sub_in_raw = []
sub_out_raw = []
sub_corrs = []
corrs = []
subgroup_assignments = get_xval_assignments(len(data[group_assignments == i]), nfolds)
in_data = [x for x in data[group_assignments == i]]
out_data = [x for x in data[group_assignments != i]]
for v in level:
if v==0:
in_smooth, out_smooth, in_raw, out_raw = folding_levels(in_data, out_data, level=v, cfun=None, rfun=p_rfun,
combine=combine, weights_fun=weights_fun,
weights_params=weights_params)
next_corrs = (1 - sd.cdist(mean_combine([x for x in in_raw]), mean_combine([x for x in out_raw]),
'correlation'))
# next_corrs = (1 - sd.cdist(mean_combine(in_smooth), mean_combine(out_smooth),
# 'correlation'))
corrs.append(next_corrs)
for s in range(0, 1):
sub_in_data = [x for x in data[group_assignments == i][subgroup_assignments==s]]
sub_out_data = [x for x in data[group_assignments == i][subgroup_assignments!=s]]
sub_in_smooth, sub_out_smooth, sub_in_raw, sub_out_raw = folding_levels(sub_in_data, sub_out_data,
level=v, cfun=None, rfun=p_rfun,
combine=combine, weights_fun=weights_fun,
weights_params=weights_params)
next_subcorrs = (1 - sd.cdist(mean_combine([x for x in sub_in_raw]),
mean_combine([x for x in sub_out_raw]), 'correlation'))
# next_subcorrs = (1 - sd.cdist(mean_combine(sub_in_smooth),
# mean_combine(sub_out_smooth), 'correlation'))
sub_corrs.append(next_subcorrs)
else:
in_smooth, out_smooth, in_raw, out_raw = folding_levels(in_raw, out_raw, level=v, cfun=cfun,
rfun=rfun, combine=combine,
weights_fun=weights_fun,
weights_params=weights_params)
next_corrs = (1 - sd.cdist(in_smooth, out_smooth, 'correlation'))
corrs.append(next_corrs)
print('corrs ' + str(v))
for s in range(0, 1):
sub_in_smooth, sub_out_smooth, sub_in_raw, sub_out_raw = folding_levels(sub_in_raw,
sub_out_raw,
level=v,
cfun=cfun,
rfun=rfun,
combine=combine,
weights_fun=weights_fun,
weights_params=weights_params)
print('sub corrs ' + str(v) + str(s))
next_subcorrs = (1 - sd.cdist(sub_in_smooth, sub_out_smooth, 'correlation'))
sub_corrs.append(next_subcorrs)
sub_corrs = np.array(sub_corrs)
corrs = np.array(corrs)
if sub_nfolds == 1:
sub_corrs = corrs
if not optimize_levels:
optimize_levels = range(v+1)
opt_over = []
for lev in optimize_levels:
opt_over.append(lev)
sub_out_corrs = sub_corrs[opt_over,:,:]
out_corrs = corrs[opt_over, :, :]
mu = optimize_weights(sub_out_corrs, opt_init)
w_corrs = weight_corrs(out_corrs, mu)
next_results_pd = decoder(w_corrs)
print(next_results_pd)
next_results_pd['level'] = lev
next_results_pd['folds'] = i
mu_pd = pd.DataFrame()
for c in opt_over:
mu_pd['level_' + str(c)] = [0]
mu_pd += mu
next_results_pd = pd.concat([next_results_pd, mu_pd], axis=1, join_axes=[next_results_pd.index])
results_pd = pd.concat([results_pd, next_results_pd])
return results_pd
def folding_levels(infold_data, outfold_data, level=0, cfun=None, weights_fun=None, weights_params=None, combine=None,
rfun=None):
from .timecorr import timecorr
if rfun is None:
rfun = [None] * np.shape(level)[0]
p_cfun = eval('autofc')
if level == 0:
in_fold_smooth = np.asarray(timecorr([x for x in infold_data], cfun=None,
rfun=rfun[level], combine=combine[level], weights_function=weights_fun,
weights_params=weights_params))
out_fold_smooth = np.asarray(timecorr([x for x in outfold_data], cfun=None,
rfun=rfun[level], combine=combine[level], weights_function=weights_fun,
weights_params=weights_params))
in_fold_raw = infold_data
out_fold_raw = outfold_data
else:
in_fold_smooth = np.asarray(timecorr(list(infold_data), cfun=cfun[level], rfun=rfun[level], combine=combine[level],
weights_function=weights_fun, weights_params=weights_params))
out_fold_smooth = np.asarray(timecorr(list(outfold_data), cfun=cfun[level], rfun=rfun[level], combine=combine[level],
weights_function=weights_fun, weights_params=weights_params))
in_fold_raw = np.asarray(timecorr(list(infold_data), cfun=p_cfun, rfun=rfun[level], combine=null_combine,
weights_function=eye_weights, weights_params=eye_params))
out_fold_raw = np.asarray(timecorr(list(outfold_data), cfun=p_cfun, rfun=rfun[level], combine=null_combine,
weights_function=eye_weights, weights_params=eye_params))
return in_fold_smooth, out_fold_smooth, in_fold_raw, out_fold_raw
def weighted_timepoint_decoder_ec(data, nfolds=2, level=0, optimize_levels=None, cfun=isfc, weights_fun=laplace_weights,
weights_params=laplace_params, combine=mean_combine, rfun=None, opt_init=None):
"""
:param data: a list of number-of-observations by number-of-features matrices
:param nfolds: number of cross-validation folds (train using out-of-fold data;
test using in-fold data)
:param level: integer or list of integers for levels to be evaluated (default:0)
:param cfun: function for transforming the group data (default: isfc)
:param weights_fun: used to compute per-timepoint weights for cfun; default: laplace_weights
:param weights_params: parameters passed to weights_fun; default: laplace_params
:params combine: function for combining data within each group, or a list of such functions (default: mean_combine)
:param rfun: function for reducing output (default: None)
:return: results dictionary with the following keys:
'rank': mean percentile rank (across all timepoints and folds) in the
decoding distribution of the true timepoint
'accuracy': mean percent accuracy (across all timepoints and folds)
'error': mean estimation error (across all timepoints and folds) between
the decoded and actual window numbers, expressed as a percentage
of the total number of windows
"""
if nfolds == 1:
sub_nfolds = 1
nfolds = 2
warnings.warn('When nfolds is set to one, the analysis will be circular.')
else:
sub_nfolds = 1
group_assignments = get_xval_assignments(data.shape[1], nfolds)
orig_level = level
orig_level = np.ravel(orig_level)
if type(level) is int:
level = np.arange(level + 1)
level = np.ravel(level)
    assert type(level) is np.ndarray, 'level needs to be an integer, list, or np.ndarray'
assert not np.any(level < 0), 'level cannot contain negative numbers'
if not np.all(np.arange(level.max()+1)==level):
level = np.arange(level.max()+1)
if callable(combine):
combine = [combine] * np.shape(level)[0]
combine = np.ravel(combine)
assert type(combine) is np.ndarray and type(combine[0]) is not np.str_, 'combine needs to be a function, list of ' \
'functions, or np.ndarray of functions'
assert len(level)==len(combine), 'combine length need to be the same as level if input is type np.ndarray or list'
if callable(cfun):
cfun = [cfun] * np.shape(level)[0]
cfun = np.ravel(cfun)
    assert type(cfun) is np.ndarray and type(cfun[0]) is not np.str_, 'cfun needs to be a function, list of functions, ' \
                                                                      'or np.ndarray of functions'
assert len(level)==len(cfun), 'cfun length need to be the same as level if input is type np.ndarray or list'
if type(rfun) not in [list, np.ndarray]:
rfun = [rfun] * np.shape(level)[0]
p_rfun = [None] * np.shape(level)[0]
assert len(level)==len(rfun), 'parameter lengths need to be the same as level if input is ' \
'type np.ndarray or list'
results_pd = pd.DataFrame()
for i in range(0, nfolds):
sub_corrs = []
corrs = []
subgroup_assignments = get_xval_assignments(len(data[0][group_assignments == i]), nfolds)
in_data = [x for x in data[0][group_assignments == i]]
out_data = [x for x in data[0][group_assignments != i]]
for v in level:
if v==0:
in_smooth, out_smooth, in_raw, out_raw = folding_levels_ec(in_data, out_data, level=v, cfun=None, rfun=p_rfun,
combine=combine, weights_fun=weights_fun,
weights_params=weights_params)
# next_corrs = (1 - sd.cdist(mean_combine(in_smooth), mean_combine(out_smooth),
# 'correlation'))
next_corrs = (1 - sd.cdist(mean_combine(in_raw), mean_combine(out_raw),
'correlation'))
corrs.append(next_corrs)
for s in range(0, 1):
sub_in_data = [x for x in data[0][group_assignments == i][subgroup_assignments==s]]
sub_out_data = [x for x in data[0][group_assignments == i][subgroup_assignments!=s]]
sub_in_smooth, sub_out_smooth, sub_in_raw, sub_out_raw = folding_levels_ec(sub_in_data, sub_out_data,
level=v, cfun=None, rfun=p_rfun,
combine=combine, weights_fun=weights_fun,
weights_params=weights_params)
# next_subcorrs = (1 - sd.cdist(mean_combine(sub_in_smooth),
# mean_combine(sub_out_smooth), 'correlation'))
next_subcorrs = (1 - sd.cdist(mean_combine(sub_in_raw),
mean_combine(sub_out_raw), 'correlation'))
sub_corrs.append(next_subcorrs)
elif v==1:
in_smooth, out_smooth, in_raw, out_raw = folding_levels_ec(in_raw, out_raw, level=v, cfun=cfun,
rfun=rfun, combine=combine,
weights_fun=weights_fun,
weights_params=weights_params)
next_corrs = (1 - sd.cdist(mean_combine(in_smooth), mean_combine(out_smooth),
'correlation'))
corrs.append(next_corrs)
for s in range(0, 1):
sub_in_smooth, sub_out_smooth, sub_in_raw, sub_out_raw = folding_levels_ec(sub_in_raw,
sub_out_raw,
level=v,
cfun=cfun,
rfun=rfun,
combine=combine,
weights_fun=weights_fun,
weights_params=weights_params)
next_subcorrs = (1 - sd.cdist(mean_combine(sub_in_smooth),
mean_combine(sub_out_smooth), 'correlation'))
sub_corrs.append(next_subcorrs)
else:
in_raw = [x for x in data[v-1][group_assignments == i]]
out_raw = [x for x in data[v-1][group_assignments != i]]
in_smooth, out_smooth, in_raw, out_raw = folding_levels_ec(in_raw, out_raw, level=v, cfun=cfun,
rfun=rfun, combine=combine,
weights_fun=weights_fun,
weights_params=weights_params)
next_corrs = (1 - sd.cdist(in_smooth, out_smooth, 'correlation'))
corrs.append(next_corrs)
print('corrs ' + str(v))
for s in range(0, 1):
sub_in_raw = [x for x in data[v-1][group_assignments == i][subgroup_assignments==s]]
sub_out_raw = [x for x in data[v-1][group_assignments == i][subgroup_assignments!=s]]
sub_in_smooth, sub_out_smooth, sub_in_raw, sub_out_raw = folding_levels_ec(sub_in_raw,
sub_out_raw,
level=v,
cfun=cfun,
rfun=rfun,
combine=combine,
weights_fun=weights_fun,
weights_params=weights_params)
print('sub corrs ' + str(v) + str(s))
next_subcorrs = (1 - sd.cdist(sub_in_smooth, sub_out_smooth, 'correlation'))
sub_corrs.append(next_subcorrs)
sub_corrs = np.array(sub_corrs)
corrs = np.array(corrs)
if sub_nfolds == 1:
sub_corrs = corrs
if not optimize_levels:
optimize_levels = range(v+1)
opt_over = []
for lev in optimize_levels:
opt_over.append(lev)
sub_out_corrs = sub_corrs[opt_over,:,:]
out_corrs = corrs[opt_over, :, :]
mu = optimize_weights(sub_out_corrs, opt_init)
w_corrs = weight_corrs(out_corrs, mu)
next_results_pd = decoder(w_corrs)
print(next_results_pd)
next_results_pd['level'] = lev
next_results_pd['folds'] = i
mu_pd = pd.DataFrame()
for c in opt_over:
mu_pd['level_' + str(c)] = [0]
mu_pd += mu
next_results_pd = pd.concat([next_results_pd, mu_pd], axis=1, join_axes=[next_results_pd.index])
results_pd = pd.concat([results_pd, next_results_pd])
return results_pd
def folding_levels_ec(infold_data, outfold_data, level=0, cfun=None, weights_fun=None, weights_params=None, combine=None,
rfun=None):
from .timecorr import timecorr
if rfun is None:
rfun = [None] * np.shape(level)[0]
p_cfun = eval('autofc')
if level == 0:
in_fold_smooth = np.asarray(timecorr([x for x in infold_data], cfun=None,
rfun=rfun[level], combine=combine[level], weights_function=weights_fun,
weights_params=weights_params))
out_fold_smooth = np.asarray(timecorr([x for x in outfold_data], cfun=None,
rfun=rfun[level], combine=combine[level], weights_function=weights_fun,
weights_params=weights_params))
in_fold_raw = infold_data
out_fold_raw = outfold_data
else:
raw_rfun = [None] * (level + 1)
in_fold_smooth = np.asarray(timecorr(list(infold_data), cfun=cfun[level], rfun=rfun[level], combine=combine[level],
weights_function=weights_fun, weights_params=weights_params))
out_fold_smooth = np.asarray(timecorr(list(outfold_data), cfun=cfun[level], rfun=rfun[level], combine=combine[level],
weights_function=weights_fun, weights_params=weights_params))
in_fold_raw = infold_data
out_fold_raw = outfold_data
return in_fold_smooth, out_fold_smooth, in_fold_raw, out_fold_raw
def pca_decoder(data, nfolds=2, dims=10, cfun=isfc, weights_fun=laplace_weights,
weights_params=laplace_params, combine=mean_combine, rfun=None):
"""
:param data: a list of number-of-observations by number-of-features matrices
:param nfolds: number of cross-validation folds (train using out-of-fold data;
test using in-fold data)
:param cfun: function for transforming the group data (default: isfc)
:param weights_fun: used to compute per-timepoint weights for cfun; default: laplace_weights
:param weights_params: parameters passed to weights_fun; default: laplace_params
:params combine: function for combining data within each group, or a list of such functions (default: mean_combine)
:param rfun: function for reducing output (default: None)
:return: results dictionary with the following keys:
'rank': mean percentile rank (across all timepoints and folds) in the
decoding distribution of the true timepoint
'accuracy': mean percent accuracy (across all timepoints and folds)
'error': mean estimation error (across all timepoints and folds) between
the decoded and actual window numbers, expressed as a percentage
of the total number of windows
"""
assert len(np.unique(
list(map(lambda x: x.shape[0], data)))) == 1, 'all data matrices must have the same number of timepoints'
assert len(np.unique(
list(map(lambda x: x.shape[1], data)))) == 1, 'all data matrices must have the same number of features'
pca_data = np.asarray(hyp.reduce(list(data), ndims=dims))
group_assignments = get_xval_assignments(len(pca_data), nfolds)
results_pd = pd.DataFrame()
for i in range(0, nfolds):
for d in range(1, dims + 1):
in_data = np.asarray([x for x in pca_data[group_assignments == i]])[:, :, :d]
out_data = np.asarray([x for x in pca_data[group_assignments != i]])[:, :, :d]
in_smooth, out_smooth, in_raw, out_raw = folding_levels(in_data, out_data, level=0, cfun=isfc, rfun=[None],
combine=[mean_combine], weights_fun=weights_fun,
weights_params=weights_params)
if d < 3:
in_smooth = np.hstack((in_smooth, np.zeros((in_smooth.shape[0], 3 - in_smooth.shape[1]))))
out_smooth = np.hstack((out_smooth, np.zeros((out_smooth.shape[0], 3 - out_smooth.shape[1]))))
corrs = (1 - sd.cdist(in_smooth, out_smooth, 'correlation'))
corrs = np.array(corrs)
next_results_pd = decoder(corrs)
next_results_pd['dims'] = d
next_results_pd['folds'] = i
results_pd = pd.concat([results_pd, next_results_pd])
return results_pd
def reduce_wrapper(data, dims=10, level=0, rfun=None):
if not level == 0:
all_smooth = list(data[0][np.newaxis, :, :]) + list(data[1][np.newaxis, :, :])
all_raw = list(data[2]) + list(data[3])
all_smooth_reduced = reduce(all_smooth, rfun=rfun[level])
all_raw_reduced = reduce(all_raw, rfun=rfun[level])
return all_smooth_reduced[0], all_smooth_reduced[1], all_raw_reduced[0], all_raw_reduced[1]
else:
return data[0], data[1], data[2], data[3]
def optimize_weights(corrs, opt_init=None):
b = (0, 1)
bns = (b,) * np.shape(corrs)[0]
con1 = {'type': 'eq', 'fun': lambda x: 1 - np.sum(x)}
if opt_init=='random':
x0 = sum_to_x(np.shape(corrs)[0], 1)
elif opt_init=='last':
x0 = np.repeat(1 / np.shape(corrs)[0], np.shape(corrs)[0])
x0[-1] = 1
else:
x0 = np.repeat(1/np.shape(corrs)[0], np.shape(corrs)[0])
min_mu = minimize(calculate_error, x0, args=corrs, bounds=bns, constraints=con1, options={'disp': True, 'eps': 1e-1})
return min_mu.x
def sum_to_x(n, x):
values = [0.0, x] + list(np.random.uniform(low=0.0,high=x,size=n-1))
values.sort()
return np.asarray([values[i+1] - values[i] for i in range(n)])
def calculate_error(mu, corrs, metric='error', sign=1):
results = decoder(weight_corrs(corrs, mu))
return sign * results[metric].values
def weight_corrs(corrs, mu):
assert np.shape(mu)[0] == len(corrs)
weighted_corrs = 0
for i in np.arange(np.shape(corrs)[0]):
weighted_corrs += mu[i] * r2z(corrs[i])
return z2r(weighted_corrs)
def decoder(corrs):
next_results_pd = pd.DataFrame({'rank': [0], 'accuracy': [0], 'error': [0]})
for t in np.arange(corrs.shape[0]):
decoded_inds = np.argmax(corrs[t, :])
next_results_pd['error'] += np.mean(np.abs(decoded_inds - np.array(t))) / corrs.shape[0]
next_results_pd['accuracy'] += np.mean(decoded_inds == np.array(t))
next_results_pd['rank'] += np.mean(list(map((lambda x: int(x)), (corrs[t, :] <= corrs[t, t]))))
next_results_pd['error'] = next_results_pd['error'].values / corrs.shape[0]
next_results_pd['accuracy'] = next_results_pd['accuracy'].values / corrs.shape[0]
next_results_pd['rank']= next_results_pd['rank'].values / corrs.shape[0]
return next_results_pd
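# Hedged worked example for decoder(): when the diagonal of the correlation
# matrix is the largest entry in every row, the decoded timepoint always
# matches the true one, so accuracy and rank should be 1.0 and error 0.0.
# The matrix below is made up purely for illustration.
def _demo_decoder_metrics():
    corrs = np.array([[0.9, 0.1, 0.2],
                      [0.0, 0.8, 0.3],
                      [0.1, 0.2, 0.7]])
    return decoder(corrs)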
def weighted_mean(x, axis=None, weights=None, tol=1e-5):
if axis is None:
axis = len(x.shape) - 1
if weights is None:
weights = np.ones([1, x.shape[axis]])
# remove nans and force weights to sum to 1
weights[np.isnan(weights)] = 0
if np.sum(weights) == 0:
return np.mean(x, axis=axis)
# get rid of 0 weights to avoid unnecessary computations
good_inds = np.abs(weights) > tol
weights[good_inds] /= np.sum(weights[good_inds])
weighted_sum = np.zeros(np.take(x, 0, axis=axis).shape)
for i in np.where(good_inds)[0]:
weighted_sum += weights[i] * np.take(x, i, axis=axis)
return weighted_sum
def rmdiag(m):
return m - np.diag(np.diag(m))
def r2z(r):
"""
Function that calculates the Fisher z-transformation
Parameters
----------
r : int or ndarray
Correlation value
Returns
----------
result : int or ndarray
Fishers z transformed correlation value
"""
    return 0.5 * (np.log(1 + r) - np.log(1 - r))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The module contains tools for centroiding sources.
"""
import inspect
import warnings
from astropy.nddata.utils import overlap_slices
from astropy.utils.exceptions import AstropyUserWarning
import numpy as np
from ..utils._round import _py2intround
__all__ = ['centroid_com', 'centroid_quadratic', 'centroid_sources']
def centroid_com(data, mask=None, oversampling=1):
"""
Calculate the centroid of an n-dimensional array as its "center of
mass" determined from moments.
Non-finite values (e.g., NaN or inf) in the ``data`` array are
automatically masked.
Parameters
----------
data : array_like
The input n-dimensional array.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
oversampling : int or tuple of two int, optional
Oversampling factors of pixel indices. If ``oversampling`` is
a scalar this is treated as both x and y directions having
the same oversampling factor; otherwise it is treated as
``(x_oversamp, y_oversamp)``.
Returns
-------
centroid : `~numpy.ndarray`
The coordinates of the centroid in pixel order (e.g., ``(x, y)``
or ``(x, y, z)``), not numpy axis order.
"""
data = data.astype(float)
if mask is not None and mask is not np.ma.nomask:
mask = np.asarray(mask, dtype=bool)
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape.')
data[mask] = 0.
oversampling = np.atleast_1d(oversampling)
if len(oversampling) == 1:
oversampling = np.repeat(oversampling, 2)
oversampling = oversampling[::-1] # reverse to (y, x) order
if np.any(oversampling <= 0):
raise ValueError('Oversampling factors must all be positive numbers.')
badmask = ~np.isfinite(data)
if np.any(badmask):
warnings.warn('Input data contains non-finite values (e.g., NaN or '
'inf) that were automatically masked.',
AstropyUserWarning)
data[badmask] = 0.
total = np.sum(data)
indices = np.ogrid[[slice(0, i) for i in data.shape]]
# note the output array is reversed to give (x, y) order
return np.array([np.sum(indices[axis] * data) / total / oversampling[axis]
for axis in range(data.ndim)])[::-1]
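# Hedged usage sketch for centroid_com: pixel indices are weighted by pixel
# values, so for a small synthetic blob the returned (x, y) position should
# land near the blob's peak. The array below is illustrative only.
def _demo_centroid_com():
    y, x = np.mgrid[0:9, 0:9]
    blob = np.exp(-((x - 5.0) ** 2 + (y - 3.0) ** 2) / 2.0)
    xc, yc = centroid_com(blob)
    # expected: xc close to 5.0, yc close to 3.0
    return xc, yc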
def centroid_quadratic(data, xpeak=None, ypeak=None, fit_boxsize=5,
search_boxsize=None, mask=None):
"""
Calculate the centroid of an n-dimensional array by fitting a 2D
quadratic polynomial.
A second degree 2D polynomial is fit within a small region of the
data defined by ``fit_boxsize`` to calculate the centroid position.
The initial center of the fitting box can specified using the
``xpeak`` and ``ypeak`` keywords. If both ``xpeak`` and ``ypeak``
are `None`, then the box will be centered at the position of the
maximum value in the input ``data``.
If ``xpeak`` and ``ypeak`` are specified, the ``search_boxsize``
optional keyword can be used to further refine the initial center of
the fitting box by searching for the position of the maximum pixel
within a box of size ``search_boxsize``.
`Vakili & Hogg (2016) <https://arxiv.org/abs/1610.05873>`_
demonstrate that 2D quadratic centroiding comes very
close to saturating the `Cramér-Rao lower bound
<https://en.wikipedia.org/wiki/Cram%C3%A9r%E2%80%93Rao_bound>`_ in a
wide range of conditions.
Parameters
----------
data : numpy.ndarray
Image data.
xpeak, ypeak : float or `None`, optional
The initial guess of the position of the centroid. If either
``xpeak`` or ``ypeak`` is `None` then the position of the
maximum value in the input ``data`` will be used as the initial
guess.
fit_boxsize : int or tuple of int, optional
The size (in pixels) of the box used to define the fitting
region. If ``fit_boxsize`` has two elements, they should be in
``(ny, nx)`` order. If ``fit_boxsize`` is a scalar then a square
box of size ``fit_boxsize`` will be used.
search_boxsize : int or tuple of int, optional
The size (in pixels) of the box used to search for the maximum
pixel value if ``xpeak`` and ``ypeak`` are both specified. If
``fit_boxsize`` has two elements, they should be in ``(ny,
nx)`` order. If ``fit_boxsize`` is a scalar then a square box
of size ``fit_boxsize`` will be used. This parameter is ignored
if either ``xpeak`` or ``ypeak`` is `None`. In that case, the
        entire array is searched for the maximum value.
mask : bool `~numpy.ndarray`, optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked data are excluded from calculations.
Returns
-------
centroid : `~numpy.ndarray`
The ``x, y`` coordinates of the centroid.
Notes
-----
Use ``fit_boxsize = (3, 3)`` to match the work of `Vakili &
Hogg (2016) <https://arxiv.org/abs/1610.05873>`_ for their 2D
second-order polynomial centroiding method.
References
----------
.. [1] Vakili and Hogg 2016; arXiv:1610.05873
(https://arxiv.org/abs/1610.05873)
"""
if ((xpeak is None and ypeak is not None)
or (xpeak is not None and ypeak is None)):
raise ValueError('xpeak and ypeak must both be input or "None"')
if xpeak is not None and ((xpeak < 0) or (xpeak > data.shape[1] - 1)):
raise ValueError('xpeak is outside of the input data')
if ypeak is not None and ((ypeak < 0) or (ypeak > data.shape[0] - 1)):
raise ValueError('ypeak is outside of the input data')
data = np.asanyarray(data, dtype=float).copy()
ny, nx = data.shape
badmask = ~np.isfinite(data)
if np.any(badmask):
warnings.warn('Input data contains non-finite values (e.g., NaN or '
'inf) that were automatically masked.',
AstropyUserWarning)
data[badmask] = np.nan
if mask is not None:
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape.')
data[mask] = np.nan
fit_boxsize = _process_boxsize(fit_boxsize, data.shape)
if np.product(fit_boxsize) < 6:
raise ValueError('fit_boxsize is too small. 6 values are required '
'to fit a 2D quadratic polynomial.')
if xpeak is None or ypeak is None:
yidx, xidx = np.unravel_index(np.nanargmax(data), data.shape)
else:
xidx = _py2intround(xpeak)
yidx = _py2intround(ypeak)
if search_boxsize is not None:
search_boxsize = _process_boxsize(search_boxsize, data.shape)
slc_data, _ = overlap_slices(data.shape, search_boxsize,
(yidx, xidx), mode='trim')
cutout = data[slc_data]
yidx, xidx = np.unravel_index(np.nanargmax(cutout), cutout.shape)
xidx += slc_data[1].start
yidx += slc_data[0].start
# if peak is at the edge of the data, return the position of the maximum
if xidx == 0 or xidx == nx - 1 or yidx == 0 or yidx == ny - 1:
warnings.warn('maximum value is at the edge of the data and its '
'position was returned; no quadratic fit was '
'performed', AstropyUserWarning)
return np.array((xidx, yidx), dtype=float)
# extract the fitting region
slc_data, _ = overlap_slices(data.shape, fit_boxsize, (yidx, xidx),
mode='trim')
xidx0, xidx1 = (slc_data[1].start, slc_data[1].stop)
yidx0, yidx1 = (slc_data[0].start, slc_data[0].stop)
# shift the fitting box if it was clipped by the data edge
if (xidx1 - xidx0) < fit_boxsize[1]:
if xidx0 == 0:
xidx1 = min(nx, xidx0 + fit_boxsize[1])
if xidx1 == nx:
xidx0 = max(0, xidx1 - fit_boxsize[1])
if (yidx1 - yidx0) < fit_boxsize[0]:
if yidx0 == 0:
yidx1 = min(ny, yidx0 + fit_boxsize[0])
if yidx1 == ny:
yidx0 = max(0, yidx1 - fit_boxsize[0])
cutout = data[yidx0:yidx1, xidx0:xidx1].ravel()
if np.count_nonzero(~np.isnan(cutout)) < 6:
warnings.warn('at least 6 unmasked data points are required to '
'perform a 2D quadratic fit',
AstropyUserWarning)
return np.array((np.nan, np.nan))
# fit a 2D quadratic polynomial to the fitting region
xi = np.arange(xidx0, xidx1)
    yi = np.arange(yidx0, yidx1)
import os
import numpy as np
import pickle
from tensorrtserver.api import *
import cv2
import time
import json
import xlrd
import json
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
def softmax(x):
"""
Compute the softmax function for each row of the input x.
Arguments:
x -- A N dimensional vector or M x N dimensional numpy matrix.
Return:
x -- You are allowed to modify x in-place
"""
orig_shape = x.shape
if len(x.shape) > 1:
# Matrix
exp_minmax = lambda x: np.exp(x - np.max(x))
denom = lambda x: 1.0 / np.sum(x)
x = np.apply_along_axis(exp_minmax, 1, x)
denominator = np.apply_along_axis(denom, 1, x)
if len(denominator.shape) == 1:
denominator = denominator.reshape((denominator.shape[0], 1))
x = x * denominator
else:
# Vector
x_max = np.max(x)
x = x - x_max
numerator = np.exp(x)
denominator = 1.0 / np.sum(numerator)
x = numerator.dot(denominator)
assert x.shape == orig_shape
return x
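# Hedged usage sketch for softmax above: a 1-D input is treated as a single
# distribution, while rows of a 2-D input are normalized independently.
# Values are arbitrary and for illustration only.
def _demo_softmax():
    vec = softmax(np.array([1.0, 2.0, 3.0]))
    mat = softmax(np.array([[1.0, 2.0, 3.0],
                            [1.0, 1.0, 1.0]]))
    assert np.isclose(vec.sum(), 1.0)
    assert np.allclose(mat.sum(axis=1), 1.0)
    return vec, mat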
def plot_roc(y_true, y_pred):
log = []
from sklearn import metrics
def calc_metrics_table(y_true, y_pred, thresholds):
metrics_list = list()
for threshold in thresholds:
y_pred_binary = np.zeros(y_pred.shape, dtype=np.uint8)
y_pred_binary[y_pred>threshold] = 1
tn, fp, fn, tp = metrics.confusion_matrix(y_true, y_pred_binary).ravel()
print('tn:{:.3f}\tfp:{:.3f}\tfn:{:.3f}\ttp:{:.3f}\t'.format(tn, fp, fn, tp))
accuracy = (tp+tn)/(tn+fp+fn+tp)
sensitivity = tp/(tp+fn)
specificity = tn/(fp+tn)
ppv = tp/(tp+fp)
npv = tn/(tn+fn)
metrics_list.append([threshold, accuracy, sensitivity, specificity, ppv, npv])
metrics_table = pd.DataFrame(np.array(metrics_list), columns=['threshold','accuracy','sensitivity','specificity','ppv','npv'])
return metrics_table
fpr, tpr, thres = metrics.roc_curve(y_true, y_pred)
auc = metrics.auc(fpr, tpr)
thresholds = np.arange(0.05, 1., 0.05)
metrics_table = calc_metrics_table(y_true, y_pred, thresholds)
print('AUC:%.4f'% auc)
log.append('AUC:%.4f'% auc)
plt.title('roc curve')
plt.plot(fpr, tpr, 'r')
plt.xlabel('fpr')
plt.ylabel('tpr')
plt.xticks(np.arange(0, 1.1, step=0.1))
plt.yticks(np.arange(0, 1.1, step=0.1))
plt.grid(ls='--')
# plt.show()
print(metrics_table)
log.append(metrics_table)
return log
def get_img(infile):
img = cv2.imread(infile, cv2.IMREAD_GRAYSCALE)
img = np.array(img, dtype=np.float32)
img = img/255
    img = np.expand_dims(img, 0)
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn import LSTMCell, GRUCell
import sys
class hierarchical_attention_network(object):
'''
hierarchical attention network for document classification
https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf
parameters:
- embedding_matrix: numpy array
numpy array of word embeddings
each row should represent a word embedding
NOTE: the word index 0 is dropped, so the first row is ignored
- num classes: int
number of output classes
- max_sents: int
maximum number of sentences per document
- max_words: int
maximum number of words per sentence
- rnn_type: string (default: "gru")
rnn cells to use, can be "gru" or "lstm"
- rnn_units: int (default: 200)
number of rnn units to use for embedding layers
- attention_size: int (default: 300)
number of dimensions to use for attention context layer
- dropout_keep: float (default: 0.5)
dropout keep rate for final softmax layer
methods:
- train(data,labels,epochs=30,savebest=False,filepath=None)
train network on given data
- predict(data)
return the one-hot-encoded predicted labels for given data
- score(data,labels,bootstrap=False,bs_samples=100)
return the accuracy of predicted labels on given data
- save(filepath)
save the model weights to a file
- load(filepath)
load model weights from a file
'''
def __init__(self,embedding_matrix,num_classes,max_sents,max_words,rnn_type="gru",
rnn_units=200,attention_size=300,dropout_keep=0.5):
self.rnn_units = rnn_units
if rnn_type == "gru":
self.rnn_cell = GRUCell
elif rnn_type == "lstm":
self.rnn_cell = LSTMCell
else:
raise Exception("rnn_type parameter must be set to gru or lstm")
self.dropout_keep = dropout_keep
self.dropout = tf.placeholder(tf.float32)
self.ms = max_sents
self.mw = max_words
#doc input and mask
self.doc_input = tf.placeholder(tf.int32, shape=[max_sents,max_words])
words_per_line = tf.reduce_sum(tf.sign(self.doc_input),1)
num_lines = tf.reduce_sum(tf.sign(words_per_line))
max_words_ = tf.reduce_max(words_per_line)
doc_input_reduced = self.doc_input[:num_lines,:max_words_]
num_words = words_per_line[:num_lines]
#word rnn layer
word_embeds = tf.gather(tf.get_variable('embeddings',initializer=
embedding_matrix.astype(np.float32),dtype=tf.float32),doc_input_reduced)
with tf.variable_scope('words'):
[word_outputs_fw,word_outputs_bw],_ = \
tf.nn.bidirectional_dynamic_rnn(
self.rnn_cell(self.rnn_units),self.rnn_cell(self.rnn_units),
word_embeds,sequence_length=num_words,dtype=tf.float32)
word_outputs = tf.concat((word_outputs_fw, word_outputs_bw),2)
#word attention
seq_mask = tf.reshape(tf.sequence_mask(num_words,max_words_),[-1])
word_u = tf.layers.dense(tf.reshape(word_outputs,[-1,self.rnn_units*2]),attention_size,tf.nn.tanh,
kernel_initializer=tf.contrib.layers.xavier_initializer())
word_exps = tf.layers.dense(word_u,1,tf.exp,False,kernel_initializer=tf.contrib.layers.xavier_initializer())
word_exps = tf.where(seq_mask,word_exps,tf.ones_like(word_exps)*0.000000001)
word_alpha = tf.reshape(word_exps,[-1,max_words_,1])
word_alpha /= tf.reshape(tf.reduce_sum(word_alpha,1),[-1,1,1])
sent_embeds = tf.reduce_sum(word_outputs*word_alpha,1)
sent_embeds = tf.expand_dims(sent_embeds,0)
#sentence rnn layer
with tf.variable_scope('sentence'):
[sent_outputs_fw,sent_outputs_bw],_ = \
tf.nn.bidirectional_dynamic_rnn(
self.rnn_cell(self.rnn_units),self.rnn_cell(self.rnn_units),
sent_embeds,sequence_length=tf.expand_dims(num_lines,0),dtype=tf.float32)
sent_outputs = tf.concat((tf.squeeze(sent_outputs_fw,[0]),tf.squeeze(sent_outputs_bw,[0])),1)
#sentence attention
sent_u = tf.layers.dense(sent_outputs,attention_size,tf.nn.tanh,
kernel_initializer=tf.contrib.layers.xavier_initializer())
sent_exp = tf.layers.dense(sent_u,1,tf.exp,False,kernel_initializer=tf.contrib.layers.xavier_initializer())
sent_atten = sent_exp/tf.reduce_sum(sent_exp)
doc_embed = tf.transpose(tf.matmul(tf.transpose(sent_outputs),sent_atten))
#classification functions
logits = tf.layers.dense(doc_embed,num_classes,kernel_initializer=tf.orthogonal_initializer())
self.prediction = tf.nn.softmax(logits)
#loss, accuracy, and training functions
self.labels = tf.placeholder(tf.float32, shape=[num_classes])
self.labels_rs = tf.expand_dims(self.labels,0)
self.loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,labels=self.labels_rs)
self.optimizer = tf.train.AdamOptimizer(0.0001,0.9,0.99).minimize(self.loss)
#init op
self.init_op = tf.global_variables_initializer()
self.saver = tf.train.Saver()
self.sess = tf.Session()
self.sess.run(self.init_op)
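#note: the graph above handles one document per step -- doc_input has shape
#[max_sents, max_words] and labels has shape [num_classes], with no batch dimension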
def _list_to_numpy(self,inputval):
'''
convert variable length lists of input values to zero padded numpy array
'''
if type(inputval) == list:
retval = np.zeros((self.ms,self.mw))
for i,line in enumerate(inputval):
for j, word in enumerate(line):
retval[i,j] = word
return retval
elif type(inputval) == np.ndarray:
return inputval
else:
raise Exception("invalid input type")
def train(self,data,labels,epochs=30,validation_data=None,savebest=False,filepath=None):
'''
train network on given data
parameters:
- data: numpy array
3d numpy array (doc x sentence x word ids) of input data
- labels: numpy array
2d numpy array of one-hot-encoded labels
- epochs: int (default: 30)
number of epochs to train for
- validation_data: tuple (optional)
tuple of numpy arrays (X,y) representing validation data
- savebest: boolean (default: False)
set to True to save the best model based on validation score per epoch
- filepath: string (optional)
path to save model if savebest is set to True
outputs:
None
'''
if savebest==True and filepath==None:
raise Exception("Please enter a path to save the network")
if validation_data:
validation_size = len(validation_data[0])
else:
validation_size = len(data)
print('training network on %i documents, validating on %i documents' \
% (len(data), validation_size))
#track best model for saving
prevbest = 0
for i in range(epochs):
correct = 0.
#train
for doc in range(len(data)):
inputval = self._list_to_numpy(data[doc])
feed_dict = {self.doc_input:inputval,self.labels:labels[doc],self.dropout:self.dropout_keep}
pred,cost,_ = self.sess.run([self.prediction,self.loss,self.optimizer],feed_dict=feed_dict)
if np.argmax(pred) ==
|
np.argmax(labels[doc])
|
numpy.argmax
|
#!/usr/bin/env python3
import numpy as np
import tensorflow as tf
class Network:
HEIGHT, WIDTH = 28, 28
def __init__(self, threads, seed=42):
# Create an empty graph and a session
graph = tf.Graph()
graph.seed = seed
self.session = tf.Session(graph = graph, config=tf.ConfigProto(inter_op_parallelism_threads=threads,
intra_op_parallelism_threads=threads))
def construct(self, args):
self.z_dim = args.z_dim
with self.session.graph.as_default():
if args.recodex:
tf.get_variable_scope().set_initializer(tf.glorot_uniform_initializer(seed=42))
# Inputs
self.images = tf.placeholder(tf.float32, [None, self.HEIGHT, self.WIDTH, 1])
self.z = tf.placeholder(tf.float32, [None, self.z_dim])
# Generator
def generator(z):
# Define a generator as a sequence of:
# - dense layer with 128 neurons and ReLU activation
# - dense layer with as many neurons as there are pixels in an image
# with sigmoid activation.
#
# Consider the output of the last hidden layer to be the logits of
# individual pixels. Reshape them into a correct shape for a grayscale
# image of size self.WIDTH x self.HEIGHT and return them.
x = tf.layers.dense(z, 128, activation=tf.nn.relu)
x = tf.layers.dense(x, self.HEIGHT * self.WIDTH, activation=tf.sigmoid)
return tf.reshape(x, [-1, self.HEIGHT, self.WIDTH, 1])
with tf.variable_scope("generator"):
# Define `self.generated_images` as a result of `generator` applied to `self.z`.
self.generated_images = generator(self.z)
# Discriminator
def discriminator(image):
# Define a discriminator as a sequence of:
# - flattening layer
# - dense layer with 128 neurons and ReLU activation
# - dense layer with 1 neuron without activation
#
# Consider the last hidden layer output to be the logit of whether the input
# images comes from real data. Change its shape to remove the last dimension
# (i.e., [batch_size] instead of [batch_size, 1]) and return it.
x = tf.layers.flatten(image)
x = tf.layers.dense(x, 128, activation=tf.nn.relu)
x = tf.layers.dense(x, 1)
return tf.squeeze(x)
with tf.variable_scope("discriminator"):
# Define `discriminator_logit_real` as a result of
# `discriminator` applied to `self.images`.
discriminator_logit_real = discriminator(self.images)
with tf.variable_scope("discriminator", reuse = True):
# Define `discriminator_logit_fake` as a result of
# `discriminator` applied to `self.generated_images`.
#
# Note the discriminator is called in the same variable
# scope as several lines above -- it will try to utilize the
# same variables. In order to allow reusing them, we need to explicitly
# pass the `reuse=True` flag.
discriminator_logit_fake = discriminator(self.generated_images)
# Losses
# Define `self.discriminator_loss` as a sum of
# - sigmoid cross entropy loss with gold labels of ones (1.0) and discriminator_logit_real
# - sigmoid cross entropy loss with gold labels of zeros (0.0) and discriminator_logit_fake
self.discriminator_loss = tf.losses.sigmoid_cross_entropy(tf.ones_like(discriminator_logit_real),
discriminator_logit_real) + \
tf.losses.sigmoid_cross_entropy(tf.zeros_like(discriminator_logit_fake),
discriminator_logit_fake)
# Define `self.generator_loss` as a sigmoid cross entropy
# loss with gold labels of ones (1.0) and discriminator_logit_fake.
self.generator_loss = tf.losses.sigmoid_cross_entropy(tf.ones_like(discriminator_logit_fake),
discriminator_logit_fake)
# Training
global_step = tf.train.create_global_step()
# Create `self.discriminator_training` as an AdamOptimizer.minimize
# for discriminator_loss and variables in the "discriminator" namespace using
# the option var_list=tf.global_variables("discriminator").
# Do *not* pass global_step as argument to AdamOptimizer.minimize.
self.discriminator_training = tf.train.AdamOptimizer().minimize(self.discriminator_loss,
var_list=tf.global_variables("discriminator"))
# Create `self.generator_training` as an AdamOptimizer.minimize
# for generator_loss and variables in "generator" namespace.
# This time *do* pass global_step as argument to AdamOptimizer.minimize.
self.generator_training = tf.train.AdamOptimizer().minimize(self.generator_loss,
global_step=global_step,
var_list=tf.global_variables("generator"))
# Summaries
discriminator_accuracy = tf.reduce_mean(tf.to_float(tf.concat([
tf.greater(discriminator_logit_real, 0), tf.less(discriminator_logit_fake, 0)], axis=0)))
summary_writer = tf.contrib.summary.create_file_writer(args.logdir, flush_millis=10 * 1000)
with summary_writer.as_default(), tf.contrib.summary.record_summaries_every_n_global_steps(100):
self.discriminator_summary = [tf.contrib.summary.scalar("gan/discriminator_loss", self.discriminator_loss),
tf.contrib.summary.scalar("gan/discriminator_accuracy", discriminator_accuracy)]
self.generator_summary = tf.contrib.summary.scalar("gan/generator_loss", self.generator_loss)
self.generated_image_data = tf.placeholder(tf.float32, [None, None, 1])
with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():
self.generated_image_summary = tf.contrib.summary.image("gan/generated_image",
tf.expand_dims(self.generated_image_data, axis=0))
# Initialize variables
self.session.run(tf.global_variables_initializer())
with summary_writer.as_default():
tf.contrib.summary.initialize(session=self.session, graph=self.session.graph)
def sample_z(self, batch_size):
# Return uniform random noise in -1, 1 range using `np.random.uniform`
# call, with shape [batch_size, self.z_dim].
return
|
np.random.uniform(-1, 1, [batch_size, self.z_dim])
|
numpy.random.uniform
|
#
# Author: <NAME>
# Copyright 2015-present, NASA-JPL/Caltech
#
import os
import logging
import numpy as np
import numpy.matlib
import isceobj
logger = logging.getLogger('isce.alos2insar.runIonFilt')
def runIonFilt(self):
'''compute and filter ionospheric phase
'''
catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name)
self.updateParamemetersFromUser()
if not self.doIon:
catalog.printToLog(logger, "runIonFilt")
self._insar.procDoc.addAllFromCatalog(catalog)
return
masterTrack = self._insar.loadTrack(master=True)
slaveTrack = self._insar.loadTrack(master=False)
from isceobj.Alos2Proc.runIonSubband import defineIonDir
ionDir = defineIonDir()
subbandPrefix = ['lower', 'upper']
ionCalDir = os.path.join(ionDir['ion'], ionDir['ionCal'])
os.makedirs(ionCalDir, exist_ok=True)
os.chdir(ionCalDir)
############################################################
# STEP 1. compute ionospheric phase
############################################################
from isceobj.Constants import SPEED_OF_LIGHT
from isceobj.Alos2Proc.Alos2ProcPublic import create_xml
###################################
#SET PARAMETERS HERE
#THESE SHOULD BE GOOD ENOUGH, NO NEED TO SET IN setup(self)
corThresholdAdj = 0.97
corOrderAdj = 20
###################################
print('\ncomputing ionosphere')
#get files
ml2 = '_{}rlks_{}alks'.format(self._insar.numberRangeLooks1*self._insar.numberRangeLooksIon,
self._insar.numberAzimuthLooks1*self._insar.numberAzimuthLooksIon)
lowerUnwfile = subbandPrefix[0]+ml2+'.unw'
upperUnwfile = subbandPrefix[1]+ml2+'.unw'
corfile = 'diff'+ml2+'.cor'
#use image size from lower unwrapped interferogram
img = isceobj.createImage()
img.load(lowerUnwfile + '.xml')
width = img.width
length = img.length
lowerUnw = (np.fromfile(lowerUnwfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
upperUnw = (np.fromfile(upperUnwfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
cor = (np.fromfile(corfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
#amp = (np.fromfile(corfile, dtype=np.float32).reshape(length*2, width))[0:length*2:2, :]
#mask out user-specified areas
if self.maskedAreasIon != None:
maskedAreas = reformatMaskedAreas(self.maskedAreasIon, length, width)
for area in maskedAreas:
lowerUnw[area[0]:area[1], area[2]:area[3]] = 0
upperUnw[area[0]:area[1], area[2]:area[3]] = 0
cor[area[0]:area[1], area[2]:area[3]] = 0
#remove possible weird values in coherence
cor[np.nonzero(cor<0)] = 0.0
cor[np.nonzero(cor>1)] = 0.0
#compute ionosphere
fl = SPEED_OF_LIGHT / self._insar.subbandRadarWavelength[0]
fu = SPEED_OF_LIGHT / self._insar.subbandRadarWavelength[1]
adjFlag = 1
cor[np.nonzero(cor<corThresholdAdj)] = 0.0
ionos = computeIonosphere(lowerUnw, upperUnw, cor**corOrderAdj, fl, fu, adjFlag, 0)
#dump ionosphere
ionfile = 'ion'+ml2+'.ion'
# ion = np.zeros((length*2, width), dtype=np.float32)
# ion[0:length*2:2, :] = amp
# ion[1:length*2:2, :] = ionos
# ion.astype(np.float32).tofile(ionfile)
# img.filename = ionfile
# img.extraFilename = ionfile + '.vrt'
# img.renderHdr()
ionos.astype(np.float32).tofile(ionfile)
create_xml(ionfile, width, length, 'float')
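#at this point the raw ionospheric (dispersive) phase has been estimated from the lower/upper
#subband unwrapped interferograms, weighted by cor**corOrderAdj (coherence thresholded at
#corThresholdAdj), with a polynomial fit (adjFlag=1) removing relative unwrapping errors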
############################################################
# STEP 2. filter ionospheric phase
############################################################
#################################################
#SET PARAMETERS HERE
#if applying polynomial fitting
#False: no fitting, True: with fitting
fit = self.fitIon
#gaussian filtering window size
size_max = self.filteringWinsizeMaxIon
size_min = self.filteringWinsizeMinIon
if size_min >= size_max:
print('\n\nWARNING: minimum window size for filtering ionosphere phase {} >= maximum window size {}'.format(size_min, size_max))
print(' resetting maximum window size to {}\n\n'.format(size_min+5))
size_max = size_min + 5
#THESE SHOULD BE GOOD ENOUGH, NO NEED TO SET IN setup(self)
#corThresholdFit = 0.85
#Now changed to use lower band coherence. crl, 23-apr-2020.
useDiffCoherence = False
if useDiffCoherence:
#parameters for using diff coherence
corfile = 'diff'+ml2+'.cor'
corThresholdFit = 0.95
# 1 is not good for low coherence case, changed to 20
#corOrderFit = 1
corOrderFit = 20
corOrderFilt = 20
else:
#parameters for using lower/upper band coherence
corfile = subbandPrefix[0]+ml2+'.cor'
corThresholdFit = 0.4
corOrderFit = 10
corOrderFilt = 10
#################################################
print('\nfiltering ionosphere')
ionfile = 'ion'+ml2+'.ion'
#corfile = 'diff'+ml2+'.cor'
ionfiltfile = 'filt_ion'+ml2+'.ion'
img = isceobj.createImage()
img.load(ionfile + '.xml')
width = img.width
length = img.length
#ion = (np.fromfile(ionfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
ion = np.fromfile(ionfile, dtype=np.float32).reshape(length, width)
cor = (np.fromfile(corfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
#amp = (np.fromfile(ionfile, dtype=np.float32).reshape(length*2, width))[0:length*2:2, :]
#mask out user-specified areas
if self.maskedAreasIon != None:
maskedAreas = reformatMaskedAreas(self.maskedAreasIon, length, width)
for area in maskedAreas:
ion[area[0]:area[1], area[2]:area[3]] = 0
cor[area[0]:area[1], area[2]:area[3]] = 0
#remove possible weird values in coherence
cor[np.nonzero(cor<0)] = 0.0
cor[np.nonzero(cor>1)] = 0.0
# #applying water body mask here
# waterBodyFile = 'wbd'+ml2+'.wbd'
# if os.path.isfile(waterBodyFile):
# print('applying water body mask to coherence used to compute ionospheric phase')
# wbd = np.fromfile(waterBodyFile, dtype=np.int8).reshape(length, width)
# cor[np.nonzero(wbd!=0)] = 0.00001
if fit:
import copy
wgt = copy.deepcopy(cor)
wgt[np.nonzero(wgt<corThresholdFit)] = 0.0
ion_fit = weight_fitting(ion, wgt**corOrderFit, width, length, 1, 1, 1, 1, 2)
ion -= ion_fit * (ion!=0)
#minimize the effect of low coherence pixels
#cor[np.nonzero( (cor<0.85)*(cor!=0) )] = 0.00001
#filt = adaptive_gaussian(ion, cor, size_max, size_min)
#cor**14 should be a good weight to use. 22-APR-2018
filt = adaptive_gaussian(ion, cor**corOrderFilt, size_max, size_min)
if fit:
filt += ion_fit * (filt!=0)
# ion = np.zeros((length*2, width), dtype=np.float32)
# ion[0:length*2:2, :] = amp
# ion[1:length*2:2, :] = filt
# ion.astype(np.float32).tofile(ionfiltfile)
# img.filename = ionfiltfile
# img.extraFilename = ionfiltfile + '.vrt'
# img.renderHdr()
filt.astype(np.float32).tofile(ionfiltfile)
create_xml(ionfiltfile, width, length, 'float')
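#at this point the ionospheric phase has been smoothed with an adaptive gaussian window
#(size between size_min and size_max) weighted by cor**corOrderFilt; when fit is enabled the
#fitted polynomial surface is removed before filtering and added back afterwards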
############################################################
# STEP 3. resample ionospheric phase
############################################################
from contrib.alos2proc_f.alos2proc_f import rect
from isceobj.Alos2Proc.Alos2ProcPublic import create_xml
from scipy.interpolate import interp1d
import shutil
#################################################
#SET PARAMETERS HERE
#interpolation method
interpolationMethod = 1
#################################################
print('\ninterpolate ionosphere')
ml3 = '_{}rlks_{}alks'.format(self._insar.numberRangeLooks1*self._insar.numberRangeLooks2,
self._insar.numberAzimuthLooks1*self._insar.numberAzimuthLooks2)
ionfiltfile = 'filt_ion'+ml2+'.ion'
#ionrectfile = 'filt_ion'+ml3+'.ion'
ionrectfile = self._insar.multilookIon
img = isceobj.createImage()
img.load(ionfiltfile + '.xml')
width2 = img.width
length2 = img.length
img = isceobj.createImage()
img.load(os.path.join('../../', ionDir['insar'], self._insar.multilookDifferentialInterferogram) + '.xml')
width3 = img.width
length3 = img.length
#number of range looks output
nrlo = self._insar.numberRangeLooks1*self._insar.numberRangeLooks2
#number of range looks input
nrli = self._insar.numberRangeLooks1*self._insar.numberRangeLooksIon
#number of azimuth looks output
nalo = self._insar.numberAzimuthLooks1*self._insar.numberAzimuthLooks2
#number of azimuth looks input
nali = self._insar.numberAzimuthLooks1*self._insar.numberAzimuthLooksIon
if (self._insar.numberRangeLooks2 != self._insar.numberRangeLooksIon) or \
(self._insar.numberAzimuthLooks2 != self._insar.numberAzimuthLooksIon):
#this should be faster using fortran
if interpolationMethod == 0:
rect(ionfiltfile, ionrectfile,
width2,length2,
width3,length3,
nrlo/nrli, 0.0,
0.0, nalo/nali,
(nrlo-nrli)/(2.0*nrli),
(nalo-nali)/(2.0*nali),
'REAL','Bilinear')
#finer, but slower method
else:
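#resample with two passes of 1d cubic interpolation: first along range (width), then along
#azimuth (length); index3 maps each output pixel back onto the input multilooked grid,
#including the half-pixel offsets (nrlo-nrli)/(2.0*nrli) and (nalo-nali)/(2.0*nali)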
ionfilt = np.fromfile(ionfiltfile, dtype=np.float32).reshape(length2, width2)
index2 = np.linspace(0, width2-1, num=width2, endpoint=True)
index3 = np.linspace(0, width3-1, num=width3, endpoint=True) * nrlo/nrli + (nrlo-nrli)/(2.0*nrli)
ionrect = np.zeros((length3, width3), dtype=np.float32)
for i in range(length2):
f = interp1d(index2, ionfilt[i,:], kind='cubic', fill_value="extrapolate")
ionrect[i, :] = f(index3)
index2 = np.linspace(0, length2-1, num=length2, endpoint=True)
index3 = np.linspace(0, length3-1, num=length3, endpoint=True) * nalo/nali + (nalo-nali)/(2.0*nali)
for j in range(width3):
f = interp1d(index2, ionrect[0:length2, j], kind='cubic', fill_value="extrapolate")
ionrect[:, j] = f(index3)
ionrect.astype(np.float32).tofile(ionrectfile)
del ionrect
create_xml(ionrectfile, width3, length3, 'float')
os.rename(ionrectfile, os.path.join('../../insar', ionrectfile))
os.rename(ionrectfile+'.vrt', os.path.join('../../insar', ionrectfile)+'.vrt')
os.rename(ionrectfile+'.xml', os.path.join('../../insar', ionrectfile)+'.xml')
os.chdir('../../insar')
else:
shutil.copyfile(ionfiltfile, os.path.join('../../insar', ionrectfile))
os.chdir('../../insar')
create_xml(ionrectfile, width3, length3, 'float')
#now we are in 'insar'
############################################################
# STEP 4. correct interferogram
############################################################
from isceobj.Alos2Proc.Alos2ProcPublic import renameFile
from isceobj.Alos2Proc.Alos2ProcPublic import runCmd
if self.applyIon:
print('\ncorrect interferogram')
if os.path.isfile(self._insar.multilookDifferentialInterferogramOriginal):
print('original interferogram: {} is already here, do not rename: {}'.format(self._insar.multilookDifferentialInterferogramOriginal, self._insar.multilookDifferentialInterferogram))
else:
print('renaming {} to {}'.format(self._insar.multilookDifferentialInterferogram, self._insar.multilookDifferentialInterferogramOriginal))
renameFile(self._insar.multilookDifferentialInterferogram, self._insar.multilookDifferentialInterferogramOriginal)
cmd = "imageMath.py -e='a*exp(-1.0*J*b)' --a={} --b={} -s BIP -t cfloat -o {}".format(
self._insar.multilookDifferentialInterferogramOriginal,
self._insar.multilookIon,
self._insar.multilookDifferentialInterferogram)
runCmd(cmd)
else:
print('\nionospheric phase estimation finished, but correction of interferogram not requested')
os.chdir('../')
catalog.printToLog(logger, "runIonFilt")
self._insar.procDoc.addAllFromCatalog(catalog)
def computeIonosphere(lowerUnw, upperUnw, wgt, fl, fu, adjFlag, dispersive):
'''
This routine computes the ionospheric phase and removes the relative phase unwrapping errors
lowerUnw: lower band unwrapped interferogram
upperUnw: upper band unwrapped interferogram
wgt: weight
fl: lower band center frequency
fu: upper band center frequency
adjFlag: method for removing relative phase unwrapping errors
0: mean value
1: polynomial
dispersive: compute dispersive or non-dispersive
0: dispersive
1: non-dispersive
'''
#use image size from lower unwrapped interferogram
(length, width)=lowerUnw.shape
##########################################################################################
# ADJUST PHASE USING MEAN VALUE
# #adjust phase of upper band to remove relative phase unwrapping errors
# flag = (lowerUnw!=0)*(cor>=ionParam.corThresholdAdj)
# index = np.nonzero(flag!=0)
# mv = np.mean((lowerUnw - upperUnw)[index], dtype=np.float64)
# print('mean value of phase difference: {}'.format(mv))
# flag2 = (lowerUnw!=0)
# index2 = np.nonzero(flag2)
# #phase for adjustment
# unwd = ((lowerUnw - upperUnw)[index2] - mv) / (2.0*np.pi)
# unw_adj = np.around(unwd) * (2.0*np.pi)
# #adjust phase of upper band
# upperUnw[index2] += unw_adj
# unw_diff = lowerUnw - upperUnw
# print('after adjustment:')
# print('max phase difference: {}'.format(np.amax(unw_diff)))
# print('min phase difference: {}'.format(np.amin(unw_diff)))
##########################################################################################
#adjust phase using mean value
if adjFlag == 0:
flag = (lowerUnw!=0)*(wgt!=0)
index = np.nonzero(flag!=0)
mv = np.mean((lowerUnw - upperUnw)[index], dtype=np.float64)
print('mean value of phase difference: {}'.format(mv))
diff = mv
#adjust phase using a surface
else:
diff = weight_fitting(lowerUnw - upperUnw, wgt, width, length, 1, 1, 1, 1, 2)
flag2 = (lowerUnw!=0)
index2 = np.nonzero(flag2)
#phase for adjustment
unwd = ((lowerUnw - upperUnw) - diff)[index2] / (2.0*np.pi)
unw_adj =
|
np.around(unwd)
|
numpy.around
|
import numpy as np
import scipy as sp
def mask_sparse_matrix_by_rows(M, row_mask):
M_masked = sp.sparse.coo_matrix(M.shape, dtype=M.dtype)
if np.sum(row_mask):
M_coo = M.tocoo()
entries_to_keep = row_mask[M_coo.row]
M_masked = sp.sparse.coo_matrix((M_coo.data[entries_to_keep], (M_coo.row[entries_to_keep], M_coo.col[entries_to_keep])), shape=M.shape, dtype=M.dtype)
return M_masked
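# A minimal usage sketch (hypothetical values): only entries whose row index is flagged
# True in row_mask survive; everything else is dropped from the COO representation.
# M = sp.sparse.random(4, 3, density=0.5, format='csr')
# row_mask = np.array([True, False, True, False])
# M_kept = mask_sparse_matrix_by_rows(M, row_mask)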
def mask_sparse_matrix_by_columns(M, column_mask):
M_masked = sp.sparse.coo_matrix(M.shape, dtype=M.dtype)
if
|
np.sum(column_mask)
|
numpy.sum
|
# Prototype, showing that threads share memory effectively between each other
from threading import Thread
from queue import Queue
import numpy as np
import time
MBs = 20000
size = MBs // 4
def produce(array, q: Queue):
while True:
#array = q.get()
time.sleep(0.2)
q.put(array[size//4:(size*3)//4])
def process(q: Queue):
while True:
array = q.get()
print(array.mean())
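# note: the slice put on the queue is a numpy view, and queue.Queue hands only the object
# reference to the consumer thread, so the large array is allocated once and never copied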
def main():
n = 4
array =
|
np.ones((size, 1024, 1024), dtype=np.float32)
|
numpy.ones
|
"""
Test the nifti_masker module
Functions in this file only test features added by the NiftiMasker class,
not the underlying functions used (e.g. clean()). See test_masking.py and
test_signal.py for this.
"""
# Author: <NAME>, <NAME>
# License: simplified BSD
import os
import shutil
from tempfile import mkdtemp
import nibabel
import numpy as np
import pytest
from nibabel import Nifti1Image
from numpy.testing import assert_array_equal
from nilearn._utils import testing
from nilearn._utils import data_gen
from nilearn._utils.class_inspect import get_params
from nilearn._utils.exceptions import DimensionError
from nilearn.image import index_img
from nilearn.maskers import NiftiMasker
from nilearn.maskers.nifti_masker import _filter_and_mask
from nilearn.image import get_data
def test_auto_mask():
# This is mostly a smoke test
data = np.zeros((9, 9, 9))
data[3:-3, 3:-3, 3:-3] = 10
img = Nifti1Image(data, np.eye(4))
masker = NiftiMasker()
# Smoke test the fit
masker.fit(img)
# Smoke test the transform
# With a 4D img
masker.transform([img, ])
# With a 3D img
masker.transform(img)
# check exception when transform() called without prior fit()
masker2 = NiftiMasker(mask_img=img)
with pytest.raises(ValueError, match='has not been fitted. '):
masker2.transform(img)
def test_detrend():
# Check that detrending doesn't do something stupid with 3D images
data = np.zeros((9, 9, 9))
data[3:-3, 3:-3, 3:-3] = 10
img = Nifti1Image(data, np.eye(4))
mask = data.astype(np.int)
mask_img = Nifti1Image(mask, np.eye(4))
masker = NiftiMasker(mask_img=mask_img, detrend=True)
# Smoke test the fit
X = masker.fit_transform(img)
assert np.any(X != 0)
def test_resample():
# Check that target_affine triggers the right resampling
data = np.zeros((9, 9, 9))
data[3:-3, 3:-3, 3:-3] = 10
img = Nifti1Image(data, np.eye(4))
mask = data.astype(np.int)
mask_img = Nifti1Image(mask, np.eye(4))
masker = NiftiMasker(mask_img=mask_img, target_affine=2 * np.eye(3))
# Smoke test the fit
X = masker.fit_transform(img)
assert np.any(X != 0)
def test_with_files():
# Standard masking
data = np.zeros((40, 40, 40, 2))
data[20, 20, 20] = 1
data_img = Nifti1Image(data, np.eye(4))
with testing.write_tmp_imgs(data_img) as filename:
masker = NiftiMasker()
masker.fit(filename)
masker.transform(filename)
def test_nan():
data = np.ones((9, 9, 9))
data[0] = np.nan
data[:, 0] = np.nan
data[:, :, 0] = np.nan
data[-1] = np.nan
data[:, -1] = np.nan
data[:, :, -1] = np.nan
data[3:-3, 3:-3, 3:-3] = 10
img = Nifti1Image(data, np.eye(4))
masker = NiftiMasker(mask_args=dict(opening=0))
masker.fit(img)
mask = get_data(masker.mask_img_)
assert mask[1:-1, 1:-1, 1:-1].all()
assert not mask[0].any()
assert not mask[:, 0].any()
assert not mask[:, :, 0].any()
assert not mask[-1].any()
assert not mask[:, -1].any()
assert not mask[:, :, -1].any()
def test_matrix_orientation():
"""Test if processing is performed along the correct axis."""
# the "step" kind generate heavyside-like signals for each voxel.
# all signals being identical, standardizing along the wrong axis
# would leave a null signal. Along the correct axis, the step remains.
fmri, mask = data_gen.generate_fake_fmri(shape=(40, 41, 42), kind="step")
masker = NiftiMasker(mask_img=mask, standardize=True, detrend=True)
timeseries = masker.fit_transform(fmri)
assert(timeseries.shape[0] == fmri.shape[3])
assert(timeseries.shape[1] == get_data(mask).sum())
std = timeseries.std(axis=0)
assert(std.shape[0] == timeseries.shape[1]) # paranoid
assert(not np.any(std < 0.1))
# Test inverse transform
masker = NiftiMasker(mask_img=mask, standardize=False, detrend=False)
masker.fit()
timeseries = masker.transform(fmri)
recovered = masker.inverse_transform(timeseries)
np.testing.assert_array_almost_equal(get_data(recovered), get_data(fmri))
def test_mask_3d():
# Dummy mask
data = np.zeros((40, 40, 40, 2))
data[20, 20, 20] = 1
data_img = Nifti1Image(data, np.eye(4))
with testing.write_tmp_imgs(data_img, create_files=True)\
as filename:
masker = NiftiMasker(mask_img=filename)
pytest.raises(TypeError, masker.fit)
def test_mask_4d():
# Dummy mask
mask = np.zeros((10, 10, 10), dtype=int)
mask[3:7, 3:7, 3:7] = 1
mask_bool = mask.astype(bool)
mask_img = Nifti1Image(mask, np.eye(4))
# Dummy data
data = np.zeros((10, 10, 10, 5), dtype=int)
data[..., 0] = 1
data[..., 1] = 2
data[..., 2] = 3
data_img_4d = Nifti1Image(data, np.eye(4))
data_imgs = [index_img(data_img_4d, 0), index_img(data_img_4d, 1),
index_img(data_img_4d, 2)]
# check whether transform is indeed selecting niimgs subset
sample_mask = np.array([0, 2])
masker = NiftiMasker(mask_img=mask_img)
masker.fit()
data_trans = masker.transform(data_imgs, sample_mask=sample_mask)
data_trans_img = index_img(data_img_4d, sample_mask)
data_trans_direct = get_data(data_trans_img)[mask_bool, :]
data_trans_direct = np.swapaxes(data_trans_direct, 0, 1)
assert_array_equal(data_trans, data_trans_direct)
masker = NiftiMasker(mask_img=mask_img)
masker.fit()
data_trans2 = masker.transform(data_img_4d, sample_mask=sample_mask)
assert_array_equal(data_trans2, data_trans_direct)
diff_sample_mask = np.array([2, 4])
data_trans_img_diff = index_img(data_img_4d, diff_sample_mask)
data_trans_direct_diff = get_data(data_trans_img_diff)[mask_bool, :]
data_trans_direct_diff = np.swapaxes(data_trans_direct_diff, 0, 1)
masker = NiftiMasker(mask_img=mask_img)
masker.fit()
data_trans3 = masker.transform(
data_img_4d, sample_mask=diff_sample_mask
)
assert_array_equal(data_trans3, data_trans_direct_diff)
def test_4d_single_scan():
mask = np.zeros((10, 10, 10))
mask[3:7, 3:7, 3:7] = 1
mask_img = Nifti1Image(mask, np.eye(4))
# Test that 4D images with last dimension = 1, given in a list, are
# treated as 3D
rng = np.random.RandomState(42)
data_5d = [rng.random_sample((10, 10, 10, 1)) for i in range(5)]
data_4d = [d[..., 0] for d in data_5d]
data_5d = [nibabel.Nifti1Image(d, np.eye(4)) for d in data_5d]
data_4d = [nibabel.Nifti1Image(d, np.eye(4)) for d in data_4d]
masker = NiftiMasker(mask_img=mask_img)
masker.fit()
data_trans_5d = masker.transform(data_5d)
data_trans_4d = masker.transform(data_4d)
assert_array_equal(data_trans_4d, data_trans_5d)
def test_5d():
mask = np.zeros((10, 10, 10))
mask[3:7, 3:7, 3:7] = 1
mask_img = Nifti1Image(mask, np.eye(4))
# Test that a list of 4D images (i.e. 5D input overall) raises a
# DimensionError
rng = np.random.RandomState(42)
data_5d = [rng.random_sample((10, 10, 10, 3)) for i in range(5)]
data_5d = [nibabel.Nifti1Image(d, np.eye(4)) for d in data_5d]
masker = NiftiMasker(mask_img=mask_img)
masker.fit()
with pytest.raises(
DimensionError,
match="Input data has incompatible dimensionality: "
"Expected dimension is 4D and you provided "
"a list of 4D images \\(5D\\)."):
masker.transform(data_5d)
def test_sessions():
# Test the sessions vector
data = np.ones((40, 40, 40, 4))
# Create a border, so that the masking work well
data[0] = 0
data[-1] = 0
data[:, -1] = 0
data[:, 0] = 0
data[..., -1] = 0
data[..., 0] = 0
data[20, 20, 20] = 1
data_img = Nifti1Image(data, np.eye(4))
masker = NiftiMasker(runs=np.ones(3, dtype=np.int))
pytest.raises(ValueError, masker.fit_transform, data_img)
def test_joblib_cache():
from joblib import hash, Memory
mask = np.zeros((40, 40, 40))
mask[20, 20, 20] = 1
mask_img = Nifti1Image(mask, np.eye(4))
with testing.write_tmp_imgs(mask_img, create_files=True) as filename:
masker = NiftiMasker(mask_img=filename)
masker.fit()
mask_hash = hash(masker.mask_img_)
get_data(masker.mask_img_)
assert mask_hash == hash(masker.mask_img_)
# Test a tricky issue with memmapped joblib.memory that makes
# imgs returned by inverse_transform impossible to save
cachedir = mkdtemp()
try:
masker.memory = Memory(location=cachedir, mmap_mode='r',
verbose=0)
X = masker.transform(mask_img)
# inverse_transform a first time, so that the result is cached
out_img = masker.inverse_transform(X)
out_img = masker.inverse_transform(X)
out_img.to_filename(os.path.join(cachedir, 'test.nii'))
finally:
# allows deleting "filename" on Windows
del masker
shutil.rmtree(cachedir, ignore_errors=True)
def test_mask_strategy_errors():
# Error with unknown mask_strategy
mask = NiftiMasker(mask_strategy='oops')
with pytest.raises(
ValueError,
match="Unknown value of mask_strategy 'oops'"):
mask.fit()
# Warning with deprecated 'template' strategy
img = np.random.RandomState(42).uniform(size=(9, 9, 5))
img = Nifti1Image(img, np.eye(4))
mask = NiftiMasker(mask_strategy='template')
with pytest.warns(UserWarning,
match="Masking strategy 'template' is deprecated."):
mask.fit(img)
def test_compute_epi_mask():
# Taken from test_masking.py, but used to test that the masker class
# is passing parameters appropriately.
mean_image = np.ones((9, 9, 3))
mean_image[3:-2, 3:-2, :] = 10
mean_image[5, 5, :] = 11
mean_image = Nifti1Image(mean_image.astype(float), np.eye(4))
masker = NiftiMasker(mask_strategy='epi',
mask_args=dict(opening=False))
masker.fit(mean_image)
mask1 = masker.mask_img_
masker2 = NiftiMasker(mask_strategy='epi',
mask_args=dict(opening=False, exclude_zeros=True))
masker2.fit(mean_image)
mask2 = masker2.mask_img_
# With an array with no zeros, exclude_zeros should not make
# any difference
np.testing.assert_array_equal(get_data(mask1), get_data(mask2))
# Check that padding with zeros does not change the extracted mask
mean_image2 = np.zeros((30, 30, 3))
mean_image2[3:12, 3:12, :] = get_data(mean_image)
mean_image2 = Nifti1Image(mean_image2, np.eye(4))
masker3 = NiftiMasker(mask_strategy='epi',
mask_args=dict(opening=False, exclude_zeros=True))
masker3.fit(mean_image2)
mask3 = masker3.mask_img_
np.testing.assert_array_equal(get_data(mask1),
get_data(mask3)[3:12, 3:12])
# However, without exclude_zeros, it does
masker4 = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=False))
masker4.fit(mean_image2)
mask4 = masker4.mask_img_
assert not np.allclose(get_data(mask1),
get_data(mask4)[3:12, 3:12])
def _get_random_img(shape):
img = np.random.RandomState(42).uniform(size=shape)
return Nifti1Image(img, np.eye(4))
@pytest.fixture
def expected_mask(mask_args):
mask = np.zeros((9, 9, 5))
if mask_args == dict():
return mask
else:
mask[2:7, 2:7, 2] = 1
return mask
@pytest.mark.parametrize('strategy',
[f'{p}-template' for p in
['whole-brain', 'gm', 'wm']])
@pytest.mark.parametrize('mask_args',
[dict(), dict(threshold=0.)])
def test_compute_brain_mask(strategy, mask_args, expected_mask):
# Check masker for template masking strategy
img = _get_random_img((9, 9, 5))
masker = NiftiMasker(mask_strategy=strategy,
mask_args=mask_args)
masker.fit(img)
np.testing.assert_array_equal(get_data(masker.mask_img_),
expected_mask)
def test_filter_and_mask_error():
data = np.zeros([20, 30, 40, 5])
mask = np.zeros([20, 30, 40, 2])
mask[10, 15, 20, :] = 1
data_img = nibabel.Nifti1Image(data, np.eye(4))
mask_img = nibabel.Nifti1Image(mask, np.eye(4))
masker = NiftiMasker()
params = get_params(NiftiMasker, masker)
with pytest.raises(
DimensionError,
match="Input data has incompatible dimensionality: "
"Expected dimension is 3D and you provided "
"a 4D image."):
_filter_and_mask(data_img, mask_img, params)
def test_filter_and_mask():
data = np.zeros([20, 30, 40, 5])
mask = np.ones([20, 30, 40])
data_img = nibabel.Nifti1Image(data, np.eye(4))
mask_img = nibabel.Nifti1Image(mask, np.eye(4))
masker = NiftiMasker()
params = get_params(NiftiMasker, masker)
# Test return_affine = False
data = _filter_and_mask(data_img, mask_img, params)
assert data.shape == (5, 24000)
def test_dtype():
data_32 = np.zeros((9, 9, 9), dtype=np.float32)
data_64 = np.zeros((9, 9, 9), dtype=np.float64)
data_32[2:-2, 2:-2, 2:-2] = 10
data_64[2:-2, 2:-2, 2:-2] = 10
affine_32 = np.eye(4, dtype=np.float32)
affine_64 = np.eye(4, dtype=np.float64)
img_32 = Nifti1Image(data_32, affine_32)
img_64 = Nifti1Image(data_64, affine_64)
masker_1 = NiftiMasker(dtype='auto')
assert(masker_1.fit_transform(img_32).dtype == np.float32)
assert(masker_1.fit_transform(img_64).dtype == np.float32)
masker_2 = NiftiMasker(dtype='float64')
assert(masker_2.fit_transform(img_32).dtype == np.float64)
assert(masker_2.fit_transform(img_64).dtype == np.float64)
def test_standardization():
rng = np.random.RandomState(42)
data_shape = (9, 9, 5)
n_samples = 500
signals = rng.standard_normal(size=(np.prod(data_shape), n_samples))
means = rng.standard_normal(size=(np.prod(data_shape), 1)) * 50 + 1000
signals += means
img = Nifti1Image(signals.reshape(data_shape + (n_samples,)),
|
np.eye(4)
|
numpy.eye
|
'''
@author: <NAME>
@copyright: Copyright 2016-2019, <NAME>.
@license: MIT
@contact: <EMAIL>
'''
import numpy as np
def accuracy(conf):
total_correct = 0.
nb_classes = conf.shape[0]
for i in
|
np.arange(0,nb_classes)
|
numpy.arange
|
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import sys
import numpy as np
import sympy as sym
import pyqtgraph as pg
import pyqtgraph.opengl as gl
from pyqtgraph.dockarea import *
import serial
import time
import thread
import qdarkstyle
# Define the robot class
class robot:
def __init__(self,rp_vector):
try:
self.ser = serial.Serial('COM6', 9600)
except serial.serialutil.SerialException as e:
print("No serial connected")
else:
pass
finally:
pass
self.rp_vector = rp_vector
self.size = len(rp_vector)
self.joints = ["0"]*self.size
self.alpha = np.zeros(self.size)
self.a = np.zeros(self.size)
self.d = np.zeros(self.size)
self.theta = np.zeros(self.size)
self.T = np.zeros((4,4,self.size))
self.T_full = np.zeros((4,4))
self.joint_values = np.zeros(self.size+1)
self.joint_values_d = np.zeros(self.size)
self.O =
|
np.zeros((3,self.size+1))
|
numpy.zeros
|
import sys
import math
import numpy as np
## the radius of the earth
R = 6.371*10**6
def K0(x1,x2,pars):
"""
Args: x1,x2 are 1d numpy arrays with length 2
x1 = [a,b], with a as timestamp and b as latitude or longitude
pars, a list of parameters
Return: a scalar, a similarity value between x1 and x2
"""
[l1,l2,l3,a1,a2,b1,b2,b3] = pars
k1 = np.exp(-abs(x1[0]-x2[0])/l1)*np.exp(-(np.sin(abs(x1[0]-x2[0])/86400*math.pi))**2/a1)
k2 = np.exp(-abs(x1[0]-x2[0])/l2)*np.exp(-(np.sin(abs(x1[0]-x2[0])/604800*math.pi))**2/a2)
k3 = np.exp(-abs(x1[1]-x2[1])/l3)
return b1*k1+b2*k2+b3*k3
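# K0 is a weighted sum of three components: k1, an exponentially damped daily-periodic term
# (period 86400 s); k2, an exponentially damped weekly-periodic term (period 604800 s); and
# k3, an exponential kernel on the spatial coordinate (latitude or longitude)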
## all the functions below are based on the equations in [Csato and Opper (2002)]
## ref: http://www.cs.ubbcluj.ro/~csatol/SOGP/thesis/Gaussian_Process.html#SECTION00521000000000000000
## ref: http://www.cs.ubbcluj.ro/~csatol/SOGP/thesis/Sparsity_in.html#cha:sparse
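## roughly, these routines maintain the sparse online GP state of Csato and Opper (2002):
## a set of basis vectors (bv), the mean coefficients alpha, the covariance correction C and,
## via Q, the inverse Gram matrix of the basis vectors; in that scheme gamma measures how much
## of a new input is not explained by the current basis vectors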
def update_K(bv,K,pars):
"""
similarity matrix between bv's
Args: bv, a list of basis vectors
K, 2d numpy array
Return: 2d numpy array
"""
if len(bv)==0:
mat = np.array([1])
else:
d = np.shape(K)[0]
row = np.ones(d)
column = np.ones([d+1,1])
for i in range(d):
row[i] = column[i,0] = K0(bv[-1][:-1],bv[i][:-1],pars)
mat = np.hstack([np.vstack([K,row]),column])
return mat
def update_k(bv,x_current,pars):
"""
similarity vector between the current input and all bv's; t starts from 0
Args: bv, a list of basis vectors
x_current, current input, X[i,:], 1d array
Return: 1d numpy array
"""
d = len(bv)
if d==0:
out = np.array([0])
if d>=1:
out = np.zeros(d)
for i in range(d):
out[i] = K0(x_current,bv[i][:-1],pars)
return out
def update_e_hat(Q,k):
"""
Args: Q, 2d numpy array
k, 1d numpy array
Return: 1d numpy array
"""
if np.shape(Q)[0]==0:
out = np.array([0])
else:
out = np.dot(Q,k)
return out
def update_gamma(k,e_hat):
"""
Args: k, 1d numpy array
e_hat, 1d numpy array
Return: scalar
"""
return 1-np.dot(k,e_hat)
def update_q(k,alpha,sigmax,y_current):
"""
Args: k, alpha: 1d numpy arrays
sigmax, y_current: scalar
Return: scalar
"""
if len(alpha)==0:
out = y_current/sigmax
else:
out = (y_current-np.dot(k,alpha))/sigmax
return out
def update_s_hat(C,k,e_hat):
"""
Args: C: 2d numpy array
k, e_hat: 1d numpy array
Return: 1d numpy array
"""
return np.dot(C,k)+e_hat
def update_eta(gamma,sigmax):
"""
Args: gamma and sigmax: scalar
Return: scalar
"""
r = -1/sigmax
return 1/(1+gamma*r)
def update_alpha_hat(alpha,q,eta,s_hat):
"""
Args: q, eta: scalar
alpha, s_hat: 1d numpy array
Return: 1d numpy array
"""
return alpha+q*eta*s_hat
def update_c_hat(C,sigmax,eta,s_hat):
"""
Args: sigmax, eta: scalar
C: 2d array
s_hat: 1d array
Return: 2d array
"""
r = -1/sigmax
return C+r*eta*np.outer(s_hat,s_hat)
def update_s(C,k):
"""
Args: C: 2d array
k: 1d array
Return: 1d array
"""
if np.shape(C)[0]==0:
s = np.array([1])
else:
temp = np.dot(C,k)
s = np.append(temp,1)
return s
def update_alpha(alpha,q,s):
"""
Args: alpha, s: 1d array
q: scalar
Return: 1d array
"""
T_alpha = np.append(alpha,0)
new_alpha = T_alpha + q*s
return new_alpha
def update_c(C,sigmax,s):
"""
Args: C: 2d array
sigmax: scalar
s: 1d array
Return: 1d array
"""
d = np.shape(C)[0]
if d==0:
U_c = np.array([0])
else:
U_c = np.hstack([np.vstack([C,
|
np.zeros(d)
|
numpy.zeros
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : <NAME>
# @Contact : <EMAIL>
import numpy as np
from sklearn.datasets import load_digits, load_boston
from sklearn.model_selection import train_test_split
from autoflow.estimator.wrap_lightgbm import LGBMRegressor, LGBMClassifier
from autoflow.tests.base import EstimatorTestCase
def calc_balanced_sample_weight(y_train: np.ndarray):
unique, counts =
|
np.unique(y_train, return_counts=True)
|
numpy.unique
|
"""
@package ion_functions.test.adcp_functions
@file ion_functions/test/test_adcp_functions.py
@author <NAME>, <NAME>, <NAME>
@brief Unit tests for adcp_functions module
"""
from nose.plugins.attrib import attr
from ion_functions.test.base_test import BaseUnitTestCase
import numpy as np
from ion_functions.data import adcp_functions as af
from ion_functions.data.adcp_functions import ADCP_FILLVALUE
from ion_functions.data.generic_functions import SYSTEM_FILLVALUE
@attr('UNIT', group='func')
class TestADCPFunctionsUnit(BaseUnitTestCase):
def setUp(self):
"""
Implemented by:
2014-02-06: <NAME>. Initial Code.
2015-06-12: <NAME>. Changed raw beam data to type int. This
change did not affect any previously written unit tests.
"""
# set test inputs -- values from DPS
self.b1 = np.array([[-0.0300, -0.2950, -0.5140, -0.2340, -0.1880,
0.2030, -0.3250, 0.3050, -0.2040, -0.2940]]) * 1000
self.b2 = np.array([[0.1800, -0.1320, 0.2130, 0.3090, 0.2910,
0.0490, 0.1880, 0.3730, -0.0020, 0.1720]]) * 1000
self.b3 = np.array([[-0.3980, -0.4360, -0.1310, -0.4730, -0.4430,
0.1880, -0.1680, 0.2910, -0.1790, 0.0080]]) * 1000
self.b4 = np.array([[-0.2160, -0.6050, -0.0920, -0.0580, 0.4840,
-0.0050, 0.3380, 0.1750, -0.0800, -0.5490]]) * 1000
# the data type of the raw beam velocities is int;
# set b1-b4 to int so that fill replacement can be tested.
self.b1 = self.b1.astype(int)
self.b2 = self.b2.astype(int)
self.b3 = self.b3.astype(int)
self.b4 = self.b4.astype(int)
#
self.echo = np.array([[0, 25, 50, 75, 100, 125, 150, 175, 200, 225, 250]])
self.sfactor = 0.45
# units of compass data are in centidegrees.
self.heading = 9841
self.pitch = 69
self.roll = -254
self.orient = 1
self.lat = 50.0000
self.lon = -145.0000
self.depth = 0.0
self.ntp = 3545769600.0 # May 12, 2012
# set expected results -- velocity profiles in earth coordinates
# (values in DPS)
self.uu = np.array([[0.2175, -0.2814, -0.1002, 0.4831, 1.2380,
-0.2455, 0.6218, -0.1807, 0.0992, -0.9063]])
self.vv = np.array([[-0.3367, -0.1815, -1.0522, -0.8676, -0.8919,
0.2585, -0.8497, -0.0873, -0.3073, -0.5461]])
self.ww = np.array([[0.1401, 0.3977, 0.1870, 0.1637, 0.0091,
-0.1290, 0.0334, -0.3017, 0.1384, 0.1966]])
# set expected results -- magnetic variation correction applied
# (computed in Matlab using above values and mag_var.m)
self.uu_cor = np.array([[0.1099, -0.3221, -0.4025, 0.2092, 0.9243,
-0.1595, 0.3471, -0.1983, 0.0053, -1.0261]])
self.vv_cor = np.array([[-0.3855, -0.0916, -0.9773, -0.9707, -1.2140,
0.3188, -0.9940, -0.0308, -0.3229, -0.2582]])
# set the expected results -- error velocity
self.ee = np.array([[0.789762, 0.634704, -0.080630, 0.626434, 0.064090,
0.071326, -0.317352, 0.219148, 0.054787, 0.433129]])
# set the expected results -- echo intensity conversion from counts to dB
self.dB = np.array([[0.00, 11.25, 22.50, 33.75, 45.00, 56.25, 67.50,
78.75, 90.00, 101.25, 112.50]])
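# note: the expected dB values above are simply the raw echo intensity counts scaled by
# sfactor, i.e. counts * 0.45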
def test_adcp_beam(self):
"""
Directly tests DPA functions adcp_beam_eastward, adcp_beam_northward,
adcp_beam_vertical, and adcp_beam_error.
Tests adcp_beam2ins, adcp_ins2earth and magnetic_correction functions
for ADCPs that output data in beam coordinates. All three functions
must return the correct output for the final test cases to work.
Values based on those defined in DPS:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
Implemented by:
2013-04-10: <NAME>. Initial code.
2014-02-06: <NAME>. Added tests to confirm arrays of
arrays can be processed (in other words, vectorized the
code).
2015-06-23: <NAME>. Revised documentation. Added unit test
for the function adcp_beam_error.
Notes:
The original suite of tests within this function did not provide a
test for adcp_beam_error. However, adcp_beam_error and vadcp_beam_error
are identical functions, and vadcp_beam_error is implicitly tested in the
test_vadcp_beam function when the 4th output argument of adcp_beam2inst
is tested. Therefore values to directly test adcp_beam_error were
then derived from the function itself and included as part of the unit
test within this code (test_adcp_beam).
"""
# single record case
got_uu_cor = af.adcp_beam_eastward(self.b1, self.b2, self.b3, self.b4,
self.heading, self.pitch, self.roll, self.orient,
self.lat, self.lon, self.depth, self.ntp)
got_vv_cor = af.adcp_beam_northward(self.b1, self.b2, self.b3, self.b4,
self.heading, self.pitch, self.roll, self.orient,
self.lat, self.lon, self.depth, self.ntp)
got_ww = af.adcp_beam_vertical(self.b1, self.b2, self.b3, self.b4,
self.heading, self.pitch, self.roll, self.orient)
got_ee = af.adcp_beam_error(self.b1, self.b2, self.b3, self.b4)
# test results
np.testing.assert_array_almost_equal(got_uu_cor, self.uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, self.vv_cor, 4)
np.testing.assert_array_almost_equal(got_ww, self.ww, 4)
np.testing.assert_array_almost_equal(got_ee, self.ee, 4)
# reset the test inputs for multiple records
b1 = np.tile(self.b1, (24, 1))
b2 = np.tile(self.b2, (24, 1))
b3 = np.tile(self.b3, (24, 1))
b4 = np.tile(self.b4, (24, 1))
heading = np.ones(24, dtype=np.int) * self.heading
pitch = np.ones(24, dtype=np.int) * self.pitch
roll = np.ones(24, dtype=np.int) * self.roll
orient = np.ones(24, dtype=np.int) * self.orient
lat = np.ones(24) * self.lat
lon = np.ones(24) * self.lon
depth = np.ones(24) * self.depth
ntp = np.ones(24) * self.ntp
# reset outputs for multiple records
uu_cor = np.tile(self.uu_cor, (24, 1))
vv_cor = np.tile(self.vv_cor, (24, 1))
ww = np.tile(self.ww, (24, 1))
ee = np.tile(self.ee, (24, 1))
# multiple record case
got_uu_cor = af.adcp_beam_eastward(b1, b2, b3, b4,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
got_vv_cor = af.adcp_beam_northward(b1, b2, b3, b4,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
got_ww = af.adcp_beam_vertical(b1, b2, b3, b4,
heading, pitch, roll, orient)
got_ee = af.adcp_beam_error(b1, b2, b3, b4)
# test results
np.testing.assert_array_almost_equal(got_uu_cor, uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, vv_cor, 4)
np.testing.assert_array_almost_equal(got_ww, ww, 4)
np.testing.assert_array_almost_equal(got_ee, ee, 4)
def test_adcp_beam_with_fill(self):
"""
Directly tests DPA functions adcp_beam_eastward, adcp_beam_northward,
adcp_beam_vertical, and adcp_beam_error when system fill values and
ADCP fill values (bad value sentinels) are present in the data stream.
Non-fill values are based on those used in test_adcp_beam in this module.
Implemented by:
2013-06-24: <NAME>. Initial code.
Notes:
"""
# for convenience
sfill = SYSTEM_FILLVALUE
afill = ADCP_FILLVALUE
### set input data
# units of compass data are in centidegrees.
heading = np.array([9841])
pitch = np.array([69])
roll = np.array([-254])
missingroll = np.array([sfill])
orient = np.array([1])
lat = np.array([50.0000])
lon = np.array([-145.0000])
depth = np.array([0.0])
ntp = np.array([3545769600.0]) # May 12, 2012
###
# for positional clarity, input beam and expected velocities will be explicitly
# enumerated for each single time record test case.
###
### single time record case; missing roll data
## the ADCP does not use its bad flag sentinel for compass data, only beam data.
## however, it is possible that CI could supply the system fillvalue for missing compass data.
# input data
# beam velocity units are mm/s
b1_x1 = np.array([[-30, -295, -514, -234, -188, 203, -325, 305, -204, -294]])
b2_x1 = np.array([[180, -132, 213, 309, 291, 49, 188, 373, -2, 172]])
b3_x1 = np.array([[-398, -436, -131, -473, -443, 188, -168, 291, -179, 8]])
b4_x1 = np.array([[-216, -605, -92, -58, 484, -5, 338, 175, -80, -549]])
# expected results if all good beam and compass data
# these will be used later in the multiple time record test
uu_x0 = np.array([[0.1099, -0.3221, -0.4025, 0.2092, 0.9243,
-0.1595, 0.3471, -0.1983, 0.0053, -1.0261]])
vv_x0 = np.array([[-0.3855, -0.0916, -0.9773, -0.9707, -1.2140,
0.3188, -0.9940, -0.0308, -0.3229, -0.2582]])
ww_x0 = np.array([[0.1401, 0.3977, 0.1870, 0.1637, 0.0091,
-0.1290, 0.0334, -0.3017, 0.1384, 0.1966]])
ee_x0 = np.array([[0.789762, 0.634704, -0.080630, 0.626434, 0.064090,
0.071326, -0.317352, 0.219148, 0.054787, 0.433129]])
# expected results for all good beam data, missing roll data;
# nans for all results except for the error velocity, which does not depend on the compass
uu_x1 = uu_x0 * np.nan
vv_x1 = vv_x0 * np.nan
ww_x1 = ww_x0 * np.nan
ee_x1 = np.copy(ee_x0)
uu_calc = af.adcp_beam_eastward(b1_x1, b2_x1, b3_x1, b4_x1, heading, pitch,
missingroll,
orient, lat, lon, depth, ntp)
vv_calc = af.adcp_beam_northward(b1_x1, b2_x1, b3_x1, b4_x1, heading, pitch,
missingroll,
orient, lat, lon, depth, ntp)
ww_calc = af.adcp_beam_vertical(b1_x1, b2_x1, b3_x1, b4_x1, heading, pitch,
missingroll,
orient)
ee_calc = af.adcp_beam_error(b1_x1, b2_x1, b3_x1, b4_x1)
# test results
np.testing.assert_array_almost_equal(uu_calc, uu_x1, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x1, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_x1, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x1, 4)
### single time record case; missing and bad-flagged beam data, good compass data
# input data
b1_x2 = np.array([[sfill, -295, -514, -234, -188, 203, -325, afill, -204, -294]])
b2_x2 = np.array([[sfill, -132, 213, 309, 291, 49, 188, afill, -2, sfill]])
b3_x2 = np.array([[sfill, -436, -131, -473, -443, 188, -168, afill, -179, 8]])
b4_x2 = np.array([[sfill, -605, -92, -58, afill, -5, 338, afill, -80, -549]])
# expected
uu_x2 = np.array([[np.nan, -0.3221, -0.4025, 0.2092, np.nan,
-0.1595, 0.3471, np.nan, 0.0053, np.nan]])
vv_x2 = np.array([[np.nan, -0.0916, -0.9773, -0.9707, np.nan,
0.3188, -0.9940, np.nan, -0.3229, np.nan]])
ww_x2 = np.array([[np.nan, 0.3977, 0.1870, 0.1637, np.nan,
-0.1290, 0.0334, np.nan, 0.1384, np.nan]])
ee_x2 = np.array([[np.nan, 0.634704, -0.080630, 0.626434, np.nan,
0.071326, -0.317352, np.nan, 0.054787, np.nan]])
# calculated
uu_calc = af.adcp_beam_eastward(b1_x2, b2_x2, b3_x2, b4_x2,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
vv_calc = af.adcp_beam_northward(b1_x2, b2_x2, b3_x2, b4_x2,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
ww_calc = af.adcp_beam_vertical(b1_x2, b2_x2, b3_x2, b4_x2,
heading, pitch, roll, orient)
ee_calc = af.adcp_beam_error(b1_x2, b2_x2, b3_x2, b4_x2)
# test results
np.testing.assert_array_almost_equal(uu_calc, uu_x2, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x2, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_x2, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x2, 4)
### multiple (5) record case
## reset the test inputs for 5 time records
# 1st time record is the bad/missing beam data case above
# 2nd time record is a missing heading data case
# 3rd time record is all good data
# 4th time record is bad/missing beam and missing pitch data.
# 5th time record is missing orientation data
b1 = np.vstack((b1_x2, b1_x1, b1_x1, b1_x2, b1_x1))
b2 = np.vstack((b2_x2, b2_x1, b2_x1, b2_x2, b2_x1))
b3 = np.vstack((b3_x2, b3_x1, b3_x1, b3_x2, b3_x1))
b4 = np.vstack((b4_x2, b4_x1, b4_x1, b4_x2, b4_x1))
heading = np.hstack((heading, sfill, heading, heading, heading))
pitch = np.hstack((pitch, pitch, pitch, sfill, pitch))
roll = np.tile(roll, 5)
orient = np.hstack((orient, orient, orient, orient, sfill))
lat = np.tile(lat, 5)
lon = np.tile(lon, 5)
depth = np.tile(depth, 5)
ntp = np.tile(ntp, 5)
# set expected outputs for these 5 records
# notes:
# (1) heading is not used in the calculation of vertical velocity,
# therefore the second entry to ww_xpctd is good data out (ww_x0),
# not nans as resulted from the missingroll test.
# (2) pitch is not used in the calculation of error velocity, so that
# in the mixed case (time record 4) the error velocity should be
# the same as that for the pure bad/missing beam case (ee_x2, 1st
# and 4th entries in ee_xpctd).
# (3) the orientation argument affects the roll calculation, so that
# when its value is missing (5th time record) the expected result
# would be the same as if the roll value were missing. therefore
# the 5th column entries are all x1 results.
uu_xpctd = np.vstack((uu_x2, uu_x1, uu_x0, uu_x1, uu_x1))
vv_xpctd = np.vstack((vv_x2, vv_x1, vv_x0, vv_x1, vv_x1))
ww_xpctd = np.vstack((ww_x2, ww_x0, ww_x0, ww_x1, ww_x1))
ee_xpctd = np.vstack((ee_x2, ee_x1, ee_x0, ee_x2, ee_x1))
# calculated
uu_calc = af.adcp_beam_eastward(b1, b2, b3, b4,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
vv_calc = af.adcp_beam_northward(b1, b2, b3, b4,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
ww_calc = af.adcp_beam_vertical(b1, b2, b3, b4,
heading, pitch, roll, orient)
ee_calc = af.adcp_beam_error(b1, b2, b3, b4)
# test results
np.testing.assert_array_almost_equal(uu_calc, uu_xpctd, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_xpctd, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_xpctd, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_xpctd, 4)
def test_adcp_earth(self):
"""
Tests magnetic_correction function for ADCPs set to output data in the
Earth Coordinate system.
Values were not defined in DPS, were recreated using test values above:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
Implemented by:
2014-02-06: <NAME>. Initial code.
2015-06-10: <NAME>.
Changed adcp_ins2earth to require the units of the compass
data to be in centidegrees.
"""
# set the test data
u, v, w, e = af.adcp_beam2ins(self.b1, self.b2, self.b3, self.b4)
### old adcp_ins2earth returned 3 variables (CEW)
# adcp_ins2earth now requires compass data in units of centidegrees (RAD)
uu, vv, ww = af.adcp_ins2earth(u, v, w, self.heading, self.pitch,
self.roll, self.orient)
# test the magnetic variation correction
got_uu_cor = af.adcp_earth_eastward(uu, vv, self.depth, self.lat, self.lon, self.ntp)
got_vv_cor = af.adcp_earth_northward(uu, vv, self.depth, self.lat, self.lon, self.ntp)
np.testing.assert_array_almost_equal(got_uu_cor, self.uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, self.vv_cor, 4)
# reset the test inputs for multiple records using the integer inputs.
uu = np.tile(uu, (24, 1))
vv = np.tile(vv, (24, 1))
depth = np.ones(24) * self.depth
lat = np.ones(24) * self.lat
lon = np.ones(24) * self.lon
ntp = np.ones(24) * self.ntp
# reset expected results for multiple records
uu_cor = np.tile(self.uu_cor, (24, 1))
vv_cor = np.tile(self.vv_cor, (24, 1))
# compute the results for multiple records
got_uu_cor = af.adcp_earth_eastward(uu, vv, depth, lat, lon, ntp)
got_vv_cor = af.adcp_earth_northward(uu, vv, depth, lat, lon, ntp)
# test the magnetic variation correction
np.testing.assert_array_almost_equal(got_uu_cor, uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, vv_cor, 4)
def test_adcp_earth_int_input_velocity_data(self):
"""
Tests adcp_earth_eastward and adcp_earth_northward using int type raw velocity data,
as will be supplied by CI. Also tests the almost trivial functions adcp_earth_vertical
and adcp_earth_error (unit change).
Input raw velocity values were derived from the float unit test in test_adcp_earth
by rounding the uu and vv float output from adcp_ins2earth. These int inputs failed
the assert_array_almost_equal unit tests (decimals=4) in test_adcp_earth because of
round-off error but passed when the agreement precision was relaxed to decimals=3.
This is taken as justification to more precisely calculate the expected values for
unit tests in the current module from adcp_earth_eastward and adcp_earth_northward
themselves (the very modules being tested), using as input the type int raw velocity
data. Because these DPA functions were used to derive their own check data, the
original (float type input velocity data) unit tests are retained in the
test_adcp_earth function.
The tests in this module will be used to derive unit tests checking the replacement
of ADCP int bad value sentinels (-32768) with Nans; these tests require that the
raw velocity data be of type int.
Implemented by:
2014-06-16: <NAME>. Initial code.
"""
# set the input test data [mm/sec]
uu = np.array([[218, -281, -100, 483, 1238, -245, 622, -181, 99, -906]])
vv = np.array([[-337, -182, -1052, -868, -892, 258, -850, -87, -307, -546]])
ww = np.array([[140, 398, 187, 164, 9, -129, 33, -302, 138, 197]])
ee = np.array([[790, 635, 81, 626, 64, 71, -317, 219, 55, 433]])
# expected values, calculated using adcp_earth_eastward and adcp_earth_northward
uu_cor = np.array([[0.11031103, -0.32184604, -0.40227939, 0.20903718, 0.92426103,
-0.15916447, 0.34724837, -0.19849871, 0.00522179, -1.02580274]])
vv_cor = np.array([[-0.38590734, -0.09219615, -0.97717720, -0.97109035, -1.21410442,
0.31820696, -0.99438552, -0.03046741, -0.32252555, -0.25822614]])
# expected values, calculated by changing units from mm/s to m/s
ww_vel = ww / 1000.0
ee_vel = ee / 1000.0
# test the magnetic variation correction using type integer inputs for the velocities.
got_uu_cor = af.adcp_earth_eastward(uu, vv, self.depth, self.lat, self.lon, self.ntp)
got_vv_cor = af.adcp_earth_northward(uu, vv, self.depth, self.lat, self.lon, self.ntp)
# and the unit change functions
got_ww_vel = af.adcp_earth_vertical(ww)
got_ee_vel = af.adcp_earth_error(ee)
np.testing.assert_array_almost_equal(got_uu_cor, uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, vv_cor, 4)
np.testing.assert_array_almost_equal(got_ww_vel, ww_vel, 4)
np.testing.assert_array_almost_equal(got_ee_vel, ee_vel, 4)
# reset the test inputs for multiple records using the integer inputs.
uu = np.tile(uu, (24, 1))
vv = np.tile(vv, (24, 1))
ww = np.tile(ww, (24, 1))
ee = np.tile(ee, (24, 1))
depth = np.ones(24) * self.depth
lat = np.ones(24) * self.lat
lon = np.ones(24) * self.lon
ntp = np.ones(24) * self.ntp
# reset expected results for multiple records
uu_cor = np.tile(uu_cor, (24, 1))
vv_cor = np.tile(vv_cor, (24, 1))
ww_vel = np.tile(ww_vel, (24, 1))
ee_vel = np.tile(ee_vel, (24, 1))
# compute the results for multiple records
got_uu_cor = af.adcp_earth_eastward(uu, vv, depth, lat, lon, ntp)
got_vv_cor = af.adcp_earth_northward(uu, vv, depth, lat, lon, ntp)
got_ww_vel = af.adcp_earth_vertical(ww)
got_ee_vel = af.adcp_earth_error(ee)
# test the magnetic variation correction
np.testing.assert_array_almost_equal(got_uu_cor, uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, vv_cor, 4)
# and the unit change functions
np.testing.assert_array_almost_equal(got_ww_vel, ww_vel, 4)
np.testing.assert_array_almost_equal(got_ee_vel, ee_vel, 4)
def test_adcp_earth_with_fill(self):
"""
Tests adcp_earth_eastward, adcp_earth_northward, adcp_earth_vertical and
adcp_earth_error when system fill values and ADCP fill values (bad value
sentinels) are present in the data stream.
Non-fill test values come from the function test_adcp_earth_int_input_velocity_data
in this module.
Implemented by:
2014-06-25: <NAME>. Initial code.
"""
# for convenience
sfill = SYSTEM_FILLVALUE
afill = ADCP_FILLVALUE
### scalar time case
# set the input test data
lat = np.array([50.0000])
lon = np.array([-145.0000])
depth = np.array([0.0])
ntp = np.array([3545769600.0]) # May 12, 2012
# input velocities [mm/sec]
uu_in0 = np.array([[218, sfill, -100, 483, afill, -245]])
vv_in0 = np.array([[sfill, -182, -1052, -868, -892, afill]])
ww_in0 = np.array([[sfill, 398, afill, 164, 9, -129]])
ee_in0 = np.array([[afill, 635, 81, 626, sfill, 71]])
# expected values [m/sec]
uu_x0 = np.array([[np.nan, np.nan, -0.40227, 0.20903, np.nan, np.nan]])
vv_x0 = np.array([[np.nan, np.nan, -0.97717, -0.97109, np.nan, np.nan]])
ww_x0 = np.array([[np.nan, 0.398, np.nan, 0.164, 0.009, -0.129]])
ee_x0 = np.array([[np.nan, 0.635, 0.081, 0.626, np.nan, 0.071]])
# calculated
uu_calc = af.adcp_earth_eastward(uu_in0, vv_in0, depth, lat, lon, ntp)
vv_calc = af.adcp_earth_northward(uu_in0, vv_in0, depth, lat, lon, ntp)
ww_calc = af.adcp_earth_vertical(ww_in0)
ee_calc = af.adcp_earth_error(ee_in0)
# test
np.testing.assert_array_almost_equal(uu_calc, uu_x0, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x0, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_x0, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x0, 4)
### multiple time record case
# set the input test data
lat = np.tile(lat, 5)
lon = np.tile(lon, 5)
depth = np.tile(depth, 5)
ntp = np.tile(ntp, 5)
uu_in0 = np.tile(uu_in0, (5, 1))
vv_in0 = np.tile(vv_in0, (5, 1))
ww_in0 = np.tile(ww_in0, (5, 1))
ee_in0 = np.tile(ee_in0, (5, 1))
# expected
uu_x0 = np.tile(uu_x0, (5, 1))
vv_x0 = np.tile(vv_x0, (5, 1))
ww_x0 = np.tile(ww_x0, (5, 1))
ee_x0 = np.tile(ee_x0, (5, 1))
# calculated
uu_calc = af.adcp_earth_eastward(uu_in0, vv_in0, depth, lat, lon, ntp)
vv_calc = af.adcp_earth_northward(uu_in0, vv_in0, depth, lat, lon, ntp)
ww_calc = af.adcp_earth_vertical(ww_in0)
ee_calc = af.adcp_earth_error(ee_in0)
# test
np.testing.assert_array_almost_equal(uu_calc, uu_x0, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x0, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_x0, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x0, 4)
def test_adcp_backscatter(self):
"""
Tests echo intensity scaling function (adcp_backscatter) for ADCPs
in order to convert from echo intensity in counts to dB.
    Values were not defined in the DPS; they were created using the test values above:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
Implemented by <NAME>, 2014-02-06
<NAME>, 2015-06-25. Added tests for fill values.
"""
# the single record case
got = af.adcp_backscatter(self.echo, self.sfactor)
np.testing.assert_array_almost_equal(got, self.dB, 4)
# the multi-record case -- inputs
raw = np.tile(self.echo, (24, 1))
sf = np.ones(24) * self.sfactor
# the multi-record case -- outputs
dB = np.tile(self.dB, (24, 1))
got = af.adcp_backscatter(raw, sf)
np.testing.assert_array_almost_equal(got, dB, 4)
### test fill value replacement with nan
# for convenience
sfill = SYSTEM_FILLVALUE
# the adcp bad sentinel fillvalue (requires 2 bytes) is not used for echo
# intensity, which is stored in 1 byte.
# the single time record case
echo_with_fill, xpctd = np.copy(self.echo), np.copy(self.dB)
echo_with_fill[0, 3], xpctd[0, 3] = sfill, np.nan
echo_with_fill[0, 6], xpctd[0, 6] = sfill, np.nan
echo_with_fill[0, 7], xpctd[0, 7] = sfill, np.nan
got = af.adcp_backscatter(echo_with_fill, self.sfactor)
np.testing.assert_array_almost_equal(got, xpctd, 4)
# the multiple time record case
echo_with_fill = np.vstack((echo_with_fill, self.echo, echo_with_fill))
xpctd = np.vstack((xpctd, self.dB, xpctd))
sfactor = np.tile(self.sfactor, (3, 1))
got = af.adcp_backscatter(echo_with_fill, sfactor)
np.testing.assert_array_almost_equal(got, xpctd, 4)
def test_vadcp_beam(self):
"""
Indirectly tests vadcp_beam_eastward, vadcp_beam_northward,
vadcp_beam_vertical_est, and vadcp_beam_vertical_true functions (which
call adcp_beam2ins and adcp_ins2earth) and vadcp_beam_error (which only
calls adcp_beam2ins) for the specialized 5-beam ADCP. Application of
the magnetic correction and conversion from mm/s to m/s is not applied.
Values based on those defined in DPS:
OOI (2012). Data Product Specification for Turbulent Velocity Profile
and Echo Intensity. Document Control Number 1341-00760.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00760_Data_Product_SPEC_VELTURB_OOI.pdf)
Implemented by:
2014-07-24: <NAME>. Initial code.
2015-06-10: <NAME>.
adcp_ins2earth now requires the units of the compass
data to be in centidegrees.
"""
# test inputs
b1 = np.ones((10, 10)).astype(int) * -325
b2 = np.ones((10, 10)).astype(int) * 188
b3 = np.ones((10, 10)).astype(int) * 168
b4 = np.ones((10, 10)).astype(int) * -338
b5 = np.ones((10, 10)).astype(int) * -70
# units of centidegrees
heading = np.array([30, 30, 30, 30, 30,
32, 32, 32, 32, 32]) * 100
pitch = np.array([0, 2, 3, 3, 1, 2, 2, 3, 3, 1]) * 100
roll = np.array([0, 4, 3, 4, 3, 3, 4, 3, 4, 3]) * 100
    orient = np.ones(10, dtype=int)
# expected outputs
vle = np.array([279.6195, 282.6881, 281.8311, 282.7147,
282.1188, 246.2155, 246.9874, 246.1226,
247.0156, 246.4276]).reshape(-1, 1)
vle = np.reshape(np.tile(vle, 10), (10, 10))
vln = np.array([-1015.5964, -1018.0226, -1018.2595, -1017.9765,
-1017.7612, -1027.3264, -1027.2681, -1027.4749,
-1027.2230, -1026.9870]).reshape(-1, 1)
vln = np.reshape(np.tile(vln, 10), (10, 10))
vlu = np.array([81.6756, 3.3916, 3.5950, -9.4974,
29.4154, 16.5077, 3.3916, 3.5950,
-9.4974, 29.4154]).reshape(-1, 1)
vlu = np.reshape(np.tile(vlu, 10), (10, 10))
evl = np.array([34.1128, 34.1128, 34.1128, 34.1128,
34.1128, 34.1128, 34.1128, 34.1128,
34.1128, 34.1128]).reshape(-1, 1)
evl = np.reshape(np.tile(evl, 10), (10, 10))
w5 = np.array([70.0000, -8.2485, -8.0487, -21.1287,
17.7575, 4.8552, -8.2485, -8.0487,
-21.1287, 17.7575]).reshape(-1, 1)
w5 = np.reshape(np.tile(w5, 10), (10, 10))
# test the transformations
u, v, w_est, e = af.adcp_beam2ins(b1, b2, b3, b4)
uu, vv, ww_est = af.adcp_ins2earth(u, v, w_est, heading, pitch, roll, orient)
_, _, ww_true = af.adcp_ins2earth(u, v, b5, heading, pitch, roll, orient)
# compare the results
np.testing.assert_array_almost_equal(uu, vle, 4)
np.testing.assert_array_almost_equal(vv, vln, 4)
np.testing.assert_array_almost_equal(ww_est, vlu, 4)
np.testing.assert_array_almost_equal(e, evl, 4)
np.testing.assert_array_almost_equal(ww_true, w5, 4)
#### KEEP: RAD 2015-06-22:
"""
## Given that these unit tests have validated the VADCP DPA functions, use these
## vadcp functions to generate values for unit tests with (a) type integer inputs
## (b) that use the vadcp functions themselves, instead of their constituent sub-
## routines, so that unit tests checking the trapping of CI fill values (-999999999)
## and ADCP instrument bad value sentinels (-32768) can be constructed.
#lat = np.ones(10) * self.lat
#lon = np.ones(10) * self.lon
#z = np.ones(10) * self.depth
#dt = np.ones(10) * self.ntp
#
#vle = af.vadcp_beam_eastward(b1, b2, b3, b4, heading, pitch, roll, orient, lat, lon, z, dt)
#vln = af.vadcp_beam_northward(b1, b2, b3, b4, heading, pitch, roll, orient, lat, lon, z, dt)
#vlu_4bm = af.vadcp_beam_vertical_est(b1, b2, b3, b4, heading, pitch, roll, orient)
#vlu_5bm = af.vadcp_beam_vertical_true(b1, b2, b3, b4, b5, heading, pitch, roll, orient)
#err = af.vadcp_beam_error(b1, b2, b3, b4)
#
#print vle.T
#print vln.T
#print vlu_4bm.T
#print vlu_5bm.T
#print err.T
"""
#### RAD 2015-06-22
def test_vadcp_beam_int_input_velocity_data(self):
"""
Tests vadcp_beam_eastward, vadcp_beam_northward, vadcp_beam_vertical_est,
vadcp_beam_vertical_true and vadcp_beam_error functions for the specialized 5-beam ADCP
using int type raw velocity data, as will be supplied by CI.
Test values come from the function test_vadcp_beam, in this module.
The tests in this module will be used to derive unit tests checking the replacement
    of ADCP int bad value sentinels (-32768) with NaNs; these tests require that the
raw velocity data be of type int.
Implemented by:
2014-06-22: <NAME>. Initial code.
"""
# inputs
    b1 = np.ones((10, 10), dtype=int) * -325
    b2 = np.ones((10, 10), dtype=int) * 188
    b3 = np.ones((10, 10), dtype=int) * 168
    b4 = np.ones((10, 10), dtype=int) * -338
    b5 = np.ones((10, 10), dtype=int) * -70
# units of centidegrees
heading = np.array([30, 30, 30, 30, 30,
32, 32, 32, 32, 32]) * 100
pitch = np.array([0, 2, 3, 3, 1, 2, 2, 3, 3, 1]) * 100
roll = np.array([0, 4, 3, 4, 3, 3, 4, 3, 4, 3]) * 100
    orient = np.ones(10, dtype=int)
lat = np.ones(10) * self.lat
lon = np.ones(10) * self.lon
z =
|
np.ones(10)
|
numpy.ones
|
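# Hedged illustration (not part of the ADCP tests above): the fill-value handling those
# test docstrings describe amounts to replacing the CI system fill value (-999999999) and
# the ADCP bad-value sentinel (-32768) with NaN before any unit conversion. A minimal,
# standalone sketch with hypothetical names:
import numpy as np

SYSTEM_FILLVALUE = -999999999   # CI system fill value quoted in the docstrings above
ADCP_FILLVALUE = -32768         # ADCP bad-value sentinel quoted in the docstrings above

def replace_fill_with_nan_sketch(raw):
    """Return a float copy of integer velocity data with fill sentinels set to NaN."""
    out = np.asarray(raw, dtype=float)
    out[(out == SYSTEM_FILLVALUE) | (out == ADCP_FILLVALUE)] = np.nan
    return out

# replace_fill_with_nan_sketch([218, -32768, -100, -999999999]) -> [218., nan, -100., nan]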
import numpy as _np
from scipy.stats import multivariate_normal as _mvn
from . import signalsource
from .. utils import spacegeometry as sg
class PointSource(signalsource.SignalSource):
def __init__(
self,
RA,
DEC,
extent=None,
attitudeStateName='attitude',
useUnitVector=True
):
self.useUnitVector = useUnitVector
signalsource.SignalSource.__init__(self)
self.__RA__ = RA
self.__DEC__ = DEC
self.__RaDec__ = {'RA': RA, 'DEC': DEC}
self.attitudeStateName = attitudeStateName
self.lastPDF = None
self.extent = extent
return
def RaDec(self):
return(self.__RaDec__)
def unitVec(
self,
RaDec=None):
if RaDec is None:
RaDec = self.__RaDec__
cosD = _np.cos(RaDec['DEC'])
sinD = _np.sin(RaDec['DEC'])
cosRA =
|
_np.cos(RaDec['RA'])
|
numpy.cos
|
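# Hedged illustration of the unit-vector construction begun in PointSource.unitVec above:
# a right ascension / declination pair (in radians) maps to a Cartesian unit vector via the
# standard spherical-to-Cartesian relations. This is a standalone sketch, not the class
# method itself.
import numpy as np

def radec_to_unit_vector(RA, DEC):
    return np.array([np.cos(DEC) * np.cos(RA),
                     np.cos(DEC) * np.sin(RA),
                     np.sin(DEC)])

# radec_to_unit_vector(0.0, 0.0) -> array([1., 0., 0.])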
import numpy as np
def kernel_function(t,tk,h):
# tk can be a sequence
return 1/((2 * np.pi)**0.5 * h) * np.exp( - (t - tk)**2 / (2 * h**2))
def kernel_smooth(game_matrix_list,h,T_list = None):
T, N = game_matrix_list.shape[0:2]
smoothed = game_matrix_list + 0
if T_list is None:
T_list =
|
np.arange(T)
|
numpy.arange
|
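# Hedged usage sketch for kernel_function above: it is a Gaussian density in (t - tk) with
# bandwidth h, so a kernel-weighted (Nadaraya-Watson style) average of a series at one time
# point looks like the following. This is a standalone illustration, not the truncated
# kernel_smooth body.
import numpy as np

def kernel_function(t, tk, h):
    # same Gaussian kernel as defined above
    return 1 / ((2 * np.pi) ** 0.5 * h) * np.exp(-(t - tk) ** 2 / (2 * h ** 2))

T = 10
series = np.sin(np.arange(T))                       # toy data
weights = kernel_function(5.0, np.arange(T), 2.0)   # weights centred on t = 5, bandwidth 2
smoothed_at_5 = np.sum(weights * series) / np.sum(weights)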
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from typing import List, Dict
import networkx as nx
import numpy as np
import pyomo.environ as pe
from networkx import Graph
from overrides import overrides
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen
from miplearn.instance.base import Instance
class ChallengeA:
def __init__(
self,
seed: int = 42,
n_training_instances: int = 500,
n_test_instances: int = 50,
) -> None:
np.random.seed(seed)
self.generator = MaxWeightStableSetGenerator(
w=uniform(loc=100.0, scale=50.0),
n=randint(low=200, high=201),
p=uniform(loc=0.05, scale=0.0),
fix_graph=True,
)
np.random.seed(seed + 1)
self.training_instances = self.generator.generate(n_training_instances)
|
np.random.seed(seed + 2)
|
numpy.random.seed
|
from __future__ import absolute_import, division, print_function
import argparse
import importlib
import itertools
import matplotlib
matplotlib.use('Agg')
import time
from multiprocessing import Pool
import numpy as np
import os
import pdb
import pickle
import subprocess
import sys
import tensorflow as tf
import tensorflow.contrib.slim as slim
import threading
import scipy.misc
from skimage import color
import init_paths
from models.sample_models import *
from lib.data.synset import *
import scipy
import skimage
import skimage.io
import transforms3d
import math
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import random
import utils
import models.architectures as architectures
from data.load_ops import resize_rescale_image
from data.load_ops import rescale_image
import utils
import lib.data.load_ops as load_ops
import task_viz
parser = utils.create_parser("Viz Single Task")
tf.logging.set_verbosity(tf.logging.ERROR)
list_of_tasks = 'autoencoder curvature denoise edge2d edge3d \
keypoint2d keypoint3d colorization jigsaw \
reshade rgb2depth rgb2mist rgb2sfnorm \
room_layout segment25d segment2d vanishing_point \
segmentsemantic class_1000 class_places inpainting_whole'
list_of_tasks = list_of_tasks.split()
def prepare_image(task, im_name, cfg):
img = task_viz.load_raw_image_center_crop( im_name )
img = skimage.img_as_float(img)
scipy.misc.toimage(np.squeeze(img), cmin=0.0, cmax=1.0).save(im_name)
if task == 'jigsaw' :
img = cfg[ 'input_preprocessing_fn' ]( img, target=cfg['target_dict'][random.randint(0,99)],
**cfg['input_preprocessing_fn_kwargs'] )
else:
img = cfg[ 'input_preprocessing_fn' ]( img, **cfg['input_preprocessing_fn_kwargs'] )
img = img[np.newaxis,:]
return img
def run_to_task():
import general_utils
from general_utils import RuntimeDeterminedEnviromentVars
tf.logging.set_verbosity(tf.logging.ERROR)
args = parser.parse_args()
task = args.task
if task not in list_of_tasks:
raise ValueError('Task not supported')
cfg = utils.generate_cfg(task)
    # Since we observe that areas with pixel values close to either 0 or 1 sometimes overflow, we clip pixel values
low_sat_tasks = 'autoencoder curvature denoise edge2d edge3d \
keypoint2d keypoint3d \
reshade rgb2depth rgb2mist rgb2sfnorm \
segment25d segment2d room_layout'.split()
if task in low_sat_tasks:
cfg['input_preprocessing_fn'] = load_ops.resize_rescale_image_low_sat
print("Doing {task}".format(task=task))
general_utils = importlib.reload(general_utils)
tf.reset_default_graph()
training_runners = { 'sess': tf.InteractiveSession(), 'coord': tf.train.Coordinator() }
############## Set Up Inputs ##############
# tf.logging.set_verbosity( tf.logging.INFO )
setup_input_fn = utils.setup_input
inputs = setup_input_fn( cfg, is_training=False, use_filename_queue=False )
RuntimeDeterminedEnviromentVars.load_dynamic_variables( inputs, cfg )
RuntimeDeterminedEnviromentVars.populate_registered_variables()
start_time = time.time()
############## Set Up Model ##############
model = utils.setup_model( inputs, cfg, is_training=False )
m = model[ 'model' ]
model[ 'saver_op' ].restore( training_runners[ 'sess' ], cfg[ 'model_path' ] )
############## Single Image ##############
if args.imgs_list:
with open(args.imgs_list) as imgs_list:
all_prediction = []
all_representation = []
for line in imgs_list:
filename = args.dir_name + line.strip().split(',')[0] # FIXME
img = prepare_image(task, filename, cfg)
predicted, representation = training_runners['sess'].run(
[ m.decoder_output, m.encoder_output ], feed_dict={m.input_images: img} )
utils.tasks(task, args, predicted, os.path.join(args.store_name + line.split(os.path.sep)[-1].strip() + '.jpg'), img=img)
all_prediction.append(np.squeeze(predicted))
all_representation.append(np.squeeze(representation))
if args.store_rep:
s_name, file_extension = os.path.splitext(args.store_name)
with open('{}.npy'.format(s_name), 'wb') as fp:
np.save(fp, np.array(all_representation))
if args.store_pred:
s_name, file_extension = os.path.splitext(args.store_name)
with open('{}_pred.npy'.format(s_name), 'wb') as fp:
np.save(fp, np.array(all_prediction))
else:
img = prepare_image(task, args.im_name, cfg)
predicted, representation = training_runners['sess'].run(
[ m.decoder_output, m.encoder_output ], feed_dict={m.input_images: img} )
utils.tasks(task, args, predicted, representation, img)
if args.store_rep:
s_name, file_extension = os.path.splitext(args.store_name)
with open('{}.npy'.format(s_name), 'wb') as fp:
np.save(fp,
|
np.squeeze(representation)
|
numpy.squeeze
|
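# Hedged illustration of the low-saturation handling mentioned in run_to_task above: pixel
# values very close to 0 or 1 are clipped before rescaling so they cannot overflow
# downstream. The bounds below are made up; the real behaviour lives in
# lib.data.load_ops.resize_rescale_image_low_sat.
import numpy as np

def clip_low_sat_sketch(img, low=0.02, high=0.98):
    return np.clip(img, low, high)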
# Created by Martin.cz
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.cm
def print_accuracy(model, X, Y, threshold=0.5, label="Training"):
"""
Prints accuracy by comparing predicted and expected.
Args:
model: miniml.Model
Trained model.
X: np.ndarray
Input data of shape (m, ...).
Y: np.ndarray
Expected output of shape (m, C).
threshold: float
Threshold to convert probabilities to either 0 or 1.
label: str
Dataset label.
"""
# get shape
m = X.shape[0]
C = Y.shape[1]
# predict by model
A = model.predict(X)
# convert to 0/1 predictions
p = A > threshold
# calc accuracy
accuracy = np.sum((p == Y))/m/C * 100
# print accuracy
print("%s Accuracy: %.2f %%" % (label, accuracy))
def plot_costs(epochs, **series):
"""
Plots learning curve of the model.
Args:
epochs: int
Number of iterations.
        **series: list of (float,)
            Collection of individual series to plot, keyed by label. All the series are expected
to have the same length.
"""
# init plot
plt.figure()
plt.title("Learning Curve")
plt.xlabel('Iterations')
# plot curves
steps = 1
labels = []
for label, data in series.items():
s, = plt.plot(np.squeeze(data), label=label)
labels.append(s)
steps = int(epochs / len(data))
# set legend
plt.legend(handles=labels)
# set x-ticks
locs, labels = plt.xticks()
plt.xticks(locs[1:-1], tuple(np.array(locs[1:-1], dtype='int')*steps))
plt.xticks()
# show plot
plt.show()
def plot_boundaries(model, X, Y, threshold=0.5):
"""
Plots decision boundaries for 2D data.
Args:
model: miniml.Model
Trained model.
X: np.ndarray
Input data of shape (m, 2).
Y: np.ndarray
Expected output of shape (m, C).
threshold: float
Threshold to convert probabilities to either 0 or 1.
"""
# check data
if len(X.shape) != 2 or X.shape[1] != 2:
raise ValueError("Input data (X) should be of shape (m, 2).")
if len(Y.shape) != 2:
raise ValueError("Expected output data (Y) should be of shape (m, C).")
# get size
NX = 1000
NY = 1000
C = Y.shape[1]
# get range
min_x = min(X[:, 0])
max_x = max(X[:, 0])
min_y = min(X[:, 1])
max_y = max(X[:, 1])
f = 0.3
x_range = (min_x-f*(max_x-min_x), max_x+f*(max_x-min_x))
y_range = (min_y-f*(max_y-min_y), max_y+f*(max_y-min_y))
# generate a grid of points
xs = np.linspace(x_range[0], x_range[1], NX)
ys =
|
np.linspace(y_range[1], y_range[0], NY)
|
numpy.linspace
|
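# Hedged sketch of how the xs/ys grid built in plot_boundaries above is typically used (the
# original function body is truncated at this point): evaluate the model on every grid point
# and draw filled contours of the thresholded prediction. `model.predict` follows the miniml
# convention used above; all other names are illustrative only.
import numpy as np
import matplotlib.pyplot as plt

def plot_boundary_sketch(model, xs, ys, threshold=0.5):
    gx, gy = np.meshgrid(xs, ys)              # (NY, NX) grids of candidate points
    grid = np.c_[gx.ravel(), gy.ravel()]      # shape (NX*NY, 2) model input
    probs = model.predict(grid)               # shape (NX*NY, C) probabilities
    labels = (probs[:, 0] > threshold).astype(float).reshape(gx.shape)
    plt.contourf(gx, gy, labels, alpha=0.3)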
""" Copyright (c) 2020, <NAME> and IBM Research
* Licensed under The MIT License [see LICENSE for details]
- Quantum population classes.
"""
import numpy as np
from chromosome import QChromosomeParams, QChromosomeNetwork
class QPopulation(object):
""" QNAS Population to be evolved. """
def __init__(self, num_quantum_ind, repetition, update_quantum_rate):
""" Initialize QPopulation.
Args:
num_quantum_ind: (int) number of quantum individuals.
repetition: (int) ratio between the number of classic individuals in the classic
population and the quantum individuals in the quantum population.
update_quantum_rate: (float) probability that a quantum gene will be updated.
"""
self.dtype = np.float64 # Type of quantum population arrays.
self.chromosome = None
self.current_pop = None
self.num_ind = num_quantum_ind
self.repetition = repetition
self.update_quantum_rate = update_quantum_rate
def initialize_qpop(self):
raise NotImplementedError('initialize_qpop() must be implemented in sub classes')
def generate_classical(self):
raise NotImplementedError('generate_classical() must be implemented in sub classes')
def update_quantum(self, intensity):
raise NotImplementedError('update_quantum() must be implemented in sub classes')
class QPopulationParams(QPopulation):
""" QNAS Chromosomes for the hyperparameters to be evolved. """
def __init__(self, num_quantum_ind, params_ranges, repetition, crossover_rate,
update_quantum_rate):
""" Initialize QPopulationParams.
Args:
num_quantum_ind: (int) number of quantum individuals.
params_ranges: {'parameter_name': [parameter_lower_limit, parameter_upper_limit]}.
repetition: (int) ratio between the number of classic individuals in the classic
population and the quantum individuals in the quantum population.
crossover_rate: (float) crossover rate.
update_quantum_rate: (float) probability that a quantum gene will be updated.
"""
super(QPopulationParams, self).__init__(num_quantum_ind, repetition,
update_quantum_rate)
self.tolerance = 1.e-15 # Tolerance to compare floating point
self.lower = None
self.upper = None
self.crossover = crossover_rate
self.chromosome = QChromosomeParams(params_ranges, self.dtype)
self.initial_lower, self.initial_upper = self.chromosome.initialize_qgenes()
self.initialize_qpop()
def initialize_qpop(self):
""" Initialize quantum population with *self.num_ind* individuals. """
self.lower = np.tile(self.initial_lower, (self.num_ind, 1))
self.upper = np.tile(self.initial_upper, (self.num_ind, 1))
def classic_crossover(self, new_pop, distance):
""" Perform arithmetic crossover of the old classic population with the new one.
Args:
new_pop: float numpy array representing the new classical population.
distance: (float) random distance for arithmetic crossover (range = [0, 1]).
"""
mask = np.random.rand(self.num_ind * self.repetition, self.chromosome.num_genes)
idx = np.where(mask <= self.crossover)
new_pop[idx] = new_pop[idx] + (self.current_pop[idx] - new_pop[idx]) * distance
return new_pop
def generate_classical(self):
""" Generate a specific number of classical individuals from the observation of quantum
individuals. This number is equal to (*num_ind* x *repetition*).
"""
random_numbers = np.random.rand(self.num_ind * self.repetition,
self.chromosome.num_genes).astype(self.dtype)
new_pop = random_numbers * np.tile(self.upper - self.lower, (self.repetition, 1)) \
+ np.tile(self.lower, (self.repetition, 1))
return new_pop
def update_quantum(self, intensity):
""" Update self.lower and self.upper.
Args:
intensity: (float) value defining the maximum intensity of the update.
"""
random = np.random.rand(self.num_ind, self.chromosome.num_genes)
mask = np.where(random <= self.update_quantum_rate)
max_genes =
|
np.max(self.current_pop, axis=0)
|
numpy.max
|
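# Hedged numeric illustration of the "observation" step described in generate_classical
# above: each classical gene is drawn uniformly between a quantum individual's lower and
# upper bounds. Standalone sketch with made-up bounds, not part of the QPopulation classes.
import numpy as np

lower = np.array([[0.0, 0.1]])     # one quantum individual, two genes
upper = np.array([[1.0, 0.5]])
repetition = 3                     # classical individuals per quantum individual
rand = np.random.rand(repetition, 2)
classical_pop = rand * np.tile(upper - lower, (repetition, 1)) + np.tile(lower, (repetition, 1))
# every row of classical_pop lies gene-wise inside [lower, upper]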
"""
A full working spin-orbital CCSD(T) code generated with pdaggerq
If you want to run the example here you should install pyscf, openfermion,
and openfermion-pyscf. The actual CCSD(T) code (ccsd_energy, singles_residual,
doubles_residual, triples_residual, t3_energy, kernel) does not depend on those
packages, but you must obtain integrals from somewhere.
The total energy for this example has been checked against that produced
by the CCSD(T) implementation in psi4.
(T) energy = -0.003382913092468
* CCSD(T) total energy = -100.009057558929399
the main() function is fairly straightforward.
"""
# set allow numpy built with MKL to consume more threads for tensordot
import os
os.environ["MKL_NUM_THREADS"] = "{}".format(os.cpu_count() - 1)
import numpy as np
from numpy import einsum
def ccsd_energy(t1, t2, f, g, o, v):
"""
< 0 | e(-T) H e(T) | 0> :
:param t1: spin-orbital t1 amplitudes (nvirt x nocc)
:param t2: spin-orbital t2 amplitudes (nvirt x nvirt x nocc x nocc)
:param f: fock operator defined as soei + np.einsum('piiq->pq', astei[:, o, o, :])
where soei is 1 electron integrals (spinorb) and astei is
antisymmetric 2 electron integrals in openfermion format
<12|21>. <ij|kl> - <ij|lk>
:param g: antisymmetric 2 electron integrals. See fock input.
:param o: slice(None, occ) where occ is number of occupied spin-orbitals
    :param v: slice(occ, None) where occ is number of occupied spin-orbitals
"""
# 1.0000 f(i,i)
energy = 1.0 * einsum('ii', f[o, o])
# 1.0000 f(i,a)*t1(a,i)
energy += 1.0 * einsum('ia,ai', f[o, v], t1)
# -0.5000 <j,i||j,i>
energy += -0.5 * einsum('jiji', g[o, o, o, o])
# 0.2500 <j,i||a,b>*t2(a,b,j,i)
energy += 0.25 * einsum('jiab,abji', g[o, o, v, v], t2)
# -0.5000 <j,i||a,b>*t1(a,i)*t1(b,j)
energy += -0.5 * einsum('jiab,ai,bj', g[o, o, v, v], t1, t1,
optimize=['einsum_path', (0, 1), (0, 1)])
return energy
def t_energy(l1, l2, t3, f, g, o, v):
"""
E(t)
:param l1: transpose of spin-orbital t1 amplitudes (nocc x nvirt)
:param l2: transpose of spin-orbital t2 amplitudes (nocc x nocc x nvirt x nvirt)
    :param t3: spin-orbital t3 amplitudes (nvirt x nvirt x nvirt x nocc x nocc x nocc)
:param f: fock operator defined as soei + np.einsum('piiq->pq', astei[:, o, o, :])
where soei is 1 electron integrals (spinorb) and astei is
antisymmetric 2 electron integrals in openfermion format
<12|21>. <ij|kl> - <ij|lk>
:param g: antisymmetric 2 electron integrals. See fock input.
:param o: slice(None, occ) where occ is number of occupied spin-orbitals
    :param v: slice(occ, None) where occ is number of occupied spin-orbitals
"""
# 0.2500 <k,j||b,c>*l1(i,a)*t3(b,c,a,i,k,j)
energy = 0.25 * einsum('kjbc,ia,bcaikj', g[o, o, v, v], l1, t3, optimize=['einsum_path', (0, 2), (0, 1)])
# 0.2500 <l,k||c,j>*l2(i,j,b,a)*t3(c,b,a,i,l,k)
energy += 0.25 * einsum('lkcj,ijba,cbailk', g[o, o, v, o], l2, t3, optimize=['einsum_path', (0, 2), (0, 1)])
# 0.2500 <k,b||c,d>*l2(i,j,b,a)*t3(c,d,a,i,j,k)
energy += 0.25 * einsum('kbcd,ijba,cdaijk', g[o, v, v, v], l2, t3, optimize=['einsum_path', (0, 2), (0, 1)])
return energy
def singles_residual(t1, t2, t3, f, g, o, v):
"""
< 0 | m* e e(-T) H e(T) | 0>
:param t1: spin-orbital t1 amplitudes (nvirt x nocc)
:param t2: spin-orbital t2 amplitudes (nvirt x nvirt x nocc x nocc)
:param t3: spin-orbital t3 amplitudes (nvirt x nvirt x nvirt x nocc x nocc x nocc)
:param f: fock operator defined as soei + np.einsum('piiq->pq', astei[:, o, o, :])
where soei is 1 electron integrals (spinorb) and astei is
antisymmetric 2 electron integrals in openfermion format
<12|21>. <ij|kl> - <ij|lk>
:param g: antisymmetric 2 electron integrals. See fock input.
:param o: slice(None, occ) where occ is number of occupied spin-orbitals
    :param v: slice(occ, None) where occ is number of occupied spin-orbitals
"""
# 1.0000 f(a,i)
singles_res = 1.0 * einsum('ai->ai', f[v, o])
# -1.0000 f(j,i)*t1(a,j)
singles_res += -1.0 * einsum('ji,aj->ai', f[o, o], t1)
# 1.0000 f(a,b)*t1(b,i)
singles_res += 1.0 * einsum('ab,bi->ai', f[v, v], t1)
# -1.0000 f(j,b)*t2(b,a,i,j)
singles_res += -1.0 * einsum('jb,baij->ai', f[o, v], t2)
# -1.0000 f(j,b)*t1(b,i)*t1(a,j)
singles_res += -1.0 * einsum('jb,bi,aj->ai', f[o, v], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 <j,a||b,i>*t1(b,j)
singles_res += 1.0 * einsum('jabi,bj->ai', g[o, v, v, o], t1)
# -0.5000 <k,j||b,i>*t2(b,a,k,j)
singles_res += -0.5 * einsum('kjbi,bakj->ai', g[o, o, v, o], t2)
# -0.5000 <j,a||b,c>*t2(b,c,i,j)
singles_res += -0.5 * einsum('jabc,bcij->ai', g[o, v, v, v], t2)
# 1.0000 <k,j||b,i>*t1(b,j)*t1(a,k)
singles_res += 1.0 * einsum('kjbi,bj,ak->ai', g[o, o, v, o], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 <j,a||b,c>*t1(b,j)*t1(c,i)
singles_res += 1.0 * einsum('jabc,bj,ci->ai', g[o, v, v, v], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 <k,j||b,c>*t1(b,j)*t2(c,a,i,k)
singles_res += 1.0 * einsum('kjbc,bj,caik->ai', g[o, o, v, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
# 0.5000 <k,j||b,c>*t1(b,i)*t2(c,a,k,j)
singles_res += 0.5 * einsum('kjbc,bi,cakj->ai', g[o, o, v, v], t1, t2, optimize=['einsum_path', (0, 2), (0, 1)])
# 0.5000 <k,j||b,c>*t1(a,j)*t2(b,c,i,k)
singles_res += 0.5 * einsum('kjbc,aj,bcik->ai', g[o, o, v, v], t1, t2, optimize=['einsum_path', (0, 2), (0, 1)])
# 1.0000 <k,j||b,c>*t1(b,j)*t1(c,i)*t1(a,k)
singles_res += 1.0 * einsum('kjbc,bj,ci,ak->ai', g[o, o, v, v], t1, t1, t1, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
return singles_res
def doubles_residual(t1, t2, t3, f, g, o, v):
"""
< 0 | m* n* f e e(-T) H e(T) | 0>
:param t1: spin-orbital t1 amplitudes (nvirt x nocc)
:param t2: spin-orbital t2 amplitudes (nvirt x nvirt x nocc x nocc)
:param t3: spin-orbital t3 amplitudes (nvirt x nvirt x nvirt x nocc x nocc x nocc)
:param f: fock operator defined as soei + np.einsum('piiq->pq', astei[:, o, o, :])
where soei is 1 electron integrals (spinorb) and astei is
antisymmetric 2 electron integrals in openfermion format
<12|21>. <ij|kl> - <ij|lk>
:param g: antisymmetric 2 electron integrals. See fock input.
:param o: slice(None, occ) where occ is number of occupied spin-orbitals
    :param v: slice(occ, None) where occ is number of occupied spin-orbitals
"""
# -1.0000 P(i,j)f(k,j)*t2(a,b,i,k)
contracted_intermediate = -1.0 * einsum('kj,abik->abij', f[o, o], t2)
doubles_res = 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->abji', contracted_intermediate)
# 1.0000 P(a,b)f(a,c)*t2(c,b,i,j)
contracted_intermediate = 1.0 * einsum('ac,cbij->abij', f[v, v], t2)
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->baij', contracted_intermediate)
# -1.0000 P(i,j)f(k,c)*t1(c,j)*t2(a,b,i,k)
contracted_intermediate = -1.0 * einsum('kc,cj,abik->abij', f[o, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->abji', contracted_intermediate)
# -1.0000 P(a,b)f(k,c)*t1(a,k)*t2(c,b,i,j)
contracted_intermediate = -1.0 * einsum('kc,ak,cbij->abij', f[o, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->baij', contracted_intermediate)
# 1.0000 <a,b||i,j>
doubles_res += 1.0 * einsum('abij->abij', g[v, v, o, o])
# 1.0000 P(a,b)<k,a||i,j>*t1(b,k)
contracted_intermediate = 1.0 * einsum('kaij,bk->abij', g[o, v, o, o], t1)
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->baij', contracted_intermediate)
# 1.0000 P(i,j)<a,b||c,j>*t1(c,i)
contracted_intermediate = 1.0 * einsum('abcj,ci->abij', g[v, v, v, o], t1)
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->abji', contracted_intermediate)
# 0.5000 <l,k||i,j>*t2(a,b,l,k)
doubles_res += 0.5 * einsum('lkij,ablk->abij', g[o, o, o, o], t2)
# 1.0000 P(i,j)*P(a,b)<k,a||c,j>*t2(c,b,i,k)
contracted_intermediate = 1.0 * einsum('kacj,cbik->abij', g[o, v, v, o], t2)
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->abji', contracted_intermediate) + -1.00000 * einsum('abij->baij', contracted_intermediate) + 1.00000 * einsum('abij->baji', contracted_intermediate)
# 0.5000 <a,b||c,d>*t2(c,d,i,j)
doubles_res += 0.5 * einsum('abcd,cdij->abij', g[v, v, v, v], t2)
# -1.0000 <l,k||i,j>*t1(a,k)*t1(b,l)
doubles_res += -1.0 * einsum('lkij,ak,bl->abij', g[o, o, o, o], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 P(i,j)*P(a,b)<k,a||c,j>*t1(c,i)*t1(b,k)
contracted_intermediate = 1.0 * einsum('kacj,ci,bk->abij', g[o, v, v, o], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->abji', contracted_intermediate) + -1.00000 * einsum('abij->baij', contracted_intermediate) + 1.00000 * einsum('abij->baji', contracted_intermediate)
# -1.0000 <a,b||c,d>*t1(c,j)*t1(d,i)
doubles_res += -1.0 *
|
einsum('abcd,cj,di->abij', g[v, v, v, v], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
|
numpy.einsum
|
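# Hedged sketch of the setup the CCSD(T) docstrings above assume: occupied/virtual slices
# and the spin-orbital Fock matrix built from one-electron integrals (soei) and
# antisymmetrized two-electron integrals (astei). Shapes and zero-valued integrals are
# placeholders; obtaining real integrals (e.g. via openfermion-pyscf) is outside this sketch.
import numpy as np

nso, nocc = 4, 2                           # toy spin-orbital counts
o = slice(None, nocc)                      # occupied spin-orbitals
v = slice(nocc, None)                      # virtual spin-orbitals
soei = np.zeros((nso, nso))                # placeholder one-electron integrals
astei = np.zeros((nso, nso, nso, nso))     # placeholder antisymmetrized <pq||rs> integrals
f = soei + np.einsum('piiq->pq', astei[:, o, o, :])   # Fock operator, as documented above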
import unittest
from math import sqrt, exp, log
import numpy as np
from pmacparser.pmac_parser import PMACParser, ParserError
class TestParser(unittest.TestCase):
def test_QP(self):
input_dict = {"P1": 42}
lines = []
lines.append("Q1=P1")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["Q1"], 42)
def test_QP2(self):
input_dict = {"Q1": 42}
lines = []
lines.append("P(1)=Q(1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["Q1"], 42)
def test_QP3(self):
input_dict = {"P1": 42}
lines = []
lines.append("Q(1)=P(1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["Q1"], 42)
def test_IM(self):
input_dict = {"I1": 42}
lines = []
lines.append("M1=I1")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["I1"], 42)
self.assertEqual(output_dict["M1"], 42)
def test_IM2(self):
input_dict = {"M1": 42}
lines = []
lines.append("I(1)=M(1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["I1"], 42)
self.assertEqual(output_dict["M1"], 42)
def test_float(self):
input_dict = {"Q1": 42}
lines = []
lines.append("Q1=56.254")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 1)
self.assertEqual(output_dict["Q1"], 56.254)
def test_add(self):
input_dict = {"P1": 42, "P2": 9}
lines = []
lines.append("Q1=P1+P2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P2"], 9)
self.assertEqual(output_dict["Q1"], 51)
def test_subtraction(self):
input_dict = {"P1": 5.5, "P2": 3}
lines = []
lines.append("P3=P1-P2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"], 5.5)
self.assertEqual(output_dict["P2"], 3)
self.assertEqual(output_dict["P3"], 2.5)
def test_subtraction_negative(self):
input_dict = {"P1": 5, "P2": 11}
lines = []
lines.append("P3=P1-P2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"], 5)
self.assertEqual(output_dict["P2"], 11)
self.assertEqual(output_dict["P3"], -6)
def test_multiply(self):
input_dict = {"Q1": 3, "P2": 22}
lines = []
lines.append("I1=Q1*P2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["Q1"], 3)
self.assertEqual(output_dict["P2"], 22)
self.assertEqual(output_dict["I1"], 66)
def test_divide(self):
input_dict = {"I1": 22, "Q5": 7}
lines = []
lines.append("P99=I1/Q5")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["I1"], 22)
self.assertEqual(output_dict["Q5"], 7)
self.assertEqual(output_dict["P99"], 22/7.0)
def test_multiply_precedence1(self):
input_dict = {}
lines = []
lines.append("Q1=3+4*5")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 1)
self.assertEqual(output_dict["Q1"], 23)
def test_multiply_precedence2(self):
input_dict = {}
lines = []
lines.append("P1=3*4+5")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 1)
self.assertEqual(output_dict["P1"], 17)
def test_multiply_precedence3(self):
input_dict = {}
lines = []
lines.append("Q1=3+4*5+6")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 1)
self.assertEqual(output_dict["Q1"], 29)
def test_multiply_precedence4(self):
input_dict = {}
lines = []
lines.append("Q1=2*3*4+5*6*7")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 1)
self.assertEqual(output_dict["Q1"], 234)
def test_divide_precedence1(self):
input_dict = {}
lines = []
lines.append("Q1=3+20/5")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 1)
self.assertEqual(output_dict["Q1"], 7)
def test_divide_precedence2(self):
input_dict = {}
lines = []
lines.append("P1=8/4+5")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 1)
self.assertEqual(output_dict["P1"], 7)
def test_divide_precedence3(self):
input_dict = {}
lines = []
lines.append("P1=16/4+5/5")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 1)
self.assertEqual(output_dict["P1"], 5)
def test_parenthesis_precedence(self):
input_dict = {}
lines = []
lines.append("Q1=2*3*(4+5)*6*7")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 1)
self.assertEqual(output_dict["Q1"], 2268)
def test_mod(self):
input_dict = {"P1": 8}
lines = []
lines.append("Q1=P1%3")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"], 8)
self.assertEqual(output_dict["Q1"], 2)
def test_bitand(self):
input_dict = {"P1": 54254323}
lines = []
lines.append("Q1=P1&213411")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"], 54254323)
self.assertEqual(output_dict["Q1"], 213155)
def test_bitor(self):
input_dict = {"P1": 54254323}
lines = []
lines.append("Q1=P1|213411")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"], 54254323)
self.assertEqual(output_dict["Q1"], 54254579)
def test_bitxor(self):
input_dict = {"P1": 54254323}
lines = []
lines.append("Q1=P1^213411")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"], 54254323)
self.assertEqual(output_dict["Q1"], 54041424)
def test_abs(self):
input_dict = {"P1": -3}
lines = []
lines.append("Q1=ABS(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"], -3)
self.assertEqual(output_dict["Q1"], 3)
def test_abs2(self):
input_dict = {"P1": 4.5}
lines = []
lines.append("Q1=ABS(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"], 4.5)
self.assertEqual(output_dict["Q1"], 4.5)
def test_int(self):
input_dict = {"P32": 3.141}
lines = []
lines.append("Q3=INT(P32)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P32"], 3.141)
self.assertEqual(output_dict["Q3"], 3)
def test_int_2(self):
input_dict = {"P32": 3.6}
lines = []
lines.append("Q3=INT(P32)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P32"], 3.6)
self.assertEqual(output_dict["Q3"], 3)
def test_int_neg(self):
input_dict = {"P32": -10.244}
lines = []
lines.append("Q3=INT(P32)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P32"], -10.244)
self.assertEqual(output_dict["Q3"], -11)
def test_int_neg_2(self):
input_dict = {"P32": -3.6}
lines = []
lines.append("Q3=INT(P32)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P32"], -3.6)
self.assertEqual(output_dict["Q3"], -4)
def test_sqrt(self):
input_dict = {"P45": 25}
lines = []
lines.append("Q2=SQRT(P45)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P45"], 25)
self.assertEqual(output_dict["Q2"], 5)
def test_sqrt2(self):
input_dict = {"P2": 2}
lines = []
lines.append("Q2=SQRT(P2)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P2"], 2)
self.assertEqual(output_dict["Q2"], sqrt(2))
def test_exp(self):
input_dict = {"P1": 33}
lines = []
lines.append("Q1=EXP(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"], 33)
self.assertEqual(output_dict["Q1"], exp(33))
def test_ln(self):
input_dict = {"P1": 33}
lines = []
lines.append("Q1=LN(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"], 33)
self.assertEqual(output_dict["Q1"], log(33))
def test_sin_deg(self):
input_dict = {"P1": 30}
lines = []
lines.append("Q1=SIN(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"], 30)
self.assertAlmostEqual(output_dict["Q1"], 0.5)
def test_sin_rad(self):
input_dict = {"P1": 30, "I15": 1}
lines = []
lines.append("Q1=SIN(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"], 30)
self.assertAlmostEqual(output_dict["Q1"], -0.988031624)
self.assertEqual(output_dict["I15"], 1)
def test_cos_deg(self):
input_dict = {"P1": 60}
lines = []
lines.append("Q1=COS(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"], 60)
self.assertAlmostEqual(output_dict["Q1"], 0.5)
def test_cos_rad(self):
input_dict = {"P1": 60, "I15": 1}
lines = []
lines.append("Q1=COS(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"], 60)
self.assertAlmostEqual(output_dict["Q1"], -0.95241298)
self.assertEqual(output_dict["I15"], 1)
def test_tan_deg(self):
input_dict = {"P1": 60}
lines = []
lines.append("Q1=TAN(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"], 60)
self.assertAlmostEqual(output_dict["Q1"], 1.732050808)
def test_tan_rad(self):
input_dict = {"P1": 60, "I15": 1}
lines = []
lines.append("Q1=TAN(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"], 60)
self.assertAlmostEqual(output_dict["Q1"], 0.320040389)
self.assertEqual(output_dict["I15"], 1)
def test_asin_deg(self):
input_dict = {"P1": 0.5}
lines = []
lines.append("Q1=ASIN(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"], 0.5)
self.assertAlmostEqual(output_dict["Q1"], 30)
def test_asin_rad(self):
input_dict = {"P1": 0.5, "I15": 1}
lines = []
lines.append("Q1=ASIN(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"], 0.5)
self.assertAlmostEqual(output_dict["Q1"], 0.523598776)
self.assertEqual(output_dict["I15"], 1)
def test_acos_deg(self):
input_dict = {"P1": 0.5}
lines = []
lines.append("Q1=ACOS(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"], 0.5)
self.assertAlmostEqual(output_dict["Q1"], 60)
def test_acos_rad(self):
input_dict = {"P1": 0.5, "I15": 1}
lines = []
lines.append("Q1=ACOS(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"], 0.5)
self.assertAlmostEqual(output_dict["Q1"], 1.047197551)
self.assertEqual(output_dict["I15"], 1)
def test_atan_deg(self):
input_dict = {"P1": 0.5}
lines = []
lines.append("Q1=ATAN(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"], 0.5)
self.assertAlmostEqual(output_dict["Q1"], 26.565051177)
def test_atan_rad(self):
input_dict = {"P1": 0.5, "I15": 1}
lines = []
lines.append("Q1=ATAN(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"], 0.5)
self.assertAlmostEqual(output_dict["Q1"], 0.463647609)
self.assertEqual(output_dict["I15"], 1)
def test_atan2_deg(self):
input_dict = {"P1": 0.5, "Q0": 2}
lines = []
lines.append("Q1=ATAN2(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"], 0.5)
self.assertAlmostEqual(output_dict["Q1"], 14.036243467934)
self.assertEqual(output_dict["Q0"], 2)
def test_atan2_rad(self):
input_dict = {"P1": 0.5, "I15": 1, "Q0": 2}
lines = []
lines.append("Q1=ATAN2(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 0.5)
self.assertAlmostEqual(output_dict["Q1"], 0.244978663127)
self.assertEqual(output_dict["I15"], 1)
self.assertEqual(output_dict["Q0"], 2)
def test_return(self):
input_dict = {"P1": 42, "P2": 9}
lines = []
lines.append("Q1=P1+P2")
lines.append("RETURN")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P2"], 9)
self.assertEqual(output_dict["Q1"], 51)
def test_ret(self):
input_dict = {"P1": 42, "P2": 9}
lines = []
lines.append("Q1=P1+P2")
lines.append("RET")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P2"], 9)
self.assertEqual(output_dict["Q1"], 51)
def test_multi_line(self):
input_dict = {"P1": 42, "P2": 9}
lines = []
lines.append("Q1=P1+P2")
lines.append("Q2=Q1+6")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P2"], 9)
self.assertEqual(output_dict["Q1"], 51)
self.assertEqual(output_dict["Q2"], 57)
def test_if(self):
input_dict = {"P1": 42}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=42)")
lines.append("P2=222")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
def test_if_endi(self):
input_dict = {"P1": 42}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=42)")
lines.append("P2=222")
lines.append("ENDI")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
def test_if_false(self):
input_dict = {"P1": 42}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=43)")
lines.append("P2=222")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
def test_if_else(self):
input_dict = {"P1": 42}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=42)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
def test_else(self):
input_dict = {"P1": 42}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=1)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P3"], 333)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
def test_if_not_equal_true(self):
input_dict = {"P1": 42}
lines = []
lines.append("Q1=1")
lines.append("IF(P1!=40)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
def test_if_not_equal_false(self):
input_dict = {"P1": 42}
lines = []
lines.append("Q1=1")
lines.append("IF(P1!=42)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P3"], 333)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
def test_if_gt_true(self):
input_dict = {"P1": 42}
lines = []
lines.append("Q1=1")
lines.append("IF(P1>40)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
def test_if_gt_false(self):
input_dict = {"P1": 42}
lines = []
lines.append("Q1=1")
lines.append("IF(P1>42)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P3"], 333)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
def test_if_not_gt_true(self):
input_dict = {"P1": 42}
lines = []
lines.append("Q1=1")
lines.append("IF(P1!>42)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
def test_if_not_gt_false(self):
input_dict = {"P1": 42}
lines = []
lines.append("Q1=1")
lines.append("IF(P1!>41)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P3"], 333)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
def test_if_lt_true(self):
input_dict = {"P1": 42}
lines = []
lines.append("Q1=1")
lines.append("IF(P1<43)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
def test_if_lt_false(self):
input_dict = {"P1": 42}
lines = []
lines.append("Q1=1")
lines.append("IF(P1<42)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P3"], 333)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
def test_if_not_lt_true(self):
input_dict = {"P1": 42}
lines = []
lines.append("Q1=1")
lines.append("IF(P1!<42)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
def test_if_not_lt_false(self):
input_dict = {"P1": 42}
lines = []
lines.append("Q1=1")
lines.append("IF(P1!<43)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P3"], 333)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
def test_if_and_inline_true(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=1ANDI1=1)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 5)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_and_inline_false1(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=2ANDI1=1)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 5)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P3"], 333)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_and_inline_false2(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=1ANDI1=2)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 5)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P3"], 333)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_and_inline_false3(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=2ANDI1=2)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 5)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P3"], 333)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_or_inline_true(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=1ORI1=1)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 5)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_or_inline_false1(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=2ORI1=1)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 5)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_or_inline_false2(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=1ORI1=2)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 5)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_or_inline_false3(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=2ORI1=2)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 5)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P3"], 333)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_and_newline_true(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=1)")
lines.append("AND(I1=1)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 5)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_and_newline_false1(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=2)")
lines.append("AND(I1=1)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 5)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P3"], 333)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_and_newline_false2(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=1)")
lines.append("AND(I1=2)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 5)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P3"], 333)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_and_newline_false3(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=2)")
lines.append("AND(I1=2)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 5)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P3"], 333)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_or_newline_true(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=1)")
lines.append("OR(I1=1)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 5)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_or_newline_false1(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=2)")
lines.append("OR(I1=1)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 5)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_or_newline_false2(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=1)")
lines.append("OR(I1=2)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 5)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_or_newline_false3(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=2)")
lines.append("OR(I1=2)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 5)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P3"], 333)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_andor_precedence(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=2)")
lines.append("OR(I1=2)")
lines.append("AND(P1=1)")
lines.append("OR(I1=1)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 5)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_nested1(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=1)")
lines.append("P2=222")
# The nested if
lines.append("IF(I1=1)")
lines.append("P6=666")
lines.append("ELSE")
lines.append("P7=777")
lines.append("ENDIF")
# End of nested if
lines.append("P4=444")
lines.append("ELSE")
lines.append("P3=333")
lines.append("P5=555")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 7)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["P4"], 444)
self.assertEqual(output_dict["P6"], 666)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_nested2(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=1)")
lines.append("P2=222")
# The nested if
lines.append("IF(I1=2)")
lines.append("P6=666")
lines.append("ELSE")
lines.append("P7=777")
lines.append("ENDIF")
# End of nested if
lines.append("P4=444")
lines.append("ELSE")
lines.append("P3=333")
lines.append("P5=555")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 7)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["P4"], 444)
self.assertEqual(output_dict["P7"], 777)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_nested3(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=2)")
lines.append("P2=222")
# The nested if
lines.append("IF(I1=1)")
lines.append("P6=666")
lines.append("ELSE")
lines.append("P7=777")
lines.append("ENDIF")
# End of nested if
lines.append("P4=444")
lines.append("ELSE")
lines.append("P3=333")
lines.append("P5=555")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 6)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P3"], 333)
self.assertEqual(output_dict["P5"], 555)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_nested4(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=2)")
lines.append("P2=222")
lines.append("P4=444")
lines.append("ELSE")
lines.append("P3=333")
# The nested if
lines.append("IF(I1=1)")
lines.append("P6=666")
lines.append("ELSE")
lines.append("P7=777")
lines.append("ENDIF")
# End of nested if
lines.append("P5=555")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 7)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P3"], 333)
self.assertEqual(output_dict["P5"], 555)
self.assertEqual(output_dict["P6"], 666)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_nested5(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=2)")
lines.append("P2=222")
lines.append("P4=444")
lines.append("ELSE")
lines.append("P3=333")
# The nested if
lines.append("IF(I1=2)")
lines.append("P6=666")
lines.append("ELSE")
lines.append("P7=777")
lines.append("ENDIF")
# End of nested if
lines.append("P5=555")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 7)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P3"], 333)
self.assertEqual(output_dict["P5"], 555)
self.assertEqual(output_dict["P7"], 777)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_if_nested6(self):
input_dict = {"P1": 1, "I1": 1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=1)")
lines.append("P2=222")
lines.append("P4=444")
lines.append("ELSE")
lines.append("P3=333")
# The nested if
lines.append("IF(I1=1)")
lines.append("P6=666")
lines.append("ELSE")
lines.append("P7=777")
lines.append("ENDI")
# End of nested if
lines.append("P5=555")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 6)
self.assertEqual(output_dict["P1"], 1)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["P4"], 444)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
self.assertEqual(output_dict["I1"], 1)
def test_while(self):
input_dict = {"P1": 1, "P2": 2}
lines = []
lines.append("Q1=1")
lines.append("Q2=2")
lines.append("Q1=Q1+1")
lines.append("WHILE(P1<10)")
lines.append("P1=P1+1")
lines.append("P2=P2+2")
lines.append("ENDWHILE")
lines.append("Q2=Q2+1")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 10)
self.assertEqual(output_dict["P2"], 20)
self.assertEqual(output_dict["Q1"], 2)
self.assertEqual(output_dict["Q2"], 3)
def test_while_endw(self):
input_dict = {"P1": 1, "P2": 2}
lines = []
lines.append("Q1=1")
lines.append("Q2=2")
lines.append("Q1=Q1+1")
lines.append("WHILE(P1<10)")
lines.append("P1=P1+1")
lines.append("P2=P2+2")
lines.append("ENDW")
lines.append("Q2=Q2+1")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 10)
self.assertEqual(output_dict["P2"], 20)
self.assertEqual(output_dict["Q1"], 2)
self.assertEqual(output_dict["Q2"], 3)
def test_while_nested(self):
input_dict = {"P1": 1, "P2": 2, "P3": 0, "P4": 0}
lines = []
lines.append("Q1=1")
lines.append("Q2=2")
lines.append("Q1=Q1+1")
lines.append("WHILE(P1<10)")
lines.append("P1=P1+1")
# Nested while
lines.append("P3=0")
lines.append("WHILE(P3<5)")
lines.append("P3=P3+1")
lines.append("P4=P4+2")
lines.append("ENDWHILE")
# End nested while
lines.append("P2=P2+2")
lines.append("ENDWHILE")
lines.append("Q2=Q2+1")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 6)
self.assertEqual(output_dict["P1"], 10)
self.assertEqual(output_dict["P2"], 20)
self.assertEqual(output_dict["P3"], 5)
self.assertEqual(output_dict["P4"], 90)
self.assertEqual(output_dict["Q1"], 2)
self.assertEqual(output_dict["Q2"], 3)
def test_parser_error(self):
input_dict = {"Q1": 42}
lines = []
lines.append("Q1=6")
lines.append("TAN")
parser = PMACParser(lines)
self.assertRaises(Exception, parser.parse, input_dict)
def test_unrecognised_token_exception(self):
lines = []
lines.append("Q1=5G+3")
self.assertRaises(Exception, PMACParser, lines)
def test_float_expected_error(self):
input_dict = {"Q1": 42}
lines = []
lines.append("IAND=Q1")
parser = PMACParser(lines)
self.assertRaises(ParserError, parser.parse, input_dict)
def test_unexpected_i_error(self):
input_dict = {"Q1": 42}
lines = []
lines.append("Q1=ENDIF")
parser = PMACParser(lines)
self.assertRaises(Exception, parser.parse, input_dict)
def test_bad_comparitor_error(self):
input_dict = {"Q1": 42}
lines = []
lines.append("IF(Q1COS44)")
parser = PMACParser(lines)
self.assertRaises(ParserError, parser.parse, input_dict)
def test_bad_unclosed_bracket_error(self):
input_dict = {"Q1": 42}
lines = []
lines.append("IF(Q1>44COS(3)")
parser = PMACParser(lines)
self.assertRaises(ParserError, parser.parse, input_dict)
def test_unexpected_endif_error(self):
input_dict = {"Q1": 42}
lines = []
lines.append("Q1=3")
lines.append("ENDIF")
lines.append("Q1=4")
parser = PMACParser(lines)
self.assertRaises(ParserError, parser.parse, input_dict)
def test_unexpected_else_error(self):
input_dict = {"Q1": 42}
lines = []
lines.append("Q1=3")
lines.append("ELSE")
lines.append("Q1=4")
parser = PMACParser(lines)
self.assertRaises(ParserError, parser.parse, input_dict)
def test_unexpected_endwhile_error(self):
input_dict = {"Q1": 42}
lines = []
lines.append("Q1=3")
lines.append("ENDWHILE")
lines.append("Q1=4")
parser = PMACParser(lines)
self.assertRaises(ParserError, parser.parse, input_dict)
def test_multiple_runs(self):
input_dict = {"P1": 42}
lines = []
lines.append("Q1=1")
lines.append("IF(P1=1)")
lines.append("P2=222")
lines.append("ELSE")
lines.append("P3=333")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"], 42)
self.assertEqual(output_dict["P3"], 333)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
output_dict_2 = parser.parse(input_dict)
self.assertEqual(len(output_dict_2), 4)
self.assertEqual(output_dict_2["P1"], 42)
self.assertEqual(output_dict_2["P3"], 333)
self.assertEqual(output_dict_2["Q1"], 1)
self.assertEqual(output_dict_2["Q2"], 2)
input_dict = {"P1": 1}
output_dict_3 = parser.parse(input_dict)
self.assertEqual(len(output_dict_3), 4)
self.assertEqual(output_dict_3["P1"], 1)
self.assertEqual(output_dict_3["P2"], 222)
self.assertEqual(output_dict_3["Q1"], 1)
self.assertEqual(output_dict_3["Q2"], 2)
def test_real_example1(self):
input_dict = {"P5": 2, "P6": 4, "P4805": 8, "P4905": 16, "P4806": 32, "P4906": 64}
lines = []
lines.append("Q7=((P(4800+5)*P5+P(4900+5))+(P(4800+6)*P6+P(4900+6)))/2")
lines.append("Q8=(P(4800+5)*P5+P(4900+5))-(P(4800+6)*P6+P(4900+6))")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 8)
self.assertEqual(output_dict["P5"], 2)
self.assertEqual(output_dict["P6"], 4)
self.assertEqual(output_dict["P4805"], 8)
self.assertEqual(output_dict["P4905"], 16)
self.assertEqual(output_dict["P4806"], 32)
self.assertEqual(output_dict["P4906"], 64)
self.assertEqual(output_dict["Q7"], 112)
self.assertEqual(output_dict["Q8"], -160)
def test_real_example2(self):
input_dict = {"P1": 21, "P2": 21.5, "P3": 22, "P4": 22.5, "P5": 23, "P6": 23.5, "P7": 24, "P8": 24.5,
"P17": 26, "P4801": 1, "P4802": 2, "P4803": 3, "P4804": 4, "P4805": 5, "P4806": 6, "P4807": 7,
"P4808": 8, "P4817": 9, "P4901": 10, "P4902": 11, "P4903": 12, "P4904": 13, "P4905": 14,
"P4906": 15, "P4907": 16, "P4908": 17, "P4917": 18, "Q21": 31, "Q22": 32, "Q23": 33, "Q24": 34,
"Q25": 35, "Q26": 36, "Q27": 37, "Q28": 38, "Q29": 3900, "Q30": 40}
lines = []
lines.append("Q1=(P(4800+1)*P1+P(4900+1))")
lines.append("Q5=(P(4800+2)*P2+P(4900+2))")
lines.append("Q9=(P(4800+7)*P7+P(4900+7))")
lines.append("IF(Q27=0)")
lines.append("Q2=(P(4800+3)*P3+P(4900+3))")
lines.append("Q3=(P(4800+5)*P5+P(4900+5))")
lines.append("Q4=(P(4800+3)*P3+P(4900+3))+(P(4800+8)*P8+P(4900+8))")
lines.append("Q6=(P(4800+4)*P4+P(4900+4))")
lines.append("Q7=(P(4800+6)*P6+P(4900+6))")
lines.append("Q8=(P(4800+4)*P4+P(4900+4))+(P(4800+17)*P17+P(4900+17))")
lines.append("ELSE")
lines.append("Q130=SQRT((Q24+Q29)*(Q24+Q29)-(Q28+(P(4800+17)*P17+P(4900+17))-Q30)*"
"(Q28+(P(4800+17)*P17+P(4900+17))-Q30))")
lines.append("Q128=TAN(Q26)*(Q130+Q21)")
lines.append("Q131=(P(4800+3)*P3+P(4900+3))-(P(4800+1)*P1+P(4900+1))-Q128")
lines.append("Q6=(ATAN(Q131/(Q130+Q22))+Q26)/2")
lines.append("Q133=(P(4800+5)*P5+P(4900+5))-(P(4800+1)*P1+P(4900+1))-Q128")
lines.append("Q7=(ATAN(Q133/(Q130+Q23))+Q26)/2")
lines.append("Q4=(P(4800+1)*P1+P(4900+1))+(P(4800+8)*P8+P(4900+8))")
lines.append("Q129=TAN(Q25)*(Q130+Q21)")
lines.append("Q132=(P(4800+4)*P4+P(4900+4))-(P(4800+2)*P2+P(4900+2))-Q129")
lines.append("Q2=(ATAN(Q132/(Q130+Q22))+Q25)/2")
lines.append("Q134=(P(4800+6)*P6+P(4900+6))-(P(4800+2)*P2+P(4900+2))-Q129")
lines.append("Q3=(ATAN(Q134/(Q130+Q23))+Q25)/2")
lines.append("Q8=(P(4800+2)*P2+P(4900+2))+(P(4800+17)*P17+P(4900+17))")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 53)
self.assertEqual(output_dict["P1"], 21)
self.assertEqual(output_dict["P2"], 21.5)
self.assertEqual(output_dict["P3"], 22)
self.assertEqual(output_dict["P4"], 22.5)
self.assertEqual(output_dict["P5"], 23)
self.assertEqual(output_dict["P6"], 23.5)
self.assertEqual(output_dict["P7"], 24)
self.assertEqual(output_dict["P8"], 24.5)
self.assertEqual(output_dict["P17"], 26)
self.assertEqual(output_dict["P4801"], 1)
self.assertEqual(output_dict["P4802"], 2)
self.assertEqual(output_dict["P4803"], 3)
self.assertEqual(output_dict["P4804"], 4)
self.assertEqual(output_dict["P4805"], 5)
self.assertEqual(output_dict["P4806"], 6)
self.assertEqual(output_dict["P4807"], 7)
self.assertEqual(output_dict["P4808"], 8)
self.assertEqual(output_dict["P4817"], 9)
self.assertEqual(output_dict["P4901"], 10)
self.assertEqual(output_dict["P4902"], 11)
self.assertEqual(output_dict["P4903"], 12)
self.assertEqual(output_dict["P4904"], 13)
self.assertEqual(output_dict["P4905"], 14)
self.assertEqual(output_dict["P4906"], 15)
self.assertEqual(output_dict["P4907"], 16)
self.assertEqual(output_dict["P4908"], 17)
self.assertEqual(output_dict["P4917"], 18)
self.assertEqual(output_dict["Q21"], 31)
self.assertEqual(output_dict["Q22"], 32)
self.assertEqual(output_dict["Q23"], 33)
self.assertEqual(output_dict["Q24"], 34)
self.assertEqual(output_dict["Q25"], 35)
self.assertEqual(output_dict["Q26"], 36)
self.assertEqual(output_dict["Q27"], 37)
self.assertEqual(output_dict["Q28"], 38)
self.assertEqual(output_dict["Q29"], 3900)
self.assertEqual(output_dict["Q30"], 40)
self.assertEqual(output_dict["Q1"], 31)
self.assertEqual(output_dict["Q5"], 54)
self.assertEqual(output_dict["Q9"], 184)
self.assertAlmostEqual(output_dict["Q130"], 3926.048395015)
self.assertAlmostEqual(output_dict["Q128"], 2874.963944354)
self.assertAlmostEqual(output_dict["Q131"], -2827.963944354)
self.assertAlmostEqual(output_dict["Q6"], 0.227391949)
self.assertAlmostEqual(output_dict["Q133"], -2776.963944354)
self.assertAlmostEqual(output_dict["Q7"], 0.476666233)
self.assertEqual(output_dict["Q4"], 244)
self.assertAlmostEqual(output_dict["Q129"], 2770.75511525)
self.assertAlmostEqual(output_dict["Q132"], -2721.75511525)
self.assertAlmostEqual(output_dict["Q2"], 0.242805324)
self.assertAlmostEqual(output_dict["Q134"], -2668.75511525)
self.assertAlmostEqual(output_dict["Q3"], 0.508241199)
self.assertEqual(output_dict["Q8"], 306)
def test_numpy_add(self):
p1 = np.array([1, 2, 3, 4])
input_dict = {"P1": p1}
lines = []
lines.append("Q1=P1+4")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"][0], 1)
self.assertEqual(output_dict["P1"][1], 2)
self.assertEqual(output_dict["P1"][2], 3)
self.assertEqual(output_dict["P1"][3], 4)
self.assertEqual(output_dict["Q1"][0], 5)
self.assertEqual(output_dict["Q1"][1], 6)
self.assertEqual(output_dict["Q1"][2], 7)
self.assertEqual(output_dict["Q1"][3], 8)
def test_numpy_subtract(self):
p1 = np.array([10, 20, 30])
p2 = np.array([1, 2, 3])
input_dict = {"P1": p1, "P2": p2}
lines = []
lines.append("Q1=P1-P2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"][0], 10)
self.assertEqual(output_dict["P1"][1], 20)
self.assertEqual(output_dict["P1"][2], 30)
self.assertEqual(output_dict["P2"][0], 1)
self.assertEqual(output_dict["P2"][1], 2)
self.assertEqual(output_dict["P2"][2], 3)
self.assertEqual(output_dict["Q1"][0], 9)
self.assertEqual(output_dict["Q1"][1], 18)
self.assertEqual(output_dict["Q1"][2], 27)
def test_numpy_multiply(self):
p1 = np.array([1, 2, 3, 4])
input_dict = {"P1": p1}
lines = []
lines.append("Q1=P1*3")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"][0], 1)
self.assertEqual(output_dict["P1"][1], 2)
self.assertEqual(output_dict["P1"][2], 3)
self.assertEqual(output_dict["P1"][3], 4)
self.assertEqual(output_dict["Q1"][0], 3)
self.assertEqual(output_dict["Q1"][1], 6)
self.assertEqual(output_dict["Q1"][2], 9)
self.assertEqual(output_dict["Q1"][3], 12)
def test_numpy_divide(self):
p1 = np.array([10, 20, 30])
p2 = np.array([2, 3, 4])
input_dict = {"P1": p1, "P2": p2}
lines = []
lines.append("Q1=P1/P2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"][0], 10)
self.assertEqual(output_dict["P1"][1], 20)
self.assertEqual(output_dict["P1"][2], 30)
self.assertEqual(output_dict["P2"][0], 2)
self.assertEqual(output_dict["P2"][1], 3)
self.assertEqual(output_dict["P2"][2], 4)
self.assertEqual(output_dict["Q1"][0], 5)
self.assertAlmostEqual(output_dict["Q1"][1], 6.666666666)
self.assertAlmostEqual(output_dict["Q1"][2], 7.5)
def test_numpy_mod(self):
p1 = np.array([8, 13])
input_dict = {"P1": p1}
lines = []
lines.append("Q1=P1%3")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"][0], 8)
self.assertEqual(output_dict["P1"][1], 13)
self.assertEqual(output_dict["Q1"][0], 2)
self.assertEqual(output_dict["Q1"][1], 1)
def test_numpy_bitand(self):
p1 = np.array([54254323, 92364])
input_dict = {"P1": p1}
lines = []
lines.append("Q1=P1&213411")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"][0], 54254323)
self.assertEqual(output_dict["P1"][1], 92364)
self.assertEqual(output_dict["Q1"][0], 213155)
self.assertEqual(output_dict["Q1"][1], 82048)
def test_numpy_bitor(self):
p1 = np.array([54254323, 92364])
input_dict = {"P1": p1}
lines = []
lines.append("Q1=P1|213411")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"][0], 54254323)
self.assertEqual(output_dict["P1"][1], 92364)
self.assertEqual(output_dict["Q1"][0], 54254579)
self.assertEqual(output_dict["Q1"][1], 223727)
def test_numpy_bitxor(self):
p1 = np.array([54254323, 92364])
input_dict = {"P1": p1}
lines = []
lines.append("Q1=P1^213411")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"][0], 54254323)
self.assertEqual(output_dict["P1"][1], 92364)
self.assertEqual(output_dict["Q1"][0], 54041424)
self.assertEqual(output_dict["Q1"][1], 141679)
def test_numpy_abs(self):
p1 = np.array([-3, 45])
input_dict = {"P1": p1}
lines = []
lines.append("Q1=ABS(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"][0], -3)
self.assertEqual(output_dict["P1"][1], 45)
self.assertEqual(output_dict["Q1"][0], 3)
self.assertEqual(output_dict["Q1"][1], 45)
def test_numpy_int(self):
p32 = np.array([3.141, -45.39])
input_dict = {"P32": p32}
lines = []
lines.append("Q3=INT(P32)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P32"][0], 3.141)
self.assertEqual(output_dict["P32"][1], -45.39)
self.assertEqual(output_dict["Q3"][0], 3)
self.assertEqual(output_dict["Q3"][1], -46)
def test_numpy_sqrt(self):
p45 = np.array([25, 2])
input_dict = {"P45": p45}
lines = []
lines.append("Q2=SQRT(P45)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P45"][0], 25)
self.assertEqual(output_dict["P45"][1], 2)
self.assertEqual(output_dict["Q2"][0], 5)
self.assertAlmostEqual(output_dict["Q2"][1], 1.414213562)
def test_numpy_exp(self):
p1 = np.array([33, 18])
input_dict = {"P1": p1}
lines = []
lines.append("Q1=EXP(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"][0], 33)
self.assertEqual(output_dict["P1"][1], 18)
self.assertAlmostEqual(output_dict["Q1"][0], 214643579785916.06)
self.assertAlmostEqual(output_dict["Q1"][1], 65659969.13733051)
def test_numpy_ln(self):
p1 = np.array([33, 18])
input_dict = {"P1": p1}
lines = []
lines.append("Q1=LN(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"][0], 33)
self.assertEqual(output_dict["P1"][1], 18)
self.assertAlmostEqual(output_dict["Q1"][0], 3.4965075614)
self.assertAlmostEqual(output_dict["Q1"][1], 2.8903717578)
def test_numpy_sin_deg(self):
p1 = np.array([30, -66])
input_dict = {"P1": p1}
lines = []
lines.append("Q1=SIN(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"][0], 30)
self.assertEqual(output_dict["P1"][1], -66)
self.assertAlmostEqual(output_dict["Q1"][0], 0.5)
self.assertAlmostEqual(output_dict["Q1"][1], -0.913545457)
def test_numpy_sin_rad(self):
p1 = np.array([30, -66])
input_dict = {"P1": p1, "I15": 1}
lines = []
lines.append("Q1=SIN(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"][0], 30)
self.assertEqual(output_dict["P1"][1], -66)
self.assertAlmostEqual(output_dict["Q1"][0], -0.988031624)
self.assertAlmostEqual(output_dict["Q1"][1], 0.026551154)
self.assertEqual(output_dict["I15"], 1)
def test_numpy_cos_deg(self):
p1 = np.array([60, -63])
input_dict = {"P1": p1}
lines = []
lines.append("Q1=COS(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"][0], 60)
self.assertEqual(output_dict["P1"][1], -63)
self.assertAlmostEqual(output_dict["Q1"][0], 0.5)
self.assertAlmostEqual(output_dict["Q1"][1], 0.453990499)
def test_numpy_cos_rad(self):
p1 = np.array([60, -63])
input_dict = {"P1": p1, "I15": 1}
lines = []
lines.append("Q1=COS(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"][0], 60)
self.assertEqual(output_dict["P1"][1], -63)
self.assertAlmostEqual(output_dict["Q1"][0], -0.95241298)
self.assertAlmostEqual(output_dict["Q1"][1], 0.985896581)
self.assertEqual(output_dict["I15"], 1)
def test_numpy_tan_deg(self):
p1 = np.array([60, -63])
input_dict = {"P1": p1}
lines = []
lines.append("Q1=TAN(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"][0], 60)
self.assertEqual(output_dict["P1"][1], -63)
self.assertAlmostEqual(output_dict["Q1"][0], 1.732050808)
self.assertAlmostEqual(output_dict["Q1"][1], -1.96261050)
def test_numpy_tan_rad(self):
p1 = np.array([60, -63])
input_dict = {"P1": p1, "I15": 1}
lines = []
lines.append("Q1=TAN(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"][0], 60)
self.assertEqual(output_dict["P1"][1], -63)
self.assertAlmostEqual(output_dict["Q1"][0], 0.320040389)
self.assertAlmostEqual(output_dict["Q1"][1], -0.169749752)
self.assertEqual(output_dict["I15"], 1)
def test_numpy_asin_deg(self):
p1 = np.array([0.5, -0.213])
input_dict = {"P1": p1}
lines = []
lines.append("Q1=ASIN(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"][0], 0.5)
self.assertEqual(output_dict["P1"][1], -0.213)
self.assertAlmostEqual(output_dict["Q1"][0], 30)
self.assertAlmostEqual(output_dict["Q1"][1], -12.298218098055672)
def test_numpy_asin_rad(self):
p1 = np.array([0.5, -0.213])
input_dict = {"P1": p1, "I15": 1}
lines = []
lines.append("Q1=ASIN(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"][0], 0.5)
self.assertEqual(output_dict["P1"][1], -0.213)
self.assertAlmostEqual(output_dict["Q1"][0], 0.523598776)
self.assertAlmostEqual(output_dict["Q1"][1], -0.214644397)
self.assertEqual(output_dict["I15"], 1)
def test_numpy_acos_deg(self):
p1 = np.array([0.5, -0.213])
input_dict = {"P1": p1}
lines = []
lines.append("Q1=ACOS(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"][0], 0.5)
self.assertEqual(output_dict["P1"][1], -0.213)
self.assertAlmostEqual(output_dict["Q1"][0], 60)
self.assertAlmostEqual(output_dict["Q1"][1], 102.298218098)
def test_numpy_acos_rad(self):
p1 = np.array([0.5, -0.213])
input_dict = {"P1": p1, "I15": 1}
lines = []
lines.append("Q1=ACOS(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"][0], 0.5)
self.assertEqual(output_dict["P1"][1], -0.213)
self.assertAlmostEqual(output_dict["Q1"][0], 1.047197551)
self.assertAlmostEqual(output_dict["Q1"][1], 1.7854407247)
self.assertEqual(output_dict["I15"], 1)
def test_numpy_atan_deg(self):
p1 = np.array([0.5, -0.213])
input_dict = {"P1": p1}
lines = []
lines.append("Q1=ATAN(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 2)
self.assertEqual(output_dict["P1"][0], 0.5)
self.assertEqual(output_dict["P1"][1], -0.213)
self.assertAlmostEqual(output_dict["Q1"][0], 26.565051177)
self.assertAlmostEqual(output_dict["Q1"][1], -12.02430666)
def test_numpy_atan_rad(self):
p1 = np.array([0.5, -0.213])
input_dict = {"P1": p1, "I15": 1}
lines = []
lines.append("Q1=ATAN(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"][0], 0.5)
self.assertEqual(output_dict["P1"][1], -0.213)
self.assertAlmostEqual(output_dict["Q1"][0], 0.463647609)
self.assertAlmostEqual(output_dict["Q1"][1], -0.209863741)
self.assertEqual(output_dict["I15"], 1)
def test_numpy_atan2_deg(self):
p1 = np.array([0.5, -0.213])
input_dict = {"P1": p1, "Q0": 2}
lines = []
lines.append("Q1=ATAN2(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"][0], 0.5)
self.assertEqual(output_dict["P1"][1], -0.213)
self.assertAlmostEqual(output_dict["Q1"][0], 14.036243467934)
self.assertAlmostEqual(output_dict["Q1"][1], -6.079086119)
self.assertEqual(output_dict["Q0"], 2)
def test_numpy_atan2_rad(self):
p1 = np.array([0.5, -0.213])
input_dict = {"P1": p1, "I15": 1, "Q0": 2}
lines = []
lines.append("Q1=ATAN2(P1)")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"][0], 0.5)
self.assertEqual(output_dict["P1"][1], -0.213)
self.assertAlmostEqual(output_dict["Q1"][0], 0.244978663127)
self.assertAlmostEqual(output_dict["Q1"][1], -0.106100068)
self.assertEqual(output_dict["I15"], 1)
self.assertEqual(output_dict["Q0"], 2)
def test_numpy_if_all_true(self):
p1 = np.array([42, 45])
input_dict = {"P1": p1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1>40)")
lines.append("P2=222")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 4)
self.assertEqual(output_dict["P1"][0], 42)
self.assertEqual(output_dict["P1"][1], 45)
self.assertEqual(output_dict["P2"], 222)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
def test_numpy_if_all_false(self):
p1 = np.array([42, 45])
input_dict = {"P1": p1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1<40)")
lines.append("P2=222")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 3)
self.assertEqual(output_dict["P1"][0], 42)
self.assertEqual(output_dict["P1"][1], 45)
self.assertEqual(output_dict["Q1"], 1)
self.assertEqual(output_dict["Q2"], 2)
def test_numpy_if_all_different(self):
p1 = np.array([42, 45])
input_dict = {"P1": p1}
lines = []
lines.append("Q1=1")
lines.append("IF(P1<43)")
lines.append("P2=222")
lines.append("ENDIF")
lines.append("Q2=2")
parser = PMACParser(lines)
self.assertRaises(Exception, parser.parse, input_dict)
def test_numpy_while_all_true(self):
p1 = np.array([20, 40])
p3 = np.array([1, 1])
input_dict = {"P1": p1, "P2": 2, "P3": p3}
lines = []
lines.append("Q1=1")
lines.append("Q2=2")
lines.append("Q1=Q1+1")
lines.append("WHILE(P3<10)")
lines.append("P1=P1+1")
lines.append("P2=P2+2")
lines.append("P3=P3+1")
lines.append("ENDWHILE")
lines.append("Q2=Q2+1")
parser = PMACParser(lines)
output_dict = parser.parse(input_dict)
self.assertEqual(len(output_dict), 5)
self.assertEqual(output_dict["P1"][0], 29)
self.assertEqual(output_dict["P1"][1], 49)
self.assertEqual(output_dict["P2"], 20)
self.assertEqual(output_dict["P3"][0], 10)
self.assertEqual(output_dict["P3"][1], 10)
self.assertEqual(output_dict["Q1"], 2)
self.assertEqual(output_dict["Q2"], 3)
def test_numpy_while_different(self):
p1 = np.array([20, 40])
p3 = np.array([1, 2])
input_dict = {"P1": p1, "P2": 2, "P3": p3}
lines = []
lines.append("Q1=1")
lines.append("Q2=2")
lines.append("Q1=Q1+1")
lines.append("WHILE(P3<10)")
lines.append("P1=P1+1")
lines.append("P2=P2+2")
lines.append("P3=P3+1")
lines.append("ENDWHILE")
lines.append("Q2=Q2+1")
parser = PMACParser(lines)
self.assertRaises(Exception, parser.parse, input_dict)
def test_real_example2_numpy(self):
p1 = np.array([21, 41])
p2 = np.array([21.5, 41.5])
p3 = np.array([22, 42])
p4 = np.array([22.5, 42.5])
p5 = np.array([23, 43])
p6 = np.array([23.5, 43.5])
p7 = np.array([24, 44])
# Author: <NAME>
# Date: February 8, 2021
# Purpose: Collection of custom functions for running CMA simulations of KdV,
# analysis functions, and renormalization scripts. Generates some images
# from past papers, but not all (didn't want / need to duplicate everything)
# Translation of code from UW PhD in Matlab.
# import libraries
import numpy as np
import scipy as sp
import scipy.integrate  # makes sp.integrate.solve_ivp available below
import matplotlib.pyplot as plt
from matplotlib import animation
import glob
import re
def fftnorm(u_full):
"""Computes normalized FFT (such that FFT and IFFT are symmetrically normalized)
Parameters
----------
u_full : 1D Numpy Array (N,)
The vector whose discrete FFT is to be computed
Returns
-------
normalizedFFT : 1D Numpy Array (N,)
The transformed version of that vector
"""
N = u_full.shape[0]
normalizedFFT = np.fft.fft(u_full)*1/N
return normalizedFFT
def ifftnorm(u_full):
"""Computes normalized IFFT (such that FFT and IFFT are symmetrically normalized)
Parameters
----------
u_full : 1D Numpy Array (N,)
The vector whose discrete IFFT is to be computed
Returns
-------
normalizedIFFT : 1D Numpy Array (N,)
The transformed version of that vector
"""
N = u_full.shape[0]
normalizedIFFT = np.real(np.fft.ifft(u_full)*N)
return normalizedIFFT
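# Illustrative sketch (not part of the original module): a quick self-check that
# fftnorm and ifftnorm are inverses under the symmetric 1/N normalization.
# All names below (e.g. _example_fft_roundtrip) are example-only additions.
def _example_fft_roundtrip():
    """Verify that ifftnorm(fftnorm(u)) recovers a sample real signal."""
    x = np.linspace(0, 2*np.pi, 16, endpoint=False)
    u = np.sin(x) + 0.5*np.cos(3*x)
    u_hat = fftnorm(u)        # forward transform, scaled by 1/N
    u_back = ifftnorm(u_hat)  # inverse transform, scaled by N
    assert np.allclose(u, u_back)
    return u_hat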
def convolutionSumKdV(u,v,alpha):
"""Computes convolution sum associated with RHS of KdV ODE
C_k(u,v) = -(alpha * 1i * k) / 2 * sum_{i+j = k} u_i v_j
Computed in real space to avoid loops and then converted back
to Fourier space.
Parameters
----------
u : 1D Numpy Array (N,)
One of the two vectors being convolved
v : 1D Numpy Array (N,)
One of the two vectors being convolved
alpha : float
Degree of nonlinearity in KdV
Returns
-------
convo : 1D Numpy Array (N,)
Convolution of the two vectors
"""
# generate array of wavenumbers
L = u.shape[0]
k = np.concatenate([np.arange(0,L/2),np.arange(-L/2,0)])
if v.shape[0]!=L:
raise ValueError('u and v must be the same length.')
# compute double sum in real space, then apply scalar multiplier
convo = fftnorm(ifftnorm(u)*ifftnorm(v))
convo = -alpha/2*1j*k*convo
return convo
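# Illustrative sketch (not part of the original module): evaluating the KdV
# convolution term C_k(u,u) for a single sine mode. The quadratic interaction
# of sin(x) only populates low wavenumbers, which this example returns for
# inspection. Names here are example-only.
def _example_convolution_sum(alpha=1.0):
    N = 16
    x = np.linspace(0, 2*np.pi, 2*N, endpoint=False)
    u_hat = fftnorm(np.sin(x))
    # Fourier coefficients of -(alpha*1j*k/2) * (u*u), computed in real space
    return convolutionSumKdV(u_hat, u_hat, alpha)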
# RHS: Right hand side functions for CMA and non-renormalized KdV
def markovKdV(u,M,alpha):
"""Computes nonlinear part of Markov term in KdV
C_k(u,v) = -(alpha * 1i * k) / 2 * sum_{i+j = k} u_i v_j
where the sum of i and j is over a "full" system with M positive modes (user specified)
Computed in real space to avoid loops and then converted back
to Fourier space.
Parameters
----------
u : 1D Numpy Array (N,)
Positive modes of state vector whose RHS is being computed
M : int
Number of positive modes in "full" model for intermediary calculations
alpha : float
Degree of nonlinearity in KdV
Returns
-------
nonlin0 : 1D Numpy Array (2*M,)
Nonlinear part of Markov term for given state vector
u_full : 1D Numpy array (2*M,)
"full" state vector for use in later computations
"""
# construct full Fourier vector from only the positive modes
u_full = np.zeros(2*M) +1j*np.zeros(2*M)
u_full[0:u.shape[0]] = u
u_full[2*M-u.shape[0]+1:] = np.conj(np.flip(u[1:]))
# compute the convolution sum
nonlin0 = convolutionSumKdV(u_full,u_full,alpha)
return nonlin0,u_full
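# Illustrative sketch (not part of the original module): building the Markov
# (memoryless) term for a reduced state. The N resolved positive modes are
# embedded in a "full" 2*M vector before the convolution is evaluated.
def _example_markov_term(N=8, M=12, alpha=1.0):
    x = np.linspace(0, 2*np.pi, 2*N, endpoint=False)
    u = fftnorm(np.sin(x))[0:N]               # keep only the positive modes
    nonlin0, u_full = markovKdV(u, M, alpha)
    return nonlin0[0:N], u_full                # Markov contribution to resolved modes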
def tModelKdV(u_full,nonlin0,alpha,F_modes):
"""Computes t-model term in KdV
C_k(u,v) = -(alpha * 1i * k) / 2 * sum_{i+j = k, i and j in F} u_i v_j
where the sum of i and j is over a "full" system with M positive modes (user specified)
Computed in real space to avoid loops and then converted back
to Fourier space.
Parameters
----------
u_full : Numpy array (2M,1)
Current state of u in full form
nonlin0 : Numpy array (2M,1)
Markov term (for convolving)
alpha : float
Degree of nonlinearity in KdV
F_modes : Numpy array
Set of resolved modes (and aliasing modes) to zero out
Returns
-------
nonlin1 : 1D Numpy Array (2*M,)
t-model term
uuStar : 1D Numpy array (2*M,)
unresolved modes of state vector convolved with itself
"""
uuStar = np.copy(nonlin0)
uuStar[F_modes] = 0
nonlin1 = 2*convolutionSumKdV(u_full, uuStar, alpha)
return nonlin1,uuStar
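# Illustrative sketch (not part of the original module): constructing the
# resolved (F) and unresolved (G) mode index sets the same way RHSKdV does,
# then evaluating the t-model term for a reduced sine initial condition.
def _example_t_model_term(N=8, M=12, alpha=1.0):
    x = np.linspace(0, 2*np.pi, 2*N, endpoint=False)
    u = fftnorm(np.sin(x))[0:N]
    nonlin0, u_full = markovKdV(u, M, alpha)
    F_modes = np.concatenate([np.arange(0, N),
                              np.arange(2*N-1, M+N+2),
                              np.arange(2*M-N+1, 2*M)])
    nonlin1, uuStar = tModelKdV(u_full, nonlin0, alpha, F_modes)
    return nonlin1[0:N]                        # t-model contribution to resolved modes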
def t2ModelKdV(u_full,nonlin0,uuStar,alpha,F_modes,G_modes,k,epsilon):
"""Computes second order ROM term in KdV
*see paper / symbolic notebook for expression*
Computed in real space to avoid loops and then converted back
to Fourier space.
Parameters
----------
u_full : Numpy array (2M,1)
Current state of u in full form
nonlin0 : Numpy array (2M,1)
Markov term (for convolving)
uuStar : 1D Numpy array (2*M,)
Unresolved modes of state vector convolved with itself
alpha : float
Degree of nonlinearity in KdV
F_modes : Numpy array
Set of resolved modes (and aliasing modes) to zero out
G_modes : Numpy array
Set of unresolved modes (and aliasing modes) to zero out
k : Numpy array (2M,1)
Array of wavenumbers
epsilon : float
Size of linear term (stiffness)
Returns
-------
nonlin2 : 1D Numpy Array (2*M,)
t2-model term
uk3 : 1D Numpy array (2*M,)
Resolved modes of state vector multiplied by k^3
uu : 1D Numpy array (2*M,)
Resolved modes of state vector convolved with itself
A, AStar, B, BStar, C, CStar, D, DStar : 1D Numpy arrays (2*M,)
Specific convolutions used as inner terms in future terms
"""
# compute inner convolutions
uu = np.copy(nonlin0)
uu[G_modes] = 0
uk3 = k**3*u_full
A = k**3*uu
AStar = k**3*uuStar
B = convolutionSumKdV(1j*epsilon**2*uk3+uu,u_full,alpha)
BStar = np.copy(B)
B[G_modes] = 0
BStar[F_modes] = 0
C = convolutionSumKdV(uuStar,u_full,alpha)
CStar = np.copy(C)
C[G_modes] = 0
CStar[F_modes] = 0
D = convolutionSumKdV(uuStar,uuStar,alpha)
DStar = np.copy(D)
D[G_modes] = 0
DStar[F_modes] = 0
# compute actual term
nonlin2 = -2*convolutionSumKdV(u_full,1j*epsilon**2*AStar - 2*BStar + 2*CStar,alpha) - 2*D
return nonlin2,uk3,uu,A,AStar,B,BStar,C,CStar,D,DStar
def t3ModelKdV(alpha,F_modes,G_modes,k,epsilon,u_full,uu,uuStar,uk3,A,AStar,B,BStar,C,CStar,DStar):
"""Computes third order ROM term in KdV
*see paper / symbolic notebook for expression*
Computed in real space to avoid loops and then converted back
to Fourier space.
Parameters
----------
alpha : float
Degree of nonlinearity in KdV
F_modes : Numpy array
Set of resolved modes (and aliasing modes) to zero out
G_modes : Numpy array
Set of unresolved modes (and aliasing modes) to zero out
k : Numpy array (2M,1)
Array of wavenumbers
epsilon : float
Size of linear term (stiffness)
u_full : Numpy array (2M,1)
Current state of u in full form
uu : 1D Numpy array (2*M,)
Resolved modes of state vector convolved with itself
uuStar : 1D Numpy array (2*M,)
Unresolved modes of state vector convolved with itself
uk3 : 1D Numpy array (2*M,)
Resolved modes of state vector multiplied by k^3
A, AStar, B, BStar, C, CStar, DStar : 1D Numpy arrays (2*M,)
Specific convolutions used as inner terms in future terms
Returns
-------
nonlin3 : 1D Numpy Array (2*M,)
t3-model term
uk6 : 1D Numpy array (2*M,)
Resolved modes of state vector multiplied by k^6
E, EStar, F, FStar : 1D Numpy arrays (2*M,)
Specific convolutions used as inner terms in future terms
"""
# compute internal convolutions
uk6 = k**3*uk3
E = convolutionSumKdV(1j*epsilon**2*uk3+uu,1j*epsilon**2*uk3+uu,alpha)
EStar = np.copy(E)
E[G_modes] = 0
EStar[F_modes] = 0
F = convolutionSumKdV(uuStar,1j*epsilon**2*uk3+uu,alpha)
FStar = np.copy(F)
F[G_modes] = 0
FStar[F_modes] = 0
int1 = -2*BStar+CStar
int2 = (convolutionSumKdV(u_full,
-epsilon**4*uk6
+1j*epsilon**2*(A+AStar)
+2*(B-2*C)
+2*(CStar-2*BStar),
alpha))
int2[F_modes] = 0
int3 = EStar-FStar
int4 = np.copy(DStar)
int5 = CStar-BStar
# compute actual 3rd order term
nonlin3 = (2*convolutionSumKdV(u_full,-k**3*epsilon**4*AStar
+2*1j*epsilon**2*k**3*int1
+2*int2+2*int3+2*int4,alpha)
+6*convolutionSumKdV(uuStar,1j*epsilon**2*AStar + 2*int5,alpha))
return nonlin3,uk6,E,EStar,F,FStar
def t4ModelKdV(alpha,F_modes,G_modes,k,epsilon,u_full,uu,uuStar,uk3,uk6,A,AStar,B,BStar,C,CStar,D,DStar,E,EStar,F,FStar):
"""Computes fourth order ROM term in KdV
*see paper / symbolic notebook for expression*
Computed in real space to avoid loops and then converted back
to Fourier space.
Parameters
----------
alpha : float
Degree of nonlinearity in KdV
F_modes : Numpy array
Set of resolved modes (and aliasing modes) to zero out
G_modes : Numpy array
Set of unresolved modes (and aliasing modes) to zero out
k : Numpy array (2M,1)
Array of wavenumbers
epsilon : float
Size of linear term (stiffness)
u_full : Numpy array (2M,1)
Current state of u in full form
uu : 1D Numpy array (2*M,)
Resolved modes of state vector convolved with itself
uuStar : 1D Numpy array (2*M,)
Unresolved modes of state vector convolved with itself
uk3 : 1D Numpy array (2*M,)
Resolved modes of state vector multiplied by k^3
uk6 : 1D Numpy array (2*M,)
Resolved modes of state vector multiplied by k^6
A, AStar, B, BStar, C, CStar, DStar, E, EStar, F, FStar : 1D Numpy arrays (2*M,)
Specific convolutions used as inner terms in future terms
Returns
-------
nonlin4 : 1D Numpy Array (2*M,)
t4-model term
"""
# compute internal convolutions
internal1 = (convolutionSumKdV(u_full,-epsilon**4*uk6+1j*epsilon**2*(A+AStar)
+2*B-4*C-4*BStar+2*CStar,alpha))
internal1[F_modes] = 0
internal2 = (1j*epsilon**2*k**3*convolutionSumKdV(u_full,-3*epsilon**4*uk6
+1j*epsilon**2*(3*A+AStar)
-2*(-3*B+5*C)
+2*(-3*BStar+CStar),alpha))
internal2[F_modes] = 0
auxiliary1 = 2*convolutionSumKdV(u_full,epsilon**4*uk6-1j*epsilon**2*(A+3*AStar)
+2*(3*C-B)+2*(5*BStar-3*CStar),alpha)
auxiliary1[G_modes] = 0
auxiliary2 = 2*convolutionSumKdV(u_full,-3*epsilon**4*uk6+1j*epsilon**2*(3*A+AStar)
+2*(3*B-5*C)+2*(-3*BStar+CStar),alpha)
auxiliary2[F_modes] = 0
internal3 = convolutionSumKdV(u_full,1j*k**3*uk6*epsilon**6
+k**3*epsilon**4*(A-AStar)
+2*1j*epsilon**2*k**3*(3*C-B)
+2*1j*epsilon**2*k**3*(-3*BStar+CStar)
+auxiliary1+auxiliary2
-2*(E-2*F)
+2*(3*EStar-2*FStar)
-6*D+2*DStar,alpha)
internal3[F_modes]= 0
internal4 = convolutionSumKdV(1j*epsilon**2*uk3+uu,3*epsilon**4*uk6-1j*epsilon**2*(3*A+AStar)
+2*(-3*B+5*C)+2*(3*BStar-CStar),alpha)
internal4[F_modes] = 0
internal5 = convolutionSumKdV(uuStar,-epsilon**4*uk6+1j*epsilon**2*(A+3*AStar)
+2*B-6*C-10*BStar+6*CStar,alpha)
internal5[F_modes] = 0
# compute actual fourth order term
nonlin4 = (2*convolutionSumKdV(u_full,-1j*epsilon**6*k**6*AStar
+2*k**6*epsilon**4*(3*BStar-CStar)
+2*internal2
+2*internal3
+2*internal4
-2*k**3*1j*epsilon**2*(2*FStar-3*EStar)
+2*k**3*1j*epsilon**2*DStar
+2*internal5,alpha)
+8*convolutionSumKdV(uuStar,-k**3*epsilon**4*AStar
+2*1j*epsilon**2*k**3*(-2*BStar+CStar)
+2*internal1
+2*(EStar-FStar)
+2*DStar,alpha)
-48*convolutionSumKdV(BStar,1j*epsilon**2*AStar+2*CStar,alpha)
+6*convolutionSumKdV(1j*epsilon**2*AStar+2*(BStar+CStar),
1j*epsilon**2*AStar+2*(BStar+CStar),alpha)
)
nonlin4 = -nonlin4
return nonlin4
def RHSKdV(t,u,params):
"""
Computes the RHS for a full KdV or ROM simulation. For use in solver.
Parameters
----------
t : float
Current time
u : Numpy array (N,)
Current state vector
params : Dictionary
Dictionary of relevant parameters (see below)
N : float, number of positive modes in simulation
M : float, number of positive modes in "full" intermediate computation
alpha : float, degree of nonlinearity in KdV
epsilon : float, size of linear term (stiffness)
tau : float, time decay modifier
coeffs : Numpy array, renormalization coefficients for ROM (None if no ROM)
Returns
-------
RHS : 1D Numpy array (N,)
Derivative of each positive mode in state vector
"""
# extract parameters from dictionary
N = params['N']
M = params['M']
alpha = params['alpha']
epsilon = params['epsilon']
tau = params['tau']
coeffs = params['coeffs']
# construct wavenumber array
k = np.concatenate([np.arange(0,M),np.arange(-M,0)])
# Linear and Markov term
nonlin0,u_full = markovKdV(u,M,alpha)
RHS = 1j*k[0:N]**3*epsilon**2*u + nonlin0[0:N]
if (np.any(coeffs == None)):
order = 0
else:
order = coeffs.shape[0]
if (order >= 1):
# define which modes are resolved / unresolved in full array
F_modes = np.concatenate([np.arange(0,N),np.arange(2*N-1,M+N+2),np.arange(2*M-N+1,2*M)])
G_modes = np.arange(N,2*M-N+1)
# compute t-model term
nonlin1,uuStar = tModelKdV(u_full,nonlin0,alpha,F_modes)
RHS = RHS + coeffs[0]*nonlin1[0:N]*t**(1-tau)
order = coeffs.shape[0]
if (order >= 2):
# compute t2-model term
nonlin2,uk3,uu,A,AStar,B,BStar,C,CStar,D,DStar = t2ModelKdV(u_full,nonlin0,uuStar,alpha,F_modes,G_modes,k,epsilon)
RHS = RHS + coeffs[1]*nonlin2[0:N]*t**(2*(1-tau))
if (order >= 3):
# compute t3-model term
nonlin3,uk6,E,EStar,F,FStar = t3ModelKdV(alpha,F_modes,G_modes,k,epsilon,u_full,uu,uuStar,uk3,A,AStar,B,BStar,C,CStar,DStar)
RHS = RHS + coeffs[2]*nonlin3[0:N]*t**(3*(1-tau))
if (order == 4):
# compute t4-model term
nonlin4 = t4ModelKdV(alpha,F_modes,G_modes,k,epsilon,u_full,uu,uuStar,uk3,uk6,A,AStar,B,BStar,C,CStar,D,DStar,E,EStar,F,FStar)
RHS = RHS + coeffs[3]*nonlin4[0:N]*t**(4*(1-tau))
return RHS
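# Illustrative sketch (not part of the original module): one right-hand-side
# evaluation for a first-order renormalized ROM. Passing coeffs=np.array([1.0])
# selects order 1, so the linear, Markov and t-model terms are all included;
# coeffs=None would give the Markov-only model. Parameter values are arbitrary.
def _example_rhs_evaluation():
    N, M = 8, 12
    params = {'N': N, 'M': M, 'alpha': 1.0, 'epsilon': 0.1,
              'tau': 1.0, 'coeffs': np.array([1.0])}
    x = np.linspace(0, 2*np.pi, 2*N, endpoint=False)
    u = fftnorm(np.sin(x))[0:N]
    return RHSKdV(0.5, u, params)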
def getMass(u,N):
"""Computes mass in first N modes for all timesteps from solution array u
Parameters
----------
u : 2D Numpy Array (M,tList)
Positive modes of state vector for all timesteps
N : int
Number of positive modes to include in mass measurement
Returns
-------
mass : 1D Numpy Array (tList,)
Mass (sum of squared mode magnitudes) in first N modes at all timesteps
"""
mass = np.sum(2*(abs(u[0:N,]))**2,0)
return mass
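# Illustrative sketch (not part of the original module): mass in the first N
# modes of a toy two-snapshot "solution history" (one timestep per column).
def _example_mass(N=4):
    x = np.linspace(0, 2*np.pi, 2*N, endpoint=False)
    u0 = fftnorm(np.sin(x))[0:N]
    u_hist = np.stack([u0, 2*u0], axis=1)   # two snapshots as columns
    return getMass(u_hist, N)               # one mass value per timestep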
def runSim(params):
"""
Runs an actual ROM or non-ROM simulation of KdV
Parameters
----------
params : Dictionary
Dictionary of relevant parameters (see below)
N : float, number of positive modes in simulation
M : float, number of positive modes in "full" intermediate computation
alpha : float, degree of nonlinearity in KdV
epsilon : float, size of linear term (stiffness)
tau : float, time decay modifier
coeffs : Numpy array, renormalization coefficients for ROM (None if no ROM)
IC : function handle, initial condition of simulation
endtime : float, final time to simulate to
timesteps: Numpy array, specific timesteps for which to save solution
Returns
-------
uSim : ODE solver output
Output solution from sp.integrate.solve_ivp (includes state vector at all timesteps, time vector, etc.)
"""
# unpack parameters from dictionary
N = params['N']
IC = params['IC']
endtime = params['endtime']
timesteps = params['timesteps']
# generate initial condition
x = np.linspace(0,2*np.pi-2*np.pi/(2*N),2*N)
y = IC(x)
uFull = fftnorm(y)
u = uFull[0:N]
# define RHS in form appropriate for solve_ivp
def myRHS(t,y):
out = RHSKdV(t,y,params)
return out
# solve the IVP
uSim = sp.integrate.solve_ivp(fun = myRHS, t_span = [0,endtime], y0 = u,method = "BDF", t_eval = timesteps)
return uSim
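# Illustrative sketch (not part of the original module): a short Markov-only
# simulation. The dictionary keys mirror the params documented in runSim;
# the resolution and end time are kept small so the example runs quickly.
def _example_run_markov_sim():
    timesteps = np.linspace(0, 1, 11)
    params = {'N': 8, 'M': 12, 'alpha': 1.0, 'epsilon': 0.1, 'tau': 1.0,
              'coeffs': None, 'IC': np.sin, 'endtime': 1.0,
              'timesteps': timesteps}
    sol = runSim(params)
    return sol.t, sol.y    # times and positive-mode trajectories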
def makeRealSpace(u,N):
"""Takes a completed simulation and finds the real space solution at all timesteps for a chosen subset of modes
Parameters
----------
u : Numpy array (M,t)
Output of simulation giving energy in first M positive modes for all timesteps t
N : int
Number of positive modes to use in real space
Returns
-------
uReal : Numpy array (2xN,t)
Real space solution at all times
"""
# identify shapes of arrays
uShape = u.shape
numTimes = uShape[1]
# drop modes we don't wish to keep
uNew = u[0:N,:]
# generate full vector (with negative modes)
uFull = np.zeros((2*N,numTimes)) + 1j*0
uFull[0:N,:] = uNew
uFull[2*N-N+1:,:] = np.conj(np.flip(uNew[1:,:],0))
# initialize output
uReal = np.zeros(uFull.shape)
# take inverse transform for each timestep
# NOTE: is there a vectorized way to do this?
for i in np.arange(0,numTimes):
uReal[:,i] = ifftnorm(uFull[:,i])
return uReal
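# Illustrative sketch (not part of the original module): converting a (fake)
# two-snapshot mode history back to real space on the 2*N point grid.
def _example_real_space(N=8):
    x = np.linspace(0, 2*np.pi, 2*N, endpoint=False)
    u0 = fftnorm(np.sin(x))[0:N]
    u_hist = np.stack([u0, 0.5*u0], axis=1)   # columns are timesteps
    u_real = makeRealSpace(u_hist, N)          # shape (2*N, 2)
    return x, u_real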
def makeAnimations(uList,t,legendList):
"""
Creates an animation from a list of simulations
Parameters
----------
uList : List of Numpy arrays of size (N,T)
Set of state vector evolutions to animate
t : Numpy array (T,)
Timesteps associated with simulations (must all be the same)
legendList : List of strings
Labels for each simulation
Returns
-------
anim : animation object
output from animation.FuncAnimation
"""
# identify the resolution to use for plots and generate x grid
N = min([x.shape[0] for x in uList])
xgrid = np.linspace(0,2*np.pi*(2*N-1)/(2*N),2*N)
# generate real space solutions
realSols = [makeRealSpace(x,N) for x in uList]
# initialize figure
myFig = plt.figure()
ax = plt.subplot()
ax.axis(xmin = 0,xmax = 2*np.pi-np.pi/N,ymin = -2, ymax = 4)
# create empty list of lines to populate each iteration
lineList = [ax.plot([],[]) for i in range(len(uList))]
# define function to draw each frame
def makeFrame(n):
for i in range(len(uList)):
lineList[i][0].set_data(xgrid,realSols[i][:,n])
plt.title('t = '+str(round(t[n],1)))
plt.legend(legendList, loc = "upper right")
return lineList
# generate animation
anim = animation.FuncAnimation(fig = myFig,func = makeFrame,frames = t.shape[0])
return anim
def renormalize(fullM, endtime, Nlist, Mlist, epsilon, alpha, tau, timesteps, IC = np.sin, plots = False):
"""
Finds renormalization coefficients based on a single simulation. If the
simulation doesn't yet exist, it creates it
Parameters
----------
fullM : int
Size of full simulation to base fits on
endtime : int
Endtime of full simulation
Nlist : list of ints
List of resolutions for which to find coefficients
Mlist : list of ints
List of intermediary "full" simulations to use for ROMs
epsilon : float
size of linear term (stiffness)
alpha : float
degree of nonlinearity in KdV
tau : float
time decay modifier
timesteps : Numpy array
specific timesteps for which to save solution
IC : function handle
initial condition of simulation (default np.sin)
plots : boolean
Indicates whether to generate plots (default: False)
Returns
-------
coeffsArray1 : Numpy array (length(Nlist),1)
Renormalization coefficients for t-model only
coeffsArray2 : Numpy array (length(Nlist),2)
Renormalization coefficients for t-model and t2-model only
coeffsArray3 : Numpy array (length(Nlist),3)
Renormalization coefficients for t1-t3-models
coeffsArray4 : Numpy array (length(Nlist),4)
Renormalization coefficients for t1-t4-models
coeffsArray2only : Numpy array (length(Nlist),1)
Renormalization coefficients for t2-model only
coeffsArray24only : Numpy array (length(Nlist),2)
Renormalization coefficients for t2-model and t4-model only
fitLines : Dict
Contains scaling-law fits for each ROM coefficient
of form c = -b * N^a
Terms given are a, b, and r (correlation coefficient of fit)
err : Dict
Contains least-squares error for each fit for each model and resolution
"""
# Check if full simulation has already been constructed
# if so, load it, if not, generate it
try:
uFull = np.load("u" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+".npy")
tFull = np.load("t" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+".npy")
except:
fullParams = {
'N': fullM,
'M': int(3/2*fullM),
'alpha': 1,
'epsilon': epsilon,
'tau': 1,
'coeffs': None,
'IC': IC,
'endtime': endtime,
'timesteps': timesteps
}
uSimFull = runSim(fullParams)
uFull = uSimFull.y
tFull = uSimFull.t
np.save( "u" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p'),uFull)
np.save( "t" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p'),tFull)
# initialize output arrays
coeffsArray1 = np.zeros((Nlist.shape[0],1))
coeffsArray2 = np.zeros((Nlist.shape[0],2))
coeffsArray3 = np.zeros((Nlist.shape[0],3))
coeffsArray4 = np.zeros((Nlist.shape[0],4))
coeffsArray2only = np.zeros((Nlist.shape[0],1))
coeffsArray24only = np.zeros((Nlist.shape[0],2))
# recover number of timesteps
numSteps = tFull.shape[0]
# initialize least squares error output
err = {"t-model" : np.zeros((Nlist.shape[0],1)),
"t2-model" : np.zeros((Nlist.shape[0],1)),
"t3-model" : np.zeros((Nlist.shape[0],1)),
"t4-model" : np.zeros((Nlist.shape[0],1)),
"t2-model only" : np.zeros((Nlist.shape[0],1)),
"t2- and t4-models" : np.zeros((Nlist.shape[0],1))}
# loop through all resolutions
for j in np.arange(0,Nlist.shape[0]):
# Find number of positive terms in ROM, in intermediate calculations, and wavenumber array
N = Nlist[j]
M = Mlist[j]
k = np.concatenate([np.arange(0,M),np.arange(-M,0)])
# Gather first derivative data for fitting purposes
exactEnergy = np.zeros((N,numSteps))
R0Energy = np.zeros((N,numSteps))
R1Energy = np.zeros((N,numSteps))
R2Energy = np.zeros((N,numSteps))
R3Energy = np.zeros((N,numSteps))
R4Energy = np.zeros((N,numSteps))
# plug exact solution into exact RHS and all ROM terms and find energy contribution of each
for i in np.arange(0,numSteps):
# exact RHS
exactRHS,dummyU = markovKdV(uFull[:,i],int(fullM*3/2),alpha)
exactEnergy[:,i] = np.real(exactRHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(exactRHS[0:N])*uFull[0:N,i])
# Markov RHS
nonlin0,u_full = markovKdV(uFull[0:N,i],M,alpha)
R0RHS = nonlin0
R0Energy[:,i] = np.real(R0RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R0RHS[0:N])*uFull[0:N,i])
# First order RHS term
F_modes = np.concatenate([np.arange(0,N),np.arange(2*N-1,M+N+2),np.arange(2*M-N+1,2*M)])
G_modes = np.arange(N,2*M-N+1)
nonlin1,uuStar = tModelKdV(u_full,nonlin0,alpha,F_modes)
R1RHS = nonlin1*tFull[i]**(1-tau)
R1Energy[:,i] = np.real(R1RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R1RHS[0:N])*uFull[0:N,i])
# Second order RHS term
nonlin2,uk3,uu,A,AStar,B,BStar,C,CStar,D,DStar = t2ModelKdV(u_full,nonlin0,uuStar,alpha,F_modes,G_modes,k,epsilon)
R2RHS = nonlin2*tFull[i]**(2*(1-tau))
R2Energy[:,i] = np.real(R2RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R2RHS[0:N])*uFull[0:N,i])
# Third order RHS term
nonlin3,uk6,E,EStar,F,FStar = t3ModelKdV(alpha,F_modes,G_modes,k,epsilon,u_full,uu,uuStar,uk3,A,AStar,B,BStar,C,CStar,DStar)
R3RHS = nonlin3*tFull[i]**(3*(1-tau))
R3Energy[:,i] = np.real(R3RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R3RHS[0:N])*uFull[0:N,i])
# Fourth order RHS term
nonlin4 = t4ModelKdV(alpha,F_modes,G_modes,k,epsilon,u_full,uu,uuStar,uk3,uk6,A,AStar,B,BStar,C,CStar,D,DStar,E,EStar,F,FStar)
R4RHS = nonlin4*tFull[i]**(4*(1-tau))
R4Energy[:,i] = np.real(R4RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R4RHS[0:N])*uFull[0:N,i])
if j == 0:
R0Energy0 = np.copy(R0Energy)
R1Energy0 = np.copy(R1Energy)
R2Energy0 = np.copy(R2Energy)
R3Energy0 = np.copy(R3Energy)
R4Energy0 = np.copy(R4Energy)
##################################################
# Use least-squares fit to identify coefficients #
##################################################
# t-model coefficient
coeffsArray1[j,:] = np.sum((exactEnergy - R0Energy)*R1Energy)/np.sum(R1Energy*R1Energy)
err["t-model"][j] = np.sum((exactEnergy - R0Energy - coeffsArray1[j,0]*R1Energy)**2)
# t2-model coefficient
LSMatrix = (np.array([[np.sum(R1Energy*R1Energy),np.sum(R1Energy*R2Energy)],
[np.sum(R2Energy*R1Energy),np.sum(R2Energy*R2Energy)]]))
LSb = (np.array([np.sum(R1Energy*(exactEnergy-R0Energy)),np.sum(R2Energy*(exactEnergy-R0Energy))]))
coeffsArray2[j,:] = np.linalg.solve(LSMatrix,LSb)
err["t2-model"][j] = np.sum((exactEnergy - R0Energy - coeffsArray2[j,0]*R1Energy - coeffsArray2[j,1]*R2Energy)**2)
# t3-model coefficient
LSMatrix = (np.array([[np.sum(R1Energy*R1Energy),np.sum(R1Energy*R2Energy),np.sum(R1Energy*R3Energy)],
[np.sum(R2Energy*R1Energy),np.sum(R2Energy*R2Energy),np.sum(R2Energy*R3Energy)],
[np.sum(R3Energy*R1Energy),np.sum(R3Energy*R2Energy),np.sum(R3Energy*R3Energy)]]))
LSb = (np.array([np.sum(R1Energy*(exactEnergy-R0Energy)),np.sum(R2Energy*(exactEnergy-R0Energy)),np.sum(R3Energy*(exactEnergy-R0Energy))]))
coeffsArray3[j,:] = np.linalg.solve(LSMatrix,LSb)
err["t3-model"][j] = np.sum((exactEnergy - R0Energy - coeffsArray3[j,0]*R1Energy - coeffsArray3[j,1]*R2Energy - coeffsArray3[j,2]*R3Energy)**2)
# t4-model coefficient
LSMatrix = (np.array([[np.sum(R1Energy*R1Energy),np.sum(R1Energy*R2Energy),np.sum(R1Energy*R3Energy),np.sum(R1Energy*R4Energy)],
[np.sum(R2Energy*R1Energy),np.sum(R2Energy*R2Energy),np.sum(R2Energy*R3Energy),np.sum(R2Energy*R4Energy)],
[np.sum(R3Energy*R1Energy),np.sum(R3Energy*R2Energy),np.sum(R3Energy*R3Energy),np.sum(R3Energy*R4Energy)],
[np.sum(R4Energy*R1Energy),np.sum(R4Energy*R2Energy),np.sum(R4Energy*R3Energy),np.sum(R4Energy*R4Energy)]]))
LSb = (np.array([np.sum(R1Energy*(exactEnergy-R0Energy)),np.sum(R2Energy*(exactEnergy-R0Energy)),np.sum(R3Energy*(exactEnergy-R0Energy)),np.sum(R4Energy*(exactEnergy-R0Energy))]))
coeffsArray4[j,:] = np.linalg.solve(LSMatrix,LSb)
err["t4-model"][j] = np.sum((exactEnergy - R0Energy - coeffsArray4[j,0]*R1Energy - coeffsArray4[j,1]*R2Energy - coeffsArray4[j,2]*R3Energy - coeffsArray4[j,3]*R4Energy)**2)
# t2-model with *no* t-model
coeffsArray2only[j,:] = np.sum((exactEnergy - R0Energy)*R2Energy)/np.sum(R2Energy*R2Energy)
err["t2-model only"][j] = np.sum((exactEnergy - R0Energy - coeffsArray2only[j,0]*R2Energy)**2)
# t2-model and t4-model with *no* t-model or t3-model
LSMatrix = (np.array([[np.sum(R2Energy*R2Energy),np.sum(R2Energy*R4Energy)],
[np.sum(R4Energy*R2Energy),np.sum(R4Energy*R4Energy)]]))
LSb = (np.array([np.sum(R2Energy*(exactEnergy-R0Energy)),np.sum(R4Energy*(exactEnergy-R0Energy))]))
coeffsArray24only[j,:] = np.linalg.solve(LSMatrix,LSb)
err["t2- and t4-models"][j] = np.sum((exactEnergy - R0Energy - coeffsArray24only[j,0]*R2Energy - coeffsArray24only[j,1]*R4Energy)**2)
# Generate plots if desired
if plots:
# Plot 1: Qualitative comparison of each term contributing to energy movement
N = Nlist[0]
fig1, ax1 = plt.subplots(3,2)
ax1[0,0].plot(tFull,np.sum(exactEnergy[0:N,:],0))
ax1[0,0].set_title("Exact Energy Decay")
ax1[0,1].plot(tFull,np.sum(R0Energy0[0:N,:],0))
ax1[0,1].set_title("Markov Energy Decay")
ax1[1,0].plot(tFull,np.sum(R2Energy0[0:N,:],0))
ax1[1,0].set_title("R2 Energy Decay")
ax1[1,1].plot(tFull,np.sum(R1Energy0[0:N,:],0))
ax1[1,1].set_title("R1 Energy Decay")
ax1[2,0].plot(tFull,np.sum(R4Energy0[0:N,:],0))
ax1[2,0].set_title("R4 Energy Decay")
ax1[2,1].plot(tFull,np.sum(R3Energy0[0:N,:],0))
ax1[2,1].set_title("R3 Energy Decay")
fig1.suptitle("N = "+str(N)+" Energy Decays")
plt.tight_layout()
# remove axis labels to not crowd plots (since only qualitative comparisons desired)
for i in range(0,3):
for j in range(0,2):
#ax1[i,j].tick_params(labelbottom=False,labelleft=False)
ax1[i,j].tick_params(labelleft=False)
# compute best fit lines for coefficients in log-log space
fitLines = {"t-model" : np.zeros((1,3)),
"t2-model" : np.zeros((2,3)),
"t3-model" : np.zeros((3,3)),
"t4-model" : np.zeros((4,3)),
"t2-model only" : np.zeros((1,3)),
"t2- and t4-models" : np.zeros((2,3))}
fig2, ax2 = plt.subplots(2,2)
# t-model
ax2[0,0].scatter(np.log(Nlist),np.log(abs(coeffsArray1[:,0])))
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray1[:,0])))
ax2[0,0].plot(np.log(Nlist),intercept + slope*np.log(Nlist))
fitLines["t-model"][:] = np.array([slope,np.exp(intercept),r_value])
# t2-model
ax2[0,0].scatter(np.log(Nlist),np.log(abs(coeffsArray2[:,0])),color="red")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray2[:,0])))
ax2[0,0].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "red")
fitLines["t2-model"][0,:] = np.array([slope,np.exp(intercept),r_value])
ax2[0,1].scatter(np.log(Nlist),np.log(abs(coeffsArray2[:,1])),color="red")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray2[:,1])))
ax2[0,1].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "red")
fitLines["t2-model"][1,:] = np.array([slope,np.exp(intercept),r_value])
# t3-model
ax2[0,0].scatter(np.log(Nlist),np.log(abs(coeffsArray3[:,0])),color="green")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray3[:,0])))
ax2[0,0].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "green")
fitLines["t3-model"][0,:] = np.array([slope,np.exp(intercept),r_value])
ax2[0,1].scatter(np.log(Nlist),np.log(abs(coeffsArray3[:,1])),color="green")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray3[:,1])))
ax2[0,1].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "green")
fitLines["t3-model"][1,:] = np.array([slope,np.exp(intercept),r_value])
ax2[1,0].scatter(np.log(Nlist),np.log(abs(coeffsArray3[:,2])),color="green")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray3[:,2])))
ax2[1,0].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "green")
fitLines["t3-model"][2,:] = np.array([slope,np.exp(intercept),r_value])
# t4-model
ax2[0,0].scatter(np.log(Nlist),np.log(abs(coeffsArray4[:,0])),color="purple")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray4[:,0])))
ax2[0,0].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "purple")
fitLines["t4-model"][0,:] = np.array([slope,np.exp(intercept),r_value])
ax2[0,1].scatter(np.log(Nlist),np.log(abs(coeffsArray4[:,1])),color="purple")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray4[:,1])))
ax2[0,1].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "purple")
fitLines["t4-model"][1,:] = np.array([slope,np.exp(intercept),r_value])
ax2[1,0].scatter(np.log(Nlist),np.log(abs(coeffsArray4[:,2])),color="purple")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray4[:,2])))
ax2[1,0].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "purple")
fitLines["t4-model"][2,:] = np.array([slope,np.exp(intercept),r_value])
ax2[1,1].scatter(np.log(Nlist),np.log(abs(coeffsArray4[:,3])),color="purple")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray4[:,3])))
ax2[1,1].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "purple")
fitLines["t4-model"][3,:] = np.array([slope,np.exp(intercept),r_value])
# t2-model alone
ax2[0,1].scatter(np.log(Nlist),np.log(abs(coeffsArray2only[:,0])),color="cyan")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray2only[:,0])))
ax2[0,1].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "cyan")
fitLines["t2-model only"][:] = np.array([slope,np.exp(intercept),r_value])
# t2- and t4-model alone
ax2[0,1].scatter(np.log(Nlist),np.log(abs(coeffsArray24only[:,0])),color="black")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray24only[:,0])))
ax2[0,1].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "black")
fitLines["t2- and t4-models"][0,:] = np.array([slope,np.exp(intercept),r_value])
ax2[1,1].scatter(np.log(Nlist),np.log(abs(coeffsArray24only[:,1])),color="black")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray24only[:,1])))
ax2[1,1].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "black")
fitLines["t2- and t4-models"][1,:] = np.array([slope,np.exp(intercept),r_value])
ax2[0,0].set_title("t-model")
ax2[0,1].set_title("t2-model")
ax2[1,0].set_title("t3-model")
ax2[1,1].set_title("t4-model")
customLines = [plt.Line2D([0],[0], color = "blue"),
plt.Line2D([0],[0], color = "red"),
plt.Line2D([0],[0], color = "green"),
plt.Line2D([0],[0], color = "purple"),
plt.Line2D([0],[0], color = "cyan"),
plt.Line2D([0],[0], color = "black")]
ax2[0,1].legend(customLines,["First Order Model","Second Order Model",
"Third Order Model","Fourth Order Model",
"Only Second Order","Second and Fourth Order"],
prop = {"size":5})
fig2.suptitle("Renormalization Coefficients (log(a) vs log(N))")
plt.subplots_adjust(right=0.7)
plt.tight_layout()
# calculate best fit lines if plotting didn't occur
else:
fitLines = {"t-model" : np.zeros((1,3)),
"t2-model" : np.zeros((2,3)),
"t3-model" : np.zeros((3,3)),
"t4-model" : np.zeros((4,3)),
"t2-model only" : np.zeros((1,3)),
"t2- and t4-models" : np.zeros((2,3))}
# t-model
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray1[:,0])))
fitLines["t-model"][:] = np.array([slope,np.exp(intercept),r_value])
# second order ROM
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray2[:,0])))
fitLines["t2-model"][0,:] = np.array([slope,np.exp(intercept),r_value])
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray2[:,1])))
fitLines["t2-model"][1,:] = np.array([slope,np.exp(intercept),r_value])
# third order ROM
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray3[:,0])))
fitLines["t3-model"][0,:] = np.array([slope,np.exp(intercept),r_value])
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray3[:,1])))
fitLines["t3-model"][1,:] = np.array([slope,np.exp(intercept),r_value])
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray3[:,2])))
fitLines["t3-model"][2,:] = np.array([slope,np.exp(intercept),r_value])
# fourth order ROM
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray4[:,0])))
fitLines["t4-model"][0,:] = np.array([slope,np.exp(intercept),r_value])
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray4[:,1])))
fitLines["t4-model"][1,:] = np.array([slope,np.exp(intercept),r_value])
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray4[:,2])))
fitLines["t4-model"][2,:] = np.array([slope,np.exp(intercept),r_value])
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray4[:,3])))
fitLines["t4-model"][3,:] = np.array([slope,np.exp(intercept),r_value])
# only t2-model
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray2only[:,0])))
fitLines["t2-model only"][:] = np.array([slope,np.exp(intercept),r_value])
# only t2- and t4-models
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray24only[:,0])))
fitLines["t2- and t4-models"][0,:] = np.array([slope,np.exp(intercept),r_value])
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray24only[:,1])))
fitLines["t2- and t4-models"][1,:] = np.array([slope,np.exp(intercept),r_value])
return coeffsArray1,coeffsArray2,coeffsArray3,coeffsArray4,coeffsArray2only,coeffsArray24only,fitLines,err
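# Hypothetical usage sketch (not from the original source): illustrates the expected
# argument types for renormalize. The resolutions, epsilon, tau, and time grid below are
# placeholder values, and runSim, markovKdV, etc. must be defined earlier in this module.
def _example_renormalize_usage():
    Nlist = np.array([8, 12, 16, 20])        # ROM resolutions to fit
    Mlist = 2 * Nlist                        # intermediate "full" resolutions for the ROM terms
    timesteps = np.linspace(0, 10, 1001)     # times at which the solution is saved
    return renormalize(fullM=256, endtime=10, Nlist=Nlist, Mlist=Mlist,
                       epsilon=0.1, alpha=1.0, tau=1.0, timesteps=timesteps,
                       IC=np.sin, plots=False)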
def scalingLaws(fullM, endtime, Nlist, Mlist, epsilonList, alpha, tau, timesteps, IC = np.sin, plots = False):
"""
    Finds renormalization coefficients based on simulations with a range of
epsilon values.
Parameters
----------
fullM : int
Size of full simulation to base fits on
endtime : int
Endtime of full simulation
Nlist : list of ints
List of resolutions for which to find coefficients
Mlist : list of ints
List of intermediary "full" simulations to use for ROMs
epsilonList : list of floats
size of linear term (stiffness)
alpha : float
degree of nonlinearity in KdV
tau : float
time decay modifier
timesteps : Numpy array
specific timesteps for which to save solution
IC : function handle
initial condition of simulation (default np.sin)
plots : boolean
Indicates whether to generate plots (default: False)
Returns
-------
    coefficients : Dict
        Renormalization coefficient arrays for each model
        (keys "t-model", "t2-model", "t3-model", "t4-model",
        "t2-model only", "t2- and t4-models"); each entry has shape
        (len(Nlist), number of coefficients in that model, len(epsilonList))
    fitLines : Dict
        Contains combined scaling law fits for each ROM coefficient
        of form c = -b * N^a * epsilon^g
        Terms given are -b, a, and g
"""
# initialize output arrays
c1 = np.zeros((len(Nlist),1,len(epsilonList)))
c2 = np.zeros((len(Nlist),2,len(epsilonList)))
c3 = np.zeros((len(Nlist),3,len(epsilonList)))
c4 = np.zeros((len(Nlist),4,len(epsilonList)))
c2only = np.zeros((len(Nlist),1,len(epsilonList)))
c24only = np.zeros((len(Nlist),2,len(epsilonList)))
# loop through all epsilon values
for i in np.arange(0,len(epsilonList)):
# renormalize for given epsilon value and save results
coeffsArray1,coeffsArray2,coeffsArray3,coeffsArray4,coeffsArray2only,coeffsArray24only,fitLines,err = renormalize(fullM = fullM, endtime = endtime, Nlist = Nlist, Mlist = Mlist, epsilon = epsilonList[i], alpha = alpha, tau = tau, timesteps = timesteps, IC = IC, plots = False)
c1[:,:,i] = coeffsArray1
c2[:,:,i] = coeffsArray2
c3[:,:,i] = coeffsArray3
c4[:,:,i] = coeffsArray4
c2only[:,:,i] = coeffsArray2only
c24only[:,:,i] = coeffsArray24only
# pack results into dictionary for output
coefficients = {"t-model" : c1,
"t2-model" : c2,
"t3-model" : c3,
"t4-model" : c4,
"t2-model only" : c2only,
"t2- and t4-models" : c24only}
# initialize output with best fit scaling laws
fitLines = {"t-model" : np.zeros((1,3)),
"t2-model" : np.zeros((2,3)),
"t3-model" : np.zeros((3,3)),
"t4-model" : np.zeros((4,3)),
"t2-model only" : np.zeros((1,3)),
"t2- and t4-models" : np.zeros((2,3))}
# find the scaling laws for each coefficient
# t-model coefficient
fitLines["t-model"][0,:] = epsilonNscalingLaw(c1[:,0,:],Nlist,epsilonList)
# Second order model coefficients
fitLines["t2-model"][0,:] = epsilonNscalingLaw(c2[:,0,:],Nlist,epsilonList)
fitLines["t2-model"][1,:] = epsilonNscalingLaw(c2[:,1,:],Nlist,epsilonList)
# Third order model coefficients
fitLines["t3-model"][0,:] = epsilonNscalingLaw(c3[:,0,:],Nlist,epsilonList)
fitLines["t3-model"][1,:] = epsilonNscalingLaw(c3[:,1,:],Nlist,epsilonList)
fitLines["t3-model"][2,:] = epsilonNscalingLaw(c3[:,2,:],Nlist,epsilonList)
# Fourth order model coefficients
fitLines["t4-model"][0,:] = epsilonNscalingLaw(c4[:,0,:],Nlist,epsilonList)
fitLines["t4-model"][1,:] = epsilonNscalingLaw(c4[:,1,:],Nlist,epsilonList)
fitLines["t4-model"][2,:] = epsilonNscalingLaw(c4[:,2,:],Nlist,epsilonList)
fitLines["t4-model"][3,:] = epsilonNscalingLaw(c4[:,3,:],Nlist,epsilonList)
# Only t2-model coefficient
fitLines["t2-model only"][0,:] = epsilonNscalingLaw(c2only[:,0,:],Nlist,epsilonList)
# Only t2- and t4-models coefficients
fitLines["t2- and t4-models"][0,:] = epsilonNscalingLaw(c24only[:,0,:],Nlist,epsilonList)
fitLines["t2- and t4-models"][1,:] = epsilonNscalingLaw(c24only[:,1,:],Nlist,epsilonList)
# make plots
fig1,ax1 = plt.subplots(1,2)
fig2,ax2 = plt.subplots(2,2)
fig3,ax3 = plt.subplots(3,2)
fig4,ax4 = plt.subplots(4,2)
fig5,ax5 = plt.subplots(1,2)
fig6,ax6 = plt.subplots(2,2)
# loop through epsilon values
for i in np.arange(len(epsilonList)):
# t-model coefficient
ax1[0].scatter(np.log(Nlist),np.log(-c1[:,0,i]))
# Second order model coefficients
ax2[0,0].scatter(np.log(Nlist),np.log(-c2[:,0,i]))
ax2[1,0].scatter(np.log(Nlist),np.log(-c2[:,1,i]))
# Third order model coefficients
ax3[0,0].scatter(np.log(Nlist),np.log(-c3[:,0,i]))
ax3[1,0].scatter(np.log(Nlist),np.log(-c3[:,1,i]))
ax3[2,0].scatter(np.log(Nlist),np.log(-c3[:,2,i]))
# Fourth order model coefficients
ax4[0,0].scatter(np.log(Nlist),np.log(-c4[:,0,i]))
ax4[1,0].scatter(np.log(Nlist),np.log(-c4[:,1,i]))
ax4[2,0].scatter(np.log(Nlist),np.log(-c4[:,2,i]))
ax4[3,0].scatter(np.log(Nlist),np.log(-c4[:,3,i]))
# Only t2-model
ax5[0].scatter(np.log(Nlist),np.log(-c2only[:,0,i]))
# Only t2- and t4-models
ax6[0,0].scatter(np.log(Nlist),np.log(-c24only[:,0,i]))
ax6[1,0].scatter(np.log(Nlist),np.log(-c24only[:,1,i]))
# plot best fit lines
myEps = epsilonList[i]
myFit = fitLines["t-model"][0,:]
ax1[0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t2-model"][0,:]
ax2[0,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t2-model"][1,:]
ax2[1,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t3-model"][0,:]
ax3[0,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t3-model"][1,:]
ax3[1,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t3-model"][2,:]
ax3[2,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t4-model"][0,:]
ax4[0,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t4-model"][1,:]
ax4[1,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t4-model"][2,:]
ax4[2,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t4-model"][3,:]
ax4[3,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t2-model only"][0,:]
ax5[0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t2- and t4-models"][0,:]
ax6[0,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t2- and t4-models"][1,:]
ax6[1,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
# loop through epsilon values
for j in np.arange(len(Nlist)):
# t-model coefficient
ax1[1].scatter(np.log(epsilonList),np.log(-c1[j,0,:]))
# Second order model coefficients
ax2[0,1].scatter(np.log(epsilonList),np.log(-c2[j,0,:]))
ax2[1,1].scatter(np.log(epsilonList),np.log(-c2[j,1,:]))
# Third order model coefficients
ax3[0,1].scatter(np.log(epsilonList),np.log(-c3[j,0,:]))
ax3[1,1].scatter(np.log(epsilonList),np.log(-c3[j,1,:]))
ax3[2,1].scatter(np.log(epsilonList),np.log(-c3[j,2,:]))
# Fourth order model coefficients
ax4[0,1].scatter(np.log(epsilonList),np.log(-c4[j,0,:]))
ax4[1,1].scatter(np.log(epsilonList),np.log(-c4[j,1,:]))
ax4[2,1].scatter(np.log(epsilonList),np.log(-c4[j,2,:]))
ax4[3,1].scatter(np.log(epsilonList),np.log(-c4[j,3,:]))
# Only t2-model
ax5[1].scatter(np.log(epsilonList),np.log(-c2only[j,0,:]))
# Only t2- and t4-models
ax6[0,1].scatter(np.log(epsilonList),np.log(-c24only[j,0,:]))
ax6[1,1].scatter(np.log(epsilonList),np.log(-c24only[j,1,:]))
# plot best fit lines
myN = Nlist[j]
myFit = fitLines["t-model"][0,:]
ax1[1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t2-model"][0,:]
ax2[0,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t2-model"][1,:]
ax2[1,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t3-model"][0,:]
ax3[0,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t3-model"][1,:]
ax3[1,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t3-model"][2,:]
ax3[2,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t4-model"][0,:]
ax4[0,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t4-model"][1,:]
ax4[1,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t4-model"][2,:]
ax4[2,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t4-model"][3,:]
ax4[3,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t2-model only"][0,:]
ax5[1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t2- and t4-models"][0,:]
ax6[0,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t2- and t4-models"][1,:]
ax6[1,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
# label all plots
fig1.suptitle("t-model")
ax1[0].set_title("log(a1) vs log(N)")
    ax1[0].legend(["epsilon = "+str(round(epsilonList[i],2)) for i in range(len(epsilonList))],prop = {"size":5})
ax1[1].set_title("log(a1) vs log(epsilon)")
ax1[1].legend(["N = "+str(Nlist[i]) for i in range(len(Nlist))],prop = {"size":5})
plt.tight_layout()
fig2.suptitle("Second Order Renormalization")
ax2[0,0].set_title("log(a1) vs log(N)")
ax2[1,0].set_title("log(a2) vs log(N)")
    ax2[0,0].legend(["epsilon = "+str(round(epsilonList[i],2)) for i in range(len(epsilonList))],prop = {"size":5})
    ax2[0,1].set_title("log(a1) vs log(epsilon)")
    ax2[1,1].set_title("log(a2) vs log(epsilon)")
ax2[0,1].legend(["N = "+str(Nlist[i]) for i in range(len(Nlist))],prop = {"size":5})
plt.tight_layout()
fig3.suptitle("Third Order Renormalization")
ax3[0,0].set_title("log(a1) vs log(N)")
ax3[1,0].set_title("log(a2) vs log(N)")
ax3[2,0].set_title("log(a3) vs log(N)")
    ax3[0,0].legend(["epsilon = "+str(round(epsilonList[i],2)) for i in range(len(epsilonList))],prop = {"size":5})
ax3[0,1].set_title("log(a1) vs log(epsilon)")
ax3[1,1].set_title("log(a2) vs log(epsilon)")
ax3[2,1].set_title("log(a3) vs log(epsilon)")
ax3[0,1].legend(["N = "+str(Nlist[i]) for i in range(len(Nlist))],prop = {"size":5})
plt.tight_layout()
fig4.suptitle("Fourth Order Renormalization")
ax4[0,0].set_title("log(a1) vs log(N)")
ax4[1,0].set_title("log(a2) vs log(N)")
ax4[2,0].set_title("log(a3) vs log(N)")
ax4[3,0].set_title("log(a4) vs log(N)")
    ax4[0,0].legend(["epsilon = "+str(round(epsilonList[i],2)) for i in range(len(epsilonList))],prop = {"size":5})
ax4[0,1].set_title("log(a1) vs log(epsilon)")
ax4[1,1].set_title("log(a2) vs log(epsilon)")
ax4[2,1].set_title("log(a3) vs log(epsilon)")
ax4[3,1].set_title("log(a4) vs log(epsilon)")
ax4[0,1].legend(["N = "+str(Nlist[i]) for i in range(len(Nlist))],prop = {"size":5})
plt.tight_layout()
fig5.suptitle("Only t2-Model Renormalization")
ax5[0].set_title("log(a2) vs log(N)")
    ax5[0].legend(["epsilon = "+str(round(epsilonList[i],2)) for i in range(len(epsilonList))],prop = {"size":5})
ax5[1].set_title("log(a2) vs log(epsilon)")
ax5[1].legend(["N = "+str(Nlist[i]) for i in range(len(Nlist))],prop = {"size":5})
plt.tight_layout()
fig6.suptitle("Second and Fourth Order Renormalization")
ax6[0,0].set_title("log(a2) vs log(N)")
ax6[1,0].set_title("log(a4) vs log(N)")
    ax6[0,0].legend(["epsilon = "+str(round(epsilonList[i],2)) for i in range(len(epsilonList))],prop = {"size":5})
ax6[0,1].set_title("log(a2) vs log(epsilon)")
ax6[1,1].set_title("log(a4) vs log(epsilon)")
ax6[0,1].legend(["N = "+str(Nlist[i]) for i in range(len(Nlist))],prop = {"size":5})
plt.tight_layout()
return coefficients,fitLines
def epsilonNscalingLaw(coeffArray,Nlist,epsilonList):
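    """
    Fit a combined scaling law of the form c = -b * N**a * epsilon**g by least squares
    in log-log space.
    coeffArray : Numpy array (len(Nlist), len(epsilonList))
        Coefficient values c(N, epsilon) for every resolution/epsilon pair
    Returns a length-3 array [-b, a, g].
    """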
numEps = len(epsilonList)
numN = len(Nlist)
epsilonTile = np.tile(epsilonList,(numN,1))
Ntile = np.transpose(np.tile(Nlist,(numEps,1)))
LSMatrix = (np.array([[numEps*numN,np.sum(np.log(Ntile)),np.sum(np.log(epsilonTile))],
[np.sum(np.log(Ntile)),np.sum(np.log(Ntile)**2),np.sum(np.log(Ntile)*np.log(epsilonTile))],
[np.sum(np.log(epsilonTile)),np.sum(np.log(Ntile)*np.log(epsilonTile)),np.sum(np.log(epsilonTile)**2)]])
)
LSb = np.array([np.sum(np.log(np.abs(coeffArray))),np.sum(np.log(np.abs(coeffArray))*np.log(Ntile)),np.sum(np.log(np.abs(coeffArray))*np.log(epsilonTile))])
sol = np.linalg.solve(LSMatrix,LSb)
sol[0] = -np.exp(sol[0])
return sol
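# A minimal synthetic-data check (not from the original source): coefficients generated
# exactly from a law c = -b * N^a * epsilon^g should be recovered by epsilonNscalingLaw.
# The values of b, a, g, Nlist and epsilonList below are arbitrary placeholders, and
# numpy is assumed to be imported as np at the top of this module (it is used throughout).
if __name__ == "__main__":
    _N = np.array([8, 16, 24, 32])
    _eps = np.array([0.05, 0.1, 0.2])
    _b, _a, _g = 2.0, -1.5, 0.75
    _c = -_b * (_N[:, None] ** _a) * (_eps[None, :] ** _g)   # shape (len(_N), len(_eps))
    print(epsilonNscalingLaw(_c, _N, _eps))                  # approx [-2.0, -1.5, 0.75]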
def findError(compareList,exact,t):
"""
    Finds the two-norm of the error between a list of ROMs and an exact solution.
Parameters
----------
compareList : List of Numpy arrays of size (N,T)
Set of state vector evolutions to find errors from
exact : Numpy array of size (N,T)
Exact solution for the same timesteps
t : Numpy array (T,)
Timesteps associated with simulations (must all be the same)
Returns
-------
errList : List of Numpy arrays of size (T,1)
Arrays with the two-norm of the error at all timesteps for each ROM
"""
# find the ROM size
N = compareList[0].shape[0]
# generate real space solutions
realSols = [makeRealSpace(x,N) for x in compareList]
exactSol = makeRealSpace(exact,N)
# compute two norm of error at all times
errList =[np.sum((i - exactSol)**2,0) for i in realSols]
return errList
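# A minimal, self-contained sketch (not from the original source) of the error metric used
# inside findError: the squared two-norm over space at every timestep. The "solutions" here
# are synthetic real-space arrays; findError itself additionally converts Fourier
# coefficients to real space with makeRealSpace before this step.
if __name__ == "__main__":
    _exact = np.sin(np.linspace(0, 2 * np.pi, 64))[:, None] * np.ones((1, 5))
    _rom = _exact + 1e-3 * np.random.rand(64, 5)
    _err = np.sum((_rom - _exact) ** 2, 0)    # shape (5,), one error value per timestep
    print(_err)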
def renormalizeRobust(fullM, endtime, Nlist, Mlist, epsilon, alpha, tau, timesteps, IC = np.sin, plots = False):
"""
Finds renormalization coefficients based on a single simulation. If the
simulation doesn't yet exist, it creates it
Parameters
----------
fullM : int
Size of full simulation to base fits on
endtime : int
Endtime of full simulation
Nlist : list of ints
List of resolutions for which to find coefficients
Mlist : list of ints
List of intermediary "full" simulations to use for ROMs
epsilon : float
size of linear term (stiffness)
alpha : float
degree of nonlinearity in KdV
tau : float
time decay modifier
timesteps : Numpy array
specific timesteps for which to save solution
IC : function handle
initial condition of simulation (default np.sin)
plots : boolean
Indicates whether to generate plots (default: False)
Returns
-------
    coeffsArray1 : Numpy array (len(Nlist), numSteps-30, 1)
        Renormalization coefficients for t-model only
        (the second axis indexes the endpoint of the fitting window: entry m uses
        data up to timestep m+30, since the first 30 steps are skipped)
    coeffsArray2 : Numpy array (len(Nlist), numSteps-30, 2)
        Renormalization coefficients for t-model and t2-model only
    coeffsArray3 : Numpy array (len(Nlist), numSteps-30, 3)
        Renormalization coefficients for t1-t3-models
    coeffsArray4 : Numpy array (len(Nlist), numSteps-30, 4)
        Renormalization coefficients for t1-t4-models
    coeffsArray2only : Numpy array (len(Nlist), numSteps-30, 1)
        Renormalization coefficients for t2-model only
    coeffsArray24only : Numpy array (len(Nlist), numSteps-30, 2)
        Renormalization coefficients for t2-model and t4-model only
"""
# Check if full simulation has already been constructed
# if so, load it, if not, generate it
try:
uFull = np.load("u" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+IC.__name__+".npy")
tFull = np.load("t" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+IC.__name__+".npy")
except:
fullParams = {
'N': fullM,
'M': int(3/2*fullM),
'alpha': 1,
'epsilon': epsilon,
'tau': 1,
'coeffs': None,
'IC': IC,
'endtime': endtime,
'timesteps': timesteps
}
uSimFull = runSim(fullParams)
uFull = uSimFull.y
tFull = uSimFull.t
np.save( "u" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+IC.__name__,uFull)
np.save( "t" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+IC.__name__,tFull)
# recover number of timesteps
numSteps = tFull.shape[0]
# initialize output arrays
coeffsArray1 = np.zeros((Nlist.shape[0],numSteps - 30,1))
coeffsArray2 = np.zeros((Nlist.shape[0],numSteps - 30,2))
coeffsArray3 = np.zeros((Nlist.shape[0],numSteps - 30,3))
coeffsArray4 = np.zeros((Nlist.shape[0],numSteps - 30,4))
coeffsArray2only = np.zeros((Nlist.shape[0],numSteps - 30,1))
coeffsArray24only = np.zeros((Nlist.shape[0],numSteps - 30,2))
# loop through all resolutions
for j in np.arange(0,Nlist.shape[0]):
# Find number of positive terms in ROM, in intermediate calculations, and wavenumber array
N = Nlist[j]
M = Mlist[j]
k = np.concatenate([np.arange(0,M),np.arange(-M,0)])
# Gather first derivative data for fitting purposes
exactEnergy = np.zeros((N,numSteps))
R0Energy = np.zeros((N,numSteps))
R1Energy = np.zeros((N,numSteps))
R2Energy = np.zeros((N,numSteps))
R3Energy = np.zeros((N,numSteps))
R4Energy = np.zeros((N,numSteps))
# plug exact solution into exact RHS and all ROM terms and find energy contribution of each
for i in np.arange(0,numSteps):
# exact RHS
exactRHS,dummyU = markovKdV(uFull[:,i],int(fullM*3/2),alpha)
exactEnergy[:,i] = np.real(exactRHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(exactRHS[0:N])*uFull[0:N,i])
# Markov RHS
nonlin0,u_full = markovKdV(uFull[0:N,i],M,alpha)
R0RHS = nonlin0
R0Energy[:,i] = np.real(R0RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R0RHS[0:N])*uFull[0:N,i])
# First order RHS term
F_modes = np.concatenate([np.arange(0,N),np.arange(2*N-1,M+N+2),np.arange(2*M-N+1,2*M)])
G_modes = np.arange(N,2*M-N+1)
nonlin1,uuStar = tModelKdV(u_full,nonlin0,alpha,F_modes)
R1RHS = nonlin1*tFull[i]**(1-tau)
R1Energy[:,i] = np.real(R1RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R1RHS[0:N])*uFull[0:N,i])
# Second order RHS term
nonlin2,uk3,uu,A,AStar,B,BStar,C,CStar,D,DStar = t2ModelKdV(u_full,nonlin0,uuStar,alpha,F_modes,G_modes,k,epsilon)
R2RHS = nonlin2*tFull[i]**(2*(1-tau))
R2Energy[:,i] = np.real(R2RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R2RHS[0:N])*uFull[0:N,i])
# Third order RHS term
nonlin3,uk6,E,EStar,F,FStar = t3ModelKdV(alpha,F_modes,G_modes,k,epsilon,u_full,uu,uuStar,uk3,A,AStar,B,BStar,C,CStar,DStar)
R3RHS = nonlin3*tFull[i]**(3*(1-tau))
R3Energy[:,i] = np.real(R3RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R3RHS[0:N])*uFull[0:N,i])
# Fourth order RHS term
nonlin4 = t4ModelKdV(alpha,F_modes,G_modes,k,epsilon,u_full,uu,uuStar,uk3,uk6,A,AStar,B,BStar,C,CStar,D,DStar,E,EStar,F,FStar)
R4RHS = nonlin4*tFull[i]**(4*(1-tau))
R4Energy[:,i] = np.real(R4RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R4RHS[0:N])*uFull[0:N,i])
##################################################
# Use least-squares fit to identify coefficients #
##################################################
for i in np.arange(30,numSteps):
exactEnergySnip = exactEnergy[:,0:i]
R0EnergySnip = R0Energy[:,0:i]
R1EnergySnip = R1Energy[:,0:i]
R2EnergySnip = R2Energy[:,0:i]
R3EnergySnip = R3Energy[:,0:i]
R4EnergySnip = R4Energy[:,0:i]
# t-model coefficient
coeffsArray1[j,i-30,:] = np.sum((exactEnergySnip - R0EnergySnip)*R1EnergySnip)/np.sum(R1EnergySnip*R1EnergySnip)
# t2-model coefficient
LSMatrix = (np.array([[np.sum(R1EnergySnip*R1EnergySnip),np.sum(R1EnergySnip*R2EnergySnip)],
[np.sum(R2EnergySnip*R1EnergySnip),np.sum(R2EnergySnip*R2EnergySnip)]]))
LSb = (np.array([np.sum(R1EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R2EnergySnip*(exactEnergySnip-R0EnergySnip))]))
coeffsArray2[j,i-30,:] = np.linalg.solve(LSMatrix,LSb)
# t3-model coefficient
LSMatrix = (np.array([[np.sum(R1EnergySnip*R1EnergySnip),np.sum(R1EnergySnip*R2EnergySnip),np.sum(R1EnergySnip*R3EnergySnip)],
[np.sum(R2EnergySnip*R1EnergySnip),np.sum(R2EnergySnip*R2EnergySnip),np.sum(R2EnergySnip*R3EnergySnip)],
[np.sum(R3EnergySnip*R1EnergySnip),np.sum(R3EnergySnip*R2EnergySnip),np.sum(R3EnergySnip*R3EnergySnip)]]))
LSb = (np.array([np.sum(R1EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R2EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R3EnergySnip*(exactEnergySnip-R0EnergySnip))]))
coeffsArray3[j,i-30,:] = np.linalg.solve(LSMatrix,LSb)
# t4-model coefficient
LSMatrix = (np.array([[np.sum(R1EnergySnip*R1EnergySnip),np.sum(R1EnergySnip*R2EnergySnip),np.sum(R1EnergySnip*R3EnergySnip),np.sum(R1EnergySnip*R4EnergySnip)],
[np.sum(R2EnergySnip*R1EnergySnip),np.sum(R2EnergySnip*R2EnergySnip),np.sum(R2EnergySnip*R3EnergySnip),np.sum(R2EnergySnip*R4EnergySnip)],
[np.sum(R3EnergySnip*R1EnergySnip),np.sum(R3EnergySnip*R2EnergySnip),np.sum(R3EnergySnip*R3EnergySnip),np.sum(R3EnergySnip*R4EnergySnip)],
[np.sum(R4EnergySnip*R1EnergySnip),np.sum(R4EnergySnip*R2EnergySnip),np.sum(R4EnergySnip*R3EnergySnip),np.sum(R4EnergySnip*R4EnergySnip)]]))
LSb = (np.array([np.sum(R1EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R2EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R3EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R4EnergySnip*(exactEnergySnip-R0EnergySnip))]))
coeffsArray4[j,i-30,:] = np.linalg.solve(LSMatrix,LSb)
# t2-model with *no* t-model
coeffsArray2only[j,i-30,:] = np.sum((exactEnergySnip - R0EnergySnip)*R2EnergySnip)/np.sum(R2EnergySnip*R2EnergySnip)
# t2-model and t4-model with *no* t-model or t3-model
LSMatrix = (np.array([[np.sum(R2EnergySnip*R2EnergySnip),np.sum(R2EnergySnip*R4EnergySnip)],
[np.sum(R4EnergySnip*R2EnergySnip),np.sum(R4EnergySnip*R4EnergySnip)]]))
LSb = (np.array([np.sum(R2EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R4EnergySnip*(exactEnergySnip-R0EnergySnip))]))
coeffsArray24only[j,i-30,:] = np.linalg.solve(LSMatrix,LSb)
for ind in np.arange(Nlist.shape[0]):
fig1,ax1 = plt.subplots(2,2)
fig1.suptitle("N = "+str(Nlist[ind]))
ax1[0,0].plot(timesteps[30:],coeffsArray1[ind,:,0],color = "blue")
ax1[0,0].plot(timesteps[30:],coeffsArray2[ind,:,0],color = "red")
ax1[0,0].plot(timesteps[30:],coeffsArray3[ind,:,0],color = "green")
ax1[0,0].plot(timesteps[30:],coeffsArray4[ind,:,0],color = "black")
ax1[0,0].set_title("t-model")
ax1[0,1].plot([],[],color = "blue")
ax1[0,1].plot(timesteps[30:],coeffsArray2[ind,:,1], color = "red")
ax1[0,1].plot(timesteps[30:],coeffsArray3[ind,:,1], color = "green")
ax1[0,1].plot(timesteps[30:],coeffsArray4[ind,:,1], color = "black")
ax1[0,1].plot(timesteps[30:],coeffsArray2only[ind,:,0],color = "cyan")
ax1[0,1].plot(timesteps[30:],coeffsArray24only[ind,:,0], color = "magenta")
ax1[0,1].set_title("t2-model")
ax1[0,1].legend(["First order","Second order","Third order","Fourth order","Only t2","t2 and t4"],prop = {"size":5})
ax1[1,0].plot(timesteps[30:],coeffsArray3[ind,:,2], color = "green")
ax1[1,0].plot(timesteps[30:],coeffsArray4[ind,:,2],color = "black")
ax1[1,0].set_title("t3-model")
ax1[1,1].plot(timesteps[30:],coeffsArray4[ind,:,3], color = "black")
ax1[1,1].plot(timesteps[30:],coeffsArray24only[ind,:,1], color = "magenta")
ax1[1,1].set_title("t4-model")
plt.tight_layout()
return coeffsArray1,coeffsArray2,coeffsArray3,coeffsArray4,coeffsArray2only,coeffsArray24only
def renormalizeWindow(fullM, endtime, width, Nlist, Mlist, epsilon, alpha, tau, timesteps, IC = np.sin, plots = False):
"""
Finds renormalization coefficients using sliding window least squares.
Parameters
----------
fullM : int
Size of full simulation to base fits on
endtime : int
Endtime of full simulation
width : float
Size of sliding window to use in fitting
Nlist : list of ints
List of resolutions for which to find coefficients
Mlist : list of ints
List of intermediary "full" simulations to use for ROMs
epsilon : float
size of linear term (stiffness)
alpha : float
degree of nonlinearity in KdV
tau : float
time decay modifier
timesteps : Numpy array
specific timesteps for which to save solution
IC : function handle
initial condition of simulation (default np.sin)
plots : boolean
Indicates whether to generate plots (default: False)
Returns
-------
    coeffsArray1 : Numpy array (length(Nlist),1)
Renormalization coefficients for t-model only
coeffsArray2 : Numpy array (length(Nlist),2)
Renormalization coefficients for t-model and t2-model only
coeffsArray3 : Numpy array (length(Nlist),3)
Renormalization coefficients for t1-t3-models
coeffsArray4 : Numpy array (length(Nlist),4)
Renormalization coefficients for t1-t4-models
coeffsArray2only : Numpy array (length(Nlist),1)
Renormalization coefficients for t2-model only
coeffsArray24only : Numpy array (length(Nlist),2)
Renormalization coefficients for t2-model and t4-model only
fitLines : Dict
        Contains scaling law fits for each ROM coefficient
of form c = -b * N^a
Terms given are a, b, and r (correlation coefficient of fit)
"""
# Check if full simulation has already been constructed
# if so, load it, if not, generate it
try:
uFull = np.load("u" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+IC.__name__+".npy")
tFull = np.load("t" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+IC.__name__+".npy")
except:
fullParams = {
'N': fullM,
'M': int(3/2*fullM),
'alpha': 1,
'epsilon': epsilon,
'tau': 1,
'coeffs': None,
'IC': IC,
'endtime': endtime,
'timesteps': timesteps
}
uSimFull = runSim(fullParams)
uFull = uSimFull.y
tFull = uSimFull.t
np.save( "u" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+IC.__name__,uFull)
np.save( "t" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+IC.__name__,tFull)
# recover number of timesteps
numSteps = tFull.shape[0]
widthSteps = round(width/(tFull[1]-tFull[0]))
# initialize output arrays
coeffsArray1 = np.zeros((Nlist.shape[0],numSteps - widthSteps+1,1))
coeffsArray2 = np.zeros((Nlist.shape[0],numSteps - widthSteps+1,2))
coeffsArray3 = np.zeros((Nlist.shape[0],numSteps - widthSteps+1,3))
coeffsArray4 = np.zeros((Nlist.shape[0],numSteps - widthSteps+1,4))
coeffsArray2only = np.zeros((Nlist.shape[0],numSteps - widthSteps+1,1))
coeffsArray24only = np.zeros((Nlist.shape[0],numSteps - widthSteps+1,2))
exact1 = np.zeros((Nlist.shape[0],1))
exact2 = np.zeros((Nlist.shape[0],2))
exact3 = np.zeros((Nlist.shape[0],3))
exact4 = np.zeros((Nlist.shape[0],4))
exact2o = np.zeros((Nlist.shape[0],1))
exact24o = np.zeros((Nlist.shape[0],2))
# loop through all resolutions
for j in np.arange(0,Nlist.shape[0]):
# Find number of positive terms in ROM, in intermediate calculations, and wavenumber array
N = Nlist[j]
M = Mlist[j]
k = np.concatenate([np.arange(0,M),np.arange(-M,0)])
# Gather first derivative data for fitting purposes
exactEnergy = np.zeros((N,numSteps))
R0Energy =
|
np.zeros((N,numSteps))
|
numpy.zeros
|
# test_arnoldi_sampling.py
import unittest
import numpy as np
import chaospy as cp
from pystatreduce.stochastic_collocation import StochasticCollocation
from pystatreduce.quantity_of_interest import QuantityOfInterest
from pystatreduce.stochastic_arnoldi.arnoldi_sample import ArnoldiSampling
import pystatreduce.examples as examples
class ArnoldiSamplingTest(unittest.TestCase):
def test_modified_GramSchmidt_fullRank(self):
# Generate a random set of vectors and use modGramSchmidt to
# orthogonalize them
systemsize = 10
# Initialize ArnoldiSampling object
alpha = 1.0
num_sample = 4
arnoldi = ArnoldiSampling(alpha, num_sample)
# Create arrays for modified_GramSchmidt
Z = np.random.rand(systemsize, num_sample)
H = np.zeros([num_sample, num_sample-1])
# Populate Z
for i in range(-1, num_sample-1):
arnoldi.modified_GramSchmidt(i, H, Z)
# Check that the vectors are unit normal
self.assertAlmostEqual(np.linalg.norm(Z[:,i+1]), 1, places=14)
# Check that the vectors are orthogonal
for i in range(0, num_sample):
for j in range(i+1, num_sample):
self.assertAlmostEqual(np.dot(Z[:,i], Z[:,j]), 0, places=14)
def test_modified_GramSchmidt_RankDeficient(self):
# Generate a random set of vectors, make one of them a linear combination
# of the others, and use modGramSchmidt to orthogonalize them
systemsize = 10
# Initialize ArnoldiSampling object
alpha = 1.0
num_sample = 4
arnoldi = ArnoldiSampling(alpha, num_sample)
# Create arrays for modified_GramSchmidt
Z = np.random.rand(systemsize, num_sample)
Z[:,num_sample-1] = Z[:,0:num_sample-1].dot(np.random.rand(num_sample-1))
H = np.zeros([num_sample, num_sample-1])
for i in range(-1, num_sample-2):
arnoldi.modified_GramSchmidt(i, H, Z)
# Check that the vectors are unit normal
self.assertAlmostEqual(np.linalg.norm(Z[:,i+1]), 1, places=14)
# calling now should produce lin_depend flag
lin_depend = arnoldi.modified_GramSchmidt(num_sample-2, H, Z)
self.assertTrue(lin_depend)
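    def test_reference_modified_gram_schmidt(self):
        # Illustrative reference (not part of the original suite): a plain-numpy
        # modified Gram-Schmidt, documenting the orthonormality property that the
        # ArnoldiSampling tests above rely on.
        rng = np.random.RandomState(0)
        A = rng.rand(10, 4)
        Q = np.zeros_like(A)
        for j in range(A.shape[1]):
            v = A[:, j].copy()
            for i in range(j):
                v -= np.dot(Q[:, i], v) * Q[:, i]
            Q[:, j] = v / np.linalg.norm(v)
        # Columns should be orthonormal up to round-off
        np.testing.assert_allclose(Q.T.dot(Q), np.eye(4), atol=1e-12)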
def test_arnoldiSample_complete(self):
# Compute all of the eigenmodes of an isoprobabilistic Hadamard
# quadratic system using Arnoldi sampling and verify against the exact
# computation
systemsize = 16
eigen_decayrate = 2.0
# Create Hadmard Quadratic object
QoI = examples.HadamardQuadratic(systemsize, eigen_decayrate)
# Initialize chaospy distribution
std_dev = np.random.rand(QoI.systemsize)
sqrt_Sigma = np.diag(std_dev)
mu = np.random.rand(QoI.systemsize)
jdist = cp.MvNormal(mu, sqrt_Sigma)
# Estimate the eigenmodes of the Hessenberg matrix using Arnoldi
perturbation_size = 1.e-6
num_sample = QoI.systemsize+1
arnoldi = ArnoldiSampling(perturbation_size, num_sample)
eigenvals = np.zeros(num_sample-1)
eigenvecs =
|
np.zeros([QoI.systemsize, num_sample-1])
|
numpy.zeros
|
""" SEG-Y geometry. """
import os
from itertools import product
import numpy as np
import pandas as pd
import h5pickle as h5py
import segyio
import cv2
from .base import SeismicGeometry
from ..utils import find_min_max, lru_cache, SafeIO
from ...batchflow import Notifier
class SeismicGeometrySEGY(SeismicGeometry):
""" Class to infer information about SEG-Y cubes and provide convenient methods of working with them.
A wrapper around `segyio` to provide higher-level API.
In order to initialize instance, one must supply `path`, `headers` and `index`:
- `path` is a location of SEG-Y file
- `headers` is a sequence of trace headers to infer from the file
- `index_headers` is a subset of `headers` that is used as trace (unique) identifier:
    for example, `INLINE_3D` and `CROSSLINE_3D` have a one-to-one correspondence with trace numbers.
Another example is `FieldRecord` and `TraceNumber`.
Default values of `headers` and `index_headers` are ones for post-stack seismic
(with correctly filled `INLINE_3D` and `CROSSLINE_3D` headers),
so that post-stack cube can be loaded by providing path only.
Each instance is basically built around `dataframe` attribute, which describes mapping from
indexing headers to trace numbers. It is used to, for example, get all trace indices from a desired `FieldRecord`.
`set_index` method can be called to change indexing headers of the dataframe.
    One can add stats to the instance by calling the `collect_stats` method, which makes a full pass through
    the cube in order to analyze the distribution of amplitudes. It also collects a number of trace examples
    into the `trace_container` attribute, which can be used for later evaluation of various statistics.
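    Example
    -------
    A hypothetical post-stack usage sketch (the path is a placeholder; the exact
    construction entry point may differ elsewhere in the surrounding package)::

        geometry = SeismicGeometrySEGY('/path/to/cube.sgy')
        geometry.process(collect_stats=True)
        slide = geometry.load_slide(loc=100, axis=0)    # one slice along the first index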
"""
#pylint: disable=attribute-defined-outside-init, too-many-instance-attributes, redefined-builtin
def __init__(self, path, headers=None, index_headers=None, **kwargs):
self.structured = False
self.quantized = False
self.dataframe = None
self.segyfile = None
self.headers = headers or self.HEADERS_POST_FULL
self.index_headers = index_headers or self.INDEX_POST
super().__init__(path, **kwargs)
def set_index(self, index_headers, sortby=None):
""" Change current index to a subset of loaded headers. """
self.dataframe.reset_index(inplace=True)
if sortby:
            self.dataframe.sort_values(index_headers, inplace=True, kind='mergesort')  # mergesort is the only stable option for sort_values
self.dataframe.set_index(index_headers, inplace=True)
self.index_headers = index_headers
self.add_attributes()
# Methods of inferring dataframe and amplitude stats
def process(self, collect_stats=True, recollect=False, **kwargs):
""" Create dataframe based on `segy` file headers. """
# Note that all the `segyio` structure inference is disabled
self.segyfile = SafeIO(self.path, opener=segyio.open, mode='r', strict=False, ignore_geometry=True)
self.segyfile.mmap()
self.depth = len(self.segyfile.trace[0])
self.delay = self.segyfile.header[0].get(segyio.TraceField.DelayRecordingTime)
self.sample_rate = segyio.dt(self.segyfile) / 1000
# Load all the headers
dataframe = {}
for column in self.headers:
dataframe[column] = self.segyfile.attributes(getattr(segyio.TraceField, column))[slice(None)]
dataframe = pd.DataFrame(dataframe)
dataframe.reset_index(inplace=True)
dataframe.rename(columns={'index': 'trace_index'}, inplace=True)
self.dataframe = dataframe.set_index(self.index_headers)
self.add_attributes()
# Collect stats, if needed and not collected previously
if os.path.exists(self.path_meta) and not recollect:
self.load_meta()
self.has_stats = True
elif collect_stats:
self.collect_stats(**kwargs)
# Create a matrix with ones at fully-zero traces
if self.index_headers == self.INDEX_POST and not hasattr(self, 'zero_traces'):
try:
size = self.depth // 10
slc = np.stack([self[:, :, i * size] for i in range(1, 10)], axis=0)
self.zero_traces = np.zeros(self.lens, dtype=np.int32)
self.zero_traces[np.std(slc, axis=0) == 0] = 1
except (ValueError, AttributeError): # can't reshape
pass
# Store additional segy info
self.segy_path = self.path
self.segy_text = [self.segyfile.text[i] for i in range(1 + self.segyfile.ext_headers)]
# Computed from CDP_X/CDP_Y information
try:
self.rotation_matrix = self.compute_rotation_matrix()
self.area = self.compute_area()
except (ValueError, KeyError): # single line SEG-Y
self.rotation_matrix = None
self.area = -1.
def add_attributes(self):
""" Infer info about curent index from `dataframe` attribute. """
self.index_len = len(self.index_headers)
self._zero_trace = np.zeros(self.depth)
# Unique values in each of the indexing column
self.unsorted_uniques = [np.unique(self.dataframe.index.get_level_values(i).values)
for i in range(self.index_len)]
self.uniques = [np.sort(item) for item in self.unsorted_uniques]
self.uniques_inversed = [{v: j for j, v in enumerate(self.uniques[i])}
for i in range(self.index_len)]
self.byte_no = [getattr(segyio.TraceField, h) for h in self.index_headers]
self.offsets = [np.min(item) for item in self.uniques]
self.lens = [len(item) for item in self.uniques]
self.ranges = [(np.min(item), np.max(item)) for item in self.uniques]
self.cube_shape = np.asarray([*self.lens, self.depth])
def _get_store_key(self, traceseqno):
""" get trace lateral coordinates from header """
header = self.segyfile.header[traceseqno]
# i -> id in a dataframe
keys = [header.get(field) for field in self.byte_no]
store_key = tuple(self.uniques_inversed[j][item] for j, item in enumerate(keys))
return store_key
def collect_stats(self, spatial=True, bins=25, num_keep=10000, pbar=True, **kwargs):
""" Pass through file data to collect stats:
- min/max values.
- a number of quantiles of values in the cube.
- certain amount of traces are stored in a `trace_container` attribute.
If `spatial` is True, makes an additional pass through the cube to obtain following:
- min/max/mean/std for every trace - `min_matrix`, `max_matrix` and so on.
- histogram of values for each trace: - `hist_matrix`.
- bins for histogram creation: - `bins`.
Parameters
----------
spatial : bool
Whether to collect additional stats.
bins : int or str
            Number of bins, or the name of an automatic algorithm for choosing the number of bins.
num_keep : int
Number of traces to store.
"""
#pylint: disable=not-an-iterable
_ = kwargs
num_traces = len(self.segyfile.header)
num_keep = min(num_keep, num_traces // 10) or 1
frequency = num_traces // num_keep
# Get min/max values, store some of the traces
trace_container = []
value_min, value_max = np.inf, -np.inf
min_matrix, max_matrix = np.full(self.lens, np.nan), np.full(self.lens, np.nan)
for i in Notifier(pbar, desc='Finding min/max')(range(num_traces)):
trace = self.segyfile.trace[i]
store_key = self._get_store_key(i)
trace_min, trace_max = find_min_max(trace)
min_matrix[store_key] = trace_min
max_matrix[store_key] = trace_max
if i % frequency == 0 and trace_min != trace_max:
trace_container.extend(trace.tolist())
#TODO: add dtype for storing
# Store everything into instance
self.min_matrix, self.max_matrix = min_matrix, max_matrix
self.zero_traces = (min_matrix == max_matrix).astype(np.int)
self.zero_traces[np.isnan(min_matrix)] = 1
value_min = np.nanmin(min_matrix)
value_max = np.nanmax(max_matrix)
# Collect more spatial stats: min, max, mean, std, histograms matrices
if spatial:
# Make bins
bins = np.histogram_bin_edges(None, bins, range=(value_min, value_max)).astype(np.float)
self.bins = bins
# Create containers
hist_matrix = np.full((*self.lens, len(bins)-1), np.nan)
# Iterate over traces
for i in Notifier(pbar, desc=f'Collecting stats for {self.displayed_name}')(range(num_traces)):
trace = self.segyfile.trace[i]
store_key = self._get_store_key(i)
# For each trace, we store an entire histogram of amplitudes
val_min, val_max = find_min_max(trace)
if val_min != val_max:
histogram = np.histogram(trace, bins=bins)[0]
hist_matrix[store_key] = histogram
# Restore stats from histogram
midpoints = (bins[1:] + bins[:-1]) / 2
probs = hist_matrix / np.sum(hist_matrix, axis=-1, keepdims=True)
mean_matrix = np.sum(probs * midpoints, axis=-1)
std_matrix = np.sqrt(np.sum((np.broadcast_to(midpoints, (*mean_matrix.shape, len(midpoints))) - \
mean_matrix.reshape(*mean_matrix.shape, 1))**2 * probs,
axis=-1))
# Store everything into instance
self.mean_matrix, self.std_matrix = mean_matrix, std_matrix
self.hist_matrix = hist_matrix
self.trace_container = np.array(trace_container)
self.v_uniques = len(np.unique(trace_container))
self.v_min, self.v_max = value_min, value_max
self.v_mean, self.v_std = np.mean(trace_container), np.std(trace_container)
self.v_q001, self.v_q01, self.v_q05 = np.quantile(trace_container, [0.001, 0.01, 0.05])
self.v_q999, self.v_q99, self.v_q95 = np.quantile(trace_container, [0.999, 0.99, 0.95])
self.has_stats = True
self.store_meta()
# Compute stats from CDP/LINES correspondence
def compute_rotation_matrix(self):
""" Compute transform from INLINE/CROSSLINE coordinates to CDP system. """
ix_points = []
cdp_points = []
for _ in range(3):
idx = np.random.randint(len(self.dataframe))
trace = self.segyfile.header[idx]
# INLINE_3D -> CDP_X, CROSSLINE_3D -> CDP_Y
ix = (trace[segyio.TraceField.INLINE_3D], trace[segyio.TraceField.CROSSLINE_3D])
cdp = (trace[segyio.TraceField.CDP_X], trace[segyio.TraceField.CDP_Y])
ix_points.append(ix)
cdp_points.append(cdp)
rotation_matrix = cv2.getAffineTransform(np.float32(ix_points), np.float32(cdp_points))
return rotation_matrix
def compute_area(self, correct=True, shift=50):
""" Compute approximate area of the cube in square kilometres.
Parameters
----------
correct : bool
            Whether to correct computed area for zero traces.
        shift : int
            Trace-index offset used when probing neighbouring traces to estimate CDP spacing.
"""
i = self.ilines[self.ilines_len // 2]
x = self.xlines[self.xlines_len // 2]
# Central trace coordinates
idx = self.dataframe['trace_index'][(i, x)]
trace = self.segyfile.header[idx]
cdp_x, cdp_y = (trace[segyio.TraceField.CDP_X], trace[segyio.TraceField.CDP_Y])
# Two shifted traces
idx_dx = self.dataframe['trace_index'][(i, x + shift)]
trace_dx = self.segyfile.header[idx_dx]
cdp_x_delta = abs(trace_dx[segyio.TraceField.CDP_X] - cdp_x)
idx_dy = self.dataframe['trace_index'][(i + shift, x)]
trace_dy = self.segyfile.header[idx_dy]
cdp_y_delta = abs(trace_dy[segyio.TraceField.CDP_Y] - cdp_y)
# Traces if CDP_X/CDP_Y coordinate system is rotated on 90 degrees with respect to ILINES/CROSSLINES
if cdp_x_delta == 0 and cdp_y_delta == 0:
idx_dx = self.dataframe['trace_index'][(i + shift, x)]
trace_dx = self.segyfile.header[idx_dx]
cdp_x_delta = abs(trace_dx[segyio.TraceField.CDP_X] - cdp_x)
idx_dy = self.dataframe['trace_index'][(i, x + shift)]
trace_dy = self.segyfile.header[idx_dy]
cdp_y_delta = abs(trace_dy[segyio.TraceField.CDP_Y] - cdp_y)
cdp_x_delta /= shift
cdp_y_delta /= shift
ilines_km = cdp_y_delta * self.ilines_len / 1000
xlines_km = cdp_x_delta * self.xlines_len / 1000
area = ilines_km * xlines_km
if correct and hasattr(self, 'zero_traces'):
area -= (cdp_x_delta / 1000) * (cdp_y_delta / 1000) * np.sum(self.zero_traces)
return round(area, 2)
# Methods to load actual data from SEG-Y
# 1D
def load_trace(self, index):
""" Load individual trace from segyfile.
If passed `np.nan`, returns trace of zeros.
"""
# TODO: can be improved by creating buffer and writing directly to it
if not np.isnan(index):
return self.segyfile.trace.raw[int(index)]
return self._zero_trace
def load_traces(self, trace_indices):
""" Stack multiple traces together. """
# TODO: can be improved by preallocating memory and passing it as a buffer to `load_trace`
return np.stack([self.load_trace(idx) for idx in trace_indices])
# 2D
@lru_cache(128, attributes='index_headers')
def load_slide(self, loc=None, axis=0, start=None, end=None, step=1, stable=True):
""" Create indices and load actual traces for one slide.
If the current index is 1D, then slide is defined by `start`, `end`, `step`.
If the current index is 2D, then slide is defined by `loc` and `axis`.
Parameters
----------
loc : int
Number of slide to load.
axis : int
Number of axis to load slide along.
start, end, step : ints
Parameters of slice loading for 1D index.
stable : bool
Whether or not to use the same sorting order as in the segyfile.
"""
if axis in [0, 1]:
indices = self.make_slide_indices(loc=loc, start=start, end=end, step=step, axis=axis, stable=stable)
slide = self.load_traces(indices)
elif axis == 2:
slide = self.segyfile.depth_slice[loc]
if slide.shape[0] == np.prod(self.lens):
slide = slide.reshape(self.lens)
else:
buffer = np.zeros_like(self.zero_traces, dtype=np.float32)
buffer[self.zero_traces == 0] = slide
slide = buffer
return slide
def make_slide_indices(self, loc=None, axis=0, start=None, end=None, step=1, stable=True, return_iterator=False):
""" Choose appropriate version of index creation, depending on length of the current index.
Parameters
----------
start, end, step : ints
Parameters of slice loading for 1d index.
stable : bool
Whether or not to use the same sorting order as in the segyfile.
return_iterator : bool
Whether to also return the same iterator that is used to index current `dataframe`.
Can be useful for subsequent loads from the same place in various instances.
"""
if self.index_len == 1:
_ = loc, axis
result = self.make_slide_indices_1d(start=start, end=end, step=step, stable=stable,
return_iterator=return_iterator)
elif self.index_len == 2:
_ = start, end, step
result = self.make_slide_indices_2d(loc=loc, axis=axis, stable=stable,
return_iterator=return_iterator)
elif self.index_len == 3:
raise NotImplementedError('Yet to be done!')
else:
            raise ValueError('Index length must be less than 4.')
return result
def make_slide_indices_1d(self, start=None, end=None, step=1, stable=True, return_iterator=False):
""" 1D version of index creation. """
start = start or self.offsets[0]
end = end or self.uniques[0][-1]
if stable:
iterator = self.dataframe.index[(self.dataframe.index >= start) & (self.dataframe.index <= end)]
iterator = iterator.values[::step]
else:
iterator = np.arange(start, end+1, step)
indices = self.dataframe['trace_index'].reindex(iterator, fill_value=np.nan).values
if return_iterator:
return indices, iterator
return indices
def make_slide_indices_2d(self, loc, axis=0, stable=True, return_iterator=False):
""" 2D version of index creation. """
other_axis = 1 - axis
location = self.uniques[axis][loc]
if stable:
others = self.dataframe[self.dataframe.index.get_level_values(axis) == location]
others = others.index.get_level_values(other_axis).values
else:
others = self.uniques[other_axis]
iterator = list(zip([location] * len(others), others) if axis == 0 else zip(others, [location] * len(others)))
indices = self.dataframe['trace_index'].reindex(iterator, fill_value=np.nan).values
#TODO: keep only uniques, when needed, with `nan` filtering
if stable:
indices = np.unique(indices)
if return_iterator:
return indices, iterator
return indices
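    # The `reindex` trick above in isolation: missing (iline, xline) pairs come back
    # as NaN, which `load_trace` later turns into zero traces. Standalone sketch with
    # made-up values:
    #   s = pd.Series({(1, 10): 0, (1, 11): 1, (2, 10): 2}, name='trace_index')
    #   s.reindex([(1, 10), (1, 12)], fill_value=np.nan).values   # -> array([ 0., nan])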
# 3D
def _load_crop(self, locations):
""" Load 3D crop from the cube.
Parameters
----------
locations : sequence of slices
List of desired slices to load: along the first index, the second, and depth.
Example
-------
If the current index is `INLINE_3D` and `CROSSLINE_3D`, then to load
5:110 ilines, 100:1105 crosslines, 0:700 depths, locations must be::
[slice(5, 110), slice(100, 1105), slice(0, 700)]
"""
shape = np.array([((slc.stop or stop) - (slc.start or 0)) for slc, stop in zip(locations, self.cube_shape)])
indices = self.make_crop_indices(locations)
crop = self.load_traces(indices)[..., locations[-1]].reshape(shape)
return crop
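    # Usage sketch mirroring the docstring example above (hypothetical `geom` instance;
    # the shape follows directly from the slice lengths):
    #   locations = [slice(5, 110), slice(100, 1105), slice(0, 700)]
    #   crop = geom._load_crop(locations)   # crop.shape -> (105, 1005, 700)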
def make_crop_indices(self, locations):
""" Create indices for 3D crop loading. """
iterator = list(product(*[[self.uniques[idx][i] for i in range(locations[idx].start, locations[idx].stop)]
for idx in range(2)]))
indices = self.dataframe['trace_index'].reindex(iterator, fill_value=np.nan).values
        _, unique_ind = np.unique(indices, return_index=True)

import numpy as np
import tensorflow as tf
from collections import defaultdict
class Greedy_Tracker(object):
def __init__(self, cfg_tracker, cfg_train, tf_ops, tf_placeholders, session):
self.network_type = cfg_tracker.network_type
self.cls_thr = cfg_tracker.nn_gating_thr
self.det_ratio_thr = cfg_tracker.det_ratio
self.N_miss_max = cfg_tracker.N_miss_max
self.img_height = cfg_tracker.IMAGE_HEIGHT
self.img_width = cfg_tracker.IMAGE_WIDTH
self.all_tracks = defaultdict(lambda: defaultdict(defaultdict))
self.track_num = 0
self.model_info = {}
self.model_info['app_hidden_dim'] = cfg_train.APP_HIDDEN_DIM
self.model_info['mot_hidden_dim'] = cfg_train.MOT_HIDDEN_DIM
self.model_info['mot_input_dim'] = cfg_train.MOT_INPUT_DIM
self.result = []
self.cfg_train = cfg_train
self.cfg_tracker = cfg_tracker
self.sess = session
self.tf_ops = tf_ops
self.tf_plh = tf_placeholders
self.neg_mem_indices = self.precompute_neg_mem_indices()
def precompute_neg_mem_indices(self):
# get indices for online negative examples (i.e. other tracks in the scene) for each track
# NOTE: need to be set again when the code is used for tracking more objects
max_track_num = 200
max_det_num = 200
neg_mem_ind = np.zeros((max_track_num, max_det_num, max_track_num-1, 2))
        for i in range(max_track_num):
            for j in range(max_det_num):
xy_ind_tmp = np.zeros((max_track_num - 1, 2))
x_ind_tmp = np.arange(max_track_num, dtype=np.int32)
xy_ind_tmp[:, 0] = x_ind_tmp[x_ind_tmp != i]
xy_ind_tmp[:, 1] = j
neg_mem_ind[i, j, :, :] = xy_ind_tmp
return neg_mem_ind
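    # Standalone sanity-check sketch for the precomputed indices (illustrative only):
    #   i, j, n = 3, 5, 200
    #   x = np.arange(n)
    #   pairs = np.stack([x[x != i], np.full(n - 1, j)], axis=1)
    #   # pairs[:, 0] lists every track id except i and pairs[:, 1] repeats detection j,
    #   # i.e. the (track, detection) pairs used as online negatives for track i.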
def build_neg_mem_indices(self, track_num, det_num):
if track_num > 1:
neg_mem_inds = self.neg_mem_indices[:track_num, :det_num, :(track_num-1), :]
elif track_num == 1:
neg_mem_inds = None
else:
raise NotImplementedError
return neg_mem_inds
def get_lstm_states(self, h_np, c_np, cur_detbb_num, is_track_state):
h_np = np.reshape(h_np, (cur_detbb_num, cur_detbb_num, -1))
c_np = np.reshape(c_np, (cur_detbb_num, cur_detbb_num, -1))
if is_track_state == True:
h_np = np.transpose(h_np, (1, 0, 2))
c_np = np.transpose(c_np, (1, 0, 2))
# loop can be commented out later to improve processing time
# check lstm states
h_np = np.reshape(h_np , (cur_detbb_num * cur_detbb_num, -1))
for kkk in range(1, cur_detbb_num):
assert(np.array_equal(h_np[kkk*cur_detbb_num:(kkk+1)*cur_detbb_num, :], \
h_np[:cur_detbb_num, :]))
h_np = h_np[:cur_detbb_num, :]
# check lstm states
c_np = np.reshape(c_np , (cur_detbb_num * cur_detbb_num, -1))
for kkk in range(1, cur_detbb_num):
assert(np.array_equal(c_np[kkk*cur_detbb_num:(kkk+1)*cur_detbb_num, :], \
c_np[:cur_detbb_num, :]))
c_np = c_np[:cur_detbb_num, :]
return (h_np, c_np)
def get_lstm_states_new(self, h_np, c_np, cur_detbb_num):
h_np = np.reshape(h_np, (cur_detbb_num, -1))
c_np = np.reshape(c_np, (cur_detbb_num, -1))
h_np = h_np[:cur_detbb_num, :]
c_np = c_np[:cur_detbb_num, :]
return (h_np, c_np)
def get_lstm_states_for_matched_tracks(self, matching, model_dim, h_np, c_np, trk_num, det_num):
inds_sel1 = []
track_i_sel = []
# select lstm states for matched tracks
if len(matching) > 0:
h_np_tmp = np.zeros((len(matching), model_dim))
c_np_tmp = np.zeros((len(matching), 2 * model_dim))
h_np = np.reshape(h_np, (trk_num, det_num, -1))
c_np = np.reshape(c_np, (trk_num, det_num, -1))
for kkk in range(0, len(matching)):
track_i = int(matching[kkk][0, 0])
detbb_i = int(matching[kkk][0, 1])
h_np_tmp[kkk, :] = h_np[track_i, detbb_i, :]
c_np_tmp[kkk, :] = c_np[track_i, detbb_i, :]
inds_sel1.append(detbb_i)
track_i_sel.append(track_i)
h_np = h_np_tmp
c_np = c_np_tmp
else:
h_np = []
c_np = []
return (h_np, c_np, inds_sel1, track_i_sel)
def precompute_app_features(self, imgs, bbs):
cur_detbb_num = np.shape(imgs)[0]
assert(cur_detbb_num == np.shape(bbs)[0])
feed_dict = {
self.tf_plh['detbb_num']: cur_detbb_num,
self.tf_plh['images']:imgs,
self.tf_plh['is_training']: False,
self.tf_plh['num_step_by_user']: 1,
self.tf_plh['valid_app_data']: np.ones((cur_detbb_num, 1, 1), dtype=np.int32),
self.tf_plh['indices_for_mapping']: np.reshape(np.arange(cur_detbb_num * 1, dtype=np.int32), (-1, 1)),
self.tf_plh['image_batch_shape']: np.array([cur_detbb_num * 1, self.cfg_train.APP_LAYER_DIM])
}
app_embed_np = self.sess.run(self.tf_ops['app_embed'], feed_dict=feed_dict)
return app_embed_np
def initialize_tracks(
self,
h,
c,
memory,
bbs,
bbs_norm,
det_ids,
frame,
hidden_dim,
is_dummy,
network
):
h = np.reshape(h, (-1, hidden_dim))
if network == 'app_blstm':
assert(np.shape(memory)[0] == np.shape(h)[0])
assert(np.shape(memory)[0] == np.shape(c)[0])
assert(np.array_equal(h, c[:, hidden_dim:]))
assert(np.shape(h)[0] == np.shape(c)[0])
if is_dummy == False:
for i in range(0, np.shape(h)[0]):
self.track_num += 1
# 1 x d
self.all_tracks[self.track_num]['h_states'] = h[i, :]
# 1 x d
self.all_tracks[self.track_num]['c_states'] = c[i, :]
self.all_tracks[self.track_num]['real_det_num'] = 1
self.all_tracks[self.track_num]['miss_det_num'] = 0
self.all_tracks[self.track_num]['last_miss_det_num'] = 0
self.all_tracks[self.track_num]['bb'] = bbs[det_ids[i], :]
self.all_tracks[self.track_num]['bb_norm'] = bbs_norm[det_ids[i], :]
self.all_tracks[self.track_num]['frame'] = frame
self.all_tracks[self.track_num]['th'] = [self.cls_thr]
if network == 'app_blstm':
# 1 x 1 x d
self.all_tracks[self.track_num]['mem'] = memory[i, :, :]
self.result.append((frame, det_ids[i], 1.0, self.track_num))
elif is_dummy == True:
ct = -1
for i in range(0, np.shape(memory)[0]):
ct -= 1
# 1 x d
self.all_tracks[ct]['h_states'] = h[i, :]
# 1 x d
self.all_tracks[ct]['c_states'] = c[i, :]
self.all_tracks[ct]['real_det_num'] = 1
self.all_tracks[ct]['miss_det_num'] = 0
self.all_tracks[ct]['last_miss_det_num'] = 0
self.all_tracks[ct]['bb'] = bbs[det_ids[i], :]
self.all_tracks[ct]['bb_norm'] = bbs_norm[det_ids[i], :]
self.all_tracks[ct]['frame'] = frame
self.all_tracks[ct]['th'] = [self.cls_thr]
if network == 'app_blstm':
# 1 x 1 x d
self.all_tracks[ct]['mem'] = memory[i, :, :]
else:
raise NotImplementedError
def delete_dummy_tracks(self, frame):
        for i in list(self.all_tracks.keys()):
if i < 0:
del self.all_tracks[i]
for i in self.all_tracks.keys():
assert(i > 0)
def update_tracks(
self,
h,
c,
memory,
bbs,
bbs_norm,
track_ids,
matching,
matching_score,
frame,
hidden_dim,
network,
missdet_tracks
):
h = np.reshape(h, (-1, hidden_dim))
if np.shape(c)[0] != 0:
if network == 'app_blstm':
assert((np.shape(memory)[0] == np.shape(h)[0]))
assert((np.shape(memory)[0] == np.shape(c)[0]))
assert(np.array_equal(h, c[:, hidden_dim:]))
assert(len(matching) == len(matching_score))
track_ids_sel1 = []
for i in range(0, len(matching)):
track_i = int(matching[i][0, 0])
detbb_i = int(matching[i][0, 1])
if network == 'app_blstm':
self.all_tracks[track_ids[track_i]]['mem'] = memory[i, :, :]
self.all_tracks[track_ids[track_i]]['h_states'] = h[i, :]
self.all_tracks[track_ids[track_i]]['c_states'] = c[i, :]
self.all_tracks[track_ids[track_i]]['real_det_num'] += 1
self.all_tracks[track_ids[track_i]]['last_miss_det_num'] = 0
self.all_tracks[track_ids[track_i]]['bb'] = bbs[detbb_i, :]
self.all_tracks[track_ids[track_i]]['bb_norm'] = bbs_norm[detbb_i, :]
self.all_tracks[track_ids[track_i]]['frame'] = frame
self.all_tracks[track_ids[track_i]]['th'] = self.all_tracks[track_ids[track_i]]['th'] \
+ [matching_score[i]]
self.result.append((frame, detbb_i, 1.0, track_ids[track_i]))
track_ids_sel1.append(track_ids[track_i])
# update non matched tracks with dummy detections
track_ids_sel2 = np.setdiff1d(track_ids, track_ids_sel1)
if network == 'mot_lstm' and len(track_ids_sel2) > 0:
assert(np.array_equal(track_ids_sel2, missdet_tracks['track_ids']))
for i in range(0, len(track_ids_sel2)):
# skip dummy track
if track_ids_sel2[i] < 0:
continue
self.all_tracks[track_ids_sel2[i]]['miss_det_num'] += 1
self.all_tracks[track_ids_sel2[i]]['last_miss_det_num'] += 1
self.result.append((frame, None, None, track_ids_sel2[i]))
if network == 'mot_lstm' and len(track_ids_sel2) > 0:
self.all_tracks[track_ids_sel2[i]]['h_states'] = missdet_tracks['h_states'][i, :]
self.all_tracks[track_ids_sel2[i]]['c_states'] = missdet_tracks['c_states'][i, :]
assert(track_ids_sel2[i] == missdet_tracks['track_ids'][i])
def compute_iou(self, bb_p, bb_n):
bb_px_min = bb_p[0]
bb_py_min = bb_p[1]
bb_pw = bb_p[2]
bb_ph = bb_p[3]
bb_px_max = bb_px_min + bb_pw
bb_py_max = bb_py_min + bb_ph
bb_nx_min = bb_n[0]
bb_ny_min = bb_n[1]
bb_nw = bb_n[2]
bb_nh = bb_n[3]
bb_nx_max = bb_nx_min + bb_nw
bb_ny_max = bb_ny_min + bb_nh
bb_p_area = (bb_px_max - bb_px_min)*(bb_py_max - bb_py_min)
bb_n_area = (bb_nx_max - bb_nx_min)*(bb_ny_max - bb_ny_min)
x1 = np.maximum(bb_px_min, bb_nx_min)
y1 = np.maximum(bb_py_min, bb_ny_min)
x2 = np.minimum(bb_px_max, bb_nx_max)
y2 = np.minimum(bb_py_max, bb_ny_max)
w = np.maximum(0.0, x2 - x1)
h = np.maximum(0.0, y2 - y1)
intersection = np.multiply(w, h)
union = np.add(bb_p_area, bb_n_area) - intersection
IoU = np.divide(intersection, union)
return IoU
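    # Worked example for the IoU above (boxes given as [x, y, w, h]):
    #   bb_p = [0, 0, 10, 10]  -> area 100
    #   bb_n = [5, 5, 10, 10]  -> area 100
    #   intersection = 5 * 5 = 25, union = 100 + 100 - 25 = 175
    #   IoU = 25 / 175 ~= 0.143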
def solve_greedy_matching(self, softmax, m_states, track_num, detbb_num, track_ids, bbs, frame):
col1 = np.arange(track_num)
col2 = np.arange(detbb_num)
col1 = np.expand_dims(col1, axis=1)
col2 = np.expand_dims(col2, axis=0)
col1 = np.reshape(np.tile(col1, (1, detbb_num)), (-1, 1))
col2 = np.reshape(np.tile(col2, (track_num, 1)), (-1, 1))
track_detbb_pair_ind = np.concatenate((col1, col2), axis=1)
assert(np.shape(track_detbb_pair_ind)[0] == track_num * detbb_num)
motion_gating_mask = np.ones((track_num, detbb_num, 1))
if self.cfg_tracker.IS_NAIVE_GATING_ON == True:
for i in range(0, track_num):
bb_p = self.all_tracks[track_ids[i]]['bb']
bb_n = bbs
if track_ids[i] < 0:
motion_gating_mask[i, :, 0] = 0
else:
fr_diff = (frame - self.all_tracks[track_ids[i]]['frame'])
motion_gating_mask[i, :, 0] = self.naive_motion_gating(bb_p, bb_n, fr_diff)
motion_gating_mask = np.reshape(motion_gating_mask, (track_num * detbb_num, 1))
# (N1 * N2) x 1
softmax_pos = softmax[:, 1]
softmax_pos = np.reshape(softmax_pos, (-1, 1))
softmax_pos_org = softmax_pos
softmax_pos = np.multiply(softmax_pos, motion_gating_mask)
matching = []
matching_score = []
while True:
max_p = np.amax(softmax_pos, axis=0)
max_i = np.argmax(softmax_pos, axis=0)
assert(softmax_pos[max_i] == max_p)
assert(np.shape(softmax_pos)[0] == np.shape(track_detbb_pair_ind)[0])
if max_p > self.cls_thr:
matching.append(track_detbb_pair_ind[max_i, :])
matching_score.append(softmax_pos_org[max_i])
del_ind1 = track_detbb_pair_ind[:, 1] == track_detbb_pair_ind[max_i, 1]
del_ind2 = track_detbb_pair_ind[:, 0] == track_detbb_pair_ind[max_i, 0]
del_ind = np.where(np.logical_or(del_ind1, del_ind2))[0]
track_detbb_pair_ind_tmp = np.delete(track_detbb_pair_ind, del_ind, axis=0)
softmax_pos = np.delete(softmax_pos, del_ind, axis=0)
softmax_pos_org = np.delete(softmax_pos_org, del_ind, axis=0)
assert(len(np.where(track_detbb_pair_ind_tmp[:, 1] == track_detbb_pair_ind[max_i, 1])[0]) == 0)
assert(len(np.where(track_detbb_pair_ind_tmp[:, 0] == track_detbb_pair_ind[max_i, 0])[0]) == 0)
track_detbb_pair_ind = track_detbb_pair_ind_tmp
# out of the loop when there is no good match left
else:
break
# out of the loop when all detections are taken
if np.shape(track_detbb_pair_ind)[0] == 0:
break
return (matching, matching_score)
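    # The greedy loop above, illustrated on a tiny score matrix (rows = tracks,
    # columns = detections, threshold 0.5; illustrative numbers):
    #   scores = [[0.9, 0.2],
    #             [0.8, 0.7]]
    #   step 1: best pair is (track 0, det 0) with 0.9 -> match, drop row 0 and col 0
    #   step 2: only (track 1, det 1) with 0.7 remains  -> match
    #   result: [(0, 0), (1, 1)]; unlike a global (Hungarian) assignment, the greedy
    #   rule simply takes the current maximum at each step.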
def pick_imgs(self, imgs, imgs_inds):
imgs_sel = np.zeros((len(imgs_inds), self.img_height, self.img_width, 3))
for i in range(0, len(imgs_inds)):
imgs_sel[i, :, :, :] = imgs[imgs_inds[i], :, :, :]
return imgs_sel
def pick_dets(self, dets, dets_inds):
dets_sel = np.zeros((len(dets_inds), self.model_info['mot_input_dim']))
for i in range(0, len(dets_inds)):
dets_sel[i, :] = dets[dets_inds[i], :]
return dets_sel
def get_gating_result(self, x_diff, y_diff, w_diff, h_diff, gating_factor):
# NOTE: These parameters are tuned for the MOT Challenge datasets.
x_diff_th = 3.5
y_diff_th = 2.0
w_diff_th = 1.8
h_diff_th = 1.8
return np.logical_and(np.logical_and(x_diff < x_diff_th, y_diff < y_diff_th),
np.logical_and(w_diff < w_diff_th, h_diff < h_diff_th))
def naive_motion_gating(self, bb_p, bb_n, gating_factor):
bb_px = bb_p[0]
bb_py = bb_p[1]
bb_pw = bb_p[2]
bb_ph = bb_p[3]
bb_nx = bb_n[:, 0]
bb_ny = bb_n[:, 1]
bb_nw = bb_n[:, 2]
bb_nh = bb_n[:, 3]
x_diff = np.divide(np.abs(bb_px - bb_nx), bb_pw)
y_diff = np.divide(np.abs(bb_py - bb_ny), bb_ph)
w_diff = np.maximum(np.divide(bb_pw, bb_nw), np.divide(bb_nw, bb_pw))
h_diff = np.maximum(np.divide(bb_ph, bb_nh), np.divide(bb_nh, bb_ph))
return self.get_gating_result(x_diff, y_diff, w_diff, h_diff, gating_factor)
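    # Worked example for the gating above (boxes as [x, y, w, h]):
    #   bb_p = [100, 100, 50, 100], bb_n = [[110, 105, 55, 95]]
    #   x_diff = |100 - 110| / 50    = 0.20 < 3.5
    #   y_diff = |100 - 105| / 100   = 0.05 < 2.0
    #   w_diff = max(50/55, 55/50)   = 1.10 < 1.8
    #   h_diff = max(100/95, 95/100) ~= 1.05 < 1.8
    #   -> the candidate detection passes the gate (True).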
def get_result(self):
return self.result
class Greedy_Tracker_APP_BLSTM(Greedy_Tracker):
def __init__(self, cfg_tracker, cfg_train, tf_ops, tf_placeholders, session):
super(Greedy_Tracker_APP_BLSTM, self).__init__(cfg_tracker, cfg_train, tf_ops, tf_placeholders, session)
def run(self, bbs, bbs_norm, imgs, frame_num):
# first frame
if len(self.all_tracks.keys()) == 0 and imgs is not None:
mem_np = self.initialize_track_mems(imgs, bbs)
h_np, c_np, memory_np = mem_np
cur_detbb_num = np.shape(imgs)[0]
self.initialize_tracks(
h_np,
c_np,
memory_np,
bbs,
bbs_norm,
np.array(range(cur_detbb_num)),
frame_num,
self.model_info['app_hidden_dim'],
is_dummy=False,
network='app_blstm'
)
elif len(self.all_tracks.keys()) != 0:
bookkeeping = {}
self.data_association(imgs, bbs, bbs_norm, frame_num, bookkeeping)
self.update_existing_tracks(bbs, bbs_norm, frame_num, bookkeeping)
self.start_new_tracks(imgs, bbs, bbs_norm, frame_num, bookkeeping)
def initialize_track_mems(self, imgs, bbs):
cur_detbb_num = np.shape(imgs)[0]
assert(cur_detbb_num == np.shape(bbs)[0])
# cnn features (input to lstm)
app_embed_np = self.precompute_app_features(imgs, bbs)
# current lstm states
c_np = np.zeros((cur_detbb_num, 2 * self.model_info['app_hidden_dim']))
track_mems = self.update_lstm_mems(app_embed_np, c_np, cur_detbb_num)
h_np, c_np, memory_np = track_mems
return (h_np, c_np, memory_np)
def update_lstm_mems(self, app_embed_np, c_np, cur_detbb_num):
feed_dict = {
self.tf_plh['track_num']: cur_detbb_num,
self.tf_plh['app_embed_plh']: app_embed_np,
self.tf_plh['istate_app']: c_np,
self.tf_plh['is_training']: False,
self.tf_plh['num_step_by_user']: 1,
self.tf_plh['frames_by_user']: np.array([1]),
self.tf_plh['track_len']: np.ones(cur_detbb_num, dtype=np.int32),
self.tf_plh['track_len_offset']: np.zeros(cur_detbb_num, dtype=np.int32),
}
tf_ops = [
self.tf_ops['h_app_states'],
self.tf_ops['c_app_states'],
self.tf_ops['h_app_track_memory']
]
# N2 x 1 x d
h_np, c_np, memory_np = self.sess.run(tf_ops, feed_dict=feed_dict)
return (h_np, c_np, memory_np)
def data_association(self, imgs, bbs, bbs_norm, frame_num, bookkeeping):
# 1. compute appearance features for new detections
app_embed_np = []
if imgs is not None:
app_embed_np = self.precompute_app_features(imgs, bbs)
        # 2. load existing tracks
track_memory_sel, track_bb_sel, track_ids_sel = self.collect_track_memory()
assert(np.shape(track_memory_sel)[0] == len(track_ids_sel))
# 3. solve matching between detections and tracks
matching = []
matching_score = []
inds_sel1 = []
istates_sel = []
if imgs is not None and np.shape(track_memory_sel)[0] > 1:
detbb_num = np.shape(imgs)[0]
assert(detbb_num == np.shape(bbs)[0])
track_num = np.shape(track_memory_sel)[0]
softmax_np, m_states_np = self.compute_softmax_score(
bbs,
track_bb_sel,
imgs,
app_embed_np,
track_memory_sel,
track_ids_sel,
track_num,
detbb_num
)
matching, matching_score = self.solve_greedy_matching(
softmax_np,
m_states_np,
track_num,
detbb_num,
track_ids_sel,
bbs,
frame_num
)
inds_sel1, istates_sel = self.pick_imgs_and_istates(
track_ids_sel,
matching,
self.model_info['app_hidden_dim'],
)
bookkeeping['matching'] = matching
bookkeeping['matching_score'] = matching_score
bookkeeping['inds_sel1'] = inds_sel1
bookkeeping['istates_sel'] = istates_sel
bookkeeping['track_ids_sel'] = track_ids_sel
bookkeeping['app_embed_np'] = app_embed_np
def collect_track_memory(self):
all_ids = sorted(self.all_tracks.keys())
sel_ids = []
for i in range(0, len(all_ids)):
total_num = self.all_tracks[all_ids[i]]['real_det_num'] + \
self.all_tracks[all_ids[i]]['miss_det_num']
if self.all_tracks[all_ids[i]]['real_det_num'] > 0:
if (((float(self.all_tracks[all_ids[i]]['real_det_num']) / total_num) >= self.det_ratio_thr) and
(self.all_tracks[all_ids[i]]['last_miss_det_num'] <= self.N_miss_max)):
sel_ids.append(all_ids[i])
else:
assert((float(self.all_tracks[all_ids[i]]['real_det_num']) / total_num) < 1.0)
track_mem_sel = np.zeros((len(sel_ids), 1, self.model_info['app_hidden_dim']))
track_bb_sel = np.zeros((len(sel_ids), self.cfg_train.MOT_INPUT_DIM))
for i in range(0, len(sel_ids)):
track_mem_sel[i, :, :] = self.all_tracks[sel_ids[i]]['mem']
track_bb_sel[i, :] = self.all_tracks[sel_ids[i]]['bb'][:self.cfg_train.MOT_INPUT_DIM]
# track_bb_sel[i, :] = self.all_tracks[sel_ids[i]]['bb_norm'][:6]
assert(np.shape(self.all_tracks[sel_ids[i]]['bb'])[0] == (self.cfg_train.MOT_INPUT_DIM + 1))
return (track_mem_sel, track_bb_sel, sel_ids)
def compute_softmax_score(self, det_bbs, trk_bbs, imgs, app_embed, trk_memory_sel, trk_ids_sel, trk_num, det_num):
indices = self.build_neg_mem_indices(trk_num, det_num)
        # istate_app serves as a dummy variable here.
        # It does not affect the results.
feed_dict = {
self.tf_plh['detbb_num']: det_num,
self.tf_plh['track_num']: trk_num,
self.tf_plh['app_embed_plh']: app_embed,
self.tf_plh['istate_app']: np.zeros((trk_num, 2 * self.model_info['app_hidden_dim'])),
self.tf_plh['h_app_track_memory_plh']: trk_memory_sel,
self.tf_plh['is_training']: False,
self.tf_plh['num_step_by_user']: 1,
self.tf_plh['frames_by_user']: np.array([0]),
self.tf_plh['indices_by_user']: indices,
self.tf_plh['track_len']: np.ones(np.shape(app_embed)[0], dtype=np.int32),
self.tf_plh['track_len_offset']: np.zeros(np.shape(app_embed)[0], dtype=np.int32),
self.tf_plh['valid_app_data']: np.ones((det_num, 1, 1), dtype=np.int32),
self.tf_plh['indices_for_mapping']: np.reshape(np.arange(det_num * 1, dtype=np.int32), (-1, 1)),
self.tf_plh['image_batch_shape']: np.array([det_num * 1, self.cfg_train.APP_LAYER_DIM]),
self.tf_plh['det_bbox_org']: det_bbs[:, :self.cfg_train.MOT_INPUT_DIM],
self.tf_plh['trk_bbox_org']: trk_bbs,
self.tf_plh['app_frame_num']: np.zeros((trk_num, 1)),
self.tf_plh['sel_indices']: np.ones((trk_num, det_num, 1))
}
tf_ops = [
self.tf_ops['softmax_out'],
self.tf_ops['m_states']
]
# (N2 * N1) x 2, (N2 * N1) x (2 * (d / feat_dim)) matrix
softmax_np, m_states_np = self.sess.run(tf_ops, feed_dict=feed_dict)
return softmax_np, m_states_np
def pick_imgs_and_istates(self, track_ids, matching, hidden_dim):
h_states_sel = np.zeros((len(matching), hidden_dim))
c_states_sel = np.zeros((len(matching), 2 * hidden_dim))
imgs_sel_inds = []
for i in range(0, len(matching)):
track_i = int(matching[i][0, 0])
detbb_i = int(matching[i][0, 1])
h_states_sel[i, :] = self.all_tracks[track_ids[track_i]]['h_states']
c_states_sel[i, :] = self.all_tracks[track_ids[track_i]]['c_states']
imgs_sel_inds.append(detbb_i)
return (imgs_sel_inds, c_states_sel)
def update_existing_tracks(self, bbs, bbs_norm, frame_num, bookkeeping):
# update tracks with selected detections
assert(len(bookkeeping['matching']) == len(bookkeeping['inds_sel1']))
h_np = []
c_np = []
memory_np = []
if len(bookkeeping['inds_sel1']) > 0:
track_num = np.shape(bookkeeping['istates_sel'])[0]
app_embed_sel_np = bookkeeping['app_embed_np'][bookkeeping['inds_sel1'], :]
track_mems = self.update_lstm_mems(
app_embed_sel_np,
bookkeeping['istates_sel'],
track_num
)
h_np, c_np, memory_np = track_mems
self.update_tracks(
h_np,
c_np,
memory_np,
bbs,
bbs_norm,
bookkeeping['track_ids_sel'],
bookkeeping['matching'],
bookkeeping['matching_score'],
frame_num,
self.model_info['app_hidden_dim'],
network='app_blstm',
missdet_tracks=None
)
self.delete_dummy_tracks(frame_num)
def start_new_tracks(self, imgs, bbs, bbs_norm, frame_num, bookkeeping):
# start new tracks from detections which are not selected
if imgs is not None:
all_inds = range(np.shape(imgs)[0])
inds_sel2 = np.setdiff1d(all_inds, bookkeeping['inds_sel1'])
if len(inds_sel2) > 0:
cur_detbb_num = len(inds_sel2)
app_embed_sel2_np = bookkeeping['app_embed_np'][inds_sel2, :]
assert(cur_detbb_num == len(inds_sel2))
c_np = np.zeros((cur_detbb_num, 2 * self.model_info['app_hidden_dim']))
track_mems = self.update_lstm_mems(app_embed_sel2_np, c_np, cur_detbb_num)
h_np, c_np, memory_np = track_mems
self.initialize_tracks(
h_np,
c_np,
memory_np,
bbs,
bbs_norm,
inds_sel2,
frame_num,
self.model_info['app_hidden_dim'],
is_dummy=False,
network='app_blstm'
)
class Greedy_Tracker_MOT_LSTM(Greedy_Tracker):
def __init__(self, cfg_tracker, cfg_train, tf_ops, tf_placeholders, session):
super(Greedy_Tracker_MOT_LSTM, self).__init__(cfg_tracker, cfg_train, tf_ops, tf_placeholders, session)
def run(self, bbs, bbs_norm, imgs, frame_num):
# first frame
if len(self.all_tracks.keys()) == 0 and imgs is not None:
h_np, c_np = self.initialize_track_mems(imgs, bbs, bbs_norm)
cur_detbb_num = np.shape(imgs)[0]
self.initialize_tracks(
h_np,
c_np,
"",
bbs,
bbs_norm,
np.array(range(cur_detbb_num)),
frame_num,
self.model_info['mot_hidden_dim'],
is_dummy=False,
network='mot_lstm'
)
elif len(self.all_tracks.keys()) != 0:
bookkeeping = {}
self.data_association(imgs, bbs, bbs_norm, frame_num, bookkeeping)
self.update_existing_tracks(bbs, bbs_norm, frame_num, bookkeeping)
self.start_new_tracks(imgs, bbs, bbs_norm, frame_num, bookkeeping)
def initialize_track_mems(self, imgs, bbs, bbs_norm):
cur_detbb_num = np.shape(imgs)[0]
assert(cur_detbb_num == np.shape(bbs)[0])
mask_np = np.ones((cur_detbb_num, 1, 1))
state_np = np.zeros((cur_detbb_num, 2 * self.model_info['mot_hidden_dim']))
# det_num == trk_num, that's why I use cur_detbb_num repeatedly
h_np, c_np = self.update_lstm_states(bbs_norm, cur_detbb_num, cur_detbb_num, mask_np, state_np)
assert(np.shape(h_np)[0] == cur_detbb_num)
assert(np.shape(c_np)[0] == cur_detbb_num)
h_np, c_np = self.get_lstm_states_new(h_np, c_np, cur_detbb_num)
return (h_np, c_np)
def update_lstm_states(self, det_bbs_np, det_num, trk_num, valid_mot_data_np, motlstm_state_np):
feed_dict = {
self.tf_plh['detection_bboxes']: det_bbs_np,
self.tf_plh['valid_mot_data']: valid_mot_data_np,
self.tf_plh['start_offset']: np.zeros(det_num, dtype=np.int32),
self.tf_plh['end_offset']: np.zeros(det_num, dtype=np.int32),
self.tf_plh['num_step_by_user']: 1,
self.tf_plh['istate_mot']: motlstm_state_np,
self.tf_plh['track_len']: np.ones(det_num, dtype=np.int32),
self.tf_plh['c_mot_states_plh']: motlstm_state_np,
self.tf_plh['mid_missdet_num']: np.zeros(det_num, dtype=np.int32),
self.tf_plh['first_missdet_num']: np.zeros(det_num, dtype=np.int32),
self.tf_plh['detbb_num']: det_num,
self.tf_plh['track_num']: trk_num
}
tf_ops = [
self.tf_ops['h_mot_states_test'],
self.tf_ops['c_mot_states_last_test']
]
h_np, c_np = self.sess.run(tf_ops, feed_dict=feed_dict)
return (h_np, c_np)
def data_association(self, imgs, bbs, bbs_norm, frame_num, bookkeeping):
# cur_detbb_num = np.shape(imgs)[0]
track_memory_sel, track_bb_sel, track_ids_sel = self.collect_track_memory()
assert(np.shape(track_memory_sel)[0] == len(track_ids_sel))
matching = []
matching_score = []
inds_sel1 = []
track_i_sel = []
h_np = []
c_np = []
if imgs is not None and np.shape(track_memory_sel)[0] > 1:
detbb_num = np.shape(imgs)[0]
assert(detbb_num == np.shape(bbs)[0])
track_num = np.shape(track_memory_sel)[0]
softmax_np, h_np, c_np = self.compute_softmax_score(
bbs_norm,
bbs,
track_bb_sel,
detbb_num,
track_num,
track_memory_sel
)
matching, matching_score = self.solve_greedy_matching(
softmax_np,
track_memory_sel,
track_num,
detbb_num,
track_ids_sel,
bbs,
frame_num
)
h_np, c_np, inds_sel1, track_i_sel = self.get_lstm_states_for_matched_tracks(
matching,
self.model_info['mot_hidden_dim'],
h_np,
c_np,
track_num,
detbb_num
)
bookkeeping['matching'] = matching
bookkeeping['matching_score'] = matching_score
bookkeeping['track_memory_sel'] = track_memory_sel
bookkeeping['track_ids_sel'] = track_ids_sel
bookkeeping['inds_sel1'] = inds_sel1
bookkeeping['track_i_sel'] = track_i_sel
bookkeeping['h_np'] = h_np
bookkeeping['c_np'] = c_np
def collect_track_memory(self):
all_ids = sorted(self.all_tracks.keys())
sel_ids = []
for i in range(0, len(all_ids)):
total_num = self.all_tracks[all_ids[i]]['real_det_num'] + \
self.all_tracks[all_ids[i]]['miss_det_num']
if self.all_tracks[all_ids[i]]['real_det_num'] > 0:
if (((float(self.all_tracks[all_ids[i]]['real_det_num']) / total_num) >= self.det_ratio_thr) and
(self.all_tracks[all_ids[i]]['last_miss_det_num'] <= self.N_miss_max)):
sel_ids.append(all_ids[i])
else:
assert((float(self.all_tracks[all_ids[i]]['real_det_num']) / total_num) < 1.0)
track_mem_sel = np.zeros((len(sel_ids), 2 * self.model_info['mot_hidden_dim']))
track_bb_sel = np.zeros((len(sel_ids), self.cfg_train.MOT_INPUT_DIM))
for i in range(0, len(sel_ids)):
track_mem_sel[i, :] = self.all_tracks[sel_ids[i]]['c_states']
track_bb_sel[i, :] = self.all_tracks[sel_ids[i]]['bb'][:self.cfg_train.MOT_INPUT_DIM]
assert(np.shape(self.all_tracks[sel_ids[i]]['bb'])[0] == (self.cfg_train.MOT_INPUT_DIM + 1))
return (track_mem_sel, track_bb_sel, sel_ids)
def compute_softmax_score(self, bbs_norm, bbs, track_bbs, detbb_num, track_num, track_memory_sel):
feed_dict = {
self.tf_plh['detection_bboxes']: bbs_norm,
self.tf_plh['valid_mot_data']: np.ones((detbb_num, 1, 1)),
self.tf_plh['start_offset']: np.zeros(detbb_num, dtype=np.int32),
self.tf_plh['end_offset']: np.zeros(detbb_num, dtype=np.int32),
self.tf_plh['num_step_by_user']: 1,
self.tf_plh['istate_mot']: track_memory_sel,
self.tf_plh['track_len']: np.ones(track_num, dtype=np.int32),
self.tf_plh['c_mot_states_plh']: track_memory_sel,
self.tf_plh['mid_missdet_num']: np.zeros(detbb_num, dtype=np.int32),
self.tf_plh['first_missdet_num']: np.zeros(detbb_num, dtype=np.int32),
self.tf_plh['det_bbox_org']: bbs[:, :self.cfg_train.MOT_INPUT_DIM],
self.tf_plh['trk_bbox_org']: track_bbs,
self.tf_plh['detbb_num']: detbb_num,
self.tf_plh['track_num']: track_num,
self.tf_plh['sel_indices']: np.ones((track_num, detbb_num, 1))
}
tf_ops = [
self.tf_ops['softmax_out'],
self.tf_ops['h_mot_states'],
self.tf_ops['c_mot_states_last']
]
# (N2 * N1) x 2
softmax_np, h_np, c_np = self.sess.run(tf_ops, feed_dict=feed_dict)
return (softmax_np, h_np, c_np)
def update_existing_tracks(self, bbs, bbs_norm, frame_num, bookkeeping):
missing_det_tracks = {}
track_i_all = range(len(bookkeeping['track_ids_sel']))
track_i_left = np.setdiff1d(track_i_all, bookkeeping['track_i_sel'])
if len(track_i_left) > 0:
track_memory_left = np.zeros((len(track_i_left), 2 * self.model_info['mot_hidden_dim']))
track_ids_left = np.zeros(len(track_i_left))
for kkk in range(0, len(track_i_left)):
track_i_cur = track_i_left[kkk]
track_memory_left[kkk, :] = bookkeeping['track_memory_sel'][track_i_cur, :]
track_ids_left[kkk] = bookkeeping['track_ids_sel'][track_i_cur]
det_num = len(track_i_left)
det_bbs_np = np.zeros((det_num, self.cfg_train.MOT_INPUT_DIM))
valid_mot_data_np = np.zeros((det_num, 1, 1)) # use zero mask to make zero input
motlstm_state_np = track_memory_left
# det_num == trk_num, that's why det_num is used repeatedly
h_np_left, c_np_left = self.update_lstm_states(
det_bbs_np,
det_num,
det_num,
valid_mot_data_np,
motlstm_state_np
)
missing_det_tracks['track_ids'] = track_ids_left
missing_det_tracks['track_index'] = track_i_left
h_np_left, c_np_left = self.get_lstm_states_new(h_np_left, c_np_left, det_num)
missing_det_tracks['c_states'] = c_np_left
missing_det_tracks['h_states'] = h_np_left
self.update_tracks(
bookkeeping['h_np'],
bookkeeping['c_np'],
None,
bbs,
bbs_norm,
bookkeeping['track_ids_sel'],
bookkeeping['matching'],
bookkeeping['matching_score'],
frame_num,
self.model_info['mot_hidden_dim'],
network='mot_lstm',
missdet_tracks=missing_det_tracks
)
self.delete_dummy_tracks(frame_num)
def start_new_tracks(self, imgs, bbs, bbs_norm, frame_num, bookkeeping):
if imgs is not None:
# start new tracks from detections which are not selected
all_inds = range(np.shape(imgs)[0])
inds_sel2 = np.setdiff1d(all_inds, bookkeeping['inds_sel1'])
if len(inds_sel2) > 0:
bbs_norm_sel2 = self.pick_dets(bbs_norm, inds_sel2)
cur_detbb_num = np.shape(bbs_norm_sel2)[0]
c_np = np.zeros((cur_detbb_num, 2 * self.model_info['mot_hidden_dim']))
assert(cur_detbb_num == len(inds_sel2))
det_bbs_np = bbs_norm_sel2
det_num = cur_detbb_num
valid_mot_data_np = np.ones((cur_detbb_num, 1, 1))
motlstm_state_np = c_np
# det_num == trk_num, that's why det_num is used repeatedly
h_np, c_np = self.update_lstm_states(
det_bbs_np,
det_num,
det_num,
valid_mot_data_np,
motlstm_state_np
)
assert(np.shape(h_np)[0] == cur_detbb_num)
assert(np.shape(c_np)[0] == cur_detbb_num)
h_np, c_np = self.get_lstm_states_new(h_np, c_np, cur_detbb_num)
self.initialize_tracks(
h_np,
c_np,
None,
bbs,
bbs_norm,
inds_sel2,
frame_num,
self.model_info['mot_hidden_dim'],
is_dummy=False,
network='mot_lstm'
)
class Greedy_Tracker_APP_MOT(Greedy_Tracker_APP_BLSTM, Greedy_Tracker_MOT_LSTM):
def __init__(self, cfg_tracker, cfg_train, tf_ops, tf_placeholders, session):
super(Greedy_Tracker_APP_MOT, self).__init__(cfg_tracker, cfg_train, tf_ops, tf_placeholders, session)
def run(self, bbs, bbs_norm, imgs, frame_num):
# first frame
if len(self.all_tracks.keys()) == 0 and imgs is not None:
mem_np = self.initialize_track_mems(imgs, bbs, bbs_norm)
h_app_np, c_app_np, memory_app_np, h_mot_np, c_mot_np = mem_np
cur_detbb_num = np.shape(imgs)[0]
self.initialize_tracks(
h_app_np,
c_app_np,
memory_app_np,
h_mot_np,
c_mot_np,
bbs,
bbs_norm,
np.array(range(cur_detbb_num)),
frame_num,
self.model_info['app_hidden_dim'],
self.model_info['mot_hidden_dim'],
is_dummy=False,
network='app_mot_network'
)
elif len(self.all_tracks.keys()) != 0:
bookkeeping = {}
self.data_association(imgs, bbs, bbs_norm, frame_num, bookkeeping)
self.update_existing_tracks(bbs, bbs_norm, frame_num, bookkeeping)
self.start_new_tracks(imgs, bbs, bbs_norm, frame_num, bookkeeping)
def initialize_track_mems(self, imgs, bbs, bbs_norm):
app_embed_np = self.precompute_app_features(imgs, bbs)
cur_detbb_num = np.shape(imgs)[0]
assert(cur_detbb_num == np.shape(bbs)[0])
valid_mot_data = np.ones((cur_detbb_num, 1, 1))
h_app_np, c_app_np, mem_app_np, h_mot_np, c_mot_np = self.update_lstm_states(
app_embed_np,
bbs_norm,
valid_mot_data,
cur_detbb_num
)
assert(np.shape(h_mot_np)[0] == cur_detbb_num)
assert(np.shape(c_mot_np)[0] == cur_detbb_num)
h_mot_np, c_mot_np = self.get_lstm_states_new(h_mot_np, c_mot_np, cur_detbb_num)
mem_np = (h_app_np, c_app_np, mem_app_np, h_mot_np, c_mot_np)
return mem_np
def update_lstm_states(
self,
app_embed,
bbs_norm,
valid_mot_data,
det_num
):
c_app = np.zeros((det_num, 2 * self.model_info['app_hidden_dim']))
c_mot = np.zeros((det_num, 2 * self.model_info['mot_hidden_dim']))
feed_dict = {
self.tf_plh['detbb_num']: det_num,
self.tf_plh['track_num']: det_num, # dummy
self.tf_plh['app_embed_plh']: app_embed,
self.tf_plh['istate_app']: c_app,
self.tf_plh['is_training']: False,
self.tf_plh['num_step_by_user']: 1,
self.tf_plh['frames_by_user']: np.array([1]),
self.tf_plh['track_len']: np.ones(det_num, dtype=np.int32),
self.tf_plh['track_len_offset']: np.zeros(det_num, dtype=np.int32),
self.tf_plh['detection_bboxes']: bbs_norm,
self.tf_plh['valid_mot_data']: valid_mot_data,
self.tf_plh['start_offset']: np.zeros(det_num, dtype=np.int32),
self.tf_plh['end_offset']: np.zeros(det_num, dtype=np.int32),
self.tf_plh['istate_mot']: c_mot,
self.tf_plh['c_mot_states_plh']: c_mot,
self.tf_plh['mid_missdet_num']: np.zeros(det_num, dtype=np.int32),
self.tf_plh['first_missdet_num']: np.zeros(det_num, dtype=np.int32),
}
tf_ops = [
self.tf_ops['h_app_states'],
self.tf_ops['c_app_states'],
self.tf_ops['h_app_track_memory'],
self.tf_ops['h_mot_states_test'],
self.tf_ops['c_mot_states_last_test']
]
h_app_np, c_app_np, mem_app_np, h_mot_np, c_mot_np = self.sess.run(tf_ops, feed_dict=feed_dict)
return (h_app_np, c_app_np, mem_app_np, h_mot_np, c_mot_np)
def initialize_tracks(
self,
h_app,
c_app,
memory_app,
h_mot,
c_mot,
bbs,
bbs_norm,
det_ids,
frame,
app_hidden_dim,
mot_hidden_dim,
is_dummy,
network
):
h_app = np.reshape(h_app, (-1, app_hidden_dim))
assert(np.shape(memory_app)[0] == np.shape(h_app)[0])
assert(np.shape(memory_app)[0] == np.shape(c_app)[0])
assert(np.array_equal(h_app, c_app[:, app_hidden_dim:]))
assert(np.shape(h_app)[0] == np.shape(c_app)[0])
h_mot = np.reshape(h_mot, (-1, mot_hidden_dim))
assert(np.array_equal(h_mot, c_mot[:, mot_hidden_dim:]))
assert(np.shape(h_mot)[0] == np.shape(c_mot)[0])
assert(np.shape(h_mot)[0] == np.shape(h_app)[0])
assert(np.shape(c_mot)[0] == np.shape(c_app)[0])
if is_dummy == False:
for i in range(0, np.shape(h_app)[0]):
self.track_num += 1
# 1 x 1 x mem dim
self.all_tracks[self.track_num]['mem_app'] = memory_app[i, :, :]
# 1 x app state dim
self.all_tracks[self.track_num]['h_app_states'] = h_app[i, :]
# 1 x app state dim
self.all_tracks[self.track_num]['c_app_states'] = c_app[i, :]
# 1 x mot state dim
self.all_tracks[self.track_num]['h_mot_states'] = h_mot[i, :]
# 1 x mot state dim
self.all_tracks[self.track_num]['c_mot_states'] = c_mot[i, :]
self.all_tracks[self.track_num]['real_det_num'] = 1
self.all_tracks[self.track_num]['miss_det_num'] = 0
self.all_tracks[self.track_num]['last_miss_det_num'] = 0
self.all_tracks[self.track_num]['bb'] = bbs[det_ids[i], :]
self.all_tracks[self.track_num]['bb_norm'] = bbs_norm[det_ids[i], :]
self.all_tracks[self.track_num]['frame'] = frame
self.all_tracks[self.track_num]['th'] = [self.cls_thr]
# self.all_tracks[self.track_num]['norm'] = np.linalg.norm(memory[i, :, :])
self.result.append((frame, det_ids[i], 1.0, self.track_num))
elif is_dummy == True:
ct = -1
for i in range(0, np.shape(memory_app)[0]):
ct -= 1
# 1 x 1 x d
self.all_tracks[ct]['mem_app'] = memory_app[i, :, :]
# 1 x d
self.all_tracks[ct]['h_app_states'] = h_app[i, :]
# 1 x d
self.all_tracks[ct]['c_app_states'] = c_app[i, :]
# 1 x d
self.all_tracks[ct]['h_mot_states'] = h_mot[i, :]
# 1 x d
self.all_tracks[ct]['c_mot_states'] = c_mot[i, :]
self.all_tracks[ct]['real_det_num'] = 1
self.all_tracks[ct]['miss_det_num'] = 0
self.all_tracks[ct]['last_miss_det_num'] = 0
self.all_tracks[ct]['bb'] = bbs[det_ids[i], :]
self.all_tracks[ct]['bb_norm'] = bbs_norm[det_ids[i], :]
self.all_tracks[ct]['frame'] = frame
self.all_tracks[ct]['th'] = [self.cls_thr]
else:
raise NotImplementedError
def data_association(self, imgs, bbs, bbs_norm, frame_num, bookkeeping):
app_embed_np = []
if imgs is not None:
app_embed_np = self.precompute_app_features(imgs, bbs)
track_memory_sel, track_bb_sel, track_ids_sel = self.collect_track_memory()
track_app_memory_sel, track_mot_memory_sel = track_memory_sel
assert(np.shape(track_app_memory_sel)[0] == len(track_ids_sel))
assert(np.shape(track_mot_memory_sel)[0] == len(track_ids_sel))
matching = []
matching_score = []
inds_sel1 = []
inds_sel1_tmp = []
istates_sel = []
track_i_sel = []
h_app_np = []
c_app_np = []
memory_app_np = []
h_mot_np = []
c_mot_np = []
if imgs is not None and np.shape(track_app_memory_sel)[0] > 1:
            detbb_num = np.shape(imgs)[0]

from keras.models import Sequential
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Multiply
from keras.layers import Add
from keras.layers import concatenate
from keras.layers import Lambda
from keras.optimizers import Adam
from keras.callbacks import CSVLogger
import keras.backend as K
from collections import deque
import random
import numpy as np
from agents.SumTree import SumTree
# Double Deep Q-learning Agent with Prioritized Experience Replay
class PERDDQNAgent:
def __init__(self, game, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.memory = Memory(1000000)
self.gamma = 0.95 # discount rate
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.learning_rate = 0.001
self.training_frequency = 4
self.target_update_frequency = 10000
self.model = self._build_model()
self.target_model = self._build_model()
self.reset_target_model()
self.csv_loss_logger = CSVLogger(game + '_perddqn/' + game + '_perddqn_loss.csv', append=True, separator=',')
def _build_model(self):
model = Sequential()
model.add(Dense(150, input_dim=self.state_size, activation='relu'))
model.add(Dense(120, activation='relu'))
model.add(Dense(self.action_size, activation='linear'))
''' Can uncomment this to try out the dueling architecture with PER
state_input = Input(shape=(self.state_size,))
dense1 = Dense(24, activation='relu')(state_input)
hidden1 = Dense(48, activation='relu')(dense1)
q_prediction = Dense(self.action_size)(hidden1)
hidden2 = Dense(48, activation='relu')(dense1)
state_prediction = Dense(1)(hidden2)
# Q = State value + (Action value - Average of all action values)
q_prediction = Lambda(lambda x: x-K.mean(x, axis=-1), output_shape=(self.action_size,))(q_prediction)
state_prediction = Lambda(lambda state_prediction: K.tile(state_prediction, [1, self.action_size]))(state_prediction)
target_q = Add()([state_prediction, q_prediction])
model = Model(inputs=state_input, outputs=target_q)
'''
model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
return model
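    # The dueling aggregation referenced in the commented-out block above, as a
    # standalone numpy sketch (illustrative numbers, not part of the agent itself):
    #   V = 1.0                          # state value
    #   A = np.array([0.5, -0.5, 0.0])   # per-action advantages
    #   Q = V + (A - A.mean())           # -> array([1.5, 0.5, 1.0])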
def reset_target_model(self):
self.target_model.set_weights(self.model.get_weights())
def save_model(self, filename):
self.model.save_weights(filename)
def load_model(self, filename):
self.model.load_weights(filename)
def remember(self, state, action, reward, next_state, done):
sample = (state, action, reward, next_state, done)
state, target_f, error = self.getTargets(sample)
self.memory.add(error, sample)
def act(self, state):
if np.random.rand() <= self.epsilon:
return random.randrange(self.action_size) # take random action
act_values = self.model.predict(state)
return np.argmax(act_values[0]) # returns action
def test_act(self, state):
'''
Don't take random actions when testing agent
'''
act_values = self.model.predict(state)
        return np.argmax(act_values[0])

import numpy as np
import unittest
from numpy.testing import assert_array_equal
from .test_base import (
BaseSparrayTest, dense1d, dense2d, sparse2d, dense3d, assert_sparse_equal)
class TestIndexing(BaseSparrayTest):
def test_simple_indexing(self):
for i in [0, 1, len(dense1d) - 1, -1]:
self.assertEqual(dense1d[i], self.sp1d[i])
for i in [0, 1, len(dense2d) - 1, -1]:
for j in [0, 1, dense2d.shape[1] - 1, -1]:
self.assertEqual(dense2d[i,j], self.sp2d[i,j])
# check out of bounds indexes
self.assertRaises(IndexError, lambda: self.sp1d[len(dense1d)])
def test_ellipses(self):
assert_array_equal(dense1d[...], self.sp1d[...].toarray())
assert_array_equal(dense2d[...], self.sp2d[...].toarray())
# two ellipses is an error in recent numpy
self.assertRaises(IndexError, lambda: self.sp1d[...,...])
# three ellipses is too many for any numpy
self.assertRaises(IndexError, lambda: self.sp1d[...,...,...])
def test_partial_indexing(self):
for i in [0, 1, len(dense2d) - 1, -1]:
assert_array_equal(dense2d[i], self.sp2d[i].toarray())
for j in [0, 1, dense2d.shape[1] - 1, -1]:
assert_array_equal(dense2d[:,j], self.sp2d[:,j].toarray())
def test_iter(self):
assert_array_equal(dense1d, list(self.sp1d))
for dense_row, sparse_row in zip(dense2d, self.sp2d):
assert_array_equal(dense_row, sparse_row.toarray())
def test_diagonal(self):
assert_array_equal(dense2d.diagonal(), self.sp2d.diagonal().toarray())
self.assertRaises(ValueError, lambda: self.sp1d.diagonal())
self.assertRaises(ValueError, lambda: self.sp2d.diagonal(0,1,1))
def test_offset_diagonal(self):
for k in [1, -1, 2, -2, 3, -3, 4, -4]:
assert_sparse_equal(dense2d.diagonal(offset=k),
self.sp2d.diagonal(offset=k),
err_msg='Mismatch for k=%d' % k)
def test_slicing(self):
assert_array_equal(dense1d[1:], self.sp1d[1:].toarray())
assert_array_equal(dense2d[1:,1:], self.sp2d[1:,1:].toarray())
def test_mixed_fancy_indexing(self):
idx = [0,2]
assert_array_equal(dense2d[:,idx], self.sp2d[:,idx].toarray())
assert_array_equal(dense2d[idx,:], self.sp2d[idx,:].toarray())
assert_array_equal(dense3d[idx,:,idx], self.sp3d[idx,:,idx].toarray())
assert_array_equal(dense3d[[1],:,idx], self.sp3d[[1],:,idx].toarray())
assert_array_equal(dense3d[:,[1],idx], self.sp3d[:,[1],idx].toarray())
assert_array_equal(dense3d[idx,[1],:], self.sp3d[idx,[1],:].toarray())
assert_array_equal(dense3d[2,:,idx], self.sp3d[2,:,idx].toarray())
assert_array_equal(dense3d[:,1,idx], self.sp3d[:,1,idx].toarray())
assert_array_equal(dense3d[idx,1,:], self.sp3d[idx,1,:].toarray())
def test_inner_indexing(self):
idx = [0,2]
assert_array_equal(dense1d[idx], self.sp1d[idx].toarray())
assert_array_equal(dense2d[idx,idx], self.sp2d[idx,idx].toarray())
@unittest.expectedFailure
def test_outer_indexing(self):
ii = np.array([1,3])[:,None]
jj = np.array([0,2])
assert_array_equal(dense2d[ii,jj], self.sp2d[ii,jj].toarray())
def test_1d_boolean(self):
for idx in ([0,0,0,0,0], [1,0,0,0,0], [0,1,1,0,0], [1,1,1,1,1]):
            idx = np.array(idx, dtype=bool)

"""A module containing general utility methods for performance comparisons, data manipulation and
random feature generation"""
from __future__ import print_function
from builtins import object
import autograd.numpy as np
import time
import pandas as pd
import os
import sys
import math
from collections import defaultdict
import joblib
import data as data
import numpy as np
import random
from tqdm import tqdm
from two_sample_test_utils import RMMD
def performance_comparisons(methods,num_runs,param_name,params,alpha=0.05,mean_scale = 1, output='p_value',
verbose=False, size=300, num_obs = 10, var1=.1, var2=.1, function1 = 'sine',
function2 = 'sine',error1="gaussian", error2="gaussian",meta_mu=7):
'''
This function computes performance comparison runs
methods: name of methods to be tested
num_runs: number of run results averaged over
    params: parameter values to iterate over
alpha: significance level
'''
performance = defaultdict(int)
for param in params:
# define which parameter to iterate over
if param_name == 'num_obs':
num_obs = param
if param_name == 'var1':
var1 = param
if param_name == 'size':
size = param
if param_name == 'mean_scale':
mean_scale = param
if param_name == 'error':
error = param
for n_run in tqdm(range(num_runs)):
np.random.seed(n_run)
# Create a null dataset.
t1, y1 = data.generate_random_functions(size=size, num_obs = num_obs, mean_scale = mean_scale,\
function= function1,variance = var1, error=error1,\
meta_mu=meta_mu)
t2, y2 = data.generate_random_functions(size=size, num_obs = num_obs, mean_scale = 1,\
function= function2, variance = var2, error=error2,\
meta_mu=meta_mu)
if n_run % 100 == 0 and verbose == True:
print('=' * 70)
print('Sample output for parameter:', param)
print('=' * 70)
# Run the tests on both data sets and compute type I and II errors.
for method in methods:
method_name = method.__name__
key2 = 'method: {}; param value: {} '.format(method_name, param)
tic = time.time()
pval = method(t1, y1, t2, y2, output=output)
toc = (time.time() - tic) / 2.
                # performance refers to type I error if the data are generated under the null
                # and to power if the data are generated under the alternative
performance[key2] += int(pval < alpha) / num_runs
if n_run % 100 == 0 and verbose == True:
print('{}: time={:.2}s, p_value={:.4}.'.format( method_name, toc, pval))
return performance
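# Minimal standalone sketch of what `performance` measures above: the fraction of runs
# in which a test rejects at level alpha. Under the null this estimates the type I
# error; under an alternative it estimates power. (Illustrative helper, not part of
# the original module; it uses scipy's two-sample t-test as a stand-in test.)
def _rejection_rate_sketch(num_runs=200, alpha=0.05, shift=0.0, n=50, seed=0):
    from scipy import stats
    rng = np.random.RandomState(seed)
    rejections = 0
    for _ in range(num_runs):
        x = rng.randn(n)
        y = rng.randn(n) + shift  # shift=0 -> null holds; shift>0 -> alternative
        _, pval = stats.ttest_ind(x, y)
        rejections += int(pval < alpha)
    return rejections / float(num_runs)  # ~alpha when shift=0, grows towards 1 with shift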
def performance_comparisons_indep(methods,num_runs,param_name,params,alpha=0.05,mean_scale = 1, output='p_value',
verbose=False, size=100, num_obs = 10, var=.1, data_type='ind',transformation=None,
meta_mu=5):
'''
This function computes performance comparison runs
methods: name of methods to be tested
num_runs: number of run results averaged over
    params: parameter values to iterate over
alpha: significance level
'''
performance = defaultdict(int)
for param in params:
# define which parameter to iterate over
if param_name == 'num_obs':
num_obs = param
if param_name == 'size':
size = param
if param_name == 'meta_mu':
meta_mu = param
if param_name == 'var':
var = param
for n_run in tqdm(range(num_runs)):
np.random.seed(n_run)
# Create a dataset under the alternative hypothesis of dependence.
if data_type == 'ind':
t1, y1, _, _ = data.generate_conditional_functions(size=size, num_obs = num_obs,
function='sine',meta_mu=meta_mu,var=var)
t2, y2, _, _ = data.generate_conditional_functions(size=size, num_obs = num_obs,
function='zero', meta_mu=meta_mu,var=var)
if data_type == 'dep':
t1, y1, t2, y2 = data.generate_conditional_functions(size=size, num_obs = num_obs,
function='sine',meta_mu=meta_mu,
transformation=transformation,var=var)
if n_run % 100 == 0 and verbose == True:
print('=' * 70)
print('Sample output for parameter:', param)
print('=' * 70)
# Run the tests on both data sets and compute type I and II errors.
for method in methods:
method_name = method.__name__
key2 = 'method: {}; param value: {} '.format(method_name, param)
tic = time.time()
pval = method(t1, y1, t2, y2, output=output)
toc = (time.time() - tic) / 2.
                # performance refers to type I error if the data are generated under the null
                # and to power if the data are generated under the alternative
performance[key2] += int(pval < alpha) / num_runs
if n_run % 100 == 0 and verbose == True:
print('{}: time={:.2}s, p_value={:.4}.'.format( method_name, toc, pval))
return performance
def time_complexity(methods,sizes,num_runs=10, num_obs = 10):
times = defaultdict(int)
for size in tqdm(sizes):
t1, y1 = data.generate_random_functions(size=size, num_obs = num_obs)
t2, y2 = data.generate_random_functions(size=size, num_obs = num_obs)
for method in methods:
method_name = method.__name__
key = 'method: {}, number of samples {}'.format(method_name, size)
tic = time.time()
[method(t1, y1, t2, y2, output='p_value') for i in range(num_runs)]
toc = (time.time() - tic)
times[key] = toc / num_runs
return times
def perf_num_features(num_runs, num_features, alpha=0.05,mean_scale = 1, output='p_value',
verbose=False, size=300, num_obs = 10, var1=.1, var2=.1, function1 = 'sine',
function2 = 'sine',error1="gaussian", error2="gaussian"):
'''
This function computes performance as a function of the number of random features used to approximate
the mean embedding.
num_runs: number of run results averaged over
'''
performance = defaultdict(int)
for feat in num_features:
for n_run in tqdm(range(num_runs)):
np.random.seed(n_run)
# Create a null dataset.
t1, y1 = data.generate_random_functions(size=size, num_obs = num_obs, mean_scale = mean_scale,\
function= function1,variance = var1, error=error1)
t2, y2 = data.generate_random_functions(size=size, num_obs = num_obs, mean_scale = 1,\
function= function2, variance = var2, error=error2)
if n_run % 100 == 0 and verbose == True:
print('=' * 70)
                print('Sample output for parameter:', feat)
print('=' * 70)
# Run the tests on both data sets and compute type I and II errors.
method_name = 'RMMD'
key = 'method: {}; number of features: {} '.format(method_name, feat)
tic = time.time()
pval = RMMD(t1, y1, t2, y2, output=output)
toc = (time.time() - tic) / 2.
            # performance refers to type I error if the data are generated under the null
            # and to power if the data are generated under the alternative
performance[key] += int(pval < alpha) / num_runs
if n_run % 100 == 0 and verbose == True:
print('{}: time={:.2}s, p_value={:.4}.'.format( method_name, toc, pval))
return performance
def constrain(val, min_val, max_val):
return min(max_val, max(min_val, val))
def dist_matrix(X, Y):
"""
Construct a pairwise Euclidean distance matrix of size X.shape[0] x Y.shape[0]
"""
sx = np.sum(X ** 2, 1)
sy = np.sum(Y ** 2, 1)
D2 = sx[:, np.newaxis] - 2.0 * np.dot(X, Y.T) + sy[np.newaxis, :]
# to prevent numerical errors from taking sqrt of negative numbers
D2[D2 < 0] = 0
D = np.sqrt(D2)
return D
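# Quick check of the identity used above, ||x - y||^2 = ||x||^2 - 2<x, y> + ||y||^2:
#   X = np.array([[0., 0.], [3., 4.]]); Y = np.array([[0., 0.]])
#   dist_matrix(X, Y)   # -> [[0.], [5.]]  (the 3-4-5 triangle)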
def meddistance(X, subsample=None, mean_on_fail=True):
"""
Compute the median of pairwise distances (not distance squared) of points
in the matrix. Useful as a heuristic for setting Gaussian kernel's width.
Parameters
----------
X : n x d numpy array
mean_on_fail: True/False. If True, use the mean when the median distance is 0.
        This can happen especially when the data are discrete, e.g., 0/1, and
        there are slightly more 0s than 1s. In this case, the median can be 0 and
        the mean of the pairwise distances is used instead.
Return
------
median distance
"""
if subsample is None:
D = dist_matrix(X, X)
Itri = np.tril_indices(D.shape[0], -1)
Tri = D[Itri]
med = np.median(Tri)
if med <= 0:
# use the mean
return np.mean(Tri)
return med
else:
assert subsample > 0
rand_state = np.random.get_state()
np.random.seed(9827)
n = X.shape[0]
ind = np.random.choice(n, min(subsample, n), replace=False)
np.random.set_state(rand_state)
        # recurse just once
return meddistance(X[ind, :], None, mean_on_fail)
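# Typical use of the median heuristic above (illustrative sketch): pick the Gaussian
# kernel bandwidth from the data before computing kernel values.
#   X = np.random.randn(100, 3)
#   sigma = meddistance(X, subsample=1000)
#   gamma = 1.0 / (2 * sigma ** 2)   # k(x, y) = exp(-gamma * ||x - y||^2)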
def is_real_num(x):
"""return true if x is a real number"""
try:
float(x)
return not (np.isnan(x) or np.isinf(x))
except ValueError:
return False
def tr_te_indices(n, tr_proportion, seed=9282):
"""Get two logical vectors for indexing train/test points.
Return (tr_ind, te_ind)
"""
rand_state = np.random.get_state()
np.random.seed(seed)
Itr = np.zeros(n, dtype=bool)
tr_ind = np.random.choice(n, int(tr_proportion * n), replace=False)
Itr[tr_ind] = True
Ite = np.logical_not(Itr)
np.random.set_state(rand_state)
return (Itr, Ite)
def subsample_ind(n, k, seed=28):
"""
Return a list of indices to choose k out of n without replacement
"""
rand_state = np.random.get_state()
np.random.seed(seed)
ind = np.random.choice(n, k, replace=False)
np.random.set_state(rand_state)
return ind
def rp(k,s,d):
'''
This function samples random frequencies from the fourier transform of the gaussian kernel
(that is from a gaussian distribution) and uniform samples in [0,2pi]
param:
- k = number of random features
- s = median heuristic for standard deviation of gaussian distribution, s**2 is the variance.
    In the original paper of <NAME>, three embeddings are created with different scales s = [s1, s2, s3]
- d = dimensionality of w
'''
#s_list = [0.5*s,s,2*s]
#w = np.vstack([si*np.random.randn(k,d) for si in s_list])
#b = 2*np.pi*np.random.rand(3*k,1)
# with one parameter s use the following
    w = s*np.random.randn(k,d)
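# Minimal sketch of how frequencies like `w` and uniform offsets `b` are typically
# combined into random Fourier features whose inner products approximate a Gaussian
# kernel in expectation (illustrative helper, not part of the original module):
def _rff_map_sketch(X, w, b):
    """Map X (n x d) to k cosine features; z(x).z(y) approximates k(x, y) on average."""
    k = w.shape[0]                                        # number of random features
    return np.sqrt(2.0 / k) * np.cos(X.dot(w.T) + b.T)    # b assumed shaped (k, 1)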
"""
Fast JIT'ed graph methods.
These are outside the CSRGraph class so methods can call them
"""
import gc
import numba
from numba import jit
import numpy as np
import pandas as pd
import os
from scipy import sparse
import csrgraph as cg
def _edgelist_to_graph(src, dst, weights, nnodes, nodenames=None):
"""
Assumptions:
1) edgelist is sorted by source nodes
2) nodes are all ints in [0, num_nodes]
Params:
---------
elist : pd.Dataframe[src, dst, (weight)]
df of edge pairs. Assumed to be sorted.
        If a weight column is present, it is named 'weight'
Return:
----------
csrgraph object
"""
new_src = np.zeros(nnodes + 1)
# Fill indptr array
    new_src[1:] = np.cumsum(np.bincount(src, minlength=nnodes))
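# Standalone sketch of the indptr construction above: for an edge list sorted by source
# node, counting edges per source (bincount) and taking the cumulative sum yields the
# CSR row-pointer array. (Illustrative helper, not from the original module; assumes
# `src` holds integer node ids in [0, nnodes).)
def _indptr_sketch(src, nnodes):
    indptr = np.zeros(nnodes + 1, dtype=np.int64)
    indptr[1:] = np.cumsum(np.bincount(src, minlength=nnodes))
    return indptr
# Example: src = [0, 0, 1, 3], nnodes = 4 -> indptr = [0, 2, 3, 3, 4]
# (node 0 owns edges [0:2], node 1 owns [2:3], node 2 none, node 3 owns [3:4]).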
import sys
from unittest import TestCase, main
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_series_equal
sys.path.append("../")
from valhalla.transform import ColumnSelector, ColumnMerger
from valhalla.transform import WordUnifier, DuplicateRemover, StopWordRemover
from valhalla.transform import WordLower, RegExReplacer
from valhalla.transform import MorphTokenizer, NounTokenizer, PosTokenizer
class ColumnPreprocessingSimpleTest(TestCase):
"""
    Test method list
- ColumnSelector : DONE
- ColumnMerger : DONE
"""
def test_ColumnSelector(self):
df = pd.DataFrame(
data={
"과일": [
'사과', '배', '딸기'], "시장": [
'명동', '상정', '죽도']})
answer = pd.Series(data=['사과', '배', "딸기"])
cs = ColumnSelector("과일")
pred = cs.transform(df)
assert_series_equal(answer, pred, check_names=False, check_dtype=False)
def test_ColumnMerger(self):
df = pd.DataFrame(
data={
"과일": [
'사과', '배', '딸기'], "시장": [
'명동', '상정', '죽도']})
answer = pd.Series(data=["사과 명동", "배 상정", "딸기 죽도"])
cs = ColumnMerger(["과일", "시장"])
pred = cs.transform(df)
assert_series_equal(answer, pred, check_names=False, check_dtype=False)
class BasicNLPPreprocessingSimpleTest(TestCase):
"""
    Test method list
- WordUnifier : TODO
- DuplicateRemover : DONE
- StopWordRemover : DONE
    - RegExReplacer : TODO => still deciding on the cleanest structure for the input arguments
- WordLower : DONE
"""
"""
Word Unifier Test
"""
def test_word_unifier_with_list(self):
sample = ['삼성전자 노트북', "노트북 삼성", "samsung 스마트폰", 'lg 폰', "엘지전자 상거래"]
transformer = WordUnifier(
[["삼성", "삼성전자", 'samsung'], ["엘지", '엘지전자', 'lg']])
answer = ['삼성 노트북', "노트북 삼성", "삼성 스마트폰", "엘지 폰", "엘지 상거래"]
pred = transformer.transform(sample)
self.assertListEqual(answer, pred)
def test_word_unifier_with_numpy_input(self):
sample = np.array(
['삼성전자 노트북', "노트북 삼성", "samsung 스마트폰", 'lg 폰', "엘지전자 상거래"])
transformer = WordUnifier(
[["삼성", "삼성전자", 'samsung'], ["엘지", '엘지전자', 'lg']])
answer = np.array(['삼성 노트북', "노트북 삼성", "삼성 스마트폰", "엘지 폰", "엘지 상거래"])
pred = transformer.transform(sample)
assert_array_equal(answer, pred)
def test_word_unifier_with_pandas_input(self):
sample = pd.Series(
['삼성전자 노트북', "노트북 삼성", "samsung 스마트폰", 'lg 폰', "엘지전자 상거래"])
transformer = WordUnifier(
[["삼성", "삼성전자", 'samsung'], ["엘지", '엘지전자', 'lg']])
answer = pd.Series(['삼성 노트북', "노트북 삼성", "삼성 스마트폰", "엘지 폰", "엘지 상거래"])
pred = transformer.transform(sample)
assert_series_equal(answer, pred, check_names=False, check_dtype=False)
"""
RegExReplacer Test
"""
def test_RegExReplacer_with_list(self):
sample = ["열무김치 10kg 판매", "매실 5kg 포장", "미닛메이드 10L 병", "포도주스 30L 병",
"kgfi 공인 판매", "lipspace 판매"]
transformer = RegExReplacer([("[0-9]+kg", "<단위>"), ("[0-9]+L", "<부피단위>")])
answer = ["열무김치 <단위> 판매", "매실 <단위> 포장", "미닛메이드 <부피단위> 병", "포도주스 <부피단위> 병",
"kgfi 공인 판매", "lipspace 판매"]
pred = transformer.transform(sample)
self.assertListEqual(answer, pred)
def test_RegExReplacer_with_numpy_input(self):
sample = np.array(["열무김치 10kg 판매", "매실 5kg 포장", "미닛메이드 10L 병", "포도주스 30L 병",
"kgfi 공인 판매", "lipspace 판매"])
transformer = RegExReplacer([("[0-9]+kg", "<단위>"), ("[0-9]+L", "<부피단위>")])
answer = np.array(["열무김치 <단위> 판매", "매실 <단위> 포장", "미닛메이드 <부피단위> 병", "포도주스 <부피단위> 병",
"kgfi 공인 판매", "lipspace 판매"])
pred = transformer.transform(sample)
assert_array_equal(answer, pred)
def test_RegExReplacer_with_pandas_input(self):
sample = pd.Series(["열무김치 10kg 판매", "매실 5kg 포장", "미닛메이드 10L 병", "포도주스 30L 병",
"kgfi 공인 판매", "lipspace 판매"])
transformer = RegExReplacer([("[0-9]+kg", "<단위>"), ("[0-9]+L", "<부피단위>")])
answer = pd.Series(["열무김치 <단위> 판매", "매실 <단위> 포장", "미닛메이드 <부피단위> 병", "포도주스 <부피단위> 병",
"kgfi 공인 판매", "lipspace 판매"])
pred = transformer.transform(sample)
assert_series_equal(answer, pred, check_names=False, check_dtype=False)
"""
DuplicateRemover Test
"""
def test_DuplicateRemover_with_list(self):
sample = ['청동 사과 할인 특가 사과', "삼성 컴퓨터 특가 세일 삼성", "완전 싸다 완전 초대박 싸다"]
transformer = DuplicateRemover()
answer = ['청동 사과 할인 특가', '삼성 컴퓨터 특가 세일', '완전 싸다 초대박']
pred = transformer.transform(sample)
self.assertListEqual(answer, pred)
def test_DuplicateRemover_with_numpy_input(self):
sample = np.array(
['청동 사과 할인 특가 사과', "삼성 컴퓨터 특가 세일 삼성", "완전 싸다 완전 초대박 싸다"])
transformer = DuplicateRemover()
answer = np.array(['청동 사과 할인 특가', '삼성 컴퓨터 특가 세일', '완전 싸다 초대박'])
pred = transformer.transform(sample)
assert_array_equal(answer, pred)
def test_DuplicateRemover_with_pandas_input(self):
sample = pd.Series(
['청동 사과 할인 특가 사과', "삼성 컴퓨터 특가 세일 삼성", "완전 싸다 완전 초대박 싸다"])
transformer = DuplicateRemover()
answer = pd.Series(['청동 사과 할인 특가', '삼성 컴퓨터 특가 세일', '완전 싸다 초대박'])
pred = transformer.transform(sample)
assert_series_equal(answer, pred, check_names=False, check_dtype=False)
"""
StopWordRemover Test
"""
def test_StopWordRemover_with_list(self):
sample = [
"노트북 할인 판매",
"옷 기타 완전 세일",
"비아그라 할인",
"클래식기타 판매 세일",
"판매왕의 판매 전략"]
transformer = StopWordRemover(['판매', '기타'])
answer = ["노트북 할인", "옷 완전 세일", "비아그라 할인", "클래식기타 세일", "판매왕의 전략"]
pred = transformer.transform(sample)
self.assertListEqual(answer, pred)
def test_StopWordRemover_with_numpy_input(self):
sample = np.array(["노트북 할인 판매", "옷 기타 완전 세일",
"비아그라 할인", "클래식기타 판매 세일", "판매왕의 판매 전략"])
transformer = StopWordRemover(['판매', '기타'])
answer = np.array(
["노트북 할인", "옷 완전 세일", "비아그라 할인", "클래식기타 세일", "판매왕의 전략"])
pred = transformer.transform(sample)
assert_array_equal(answer, pred)
def test_StopWordRemover_with_pandas_input(self):
sample = pd.Series(["노트북 할인 판매", "옷 기타 완전 세일",
"비아그라 할인", "클래식기타 판매 세일", "판매왕의 판매 전략"])
transformer = StopWordRemover(['판매', '기타'])
answer = pd.Series(
["노트북 할인", "옷 완전 세일", "비아그라 할인", "클래식기타 세일", "판매왕의 전략"])
pred = transformer.transform(sample)
assert_series_equal(answer, pred, check_names=False, check_dtype=False)
"""
WordLower Test
"""
def test_WordLower_with_list(self):
sample = ["Kang", "KAM", "Kan"]
transformer = WordLower()
answer = ["kang", "kam", "kan"]
pred = transformer.transform(sample)
self.assertListEqual(answer, pred)
def test_WordLower_with_numpy_input(self):
sample = np.array(["Kang", "KAM", "Kan"])
transformer = WordLower()
answer = np.array(["kang", "kam", "kan"])
pred = transformer.transform(sample)
assert_array_equal(answer, pred)
def test_WordLower_with_pandas_input(self):
sample = pd.Series(["Kang", "KAM", "Kan"])
transformer = WordLower()
answer = pd.Series(["kang", "kam", "kan"])
pred = transformer.transform(sample)
assert_series_equal(answer, pred, check_names=False, check_dtype=False)
class TokenizerSimpleTest(TestCase):
"""
MorphTokenizer Test
"""
def test_MorphTokenizer_with_list(self):
sample = ["아버지가방에 들어가신다! 123", "어머니김치는 참맛있다!"]
transformer = MorphTokenizer()
answer = ['아버지 가방 에 들어가신다 ! 123', '어머니 김치 는 참 맛있다 !']
pred = transformer.transform(sample)
self.assertListEqual(answer, pred)
def test_MorphTokenizer_with_numpy_input(self):
sample = np.array(["아버지가방에 들어가신다! 123", "어머니김치는 참맛있다!"])
transformer = MorphTokenizer()
answer = np.array(['아버지 가방 에 들어가신다 ! 123', '어머니 김치 는 참 맛있다 !'])
pred = transformer.transform(sample)
assert_array_equal(answer, pred)
def test_MorphTokenizer_with_pandas_input(self):
sample = pd.Series(["아버지가방에 들어가신다! 123", "어머니김치는 참맛있다!"])
transformer = MorphTokenizer()
answer = pd.Series(['아버지 가방 에 들어가신다 ! 123', '어머니 김치 는 참 맛있다 !'])
pred = transformer.transform(sample)
assert_series_equal(answer, pred, check_names=False, check_dtype=False)
"""
NounTokenizer Test
"""
def test_NounTokenizer_with_list(self):
sample = ["아버지가방에 들어가신다! 123", "어머니김치는 참맛있다!"]
transformer = NounTokenizer()
answer = ['아버지 가방', '어머니 김치']
pred = transformer.transform(sample)
self.assertListEqual(answer, pred)
def test_NounTokenizer_with_numpy_input(self):
sample =
|
np.array(["아버지가방에 들어가신다! 123", "어머니김치는 참맛있다!"])
|
numpy.array
|
'''
@Description: In User Settings Edit
@Author: <NAME>
@Date: 2019-08-23 21:54:44
@LastEditTime: 2019-09-02 20:53:16
@LastEditors: Please set LastEditors
'''
import numpy as np
import scipy.sparse as sp
import torch
from numpy import linalg as la
import time
import logging
from PIL import Image
'''
@description: Read the .off file: skip the first three header lines; then comes an n×6 block where n is the number of vertices (x y z ol1 ol2 ol3), followed by an m×4 block where m is the number of faces (3 index1 index2 index3)
@param {type}
@return: A : an n*3 matrix, one 3-dimensional feature per vertex;
'''
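# Illustrative sketch of the layout the loaders below expect (made-up values):
# each vertex row has at least 9 columns (x y z, the normal in columns 3-5,
# then light/color starting at column 6), and each face row is "3 i1 i2 i3":
#   0.1 0.2 0.3  0.0 0.0 1.0  210 180 90
#   3 0 1 2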
#load all data from xx.ply
def load_light_normal_adj(path,normalization):
with open(path, 'r') as f:
lines = f.readlines()
nver = int(lines[3].split(' ')[2])
del lines[0:17]
normal = np.zeros((nver, 3), dtype=float)
light = np.zeros((nver, 3), dtype=float)
adj = np.zeros((nver, nver), dtype=float)
i = 0
for line in lines:
if i < nver:
value = line.strip('\n').split(' ')
#print(value[0])
#print(value)
normal[i][0] = value[3] # value[3:6]
normal[i][1] = value[4]
normal[i][2] = value[5]
light[i][0] = value[6] # value[6:9]
light[i][1] = value[7]
light[i][2] = value[8]
i += 1
else:
            value = line.strip('\n').split(' ')  # adjacency matrix with self-loops
index1=int(value[1])
index2=int(value[2])
index3=int(value[3])
adj[index1][index1] = 1
adj[index1][index2] = 1
adj[index1][index3] = 1
adj[index2][index1] = 1
adj[index2][index2] = 1
adj[index2][index3] = 1
adj[index3][index1] = 1
adj[index3][index2] = 1
adj[index3][index3] = 1
for i in range(0,nver):
if adj[i][i] ==0:
adj[i][i] =1
Dsum=np.sum(adj,axis=1)
V = np.diag(Dsum**(-0.5))
    # Symmetric normalization D^(-1/2) A D^(-1/2); this needs matrix products, not element-wise '*'
    adj = V.dot(adj).dot(V)
if normalization == True:
light = light/255
return light,normal,adj
#load all data from xx.ply
def load_color_normal_adj(path,normalization):
with open(path, 'r') as f:
lines = f.readlines()
nver = int(lines[3].split(' ')[2])
del lines[0:17]
normal = np.zeros((nver, 3), dtype=float)
color = np.zeros((nver, 1), dtype=float)
adj = np.zeros((nver, nver), dtype=float)
i = 0
for line in lines:
if i < nver:
value = line.strip('\n').split(' ')
#print(value[0])
#print(value)
normal[i][0] = value[3] # value[3:6]
normal[i][1] = value[4]
normal[i][2] = value[5]
color[i][0] = value[6] # value[6:9]
i += 1
else:
            value = line.strip('\n').split(' ')  # adjacency matrix with self-loops
index1=int(value[1])
index2=int(value[2])
index3=int(value[3])
adj[index1][index1] = 1
adj[index1][index2] = 1
adj[index1][index3] = 1
adj[index2][index1] = 1
adj[index2][index2] = 1
adj[index2][index3] = 1
adj[index3][index1] = 1
adj[index3][index2] = 1
adj[index3][index3] = 1
for i in range(0,nver):
if adj[i][i] ==0:
adj[i][i] =1
Dsum=np.sum(adj,axis=1)
V = np.diag(Dsum**(-0.5))
    # Symmetric normalization D^(-1/2) A D^(-1/2); this needs matrix products, not element-wise '*'
    adj = V.dot(adj).dot(V)
if normalization == True:
color = color/255
return color,normal,adj
def load_color_data(filepath, normalization=True):
    with open(filepath, 'r') as f:
lines = f.readlines()
nver = int(lines[3].split(' ')[2])
del lines[0:17]
color = np.zeros((nver, 1), dtype=float)
i = 0
for line in lines:
if i < nver:
value = line.strip('\n').split(' ')
color[i][0] = value[6] # value[6:9]
i += 1
else:
break
if normalization == True:
color = color/255
return color
def load_adj_data(filepath):
    with open(filepath, 'r') as f:
lines = f.readlines()
nver = int(lines[3].split(' ')[2])
del lines[0:17]
adj = np.zeros((nver, nver), dtype=float)
i = 0
for line in lines:
if i >= nver:
            value = line.strip('\n').split(' ')  # adjacency matrix with self-loops
index1=int(value[1])
index2=int(value[2])
index3=int(value[3])
adj[index1][index1] = 1
adj[index1][index2] = 1
adj[index1][index3] = 1
adj[index2][index1] = 1
adj[index2][index2] = 1
adj[index2][index3] = 1
adj[index3][index1] = 1
adj[index3][index2] = 1
adj[index3][index3] = 1
for i in range(0,nver):
if adj[i][i] ==0:
adj[i][i] =1
Dsum=np.sum(adj,axis=1)
V = np.diag(Dsum**(-0.5))
    # Symmetric normalization D^(-1/2) A D^(-1/2); this needs matrix products, not element-wise '*'
    adj = V.dot(adj).dot(V)
return torch.Tensor(adj)
def load_nonself_adj(filepath):
    with open(filepath, 'r') as f:
lines = f.readlines()
nver = int(lines[3].split(' ')[2])
del lines[0:17]
adj = np.zeros((nver, nver), dtype=float)
i = 0
for line in lines:
if i >= nver:
value = line.strip('\n').split(' ')
index1=int(value[1])
index2=int(value[2])
index3=int(value[3])
adj[index1][index1] = 1
adj[index1][index2] = 1
adj[index1][index3] = 1
adj[index2][index1] = 1
adj[index2][index2] = 1
adj[index2][index3] = 1
adj[index3][index1] = 1
adj[index3][index2] = 1
adj[index3][index3] = 1
Dsum=np.sum(adj,axis=1)
V =
|
np.diag(Dsum**(-0.5))
|
numpy.diag
|
from argparse import ArgumentParser
import json
import os
import sys
import time
import shutil
import inspect
import textwrap
import subprocess
from jinja2 import Environment
from sympy import im
import numpy as np
import pyne.utils
import pyne.data
from pyne import nucname
from .cram import get_CRAM_from_cache
from .partialfrac import thetas_alphas
from . import __version__
GCC_COMPILER_FLAGS = ['-O0', '-fcx-fortran-rules', '-fcx-limited-range',
'-ftree-sra', '-ftree-ter', '-fexpensive-optimizations']
CLANG_COMPILER_FLAGS = ['-O0', '-ffast-math']
HEADER = """\
/* This file was generated automatically with transmutagen version {{__version__}}. */
/* The command used to generate this file was: python -m transmutagen.gensolve {{' '.join(sys.argv[1:])}}*/
#ifndef {{namespace.upper()}}_SOLVE_C
#define {{namespace.upper()}}_SOLVE_C
{% if py_solve %}
#include <complex.h>
{%- endif %}
typedef struct {{namespace}}_transmute_info_tag {
int n;
int nnz;
int* i;
int* j;
char** nucs;
int* nucids;
double* decay_matrix;
} {{namespace}}_transmute_info_t;
extern {{namespace}}_transmute_info_t {{namespace}}_transmute_info;
int {{namespace}}_transmute_ij(int i, int j);
int {{namespace}}_transmute_nucid_to_i(int nucid);
{% if py_solve %}
{%- for type, typefuncname in types %}
void {{namespace}}_solve_{{typefuncname}}({{type}}* A, {{type}}* b, {{type}}* x);
void {{namespace}}_diag_add_{{typefuncname}}({{type}}* A, {{type}} alpha);
void {{namespace}}_dot_{{typefuncname}}({{type}}* A, {{type}}* x, {{type}}* y);
void {{namespace}}_scalar_times_vector_{{typefuncname}}({{type}}, {{type}}*);
{% endfor %}
{%- endif %}
{%- for degree in degrees %}
void {{namespace}}_expm_multiply{{degree}}(double* A, double* b, double* x {%- if include_lost_bits %}, double* lost_bits {% endif %});
{%- endfor %}
#endif
"""
SRC = """\
/* This file was generated automatically with transmutagen version {{__version__}}. */
/* The command used to generate this file was: python -m transmutagen.gensolve {{' '.join(sys.argv[1:])}}*/
#include <string.h>
#include <complex.h>
{%- if include_lost_bits %}
#include <math.h>
#include <stdlib.h>
{%- endif %}
{% if timing_test %}
#include <time.h>
#include <stdio.h>
{% endif %}
#include "{{headerfilename}}"
const int {{namespace.upper()}}_I[{{NNZ}}] =
{ {%- for i, j in sorted(ij) %}{{i}},{% endfor -%} };
const int {{namespace.upper()}}_J[{{NNZ}}] =
{ {%- for i, j in sorted(ij) %}{{j}},{% endfor -%} };
const char* {{namespace.upper()}}_NUCS[{{N}}] =
{ {%- for nuc in nucs %}"{{nuc}}",{% endfor -%} };
const int {{namespace.upper()}}_NUCIDS[{{N}}] =
{ {%- for nuc in nucs %}{{nucname.id(nuc)}},{% endfor -%} };
const double {{namespace.upper()}}_DECAY_MATRIX[{{NNZ}}] =
{%- if len(decay_matrix) > 0 %}
{ {%- for x in decay_matrix %}{{x.hex()}},{% endfor -%} };
{%- else -%}
{ {%- for i in range(NNZ) %}0,{% endfor -%} };
{% endif %}
{{namespace}}_transmute_info_t {{namespace}}_transmute_info = {
.n = {{N}},
.nnz = {{NNZ}},
.i = (int*) {{namespace.upper()}}_I,
.j = (int*) {{namespace.upper()}}_J,
.nucs = (char**) {{namespace.upper()}}_NUCS,
.nucids = (int*) {{namespace.upper()}}_NUCIDS,
.decay_matrix = (double*) {{namespace.upper()}}_DECAY_MATRIX,
};
int {{namespace}}_transmute_ij(int i, int j) {
int n = (i << 16) + j;
switch (n) {
{%- for i, j in sorted(ij) %}
case {{(i * 2**16) + j}}:
return {{ij[i, j]}};
{%- endfor %}
default:
return -1;
}
}
int {{namespace}}_transmute_nucid_to_i(int nucid) {
switch (nucid) {
{%- for i, nuc in enumerate(nucs) %}
case {{nucname.id(nuc)}}:
return {{i}};
{%- endfor %}
default:
return -1;
}
}
{%- if py_solve %}
{%- for type, typefuncname in types %}
void {{namespace}}_solve_{{typefuncname}}({{type}}* A, {{type}}* b, {{type}}* x) {
/* Decompose first */
{{type}} LU [{{NIJK}}];
memcpy(LU, A, {{NNZ}}*sizeof({{type}}));
memset(LU+{{NNZ}}, 0, {{NIJK-NNZ}}*sizeof({{type}}));
{%- for i in range(N) %}
{%- for j in range(i+1, N) %}
{%- if (j, i) in ijk %}
LU[{{ijk[j, i]}}] /= LU[{{ijk[i, i]}}];
{%- for k in range(i+1, N) %}
{%- if (i, k) in ijk %}
LU[{{ijk[j, k]}}] -= LU[{{ijk[j, i]}}] * LU[{{ijk[i, k]}}];
{%- endif %}
{%- endfor %}
{%- endif %}
{%- endfor %}
{%- endfor %}
/* Perform Solve */
memcpy(x, b, {{N}}*sizeof({{type}}));
{%- for i in range(N) %}{% if more_than_fore[i] %}
x[{{i}}] = x[{{i}}]{% for j in range(i) %}{%if (i, j) in ijk%} - LU[{{ijk[i, j]}}]*x[{{j}}]{%endif%}{% endfor %};
{%- endif %}
{%- endfor %}
/* Backward calc */
{%- for i in range(N-1, -1, -1) %}{%if more_than_back[i]%}
x[{{i}}] = x[{{i}}]{% for j in range(i+1, N) %}{%if (i, j) in ijk%} - LU[{{ijk[i, j]}}]*x[{{j}}]{%endif%}{% endfor %};
{%- endif %}
x[{{i}}] /= LU[{{ijk[i, i]}}];
{%- endfor %}
}
void {{namespace}}_diag_add_{{typefuncname}}({{type}}* A, {{type}} theta) {
/* In-place, performs the addition A + theta I, for a scalar theta. */
{%- for i in range(N) %}
A[{{ij[i, i]}}] += theta;
{%- endfor %}
}
void {{namespace}}_dot_{{typefuncname}}({{type}}* A, {{type}}* x, {{type}}* y) {
    /* Performs the calculation Ax = y and returns y */
{%- for i in range(N) %}
y[{{i}}] ={% for j in range(N) %}{% if (i,j) in ij %} + A[{{ij[i, j]}}]*x[{{j}}]{% endif %}{% endfor %};
{%- endfor %}
}
void {{namespace}}_scalar_times_vector_{{typefuncname}}({{type}} alpha, {{type}}* v) {
/* In-place, performs alpha*v, for a scalar alpha and vector v. */
{%- for i in range(N) %}
v[{{i}}] *= alpha;
{%- endfor %}
}
{%- endfor %}
{%- endif %}
void {{namespace}}_solve_special(double* A, double complex theta, double complex alpha, double* b, double complex* x {%- if include_lost_bits %}, double* lost_bits{% endif %}) {
/* Solves (A + theta*I)x = alpha*b and stores the result in x */
double complex LU [{{NIJK}}];
/* LU = A + theta*I */
{%- for i in range(NNZ) %}
{%- if i in diagonals %}
LU[{{i}}] = theta + A[{{i}}];
{%- else %}
LU[{{i}}] = A[{{i}}];
{%- endif %}
{%- endfor %}
memset(LU+{{NNZ}}, 0, {{NIJK-NNZ}}*sizeof(double complex));
/* Decompose first */
{%- for i in range(N) %}
{%- for j in range(i+1, N) %}
{%- if (j, i) in ijk %}
LU[{{ijk[j, i]}}] /= LU[{{ijk[i, i]}}];
{%- for k in range(i+1, N) %}
{%- if (i, k) in ijk %}
LU[{{ijk[j, k]}}] -= LU[{{ijk[j, i]}}] * LU[{{ijk[i, k]}}];
{%- endif %}
{%- endfor %}
{%- endif %}
{%- endfor %}
{%- endfor %}
/* Multiply x by alpha and perform Solve */
{%- for i in range(N) %}
{%- if include_lost_bits %}
x[{{i}}] = alpha*b[{{i}}];
{%- for j in range(i) %}
{% if (i, j) in ijk %}
x[{{i}}] -= LU[{{ijk[i, j]}}]*x[{{j}}];
if (creal(x[{{i}}]) && creal(LU[{{ijk[i, j]}}]*x[{{j}}]) && creal(x[{{i}}])*creal(LU[{{ijk[i, j]}}]*x[{{j}}]) < 0) {
if (abs(creal(x[{{i}}])) > abs(creal(LU[{{ijk[i, j]}}]*x[{{j}}]))) {
lost_bits[{{i}}] += log2(1 - abs(creal(LU[{{ijk[i, j]}}]*x[{{j}}]))/abs(creal(x[{{i}}])));
} else {
lost_bits[{{i}}] += log2(1 - abs(creal(x[{{i}}]))/abs(creal(LU[{{ijk[i, j]}}]*x[{{j}}])));
}
}
{%- endif %}
{%- endfor %}
{%- else %}
x[{{i}}] = alpha*b[{{i}}]{% for j in range(i) %}{%if (i, j) in ijk%} - LU[{{ijk[i, j]}}]*x[{{j}}]{%endif%}{% endfor %};
{%- endif %}
{%- endfor %}
/* Backward calc */
{%- for i in range(N-1, -1, -1) %}{%if more_than_back[i]%}
{%- if include_lost_bits %}
x[{{i}}] = x[{{i}}];
{%- for j in range(i+1, N) %}
{%- if (i, j) in ijk %}
x[{{i}}] -= LU[{{ijk[i, j]}}]*x[{{j}}];
if (creal(x[{{i}}]) && creal(LU[{{ijk[i, j]}}]*x[{{j}}]) && creal(x[{{i}}])*creal(LU[{{ijk[i, j]}}]*x[{{j}}]) < 0) {
if (abs(creal(x[{{i}}])) > abs(creal(LU[{{ijk[i, j]}}]*x[{{j}}]))) {
lost_bits[{{i}}] += log2(1 - abs(creal(LU[{{ijk[i, j]}}]*x[{{j}}]))/abs(creal(x[{{i}}])));
} else {
lost_bits[{{i}}] += log2(1 - abs(creal(x[{{i}}]))/abs(creal(LU[{{ijk[i, j]}}]*x[{{j}}])));
}
}
{%- endif %}
{%- endfor %}
{%- else %}
x[{{i}}] = x[{{i}}]{% for j in range(i+1, N) %}{%if (i, j) in ijk%} - LU[{{ijk[i, j]}}]*x[{{j}}]{%endif%}{% endfor %};
{%- endif %}
{%- endif %}
x[{{i}}] /= LU[{{ijk[i, i]}}];
{%- endfor %}
}
{% for degree in degrees %}
void {{namespace}}_expm_multiply{{degree}}(double* A, double* b, double* x {%- if include_lost_bits %}, double* lost_bits{% endif %}) {
/* Computes exp(A)*b and stores the result in x */
{%- for i in range(degree//2) %}
double complex x{{i}} [{{N}}];
{%- endfor %}
{%- if include_lost_bits %}
{%- for i in range(N) %}
lost_bits[{{i}}] = 0;
{%- endfor %}
{%- endif %}
{% set thetas, alphas, alpha0 = get_thetas_alphas(degree) -%}
{% for theta, alpha in sorted(zip(thetas, alphas), key=abs0) if im(theta) >= 0 %}
{{namespace}}_solve_special(A, {{ -theta}}, {{2*alpha}}, b, x{{loop.index0}} {%- if include_lost_bits %}, lost_bits {% endif %});
{%- endfor %}
{% for i in range(N) %}
x[{{i}}] = (double)creal({%- for j in range(degree//2) %}+x{{j}}[{{i}}]{%- endfor %}) + {{alpha0}}*b[{{i}}];
{%- endfor %}
}
{% endfor %}
{% if timing_test %}
int main(int argc, const char* argv[]) {
double A[{{NNZ}}];
double b[{{N}}];
double x[{{N}}];
int i;
double sum = 0.0;
clock_t start, diff;
memcpy(A, {{namespace.upper()}}_DECAY_MATRIX, {{NNZ}}*sizeof(double));
    for (i=0; i < {{N}}; i++) {
b[i] = 0.0;
}
/* U235 */
b[{{namespace}}_transmute_nucid_to_i(922350000)] = 1.0;
/* CPU time */
start = clock();
{{namespace}}_expm_multiply14(A, b, x);
diff = clock() - start;
float msec = (float)diff / CLOCKS_PER_SEC;
printf("Took %f seconds\\n", msec);
    for (i=0; i < {{N}}; i++) {
sum += x[i];
}
printf("Sum of resulting vector: %f\\n", sum);
return(0);
}
{% endif %}
"""
def make_solve_special(ij, N):
import math
ijk = make_ijk(ij, N)
NIJK = len(ijk)
more_than_back = [len([j for j in range(i, N) if (i, j) in ijk]) > 1 for i in range(N)]
diagonals = {ij[i, i]: i for i in range(N)}
_pre_decomposed = {}
_pre_decomposed_A = None
def decompose(A, theta):
nonlocal _pre_decomposed_A
NNZ = A.shape[0]
LU = np.zeros(NIJK, dtype=complex)
if np.all(_pre_decomposed_A == A):
if theta in _pre_decomposed:
return _pre_decomposed[theta].copy()
else:
_pre_decomposed_A = A.copy()
# LU = A + theta*I
LU[:NNZ] = A
for i in diagonals:
LU[i] += theta
# Decompose first
for i in range(N):
for j in range(i+1, N):
if (j, i) in ijk:
LU[ijk[j, i]] /= LU[ijk[i, i]]
for k in range(i+1, N):
if (i, k) in ijk:
LU[ijk[j, k]] -= LU[ijk[j, i]] * LU[ijk[i, k]]
_pre_decomposed[theta] = LU.copy()
return LU
def solve_special(A, theta, alpha, b):
"""
Solves (A + theta*I)x = alpha*b for x
"""
if len(A.shape) != 1:
raise TypeError("A should be 1-dimensional")
if len(b) != N:
raise TypeError("b should be length %d" % N)
LU = decompose(A, theta)
# Multiply x by alpha and perform Solve
x = alpha*b
x_lost_bits = np.zeros(x.shape)
for i in range(N):
xvals = [x[i]]
for j in range(i):
if (i, j) in ijk:
rhs = LU[ijk[i, j]]*x[j]
if np.real(x[i]) and np.real(rhs):
l = abs(np.real(x[i]))
r = abs(np.real(rhs))
if l > r:
x_lost_bits[i] += (l - r)*2**(-53)*(1 - r/l)
else:
x_lost_bits[i] += (r - l)*2**(-53)*(1 - l/r)
xvals.append(-rhs)
x[i] = math.fsum(np.real(xvals)) + math.fsum(np.imag(xvals))*1j
# Backward calc
for i in range(N-1, -1, -1):
xvals = [x[i]]
if more_than_back[i]:
for j in range(i+1, N):
if (i, j) in ijk:
rhs = LU[ijk[i, j]]*x[j]
if np.real(x[i]) and np.real(rhs):
l = abs(
|
np.real(x[i])
|
numpy.real
|
# -*- coding: utf-8 -*-
"""
Simple application to provide freq from images of seismic.
Freq code by endolith https://gist.github.com/endolith/255291
"""
from io import BytesIO
import uuid
import base64
from flask import Flask
from flask import make_response
from flask import request, jsonify, render_template
import urllib
import requests
import numpy as np
from PIL import Image
from bruges import get_bruges
import geophysics
from segy import write_segy
import utils
from errors import InvalidUsage
application = Flask(__name__)
@application.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
#
# Seismic frequency and SEGY bot
#
@application.route('/freq')
def freq():
# Params from inputs.
url = request.args.get('url')
b64 = request.args.get('image')
method = request.args.get('method') or 'xing'
avg = request.args.get('avg') or 'mean'
region = request.args.get('region')
ntraces = request.args.get('ntraces') or '10'
trace_spacing = request.args.get('trace_spacing') or 'regular'
bins = request.args.get('bins') or '11'
t_min = request.args.get('tmin') or '0'
t_max = request.args.get('tmax') or '1'
dt_param = request.args.get('dt') or 'auto'
# Booleans.
spectrum = request.args.get('spectrum') or 'false'
segy = request.args.get('segy') or 'false'
nope = {i: False for i in ('none', 'false', 'no', '0')}
spectrum = nope.get(spectrum.lower(), True)
segy = nope.get(segy.lower(), True)
# Condition or generate params.
ntraces = int(ntraces)
bins = int(bins)
t_min = float(t_min)
t_max = float(t_max)
uuid1 = str(uuid.uuid1())
if region:
region = [int(n) for n in region.split(',')]
else:
region = []
# Fetch and crop image.
if url:
try:
r = requests.get(url)
im = Image.open(BytesIO(r.content))
except Exception:
payload = {'job_uuid': uuid1}
payload['parameters'] = utils.build_params(method, avg,
t_min, t_max, dt_param,
region,
trace_spacing,
url=url)
mess = 'Unable to open image from target URI.'
raise InvalidUsage(mess, status_code=410, payload=payload)
elif b64:
try:
im = Image.open(BytesIO(base64.b64decode(b64)))
except Exception:
payload = {'job_uuid': uuid1}
payload['parameters'] = utils.build_params(method, avg,
t_min, t_max, dt_param,
region,
trace_spacing,
url=url)
mess = 'Could not decode payload image. Check base64 encoding.'
raise InvalidUsage(mess, status_code=410, payload=payload)
else:
payload = {'job_uuid': uuid1}
payload['parameters'] = utils.build_params(method, avg,
t_min, t_max, dt_param,
region,
trace_spacing,
url=url)
mess = 'You must provide an image.'
raise InvalidUsage(mess, status_code=410, payload=payload)
if region:
try:
im = im.crop(region)
except Exception:
mess = 'Improper crop parameters '
raise InvalidUsage(mess, status_code=410)
width, height = im.size[0], im.size[1]
# Calculate dt and interpolate if necessary.
if dt_param[:4].lower() == 'orig':
dt = (t_max - t_min) / (height - 1)
else:
if dt_param[:4].lower() == 'auto':
dts = [0.0005, 0.001, 0.002, 0.004, 0.008]
for dt in sorted(dts, reverse=True):
target = int(1 + (t_max - t_min) / dt)
# Accept the first one that is larger than the current height.
if target >= height:
break # dt and target are set
else:
dt = float(dt_param)
target = int((t_max - t_min) / dt)
        # If dt is not orig, we need to interpolate.
im = im.resize((width, target), Image.ANTIALIAS)
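        # Worked example of the 'auto' branch (illustrative numbers): with t_min=0,
        # t_max=1 and an image height of 300 rows, dt=0.008 gives target=126 and
        # dt=0.004 gives target=251 (both < 300), so the loop stops at dt=0.002
        # with target=501.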
# Set up the image.
grey = geophysics.is_greyscale(im)
i = np.asarray(im) - 128
i = i.astype(np.int8)
if (not grey) and (i.ndim == 3):
r, g, b = i[..., 0], i[..., 1], i[..., 2]
i = np.sqrt(0.299 * r**2. + 0.587 * g**2. + 0.114 * b**2.)
elif i.ndim == 3:
i = i[..., 0]
else:
i = i
# Get SEGY file link, if requested.
if segy:
try:
databytes = BytesIO()
write_segy(i, databytes, dt, t_min)
databytes.seek(0)
except:
print('Write SEGY failed')
else:
file_link = utils.get_url(databytes, uuid1)
# Do analysis.
print("Starting analysis")
m = {'auto': geophysics.freq_from_autocorr,
'fft': geophysics.freq_from_fft,
'xing': geophysics.freq_from_crossings}
traces = geophysics.get_trace_indices(i.shape[1], ntraces, trace_spacing)
specs, f_list, p_list, snr_list, mis, mas = geophysics.analyse(i,
t_min,
t_max,
traces,
m[method])
print("Finished analysis")
# Compute statistics.
print("***** f_list:", f_list)
fsd, psd = np.nanstd(f_list), np.nanstd(p_list)
fn, pn = len(f_list), len(p_list)
if avg.lower() == 'trim' and fn > 4:
f = geophysics.trim_mean(f_list, 0.2)
if np.isnan(f):
f = 0
elif avg.lower() == 'mean' or (avg == 'trim' and fn <= 4):
f = np.nanmean(f_list)
else:
mess = 'avg parameter must be trim or mean'
raise InvalidUsage(mess, status_code=410)
if avg.lower() == 'trim' and pn > 4:
p = geophysics.trim_mean(p_list, 0.2)
elif avg.lower() == 'mean' or (avg == 'trim' and pn <= 4):
p = np.nanmean(p_list)
else:
mess = 'avg parameter must be trim or mean'
raise InvalidUsage(mess, status_code=410)
snrsd = np.nanstd(snr_list)
snr = np.nanmean(snr_list)
# Spectrum.
print("Starting spectrum")
try:
spec = np.nanmean(np.dstack(specs), axis=-1)
fs = i.shape[0] / (t_max - t_min)
freq = np.fft.rfftfreq(i.shape[0], 1/fs)
f_min = np.amin(mis)
f_max = np.amax(mas)
except:
print("Failed spectrum")
# Probably the image is not greyscale.
payload = {'job_uuid': uuid1}
payload['parameters'] = utils.build_params(method, avg,
t_min, t_max, dt_param,
region,
trace_spacing,
url=url)
mess = 'Analysis error. Probably the colorbar is not greyscale.'
raise InvalidUsage(mess, status_code=410, payload=payload)
# Histogram.
if bins:
hist = np.histogram(i, bins=bins)
else:
hist = None
# Construct the result and return.
result = {'job_uuid': uuid1}
result['status'] = 'success'
result['message'] = ''
result['result'] = {}
result['result']['freq'] = {'peak': np.round(f, 2),
'sd': np.round(fsd, 2),
'n': fn,
'min':
|
np.round(f_min, 2)
|
numpy.round
|
from collections import OrderedDict
import numpy as np
from sklearn.metrics import confusion_matrix
def softmax(x, dim=1):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x, axis=dim, keepdims=True))
return e_x / e_x.sum(axis=dim, keepdims=True)
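# Illustrative check: softmax(np.array([[1., 2., 3.]])) is roughly
# [[0.090, 0.245, 0.665]], and each row sums to 1.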
def norm_confusion_matrix(scores, labels):
pred = np.argmax(scores, axis=1)
cf = confusion_matrix(labels, pred).astype(float)
cls_cnt = np.sum(cf, axis=1, keepdims=True)
norm_cm = cf / cls_cnt
return norm_cm
def invalid_pred_info(scores, labels, k=5, scale=1.0):
pred = np.argsort(scores, axis=-1)[:, -k:]
conf = np.max(softmax(scale * scores, dim=-1), axis=1)
invalid_mask = np.array([labels[i] not in pred[i] for i in range(len(labels))])
invalid_ids = np.arange(len(pred))[invalid_mask]
invalid_conf = conf[invalid_mask]
return invalid_ids, invalid_conf
def mean_class_accuracy(scores, labels):
pred = np.argmax(scores, axis=1)
cf = confusion_matrix(labels, pred)
cls_cnt = cf.sum(axis=1)
cls_cnt[cls_cnt == 0] = 1
cls_hit = np.diag(cf)
return np.mean(cls_hit.astype(float) / cls_cnt.astype(float))
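# Worked example (illustrative): labels=[0, 0, 1] and predictions=[0, 1, 1] give
# per-class hit rates 1/2 and 1/1, so mean_class_accuracy returns 0.75.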
def mean_top_k_accuracy(scores, labels, k=1):
"""MS-ASL like top-k accuracy definition.
"""
idx = np.argsort(-scores, axis=-1)[:, :k]
labels = np.array(labels)
matches = np.any(idx == labels.reshape([-1, 1]), axis=-1)
classes = np.unique(labels)
accuracy_values = []
for class_id in classes:
mask = labels == class_id
num_valid =
|
np.sum(mask)
|
numpy.sum
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for V-trace.
For details and theory see:
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
"""
from gym.spaces import Box
import numpy as np
import unittest
from ray.rllib.agents.impala import vtrace_tf as vtrace_tf
from ray.rllib.agents.impala import vtrace_torch as vtrace_torch
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.numpy import softmax
from ray.rllib.utils.test_utils import check, framework_iterator
tf1, tf, tfv = try_import_tf()
torch, nn = try_import_torch()
def _ground_truth_calculation(vtrace, discounts, log_rhos, rewards, values,
bootstrap_value, clip_rho_threshold,
clip_pg_rho_threshold):
"""Calculates the ground truth for V-trace in Python/Numpy."""
vs = []
seq_len = len(discounts)
rhos = np.exp(log_rhos)
cs = np.minimum(rhos, 1.0)
clipped_rhos = rhos
if clip_rho_threshold:
clipped_rhos =
|
np.minimum(rhos, clip_rho_threshold)
|
numpy.minimum
|
""" General functions for plotting PV data """
from typing import List
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from nowcasting_dataset.data_sources.pv.pv_data_source import PV
from nowcasting_dataset.geospatial import osgb_to_lat_lon
from plotly.subplots import make_subplots
from nowcasting_utils.visualization.line import make_trace
from nowcasting_utils.visualization.utils import make_buttons, make_slider
def get_trace_centroid_pv(pv: PV, example_index: int) -> go.Scatter:
"""Produce plot of centroid pv system"""
y = pv.power_normalized[example_index, :, 0]
x = pv.time[example_index]
    return make_trace(x, y, truth=True, name="centroid pv", mode="lines")
def get_trace_all_pv_systems(
pv: PV, example_index: int, center_system: bool = True
) -> List[go.Scatter]:
"""Produce plot of centroid pv system"""
traces = []
x = pv.time[example_index]
n_pv_systems = pv.power_mw.shape[2]
print(pv.power_mw.shape)
print(n_pv_systems)
if center_system:
start_idx = 1
centroid_trace = get_trace_centroid_pv(pv=pv, example_index=example_index)
traces.append(centroid_trace)
else:
start_idx = 0
# make the lines a little bit see-through
opacity = (1 / n_pv_systems) ** 0.35
for pv_system_index in range(start_idx, n_pv_systems):
y = pv.power_normalized[example_index, :, pv_system_index]
pv_id = pv.id[example_index, pv_system_index].values
truth = False
if ~
|
np.isnan(pv_id)
|
numpy.isnan
|
#! /usr/bin/python3
from datetime import datetime
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.collections as collections
from scipy import fftpack
eps0 = 8.8541878176e-12
class Grid1D:
"""FDTD 1D Grid, TEM propagating along x-axis"""
def __init__(self, dx, width):
self.dx = dx
self.x = np.arange(0.0, width, dx)
self.ndx = len(self.x)
self.dt = self.dx / (2 * 3e8)
self.ez = np.zeros(self.ndx)
self.hy = np.zeros(self.ndx)
self.ca =
|
np.ones(self.ndx)
|
numpy.ones
|
import json
from enum import IntEnum
from threading import Lock
import numpy as np
from club_controller import app_config
from club_controller.audio_udp_server.dsp import interpolate
from club_controller.audio_udp_server.filters import ExpFilter
from club_controller.misc.config_manager import config_manager
from club_controller.protocol.message_ids import (ClientAnimationId,
ServerMessageId)
from scipy.ndimage.filters import gaussian_filter1d
from .client import Client
from .client_type_id import ClientTypeId
from .led_strip_mode_id import LedStripModeId
class EffectId(IntEnum):
COLORED_ENERGY = 0
ENERGY = 1
SCROLL = 2
SPECTRUM = 3
class LedStripClient(Client):
#TODO check if a max refreshrate is necessary
#_max_led_FPS = int(((N_PIXELS * 30e-6) + 50e-6)**-1.0)
#assert FPS <= _max_led_FPS, 'FPS must be <= {}'.format(_max_led_FPS)
def __init__(self, uid, ip, port, mac, name, mode, color, color_templates, effect_id, fps, frequency, num_pixels, filter, strobe):
self.mode = mode
self.color = color
self.color_templates = color_templates
self.effect_id = effect_id
self.fps = fps
self.frequency = frequency
self.num_pixels = num_pixels
self.filter = filter
self.strobe = strobe
self.pixels_lock = Lock()
self.pixels = np.tile(1, (3, self.num_pixels))
self.p = np.tile(1.0, (3, self.num_pixels // 2))
self.energy_filt = ExpFilter(0, alpha_decay=filter["decay"], alpha_rise=filter["rise"])
self.p_filt = ExpFilter(np.tile(0, (3, self.num_pixels // 2)),
alpha_decay=filter["decay"], alpha_rise=filter["rise"])
self.gain = ExpFilter(np.tile(0.01, self.num_pixels // 2),
alpha_decay=0.001, alpha_rise=0.99)
self.prev_pixels = np.tile(0, (3, self.num_pixels))
self.prev_spectrum = np.tile(0.01, self.num_pixels // 2)
self.r_filt = ExpFilter(np.tile(0.01, self.num_pixels // 2),
alpha_decay=0.2, alpha_rise=0.99)
self.b_filt = ExpFilter(np.tile(0.01, self.num_pixels // 2),
alpha_decay=0.1, alpha_rise=0.5)
self.common_mode = ExpFilter(np.tile(0.01, self.num_pixels // 2),
alpha_decay=0.99, alpha_rise=0.01)
super().__init__(uid, ip, port, mac, int(ClientTypeId.LED_STRIP_CLIENT), name)
def toJson(self):
return {
"uid": self.uid,
"mac": self.mac,
"ip": self.ip,
"port": self.port,
"name": self.name,
"type_id": self.type_id,
"is_connected": self.is_connected,
"color": self.color,
"color_templates": self.color_templates,
"effect_id": self.effect_id,
"fps": self.fps,
"frequency": self.frequency,
"num_pixels": self.num_pixels,
"filter": self.filter,
"strobe": self.strobe,
"mode": self.mode,
}
def update_from_json(self, new_data):
for key, value in new_data.items():
if key == "mode":
self.update_mode(value)
if key == "effect_id":
# TODO store as string
self.effect_id = int(value)
else:
setattr(self, key, value)
if key == "filter":
self.update_filter_parameters(value)
self.send_parameter_update(self.toJson())
def update_filter_parameters(self, new_filter_value):
self.p_filt.alpha_rise = new_filter_value["rise"]
self.p_filt.alpha_decay = new_filter_value["decay"]
self.energy_filt.alpha_rise = new_filter_value["rise"]
self.energy_filt.alpha_decay = new_filter_value["decay"]
def update_strobe_delay(self, delay_ms : float):
if self.strobe is None:
print("update_strobe_delay: self.strobe is None")
return
self.strobe["delay_ms"] = delay_ms
if self.is_connected:
self.send_strobe_update(self.strobe)
def update_mode(self, mode : str):
if self.mode == mode:
return
self.mode = mode
if self.mode == LedStripModeId.STROBE.name:
data = {
"delay": config_manager.get_ui_config()["main_ui_parameters"]["strobe"]["delay_ms"],
"color": config_manager.get_ui_config()["main_ui_parameters"]["strobe"]["color"],
}
elif self.mode == LedStripModeId.AUDIO_CLIENT.name:
data = {
"color": self.color,
}
else:
data = {}
if self.is_connected:
self.send_mode_update(LedStripModeId[mode], data)
def set_pixel_data(self, pixel_data):
self.pixels_lock.acquire()
self.pixels = pixel_data
self.pixels_lock.release()
    # TODO: return the processed data (should the processing live somewhere else?)
def process(self, fft_data):
"""Transforms the given fft data into pixel values based on the current config.
Args:
fft_data (np.ndarray): Fft data in the range 0 - 1
"""
self.pixels_lock.acquire()
# get chosen frequency range
        # fft_data spans 0 to SAMPLE_RATE/2 Hz, so the bin spacing is SAMPLE_RATE/2/len(fft_data)
spacing = app_config.SAMPLE_RATE / 2 / len(fft_data)
i_min_freq = int(self.frequency["min"] / spacing)
i_max_freq = int(self.frequency["max"] / spacing)
if i_min_freq == i_max_freq:
i_max_freq = i_min_freq + 1
mapped_fft_data = fft_data[i_min_freq:i_max_freq]
effect_id = EffectId(self.effect_id)
if(effect_id == EffectId.COLORED_ENERGY):
pixel_values = self.get_pixel_values_colored_energy(mapped_fft_data)
elif(effect_id == EffectId.ENERGY):
pixel_values = self.get_pixel_values_energy(mapped_fft_data)
elif(effect_id == EffectId.SCROLL):
pixel_values = self.get_pixel_values_scroll(mapped_fft_data)
elif(effect_id == EffectId.SPECTRUM):
pixel_values = self.get_pixel_values_spectrum(mapped_fft_data)
else:
if __debug__:
print("Unkown effectId: " + str(self.effect_id))
pixel_values = np.zeros(self.num_pixels)
self.pixels = pixel_values
self.pixels_lock.release()
def get_pixel_values_colored_energy(self, fft_data):
"""Effect that expands from the center with increasing sound energy and set color"""
y = interpolate(fft_data, self.num_pixels//2)
self.gain.update(np.max(gaussian_filter1d(y, sigma=1.0)))
y /= self.gain.value
# Scale by the width of the LED strip
y *= float((self.num_pixels // 2) - 1)
# Adjust width with user input
y *= self.filter["gain_adjust"]
# Map color channels according to set color and the energy over the freq bands
color = self.color
i_on = int(np.mean(y))
# Assign color to different frequency regions
self.p[0, :i_on] = color["r"]
self.p[0, i_on:] = 0.0
self.p[1, :i_on] = color["g"]
self.p[1, i_on:] = 0.0
self.p[2, :i_on] = color["b"]
self.p[2, i_on:] = 0.0
self.p_filt.update(self.p)
p = np.round(self.p_filt.value)
# Apply substantial blur to smooth the edges
p[0, :] = gaussian_filter1d(p[0, :], sigma=float(self.filter["edge_blurring"]))
p[1, :] = gaussian_filter1d(p[1, :], sigma=float(self.filter["edge_blurring"]))
p[2, :] = gaussian_filter1d(p[2, :], sigma=float(self.filter["edge_blurring"]))
return np.concatenate((p[:, ::-1], p), axis=1)
def get_pixel_values_energy(self, mel_data):
"""Effect that expands from the center with increasing sound energy"""
y =
|
np.copy(mel_data)
|
numpy.copy
|
"""
Functions for visualizing flow cytometry data.
Functions in this module are divided in two categories:
- Simple Plot Functions, with a signature similar to the following::
plot_fxn(data_list, channels, parameters, savefig)
where `data_list` is a NxD FCSData object or numpy array, or a list of
such, `channels` specifies the channel or channels to use for the plot,
`parameters` are function-specific parameters, and `savefig` indicates
whether to save the figure to an image file. Note that `hist1d`, `violin`,
and `violin_dose_response` use `channel` instead of `channels`, since they
use a single channel, and `density2d` only accepts one FCSData object or
numpy array as its first argument.
Simple Plot Functions do not create a new figure or axis, so they can be
called directly to plot in a previously created axis if desired. If
`savefig` is not specified, the plot is maintained in the current axis
when the function returns. This allows for further modifications to the
axis by direct calls to, for example, ``plt.xlabel``, ``plt.title``, etc.
However, if `savefig` is specified, the figure is closed after being
saved. In this case, the function may include keyword parameters
`xlabel`, `ylabel`, `xlim`, `ylim`, `title`, and others related to
legend or color, which allow the user to modify the axis prior to saving.
The following functions in this module are Simple Plot Functions:
- ``hist1d``
- ``violin``
- ``violin_dose_response``
- ``density2d``
- ``scatter2d``
- ``scatter3d``
- Complex Plot Functions, which create a figure with several axes, and use
one or more Simple Plot functions to populate the axes. They always
include a `savefig` argument, which indicates whether to save the figure
to a file. If `savefig` is not specified, the plot is maintained in the
newly created figure when the function returns. However, if `savefig` is
specified, the figure is closed after being saved.
The following functions in this module are Complex Plot Functions:
- ``density_and_hist``
- ``scatter3d_and_projections``
"""
import packaging
import packaging.version
import collections
import numpy as np
import scipy.ndimage.filters
import scipy.optimize
import matplotlib
import matplotlib.scale
import matplotlib.transforms
import matplotlib.ticker
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.font_manager import FontProperties
import warnings
# expose the collections module abstract base classes (ABCs) in both
# Python 2 and 3
try:
# python 3
collectionsAbc = collections.abc
except AttributeError:
# python 2
collectionsAbc = collections
cmap_default = plt.get_cmap('Spectral_r')
savefig_dpi = 250
###
# CUSTOM TRANSFORMS
###
class _InterpolatedInverseTransform(matplotlib.transforms.Transform):
"""
Class that inverts a given transform class using interpolation.
Parameters
----------
transform : matplotlib.transforms.Transform
Transform class to invert. It should be a monotonic transformation.
smin : float
Minimum value to transform.
smax : float
Maximum value to transform.
resolution : int, optional
        Number of points to use to evaluate `transform`. Default is 1000.
Methods
-------
transform_non_affine(x)
Apply inverse transformation to a Nx1 numpy array.
Notes
-----
Upon construction, this class generates an array of `resolution` points
between `smin` and `smax`. Next, it evaluates the specified
transformation on this array, and both the original and transformed
arrays are stored. When calling ``transform_non_affine(x)``, these two
arrays are used along with ``np.interp()`` to inverse-transform ``x``.
Note that `smin` and `smax` are also transformed and stored. When using
``transform_non_affine(x)``, any values in ``x`` outside the range
specified by `smin` and `smax` transformed are masked.
"""
# ``input_dims``, ``output_dims``, and ``is_separable`` are required by
# matplotlib.
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, transform, smin, smax, resolution=1000):
# Call parent's constructor
matplotlib.transforms.Transform.__init__(self)
# Store transform object
self._transform = transform
# Generate input array
self._s_range = np.linspace(smin, smax, resolution)
# Evaluate provided transformation and store result
self._x_range = transform.transform_non_affine(self._s_range)
# Transform bounds and store
self._xmin = transform.transform_non_affine(smin)
self._xmax = transform.transform_non_affine(smax)
if self._xmin > self._xmax:
self._xmax, self._xmin = self._xmin, self._xmax
def transform_non_affine(self, x, mask_out_of_range=True):
"""
Transform a Nx1 numpy array.
Parameters
----------
x : array
Data to be transformed.
mask_out_of_range : bool, optional
Whether to mask input values out of range.
Return
------
array or masked array
Transformed data.
"""
# Mask out-of-range values
if mask_out_of_range:
x_masked = np.ma.masked_where((x < self._xmin) | (x > self._xmax),
x)
else:
x_masked = x
# Calculate s and return
return np.interp(x_masked, self._x_range, self._s_range)
def inverted(self):
"""
Get an object representing an inverse transformation to this class.
Since this class implements the inverse of a given transformation,
this function just returns the original transformation.
Return
------
matplotlib.transforms.Transform
Object implementing the reverse transformation.
"""
return self._transform
class _LogicleTransform(matplotlib.transforms.Transform):
"""
Class implementing the Logicle transform, from scale to data values.
Relevant parameters can be specified manually, or calculated from
a given FCSData object.
Parameters
----------
T : float
Maximum range of data values. If `data` is None, `T` defaults to
262144. If `data` is not None, specifying `T` overrides the
default value that would be calculated from `data`.
M : float
(Asymptotic) number of decades in display scale units. If `data` is
None, `M` defaults to 4.5. If `data` is not None, specifying `M`
overrides the default value that would be calculated from `data`.
W : float
Width of linear range in display scale units. If `data` is None,
`W` defaults to 0.5. If `data` is not None, specifying `W`
overrides the default value that would be calculated from `data`.
data : FCSData or numpy array or list of FCSData or numpy array
Flow cytometry data from which a set of T, M, and W parameters will
be generated.
channel : str or int
Channel of `data` from which a set of T, M, and W parameters will
be generated. `channel` should be specified if `data` is
multidimensional.
Methods
-------
transform_non_affine(s)
Apply transformation to a Nx1 numpy array.
Notes
-----
Logicle scaling combines the advantages of logarithmic and linear
scaling. It is useful when data spans several orders of magnitude
(when logarithmic scaling would be appropriate) and a significant
number of datapoints are negative.
Logicle scaling is implemented using the following equation::
x = T * 10**(-(M-W)) * (10**(s-W) \
- (p**2)*10**(-(s-W)/p) + p**2 - 1)
This equation transforms data ``s`` expressed in "display scale" units
into ``x`` in "data value" units. Parameters in this equation
correspond to the class properties. ``p`` and ``W`` are related as
follows::
W = 2*p * log10(p) / (p + 1)
If a FCSData object or list of FCSData objects is specified along with
a channel, the following default logicle parameters are used: T is
taken from the largest ``data[i].range(channel)[1]`` or the largest
element in ``data[i]`` if ``data[i].range()`` is not available, M is
set to the largest of 4.5 and ``4.5 / np.log10(262144) * np.log10(T)``,
and W is taken from ``(M - log10(T / abs(r))) / 2``, where ``r`` is the
minimum negative event. If no negative events are present, W is set to
zero.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, "A New Logicle Display
Method Avoids Deceptive Effects of Logarithmic Scaling for Low Signals
and Compensated Data," Cytometry Part A 69A:541-551, 2006, PMID
16604519.
"""
# ``input_dims``, ``output_dims``, and ``is_separable`` are required by
# matplotlib.
input_dims = 1
output_dims = 1
is_separable = True
# Locator objects need this object to store the logarithm base used as an
# attribute.
base = 10
def __init__(self, T=None, M=None, W=None, data=None, channel=None):
matplotlib.transforms.Transform.__init__(self)
# If data is included, try to obtain T, M and W from it
if data is not None:
# Convert to list if necessary
if not isinstance(data, list):
data = [data]
# Obtain T, M, and W if not specified
# If elements of data have ``.range()``, use it to determine the
# max data value. Else, use the maximum value in the array.
if T is None:
T = 0
for d in data:
# Extract channel
if d.ndim > 1:
if channel is None:
msg = "if multidimensional data is provided, a"
msg += " channel should be specified"
raise ValueError(msg)
else:
y = d[:, channel]
else:
y = d
if hasattr(y, 'range') and hasattr(y.range, '__call__'):
Ti = y.range(0)[1]
else:
Ti = np.max(y)
T = Ti if Ti > T else T
if M is None:
M = max(4.5, 4.5 / np.log10(262144) * np.log10(T))
if W is None:
W = 0
for d in data:
# Extract channel
if d.ndim > 1:
if channel is None:
msg = "if multidimensional data is provided, a"
msg += " channel should be specified"
raise ValueError(msg)
else:
y = d[:, channel]
else:
y = d
# If negative events are present, use minimum.
if np.any(y < 0):
r = np.min(y)
Wi = (M - np.log10(T / abs(r))) / 2
W = Wi if Wi > W else W
else:
# Default parameter values
if T is None:
T = 262144
if M is None:
M = 4.5
if W is None:
W = 0.5
# Check that property values are valid
if T <= 0:
raise ValueError("T should be positive")
if M <= 0:
raise ValueError("M should be positive")
if W < 0:
raise ValueError("W should not be negative")
# Store parameters
self._T = T
self._M = M
self._W = W
# Calculate dependent parameter p
# It is not possible to analytically obtain ``p`` as a function of W
# only, so ``p`` is calculated numerically using a root finding
# algorithm. The initial estimate provided to the algorithm is taken
# from the asymptotic behavior of the equation as ``p -> inf``. This
# results in ``W = 2*log10(p)``.
p0 = 10**(W / 2.)
# Functions to provide to the root finding algorithm
def W_f(p):
return 2*p / (p + 1) * np.log10(p)
def W_root(p, W_target):
return W_f(p) - W_target
# Find solution
sol = scipy.optimize.root(W_root, x0=p0, args=(W))
# Solution should be unique
assert sol.success
assert len(sol.x) == 1
# Store solution
self._p = sol.x[0]
@property
def T(self):
"""
Maximum range of data.
"""
return self._T
@property
def M(self):
"""
(Asymptotic) number of decades in display scale units.
"""
return self._M
@property
def W(self):
"""
Width of linear range in display scale units.
"""
return self._W
def transform_non_affine(self, s):
"""
Apply transformation to a Nx1 numpy array.
Parameters
----------
s : array
Data to be transformed in display scale units.
Return
------
array or masked array
Transformed data, in data value units.
"""
T = self._T
M = self._M
W = self._W
p = self._p
# Calculate x
return T * 10**(-(M-W)) * (10**(s-W) - (p**2)*10**(-(s-W)/p) + p**2 - 1)
def inverted(self):
"""
Get an object implementing the inverse transformation.
Return
------
_InterpolatedInverseTransform
Object implementing the reverse transformation.
"""
return _InterpolatedInverseTransform(transform=self,
smin=0,
smax=self._M)
###
# CUSTOM TICK LOCATORS AND FORMATTERS
###
class _LogicleLocator(matplotlib.ticker.Locator):
"""
Determine the tick locations for logicle axes.
Parameters
----------
transform : _LogicleTransform
transform object
subs : array, optional
Subtick values, as multiples of the main ticks. If None, do not use
subticks.
"""
def __init__(self, transform, subs=None):
self._transform = transform
if subs is None:
self._subs = [1.0]
else:
self._subs = subs
self.numticks = 15
def set_params(self, subs=None, numticks=None):
"""
Set parameters within this locator.
Parameters
----------
subs : array, optional
Subtick values, as multiples of the main ticks.
numticks : array, optional
Number of ticks.
"""
if numticks is not None:
self.numticks = numticks
if subs is not None:
self._subs = subs
def __call__(self):
"""
Return the locations of the ticks.
"""
# Note, these are untransformed coordinates
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
"""
Get a set of tick values properly spaced for logicle axis.
"""
# Extract base from transform object
b = self._transform.base
# The logicle domain is divided into two regions: A "linear" region,
# which may include negative numbers, and a "logarithmic" region, which
# only includes positive numbers. These two regions are separated by a
# value t, given by the logicle equations. An illustration is given
# below.
#
# -t ==0== t ========>
# lin log
#
# vmin and vmax can be anywhere in this domain, meaning that both should
# be greater than -t.
#
# The logarithmic region will only have major ticks at integral log
# positions. The linear region will have a major tick at zero, and one
# major tick at the largest absolute integral log value in screen
# inside this region. Subticks will be added at multiples of the
# integral log positions.
# If the linear range is too small, create new transformation object
# with slightly wider linear range. Otherwise, the number of decades
# below will be infinite
if self._transform.W == 0 or \
self._transform.M / self._transform.W > self.numticks:
self._transform = _LogicleTransform(
T=self._transform.T,
M=self._transform.M,
W=self._transform.M / self.numticks)
# Calculate t
t = - self._transform.transform_non_affine(0)
# Swap vmin and vmax if necessary
if vmax < vmin:
vmin, vmax = vmax, vmin
# Calculate minimum and maximum limits in scale units
vmins = self._transform.inverted().transform_non_affine(vmin)
vmaxs = self._transform.inverted().transform_non_affine(vmax)
# Check whether linear or log regions are present
has_linear = has_log = False
if vmin <= t:
has_linear = True
if vmax > t:
has_log = True
else:
has_log = True
# Calculate number of ticks in linear and log regions
# The number of ticks is distributed by the fraction that each region
# occupies in scale units
if has_linear:
fraction_linear = (min(vmaxs, 2*self._transform.W) - vmins) / \
(vmaxs - vmins)
numticks_linear = np.round(self.numticks*fraction_linear)
else:
numticks_linear = 0
if has_log:
fraction_log = (vmaxs - max(vmins, 2*self._transform.W)) / \
(vmaxs - vmins)
numticks_log = np.round(self.numticks*fraction_log)
else:
numticks_log = 0
# Calculate extended ranges and step size for tick location
# Extended ranges take into account discretization.
if has_log:
# The logarithmic region's range will include from the decade
# immediately below the lower end of the region to the decade
# immediately above the upper end.
# Note that this may extend the logarithmic region to the left.
log_ext_range = [np.floor(np.log(max(vmin, t)) / np.log(b)),
np.ceil(np.log(vmax) /
|
np.log(b)
|
numpy.log
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2019 <NAME>, <NAME>
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
import numpy as np
import scipy.special as spe
import scipy.integrate as integ
import scipy.stats as st
import matplotlib.pyplot as plt
# This script performs detailed checks of the Breit-Wheeler pair production process.
# Four populations of photons are initialized with different momenta in different
# directions in a background EM field (with non-zero components along each direction).
# Specifically the script checks that:
#
# - The expected number of generated pairs n_pairs is in agreement with theory
# (the maximum tolerated error is 5*sqrt(n_pairs)
# - The weight of the generated particles is equal to the weight of the photon
# - Momenta of the residual photons are still equal to the original momentum
# - The generated particles are emitted in the right direction
# - Total energy is conserved in each event
# - The energy distribution of the generated particles is in agreement with theory
# - The optical depths of the product species are correctly initialized (QED effects are
# enabled for product species too).
#
# More details on the theoretical formulas used in this script can be found in
# the Jupyter notebook picsar/src/multi_physics/QED_tests/validation/validation.ipynb
#
# References:
# 1) <NAME> et al 2011 Plasma Phys. Control. Fusion 53 015009
# 2) <NAME> et al. 2015 Phys. Rev. E 92, 023305
# 3) <NAME>. PhD thesis "Effets radiatifs et d'electrodynamique
# quantique dans l'interaction laser-matiere ultra-relativiste"
# URL: https://tel.archives-ouvertes.fr/tel-01314224
# Tolerances
tol = 1.e-8
tol_red = 2.e-2
# Physical constants (from CODATA 2018, see: https://physics.nist.gov/cuu/Constants/index.html )
me = 9.1093837015e-31 #electron mass
c = 299792458 #speed of light
hbar = 6.62607015e-34/(2*np.pi) #reduced Plank constant
fine_structure = 7.2973525693e-3 #fine structure constant
qe = 1.602176634e-19#elementary charge
E_s = (me**2 * c**3)/(qe * hbar) #Schwinger E field
B_s = E_s/c #Schwinger B field
mec = me*c
mec2 = mec*c
#______________
# Initial parameters
spec_names_phot = ["p1", "p2", "p3", "p4"]
spec_names_ele = ["ele1", "ele2", "ele3", "ele4"]
spec_names_pos = ["pos1", "pos2", "pos3", "pos4"]
initial_momenta = [
np.array([2000.0,0,0])*mec,
np.array([0.0,5000.0,0.0])*mec,
np.array([0.0,0.0,10000.0])*mec,
np.array([57735.02691896, 57735.02691896, 57735.02691896])*mec
]
initial_particle_number = 1048576
E_f = np.array([-2433321316961438., 973328526784575., 1459992790176863.])
B_f = np.array([2857142.85714286, 4285714.28571428, 8571428.57142857])
NNS = [128,128,128,128] #bins for energy distribution comparison.
#______________
#Returns all the species names and if they are photon species or not
def get_all_species_names_and_types():
names = spec_names_phot + spec_names_ele + spec_names_pos
types = [True]*len(spec_names_phot) + [False]*(len(spec_names_ele)+len(spec_names_pos))
return names, types
def calc_chi_gamma(p, E, B):
pnorm = np.linalg.norm(p)
v = c*(p/pnorm)
EpvvecB = E + np.cross(v,B)
vdotEoverc = np.dot(v,E)/c
ff = np.sqrt(np.dot(EpvvecB,EpvvecB) - np.dot(vdotEoverc,vdotEoverc))
gamma_phot = pnorm/mec
return gamma_phot*ff/E_s
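# The function above evaluates the photon quantum parameter
#   chi_gamma = (|p| / (m_e * c * E_s)) * sqrt(|E + v x B|^2 - ((v . E)/c)^2),
# i.e. the effective field strength seen by the photon, in units of the Schwinger field E_s.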
#Auxiliary functions
@np.vectorize
def BW_inner(x):
return integ.quad(lambda s: np.sqrt(s)*spe.kv(1./3., 2./3. * s**(3./2.)), x, np.inf)[0]
def BW_X(chi_phot, chi_ele):
div = (chi_ele*(chi_phot-chi_ele))
div = np.where(np.logical_and(chi_phot > chi_ele, chi_ele != 0), div, 1.0);
res = np.where(
|
np.logical_and(chi_phot > chi_ele, chi_ele != 0)
|
numpy.logical_and
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.distributions as distributions
import matplotlib.pyplot as plt
import numpy as np
import gym
import park
import tqdm
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim):
super().__init__()
self.fc_1 = nn.Linear(input_dim, hidden_dim)
self.fc_2 = nn.Linear(hidden_dim, output_dim)
def forward(self, x):
x = self.fc_1(x)
x = F.relu(x)
x = self.fc_2(x)
return x
class ActorCritic(nn.Module):
def __init__(self, actor, critic):
super().__init__()
self.actor = actor
self.critic = critic
def forward(self, state):
action_pred = self.actor(state)
value_pred = self.critic(state)
return action_pred, value_pred
def init_weights(m):
if type(m) == nn.Linear:
torch.nn.init.kaiming_normal_(m.weight)
m.bias.data.fill_(0)
def calculate_returns(rewards, discount_factor, device, normalize=True):
returns = []
R = 0
for r in reversed(rewards):
R = r + R * discount_factor
returns.insert(0, R)
returns = torch.tensor(returns).to(device)
if normalize:
returns = (returns - returns.mean()) / returns.std()
return returns
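# Worked example (illustrative): rewards [1, 0, 1] with discount_factor 0.5 give
# unnormalized returns [1.25, 0.5, 1.0]; with normalize=True they are then shifted
# and scaled to zero mean and unit standard deviation.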
def calculate_advantages(returns, pred_values, normalize=True):
advantages = returns - pred_values
if normalize:
advantages = (advantages - advantages.mean()) / advantages.std()
return advantages
def update_policy(advantages, log_prob_actions, returns, value_preds, entropies, optimizer):
returns = returns.detach()
policy_loss = -(advantages * log_prob_actions).mean()
value_loss = F.smooth_l1_loss(returns, value_preds)
optimizer.zero_grad()
loss = policy_loss + value_loss * 0.5 - entropies.mean() * 0.01
loss.backward()
optimizer.step()
return loss.item()
def evaluate(env, policy, device,num_steps):
policy.eval()
done = False
#episode_reward = 0
episode_reward = []
state = env.reset()
for steps in range(num_steps):
state = torch.FloatTensor(state).unsqueeze(0).to(device)
with torch.no_grad():
action_pred, _ = policy(state)
action_prob = F.softmax(action_pred, dim=-1)
action = torch.argmax(action_prob, dim=-1)
state, reward, done, _ = env.step(action.item())
#episode_reward += reward
episode_reward.append(reward)
if(done):
break
#return episode_reward
return np.mean(np.array(episode_reward)[-50:])
def train(env, policy, optimizer, discount_factor, device,num_steps):
policy.train()
log_prob_actions = []
entropies = []
value_preds = []
rewards = []
done = False
episode_reward = 0
state = env.reset()
for steps in range(num_steps):
state = torch.FloatTensor(state).unsqueeze(0).to(device)
action_pred, value_pred = policy(state)
action_prob = F.softmax(action_pred, dim=-1)
dist = distributions.Categorical(action_prob)
action = dist.sample()
log_prob_action = dist.log_prob(action)
entropy = dist.entropy()
state, reward, done, _ = env.step(action.item())
log_prob_actions.append(log_prob_action)
entropies.append(entropy)
value_preds.append(value_pred.squeeze(0))
rewards.append(reward)
episode_reward += reward
        if done:
break
log_prob_actions = torch.cat(log_prob_actions)
entropies = torch.cat(entropies)
value_preds = torch.cat(value_preds)
returns = calculate_returns(rewards, discount_factor, device)
advantages = calculate_advantages(returns, value_preds)
loss = update_policy(advantages, log_prob_actions, returns, value_preds, entropies, optimizer)
return loss, episode_reward
if __name__ == '__main__':
hidden_dim = 256
max_episodes = 80
discount_factor = 0.99
num_steps = 1000000
train_env = park.make('cache')
test_env = park.make('cache')
input_dim = train_env.observation_space.shape[0]
output_dim = train_env.action_space.n
train_rewards = torch.zeros(max_episodes)
test_rewards = torch.zeros(max_episodes)
device = torch.device('cpu')
actor = MLP(input_dim, hidden_dim, output_dim)
critic = MLP(input_dim, hidden_dim, 1)
actor_critic = ActorCritic(actor, critic)
actor_critic = actor_critic.to(device)
actor_critic.apply(init_weights)
optimizer = optim.Adam(actor_critic.parameters(), lr=3e-4)
for episode in tqdm.tqdm(range(max_episodes)):
loss, train_reward = train(train_env, actor_critic, optimizer, discount_factor, device, num_steps)
test_reward = evaluate(test_env, actor_critic, device, num_steps)
train_rewards[episode] = torch.from_numpy(np.array(train_reward))
test_rewards[episode] = torch.from_numpy(
|
np.array(test_reward)
|
numpy.array
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Federated hypothesis-based clustering implementation.
Based on the paper:
Three Approaches for Personalization with Applications to Federated Learning
<NAME>, <NAME>, <NAME>, <NAME>
https://arxiv.org/abs/2002.10619
"""
from typing import Iterator, List, Mapping, NamedTuple, Optional
from fedjax.legacy import core
import jax.numpy as jnp
import jax.random as jrandom
import numpy as np
class HypClusterState(NamedTuple):
"""State of server for HypCluster passed between rounds.
Attributes:
cluster_params: A list of pytrees representing the server model parameters
per cluster.
cluster_server_opt_states: A list of pytrees representing the server
optimizer state per cluster.
"""
cluster_params: List[core.Params]
cluster_server_opt_states: List[core.OptState]
class HypClusterHParams(NamedTuple):
"""Hyperparameters for federated hypothesis-based clustering.
Attributes:
train_data_hparams: Hyperparameters for training client data preparation.
num_clusters: Number of clusters.
init_cluster_client_ids: Optional list of client ids used to initialize
cluster centers via k-means++ variant. If None, initialization is random.
"""
train_data_hparams: core.ClientDataHParams
num_clusters: int = 1
init_cluster_client_ids: Optional[List[str]] = None
def _get_cluster_by_client_loss(
federated_data: core.FederatedData,
client_ids: List[str],
model: core.Model,
cluster_params: List[core.Params],
data_hparams: Optional[core.ClientDataHParams] = None) -> np.ndarray:
"""Returns the loss per cluster and client pair.
Args:
federated_data: Federated data separated per client to be evaluated.
client_ids: Ids of clients to be evaluated.
model: Model implementation.
cluster_params: List of pytree model parameters per cluster.
data_hparams: Hyperparameters for dataset preparation.
Returns:
Numpy array of losses of shape [num_clusters, num_clients].
"""
cluster_by_client_loss = []
for params in cluster_params:
client_metrics = core.evaluate_multiple_clients(
federated_data=federated_data,
client_ids=client_ids,
model=model,
params=params,
client_data_hparams=data_hparams)
client_loss = [m['loss'] if m else np.inf for m in client_metrics]
cluster_by_client_loss.append(client_loss)
# [num_clusters, num_clients].
cluster_by_client_loss = np.array(cluster_by_client_loss)
return cluster_by_client_loss
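# Clients whose evaluation produced no metrics contribute an infinite loss above, which
# keeps the downstream argmin/argmax bookkeeping well defined instead of failing on
# missing entries.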
def maximization(
federated_data: core.FederatedData,
client_ids: List[str],
model: core.Model,
cluster_params: List[core.Params],
data_hparams: Optional[core.ClientDataHParams] = None) -> List[List[str]]:
"""Finds the cluster that produces the lowest loss for each client.
Args:
federated_data: Federated data separated per client to be evaluated.
client_ids: Ids of clients to be evaluated.
model: Model implementation.
cluster_params: List of pytree model parameters per cluster.
data_hparams: Hyperparameters for dataset preparation.
Returns:
List of lists of client ids per cluster.
"""
cluster_by_client_loss = _get_cluster_by_client_loss(federated_data,
client_ids, model,
cluster_params,
data_hparams)
# [num_clients] index of best cluster per client.
cluster_ids = np.argmin(cluster_by_client_loss, axis=0)
cluster_client_ids = [[] for _ in cluster_params]
for cluster_id, client_id in zip(cluster_ids, client_ids):
cluster_client_ids[cluster_id].append(client_id)
return cluster_client_ids
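# Illustrative example: with cluster_by_client_loss = [[0.2, 0.9], [0.5, 0.1]]
# (2 clusters x 2 clients), np.argmin(..., axis=0) yields [0, 1], so client 0 is
# assigned to cluster 0 and client 1 to cluster 1.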
# TODO(b/162864512): Break up functions to jit compile.
class HypCluster(core.FederatedAlgorithm):
"""Federated hypothesis-based clustering algorithm."""
def __init__(self, federated_data: core.FederatedData, model: core.Model,
client_optimizer: core.Optimizer,
server_optimizer: core.Optimizer, hparams: HypClusterHParams,
rng_seq: core.PRNGSequence):
"""Initializes HypCluster algorithm.
Args:
federated_data: Federated data separated per client.
model: Model implementation.
client_optimizer: Client optimizer.
server_optimizer: Server optimizer.
hparams: Hyperparameters for federated hypothesis-based clustering.
rng_seq: Iterator of JAX random keys.
"""
self._federated_data = federated_data
self._model = model
self._client_optimizer = client_optimizer
self._server_optimizer = server_optimizer
self._hparams = hparams
self._rng_seq = rng_seq
self._client_trainer = core.DefaultClientTrainer(model, client_optimizer)
def _kmeans_init(self) -> List[core.Params]:
"""Initializes cluster parameters for HypCluster using k-means++ variant.
Given a set of input clients, we train parameters for each client. The
initial cluster parameters are chosen out of this set of client parameters.
At a high level, the cluster parameters are selected by choosing the client
parameters that are the "furthest away" from each other (greatest difference
in loss).
Returns:
Cluster center params.
"""
init_params = self._model.init_params(next(self._rng_seq))
client_ids = self._hparams.init_cluster_client_ids
client_states = core.train_multiple_clients(
federated_data=self.federated_data,
client_ids=client_ids,
client_trainer=self._client_trainer,
init_client_trainer_state=self._client_trainer.init_state(
params=init_params),
rng_seq=self._rng_seq,
client_data_hparams=self._hparams.train_data_hparams)
params_by_client = [cs.params for cs in client_states]
center_client = jrandom.choice(next(self._rng_seq), len(params_by_client))
cluster_centers = [params_by_client[center_client]]
for _ in range(1, self._hparams.num_clusters):
# [num_centers, num_clients].
cluster_by_client_loss = _get_cluster_by_client_loss(
self.federated_data, client_ids, self.model, cluster_centers,
self._hparams.train_data_hparams)
# [num_clients].
best_loss_per_client = np.min(cluster_by_client_loss, axis=0)
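      # Greedy k-means++-style step: the next center comes from the client whose best
      # achievable loss under the current centers is largest, i.e. the least well served client.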
worst_client =
|
np.argmax(best_loss_per_client)
|
numpy.argmax
|
"""
The transformers in this module act on a single numpy array.
"""
from abc import ABC, abstractmethod
import logging
import numpy as np
from PIL import Image
from typing import Tuple, Dict
LOG = logging.getLogger(__name__)
class NumpyTransform(ABC):
@staticmethod
def check_type_with_warning(input_: np.ndarray) -> None:
if not isinstance(input_, np.ndarray):
raise TypeError(f'Attempting to use a numpy transform with input of type {type(input_)}. Abort.')
@abstractmethod
def __call__(self, *args, **kwargs) -> np.ndarray:
raise NotImplementedError
class Numpy2PILTransform(NumpyTransform):
"""Transforms a Numpy nd.array into a PIL image."""
def __call__(self, array: np.ndarray) -> Image:
self.check_type_with_warning(array)
return Image.fromarray(array)
class NumpyReshapeTransform(NumpyTransform):
"""Take a flattened 1D numpy array and transform into new 2D shape and returns a PIL image (for torchvision)."""
def __init__(self, new_shape: Tuple[int, int]) -> None:
self._new_shape = new_shape
def __call__(self, array: np.ndarray) -> np.ndarray:
self.check_type_with_warning(array)
return np.reshape(array, self._new_shape)
class NumpyNormalizeTransform(NumpyTransform):
"""Normalizes a numpy array to have zero mean and unit variance.
Note: This transformer takes NO mask into account!
"""
def __call__(self, array: np.ndarray) -> np.ndarray:
self.check_type_with_warning(array)
return normalize_2d_array(array)
class NumpyNormalize01Transform(NumpyTransform):
"""Normalizes the data such that it lies in the range of [0, 1].
Note: This transformer takes NO mask into account!
"""
def __call__(self, array: np.ndarray) -> np.ndarray:
self.check_type_with_warning(array)
return (array - np.min(array)) / (np.max(array) - np.min(array))
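# Illustrative example: an input of np.array([2., 4., 6.]) is mapped to [0., 0.5, 1.];
# note that a constant input would lead to a division by zero in the expression above.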
def normalize_2d_array(input_array: np.ndarray, mask: np.ndarray = None) -> np.ndarray:
"""Normalizes a given input array to zero mean and unit variance. When mask is given, only consider those values."""
if mask is not None:
assert input_array.shape == mask.shape, f'Input and mask need to have same shape: ' \
f'{input_array.shape} != {mask.shape}'
assert mask.dtype == bool, f'Mask needs to be boolean. Given: {mask.dtype}'
relevant_values = input_array[mask] # gives back 1D array of values where mask has 'True' entry
else:
relevant_values = input_array
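    # relevant_values is either the masked subset or the full array; the normalization
    # statistics are computed from these values only.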
mean =
|
np.mean(relevant_values)
|
numpy.mean
|
import numpy as np
from numpy import linalg as la
from matrixmath import randn,vec
from ltimultgen import gen_system_mult
from policygradient import PolicyGradientOptions, run_policy_gradient, Regularizer
from ltimult import dlyap_obj,check_olmss
from matplotlib import pyplot as plt
from time import time
import os
from utility import create_directory
from pickle_io import pickle_import,pickle_export
def calc_sparsity(K,thresh,PGO):
if PGO is None:
regstr = 'vec1'
else:
if PGO.regularizer is None:
regstr = 'vec1'
else:
regstr = PGO.regularizer.regstr
# Calculate vals
    if regstr == 'vec1' or regstr == 'vec_huber':
vals = np.abs(vec(K))
elif regstr == 'mr':
vals = np.abs(K).max(1)
elif regstr == 'mc':
vals = np.abs(K).max(0)
    elif regstr == 'glr' or regstr == 'glr_huber':
vals = la.norm(K,ord=2,axis=1)
    elif regstr == 'glc' or regstr == 'glc_huber':
vals = la.norm(K,ord=2,axis=0)
binmax = np.max(vals)
bin1 = thresh*binmax
sparsity = np.sum(vals<bin1)/vals.size
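    # Sparsity is the fraction of entries (or rows/columns for the group regularizers)
    # whose magnitude falls below thresh times the largest magnitude.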
print('Sparsity = %.3f' % sparsity)
# Calculate black and white sparsity matrix
Kbw =
|
np.zeros_like(K)
|
numpy.zeros_like
|