#-*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
# hmmlearn can be installed via "pip install hmmlearn" once numpy is installed
from hmmlearn import hmm
from sklearn.model_selection import train_test_split
import os
from sklearn.model_selection import LeaveOneOut
import math
from plot_confusion_matrix import plot_confusion_matrix
from sklearn.metrics import confusion_matrix
import time
'''
Created on 2017-12-04
This example models the relationship between weather (hidden) and activities (observed)
'''
#states = ["Rainy", "Sunny"]
### hidden states
#n_states = len(states)
### number of hidden states
#observations = ["walk", "shop", "clean"]
### observable states
#n_observations = len(observations)
### number of observable states
#start_probability = np.array([0.6, 0.4])
### initial state probabilities, i.e. the probabilities of starting as Rainy or Sunny ## hidden weather transition matrix, i.e. the transitions between Rainy and Sunny; e.g. [0,0] is the probability of Rainy tomorrow given Rainy today
#transition_probability = np.array([[0.7, 0.3], [0.4, 0.6]])
### emission matrix between the hidden weather states and the visible activities; e.g. [0,0] means the probability of walk given Rainy is 0.1
#emission_probability = np.array([[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]])
## Build a MultinomialHMM model; it holds the initial state probabilities, the hidden weather transition matrix (transmat) and the weather-to-activity emission matrix (emissionprob), initializing the model parameters
#model = hmm.MultinomialHMM(n_components=n_states)
#model.startprob_= start_probability
#model.transmat_ = transition_probability
#model.emissionprob_ = emission_probability
## Provide an observation sequence
#bob_Actions = np.array([[2, 0, 1, 1, 2, 0]]).T
## Problem 1 (decoding): given the model parameters and X, estimate the most likely Z; Viterbi algorithm
#logprob, weathers = model.decode(bob_Actions, algorithm="viterbi")
#print("Bob Actions:", list(map(lambda x: observations[x], bob_Actions.flatten())))
#print("weathers:", list(map(lambda x: states[x], weathers)))
#print(logprob)
## This value reflects goodness of fit; larger is better # Problem 2 (evaluation): given the model parameters and X, estimate the probability of X; forward-backward algorithm
#score = model.score(bob_Actions, lengths=None)
## Print the final result
#print(score)
#
#
#
#states = ["A", "B", "C"]
#n_states = len(states)
#
#observations = ["down","up"]
#n_observations = len(observations)
#
#p = np.array([0.7, 0.2, 0.1])
#a = np.array([
# [0.5, 0.2, 0.3],
# [0.3, 0.5, 0.2],
# [0.2, 0.3, 0.5]
#])
#b = np.array([
# [0.6, 0.2],
# [0.3, 0.3],
# [0.1, 0.5]
#])
#o = np.array([[1, 0, 1, 1, 1]]).T
#
#model = hmm.MultinomialHMM(n_components=n_states)
#model.startprob_= p
#model.transmat_= a
#model.emissionprob_= b
#
#logprob, h = model.decode(o, algorithm="viterbi")
#print("The hidden h", ", ".join(map(lambda x: states[x], h)))
class HMMTrainer(object):
def __init__(self, model_name='GaussianHMM', n_components=10, cov_type='diag', n_iter=100):
#model name; hmmlearn implements three HMM classes: GaussianHMM and GMMHMM for continuous observations, MultinomialHMM for discrete observations
self.model=None
self.model_name = model_name
#number of hidden states
self.n_components = n_components
#covariance type of the emission distribution
self.cov_type = cov_type
#number of training iterations
self.n_iter = n_iter
self.models = []
# self.states = ["als", "control","hunt", "park"]
# self.n_states = len(self.states)
self.observations = []
self.n_observations = len(self.observations)
if self.model_name == 'GaussianHMM':
self.model = hmm.GaussianHMM(n_components=self.n_components,covariance_type=self.cov_type, n_iter=self.n_iter)
else:
# self.model = hmm.MultinomialHMM(n_components=self.n_components, n_iter=self.n_iter, tol=0.01)
self.model = hmm.GMMHMM(n_components=self.n_components, n_iter=self.n_iter, tol=0.01)
# raise TypeError('Invalid model type')
# X is a 2-D numpy array; each row has 13 columns (features)
def train(self, X):
np.seterr(all='ignore')
self.models.append(self.model.fit(X))
# Score the model on the given input
def get_score(self, input_data, label=None):
# hmmlearn's score(X, lengths=None) expects sequence lengths, not labels,
# so the label argument is kept for compatibility but not forwarded
return self.model.score(input_data)
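# Minimal usage sketch (assumption: X is an (n_samples, 13) float array,
# per the comment above; names are illustrative):
#   trainer = HMMTrainer(n_components=4)
#   trainer.train(X)
#   log_likelihood = trainer.get_score(X)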
def hmm_model_create(data,hmm_models,class_label,n_components=10):
m,n=data.shape
hmm_trainer = HMMTrainer()
# class_labels=class_label * np.ones((m,1))
hmm_trainer.model.fit(data)  # hmmlearn training is unsupervised: fit(X, lengths=None) takes no class labels
startprob=1/n_components * np.ones((1, n_components))
hmm_trainer.model.startprob_=startprob[0]
# hmm_trainer.model.transmat_= np.zeros((n_components, n_components))
# for i in range(n_components-1):
# hmm_trainer.model.transmat_[n_components-1,0]=1
# hmm_trainer.model.transmat_[i,i+1] = 1
# print(hmm_trainer.model.transmat_)
# hmm_trainer.model.emissionprob_= B_emissionprob
hmm_models.append((hmm_trainer, class_label))
def auto_data_split_fog(a,b):
trainsets_x=[]
testsets=[]
train_x1,test_x1,train_y1,test_y1 = train_test_split(a[0:7650,:],b[0:7650],test_size=0.7)
train_x2,test_x2,train_y2,test_y2 = train_test_split(a[7650:11880,:],b[7650:11880],test_size=0.7)
train_x3,test_x3,train_y3,test_y3 = train_test_split(a[11880:,:],b[11880:],test_size=0.7)
trainsets_x.append(train_x1)
trainsets_x.append(train_x2)
trainsets_x.append(train_x3)
testsets_x=np.r_[test_x1,test_x2,test_x3]
testsets_y=np.r_[test_y1,test_y2,test_y3]
testsets.append(testsets_x)
testsets.append(testsets_y)
return trainsets_x, testsets
def auto_data_split(a,b):
trainsets_x=[]
testsets=[]
train_x1,test_x1,train_y1,test_y1 = train_test_split(a[0:156,:],b[0:156],test_size=0.2)
train_x2,test_x2,train_y2,test_y2 = train_test_split(a[156:348,:],b[156:348],test_size=0.2)
train_x3,test_x3,train_y3,test_y3 = train_test_split(a[348:576,:],b[348:576],test_size=0.2)
train_x4,test_x4,train_y4,test_y4 = train_test_split(a[576:756,:],b[576:756],test_size=0.2)
trainsets_x.append(train_x1)
trainsets_x.append(train_x2)
trainsets_x.append(train_x3)
trainsets_x.append(train_x4)
testsets_x=np.r_[test_x1,test_x2,test_x3,test_x4]
testsets_y=np.r_[test_y1,test_y2,test_y3,test_y4]
testsets.append(testsets_x)
testsets.append(testsets_y)
return trainsets_x, testsets
def manul_data_split(a,b):
trainset1=[]
trainset2=[]
trainset3=[]
trainset4=[]
trainset5=[]
trainsets=[]
testsets=[]
# training data
# training set 1
trainset1.append(a[0:125,:])
trainset1.append(a[156:310,:])
trainset1.append(a[348:530,:])
trainset1.append(a[576:720,:])
trainsets.append(trainset1)
# testing data 1
test_x1=np.r_[a[125:156,:],a[310:348,:],a[530:576,:],a[720:756,:]]
test_y1=np.r_[b[125:156],b[310:348],b[530:576],b[720:756]]
testsets.append([test_x1,test_y1])
# training set 2
trainset2.append(a[31:156,:])
trainset2.append(a[156+38:348,:])
trainset2.append(a[348+46:576,:])
trainset2.append(a[576+36:756,:])
trainsets.append(trainset2)
# testing data 2
test_x2=np.r_[a[0:31,:],a[156:156+38,:],a[348:348+46,:],a[576:576+36,:]]
test_y2=np.r_[b[0:31],b[156:156+38],b[348:348+46],b[576:576+36]]
testsets.append([test_x2,test_y2])
# training set 3
trainset3.append(a[10:125+10,:])
trainset3.append(a[156+10:310+10,:])
trainset3.append(a[348+10:530+10,:])
trainset3.append(a[576+10:720+10,:])
trainsets.append(trainset3)
# testing data 3
test_x3=np.r_[a[0:10,:],a[135:156,:],a[156:166,:],a[320:348,:],a[348:358,:],a[540:576,:],a[576:586,:],a[730:756,:]]
test_y3=np.r_[b[0:10],b[135:156],b[156:166],b[320:348],b[348:358],b[540:576],b[576:586],b[730:756]]
testsets.append([test_x3,test_y3])
# training set 4
trainset4.append(a[20:145,:])
trainset4.append(a[176:330,:])
trainset4.append(a[368:550,:])
trainset4.append(a[596:740,:])
trainsets.append(trainset4)
# testing data 4
test_x4=np.r_[a[0:20,:],a[145:156,:],a[156:176,:],a[330:348,:],a[348:368,:],a[550:576,:],a[576:596,:],a[740:756,:]]
test_y4=np.r_[b[0:20],b[145:156],b[156:176],b[330:348],b[348:368],b[550:576],b[576:596],b[740:756]]
testsets.append([test_x4,test_y4])
# training set 5
trainset5.append(a[30:155,:])
trainset5.append(a[186:340,:])
trainset5.append(a[378:560,:])
trainset5.append(a[606:750,:])
trainsets.append(trainset5)
# testing data 5
test_x5=np.r_[a[0:30,:],a[145:156,:],a[156:176,:],a[330:348,:],a[348:368,:],a[550:576,:],a[576:596,:],a[740:756,:]]
test_y5=np.r_[b[0:30],b[145:156],b[156:176],b[330:348],b[348:368],b[550:576],b[576:596],b[740:756]]
testsets.append([test_x5,test_y5])
return trainsets, testsets
def softmax(x):
x_exp = np.exp(x)
return x_exp / np.sum(x_exp)  # remainder truncated in the source; the standard softmax completion is assumed
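# Quick check of the completion above: softmax(np.array([1.0, 2.0]))
# returns approximately array([0.2689, 0.7311]).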
import numpy as np
from scipy.optimize import linprog
from scipy.optimize import minimize
import quadprog
def linear_solver(n, M):
M -= np.amin(M) # Let zero sum game at least with nonnegative payoff
c = np.ones((n))
b = np.ones((n))
res = linprog(-c, A_ub = M.T, b_ub = b)
w = res.x
return w/np.sum(w)
def quadratic_solver(n, M, regularizer):
qp_G = np.matmul(M, M.T)
qp_G += regularizer * np.eye(n)
qp_a = np.zeros(n, dtype = np.float64)
qp_C = np.zeros((n,n+1), dtype = np.float64)
for i in range(n):
qp_C[i,0] = 1.0
qp_C[i,i+1] = 1.0
qp_b = np.zeros(n+1, dtype = np.float64)
qp_b[0] = 1.0
meq = 1
res = quadprog.solve_qp(qp_G, qp_a, qp_C, qp_b, meq)
w = res[0]
return w
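# Usage sketch (hypothetical M): find nonnegative weights w with sum(w) == 1
# minimizing 0.5 * w^T (M M^T + regularizer*I) w, i.e. roughly ||M^T w||^2:
#   n = 3
#   M = np.random.RandomState(0).randn(n, n)
#   w = quadratic_solver(n, M, 1e-3)   # w >= 0 and w sums to 1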
'''
def quadratic_solver_extend(n, M, b, regularizer):
qp_G = np.matmul(M, M.T)
qp_G += regularizer * np.eye(n)
qp_a = np.matmul(b[None, :], M.T).reshape(-1)
qp_C = np.zeros((n,n+1), dtype = np.float64)
for i in range(n):
qp_C[i,0] = 1.0
qp_C[i,i+1] = 1.0
qp_b = np.zeros(n+1, dtype = np.float64)
qp_b[0] = 1.0
meq = 1
res = quadprog.solve_qp(qp_G, qp_a, qp_C, qp_b, meq)
w = res[0]
return w
'''
class Density_Ratio_discrete(object):
def __init__(self, num_state):
self.num_state = num_state
self.Ghat = np.zeros([num_state, num_state], dtype = np.float64)
self.Nstate = np.zeros([num_state, 1], dtype = np.float64)
def reset(self):
num_state = self.num_state
self.Ghat = np.zeros([num_state, num_state], dtype = np.float64)
self.Nstate = np.zeros([num_state, 1], dtype = np.float64)
def feed_data(self, cur, next, policy_ratio):
self.Ghat[cur, next] += policy_ratio
self.Ghat[next, next] -= 1.0
self.Nstate[cur] += 0.5
self.Nstate[next] += 0.5
def density_ratio_estimate_old(self):
Frequency = (self.Nstate + 1e-5)
Frequency = Frequency / np.sum(Frequency)
G = self.Ghat / Frequency
n = self.num_state
x = quadratic_solver(n, G/100.0, 0.001)  # regularizer value assumed; quadratic_solver requires three arguments
#x2 = linear_solver(n, G)
#print(x)
#print(x2)
#print(np.sum((x-x2)*(x-x2)))
w = x/Frequency.reshape(-1)
return x, w
def density_ratio_estimate(self, regularizer = 0.001):
Frequency = self.Nstate.flat
tvalid = np.where(Frequency >= 1e-9)
G = np.zeros_like(self.Ghat)
Frequency = Frequency/np.sum(Frequency)
G[tvalid] = self.Ghat[tvalid]/(Frequency[:,None])[tvalid]
n = self.num_state
x = quadratic_solver(n, G/50.0, regularizer)
w = np.zeros(self.num_state)
w[tvalid] = x[tvalid]/Frequency[tvalid]
return x, w
def density_ratio_estimate_exact(self):
Frequency = self.Nstate.flat
tvalid = np.where(Frequency >= 1e-9)
G = np.zeros_like(self.Ghat)
Frequency = Frequency/np.sum(Frequency)
G = self.Ghat[tvalid, tvalid]/(Frequency[:,None])[tvalid]
G = G/np.linalg.norm(G, 'fro')
n = Frequency[tvalid].shape[0]
x = np.zeros(self.num_state)
x[tvalid] = quadratic_solver(n, G, 0.001)  # regularizer value assumed; quadratic_solver requires three arguments
w = np.zeros(self.num_state)
w[tvalid] = x[tvalid]/Frequency[tvalid]
return x, w
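# Usage sketch (hypothetical transition data): feed (cur, next) state pairs with
# their policy importance ratios, then estimate the stationary density ratio:
#   dr = Density_Ratio_discrete(num_state=5)
#   dr.feed_data(cur=0, next=1, policy_ratio=1.2)
#   x, w = dr.density_ratio_estimate(regularizer=0.001)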
class Density_Ratio_discounted(object):
def __init__(self, num_state, gamma):
self.num_state = num_state
self.Ghat = np.zeros([num_state, num_state], dtype = np.float64)
self.Nstate = np.zeros([num_state, 1], dtype = np.float64)
self.initial_b = np.zeros([num_state], dtype = np.float64)
self.gamma = gamma
def reset(self):
num_state = self.num_state
self.Ghat = np.zeros([num_state, num_state], dtype = np.float64)
self.Nstate = np.zeros([num_state, 1], dtype = np.float64)
def feed_data(self, cur, next, initial, policy_ratio, discounted_t):
if cur == -1:
self.Ghat[next, next] -= discounted_t
else:
self.Ghat[cur, next] += policy_ratio * discounted_t
self.Ghat[cur, initial] += (1-self.gamma)/self.gamma * discounted_t
self.Ghat[next, next] -= discounted_t
self.Nstate[cur] += discounted_t
def density_ratio_estimate(self, regularizer = 0.001):
Frequency = self.Nstate.reshape(-1)
tvalid = np.where(Frequency >= 1e-20)
G = np.zeros_like(self.Ghat)
Frequency = Frequency/np.sum(Frequency)
G[tvalid] = self.Ghat[tvalid]/(Frequency[:,None])[tvalid]
n = self.num_state
x = quadratic_solver(n, G/50.0, regularizer)
w = np.zeros(self.num_state)
w[tvalid] = x[tvalid]/Frequency[tvalid]
return x, w  # completion assumed by analogy with Density_Ratio_discrete; the source is truncated here
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cmath
import numpy as np
import pytest
import cirq
from cirq.linalg import matrix_commutes
def test_is_diagonal():
assert cirq.is_diagonal(np.empty((0, 0)))
assert cirq.is_diagonal(np.empty((1, 0)))
assert cirq.is_diagonal(np.empty((0, 1)))
assert cirq.is_diagonal(np.array([[1]]))
assert cirq.is_diagonal(np.array([[-1]]))
assert cirq.is_diagonal(np.array([[5]]))
assert cirq.is_diagonal(np.array([[3j]]))
assert cirq.is_diagonal(np.array([[1, 0]]))
assert cirq.is_diagonal(np.array([[1], [0]]))
assert not cirq.is_diagonal(np.array([[1, 1]]))
assert not cirq.is_diagonal(np.array([[1], [1]]))
assert cirq.is_diagonal(np.array([[5j, 0], [0, 2]]))
assert cirq.is_diagonal(np.array([[1, 0], [0, 1]]))
assert not cirq.is_diagonal(np.array([[1, 0], [1, 1]]))
assert not cirq.is_diagonal(np.array([[1, 1], [0, 1]]))
assert not cirq.is_diagonal(np.array([[1, 1], [1, 1]]))
assert not cirq.is_diagonal(np.array([[1, 0.1], [0.1, 1]]))
assert cirq.is_diagonal(np.array([[1, 1e-11], [1e-10, 1]]))
def test_is_diagonal_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_diagonal(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert not cirq.is_diagonal(np.array([[1, 0], [-0.6, 1]]), atol=atol)
# Error isn't accumulated across entries.
assert cirq.is_diagonal(np.array([[1, 0.5], [-0.5, 1]]), atol=atol)
assert not cirq.is_diagonal(np.array([[1, 0.5], [-0.6, 1]]), atol=atol)
def test_is_hermitian():
assert cirq.is_hermitian(np.empty((0, 0)))
assert not cirq.is_hermitian(np.empty((1, 0)))
assert not cirq.is_hermitian(np.empty((0, 1)))
assert cirq.is_hermitian(np.array([[1]]))
assert cirq.is_hermitian(np.array([[-1]]))
assert cirq.is_hermitian(np.array([[5]]))
assert not cirq.is_hermitian(np.array([[3j]]))
assert not cirq.is_hermitian(np.array([[0, 0]]))
assert not cirq.is_hermitian(np.array([[0], [0]]))
assert not cirq.is_hermitian(np.array([[5j, 0], [0, 2]]))
assert cirq.is_hermitian(np.array([[5, 0], [0, 2]]))
assert cirq.is_hermitian(np.array([[1, 0], [0, 1]]))
assert not cirq.is_hermitian(np.array([[1, 0], [1, 1]]))
assert not cirq.is_hermitian(np.array([[1, 1], [0, 1]]))
assert cirq.is_hermitian(np.array([[1, 1], [1, 1]]))
assert cirq.is_hermitian(np.array([[1, 1j], [-1j, 1]]))
assert cirq.is_hermitian(np.array([[1, 1j], [-1j, 1]]) * np.sqrt(0.5))
assert not cirq.is_hermitian(np.array([[1, 1j], [1j, 1]]))
assert not cirq.is_hermitian(np.array([[1, 0.1], [-0.1, 1]]))
assert cirq.is_hermitian(np.array([[1, 1j + 1e-11], [-1j, 1 + 1j * 1e-9]]))
def test_is_hermitian_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_hermitian(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert cirq.is_hermitian(np.array([[1, 0.25], [-0.25, 1]]), atol=atol)
assert not cirq.is_hermitian(np.array([[1, 0], [-0.6, 1]]), atol=atol)
assert not cirq.is_hermitian(np.array([[1, 0.25], [-0.35, 1]]), atol=atol)
# Error isn't accumulated across entries.
assert cirq.is_hermitian(np.array([[1, 0.5, 0.5], [0, 1, 0], [0, 0, 1]]), atol=atol)
assert not cirq.is_hermitian(np.array([[1, 0.5, 0.6], [0, 1, 0], [0, 0, 1]]), atol=atol)
assert not cirq.is_hermitian(np.array([[1, 0, 0.6], [0, 1, 0], [0, 0, 1]]), atol=atol)
def test_is_unitary():
assert cirq.is_unitary(np.empty((0, 0)))
"""
sources.sepp
============
Produces synthetic data based upon a "self-exciting" or "Hawkes model" point
process. These are point processes where the conditional intensity function
depends upon a background intensity (i.e. a homogeneous or possibly
inhomogeneous Poisson process) and when each event in the past contributes
a further (linearly additive) terms governed by a trigger / aftershock kernel.
Such models, with specific forms for the trigger kernel, are known as
"epidemic type aftershock models" in the Earthquake modelling literature.
Rather than rely upon external libraries (excepting numpy which we do use) we
produce a number of base classes which define kernels and samplers, and provide
some common kernels and samplers for backgrounds and triggers.
"""
from .. import data
from .. import kernels
from . import random
import abc as _abc
import numpy as _np
from numpy import timedelta64
import itertools as _itertools
class SpaceTimeKernel(kernels.Kernel):
"""To produce a kernel as required by the samplers in this package,
either extend this abstract class implementing `intensity(t, x, y)`
or provide your own class which has the same signature as `__call__`
and the property `kernel_max`"""
@_abc.abstractmethod
def intensity(self, t, x, y):
"""t, x and y will be one-dimensional numpy arrays of the same length.
:return: A numpy array of the same length as the input"""
pass
def __call__(self, points):
return self.intensity(points[0], points[1], points[2])
def set_scale(self):
raise NotImplementedError()
@_abc.abstractmethod
def kernel_max(self, time_start, time_end):
"""Return a value which is greater than or equal to the maximum
intensity of the kernel over the time range (and for any space input).
"""
pass
class PoissonTimeGaussianSpace(SpaceTimeKernel):
"""A kernel which is a constant rate Poisson process in time, and a two
dimensional Gaussian kernel in space (see
https://en.wikipedia.org/wiki/Multivariate_normal_distribution).
:param time_rate: The rate of the Poisson process in time.
:param mus: A pair of the mean values of the Gaussian in each variable.
:param variances: A pair of the variances of the Gaussian in each variable.
:param correlation: The correlation between the two Gaussians.
"""
def __init__(self, time_rate, mus, variances, correlation):
self.time_rate = time_rate
self.mus = mus
self.variances = variances
self.correlation = correlation
def _normalisation(self):
c = (1 - self.correlation**2)
return 1.0 / (2 * _np.pi * _np.sqrt(self.variances[0] * self.variances[1] * c) )
def intensity(self, t, x, y):
xf = (x - self.mus[0]) ** 2 / self.variances[0]
yf = (y - self.mus[1]) ** 2 / self.variances[1]
jf = ( 2 * self.correlation * (x - self.mus[0]) * (y - self.mus[1])
/ _np.sqrt(self.variances[0] * self.variances[1]) )
c = (1 - self.correlation**2)
k = _np.exp( - (xf + yf - jf) / (2 * c) )
return self.time_rate * k * self._normalisation()
def kernel_max(self, time_start, time_end):
return self._normalisation() * self.time_rate
class TimeKernel(kernels.Kernel):
"""A one dimensional kernel which can estimate its upper bound, for use
with rejection sampling.
"""
@_abc.abstractmethod
def kernel_max(self, time_start, time_end):
"""Return a value which is greater than or equal to the maximum
intensity of the kernel over the time range.
"""
pass
def set_scale(self):
raise NotImplementedError()
class HomogeneousPoisson(TimeKernel):
"""A constant kernel, representing a homogeneous poisson process.
:param rate: The rate of the process: the expected number of events per
time unit.
"""
def __init__(self, rate=1):
self._rate = rate
def __call__(self, times):
return _np.zeros_like(times) + self._rate
def kernel_max(self, time_start, time_end):
return self._rate
class Exponential(TimeKernel):
"""An exponentially decaying kernel.
:param exp_rate: The "rate" parameter of the exponential.
:param total_rate: The overall scaling of the kernel. If this kernel is
used to simulate a point process, then this is the expected number of
events.
"""
def __init__(self, exp_rate=1, total_rate=1):
self._rate = exp_rate
self._total = total_rate
def __call__(self, times):
return _np.exp( -self._rate * times) * self._rate * self._total
def kernel_max(self, time_start, time_end):
return self._rate * self._total
class Sampler(metaclass=_abc.ABCMeta):
"""Sample from a point process."""
@_abc.abstractmethod
def sample(self, start_time, end_time):
"""Find a sample from a point process.
:param start_time: The start of the time window to sample from.
:param end_time: The end of the time window to sample from.
:return: An array of shape (3,n) of space/time coordinates.
The data should always be _sorted_ in time.
"""
pass
@staticmethod
def _order_by_time(points):
"""Utility method to sort by time.
:param points: Usual time/space array of points.
:return: The same data, with each triple (t,x,y) preserved, but now
ordered so that points[0] is increasing.
"""
a = _np.argsort(points[0])
return points[:,a]
class InhomogeneousPoisson(Sampler):
"""A simple rejection (aka Otago thining) sampler.
:param region: the spatial extent of the simulation.
:param kernel: should follow the interface of :class SpaceTimeKernel:
"""
def __init__(self, region, kernel):
self._region = region
self._kernel = kernel
def _uniform_sample_region(self, start_time, end_time, num_points):
scale = _np.array([end_time - start_time,
self._region.xmax - self._region.xmin,
self._region.ymax - self._region.ymin])
offset = _np.array([start_time, self._region.xmin, self._region.ymin])
return _np.random.random((3,num_points)) * scale[:,None] + offset[:,None]
def sample(self, start_time, end_time):
area = (self._region.xmax - self._region.xmin) * (self._region.ymax - self._region.ymin)
kmax = self._kernel.kernel_max(start_time, end_time)
total_points = kmax * area * (end_time - start_time)
num_points = _np.random.poisson(lam = total_points)
pts = self._uniform_sample_region(start_time, end_time, num_points)
accept_prob = _np.random.random(num_points) * kmax
accept = (self._kernel(pts) >= accept_prob)
return self._order_by_time(pts[:,accept])
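# Minimal usage sketch (assumes `region` is a rectangular region object with
# xmin/xmax/ymin/ymax attributes, as used by _uniform_sample_region above):
#   kernel = PoissonTimeGaussianSpace(time_rate=10, mus=(0, 0),
#                                     variances=(1, 1), correlation=0)
#   sampler = InhomogeneousPoisson(region, kernel)
#   points = sampler.sample(0, 100)   # shape (3, n), sorted by time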
class SpaceSampler(metaclass=_abc.ABCMeta):
"""Base class for classes which can return samples from a space (two
dimensional) distribution.
"""
@_abc.abstractmethod
def __call__(self, length):
"""Return an array of shape (2,length)"""
pass
class GaussianSpaceSampler(SpaceSampler):
"""Returns samples from a Multivariate normal distribution.
:param mus: A pair of the mean values of the Gaussian in each variable.
:param variances: A pair of the variances of the Gaussian in each variable.
:param correlation: The correlation between the two Gaussians.
"""
def __init__(self, mus, variances, correlation):
self.mus = mus
self.stds = _np.sqrt(_np.array(variances))
self.correlation = correlation
def __call__(self, length):
xy = _np.random.standard_normal(size = length * 2).reshape((2,length))
theta = _np.arcsin(self.correlation) / 2
sin, cos = _np.sin(theta), _np.cos(theta)
x = xy[0] * sin + xy[1] * cos
y = xy[0] * cos + xy[1] * sin
x = x * self.stds[0] + self.mus[0]
y = y * self.stds[1] + self.mus[1]
return _np.vstack([x,y])
class UniformRegionSampler(SpaceSampler):
"""Returns space samples chosen uniformly from a rectangular region.
:param region: An instance of :class RectangularRegion: giving the region.
"""
def __init__(self, region):
self.region = region
def __call__(self, length):
x = _np.random.random(length) * self.region.width + self.region.xmin
y = _np.random.random(length) * self.region.height + self.region.ymin
return _np.vstack([x,y])
class InhomogeneousPoissonFactors(Sampler):
"""A time/space sampler where the kernel factorises into a time kernel and
a space kernel. For efficiency, we use a space sampler.
:param time_kernel: Should follow the interface of :class:`TimeKernel`
:param space_sampler: Should follow the interface of :class:`SpaceSampler`
"""
def __init__(self, time_kernel, space_sampler):
self._time_kernel = time_kernel
self._space_sampler = space_sampler
def sample(self, start_time, end_time):
kmax = self._time_kernel.kernel_max(start_time, end_time)
number_samples = _np.random.poisson(kmax * (end_time - start_time))
times = _np.random.random(size=number_samples)
MODEL_NAME = 'DA_Pts_dropout'
NORM = 'L1'
DegMax = 75
Debug = False
Parallel = False
ConstrastSimu = True # if True it randomly simulates contrast changes for each patch
DoBigEpochs = True
batch_number = 32
N_epochs = 5000
steps_epoch=100
NeededData = batch_number * N_epochs * steps_epoch + 1
SHOW_TB_weights = False # Show Net-weights info in TensorBoard
if MODEL_NAME[0:6]=="DA_Pts":
NetAffine = False # if False the NeuralNet will estimate point coordinates
else:
NetAffine = True # if True the NeuralNet will estimate the affine transformation itself
# When default GPU is being used... prepare to use a second one
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="0"
from library import *
from acc_test_library import *
import numpy as np
import time
import random
import cv2
def ProcessData(GA, stacked_patches, groundtruth_pts):
if ConstrastSimu:
channels = np.int32(np.shape(stacked_patches)[2]/2)
val1 = random.uniform(1/3, 3)
val2 = random.uniform(1/3, 3)
# cv2.imwrite("/tmp/p1_before.png",stacked_patches[:,:,0]*255)
# cv2.imwrite("/tmp/p2_before.png",stacked_patches[:,:,1]*255)
for i in range(channels):
stacked_patches[:,:,i] = np.power(stacked_patches[:,:,i],val1)
stacked_patches[:,:,channels+i] = np.power(stacked_patches[:,:,channels+i],val2)
# cv2.imwrite("/tmp/p1.png",stacked_patches[:,:,0]*255)
# cv2.imwrite("/tmp/p2.png",stacked_patches[:,:,1]*255)
if NetAffine:
groundtruth_pts = GA.Nvec2Avec(groundtruth_pts)
# groundtruth_pts2 = GA.Avec2Nvec(groundtruth_pts1)
# print(GA.UnNormalizeVector(groundtruth_pts)-GA.UnNormalizeVector(groundtruth_pts2))
return stacked_patches, groundtruth_pts #if ConstrastSimu==False -> Identity
GAval = GenAffine("./imgs-val/", save_path = "./db-gen-val-"+str(DegMax)+"/", DoBigEpochs = DoBigEpochs, tmax = DegMax)
GAtrain = GenAffine("./imgs-train/", save_path = "./db-gen-train-"+str(DegMax)+"/", DoBigEpochs = DoBigEpochs, tmax = DegMax)
Set_FirstThreadTouch(GAval,True)
Set_FirstThreadTouch(GAtrain,True)
stacked_patches, groundtruth_pts = GAtrain.gen_affine_patches()
stacked_patches, groundtruth_pts = ProcessData(GAtrain, stacked_patches, groundtruth_pts)
def affine_generator(GA, batch_num=32, Force2Gen=False, ForceFast=False):
P_list = []
GT_list = []
FastThread = False
t2sleep = 2*random.random()
time.sleep(t2sleep)
assert Force2Gen==False or ForceFast==False
if ForceFast:
FastThread = True
if Force2Gen==False and Check_FirstThreadTouch(GA)==False:
print("Fast Thread Created ! Needs "+str(NeededData)+" generated data")
Set_FirstThreadTouch(GA,True)
FastThread = True
while True:
if FastThread and ForceFast==False:
GA.ScatteredGenData_2_BlockData() # it will be really done every 30 minutes
stacked_patches, groundtruth_pts = [], []
if FastThread and Force2Gen==False:
# print(len(P_list), len(GT_list))
stacked_patches, groundtruth_pts = GA.Fast_gen_affine_patches()
else:
stacked_patches, groundtruth_pts = GA.gen_affine_patches()
stacked_patches, groundtruth_pts = ProcessData(GA, stacked_patches, groundtruth_pts)
vgg_input_shape = np.shape(stacked_patches)
vgg_output_shape = np.shape(groundtruth_pts)
bPshape = tuple([batch_num]) + tuple(vgg_input_shape)
bGTshape = tuple([batch_num]) + tuple(vgg_output_shape)
bP = np.zeros(shape=bPshape, dtype = np.float32)
bGT = np.zeros(shape=bGTshape, dtype = np.float32)
bP[0,:,:,:] = stacked_patches
bGT[0,:] = groundtruth_pts
for i in range(1,batch_num):
if FastThread and Force2Gen==False:
# print(len(P_list), len(GT_list))
stacked_patches, groundtruth_pts = GA.Fast_gen_affine_patches()
else:
stacked_patches, groundtruth_pts = GA.gen_affine_patches()
stacked_patches, groundtruth_pts = ProcessData(GA, stacked_patches, groundtruth_pts)
bP[i,:,:,:] = stacked_patches
bGT[i,:] = groundtruth_pts
# print('These numbers should not repeat in other lines: '+ str(bP[0,0,0,0])+" "+str(bP[-1,0,0,0]))
# print('Gen batch: '+str(np.shape(bP))+', '+str(np.shape(bGT)))
yield [bP , bGT], None
# VGG like network
from tensorflow.compat.v1.keras import layers
from tensorflow.compat.v1.keras.models import Model
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1.keras.backend import set_session
config = tf.ConfigProto(allow_soft_placement=True)
#, device_count = {'CPU' : 1, 'GPU' : 1})
config.gpu_options.per_process_gpu_memory_fraction = 0.3
set_session(tf.Session(config=config))
from models import *
vgg_input_shape = np.shape(stacked_patches)
vgg_output_shape = np.shape(groundtruth_pts)
train_model = create_model(vgg_input_shape, vgg_output_shape, model_name = MODEL_NAME, Norm=NORM, resume = True)
# ---> TRAIN NETWORK
import math
import scipy.special
import random
from sklearn.manifold import TSNE, MDS
from sklearn.metrics import f1_score, accuracy_score
from tensorflow.compat.v1.keras.callbacks import TerminateOnNaN, ModelCheckpoint, TensorBoard, LambdaCallback, ReduceLROnPlateau
import os
from shutil import copyfile
import matplotlib.pyplot as plt
plt.switch_backend('agg')
#modified from http://seoulai.com/2018/02/06/keras-and-tensorboard.html
class TensorboardKeras(object):
def __init__(self, model, log_dir, GAval, GAtrain, static_val_num=1):
self.model = model
self.log_dir = log_dir
self.session = K.get_session()
self.lastloss = float('nan')
self.lastvalloss = float('nan')
self.GAval = GAval
self.GAtrain = GAtrain
self.static_Patches = []
self.static_GTval = []
self.static_val_num = static_val_num
self.acc_data_inputs = []
self.acc_data_names = []
self.lastacc = 0
self.TKid = random.randint(0,1000)
for d in affine_generator(self.GAval, batch_num=self.static_val_num,ForceFast=True):
self.static_Patches = d[0][0]
self.static_GTval = d[0][1]
break
hs, ws = self.static_Patches.shape[1:3]
self.SquarePatch = SquareOrderedPts(hs,ws,CV=False)
self.static_val_repr = tf.placeholder(dtype=tf.float32)
tf.summary.image("Repr/Static_validation", self.static_val_repr)
self.dynamic_val_repr = tf.placeholder(dtype=tf.float32)
tf.summary.image("Repr/Dynamic_validation", self.dynamic_val_repr)
self.lr_ph = tf.placeholder(shape=(), dtype=tf.float32)
tf.summary.scalar('Learning_rate', self.lr_ph)
self.big_epoch = tf.placeholder(shape=(), dtype=tf.float32)
tf.summary.scalar('Big_Epoch', self.big_epoch)
self.val_loss_ph = tf.placeholder(shape=(), dtype=tf.float32)
tf.summary.scalar('losses/validation', self.val_loss_ph)
self.train_loss_ph = tf.placeholder(dtype=tf.float32)
tf.summary.scalar('losses/training', self.train_loss_ph)
# self.sift = cv2.xfeatures2d.SIFT_create( nfeatures = siftparams.nfeatures,
# nOctaveLayers = siftparams.nOctaveLayers, contrastThreshold = siftparams.contrastThreshold,
# edgeThreshold = siftparams.edgeThreshold, sigma = siftparams.sigma)
self.global_acc_holder = tf.placeholder(dtype=tf.float32)
tf.summary.scalar('accuracy/_GLOBAL_', self.global_acc_holder)
self.acc_test_holder = []
for file in glob.glob('./acc-test/*.txt'):
self.acc_data_names.append( os.path.basename(file)[:-4] )
i = len(self.acc_data_names) - 1
pathway = './acc-test/' + self.acc_data_names[i]
# asift_KPlist1, patches1, GT_Avec_list, asift_KPlist2, patches2 = load_acc_test_data(pathway)
self.acc_data_inputs.append( load_acc_test_data(pathway) )
self.acc_test_holder.append(tf.placeholder(dtype=tf.float32))
self.acc_test_holder.append(tf.placeholder(dtype=tf.float32))
self.acc_test_holder.append(tf.placeholder(dtype=tf.float32))
self.acc_test_holder.append(tf.placeholder(dtype=tf.float32))
self.acc_test_holder.append(tf.placeholder(dtype=tf.float32))
self.acc_test_holder.append(tf.placeholder(dtype=tf.float32))
self.acc_test_holder.append(tf.placeholder(dtype=tf.float32))
self.variable_summaries(self.acc_test_holder[7*i ], self.acc_data_names[i]+'-accuracy-info/zoom-diff')
self.variable_summaries(self.acc_test_holder[7*i+1], self.acc_data_names[i]+'-accuracy-info/phi2-diff')
self.variable_summaries(self.acc_test_holder[7*i+2], self.acc_data_names[i]+'-accuracy-info/tilt-diff')
self.variable_summaries(self.acc_test_holder[7*i+3], self.acc_data_names[i]+'-accuracy-info/phi1-diff')
tf.summary.scalar('accuracy/'+self.acc_data_names[i], self.acc_test_holder[7*i+4])
self.variable_summaries(self.acc_test_holder[7*i+5], self.acc_data_names[i]+'-accuracy-info/tras-x_coor-diff')
self.variable_summaries(self.acc_test_holder[7*i+6], self.acc_data_names[i]+'-accuracy-info/tras-y_coor-diff')
if SHOW_TB_weights:
l = np.shape(self.model.layers[2].get_weights())[0]
self.weightsholder = []
for i in range(0,l):
self.weightsholder.append(tf.placeholder(dtype=tf.float32))
self.variable_summaries(self.weightsholder[i], 'weights/'+repr(i).zfill(3)+'-layer')
self.merged = tf.summary.merge_all()
self.writer = tf.summary.FileWriter(self.log_dir)
copyfile(os.path.realpath(__file__), self.log_dir+"/"+os.path.basename(__file__))
def _get_val_image_repr(self,batchGT,batchE, inSquare=True):
fig = plt.figure()
spn = self.GAval.NormalizeVector( Pts2Flatten(self.SquarePatch) )
plt.plot(close_per(spn[0:8:2]),close_per(spn[1:8:2]),':k')
for i in range(0, np.shape(batchGT)[0]):  # the '[0]' index is assumed; the loop body is truncated in the source
from __future__ import division, print_function, absolute_import
from .core import SeqletCoordinates
from modisco import util
import numpy as np
from collections import defaultdict, Counter, OrderedDict
import itertools
import sys
import time
from .value_provider import (
AbstractValTransformer, AbsPercentileValTransformer,
SignedPercentileValTransformer, PrecisionValTransformer)
import scipy
from sklearn.isotonic import IsotonicRegression
SUBSAMPLE_CAP = 1000000
#The only parts of TransformAndThresholdResults that are used in
# TfModiscoWorkflow are the transformed_pos/neg_thresholds and the
# val_transformer (used in metaclustering with multiple tasks)
#TransformAndThresholdResults are also used to be
# able to replicate the same procedure used for identifying coordinates as
# when TfMoDisco was first run; the information needed in that case would
# be specific to the type of Coordproducer used
class AbstractTransformAndThresholdResults(object):
def __init__(self, transformed_neg_threshold, transformed_pos_threshold,
val_transformer):
self.transformed_neg_threshold = transformed_neg_threshold
self.transformed_pos_threshold = transformed_pos_threshold
self.val_transformer = val_transformer
@classmethod
def from_hdf5(cls, grp):
if "class" not in grp.attrs:
the_class = FWACTransformAndThresholdResults
else:
the_class = eval(grp.attrs["class"])
if (the_class.__name__ != cls.__name__):
return the_class.from_hdf5(grp)
class BasicTransformAndThresholdResults(AbstractTransformAndThresholdResults):
def save_hdf5(self, grp):
grp.attrs["class"] = type(self).__name__
grp.attrs["transformed_neg_threshold"] = self.transformed_neg_threshold
grp.attrs["transformed_pos_threshold"] = self.transformed_pos_threshold
self.val_transformer.save_hdf5(grp.create_group("val_transformer"))
@classmethod
def load_basic_attrs_from_hdf5(cls, grp):
transformed_neg_threshold = grp.attrs['transformed_neg_threshold']
transformed_pos_threshold = grp.attrs['transformed_pos_threshold']
val_transformer = AbstractValTransformer.from_hdf5(
grp["val_transformer"])
return (transformed_neg_threshold, transformed_pos_threshold,
val_transformer)
@classmethod
def from_hdf5(cls, grp):
the_class = eval(grp.attrs["class"])
(transformed_neg_threshold,
transformed_pos_threshold,
val_transformer) = cls.load_basic_attrs_from_hdf5(grp)
return cls(transformed_neg_threshold=transformed_neg_threshold,
transformed_pos_threshold=transformed_pos_threshold,
val_transformer=val_transformer)
#FWAC = FixedWindowAroundChunks; this TransformAndThresholdResults object
# is specific to the type of info needed in that case.
class FWACTransformAndThresholdResults(
BasicTransformAndThresholdResults):
def __init__(self, neg_threshold,
transformed_neg_threshold,
pos_threshold,
transformed_pos_threshold,
val_transformer):
#both 'transformed_neg_threshold' and 'transformed_pos_threshold'
# should be positive, i.e. they should be relative to the
# transformed distribution used to set the threshold, e.g. a
# cdf value
self.neg_threshold = neg_threshold
self.pos_threshold = pos_threshold
super(FWACTransformAndThresholdResults, self).__init__(
transformed_neg_threshold=transformed_neg_threshold,
transformed_pos_threshold=transformed_pos_threshold,
val_transformer=val_transformer)
def save_hdf5(self, grp):
super(FWACTransformAndThresholdResults, self).save_hdf5(grp)
grp.attrs["neg_threshold"] = self.neg_threshold
grp.attrs["pos_threshold"] = self.pos_threshold
@classmethod
def from_hdf5(cls, grp):
(transformed_neg_threshold, transformed_pos_threshold,
val_transformer) = cls.load_basic_attrs_from_hdf5(grp)
neg_threshold = grp.attrs['neg_threshold']
pos_threshold = grp.attrs['pos_threshold']
return cls(neg_threshold=neg_threshold,
transformed_neg_threshold=transformed_neg_threshold,
pos_threshold=pos_threshold,
transformed_pos_threshold=transformed_pos_threshold,
val_transformer=val_transformer)
class AbstractCoordProducer(object):
def __call__(self):
raise NotImplementedError()
@classmethod
def from_hdf5(cls, grp):
the_class = eval(grp.attrs["class"])
return the_class.from_hdf5(grp)
class SeqletCoordsFWAP(SeqletCoordinates):
"""
Coordinates for the FixedWindowAroundChunks CoordProducer
"""
def __init__(self, example_idx, start, end, score, other_info={}):
self.score = score
self.other_info = other_info
super(SeqletCoordsFWAP, self).__init__(
example_idx=example_idx,
start=start, end=end,
is_revcomp=False)
class CoordProducerResults(object):
def __init__(self, coords, tnt_results):
self.coords = coords
self.tnt_results = tnt_results
@classmethod
def from_hdf5(cls, grp):
coord_strings = util.load_string_list(dset_name="coords",
grp=grp)
coords = [SeqletCoordinates.from_string(x) for x in coord_strings]
tnt_results = AbstractTransformAndThresholdResults.from_hdf5(
grp["tnt_results"])
return CoordProducerResults(coords=coords,
tnt_results=tnt_results)
def save_hdf5(self, grp):
util.save_string_list(
string_list=[str(x) for x in self.coords],
dset_name="coords",
grp=grp)
self.tnt_results.save_hdf5(
grp=grp.create_group("tnt_results"))
def get_simple_window_sum_function(window_size):
def window_sum_function(arrs):
to_return = []
for arr in arrs:
cumsum = np.cumsum(arr)
cumsum = np.array([0]+list(cumsum))
to_return.append(cumsum[window_size:]-cumsum[:-window_size])
return to_return
return window_sum_function
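# Example: with window_size=2, window_sum_function([np.array([1, 2, 3, 4])])
# returns [array([3, 5, 7])], the sum over each length-2 sliding window.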
class GenerateNullDist(object):
def __call__(self, score_track):
raise NotImplementedError()
class TakeSign(GenerateNullDist):
@classmethod
def from_hdf5(cls, grp):
raise NotImplementedError()
def save_hdf(cls, grp):
raise NotImplementedError()
def __call__(self, score_track):
null_tracks = [np.sign(x) for x in score_track]
return null_tracks
class TakeAbs(GenerateNullDist):
@classmethod
def from_hdf5(cls, grp):
raise NotImplementedError()
def save_hdf(cls, grp):
raise NotImplementedError()
def __call__(self, score_track):
null_tracks = [np.abs(x) for x in score_track]
return null_tracks
class LaplaceNullDist(GenerateNullDist):
def __init__(self, num_to_samp, verbose=True,
percentiles_to_use=[5*(x+1) for x in range(19)],
random_seed=1234):
self.num_to_samp = num_to_samp
self.verbose = verbose
self.percentiles_to_use = np.array(percentiles_to_use)
self.random_seed = random_seed
self.rng = np.random.RandomState()
@classmethod
def from_hdf5(cls, grp):
num_to_samp = grp.attrs["num_to_samp"]
verbose = grp.attrs["verbose"]
percentiles_to_use = np.array(grp["percentiles_to_use"][:])
return cls(num_to_samp=num_to_samp, verbose=verbose,
percentiles_to_use=percentiles_to_use)
def save_hdf5(self, grp):
grp.attrs["class"] = type(self).__name__
grp.attrs["num_to_samp"] = self.num_to_samp
grp.attrs["verbose"] = self.verbose
grp.create_dataset('percentiles_to_use',
data=self.percentiles_to_use)
def __call__(self, score_track, window_size, original_summed_score_track):
#original_summed_score_track is supplied to avoid recomputing it
if (original_summed_score_track is None):
window_sum_function = get_simple_window_sum_function(window_size)
original_summed_score_track = window_sum_function(arrs=score_track)
values = np.concatenate(original_summed_score_track, axis=0)
# first estimate mu, using two level histogram to get to 1e-6
hist1, bin_edges1 = np.histogram(values, bins=1000)
peak1 = np.argmax(hist1)
l_edge = bin_edges1[peak1]
r_edge = bin_edges1[peak1+1]
top_values = values[ (l_edge < values) & (values < r_edge) ]
hist2, bin_edges2 = np.histogram(top_values, bins=1000)
peak2 = np.argmax(hist2)
l_edge = bin_edges2[peak2]
r_edge = bin_edges2[peak2+1]
mu = (l_edge + r_edge) / 2
if (self.verbose):
print("peak(mu)=", mu)
pos_values = [x for x in values if x >= mu]
neg_values = [x for x in values if x <= mu]
#for an exponential distribution:
# cdf = 1 - exp(-lambda*x)
# exp(-lambda*x) = 1-cdf
# -lambda*x = log(1-cdf)
# lambda = -log(1-cdf)/x
# x = -log(1-cdf)/lambda
#Take the most aggressive lambda over all percentiles
pos_laplace_lambda = np.max(
-np.log(1-(self.percentiles_to_use/100.0))/
(np.percentile(a=pos_values, q=self.percentiles_to_use)-mu))
neg_laplace_lambda = np.max(
-np.log(1-(self.percentiles_to_use/100.0))/
(np.abs(np.percentile(a=neg_values,
q=100-self.percentiles_to_use)-mu)))
self.rng.seed(self.random_seed)
prob_pos = float(len(pos_values))/(len(pos_values)+len(neg_values))
sampled_vals = []
for i in range(self.num_to_samp):
sign = 1 if (self.rng.uniform() < prob_pos) else -1
if (sign == 1):
sampled_cdf = self.rng.uniform()
val = -np.log(1-sampled_cdf)/pos_laplace_lambda + mu
else:
sampled_cdf = self.rng.uniform()
val = mu + np.log(1-sampled_cdf)/neg_laplace_lambda
sampled_vals.append(val)
return np.array(sampled_vals)
class FlipSignNullDist(GenerateNullDist):
def __init__(self, num_seq_to_samp, shuffle_pos=False,
seed=1234, num_breaks=100,
lower_null_percentile=20,
upper_null_percentile=80):
self.num_seq_to_samp = num_seq_to_samp
self.shuffle_pos = shuffle_pos
self.seed = seed
self.rng = np.random.RandomState()
self.num_breaks = num_breaks
self.lower_null_percentile = lower_null_percentile
self.upper_null_percentile = upper_null_percentile
@classmethod
def from_hdf5(cls, grp):
raise NotImplementedError()
def save_hdf(cls, grp):
raise NotImplementedError()
def __call__(self, score_track, windowsize, original_summed_score_track):
#summed_score_track is supplied to avoid recomputing it
window_sum_function = get_simple_window_sum_function(windowsize)
if (original_summed_score_track is None):
original_summed_score_track = window_sum_function(arrs=score_track)
all_orig_summed_scores = np.concatenate(
original_summed_score_track, axis=0)
pos_threshold = np.percentile(a=all_orig_summed_scores,
q=self.upper_null_percentile)
neg_threshold = np.percentile(a=all_orig_summed_scores,
q=self.lower_null_percentile)
#retain only the portions of the tracks that are under the
# thresholds
retained_track_portions = []
num_pos_vals = 0
num_neg_vals = 0
for (single_score_track, single_summed_score_track)\
in zip(score_track, original_summed_score_track):
window_passing_track = [
(1.0 if (x > neg_threshold and x < pos_threshold) else 0)
for x in single_summed_score_track]
padded_window_passing_track = [0.0]*int(windowsize-1)
padded_window_passing_track.extend(window_passing_track)
padded_window_passing_track.extend([0.0]*int(windowsize-1))
pos_in_passing_window = window_sum_function(
[padded_window_passing_track])[0]
assert len(single_score_track)==len(pos_in_passing_window)
single_retained_track = []
for (val, pos_passing) in zip(single_score_track,
pos_in_passing_window):
if (pos_passing > 0):
single_retained_track.append(val)
num_pos_vals += (1 if val > 0 else 0)
num_neg_vals += (1 if val < 0 else 0)
retained_track_portions.append(single_retained_track)
print("Fraction of positions retained:",
sum(len(x) for x in retained_track_portions)/
sum(len(x) for x in score_track))
prob_pos = num_pos_vals/float(num_pos_vals + num_neg_vals)
self.rng.seed(self.seed)
null_tracks = []
for i in range(self.num_seq_to_samp):
random_track = retained_track_portions[
int(self.rng.randint(0,len(retained_track_portions)))]
track_with_sign_flips = np.array([
abs(x)*(1 if self.rng.uniform() < prob_pos else -1)
for x in random_track])
if (self.shuffle_pos):
self.rng.shuffle(track_with_sign_flips)
null_tracks.append(track_with_sign_flips)
return np.concatenate(window_sum_function(null_tracks), axis=0)
def get_null_vals(null_track, score_track, window_size,
original_summed_score_track):
if (hasattr(null_track, '__call__')):
null_vals = null_track(
score_track=score_track,
window_size=window_size,
original_summed_score_track=original_summed_score_track)
else:
window_sum_function = get_simple_window_sum_function(window_size)
null_summed_score_track = window_sum_function(arrs=null_track)
null_vals = list(np.concatenate(null_summed_score_track, axis=0))
return null_vals
def subsample_if_large(arr):
if (len(arr) > SUBSAMPLE_CAP):
print("Subsampling!")
sys.stdout.flush()
arr = np.random.RandomState(1234).choice(a=arr, size=SUBSAMPLE_CAP,
replace=False)
return arr
def irval_to_probpos(irval, frac_neg):
#n(x):= pdf of null dist (negatives)
#p(x):= pdf of positive distribution
#f_p:= fraction of positives
#f_n:= fraction of negatives = 1-f_p
#o(x):= pdf of observed distribution = n(x)f_n + p(x)f_p
#The isotonic regression produces a(x) = o(x)/[o(x) + n(x)]
# o(x)/[o(x) + n(x)] = [n(x)f_n + o(x)f_p]/[n(x)(1+f_n) + p(x)]
# a(x)[n(x)(1+f_n) + p(x)f_p] = n(x)f_n + p(x)f_p
# a(x)n(x)(1+f_n) - n(x)f_n = p(x)f_p - a(x)p(x)f_p
# n(x)[a(x)(1+f_n) - f_n] = p(x)f_p[1 - a(x)]
# [a(x)/f_n + (a(x)-1)]/[1-a(x)] = (p(x)f_p)/(n(x)f_n) = r(x)
#p_pos = 1 / (1 + 1/r(x))
# = [a(x)/f_n + (a(x)-1)]/[a(x)/f_n + (a(x)-1) + (1-a(x))]
# = [a(x)/f_n + a(x)-1]/[a(x)/f_n]
# = [a(x) + f_n(a(x)-1)]/a(x)
# = 1 + f_n(a(x)-1)/a(x)
# = 1 + f_n(1 - 1/a(x))
#If solving for p_pos=0, we have -1/(1 - 1/a(x)) = f_n
#As f_n --> 100%, p_pos --> 2 - 1/a(x); this assumes max(a(x)) = 0.5
return np.minimum(np.maximum(1 + frac_neg*(
1 - (1/np.maximum(irval,1e-7))), 0.0), 1.0)
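# Numeric check of the formula above (hypothetical values): an isotonic value
# a(x) = 0.6 with frac_neg = 0.95 gives 1 + 0.95*(1 - 1/0.6) ~= 0.367,
# clipped into [0, 1].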
class SavableIsotonicRegression(object):
def __init__(self, origvals, nullvals, increasing, min_frac_neg=0.95):
self.origvals = origvals
self.nullvals = nullvals
self.increasing = increasing
self.min_frac_neg = min_frac_neg
self.ir = IsotonicRegression(out_of_bounds='clip',
increasing=increasing).fit(
X=np.concatenate([self.origvals, self.nullvals], axis=0),
y=([1.0 for x in self.origvals] + [0.0 for x in self.nullvals]),
sample_weight=([1.0 for x in self.origvals]
+[float(len(self.origvals))/len(self.nullvals)
for x in self.nullvals]))
#Infer frac_pos based on the minimum value of the ir probs
#See derivation in irval_to_probpos function
min_prec_x = self.ir.X_min_ if self.increasing else self.ir.X_max_
min_precision = self.ir.transform([min_prec_x])[0]
implied_frac_neg = -1/(1-(1/max(min_precision,1e-7)))
print("For increasing =",increasing,", the minimum IR precision was",
min_precision,"occurring at",min_prec_x,
"implying a frac_neg",
"of",implied_frac_neg)
if (implied_frac_neg > 1.0 or implied_frac_neg < self.min_frac_neg):
implied_frac_neg = max(min(1.0,implied_frac_neg),
self.min_frac_neg)
print("To be conservative, adjusted frac neg is",implied_frac_neg)
self.implied_frac_neg = implied_frac_neg
def transform(self, vals):
return irval_to_probpos(self.ir.transform(vals),
frac_neg=self.implied_frac_neg)
def save_hdf5(self, grp):
grp.attrs['increasing'] = self.increasing
grp.attrs['min_frac_neg'] = self.min_frac_neg
grp.create_dataset('origvals', data=self.origvals)
grp.create_dataset('nullvals', data=self.nullvals)
@classmethod
def from_hdf5(cls, grp):
increasing = grp.attrs['increasing']
min_frac_neg = grp.attrs['min_frac_neg']
origvals = np.array(grp['origvals'])
nullvals = np.array(grp['nullvals'])
return cls(origvals=origvals, nullvals=nullvals,
increasing=increasing, min_frac_neg=min_frac_neg)  # return assumed by analogy with __init__; the source is truncated here
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 08 17:46:45 2017
@author: apfranco
"""
import numpy as np
import scipy
from scipy.optimize import leastsq
def RockPhysicsCalibration(agd, OM):
# ALGORITHM FOR CALIBRATING ROCK PHYSICS MODELS
#
# MODELS
# 1 - neutron porosity:
# phi = A + B phiE + C vsh or
# 2 - gamma rays:
# gr = grmin + (grmax - grmin) vsh
# 3 - density model:
# rho = rhoq + (rhof-rhoq) * phiE + (rhoc-rhoq) * vsh * (1 - phiE);
# 4 - resistivity:
# 1/ Rt = ( phiE**m Sw**n ) / ( a Rw (1-chi) ) + ( chi Sw ) / Rsh
#
# OVERVIEW:
# The program should be run to generate the coefficients and densities described
# above, to be used in later stages of inferring porosity,
# clay volume and saturation. The program provides an option to enter
# known stratigraphic boundaries, performing a global calibration over
# the whole package and also in groups split by shale volume
# according to a cutoff value (cutclay). The program provides 3
# output options: calibration over the entire analyzed segment, over
# smaller segments defined at input (secHoriz), or over those same smaller
# segments further subdivided by shale content.
#
# INPUT PARAMETERS:
# log data - gamma rays, porosity, density, VP and VS
# core data (if available) - clay volume, porosity, density
# top, bot - upper and lower limits of the section to be analyzed
# phiSand - porosity of homogeneous sand (zero clay content)
# grmin, grmax - minimum and maximum values for converting gamma rays into shale volume
# cutclay - cutoff value for the sand-to-shale transition (grain- to matrix-supported)
# secHoriz - matrix (nFac x 2) containing the upper and lower limits of each stratigraphic unit
# satUncert - =0 turns off the calibration selector for oil-bearing horizons.
# Otherwise iOut must necessarily equal 3
# iOut - facies detail selector for the parameter output: 1, 2
# or 3, as explained above.
# modPhiC - selector for the type of calibration porosity (effective
# porosity): = 1 neutron porosity log; = 2 independent effective
# porosity (e.g. core); = 3 effective porosity
# computed with formula 1 above.
# NOTE: CAUTION: option modPhiC = 3 needs improvement and should be used only in
# very specific cases. In general it produces ill-conditioned matrices.
#
# OUTPUT PARAMETERS:
# calibData_wellName - file containing the reference data for the calibration process
# phiC
# clayC
# rhoC
# resC
# calibCPR_Vel_wellName - file containing the parameters of Han's linear velocity model
# facies
# phiSand
# neutron
# denLitho
# cValuesPhi
# cValuesChi
# covMatrixPar
# coefVP
# coefVS
# fluidProp
# fluidPars
print ("CALLING THE FUNCTION IN ALGO")
#Input parameters
inputPars = agd.get_input()
well_uid = agd.get_well_uid()
log_index = OM.list('log', well_uid)[0]
indexes = log_index.get_index()[0]
z = indexes[0].data
topCL = inputPars.get('topCL', None) #Calibration interval (water-bearing)
botCL = inputPars.get('botCL', None)
top = inputPars.get('top', None) #Inference interval
bot = inputPars.get('bot', None)
indLog = np.argwhere(np.logical_and(z>=top, z<=bot))
indLog = np.squeeze(indLog,1)
#Pressure log input
press_file = np.loadtxt('U:/bkp_Windows06nov2017/Documents/Pocos_Morena/MA20.prs')
z = z[indLog]
gr = inputPars.get('gr', None )
gr = gr[indLog]
gr = logInterp(gr,z)
phi = inputPars.get('phi', None )
phi = phi[indLog]
phi = logInterp(phi,z)
rhoFull = inputPars.get('rho', None )
rho = rhoFull[indLog]
rho = logInterp(rho,z)
res = inputPars.get('res', None )
res = res[indLog]
if (np.all(np.isnan(res))):  # comparing against np.NaN is always False; isnan is needed here
res = np.empty(np.size(indLog))
else:
res = logInterp(res,z)
fac = inputPars.get('fac', None )
fac = fac[indLog]
fac = np.array(np.floor(fac), dtype=int)
fac = logInterp(fac,z)
#Pressure log input
zProv = indexes[0].data
mpp = 0.0980665*press_file[:,0]
mtzp = press_file[:,1]
lpres, cpres = np.shape(press_file)
if (cpres == 3):
mmzp = press_file[:,cpres - 1]
else:
mmzp = np.empty([0,0])
nDP = np.size(mtzp)
tvdss = inputPars.get('tvdss', None )
tvdss = tvdss[indLog]
izp = np.empty(nDP, dtype=int)
if (np.size(mmzp) == 0):
indr = indLog
lindr = np.size(indr) - 1
tol = 0.1
for i in range (0, nDP):
indp = np.argwhere(np.logical_and(tvdss <= (mtzp[i] + tol), tvdss >= (mtzp[i] - tol)))
indp= np.squeeze(indp,1)
cizp = np.argwhere(np.logical_and(indp >= indr[0], indp <= indr[lindr]))
cizp= np.squeeze(cizp,1)
if (np.size(cizp) == 0):
izp[i] = np.argmin(np.abs(tvdss - mtzp[i]))
else:
izp[i] = indp[cizp[0]]
mzp = zProv[izp]
matsort = np.concatenate([[mzp],[mpp], [mtzp],[izp]]).T
indsort = np.argsort(matsort[:,0],0)
matsort = np.array([[matsort[indsort,0]],[matsort[indsort,1]],[matsort[indsort,2]],[matsort[indsort,3]]]).T
matsort = np.squeeze(matsort)
mzp = matsort[:,0]
mpp = matsort[:,1]
mtzp = matsort[:,2]
izp = matsort[:,3].astype(int)
zp = zProv[izp[0]:izp[nDP - 1] + 1]
rhopp = rhoFull[izp[0]:izp[nDP - 1] + 1]
rhopp = logInterp(rhopp, zp)
else:
mzp = mmzp
for i in range (0, nDP):
izp[i] = np.argmin(np.abs(zProv - mzp[i]))
zp = zProv[izp[0]:izp[nDP - 1] + 1]
rhopp = rhoFull[izp[0]:izp[nDP - 1] + 1]
rhopp = logInterp(rhopp, zp)
phiCore = np.empty([0,0])
secHoriz = np.array([top, bot])
#Calibration/output parameters and data
nFac = 4
modPhiC = 1 #selector for the type of calibration data used as effective porosity
#1: neutron log 2: effective porosity log
useCore = 0
iOut = 2
#iuseclay = 0 #selector for the type of clay content to be used
#0: vsh straight from the log 1: clay (computed from GR)
#Density parameters
rhoMin = np.array([2.55, 2.569, 2.623, 2.707]) #There are 4 facies in the reported region
#Resistivity parameters
mP = 2.0 # cementation exponent in clean sands: 1.3 (unconsolidated) - 2.0 (consolidated)
nS = 2.0 # saturation exponent in clean sands, 1.5 - 2.0.
# It is reduced in the presence of lamination and microporosity
aT = 0.8 # constant of Archie's equation
Rw = 0.028 # water resistivity
Rsh = 2.048 # shale resistivity
resCoef = np.array([[mP, nS, aT*Rw, Rsh], [1.5, nS, aT*Rw, Rsh], [2.0, nS, aT*Rw, Rsh], [2.0, nS, aT*Rw, Rsh]])
# Fluid properties and sand/shale matrix section
#Parameters
#pressure calculation
pres_poros = np.mean(mpp) # reference pore pressure for the density calculation
temp = 89.0 # temperature in oC
sal = 102400 # salinity
RGO = 75.0 # gas-oil ratio
API = 29.0 # API gravity
G = 0.835 # specific gravity
#Arrange the parameters in a vector for the function call
fluidPars = np.array([pres_poros, temp, sal, RGO, API, G])
#HERE BEGINS THE secCalibVshPhiRhoRes_vpHan CODE
#Calibration stretch
indCL = np.where(np.logical_and(z>=topCL, z<=botCL))
nData = np.size(z)
# Computation of effective porosity and vsh, estimating the grmin
# and grmax values over the whole package covered by the data
# Transformation of the observed data
# Shale volume from gamma rays
indSh = np.argwhere(fac==4)
indSh= np.squeeze(indSh,1)
indSd = np.argwhere(fac == 1)
indSd= np.squeeze(indSd,1)
if (np.size(indSh) == 0 and np.size(indSd) == 0):
grmax = np.percentile(gr, 95)
grmin = np.percentile(gr, 5)
else:
grmax = np.percentile(gr[indSh], 95) #146.3745
grmin = np.percentile(gr[indSd], 5) #54.2600
claye = vshGRcalc(gr, grmin, grmax)
#For now only modPhiC == 1 is used
if modPhiC == 1:
grlim = grmax
ind = np.where (gr>= grlim)
phiNsh = np.median(phi[ind])
phiEe = np.fmax(0.01, phi - claye*phiNsh)
modPhiC =2
elif (modPhiC == 2 and np.size(phiCore) == 0):
print ("Nao existe a funcao chamada aqui dentro")
#phiEe = phiSd2phiE (zR, claye, phiSand, secHoriz)
elif (modPhiC == 2 and useCore == 1 ):
phiEe = phiCore
#fluidProp matriz com valores para Kf e densidade para fases salmoura,
#oleo e gas, ordenados da seguinte forma:
#bulk_salmoura, bulk_oleo, bulk_gas (modulo variavel com a pressao
#rho_salmoura, rho_oleo, rho_gas (so a densidade sera fixa)
nDP = np.size(mpp)
fluidPropP = np.empty([nDP, 2, 3]) #esqueleto de nDP 'paginas' que guardara
#as matrizes 2x3 de retorno da funcao seismicPropFluids
for i in np.arange(0, nDP):
#atualizar pressao de poro
fluidPars[0] = mpp[i]
fluidPropP[i] = seismicPropFluids(fluidPars)
fluidProp = np.mean(fluidPropP, 0)
rhoFluids = fluidProp[1]
rhoW = rhoFluids[0]
rhoO = rhoFluids[1]
#rock physics model calibration
#select logs only in the water-bearing calibration region
phiC = phiEe[indCL]
clayC = claye[indCL]
rhoCL = rho[indCL]
resCL = res[indCL]
phiCL = phi[indCL]
facCL = fac[indCL]
# Calibration over the whole section
rhoMin_T = np.median(rhoMin)
opt = 2
if (opt == 1):
[cPhi_T, phiMod_T, cRho_T, rhoMod_T, cRes_T, resMod_T] = calibClayPhiRhoRes(phiCL, rhoCL, resCL, clayC, phiC, rhoMin_T, resCoef, modPhiC)
rhoSd = cRho_T[0]
rhoWe = cRho_T[1]
rhoSh = cRho_T[2]
rhoDisp = cRho_T[2]
else:
[cPhi_T, phiMod_T, cRho_T, rhoMod_T, cRes_T, resMod_T] = calibClayPhiRhoRes2(phiCL, rhoCL, resCL, clayC, phiC , rhoW, resCoef, modPhiC)
rhoSd = cRho_T[0]
rhoWe = rhoW
rhoSh = cRho_T[1]
rhoDisp = cRho_T[1]
phiPar_T = np.concatenate([[cPhi_T[0]], [cPhi_T[1]], [cPhi_T[2]]])
denPar_T = np.concatenate([[rhoSd], [rhoWe], [rhoO], [rhoSh], [rhoDisp]])
resPar_T = cRes_T
[phiMod_T, rhoMod_T, resMod_T] = calibCPRRreMod(phiEe, claye, phiPar_T , denPar_T, resPar_T, modPhiC)
facies_T = np.ones((nData,1))
phiMod = np.zeros((nData,1))
rhoMod = np.zeros((nData,1))
resMod = np.zeros((nData,1))
phiPar = np.empty([nFac,3])
denPar = np.empty([nFac,5])
resPar = np.empty([nFac,4])
facH = np.zeros([np.size(facCL),1])
for i in range(0,nFac):
ind = np.argwhere(facCL == i + 1)
ind= np.squeeze(ind,1)
secPhi = phiCL[ind]
secRho = rhoCL[ind]
secRes = resCL[ind]
secClayC = clayC[ind]
secPhiC = phiC[ind]
#[cHan,vpMod(ind),s2] = calibHan(secVP,secPhiC,secClayC);
#coefHanVP(i,:) = cHan';
    # the neutron porosity and density part does not use separate grouping
    # and calibration by shale volume; the coefficients are repeated
    # (identical) for sand and shale
resCoef_line = np.empty((resCoef.shape[0],1))
resCoef_line[:,0] = resCoef[i]
if (opt == 1):
[cPhi, dataPhi, cRho, dataRho, cRes, dataRes] = calibClayPhiRhoRes(secPhi, secRho, secRes, secClayC, secPhiC , rhoMin[i], resCoef_line, modPhiC)
        rhoSd = cRho[0] # use the per-facies fit cRho, not the whole-section cRho_T
        rhoWe = cRho[1]
        rhoSh = cRho[2]
        rhoDisp = cRho[2]
else:
[cPhi, dataPhi, cRho, dataRho, cRes, dataRes] = calibClayPhiRhoRes2(secPhi, secRho, secRes, secClayC, secPhiC , rhoW, resCoef_line, modPhiC)
        rhoSd = cRho[0] # use the per-facies fit cRho, not the whole-section cRho_T
        rhoWe = rhoW
        rhoSh = cRho[1]
        rhoDisp = cRho[1]
phiPar[i] = np.array([cPhi[0], cPhi[1], cPhi[2]])
denPar[i] = np.array([rhoSd, rhoWe, rhoO, rhoSh, rhoDisp])
resPar[i] = cRes
facH[ind] = i + 1
    resPar_line = resPar[i].reshape(1, -1) # one row holding this facies' resistivity coefficients
ind = np.argwhere(fac == i + 1)
    ind = np.squeeze(ind, 1)
passArg = np.array([rhoSd, rhoW, rhoSh])
[dataPhi, dataRho, dataRes] = calibCPRRreMod(phiEe[ind], claye[ind], phiPar[i],passArg, resPar_line, modPhiC)
phiMod[ind,0] = dataPhi
rhoMod[ind,0] = dataRho
resMod[ind] = dataRes
if (iOut == 1):
nOutFac = 1
facies = facies_T
neutron = phiPar_T
denLitho = denPar_T
rhoComp = rhoMod_T
phiComp = phiMod_T
resComp = resMod_T
elif (iOut == 2):
nOutFac = np.ones([nFac,1])
facies = facH
neutron = phiPar
denLitho = denPar
denLitho[:,4] = neutron[:,2]
rhoComp = rhoMod
phiComp = phiMod
resComp = resMod
else:
    raise Exception('Output selector iOut must be 1 or 2')
r2Phi = rsquared(phiComp, phi)
r2Rho = rsquared(rhoComp, rho)
r2Res = rsquared(resComp, res)
print("End of calibration, with the following R2 fits:\n Phi = %7.2f\n RHO = %7.2f\n RES = %7.2f\n" % (r2Phi, r2Rho, r2Res))
# Data output
def calibClayPhiRhoRes(phi, rho, Rt, vsh, phiE, rhoMin, RtCoef, mode):
    """ PURPOSE: compute the parameters of the porosity and density models
    by fitting the neutron porosity and density log data, using shale
    volume and effective porosity information, with 3 distinct options
    for the effective porosity:
    1 - use the neutron log itself as the effective porosity (identity)
    2 - use an independent effective porosity log (e.g. core data)
    3 - use a constant sand porosity (phiSand = 0.25)
    INPUT:
    phi - neutron porosity log
    rho - density log
    Rt - resistivity log
    vsh - shale volume (usually extracted from the gamma-ray log)
    phiE - effective porosity log
    rhoMin - mean grain density of the minerals forming the rock matrix
    RtCoef - resistivity model coefficients; the first two entries are held
    fixed and the last two serve as the initial guess for the fit
    mode - effective porosity selector: 1, 2 or 3, as described above.
    OUTPUT:
    phiPar - fitted parameters of the neutron porosity model
    phiComp - computed neutron porosity log
    rhoPar - fitted parameters of the density model
    rhoComp - computed density log
    RtPar - fitted parameters of the resistivity model
    RtComp - computed resistivity log
    MODELS
    neutron porosity:
    phi = A + 1.0 phiE + C vsh
    density model:
    rho = rhoq + (rhof-rhoq) * phiE + (rhoc-rhoq) * vsh
    resistivity model:
    Rt = ( phiE**m Sw**n ) / ( a Rw (1-chi) ) + ( chi Sw ) / Rsh
    """
    if((mode != 1) and (mode != 2) and (mode != 3)):
        raise Exception("Effective porosity selector must be 1, 2 or 3!")
    n = np.size(vsh)
    if (np.size(phi) != n or np.size(rho) != n):
        raise Exception("Input vectors must all have the same dimensions")
    if ((mode == 1 or mode == 2) and np.size(phiE) != n):
        raise Exception("Effective porosity input vector does not have the proper dimension")
options = np.empty([0,0])
lb = np.array([0.0, 0.5])
ub = np.array([0.5, 4.0])
x0 = RtCoef[2:4,0]
cRes = RtCoef[0:2,0]
phiPar = np.empty(3)
rhoPar = np.empty(3)
if (mode == 1):
        # the neutron log itself provides the effective porosity,
        # following the model phiN = phiE
phiPar = np.array([0.0, 1.0, 0.0])
phiComp = phiE
elif (mode == 2):
        # in this case phiE is an effective porosity vector
col1 = 1 - (phiE + vsh)
A = np.concatenate([[col1], [phiE], [vsh]]).T
xPhi2 = fitNorm1(A,phi,10)
        # model parameters for the neutron porosity fit
phiPar[0] = xPhi2[0]
phiPar[1] = xPhi2[1]
phiPar[2] = xPhi2[2]
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
    elif (mode == 3):
        phiSand = 0.25
        # in this case a constant sand porosity (phiSand) is used
        col1 = 1 - (phiSand + vsh)
        col2 = np.ones(n)*phiSand
        A = np.concatenate([[col1], [col2], [vsh]]).T
        xPhi2 = fitNorm1(A,phi,10)
        # model parameters for the neutron porosity fit
        phiPar[0] = xPhi2[0]
        phiPar[1] = xPhi2[1]
        phiPar[2] = xPhi2[2]
        phiComp = col1 * phiPar[0] + col2 * phiPar[1] + vsh * phiPar[2] # col2, consistent with the design matrix A
vecConc = vsh*(1-phiE)
    B = np.concatenate([[phiE], [vecConc]]).T # transpose so rows are samples, matching the other fitNorm1 calls
xRho1 = fitNorm1(B, (rho - rhoMin), 10)
rhoPar[0] = rhoMin
rhoPar[1] = xRho1[0] + rhoMin
rhoPar[2] = xRho1[1] + rhoMin
rhoComp = np.dot(B,xRho1) + rhoMin
    xRes = scipy.optimize.leastsq(ofSimandouxPhiChiSw100, x0, args=(Rt, cRes, phiE, vsh))[0] # check behavior without lb and ub (leastsq does not support bounds)
RtPar = np.concatenate([cRes, xRes])
RtPar = RtPar.reshape(1, RtPar.size)
facies = np.ones((n,1))
RtComp = dCompSimandouxPhiChiSw100(phiE,vsh,facies,RtPar)
return phiPar, phiComp, rhoPar, rhoComp, RtPar, RtComp
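# Illustrative sketch (not in the original file): forward evaluation of the
# resistivity model quoted in the docstring above, assuming Sw = 1 in the
# water-bearing calibration zone. The function name and parameter order are
# assumptions; note that in the usual Simandoux form the right-hand side is a
# conductivity rather than a resistivity.
def simandoux_rt_sketch(phiE, chi, m, n, aRw, Rsh, Sw=1.0):
    # literal transcription of: Rt = (phiE**m Sw**n)/(a Rw (1-chi)) + (chi Sw)/Rsh
    return (phiE**m * Sw**n) / (aRw * (1.0 - chi)) + (chi * Sw) / Rsh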
def calibClayPhiRhoRes2(phi, rho, Rt, vsh, phiE, rhoWater, RtCoef, mode):
    """ PURPOSE: compute the parameters of the porosity and density models
    by fitting the neutron porosity and density log data, using shale
    volume and effective porosity information, with 3 distinct options
    for the effective porosity:
    1 - use the neutron log itself as the effective porosity (identity)
    2 - use an independent effective porosity log (e.g. core data)
    3 - use a constant sand porosity (phiSand = 0.25)
    INPUT:
    phi - neutron porosity log
    rho - density log
    Rt - resistivity log
    vsh - shale volume (usually extracted from the gamma-ray log)
    phiE - effective porosity log
    rhoWater - water density
    RtCoef - resistivity model coefficients; the first two entries are held
    fixed and the last two serve as the initial guess for the fit
    mode - effective porosity selector: 1, 2 or 3, as described above.
    OUTPUT:
    phiPar - fitted parameters of the neutron porosity model
    phiComp - computed neutron porosity log
    rhoPar - fitted parameters of the density model
    rhoComp - computed density log
    RtPar - fitted parameters of the resistivity model
    RtComp - computed resistivity log
    MODELS
    neutron porosity:
    phi = A + 1.0 phiE + C vsh
    density model:
    rho = rhoq + (rhof-rhoq) * phiE + (rhoc-rhoq) * vsh
    resistivity model:
    Rt = ( phiE**m Sw**n ) / ( a Rw (1-chi) ) + ( chi Sw ) / Rsh
    """
    if((mode != 1) and (mode != 2) and (mode != 3)):
        raise Exception("Effective porosity selector must be 1, 2 or 3!")
    n = np.size(vsh)
    if (np.size(phi) != n or np.size(rho) != n):
        raise Exception("Input vectors must all have the same dimensions")
    if ((mode == 1 or mode == 2) and np.size(phiE) != n):
        raise Exception("Effective porosity input vector does not have the proper dimension")
options = np.empty([0,0])
lb = np.array([0.0, 0.5])
ub = np.array([0.5, 4.0])
x0 = RtCoef[2:4,0]
cRes = RtCoef[0:2,0]
phiPar = np.empty(3)
if (mode == 1):
        # the neutron log itself provides the effective porosity,
        # following the model phiN = phiE
phiPar = np.array([0.0, 1.0, 0.0])
phiComp = phiE
elif (mode == 2):
        # in this case phiE is an effective porosity vector
col1 = 1 - (phiE + vsh)
A = np.concatenate([[col1], [phiE], [vsh]]).T
xPhi2 = fitNorm1(A,phi,10)
        # model parameters for the neutron porosity fit
phiPar[0] = xPhi2[0]
phiPar[1] = xPhi2[1]
phiPar[2] = xPhi2[2]
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
    elif (mode == 3):
        phiSand = 0.25
        # in this case a constant sand porosity (phiSand) is used
        col1 = 1 - (phiSand + vsh)
        col2 = np.ones(n)*phiSand
        A = np.concatenate([[col1], [col2], [vsh]]).T
        xPhi2 = fitNorm1(A,phi,10)
        # model parameters for the neutron porosity fit
        phiPar[0] = xPhi2[0]
        phiPar[1] = xPhi2[1]
        phiPar[2] = xPhi2[2]
        phiComp = col1 * phiPar[0] + col2 * phiPar[1] + vsh * phiPar[2] # col2, consistent with the design matrix A
col2 = vsh*(1-phiE)
col1 = (1-vsh)*(1-phiE)
B = np.concatenate([[col1], [col2]]).T
rhoCte = rhoWater * phiE
xRho = fitNorm1(B, (rho - rhoCte),10)
rhoPar = np.empty(2)
rhoPar[0] = xRho[0]
rhoPar[1] = xRho[1]
    rhoComp = np.dot(B, xRho) + rhoCte # add back the water term subtracted before the fit
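    # NOTE (assumption): the original file is truncated at this point; the
    # resistivity fit and return below mirror calibClayPhiRhoRes above.
    xRes = scipy.optimize.leastsq(ofSimandouxPhiChiSw100, x0, args=(Rt, cRes, phiE, vsh))[0]
    RtPar = np.concatenate([cRes, xRes])
    RtPar = RtPar.reshape(1, RtPar.size)
    facies = np.ones((n, 1))
    RtComp = dCompSimandouxPhiChiSw100(phiE, vsh, facies, RtPar)
    return phiPar, phiComp, rhoPar, rhoComp, RtPar, RtComp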
import numpy as np
import numpy.linalg as la
import scipy.sparse as sp
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.sparsefuncs import mean_variance_axis0
from sklearn.preprocessing import Binarizer
from sklearn.preprocessing import KernelCenterer
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import normalize
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import add_dummy_feature
from sklearn import datasets
from sklearn.linear_model.stochastic_gradient import SGDClassifier
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_scaler_1d():
"""Test scaling of dataset along single axis"""
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
def test_scaler_2d_arrays():
"""Test scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
    X[:, 0] = 1.0  # first feature is a constant, non-zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
    assert_array_equal(X_trans.min(axis=0), 0)
    assert_array_equal(X_trans.max(axis=0), 1)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_equal(X_trans.min(axis=0), 1)
assert_array_equal(X_trans.max(axis=0), 2)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
"""Check that StandardScaler.fit does not change input"""
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sp.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sp.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sp.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sp.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sp.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sp.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sp.coo_matrix, sp.csc_matrix, sp.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sp.csr_matrix))
        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
        assert_almost_equal(row_sums[3], 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sp.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sp.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
            for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sp.coo_matrix, sp.csc_matrix, sp.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sp.csr_matrix))
X_norm = toarray(X_norm)
        for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize_errors():
"""Check that invalid arguments yield ValueError"""
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, 0]])
for init in (np.array, sp.csr_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
def test_label_binarizer():
lb = LabelBinarizer()
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# two-class case
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 2, 2, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_multilabel():
lb = LabelBinarizer()
# test input as lists of tuples
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
got = lb.fit_transform(inp)
assert_array_equal(indicator_mat, got)
assert_equal(lb.inverse_transform(got), inp)
# test input as label indicator matrix
lb.fit(indicator_mat)
assert_array_equal(indicator_mat,
lb.inverse_transform(indicator_mat))
# regression test for the two-class multilabel case
lb = LabelBinarizer()
inp = [[1, 0], [0], [1], [0, 1]]
expected = np.array([[1, 1],
[1, 0],
[0, 1],
[1, 1]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_equal([set(x) for x in lb.inverse_transform(got)],
[set(x) for x in inp])
def test_label_binarizer_errors():
"""Check that invalid arguments yield ValueError"""
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
def test_one_hot_encoder():
"""Test OneHotEncoder's fit and transform."""
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raise when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_label_encoder():
"""Test LabelEncoder's transform and inverse_transform methods"""
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
"""Test fit_transform"""
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_string_labels():
"""Test LabelEncoder's transform and inverse_transform methods with
non-numeric labels"""
le = LabelEncoder()
le.fit(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(le.classes_, ["amsterdam", "paris", "tokyo"])
assert_array_equal(le.transform(["tokyo", "tokyo", "paris"]),
[2, 2, 1])
assert_array_equal(le.inverse_transform([2, 2, 1]),
["tokyo", "tokyo", "paris"])
assert_raises(ValueError, le.transform, ["london"])
def test_label_encoder_errors():
"""Check that invalid arguments yield ValueError"""
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
def test_label_binarizer_iris():
lb = LabelBinarizer()
Y = lb.fit_transform(iris.target)
clfs = [SGDClassifier().fit(iris.data, Y[:, k])
for k in range(len(lb.classes_))]
Y_pred = np.array([clf.decision_function(iris.data) for clf in clfs]).T
y_pred = lb.inverse_transform(Y_pred)
accuracy = np.mean(iris.target == y_pred)
y_pred2 = SGDClassifier().fit(iris.data, iris.target).predict(iris.data)
accuracy2 = np.mean(iris.target == y_pred2)
assert_almost_equal(accuracy, accuracy2)
def test_label_binarizer_multilabel_unlabeled():
"""Check that LabelBinarizer can handle an unlabeled sample"""
lb = LabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(lb.fit_transform(y), Y)
def test_center_kernel():
"""Test that KernelCenterer is equivalent to StandardScaler
in feature space"""
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
    K_fit = np.dot(X_fit, X_fit.T)
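    # NOTE (assumption): the original test is truncated here; a minimal sketch of
    # the intended check, comparing KernelCenterer applied to the linear kernel
    # with the kernel of the explicitly centered data.
    centerer = KernelCenterer()
    K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
    K_fit_centered2 = centerer.fit_transform(K_fit)
    assert_array_almost_equal(K_fit_centered, K_fit_centered2)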
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 19 15:08:54 2020
@author: Mateo
"""
# %% Preamble
import numpy as np
from HARK.interpolation import LinearInterp
from HARK.datasets.cpi.us.CPITools import cpi_deflator
from HARK import _log
__all__ = [
"parse_time_params",
"Sabelhaus_Song_cohort_trend",
"Sabelhaus_Song_all_years",
"sabelhaus_song_var_profile",
"Cagetti_income",
"CGM_income",
"parse_income_spec",
"find_profile",
]
# %% Tools for setting time-related parameters
def parse_time_params(age_birth, age_death):
"""
Converts simple statements of the age at which an agent is born and the
    age at which he dies with certainty into the parameters that HARK needs
for figuring out the timing of the model.
Parameters
----------
age_birth : int
Age at which the agent enters the model, e.g., 21.
age_death : int
Age at which the agent dies with certainty, e.g., 100.
Returns
-------
dict
Dictionary with parameters "T_cycle" and "T_age" which HARK expects
and which map to the birth and death ages specified by the user.
"""
# T_cycle is the number of non-terminal periods in the agent's problem
T_cycle = age_death - age_birth
# T_age is the age at which the agents are killed with certainty in
# simulations (at the end of the T_age-th period)
T_age = age_death - age_birth + 1
return {"T_cycle": T_cycle, "T_age": T_age}
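# Illustrative example (not in the original file): an agent born at 21 who dies
# with certainty at 100 maps to parse_time_params(21, 100) ==
# {"T_cycle": 79, "T_age": 80}.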
# %% Tools for finding the mean profiles of permanent income.
def age_log_poly_to_growth_rates(coefs, age_min, age_max):
"""
The deterministic component of permanent income is often expressed as a
log-polynomial of age. In multiple HARK models, this part of the income
process is expressed in a sequence of growth factors 'PermGroFac'.
This function computes growth factors from the coefficients of a
log-polynomial specification
The form of the polynomial is assumed to be
alpha_0 + age/10 * alpha_1 + age^2/100 * alpha_2 + ... + (age/10)^n * alpha_n
Be sure to adjust the coefficients accordingly.
Parameters
----------
coefs : numpy array or list of floats
Coefficients of the income log-polynomial, in ascending degree order
(starting with the constant).
age_min : int
Starting age at which the polynomial applies.
age_max : int
Final age at which the polynomial applies.
Returns
-------
GrowthFac : [float] of length age_max - age_min + 1
List of growth factors that replicate the polynomial.
P0 : float
        Initial level of income implied by the polynomial
"""
# Figure out the degree of the polynomial
deg = len(coefs) - 1
# Create age matrices
age_10 = np.arange(age_min, age_max + 1).reshape(age_max - age_min + 1, 1) / 10
age_mat = np.hstack(list(map(lambda n: age_10 ** n, range(deg + 1))))
    # Find the value of the polynomial
lnYDet = np.dot(age_mat, np.array(coefs))
# Find the starting level
P0 = np.exp(lnYDet[0])
# Compute growth factors
GrowthFac = np.exp(np.diff(lnYDet))
# The last growth factor is nan: we do not know lnYDet(age_max+1)
GrowthFac = np.append(GrowthFac, np.nan)
return GrowthFac.tolist(), P0
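# Illustrative example (not in the original file): for ln(Y) = 1.0 + 0.5*(age/10)
# the implied growth factor is the constant exp(0.5/10) ~= 1.0513 per year of age,
# and P0 = exp(1.0 + 0.5*age_min/10); the final returned factor is NaN by
# construction, since lnYDet(age_max + 1) is unknown.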
def find_PermGroFacs(age_min, age_max, age_ret, AgePolyCoefs, ReplRate):
"""
Finds initial income and sequence of growth factors from a polynomial
specification of log-income, an optional retirement age and a replacement
rate.
Retirement income will be Income_{age_ret} * ReplRate.
Parameters
----------
age_min : int
Initial age at which to compute the income specification.
age_max : int
Maximum age up to which the income process must be specified.
age_ret : int
Age of retirement. Note that retirement happens after labor income is
received. For example, age_ret = 65 then the agent will receive labor
income up to age 65 and retirement benefits starting at age 66.
If age_ret is None, there will be no retirement.
AgePolyCoefs : numpy array or list of floats
Coefficients of the income log-polynomial, in ascending degree order
(starting with the constant). Income follows the specification:
        ln(P)_age = \sum_{i=0}^{deg} (age/10)^i * AgePolyCoefs[i]
ReplRate : float
Replacement rate for retirement income.
Returns
-------
GroFacs : list
List of income growth factors.
Y0 : float
Level of income at age_min
"""
if age_ret is None:
# If there is no retirement, the age polynomial applies for the whole
# lifetime
GroFacs, Y0 = age_log_poly_to_growth_rates(AgePolyCoefs, age_min, age_max)
else:
# First find working age growth rates and starting income
WrkGroFacs, Y0 = age_log_poly_to_growth_rates(AgePolyCoefs, age_min, age_ret)
# Replace the last item, which must be NaN, with the replacement rate
WrkGroFacs[-1] = ReplRate
# Now create the retirement phase
n_ret_years = age_max - age_ret
RetGroFacs = [1.0] * (n_ret_years - 1) + [np.nan]
# Concatenate
GroFacs = WrkGroFacs + RetGroFacs
return GroFacs, Y0
def find_profile(GroFacs, Y0):
"""
Generates a sequence {Y_{t}}_{t=0}^N from an initial Y_0 and a sequence
of growth factors GroFac[n] = Y_{n+1}/Y_n
Parameters
----------
GroFacs : list or numpy array
Growth factors in chronological order.
Y0 : float
initial value of the series.
Returns
-------
Y : numpy array
Array with the values of the series.
"""
factors = np.array([Y0] + GroFacs)
Y = np.cumprod(factors)
return Y
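# Illustrative example (not in the original file): find_profile([1.02, 1.02], 100.0)
# returns array([100., 102., 104.04]); each entry is the previous one scaled by
# its growth factor.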
# %% Tools for life-cycle profiles of income volatility
# The raw results shared by <NAME> contain the following two
# sets of estimates (with and without cohort trends), which we will
# use for constructing the age profiles.
# The first specification contains a cohort trend. The variance of
# (transitory or permanent) shocks to income of a person born in year
# "cohort" and who is now age "age" is
# age_dummy(age) + beta * (cohort - 1926)
# Where we have dummies for ages 27 to 54
Sabelhaus_Song_cohort_trend = {
    "Ages": np.arange(27, 55),
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
CIE xyY Colourspace
===================
Defines the *CIE xyY* colourspace transformations:
- :func:`XYZ_to_xyY`
- :func:`xyY_to_XYZ`
- :func:`xy_to_XYZ`
- :func:`XYZ_to_xy`
See Also
--------
`CIE xyY Colourspace IPython Notebook
<http://nbviewer.ipython.org/github/colour-science/colour-ipython/blob/master/notebooks/models/cie_xyy.ipynb>`_ # noqa
References
----------
.. [1] http://en.wikipedia.org/wiki/CIE_1931_color_space
(Last accessed 24 February 2014)
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.colorimetry import ILLUMINANTS
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['XYZ_to_xyY',
'xyY_to_XYZ',
'xy_to_XYZ',
'XYZ_to_xy']
def XYZ_to_xyY(XYZ,
illuminant=ILLUMINANTS.get(
'CIE 1931 2 Degree Standard Observer').get('D50')):
"""
Converts from *CIE XYZ* colourspace to *CIE xyY* colourspace and reference
*illuminant*.
Parameters
----------
XYZ : array_like, (3,)
*CIE XYZ* colourspace matrix.
illuminant : array_like, optional
Reference *illuminant* chromaticity coordinates.
Returns
-------
ndarray, (3,)
*CIE xyY* colourspace matrix.
Notes
-----
- Input *CIE XYZ* colourspace matrix is in domain [0, 1].
- Output *CIE xyY* colourspace matrix is in domain [0, 1].
References
----------
.. [2] http://www.brucelindbloom.com/Eqn_XYZ_to_xyY.html
(Last accessed 24 February 2014)
Examples
--------
>>> XYZ_to_xyY(np.array([0.1180583421, 0.1034, 0.0515089229]))
array([ 0.4325, 0.3788, 0.1034])
"""
    X, Y, Z = np.ravel(XYZ)
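    # NOTE (assumption): the original file is truncated here; the body below is a
    # minimal sketch of the standard conversion, consistent with the docstring example.
    if X == 0 and Y == 0 and Z == 0:
        return np.array([illuminant[0], illuminant[1], Y])
    else:
        return np.array([X / (X + Y + Z), Y / (X + Y + Z), Y])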
"""
Functions to map between the coordinates in image pixel space and geographical coordinates.
"""
import logging
from typing import Tuple
from types import MethodType # for binding a method dynamically to a class
import numpy
from . import geocoords
from ..io.complex.sicd_elements.blocks import Poly2DType, XYZPolyType
from ..io.DEM.DEM import DTEDList, GeoidHeight, DTEDInterpolator
__classification__ = "UNCLASSIFIED"
__author__ = ("<NAME>", "<NAME>")
#############
# Ground-to-Image (aka Scene-to-Image) projection.
# noinspection PyUnusedLocal
def _validate_coords(coords, sicd):
if not isinstance(coords, numpy.ndarray):
coords = numpy.array(coords, dtype=numpy.float64)
orig_shape = coords.shape
if len(orig_shape) == 1:
coords = numpy.reshape(coords, (1, -1))
if coords.shape[-1] != 3:
raise ValueError(
'The coords array must represent an array of points in ECF coordinates, '
'so the final dimension of coords must have length 3. Have coords.shape = {}'.format(coords.shape))
# TODO: possibly check for coordinates too far from the sicd box?
return coords, orig_shape
def _ground_to_image(coords, coa_proj, uGPN,
SCP, SCP_Pixel, uIPN, sf, row_ss, col_ss, uProj,
row_col_transform, ipp_transform, delta_gp_max, max_iterations):
"""
Basic level helper function.
Parameters
----------
coords : numpy.ndarray|tuple|list
coa_proj : COAProjection
uGPN : numpy.ndarray
SCP : numpy.ndarray
SCP_Pixel : numpy.ndarray
uIPN : numpy.ndarray
sf : float
row_ss : float
col_ss : float
uProj : numpy.ndarray
row_col_transform : numpy.ndarray
ipp_transform : numpy.ndarray
delta_gp_max : float
max_iterations : int
Returns
-------
Tuple[numpy.ndarray, float, int]
* `image_points` - the determined image point array, of size `N x 2`. Following SICD convention,
the upper-left pixel is [0, 0].
* `delta_gpn` - residual ground plane displacement (m).
* `iterations` - the number of iterations performed.
"""
g_n = coords.copy()
im_points = numpy.zeros((coords.shape[0], 2), dtype=numpy.float64)
delta_gpn = numpy.zeros((coords.shape[0],), dtype=numpy.float64)
cont = True
iteration = 0
matrix_transform = numpy.dot(row_col_transform, ipp_transform)
# (3 x 2)*(2 x 2) = (3 x 2)
while cont:
# TODO: is there any point in progressively stopping iteration?
# It doesn't really save much computation time.
# I set it to iterate over everything or nothing.
# project ground plane to image plane iteration
iteration += 1
dist_n = numpy.dot(SCP - g_n, uIPN)/sf # (N, )
i_n = g_n + numpy.outer(dist_n, uProj) # (N, 3)
delta_ipp = i_n - SCP # (N, 3)
ip_iter = numpy.dot(delta_ipp, matrix_transform) # (N, 2)
im_points[:, 0] = ip_iter[:, 0]/row_ss + SCP_Pixel[0]
im_points[:, 1] = ip_iter[:, 1]/col_ss + SCP_Pixel[1]
# transform to ground plane containing the scene points and check how it compares
p_n = _image_to_ground_plane(im_points, coa_proj, g_n, uGPN)
# compute displacement between scene point and this new projected point
diff_n = coords - p_n
disp_pn = numpy.linalg.norm(diff_n, axis=1)
# should we continue iterating?
cont = numpy.any(disp_pn > delta_gp_max) or (iteration <= max_iterations)
if cont:
g_n += diff_n
return im_points, delta_gpn, iteration
def ground_to_image(coords, sicd, delta_gp_max=None, max_iterations=10, block_size=50000,
delta_arp=None, delta_varp=None, range_bias=None, adj_params_frame='ECF'):
"""
Transforms a 3D ECF point to pixel (row/column) coordinates. This is
implemented in accordance with the SICD Image Projections Description Document.
    **Really Scene-To-Image projection.**
Parameters
----------
coords : numpy.ndarray|tuple|list
ECF coordinate to map to scene coordinates, of size `N x 3`.
sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
SICD meta data structure.
delta_gp_max : float|None
Ground plane displacement tol (m). Defaults to 0.1*pixel.
max_iterations : int
maximum number of iterations to perform
block_size : int|None
size of blocks of coordinates to transform at a time
delta_arp : None|numpy.ndarray|list|tuple
ARP position adjustable parameter (ECF, m). Defaults to 0 in each coordinate.
delta_varp : None|numpy.ndarray|list|tuple
VARP position adjustable parameter (ECF, m/s). Defaults to 0 in each coordinate.
range_bias : float|int
Range bias adjustable parameter (m), defaults to 0.
adj_params_frame : str
One of ['ECF', 'RIC_ECF', 'RIC_ECI'], specifying the coordinate frame used for
expressing `delta_arp` and `delta_varp` parameters.
Returns
-------
Tuple[numpy.ndarray, float, int]
* `image_points` - the determined image point array, of size `N x 2`. Following
the SICD convention, he upper-left pixel is [0, 0].
* `delta_gpn` - residual ground plane displacement (m).
* `iterations` - the number of iterations performed.
"""
coords, orig_shape = _validate_coords(coords, sicd)
row_ss = sicd.Grid.Row.SS
col_ss = sicd.Grid.Col.SS
pixel_size = numpy.sqrt(row_ss*row_ss + col_ss*col_ss)
if delta_gp_max is None:
delta_gp_max = 0.1*pixel_size
delta_gp_max = float(delta_gp_max)
if delta_gp_max < 0.01*pixel_size:
delta_gp_max = 0.01*pixel_size
logging.warning('delta_gp_max was less than 0.01*pixel_size, '
'and has been reset to {}'.format(delta_gp_max))
coa_proj = COAProjection(sicd, delta_arp, delta_varp, range_bias, adj_params_frame)
# establishing the basic projection components
SCP_Pixel = sicd.ImageData.SCPPixel.get_array()
uRow = sicd.Grid.Row.UVectECF.get_array() # unit normal in row direction
uCol = sicd.Grid.Col.UVectECF.get_array() # unit normal in column direction
uIPN = numpy.cross(uRow, uCol) # image plane unit normal
uIPN /= numpy.linalg.norm(uIPN) # NB: uRow/uCol may not be perpendicular
cos_theta = numpy.dot(uRow, uCol)
sin_theta = numpy.sqrt(1 - cos_theta*cos_theta)
ipp_transform = numpy.array([[1, -cos_theta], [-cos_theta, 1]], dtype=numpy.float64)/(sin_theta*sin_theta)
row_col_transform = numpy.zeros((3, 2), dtype=numpy.float64)
row_col_transform[:, 0] = uRow
row_col_transform[:, 1] = uCol
SCP = sicd.GeoData.SCP.ECF.get_array()
uGPN = sicd.PFA.FPN.get_array() if sicd.ImageFormation.ImageFormAlgo == 'PFA' \
else geocoords.wgs_84_norm(SCP)
ARP_SCP_COA = sicd.SCPCOA.ARPPos.get_array()
VARP_SCP_COA = sicd.SCPCOA.ARPVel.get_array()
uSPN = sicd.SCPCOA.look*numpy.cross(VARP_SCP_COA, SCP-ARP_SCP_COA)
uSPN /= numpy.linalg.norm(uSPN)
# uSPN - defined in section 3.1 as normal to instantaneous slant plane that contains SCP at SCP COA is
# tangent to R/Rdot contour at SCP. Points away from center of Earth. Use look to establish sign.
sf = float(numpy.dot(uSPN, uIPN)) # scale factor
# prepare the work space
coords_view = numpy.reshape(coords, (-1, 3)) # possibly or make 2-d flatten
num_points = coords_view.shape[0]
if block_size is None or num_points <= block_size:
image_points, delta_gpn, iters = _ground_to_image(
coords_view, coa_proj, uGPN,
SCP, SCP_Pixel, uIPN, sf, row_ss, col_ss, uSPN,
row_col_transform, ipp_transform, delta_gp_max, max_iterations)
else:
image_points = numpy.zeros((num_points, 2), dtype=numpy.float64)
delta_gpn = numpy.zeros((num_points, ), dtype=numpy.float64)
iters = numpy.zeros((num_points, ), dtype=numpy.int16)
# proceed with block processing
start_block = 0
while start_block < num_points:
end_block = min(start_block+block_size, num_points)
image_points[start_block:end_block, :], delta_gpn[start_block:end_block], \
iters[start_block:end_block] = _ground_to_image(
coords_view[start_block:end_block, :], coa_proj, uGPN,
SCP, SCP_Pixel, uIPN, sf, row_ss, col_ss, uSPN,
row_col_transform, ipp_transform, delta_gp_max, max_iterations)
start_block = end_block
if len(orig_shape) == 1:
image_points = numpy.reshape(image_points, (-1,))
elif len(orig_shape) > 1:
image_points = numpy.reshape(image_points, orig_shape[:-1]+(2, ))
delta_gpn = numpy.reshape(delta_gpn, orig_shape[:-1])
iters = numpy.reshape(iters, orig_shape[:-1])
return image_points, delta_gpn, iters
def ground_to_image_geo(coords, sicd, **kwargs):
"""
Transforms a 3D Lat/Lon/HAE point to pixel (row/column) coordinates.
This is implemented in accordance with the SICD Image Projections Description Document.
Parameters
----------
coords : numpy.ndarray|tuple|list
Lat/Lon/HAE coordinate to map to scene coordinates, of size `N x 3`.
sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
SICD meta data structure.
kwargs : dict
See the key word arguments of :func:`ground_to_image`
Returns
-------
Tuple[numpy.ndarray, float, int]
* `image_points` - the determined image point array, of size `N x 2`. Following SICD convention,
the upper-left pixel is [0, 0].
* `delta_gpn` - residual ground plane displacement (m).
* `iterations` - the number of iterations performed.
"""
return ground_to_image(geocoords.geodetic_to_ecf(coords), sicd, **kwargs)
############
# Image-To-Ground projections
def _ric_ecf_mat(rarp, varp, frame_type):
"""
Computes the ECF transformation matrix for RIC frame.
Parameters
----------
rarp : numpy.ndarray
varp : numpy.ndarray
frame_type : str
the final three characters should be one of ['ECI', 'ECF']
Returns
-------
numpy.ndarray
the RIC transform matrix (array)
"""
# Angular velocity of earth in radians/second, not including precession
w = 7292115.1467E-11
typ = frame_type.upper()[-3:]
vi = varp if typ == 'ECF' else varp + numpy.cross([0, 0, w], rarp)
r = rarp/numpy.linalg.norm(rarp)
c = numpy.cross(r, vi)
c /= numpy.linalg.norm(c) # NB: perpendicular to r
i = numpy.cross(c, r)
# this is the cross of two perpendicular normal vectors, so normal
return numpy.array([r, i, c], dtype=numpy.float64)
class COAProjection(object):
"""
The COA projection object - provide common projection functionality for all Image-to-R/Rdot projection.
"""
def __init__(self, sicd, delta_arp=None, delta_varp=None, range_bias=None, adj_params_frame='ECF'):
"""
Parameters
----------
sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
The SICD metadata structure.
delta_arp : None|numpy.ndarray|list|tuple
ARP position adjustable parameter (ECF, m). Defaults to 0 in each coordinate.
delta_varp : None|numpy.ndarray|list|tuple
VARP position adjustable parameter (ECF, m/s). Defaults to 0 in each coordinate.
range_bias : float|int
Range bias adjustable parameter (m), defaults to 0.
adj_params_frame : str
One of ['ECF', 'RIC_ECF', 'RIC_ECI'], specifying the coordinate frame used for
expressing `delta_arp` and `delta_varp` parameters.
"""
if not sicd.can_project_coordinates():
raise ValueError('Insufficient metadata populated to formulate projection.')
time_coa_poly = sicd.Grid.TimeCOAPoly
# fall back to approximation if TimeCOAPoly is not populated
if time_coa_poly is None:
time_coa_poly = Poly2DType(Coefs=[[sicd.Timeline.CollectDuration/2, ], ])
logging.warning(
'Using (constant) approximation to TimeCOAPoly, which may result in poor projection results.')
self.time_coa_poly = time_coa_poly # type: Poly2DType
self.arp_poly = sicd.Position.ARPPoly # type: XYZPolyType
self.varp_poly = self.arp_poly.derivative(der_order=1, return_poly=True) # type: XYZPolyType
self.row_ss = sicd.Grid.Row.SS # type: float
self.col_ss = sicd.Grid.Col.SS # type: float
self.first_row = sicd.ImageData.FirstRow # type: int
self.first_col = sicd.ImageData.FirstCol # type: int
self.scp_row = sicd.ImageData.SCPPixel.Row # type: int
self.scp_col = sicd.ImageData.SCPPixel.Col # type: int
if delta_arp is None:
delta_arp = numpy.array([0, 0, 0], dtype=numpy.float64)
if not isinstance(delta_arp, numpy.ndarray):
delta_arp = numpy.array(delta_arp, dtype=numpy.float64)
if delta_arp.shape != (3, ):
raise ValueError('delta_arp must have shape (3, ). Got {}'.format(delta_arp.shape))
if delta_varp is None:
delta_varp = numpy.array([0, 0, 0], dtype=numpy.float64)
if not isinstance(delta_varp, numpy.ndarray):
delta_varp = numpy.array(delta_varp, dtype=numpy.float64)
if delta_varp.shape != (3, ):
raise ValueError('delta_varp must have shape (3, ). Got {}'.format(delta_varp.shape))
if adj_params_frame in ['RIC_ECI', 'RIC_ECF']:
if sicd.SCPCOA.ARPPos is None or sicd.SCPCOA.ARPVel is None:
raise ValueError(
'The adj_params_frame is of RIC type, but one of SCPCOA.ARPPos or '
'SCPCOA.ARPVel is not populated.')
ARP_SCP_COA = sicd.SCPCOA.ARPPos.get_array()
VARP_SCP_COA = sicd.SCPCOA.ARPVel.get_array()
ric_matrix = _ric_ecf_mat(ARP_SCP_COA, VARP_SCP_COA, adj_params_frame)
delta_arp = ric_matrix.dot(delta_arp)
delta_varp = ric_matrix.dot(delta_varp)
self.delta_arp = delta_arp # type: numpy.ndarray
self.delta_varp = delta_varp # type: numpy.ndarray
if range_bias is None:
range_bias = 0.0
else:
range_bias = float(range_bias)
self.range_bias = range_bias # type: float
# bind the method specific intermediate projection method
self._method_proj = MethodType(_get_type_specific_projection(sicd), self)
def _init_proj(self, im_points):
"""
Parameters
----------
im_points : numpy.ndarray
Returns
-------
Tuple[numpy.ndarray,...]
"""
row_meters = (im_points[:, 0] + self.first_row - self.scp_row)*self.row_ss
col_meters = (im_points[:, 1] + self.first_col - self.scp_col)*self.col_ss
t_coa = self.time_coa_poly(row_meters, col_meters)
# calculate aperture reference position and velocity at target time
arp_coa = self.arp_poly(t_coa)
varp_coa = self.varp_poly(t_coa)
return row_meters, col_meters, t_coa, arp_coa, varp_coa
def projection(self, im_points):
"""
Perform the projection from image coordinates to R/Rdot coordinates.
Parameters
----------
im_points : numpy.ndarray
This array of image point coordinates, **expected to have shape (N, 2)**.
Returns
-------
Tuple[numpy.ndarray,numpy.ndarray,numpy.ndarray,numpy.ndarray,numpy.ndarray]
* `r_tgt_coa` - range to the ARP at COA
* `r_dot_tgt_coa` - range rate relative to the ARP at COA
* `t_coa` - center of aperture time since CDP start for input ip
* `arp_coa` - aperture reference position at t_coa
* `varp_coa` - velocity at t_coa
"""
row_meters, col_meters, t_coa, arp_coa, varp_coa = self._init_proj(im_points)
r_tgt_coa, r_dot_tgt_coa = self._method_proj(row_meters, col_meters, t_coa, arp_coa, varp_coa)
# adjust parameters (TODO: after all the calculations?)
arp_coa += self.delta_arp
varp_coa += self.delta_varp
r_tgt_coa += self.range_bias
return r_tgt_coa, r_dot_tgt_coa, t_coa, arp_coa, varp_coa
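# Illustrative usage sketch (not in the original file; assumes `sicd` is a
# populated SICDType for which sicd.can_project_coordinates() is True):
#   coa_proj = COAProjection(sicd)
#   r, r_dot, t_coa, arp, varp = coa_proj.projection(numpy.array([[0., 0.], [10., 20.]]))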
def _get_type_specific_projection(sicd):
"""
Gets an intermediate method specific projection method with six required
calling arguments (self, row_meters, col_meters, t_coa, arp_coa, varp_coa).
Parameters
----------
sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
Returns
-------
callable
"""
# triple-nested function - it was conceptually clearest...
def pfa_projection():
SCP = sicd.GeoData.SCP.ECF.get_array()
pfa = sicd.PFA
polar_ang_poly = pfa.PolarAngPoly
spatial_freq_sf_poly = pfa.SpatialFreqSFPoly
polar_ang_poly_der = polar_ang_poly.derivative(der_order=1, return_poly=True)
spatial_freq_sf_poly_der = spatial_freq_sf_poly.derivative(der_order=1, return_poly=True)
# noinspection PyUnusedLocal, PyIncorrectDocstring
def method_projection(instance, row_meters, col_meters, t_coa, arp_coa, varp_coa):
"""
PFA specific intermediate projection.
Parameters
----------
row_meters : numpy.ndarray
col_meters : numpy.ndarray
t_coa : numpy.ndarray
arp_coa : numpy.ndarray
varp_coa : numpy.ndarray
Returns
-------
Tuple[numpy.ndarray, numpy.ndarray]
"""
ARP_minus_SCP = arp_coa - SCP
rSCPTgtCoa = numpy.linalg.norm(ARP_minus_SCP, axis=-1)
rDotSCPTgtCoa = numpy.sum(varp_coa * ARP_minus_SCP, axis=-1) / rSCPTgtCoa
thetaTgtCoa = polar_ang_poly(t_coa)
dThetaDtTgtCoa = polar_ang_poly_der(t_coa)
# Compute polar aperture scale factor (KSF) and derivative wrt polar angle
ksfTgtCoa = spatial_freq_sf_poly(thetaTgtCoa)
dKsfDThetaTgtCoa = spatial_freq_sf_poly_der(thetaTgtCoa)
# Compute spatial frequency domain phase slopes in Ka and Kc directions
# NB: sign for the phase may be ignored as it is cancelled in a subsequent computation.
dPhiDKaTgtCoa = row_meters * numpy.cos(thetaTgtCoa) + col_meters * numpy.sin(thetaTgtCoa)
dPhiDKcTgtCoa = -row_meters * numpy.sin(thetaTgtCoa) + col_meters * numpy.cos(thetaTgtCoa)
# Compute range relative to SCP
deltaRTgtCoa = ksfTgtCoa * dPhiDKaTgtCoa
# Compute derivative of range relative to SCP wrt polar angle.
# Scale by derivative of polar angle wrt time.
dDeltaRDThetaTgtCoa = dKsfDThetaTgtCoa * dPhiDKaTgtCoa + ksfTgtCoa * dPhiDKcTgtCoa
deltaRDotTgtCoa = dDeltaRDThetaTgtCoa * dThetaDtTgtCoa
return rSCPTgtCoa + deltaRTgtCoa, rDotSCPTgtCoa + deltaRDotTgtCoa
return method_projection
def rgazcomp_projection():
SCP = sicd.GeoData.SCP.ECF.get_array()
az_sf = sicd.RgAzComp.AzSF
# noinspection PyUnusedLocal, PyIncorrectDocstring
def method_projection(instance, row_meters, col_meters, t_coa, arp_coa, varp_coa):
"""
RgAzComp specific intermediate projection.
Parameters
----------
row_meters : numpy.ndarray
col_meters : numpy.ndarray
t_coa : numpy.ndarray
arp_coa : numpy.ndarray
varp_coa : numpy.ndarray
Returns
-------
Tuple[numpy.ndarray, numpy.ndarray]
"""
ARP_minus_SCP = arp_coa - SCP
rSCPTgtCoa = numpy.linalg.norm(ARP_minus_SCP, axis=-1)
rDotSCPTgtCoa = numpy.sum(varp_coa*ARP_minus_SCP, axis=-1)/rSCPTgtCoa
deltaRTgtCoa = row_meters
deltaRDotTgtCoa = -numpy.linalg.norm(varp_coa, axis=-1)*az_sf*col_meters
return rSCPTgtCoa + deltaRTgtCoa, rDotSCPTgtCoa + deltaRDotTgtCoa
return method_projection
def inca_projection():
inca = sicd.RMA.INCA
r_ca_scp = inca.R_CA_SCP
time_ca_poly = inca.TimeCAPoly
drate_sf_poly = inca.DRateSFPoly
# noinspection PyUnusedLocal, PyIncorrectDocstring
def method_projection(instance, row_meters, col_meters, t_coa, arp_coa, varp_coa):
"""
INCA specific intermediate projection.
Parameters
----------
row_meters : numpy.ndarray
col_meters : numpy.ndarray
t_coa : numpy.ndarray
arp_coa : numpy.ndarray
varp_coa : numpy.ndarray
Returns
-------
Tuple[numpy.ndarray, numpy.ndarray]
"""
# compute range/time of closest approach
R_CA_TGT = r_ca_scp + row_meters # Range at closest approach
t_CA_TGT = time_ca_poly(col_meters) # Time of closest approach
# Compute ARP velocity magnitude (actually squared, since that's how it's used) at t_CA_TGT
VEL2_CA_TGT = numpy.sum(instance.varp_poly(t_CA_TGT)**2, axis=-1)
# Compute the Doppler Rate Scale Factor for image Grid location
DRSF_TGT = drate_sf_poly(row_meters, col_meters)
# Difference between COA time and CA time
dt_COA_TGT = t_coa - t_CA_TGT
r_tgt_coa = numpy.sqrt(R_CA_TGT*R_CA_TGT + DRSF_TGT*VEL2_CA_TGT*dt_COA_TGT*dt_COA_TGT)
r_dot_tgt_coa = (DRSF_TGT/r_tgt_coa)*VEL2_CA_TGT*dt_COA_TGT
return r_tgt_coa, r_dot_tgt_coa
return method_projection
def plane_projection():
SCP = sicd.GeoData.SCP.ECF.get_array()
uRow = sicd.Grid.Row.UVectECF.get_array()
        uCol = sicd.Grid.Col.UVectECF.get_array()
# noinspection PyUnusedLocal, PyIncorrectDocstring
def method_projection(instance, row_meters, col_meters, t_coa, arp_coa, varp_coa):
"""
Plane specific intermediate projection.
Parameters
----------
row_meters : numpy.ndarray
col_meters : numpy.ndarray
t_coa : numpy.ndarray
arp_coa : numpy.ndarray
varp_coa : numpy.ndarray
Returns
-------
Tuple[numpy.ndarray, numpy.ndarray]
"""
ARP_minus_IPP = arp_coa - (SCP + numpy.outer(row_meters, uRow) + numpy.outer(col_meters, uCol))
r_tgt_coa = numpy.linalg.norm(ARP_minus_IPP, axis=-1)
r_dot_tgt_coa = numpy.sum(varp_coa * ARP_minus_IPP, axis=-1)/r_tgt_coa
return r_tgt_coa, r_dot_tgt_coa
return method_projection
# NB: sicd.can_project_coordinates() has been called, so all required attributes
# must be populated
if sicd.Grid.Type == 'RGAZIM':
if sicd.ImageFormation.ImageFormAlgo == 'PFA':
return pfa_projection()
elif sicd.ImageFormation.ImageFormAlgo == 'RGAZCOMP':
return rgazcomp_projection()
elif sicd.Grid.Type == 'RGZERO':
return inca_projection()
elif sicd.Grid.Type in ['XRGYCR', 'XCTYAT', 'PLANE']:
return plane_projection()
else:
# NB: this will have been noted by sicd.can_project_coordinates(), but is
# here for completeness
        raise ValueError('Unhandled Grid.Type {}'.format(sicd.Grid.Type))
def _validate_im_points(im_points, sicd):
"""
Parameters
----------
im_points : numpy.ndarray|list|tuple
sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
Returns
-------
numpy.ndarray
"""
if im_points is None:
raise ValueError('The argument cannot be None')
if not isinstance(im_points, numpy.ndarray):
im_points = numpy.array(im_points, dtype=numpy.float64)
orig_shape = im_points.shape
if len(im_points.shape) == 1:
im_points = numpy.reshape(im_points, (1, -1))
if im_points.shape[-1] != 2:
raise ValueError(
'The im_points array must represent an array of points in pixel coordinates, '
'so the final dimension of im_points must have length 2. Have im_points.shape = {}'.format(im_points.shape))
# check to ensure that the entries of im_points are not ridiculous
rows = sicd.ImageData.NumRows
cols = sicd.ImageData.NumCols
row_bounds = (-rows/2, 3*rows/2)
col_bounds = (-cols/2, 3*cols/2)
if numpy.any(
(im_points[:, 0] < row_bounds[0]) | (im_points[:, 0] > row_bounds[1]) |
(im_points[:, 1] < col_bounds[0]) | (im_points[:, 1] > col_bounds[1])):
        raise ValueError(
            'The sicd has {} rows and {} cols. The image_to_ground projection '
            'requires row coordinates in the range {} and column coordinates '
            'in the range {}'.format(rows, cols, row_bounds, col_bounds))
return im_points, orig_shape
def image_to_ground(im_points, sicd, block_size=50000, projection_type='HAE', **kwargs):
"""
Transforms image coordinates to ground plane ECF coordinate via the algorithm(s)
described in SICD Image Projections document.
Parameters
----------
im_points : numpy.ndarray|list|tuple
(row, column) coordinates of N points in image (or subimage if FirstRow/FirstCol are nonzero).
Following SICD convention, the upper-left pixel is [0, 0].
sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
SICD meta data structure.
block_size : None|int
Size of blocks of coordinates to transform at a time. The entire array will be
transformed as a single block if `None`.
projection_type : str
One of ['PLANE', 'HAE', 'DEM'].
kwargs : dict
keyword arguments relevant for the given projection type. See image_to_ground_plane/hae/dem methods.
Returns
-------
numpy.ndarray
Physical coordinates (in ECF) corresponding input image coordinates. The interpretation
or meaning of the physical coordinates depends on `projection_type` chosen.
"""
p_type = projection_type.upper()
if p_type == 'PLANE':
return image_to_ground_plane(im_points, sicd, block_size=block_size, **kwargs)
elif p_type == 'HAE':
return image_to_ground_hae(im_points, sicd, block_size=block_size, **kwargs)
elif p_type == 'DEM':
return image_to_ground_dem(im_points, sicd, block_size=block_size, **kwargs)
else:
raise ValueError('Got unrecognized projection type {}'.format(projection_type))
def image_to_ground_geo(im_points, sicd, **kwargs):
"""
Transforms image coordinates to ground plane Lat/Lon/HAE coordinate via the algorithm(s)
described in SICD Image Projections document.
Parameters
----------
im_points : numpy.ndarray|list|tuple
(row, column) coordinates of N points in image (or subimage if FirstRow/FirstCol are nonzero).
Following SICD convention, the upper-left pixel is [0, 0].
sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
SICD meta data structure.
kwargs : dict
See the keyword arguments in :func:`image_to_ground`.
Returns
-------
numpy.ndarray
Ground Plane Point (in Lat/Lon/HAE coordinates) along the R/Rdot contour.
"""
return geocoords.ecf_to_geodetic(image_to_ground(im_points, sicd, **kwargs))
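# A minimal sketch of driving the projections above (illustrative only; `sicd`
# is assumed to be a populated SICDType from a sarpy complex reader, and the
# pixel points are hypothetical):
def _example_image_to_ground(sicd):
    im_points = numpy.array([[0, 0], [100, 250]], dtype=numpy.float64)
    ecf = image_to_ground(im_points, sicd, projection_type='HAE')
    llh = image_to_ground_geo(im_points, sicd)  # the same points as Lat/Lon/HAE
    return ecf, llh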
#####
# Image-to-Ground Plane
def _image_to_ground_plane_perform(r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, gref, uZ):
"""
Parameters
----------
    r_tgt_coa : numpy.ndarray
    r_dot_tgt_coa : numpy.ndarray
    arp_coa : numpy.ndarray
    varp_coa : numpy.ndarray
    gref : numpy.ndarray
    uZ : numpy.ndarray
Returns
-------
numpy.ndarray
"""
# Solve for the intersection of a R/Rdot contour and a ground plane.
arpZ = numpy.sum((arp_coa - gref)*uZ, axis=-1)
arpZ[arpZ > r_tgt_coa] = numpy.nan
# ARP ground plane nadir
aGPN = arp_coa - numpy.outer(arpZ, uZ)
# Compute ground plane distance (gd) from ARP nadir to circle of const range
gd = numpy.sqrt(r_tgt_coa*r_tgt_coa - arpZ*arpZ)
# Compute sine and cosine of grazing angle
cosGraz = gd/r_tgt_coa
sinGraz = arpZ/r_tgt_coa
# Velocity components normal to ground plane and parallel to ground plane.
vMag = numpy.linalg.norm(varp_coa, axis=-1)
vZ = numpy.dot(varp_coa, uZ)
vX = numpy.sqrt(vMag*vMag - vZ*vZ) # Note: For Vx = 0, no Solution
# Orient X such that Vx > 0 and compute unit vectors uX and uY
uX = ((varp_coa - numpy.outer(vZ, uZ)).T/vX).T
uY = numpy.cross(uZ, uX)
# Compute cosine of azimuth angle to ground plane point
cosAz = (-r_dot_tgt_coa+vZ*sinGraz) / (vX * cosGraz)
cosAz[numpy.abs(cosAz) > 1] = numpy.nan # R/Rdot combination not possible in given plane
# Compute sine of azimuth angle. Use LOOK to establish sign.
    look = numpy.sign(numpy.dot(numpy.cross(arp_coa - gref, varp_coa), uZ))
    sinAz = look*numpy.sqrt(1 - cosAz*cosAz)
    # Compute the ground plane point along the R/Rdot contour
    return aGPN + uX*(gd*cosAz)[:, numpy.newaxis] + uY*(gd*sinAz)[:, numpy.newaxis]
from numpy import pi, dot, exp, einsum
import numpy as np
class PopulationMonteCarlo(object):
"""
A Population Monte Carlo (PMC) sampler,
which combines expectation-maximization and
importance sampling
    This code follows the notation and methodology in
http://arxiv.org/pdf/0903.0837v1.pdf
"""
def __init__(self, posterior, n, start, sigma, pool=None, quiet=False, student=False, nu=2.0):
"""
posterior: the posterior function
n: number of components to use in the mixture
start: estimated mean of the distribution
sigma: estimated covariance matrix
pool (optional): an MPI or multiprocessing worker pool
"""
self.posterior = posterior
mu = np.random.multivariate_normal(start, sigma, size=n)
if student:
self.components = [StudentsTComponent(1.0/n, m, sigma, nu) for m in mu]
else:
self.components = [GaussianComponent(1.0/n, m, sigma) for m in mu]
self.pool = pool
self.quiet=quiet #not currently used
def sample(self, n, update=True, do_kill=True):
"Draw a sample from the Gaussian mixture and update the mixture"
self.kill_count = n*1./len(self.components)/50.
self.kill = [False for c in self.components]
#draw sample from current mixture
component_index, x = self.draw(n)
#calculate likelihoods
if self.pool is None:
samples = list(map(self.posterior, x))
else:
samples = self.pool.map(self.posterior, x)
post = np.array([s[0] for s in samples])
extra = [s[1] for s in samples]
post[np.isnan(post)] = -np.inf
#update components
log_weights = self.update_components(x, post, update, do_kill)
return x, post, extra, component_index, log_weights
def draw(self, n):
"Draw a sample from the Gaussian mixture"
A = [m.alpha for m in self.components]
A = np.array(A)
A/=A.sum()
#Components to draw from
N = np.arange(len(self.components))
C = np.random.choice(N, size=n, replace=True, p=A)
for i in N:
count = np.sum(C==i)
if count<self.kill_count:
self.kill[i] = True
print("Component %d less than kill count (%d < %d)" % (i, count, self.kill_count))
x = np.array([self.components[c].sample() for c in C])
return C, x
def update_components(self, x, log_post, update, do_kill):
"Equations 13-16 of arxiv.org 0903.0837v1"
#x #n_sample*n_dim
log_Aphi = np.array([np.log(m.alpha) + m.log_phi(x) for m in self.components]) #n-component * n_sample
Aphi = np.array([m.alpha*m.phi(x) for m in self.components]) #n-component * n_sample
post = np.exp(log_post)
w = post/Aphi.sum(0) #n_sample
logw = log_post - np.log(Aphi.sum(0))
if not update:
return logw
w_norm = w/w.sum() #n_sample
        logw_norm = np.log(w_norm)
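# A minimal sketch of driving the sampler (illustrative only; it assumes the
# GaussianComponent class with alpha, sample(), phi() and log_phi() methods is
# defined elsewhere in this module). The toy posterior is a 2D standard normal:
def _example_pmc(n_samples=1000):
    def log_posterior(p):
        return -0.5 * np.dot(p, p), None   # (log-density, extra info)
    pmc = PopulationMonteCarlo(log_posterior, 3, np.zeros(2), np.eye(2))
    x, post, extra, comp_index, logw = pmc.sample(n_samples, update=False)
    return x, logw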
import numpy as np
from sympy import zeros, eye, simplify, sqrt
from sympy import Matrix as mat
from ..genericas import print_verbose, norma, matriz_inversa
from .funcs_gram_schmidt import gram_schmidt
def householder(x, y, signo="+", normalizar=False):
"""Aplica la transformación de Householder. x e y pueden ser vectores fila o columna,
en cuyo caso aplicamos la transformación de una forma u otra.
Args:
x (vector): vector origen
y (vector): vector destino
signo (str, optional): + o -, si - aplica la transformación inversa. Defaults to "+".
normalizar (bool, optional): Normaliza los vectores para la transformación. Defaults to False.
Returns:
dict: {'H': la aplicación de transformación, 'e': el vector de eje de cambio.}
"""
if signo == "+":
mult = 1
else:
mult = -1
if normalizar:
x = simplify(x / norma(x))
y = simplify(y / norma(y))
if simplify(norma(x)) != simplify(norma(y)):
print(
f"AVISO!!! x tiene norma {norma(x)} e y tiene norma {norma(y)}. "
"Para Householder las normas tienen que ser iguales."
)
if simplify(norma(x - y)) == 0: # x==y
e = zeros(x.shape[0], x.shape[1])
else:
e = mult * (x - y) / (norma(x - y))
    if (e.shape[0] == 1) and (e.shape[1] >= 1):  # The vector is a row vector:
H = eye(x.shape[1]) - 2 * e.T * e
else:
H = eye(x.shape[0]) - 2 * e * e.T
if (e.shape[0] == 1) and (e.shape[1] >= 1) and (simplify(x * H) != y):
print(
"AVISO FILAS!!! xH != y. Para Householder las normas tienen que ser iguales."
)
elif (e.shape[1] == 1) and (e.shape[0] >= 1) and (simplify(H * x) != y):
print("AVISO!!! Hx != y. Para Householder las normas tienen que ser iguales.")
return simplify(H), simplify(e)
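# A minimal usage sketch (assuming the package-relative imports above resolve;
# the vectors are hypothetical and chosen to have equal norms):
def _example_householder():
    x = mat([[3], [4]])   # norm 5
    y = mat([[5], [0]])   # norm 5, equal to x's, as Householder requires
    H, e = householder(x, y)
    assert simplify(H * x) == y   # the reflection maps x onto y
    return H, e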
def factorizacion_QR(m, verbose=True, metodo="gram_schmidt"):
"""Aplica el método QR para la factorización de la matriz m. m = QR, con Q ortogonal y R triangular superior.
Args:
m (matriz): matriz a factorizar.
verbose (bool, optional): Imprime mensajes informativos. Defaults to True.
metodo (str, optional): "gram_schmidt" o "householder".
Householder funciona mejor para vectores proximos,
donde gram-schmidt deja de ser estable. Defaults to "gram_schmidt".
Returns:
dict: {'Q', 'R' , 'D'} ('D' en gram-schmidt solo).
"""
if metodo == "gram_schmidt":
print_verbose("Aplicamos QR con Gram Schmidt", verbose)
dict_gs = gram_schmidt(m)
P, C = dict_gs["P"], dict_gs["c"]
D = zeros(m.shape[0], m.shape[1])
for col in range(m.shape[0]):
D[col, col] = sqrt((P[:, col].T * P[:, col])[0])
Q = simplify(P * (matriz_inversa(D)))
R = simplify(D * (eye(m.shape[0]) + C))
        print_verbose(f"Q is \n{np.array(Q)}", verbose)
        return {"Q": Q, "R": R, "D": D}
# This script contains the Brianmodel class
# Calling makeneuron_ca() on a Brianmodel object will create a biophysical neuron
# Multiple other functions allow for plotting, animating, ...
from __future__ import division
#folder with parameters, equations and morphology
import os, sys
mod_path = os.path.abspath(os.path.join('..','Model'))
sys.path.append(mod_path)
from copy import deepcopy
import itertools as itools
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation
plt.rcParams['animation.ffmpeg_path'] = '/usr/local/bin/ffmpeg'
import matplotlib.colors as colorz
import matplotlib.cm as clrm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import brian2 as br2
from brian2 import uF, cm, um, ohm, ms, siemens, mV, nA, us,psiemens
# This is the 3D plotting toolkit
from mpl_toolkits.mplot3d import Axes3D
#import parameters and equations for neuron
from oo_Parameters import *
from oo_equations_AMPAplast import *
from MorphologyData import *
from Visualisation_functions import *
from oo_initScripts import set_init_nrn
br2.start_scope()
br2.defaultclock.dt = defaultclock.dt
class BRIANModel(object):
"""
Neuron object in brian2
"""
def __init__(self, swc_model):
"""
Parameters
----------
swc_model: a char
path of the file containing the neuron model in .swc format
"""
# Brian morphology
self.morpho = br2.Morphology.from_file(swc_model)
morpho = self.morpho
# Store compartment numbers
self.segment,self.segment_swc,self.compStart,self.compEnd = get_swc(swc_model)
        # Initialise a dictionary for distances to the soma per compartment
        self.distances = {}
        # Initialise a dictionary for lines to plot the neuron
self.lines = {}
# Add the first section as soma
self.sections = {morpho.type: [self.morpho[0], 0, 0]}
# Set a name and distances for the soma
self.sections['soma'][0].name = 'soma'
self.sections['soma'][0].f_x = self.morpho[0].x/meter
self.sections['soma'][0].f_y = self.morpho[0].y/meter
self.sections['soma'][0].f_z = self.morpho[0].z/meter
self.sections['soma'][0].dist = 0
self.distances['soma'] = [0.]
        # Initialize the dendrite numbering
dend_b = 0
# Register soma's children in a sections dictionary
for sec in morpho.children:
# Create an attribut "name" for all children of the soma
if str(sec.type) == "dend":
sec.name = sec.type[:4]+"_"+str(dend_b)
dend_b += 1
else:
sec.name = sec.type
# store final coordinates of the parent (=soma) segment
sec.f_x = self.morpho[0].x[0]/meter
sec.f_y = self.morpho[0].y[0]/meter
sec.f_z = self.morpho[0].z[0]/meter
sec.dist = self.distances['soma'][0]
# add distances to the parent
self.distances = calc_dist(self.distances, sec)
# get the coordinates for all compartments in this section
xn = sec.x/meter
yn = sec.y/meter
zn = sec.z/meter
# get first coordinates (and make integer)
a=(int(round(xn[0]*1e9)),int(round(yn[0]*1e9)),int(round(zn[0]*1e9)))
# id for the section (they correspond to lnum in .swc)
line_num = self.segment[a]
# add id and section to the 'sections' dictionary
self.sections[sec.name] = [sec,line_num,line_num]
# Initialize the level value
level = [sec for sec in morpho.children]
while level != []:
for i, sec in enumerate(level):
for j, child in enumerate(sec.children):
# Create an attribut "name" for all children of sec
name = sec.name + str(j)
child.name = name
# Store parent final coordinates
child.f_x = sec.x[-1]/meter
child.f_y = sec.y[-1]/meter
child.f_z = sec.z[-1]/meter
# Store distances to the soma
child.dist = self.distances[sec.name][-1]
self.distances = calc_dist(self.distances, child)
# Get the coordinates for all compartments in this section
xn = child.x/meter
yn = child.y/meter
zn = child.z/meter
# get first coordinates (and make integer)
a=(int(round(xn[0]*1e9)),int(round(yn[0]*1e9)),int(round(zn[0]*1e9)))
# id for the section (corresponds to lnum in .swc)
line_num = self.segment[a]
# add id and section to the 'sections' dictionary
self.sections[name] = [child, line_num,line_num]
level = [sec.children for sec in level]
# Flatten the list at this level
level = [sublist for sl in level for sublist in sl]
################################################################################
# THE FUNCTION BELOW CAN BE CALLED TO CREATE A BIOPHYSICAL NEURON
################################################################################
def makeNeuron_Ca(self,morphodata):
"""return spatial neuron"""
# Set Biophysics
neuron = self.biophysics(morphodata)
return neuron
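    # Usage sketch (illustrative; 'morphodata' is assumed to be a dict mapping
    # 'soma', 'axon', 'basal' and 'apical' to compartment index ranges, plus a
    # 'thetalow' entry, as read from MorphologyData):
    #
    #   cell = BRIANModel('path/to/morphology.swc')
    #   neuron = cell.makeNeuron_Ca(morpho_data)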
def biophysics(self,morpho_data):
"""Inserting biophysics"""
neuron = br2.SpatialNeuron(morphology=self.morpho, model=eqs, \
Cm=Capacit, Ri=R_axial, threshold = "v/mV>0", refractory = "v/mV > -10",
threshold_location = 0, reset = 's_trace += x_reset*(taux/ms)',method='heun') #
# define the different parts of the neuron
N_soma = neuron[morpho_data['soma'][0]:morpho_data['soma'][-1]+1]
N_axon = neuron[morpho_data['axon'][0]:morpho_data['axon'][-1]+1]
N_basal = neuron[morpho_data['basal'][0]:morpho_data['basal'][-1]+1]
N_apical = neuron[morpho_data['apical'][0]:morpho_data['apical'][-1]+1]
Theta_low = morpho_data['thetalow']*mV
# insert leak conductance
neuron.gLeak = g_leak
# noise
neuron.noise_sigma = 0*pA # initial value membrane voltage
neuron.noise_avg = 0*pA # initial value membrane voltage
N_soma.noise_sigma = noise_std # initial value membrane voltage
N_soma.noise_avg = noise_mean # initial value membrane voltage
####################
# ACTIVE CHANNELS
####################
# Na channels soma, axon, apical dendrites
N_soma.gNav = somaNa
N_axon.gNav = axonNa
N_apical.gNav = apicalNa
neuron.thi1 = thi1_all
N_axon.thi1 = thi1_axn
neuron.thi2 = thi2_all
N_axon.thi2 = thi2_axn
#Kv channels
N_soma.gKv = somagKv
N_basal.gKv = dendgKv
N_apical.gKv = dendgKv
N_axon.gKv = axongKv
#Ca channels sina
N_soma.gCav = ratio_ca*somaCa
N_soma.gIt = (1-ratio_ca)*somaCa
#Ka channels soma
N_soma.gKa_prox = somaKap
#Ka channels dendrites, Na channels basal dendrites, Ca channels dendrites, axon initial segment
for sec in self.sections:
secNr = self.sections[sec][2]
seclen = len(self.sections[sec][0].x)
#BASAL
if secNr in morpho_data['basal']:
# decreasing Na channels
gNa_diff = 0.5*np.array(self.distances[sec][:])*psiemens/um**2
neuron[secNr:secNr+seclen].gNav = np.multiply(basalNa - gNa_diff,basalNa - gNa_diff>0 )
# increasing Ka channels
gKa_diff = 0.7*np.array(self.distances[sec][:])*psiemens/um**2
ratio_A = np.multiply(1. - (1./300.)*np.array(self.distances[sec][:]),1. - (1./300.)*np.array(self.distances[sec][:])>0)
neuron[secNr:secNr+seclen].gKa_prox = ratio_A*np.multiply(basalKa + gKa_diff,basalKa + gKa_diff>0 )
neuron[secNr:secNr+seclen].gKa_dist = (1.-ratio_A)*np.multiply(basalKa + gKa_diff,basalKa + gKa_diff>0 )
# Ca channels
neuron[secNr:secNr+seclen].gCav = dendCa*ratio_ca*(np.array(self.distances[sec][:])>30) + somaCa*ratio_ca*(np.array(self.distances[sec][:])<=30)
neuron[secNr:secNr+seclen].gIt = dendCa*(1.-ratio_ca)*(np.array(self.distances[sec][:])>30) + somaCa*(1.-ratio_ca)*(np.array(self.distances[sec][:])<=30)
#spines
addSpines = np.array(self.distances[sec][:]) > spinedist
noSpines = np.array(self.distances[sec][:]) <= spinedist
neuron[secNr:secNr+seclen].gLeak = noSpines*g_leak + addSpines*g_leak_dend
neuron[secNr:secNr+seclen].Cm = noSpines*Capacit + addSpines*Capacit_dend
#APICAL
if secNr in morpho_data['apical']:
#ratio of Ka channels
ratio_A = np.multiply(1. - (1./300.)*np.array(self.distances[sec][:]),1. - (1./300.)*np.array(self.distances[sec][:])>0)
neuron[secNr:secNr+seclen].gKa_prox = ratio_A*apicalKa
neuron[secNr:secNr+seclen].gKa_dist = (1.-ratio_A)*apicalKa
# Ca channels
neuron[secNr:secNr+seclen].gCav = dendCa*ratio_ca*(np.array(self.distances[sec][:])>30) + somaCa*ratio_ca*(np.array(self.distances[sec][:])<=30)
                neuron[secNr:secNr+seclen].gIt = dendCa*(1.-ratio_ca)*(np.array(self.distances[sec][:])>30) + somaCa*(1.-ratio_ca)*(np.array(self.distances[sec][:])<=30)
import numpy as np
import CCDutils
#This module contains the functions necessary for doing CCSD in the spin-orbital basis
##Spin-orbital-based utilities
def CCSDdoubles(F,Eri,T2,T1,nocc,nbas,variant):
    #Get the right hand side of the spinorbital CCSD doubles equations. p. 307-308 of Bartlett and Shavitt
niter = 1
#p.307
#Get CCD contribution
G = CCDutils.GHFCCD(F,Eri,T2,nocc,nbas,niter,variant)
# return G
G += np.einsum('cjab,ic->ijab',Eri[nocc:,:nocc,nocc:,nocc:],T1)
G -= np.einsum('ciab,jc->ijab',Eri[nocc:,:nocc,nocc:,nocc:],T1)
G -= np.einsum('ijkb,ka->ijab',Eri[:nocc,:nocc,:nocc,nocc:],T1)
G += np.einsum('ijka,kb->ijab',Eri[:nocc,:nocc,:nocc,nocc:],T1)
G -= np.einsum('ck,ic,kjab->ijab',F[nocc:,:nocc],T1,T2)
G += np.einsum('ck,jc,kiab->ijab',F[nocc:,:nocc],T1,T2)
G -= np.einsum('ck,ka,ijcb->ijab',F[nocc:,:nocc],T1,T2)
G += np.einsum('ck,kb,ijca->ijab',F[nocc:,:nocc],T1,T2)
G += np.einsum('cdak,ic,kjdb->ijab',Eri[nocc:,nocc:,nocc:,:nocc],T1,T2)
G -= np.einsum('cdbk,ic,kjda->ijab',Eri[nocc:,nocc:,nocc:,:nocc],T1,T2)
G -= np.einsum('cdak,jc,kidb->ijab',Eri[nocc:,nocc:,nocc:,:nocc],T1,T2)
G += np.einsum('cdbk,jc,kida->ijab',Eri[nocc:,nocc:,nocc:,:nocc],T1,T2)
G -= np.einsum('ickl,ka,ljcb->ijab',Eri[:nocc,nocc:,:nocc,:nocc],T1,T2)
G += np.einsum('jckl,ka,licb->ijab',Eri[:nocc,nocc:,:nocc,:nocc],T1,T2)
G += np.einsum('ickl,kb,ljca->ijab',Eri[:nocc,nocc:,:nocc,:nocc],T1,T2)
G -= np.einsum('jckl,kb,lica->ijab',Eri[:nocc,nocc:,:nocc,:nocc],T1,T2)
G -= 0.5e0*np.einsum('cdkb,ka,ijcd->ijab',Eri[nocc:,nocc:,:nocc,nocc:],T1,T2)
G += 0.5e0*np.einsum('cdka,kb,ijcd->ijab',Eri[nocc:,nocc:,:nocc,nocc:],T1,T2)
#p. 308
G += 0.5e0*np.einsum('cjkl,ic,klab->ijab',Eri[nocc:,:nocc,:nocc,:nocc],T1,T2)
G -= 0.5e0*np.einsum('cikl,jc,klab->ijab',Eri[nocc:,:nocc,:nocc,:nocc],T1,T2)
G += np.einsum('cdka,kc,ijdb->ijab',Eri[nocc:,nocc:,:nocc,nocc:],T1,T2)
G -= np.einsum('cdkb,kc,ijda->ijab',Eri[nocc:,nocc:,:nocc,nocc:],T1,T2)
G -= np.einsum('cikl,kc,ljab->ijab',Eri[nocc:,:nocc,:nocc,:nocc],T1,T2)
G += np.einsum('cjkl,kc,liab->ijab',Eri[nocc:,:nocc,:nocc,:nocc],T1,T2)
G += np.einsum('cdab,ic,jd->ijab',Eri[nocc:,nocc:,nocc:,nocc:],T1,T1)
G += np.einsum('ijkl,ka,lb->ijab',Eri[:nocc,:nocc,:nocc,:nocc],T1,T1)
G -= np.einsum('cjkb,ic,ka->ijab',Eri[nocc:,:nocc,:nocc,nocc:],T1,T1)
G += np.einsum('cikb,jc,ka->ijab',Eri[nocc:,:nocc,:nocc,nocc:],T1,T1)
G += np.einsum('cjka,ic,kb->ijab',Eri[nocc:,:nocc,:nocc,nocc:],T1,T1)
G -= np.einsum('cika,jc,kb->ijab',Eri[nocc:,:nocc,:nocc,nocc:],T1,T1)
G += 0.5e0*np.einsum('cdkl,ic,jd,klab->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
G += 0.5e0*np.einsum('cdkl,ka,lb,ijcd->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
G -= np.einsum('cdkl,ic,ka,ljdb->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
G += np.einsum('cdkl,jc,ka,lidb->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
G += np.einsum('cdkl,ic,kb,ljda->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
G -= np.einsum('cdkl,jc,kb,lida->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
G -= np.einsum('cdkl,kc,id,ljab->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
G += np.einsum('cdkl,kc,jd,liab->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
G -= np.einsum('cdkl,kc,la,ijdb->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
G += np.einsum('cdkl,kc,lb,ijda->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
# -------------------------------
# G += np.einsum('cdkb,ic,ka,jd->ijab',Eri[nocc:,nocc:,:nocc,nocc:],T1,T1,T1)
# G -= np.einsum('cdka,ic,kb,jd->ijab',Eri[nocc:,nocc:,:nocc,nocc:],T1,T1,T1) wrong sign in diagram D8a, p. 306
# of Shavitt and Bartlett. Compare to
G -= np.einsum('cdkb,ic,ka,jd->ijab',Eri[nocc:,nocc:,:nocc,nocc:],T1,T1,T1)
G += np.einsum('cdka,ic,kb,jd->ijab',Eri[nocc:,nocc:,:nocc,nocc:],T1,T1,T1)
#from Crawford and Schaefer, An Introduction to Coupled Cluster Theory, Wiley ...
#-----------------------------------------------------------------------------
G += np.einsum('cjkl,ic,ka,lb->ijab',Eri[nocc:,:nocc,:nocc,:nocc],T1,T1,T1)
G -=np.einsum('cikl,jc,ka,lb->ijab',Eri[nocc:,:nocc,:nocc,:nocc],T1,T1,T1)
G += np.einsum('cdkl,ic,jd,ka,lb->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T1,T1)
return G
def CCSDsingles(F,Eri,T2,T1,nocc,nbas):
#Get the right hand side of the spinorbital CCSD singles equations. p. 304 of Bartlett and Shavitt
#Driver
G = np.copy(F[:nocc,nocc:])
#Terms involving only doubles
G += np.einsum('kc,ikac->ia',F[:nocc,nocc:],T2)
G += 0.5e0*np.einsum('cdak,ikcd->ia',Eri[nocc:,nocc:,nocc:,:nocc],T2)
G -= 0.5e0*np.einsum('ickl,klac->ia',Eri[:nocc,nocc:,:nocc,:nocc],T2)
#Linear term involving only singles
G += np.einsum('icak,kc->ia',Eri[:nocc,nocc:,nocc:,:nocc],T1)
#Mixed Terms
G -= 0.5e0*np.einsum('cdkl,ic,klad->ia',Eri[nocc:,nocc:,:nocc,:nocc],T1,T2)
G -= 0.5e0*np.einsum('cdkl,ka,ilcd->ia',Eri[nocc:,nocc:,:nocc,:nocc],T1,T2)
G += np.einsum('cdkl,kc,lida->ia',Eri[nocc:,nocc:,:nocc,:nocc],T1,T2)
#higher-order terms involving only singles
G -= np.einsum('ck,ic,ka->ia',F[nocc:,:nocc],T1,T1)
G += np.einsum('cdak,ic,kd->ia',Eri[nocc:,nocc:,nocc:,:nocc],T1,T1)
G -= np.einsum('ickl,ka,lc->ia',Eri[:nocc,nocc:,:nocc,:nocc],T1,T1)
G -= np.einsum('cdkl,ic,ka,ld->ia',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T1)
#Don't forget other non-canonical terms
tol = 1.0e-07
F_offdiag = F - np.diag(np.diag(F))
if np.amax(abs(F_offdiag) > tol):
G += np.einsum('ca,ic->ia',F_offdiag[nocc:,nocc:],T1)
G -= np.einsum('ik,ka->ia',F_offdiag[:nocc,:nocc],T1)
return G
def CCSDsingles_fact(F,Eri,T2,T1,nocc,nbas):
#build intermediates according to Stanton et al. JCP 94(6) 1991
F_diag = np.diag(np.diag(F))
Tau_tilde = T2 + 0.50e0*(np.einsum('ia,jb->ijab',T1,T1)-np.einsum('ib,ja->ijab',T1,T1))
Fae = F[nocc:,nocc:] - F_diag[nocc:,nocc:]
Fae -= 0.5e0*(np.einsum('em,ma->ea',F[nocc:,:nocc],T1))
Fae += np.einsum('mf,fema->ea',T1,Eri[nocc:,nocc:,:nocc,nocc:])
Fae -= 0.5e0*np.einsum('mnaf,efmn->ea',Tau_tilde,Eri[nocc:,nocc:,:nocc,:nocc])
Fmi = F[:nocc,:nocc] - F_diag[:nocc,:nocc]
Fmi += 0.5e0*(np.einsum('em,ie->im',F[nocc:,:nocc],T1))
Fmi += np.einsum('ne,iemn->im',T1,Eri[:nocc,nocc:,:nocc,:nocc])
Fmi += 0.5e0*np.einsum('inef,efmn->im',Tau_tilde,Eri[nocc:,nocc:,:nocc,:nocc])
Fme = F[nocc:,:nocc] + np.einsum('nf,efmn->em',T1,Eri[nocc:,nocc:,:nocc,:nocc])
#contract T with intermediates to get RHS of singles equation. (eq 1 in Stanton reference)
G = F[:nocc,nocc:] + np.einsum('ie,ea->ia',T1,Fae)
G -= np.einsum('ma,im->ia',T1,Fmi)
G += np.einsum('imae,em->ia',T2,Fme)
G -= np.einsum('nf,ifna->ia',T1,Eri[:nocc,nocc:,:nocc,nocc:])
G -= 0.5e0*np.einsum('imef,efma->ia',T2,Eri[nocc:,nocc:,:nocc,nocc:])
G -= 0.5e0*np.einsum('mnae,einm->ia',T2,Eri[nocc:,:nocc,:nocc,:nocc])
return G
def CCSDdoubles_fact(F,Eri,T2,T1,nocc,nbas):
#build intermediates according to Stanton et al. JCP 94(6) 1991
F_diag = np.diag(np.diag(F))
Tau_tilde = T2 + 0.50e0*(np.einsum('ia,jb->ijab',T1,T1)-np.einsum('ib,ja->ijab',T1,T1))
Tau = T2 + np.einsum('ia,jb->ijab',T1,T1) - np.einsum('ib,ja->ijab',T1,T1)
#2-index intermediates
Fae = F[nocc:,nocc:] - F_diag[nocc:,nocc:]
Fae -= 0.5e0*(np.einsum('em,ma->ea',F[nocc:,:nocc],T1))
Fae += np.einsum('mf,fema->ea',T1,Eri[nocc:,nocc:,:nocc,nocc:])
Fae -= 0.5e0*np.einsum('mnaf,efmn->ea',Tau_tilde,Eri[nocc:,nocc:,:nocc,:nocc])
Fmi = F[:nocc,:nocc] - F_diag[:nocc,:nocc]
Fmi += 0.5e0*(np.einsum('em,ie->im',F[nocc:,:nocc],T1))
Fmi += np.einsum('ne,iemn->im',T1,Eri[:nocc,nocc:,:nocc,:nocc])
Fmi += 0.5e0*np.einsum('inef,efmn->im',Tau_tilde,Eri[nocc:,nocc:,:nocc,:nocc])
Fme = F[nocc:,:nocc] + np.einsum('nf,efmn->em',T1,Eri[nocc:,nocc:,:nocc,:nocc])
#4-index intermediates
Wijmn = Eri[:nocc,:nocc,:nocc,:nocc] + np.einsum('je,iemn->ijmn',T1,Eri[:nocc,nocc:,:nocc,:nocc])
Wijmn -= np.einsum('ie,jemn->ijmn',T1,Eri[:nocc,nocc:,:nocc,:nocc])
Wijmn += 0.25e0*np.einsum('ijef,efmn->ijmn',Tau,Eri[nocc:,nocc:,:nocc,:nocc])
Wefab = Eri[nocc:,nocc:,nocc:,nocc:] - np.einsum('mb,efam->efab',T1,Eri[nocc:,nocc:,nocc:,:nocc])
Wefab += np.einsum('ma,efbm->efab',T1,Eri[nocc:,nocc:,nocc:,:nocc])
Wefab += 0.25e0*np.einsum('mnab,efmn->efab',Tau,Eri[nocc:,nocc:,:nocc,:nocc])
Wejmb = Eri[nocc:,:nocc,:nocc,nocc:] + np.einsum('jf,efmb->ejmb',T1,Eri[nocc:,nocc:,:nocc,nocc:])
Wejmb -= np.einsum('nb,ejmn->ejmb',T1,Eri[nocc:,:nocc,:nocc,:nocc])
tau1 = np.einsum('jf,nb->jnfb',T1,T1)
Wejmb -= np.einsum('jnfb,efmn->ejmb',(0.5e0*T2+tau1),Eri[nocc:,nocc:,:nocc,:nocc])
    #contract T with intermediates to get RHS of doubles equation. (eq 2 in Stanton reference)
G = Eri[:nocc,:nocc,nocc:,nocc:] + np.einsum('ijae,eb->ijab',T2,Fae) - 0.5e0*np.einsum('ijae,mb,em->ijab',T2,T1,Fme)
G -= (np.einsum('ijbe,ea->ijab',T2,Fae) - 0.5e0*np.einsum('ijbe,ma,em->ijab',T2,T1,Fme))
G -= (np.einsum('imab,jm->ijab',T2,Fmi) + 0.5e0*np.einsum('imab,je,em->ijab',T2,T1,Fme))
G += (np.einsum('jmab,im->ijab',T2,Fmi) + 0.5e0*np.einsum('jmab,ie,em->ijab',T2,T1,Fme))
G += 0.5e0*(np.einsum('mnab,ijmn->ijab',Tau,Wijmn) + np.einsum('ijef,efab->ijab',Tau,Wefab))
G += (np.einsum('imae,ejmb->ijab',T2,Wejmb) - np.einsum('ie,ma,ejmb->ijab',T1,T1,Eri[nocc:,:nocc,:nocc,nocc:]))
G -= (np.einsum('jmae,eimb->ijab',T2,Wejmb) - np.einsum('je,ma,eimb->ijab',T1,T1,Eri[nocc:,:nocc,:nocc,nocc:]))
G -= (np.einsum('imbe,ejma->ijab',T2,Wejmb) - np.einsum('ie,mb,ejma->ijab',T1,T1,Eri[nocc:,:nocc,:nocc,nocc:]))
G += (np.einsum('jmbe,eima->ijab',T2,Wejmb) - np.einsum('je,mb,eima->ijab',T1,T1,Eri[nocc:,:nocc,:nocc,nocc:]))
G += np.einsum('ie,ejab->ijab',T1,Eri[nocc:,:nocc,nocc:,nocc:])
G -= np.einsum('je,eiab->ijab',T1,Eri[nocc:,:nocc,nocc:,nocc:])
G -= np.einsum('ma,ijmb->ijab',T1,Eri[:nocc,:nocc,:nocc,nocc:])
G += np.einsum('mb,ijma->ijab',T1,Eri[:nocc,:nocc,:nocc,nocc:])
return G
def solveccs(F,G1,T1,nocc,nvirt,x=4.0):
#solve singles amplitude equations
T1new = np.zeros(np.shape(T1),dtype=T1.dtype)
for i in range(nocc):
for a in range(nvirt):
aa = a + nocc
d = (F[i,i] - F[aa,aa])
T1new[i,a] = G1[i,a]/d
#Damp amplitudes to improve convergence
return(T1new/x + T1*(x-1.0)/x)
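# Note on the damping above: with the default x=4.0 the new amplitudes are a
# weighted average T1 <- (G/D)/4 + 3*T1/4, i.e. only a quarter of the raw
# update is applied per iteration, trading convergence speed for stability.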
def GCCSEn(F,Eri,T1,nocc):
#Spin-orbital T1 contribution to the CC energy
eccs = np.einsum('abij,ia,jb',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1)
eccs -= np.einsum('abij,ib,ja',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1)
eccs *= 0.25e0
eccs += np.einsum('ai,ia',F[nocc:,:nocc],T1)
return eccs
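# A toy invocation sketch (random, non-physical arrays, purely to exercise the
# tensor contractions above; shapes follow the nocc/nbas conventions used here):
def _example_gccsen(nocc=2, nbas=4):
    F = np.random.randn(nbas, nbas)
    Eri = np.random.randn(nbas, nbas, nbas, nbas)
    T1 = np.random.randn(nocc, nbas - nocc)
    return GCCSEn(F, Eri, T1, nocc)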
#DIIS for singles
def diis_singles_setup(nocc,nvirt,diis_start,diis_dim,typ):
#use direct inversion of the iterative subspace (Pulay Chem Phys Lett 73(390), 1980) to extrapolate CC amplitudes.
#This function sets up the various arrays we need for the extrapolation.
Errors = np.zeros([diis_dim,nocc,nvirt],dtype=typ)
Ts = np.zeros([diis_dim,nocc,nvirt],dtype=typ)
Err_vec = np.zeros([nocc,nvirt],dtype=typ)
return Errors, Ts, Err_vec
def get_singles_Err(F,G,T,nocc,nvirt):
#Calculate the residual for the CC equations at a given value of T amplitudes
Err_vec = np.zeros((nocc,nvirt),dtype=T.dtype)
for i in range(nocc):
for a in range(nvirt):
aa = a + nocc
Err_vec[i,a] = G[i,a]-(F[i,i] - F[aa,aa] )*T[i,a]
error = np.amax(np.absolute(Err_vec))
return error, Err_vec
def diis_singles(diis_start,diis_dim,iteration,Errors,Ts,Told,Err_vec):
#use direct inversion of the iterative subspace (Pulay Chem Phys Lett 73(390), 1980) to accelerate convergence.
#This function performs the actual extrapolation
if (iteration > (diis_start + diis_dim)):
#extrapolate the amplitudes if we're sufficiently far into the CC iterations
#update error and amplitudes for next DIIS cycle. We DON'T want to store the extrapolated amplitudes in self.Ts
Errors = np.roll(Errors,-1,axis=0)
Errors[-1,:,:] = Err_vec
Ts = np.roll(Ts,-1,axis=0)
Ts [-1,:,:] = Told
#solve the DIIS Bc = l linear equation
        B = np.zeros((diis_dim+1,diis_dim+1),dtype=Told.dtype)
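        # Completion sketch (assumed, standard Pulay DIIS; not taken from the
        # original source): fill B with error-vector overlaps, border it with
        # -1s, solve Bc = l, and extrapolate new amplitudes from the stored Ts.
        B[-1, :] = -1.0
        B[:, -1] = -1.0
        B[-1, -1] = 0.0
        for i in range(diis_dim):
            for j in range(diis_dim):
                B[i, j] = np.einsum('ia,ia', Errors[i], Errors[j])
        l = np.zeros(diis_dim+1, dtype=Told.dtype)
        l[-1] = -1.0
        c = np.linalg.solve(B, l)
        Tnew = np.einsum('i,iab->ab', c[:-1], Ts)
        return Errors, Ts, Tnew
    return Errors, Ts, Told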
import pandas as pd
import numpy as np
import logging
import itertools
def HitCallingFoldThreshold(frame, fold_threshold, assaylist, samplelist, ctrl):
# returns dataframe with hit yes or no
data = frame[samplelist].reindex(assaylist)
pdf = pd.DataFrame(columns = samplelist, index = assaylist)
pdf_quant = pd.DataFrame(columns = samplelist, index = assaylist)
for target in samplelist:
for guide in assaylist:
# Hitcalling
fold = np.median(data.loc[guide, target])/np.median(frame.loc[guide, ctrl])
pdf_quant.loc[guide,target] = fold
if fold > fold_threshold:
pdf.loc[guide, target] = 'positive'
else:
pdf.loc[guide, target] = 'negative'
return pdf, pdf_quant
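# A minimal sketch of calling the hit-caller (hypothetical assay/sample names;
# 'ntc' is the negative-control column):
def _example_hitcalling():
    frame = pd.DataFrame({'s1': [5.0, 0.9], 's2': [1.1, 6.0], 'ntc': [1.0, 1.0]},
                         index=['g1', 'g2'])
    calls, folds = HitCallingFoldThreshold(frame, 2.0, ['g1', 'g2'], ['s1', 's2'], 'ntc')
    return calls, folds   # calls: positive/negative labels, folds: fold changes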
def getCtrlMean(df,ctrl,args):
ctrl_values = df[ctrl].tolist()
assaylist = df.index.values.tolist()
if ctrl == args.cpcctrl:
# Exclude no_crRNA control for CPC
noCRNAidx = assaylist.index('no-crRNA')
del ctrl_values[noCRNAidx]
mean = np.mean(ctrl_values)
std = np.std(ctrl_values)
return(mean, std)
##### Process controls
def CheckOutliers(df,ctrl,direction, pass_ntc, pass_cpc):
""" Compare negative/positive controls with each other using the Z-score.
    The NTC consists of using molecular-grade nuclease-free water in RT-PCR reactions
    instead of RNA. The NTC reactions for all primer and crRNA sets should not generate
    any signal. An assay is defined as positive if it falls outside of three standard
    deviations of the mean of all NTCs. The NTC with the no-crRNA assay is only an
    outlier if the signal is more than three standard deviations above the mean. If it
    is lower, no error occurs, since the background signal is generally lower for the
    no-crRNA assay. If any of the NTC reactions (RT-PCR) generates an outlying signal,
    sample contamination may have occurred. This will invalidate the assay of the
    positive viral marker.

    The Combined RVP Positive Control consists of nine components (partial synthetic
    nucleic acids), combined as directed in the Reagent and Controls Preparation
    section to make the final control sample for the reaction. It consists of partial
    synthetic targets at 1e3 copy/ul, is prepared alongside each batch of clinical
    specimens, and should be positive for all nine targets in the CARMEN RVP Assay.

    If the CARMEN RVP PC generates a negative result for any target, this indicates a
    possible problem with primer mixes used in RT-PCR reactions or crRNA-cas13
    detection and will invalidate the assay with the negative CPC. The assay is
    defined as negative if it falls outside of three standard deviations of the mean
    of all CPCs. The CPC with the no-crRNA assay is expected to be negative, is
    excluded from the CPC mean, and will be invalid if the signal falls within three
    standard deviations of the mean of the CPCs.
"""
ctrl_values = df[ctrl].tolist()
logging.info("ctrl values: {} - {}".format(ctrl_values, len(ctrl_values)))
assaylist = df.index.values.tolist()
logging.info("assaylist: {} - {}".format(assaylist, len(assaylist)))
outliers = []
threshold = 3
mean = np.mean(ctrl_values)
std = np.std(ctrl_values)
for y in range(len(ctrl_values)):
# Formula for Z score = (Observation — Mean)/Standard Deviation
z_score= (ctrl_values[y] - mean)/std
logging.info("ctrl: {} - assay {}".format(ctrl_values[y], assaylist[y]))
# Check no crRNA and invalidate run if no-crRNA is positive for the CPC or the NTC
if assaylist[y] == 'no-crRNA' and direction == 'positive':
if np.abs(z_score) < threshold:
# this is a problem, because it should be negative. The whole assay will be invalid.
logging.warning("Run is invalid, because the CPC is not below threshold for no-crRNA (not negative enaugh).")
pass_cpc = False
elif assaylist[y] == 'no-crRNA' and direction == 'negative':
# value for no-crRNA in NTC has to be around the mean or lower than other negative samples, else:
if ctrl_values[y] > mean + threshold * std:
logging.warning("Run is invalid, because the NTC is above the threshold for no-crRNA")
pass_ntc = False
# Check that RNaseP is positive for CPC
elif assaylist[y] == 'RNaseP' and direction == 'positive':
if np.abs(z_score) > threshold:
# this is a problem, because it should be positive and have a small z_score (thus similiar to the others). The whole assay will be invalid.
logging.warning("Run is invalid, because the CPC is above the threshold for RNAseP.")
pass_cpc = False
# Check that RNaseP is negative for NTC
elif assaylist[y] == 'RNaseP' and direction == 'negative':
# value for RNaseP in NTC has to be around the mean of all the other negative samples, else:
if np.abs(z_score) > threshold:
logging.warning("Run is invalid, because the NTC is positive for RNaseP")
pass_ntc = False
else:
            if np.abs(z_score) > threshold:
                outliers.append(assaylist[y])
    # assumed completion: report the collected outliers and control validity flags
    return outliers, pass_ntc, pass_cpc
import os
import pandas as pd, numpy as np
from sklearn.metrics import r2_score, mean_squared_error, auc
from scipy.stats import pearsonr
from itertools import chain
from glob import glob
import pdb
def score(mols, nn, return_mse=False):
r2=r2_score(mols,nn)
rmse=(mean_squared_error(mols, nn, squared=False))/(max(mols)-min(mols))
mse=mean_squared_error(mols, nn, squared=True)
auc_diff=((auc(np.arange(len(mols)),mols)-auc(np.arange(len(nn)),nn))/auc(np.arange(len(mols)),mols))
pearson=pearsonr(mols,nn)
return (max(0,r2), rmse, auc_diff, pearson[0], mse) if return_mse==True else (max(0,r2), rmse, auc_diff, pearson[0])
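# Quick sketch of the metric tuple returned by score() (hypothetical series):
def _example_score():
    mols = np.array([0.0, 1.0, 4.0, 9.0])
    nn = np.array([0.1, 0.9, 4.2, 8.7])
    return score(mols, nn)   # (clipped r2, range-normalised rmse, auc diff, pearson r)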
def gen_perf_metrics(data,seasonal=False,exception=[]):
groups=data.groupby(['County','Year'])
r2s,rmses,auc_diffs,pearsons=list(),list(),list(),list()
probs=list()
for group in groups:
if group[0] not in exception:
mols=group[1]["MoLS"]
nn=group[1]["Neural Network"]
r2,rmse,auc_diff,pearson=score(mols,nn)
r2s.append(r2)
rmses.append(rmse)
auc_diffs.append(auc_diff)
pearsons.append(pearson)
r2s=np.asarray(r2s)
r2s = r2s[np.isfinite(r2s)]
rmses=np.asarray(rmses)
rmses = rmses[np.isfinite(rmses)]
auc_diffs=np.asarray(auc_diffs)
auc_diffs = auc_diffs[np.isfinite(auc_diffs)]
pearsons=np.asarray(pearsons)
pearsons = pearsons[np.isfinite(pearsons)]
    to_return = [np.mean(r2s), np.mean(rmses), np.mean(auc_diffs), np.mean(pearsons)]
    return to_return
import unittest
import dolphindb as ddb
import dolphindb.settings as keys
import numpy as np
from numpy.testing import *
from setup import HOST, PORT, WORK_DIR
import pandas as pd
class DBInfo:
dfsDBName = 'dfs://testDatabase'
diskDBName = WORK_DIR + '/testDatabase'
def existsDB(dbName):
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
return s.run("existsDatabase('{db}')".format(db=dbName))
def dropDB(dbName):
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
s.run("dropDatabase('{db}')".format(db=dbName))
class DatabaseTest(unittest.TestCase):
@classmethod
def setUp(cls):
cls.s = ddb.session()
cls.s.connect(HOST, PORT, "admin", "123456")
dbPaths = [DBInfo.dfsDBName, DBInfo.diskDBName]
for dbPath in dbPaths:
script = """
if(existsDatabase('{dbPath}'))
dropDatabase('{dbPath}')
if(exists('{dbPath}'))
rmdir('{dbPath}', true)
""".format(dbPath=dbPath)
cls.s.run(script)
@classmethod
def tearDown(cls):
cls.s = ddb.session()
cls.s.connect(HOST, PORT, "admin", "123456")
dbPaths = [DBInfo.dfsDBName, DBInfo.diskDBName]
for dbPath in dbPaths:
script = """
if(existsDatabase('{dbPath}'))
dropDatabase('{dbPath}')
if(exists('{dbPath}'))
rmdir('{dbPath}', true)
""".format(dbPath=dbPath)
cls.s.run(script)
def test_create_dfs_database_range_partition(self):
if existsDB(DBInfo.dfsDBName):
dropDB(DBInfo.dfsDBName)
db = self.s.database('db', partitionType=keys.RANGE, partitions=[1, 11, 21], dbPath=DBInfo.dfsDBName)
self.assertEqual(existsDB(DBInfo.dfsDBName), True)
dct = {'databaseDir': DBInfo.dfsDBName,
'partitionSchema': np.array([1, 11, 21], dtype=np.int32),
'partitionSites': None,
'partitionTypeName':'RANGE',
'partitionType': 2}
re = self.s.run("schema(db)")
self.assertEqual(re['databaseDir'], dct['databaseDir'])
assert_array_equal(re['partitionSchema'], dct['partitionSchema'])
self.assertEqual(re['partitionSites'], dct['partitionSites'])
df = pd.DataFrame({'id': np.arange(1, 21), 'val': np.repeat(1, 20)})
t = self.s.table(data=df, tableAliasName='t')
db.createPartitionedTable(table=t, tableName='pt', partitionColumns='id').append(t)
re = self.s.loadTable(tableName='pt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(re['id'], np.arange(1, 21))
assert_array_equal(re['val'], np.repeat(1, 20))
db.createTable(table=t, tableName='dt').append(t)
re = self.s.loadTable(tableName='dt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(re['id'], np.arange(1, 21))
assert_array_equal(re['val'], np.repeat(1, 20))
def test_create_dfs_database_hash_partition(self):
if existsDB(DBInfo.dfsDBName):
dropDB(DBInfo.dfsDBName)
db = self.s.database('db', partitionType=keys.HASH, partitions=[keys.DT_INT, 2], dbPath=DBInfo.dfsDBName)
self.assertEqual(existsDB(DBInfo.dfsDBName), True)
dct = {'databaseDir': DBInfo.dfsDBName,
'partitionSchema': 2,
'partitionSites': None,
'partitionTypeName':'HASH',
'partitionType': 5}
re = self.s.run("schema(db)")
self.assertEqual(re['databaseDir'], dct['databaseDir'])
self.assertEqual(re['partitionSchema'], dct['partitionSchema'])
self.assertEqual(re['partitionSites'], dct['partitionSites'])
df = pd.DataFrame({'id':[1,2,3,4,5], 'val':[10, 20, 30, 40, 50]})
t = self.s.table(data=df)
pt = db.createPartitionedTable(table=t, tableName='pt', partitionColumns='id')
pt.append(t)
re = self.s.loadTable(tableName='pt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(np.sort(re['id']), df['id'])
assert_array_equal(np.sort(re['val']), df['val'])
dt = db.createTable(table=t, tableName='dt')
dt.append(t)
        re = self.s.loadTable(tableName='dt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(np.sort(re['id']), df['id'])
assert_array_equal(np.sort(re['val']), df['val'])
def test_create_dfs_database_value_partition(self):
if existsDB(DBInfo.dfsDBName):
dropDB(DBInfo.dfsDBName)
db = self.s.database('db', partitionType=keys.VALUE, partitions=[1, 2, 3], dbPath=DBInfo.dfsDBName)
self.assertEqual(existsDB(DBInfo.dfsDBName), True)
dct = {'databaseDir': DBInfo.dfsDBName,
'partitionSchema': np.array([3, 1, 2], dtype=np.int32),
'partitionSites': None,
'partitionTypeName':'VALUE',
'partitionType': 1}
re = self.s.run("schema(db)")
self.assertEqual(re['databaseDir'], dct['databaseDir'])
assert_array_equal(re['partitionSchema'], dct['partitionSchema'])
self.assertEqual(re['partitionSites'], dct['partitionSites'])
df = pd.DataFrame({'id':[1, 2, 3, 1, 2, 3], 'val':[11, 12, 13, 14, 15, 16]})
t = self.s.table(data=df)
pt = db.createPartitionedTable(table=t, tableName='pt', partitionColumns='id').append(t)
re = self.s.loadTable(tableName='pt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(np.sort(df['id']), np.sort(re['id']))
assert_array_equal(np.sort(df['val']), np.sort(re['val']))
dt = db.createTable(table=t, tableName='dt').append(t)
re = self.s.loadTable(tableName='dt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(np.sort(df['id']), np.sort(re['id']))
assert_array_equal(np.sort(df['val']), np.sort(re['val']))
def test_create_dfs_database_list_partition(self):
if existsDB(DBInfo.dfsDBName):
dropDB(DBInfo.dfsDBName)
db = self.s.database('db', partitionType=keys.LIST, partitions=[['IBM', 'ORCL', 'MSFT'], ['GOOG', 'FB']],
dbPath=DBInfo.dfsDBName)
self.assertEqual(existsDB(DBInfo.dfsDBName), True)
dct = {'databaseDir': DBInfo.dfsDBName,
'partitionSchema': np.array([np.array(['IBM', 'ORCL', 'MSFT']), np.array(['GOOG', 'FB'])]),
'partitionSites': None,
'partitionTypeName':'LIST',
'partitionType': 3}
re = self.s.run("schema(db)")
self.assertEqual(re['databaseDir'], dct['databaseDir'])
assert_array_equal(re['partitionSchema'][0], dct['partitionSchema'][0])
assert_array_equal(re['partitionSchema'][1], dct['partitionSchema'][1])
self.assertEqual(re['partitionSites'], dct['partitionSites'])
df = pd.DataFrame({'sym':['IBM', 'ORCL', 'MSFT', 'GOOG', 'FB'], 'val':[1,2,3,4,5]})
t = self.s.table(data=df)
db.createPartitionedTable(table=t, tableName='pt', partitionColumns='sym').append(t)
re = self.s.loadTable(tableName='pt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(re['sym'], df['sym'])
assert_array_equal(re['val'], df['val'])
db.createTable(table=t, tableName='dt').append(t)
re = self.s.loadTable(tableName='dt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(re['sym'], df['sym'])
assert_array_equal(re['val'], df['val'])
def test_create_dfs_database_value_partition_np_date(self):
if existsDB(DBInfo.dfsDBName):
dropDB(DBInfo.dfsDBName)
dates=np.array(pd.date_range(start='20120101', end='20120110'), dtype="datetime64[D]")
db = self.s.database('db', partitionType=keys.VALUE, partitions=dates,
dbPath=DBInfo.dfsDBName)
self.assertEqual(existsDB(DBInfo.dfsDBName), True)
dct = {'databaseDir': DBInfo.dfsDBName,
'partitionType': 1,
'partitionSchema': np.array(pd.date_range(start='20120101', end='20120110'), dtype="datetime64[D]"),
'partitionSites': None
}
re = self.s.run("schema(db)")
self.assertEqual(re['databaseDir'], dct['databaseDir'])
self.assertEqual(re['partitionType'], dct['partitionType'])
assert_array_equal(np.sort(re['partitionSchema']), dct['partitionSchema'])
df = pd.DataFrame({'datetime':np.array(['2012-01-01T00:00:00', '2012-01-02T00:00:00'], dtype='datetime64'), 'sym':['AA', 'BB'], 'val':[1,2]})
t = self.s.table(data=df)
db.createPartitionedTable(table=t, tableName='pt', partitionColumns='datetime').append(t)
re = self.s.run("schema(loadTable('{dbPath}', 'pt')).colDefs".format(dbPath=DBInfo.dfsDBName))
assert_array_equal(re['name'], ['datetime', 'sym', 'val'])
assert_array_equal(re['typeString'], ['NANOTIMESTAMP', 'STRING', 'LONG'])
re = self.s.loadTable(tableName='pt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(re['datetime'], df['datetime'])
assert_array_equal(re['sym'], df['sym'])
assert_array_equal(re['val'], df['val'])
db.createTable(table=t, tableName='dt').append(t)
re = self.s.loadTable(tableName='dt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(re['datetime'], df['datetime'])
assert_array_equal(re['sym'], df['sym'])
assert_array_equal(re['val'], df['val'])
def test_create_dfs_database_value_partition_np_month(self):
if existsDB(DBInfo.dfsDBName):
dropDB(DBInfo.dfsDBName)
months=np.array(pd.date_range(start='2012-01', end='2012-10', freq="M"), dtype="datetime64[M]")
print(months)
db = self.s.database('db', partitionType=keys.VALUE, partitions=months,
dbPath=DBInfo.dfsDBName)
self.assertEqual(existsDB(DBInfo.dfsDBName), True)
dct = {'databaseDir': DBInfo.dfsDBName,
'partitionType': 1,
'partitionSchema': months,
'partitionSites': None
}
re = self.s.run("schema(db)")
self.assertEqual(re['databaseDir'], dct['databaseDir'])
self.assertEqual(re['partitionType'], dct['partitionType'])
assert_array_equal(np.sort(re['partitionSchema']), dct['partitionSchema'])
df = pd.DataFrame({'date': np.array(['2012-01-01', '2012-02-01', '2012-05-01', '2012-06-01'], dtype="datetime64"), 'val':[1,2,3,4]})
t = self.s.table(data=df)
db.createPartitionedTable(table=t, tableName='pt', partitionColumns='date').append(t)
scm = self.s.run("schema(loadTable('{dbPath}', 'pt')).colDefs".format(dbPath=DBInfo.dfsDBName))
assert_array_equal(scm['name'], ['date', 'val'])
assert_array_equal(scm['typeString'], ['NANOTIMESTAMP', 'LONG'])
re = self.s.loadTable(tableName='pt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(re['date'], df['date'])
assert_array_equal(re['val'], df['val'])
def test_create_dfs_database_value_partition_np_datehour(self):
if existsDB(DBInfo.dfsDBName):
dropDB(DBInfo.dfsDBName)
times=np.array(['2012-01-01T00:00', '2012-01-01T01:00', '2012-01-01T02:00'], dtype="datetime64")
self.s.database('db', partitionType=keys.VALUE, partitions=times,
dbPath=DBInfo.dfsDBName)
self.assertEqual(existsDB(DBInfo.dfsDBName), True)
dct = {'databaseDir': DBInfo.dfsDBName,
'partitionType': 1,
'partitionSchema': times,
'partitionSites': None
}
# re = self.s.run("schema(db)")
# print(re)
# self.assertEqual(re['databaseDir'], dct['databaseDir'])
# self.assertEqual(re['partitionType'], dct['partitionType'])
# assert_array_equal(np.sort(re['partitionSchema']), dct['partitionSchema'])
# script = '''
# dbName="dfs://testDatabase"
# db=database(dbName)
# t=table([2012.01.01T00:00:00, 2012.01.01T01:00:00, 2012.01.01T02:00:00] as time)
# pt=db.createPartitionedTable(t, `pt, `time).append!(t)
# exec count(*) from pt
# '''
# num = self.s.run(script)
# self.assertEqual(num, 3)
def test_create_dfs_database_value_partition_np_arange_date(self):
if existsDB(DBInfo.dfsDBName):
dropDB(DBInfo.dfsDBName)
dates=np.arange('2012-01-01', '2012-01-10', dtype='datetime64[D]')
db = self.s.database('db', partitionType=keys.VALUE, partitions=dates,
dbPath=DBInfo.dfsDBName)
self.assertEqual(existsDB(DBInfo.dfsDBName), True)
dct = {'databaseDir': DBInfo.dfsDBName,
'partitionType': 1,
'partitionSchema': dates,
'partitionSites': None
}
re = self.s.run("schema(db)")
self.assertEqual(re['databaseDir'], dct['databaseDir'])
self.assertEqual(re['partitionType'], dct['partitionType'])
assert_array_equal(np.sort(re['partitionSchema']), dct['partitionSchema'])
df = pd.DataFrame({'datetime':np.array(['2012-01-01T00:00:00', '2012-01-02T00:00:00'], dtype='datetime64'), 'sym':['AA', 'BB'], 'val':[1,2]})
t = self.s.table(data=df)
db.createPartitionedTable(table=t, tableName='pt', partitionColumns='datetime').append(t)
re = self.s.run("schema(loadTable('{dbPath}', 'pt')).colDefs".format(dbPath=DBInfo.dfsDBName))
assert_array_equal(re['name'], ['datetime', 'sym', 'val'])
assert_array_equal(re['typeString'], ['NANOTIMESTAMP', 'STRING', 'LONG'])
re = self.s.loadTable(tableName='pt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(re['datetime'], df['datetime'])
assert_array_equal(re['sym'], df['sym'])
assert_array_equal(re['val'], df['val'])
db.createTable(table=t, tableName='dt').append(t)
re = self.s.loadTable(tableName='dt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(re['datetime'], df['datetime'])
assert_array_equal(re['sym'], df['sym'])
assert_array_equal(re['val'], df['val'])
def test_create_dfs_database_value_partition_np_arange_month(self):
if existsDB(DBInfo.dfsDBName):
dropDB(DBInfo.dfsDBName)
months=np.arange('2012-01', '2012-10', dtype='datetime64[M]')
db = self.s.database('db', partitionType=keys.VALUE, partitions=months,
dbPath=DBInfo.dfsDBName)
self.assertEqual(existsDB(DBInfo.dfsDBName), True)
dct = {'databaseDir': DBInfo.dfsDBName,
'partitionType': 1,
'partitionSchema': months,
'partitionSites': None
}
re = self.s.run("schema(db)")
self.assertEqual(re['databaseDir'], dct['databaseDir'])
self.assertEqual(re['partitionType'], dct['partitionType'])
assert_array_equal(np.sort(re['partitionSchema']), dct['partitionSchema'])
import numpy as np
import scipy.constants as cst
from scipy.interpolate import interp1d
import os
def laser_gain_step(J_in, g_in):
"""
Computes one iteration of Frantz-Nodvik gain simulation,
and returns the output normalised fluence and the gain left
in the crystal.
J is a number, g is a number
"""
J_out = np.log( np.exp(g_in) * ( np.exp(J_in)-1 ) +1 )
g_left = g_in - ( J_out - J_in )
return J_out, g_left
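# One-step sketch of the Frantz-Nodvik update (hypothetical normalised values);
# the extracted fluence equals the drop in stored gain by construction:
def _example_gain_step(J_in=0.5, g_in=1.0):
    J_out, g_left = laser_gain_step(J_in, g_in)
    assert np.isclose(g_in - g_left, J_out - J_in)  # extracted fluence == gain used
    return J_out, g_left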
def laser_gain_crystal_length(J_in, g_in):
"""
Computes the laser gain over the length of the crystal, and returns
the output normalised fluence and the gain left in the crystal.
J is a number, g is a 1D array
"""
g_left = np.zeros_like(g_in)
J_out = np.copy(J_in)
for idx in np.arange(g_in.size):
J_out, g_left[idx] = laser_gain_step(J_out, g_in[idx])
return J_out, g_left
def laser_gain_crystal_section(J_in, g_in):
"""
Computes the laser gain over the length and spatial profile of the crystal,
and returns the output normalised fluence and the gain left in the crystal.
J is a 2D array, g is a 3D array
"""
    n3, n2, n1 = np.shape(g_in)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 14 14:26:14 2019
@author: ranahamzaintisar
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.linalg import svd
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import accuracy_score as acc
import random
from sklearn.utils import shuffle
'''functions built'''
## Function to split into training and test dataset and create target vector z:
def training_test_split(dataset):
split_data = np.array_split(dataset,10)
training =[]
test = []
for i in range(len(split_data)):
np.random.shuffle(split_data[i])
train_test_split = np.array_split(split_data[i],2)
for item in train_test_split[0]:
if i == 0:
new = np.append(item,10) #class label 10 for digit 0
training.append(new)
else:
new = np.append(item,i) # class labels for other digits
training.append(new)
for item in train_test_split[1]:
if i == 0:
new = np.append(item,10)
test.append(new)
else:
new = np.append(item,i)
test.append(new)
# Training dataset with target vector Z
training_dataset = pd.DataFrame(training)
training_dataset[240] = training_dataset[240].astype('category') # make class label as category
##create dummy variables for the categorical variable i.e target vectors
training_dataset = pd.get_dummies(training_dataset, dummy_na=True, prefix_sep='_' )
## drop nan dummy columns if created
training_dataset = training_dataset.loc[:, training_dataset.nunique(axis=0) > 1]
# Test dataset with target vector Z
test_dataset = pd.DataFrame(test)
test_dataset[240] = test_dataset[240].astype('category') # make class label as category
##create dummy variables for the categorical variable i.e target vectors
test_dataset = pd.get_dummies(test_dataset, dummy_na=True, prefix_sep='_' )
## drop nan dummy columns if created
test_dataset = test_dataset.loc[:, test_dataset.nunique(axis=0) > 1]
return training_dataset , test_dataset
## function to seperate feature vectors from binary target vectors
def split_features_labels(data):
label_col = [x for x in data.columns if isinstance(x, str)]
return (data.drop(label_col, axis=1),
data[label_col])
def split_features_labels_cv(data):
label_col = [x for x in data.columns if x>239]
return (data.drop(label_col, axis=1),
data[label_col])
## function to center the data
def center(df):
cols = df.columns
for field in cols:
mean_field = df[field].mean()
# account for constant columns
if np.all(df[field] - mean_field != 0):
df.loc[:, field] = (df[field] - mean_field)
return df
## Function to find coorelation matrix of the centered data point:
def coor_c(df):
df_matrix = df.as_matrix()
df_matrix_transpose = df_matrix.transpose()
coor_matrix = np.dot(df_matrix_transpose,df_matrix)
n = coor_matrix.shape[1]
normal_coor_matrix = np.multiply(coor_matrix,1/n)
return normal_coor_matrix
##Function Computing the eigenvalues and right eigenvectors of coorelation matrix.
#and returning them in decending order
def eigen(coor_matrix):
#compute the eigen vector and values
    eig_val_cov, eig_vec_cov = np.linalg.eig(coor_matrix)
## sort eigen vector and eigen values from high to low
# Make a list of (eigenvalue, eigenvector) tuples
eig_pairs = [(np.abs(eig_val_cov[i]), eig_vec_cov[:,i]) for i in range(len(eig_val_cov))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eig_pairs.sort(key=lambda x: x[0], reverse=True)
    #separate the sorted pairs
eigen_val_decending =[]
for i in eig_pairs:
eigen_val_decending.append(i[0])
eigen_vec_decending = []
for i in eig_pairs:
eigen_vec_decending.append(i[1])
return eigen_val_decending,eigen_vec_decending
## function to reaturn number of desiered PC features and padded with Bias
def pc_features(eigen_vec,eigen_val,centered_data,num_pc):
s_pc = num_pc
pc_vectors = np.stack(eigen_vec[0:s_pc],axis=0)
pc_eigen_val = np.stack(eigen_val[0:s_pc],axis=0)
pc_features = np.dot(pc_vectors,centered_data.as_matrix().transpose()).transpose()
#add bias to the features:
feat_df= pd.DataFrame(pc_features)
bias = np.full(pc_features.shape[0],1)
feat_df['bias']=bias
features = feat_df.as_matrix()
return features,pc_eigen_val
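## Sketch of the PCA feature pipeline built from the helpers above
## (illustrative; df is a DataFrame of raw feature columns, num_pc the PC count):
def _example_pca_pipeline(df, num_pc=20):
    centered = center(df.copy())
    corr = coor_c(centered)
    eig_val, eig_vec = eigen(corr)
    feats, pc_vals = pc_features(eig_vec, eig_val, centered, num_pc)
    return feats, pc_vals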
## Ridge regression function using formula 39 ML notes
def ridge_reg(features,target,a):
##computing the SVD
semi_def_matrix = np.dot(features,features.transpose())
target_matrix = target.as_matrix()
num_data=semi_def_matrix.shape[0]
identity_matrix = np.identity(num_data)
alpha = a
alpha_sq= alpha**2
r_mat = alpha_sq*identity_matrix
ridge_matrix = semi_def_matrix+r_mat
ridge_matrix_inv = np.linalg.inv(ridge_matrix)
wopt_inv= np.matmul(np.matmul(ridge_matrix_inv,features).transpose(),target_matrix)
wopt = wopt_inv.transpose()
## use the wopt to find hypothesis vectors
hypothesis_matrix = np.matmul(wopt,features.transpose()).transpose()
## use hypothesis vectors to find prediction
prediction = []
for row in hypothesis_matrix:
pred = np.zeros_like(row,dtype='int')
index = np.argmax(row)
pred[index]=1
prediction.append(pred)
df_pred = pd.DataFrame(prediction)
pred_matrix = df_pred.as_matrix()
return pred_matrix , target_matrix
def misclass_rate(pred,actual):
return 1-((sum(np.array([np.argmax(a) for a in pred])==np.array([np.argmax(a) for a in actual]))).astype("float")/len(actual))
def meansq_error(pred,actual):
return np.mean((pred - actual)**2)
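## Tiny end-to-end sketch of the ridge classifier on random data
## (5 samples, 2 features + bias column, 2 one-hot classes; purely illustrative):
def _example_ridge():
    feats = np.hstack([np.random.randn(5, 2), np.ones((5, 1))])
    targets = pd.DataFrame(np.eye(2)[[0, 1, 0, 1, 0]])
    pred, actual = ridge_reg(feats, targets, a=1.0)
    return meansq_error(pred, actual), misclass_rate(pred, actual)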
##cross validation with alpha
def cv_ridge(dataset,no_fold,tune_grid,numdim):
#take the training dataframe with the target vectors
cv_df = dataset.copy()
# make k fold splits
    a = tune_grid
mse_tr_a = []
m_tr_a=[]
mse_val_a = []
m_val_a =[]
for alpha in a:
        k = no_fold
num_dig = int(cv_df.shape[0])
size = int(num_dig/k)
mse_tr =[]
m_tr=[]
mse_val = []
m_val = []
for i in range (k):
cv_new = shuffle(cv_df.values)
test_indices = [x for x in range(i*size,size+(i*size))]
train_indices = range(0,num_dig)
#remove the test indices from the train set
train_indices = [x for x in train_indices if x not in test_indices]
train_cv = pd.DataFrame(cv_new[train_indices])
test_cv = pd.DataFrame(cv_new[test_indices])
##fit the model on training data
#split into intitial fetures and target vectors
feature_train,target_train = split_features_labels_cv(train_cv)
feature_val,target_val= split_features_labels_cv(test_cv)
#center the feature vectors for PCA
centered_train = center(feature_train)
centered_test = center(feature_val)
#find the coorelation matrix (240,240) matrix size
coorelation_matrix_train = coor_c(centered_train)
# Find the eigenvectors and eigen values of the coorelation matrix
eig_val,eig_vec = eigen(coorelation_matrix_train)
# number of PCA features selected=20
# compute the projections of original image vectors in the selected PC directions
feat,pc_eigen_val = pc_features(eig_vec,eig_val,centered_train,numdim)
feat_val,pc_eig_v = pc_features(eig_vec,eig_val,centered_test,numdim)
## run the ridge regression on and compute MSEtrain and MISStrain
# ridge regression
reg_pred_train, reg_target_train = ridge_reg(feat,target_train,alpha)
#MSE
mse_train = meansq_error(reg_pred_train,reg_target_train)
mse_tr.append(mse_train)
#MISS
miss_train = misclass_rate(reg_pred_train,reg_target_train)
m_tr.append(miss_train)
#Predict for validation set
#fit the ridge reg model
reg_pred_val, reg_target_val = ridge_reg(feat_val,target_val,alpha)
#MSE
ms_val = meansq_error(reg_pred_val,reg_target_val)
mse_val.append(ms_val)
#MISS
miss_val = misclass_rate(reg_pred_val,reg_target_val)
m_val.append(miss_val)
mse_tr_a.append(np.mean(mse_tr))
m_tr_a.append(np.mean(m_tr))
mse_val_a.append(np.mean(mse_val))
m_val_a.append(np.mean(m_val))
return mse_tr_a,m_tr_a,mse_val_a,m_val_a
def cv_ridge_kmeans(dataset,no_fold,tune_grid,cnum):
cv_df = dataset.copy()
# make k fold splits
a = tune_grid
mse_tr_a = []
m_tr_a=[]
mse_val_a = []
m_val_a =[]
for alpha in a:
k = no_fold
num_dig = int(cv_df.shape[0])
size = int(num_dig/k)
mse_tr =[]
m_tr=[]
mse_val = []
m_val = []
for i in range (k):
cv_new = shuffle(cv_df.values)
test_indices = [x for x in range(i*size,size+(i*size))]
train_indices = range(0,num_dig)
#remove the test indices from the train set
train_indices = [x for x in train_indices if x not in test_indices]
train_cv = pd.DataFrame(cv_new[train_indices])
test_cv = pd.DataFrame(cv_new[test_indices])
##fit the model on training data
#split into intitial fetures and target vectors
feature_train,target_train = split_features_labels_cv(train_cv)
feature_val,target_val= split_features_labels_cv(test_cv)
# use the Kmeans for feature selection
new_feat = kmeans_algorithm(feature_train.as_matrix(),cnum)
new_feat_v = kmeans_algorithm(feature_val.as_matrix(),cnum)
## run the ridge regression on and compute MSEtrain and MISStrain
# ridge regression
reg_pred_train, reg_target_train = ridge_reg(new_feat,target_train,alpha)
#MSE
mse_train = meansq_error(reg_pred_train,reg_target_train)
mse_tr.append(mse_train)
#MISS
miss_train = misclass_rate(reg_pred_train,reg_target_train)
m_tr.append(miss_train)
#Predict for validation set
#fit the ridge reg model
reg_pred_val, reg_target_val = ridge_reg(new_feat_v,target_val,alpha)
#MSE
ms_val = meansq_error(reg_pred_val,reg_target_val)
mse_val.append(ms_val)
#MISS
miss_val = misclass_rate(reg_pred_val,reg_target_val)
m_val.append(miss_val)
mse_tr_a.append(np.mean(mse_tr))
m_tr_a.append(np.mean(m_tr))
mse_val_a.append(np.mean(mse_val))
m_val_a.append(np.mean(m_val))
return mse_tr_a,m_tr_a,mse_val_a,m_val_a
### crossvalidation with feature number(PCA features)
def cv_features(dataset,no_fold,tune_grid,alpha):
cv_df = dataset.copy()
a = tune_grid
mse_tr_a = []
m_tr_a=[]
mse_val_a = []
m_val_a =[]
for dimnum in a:
k = no_fold
num_dig = int(cv_df.shape[0])
size = int(num_dig/k)
mse_tr =[]
m_tr=[]
mse_val = []
m_val = []
for i in range (k):
cv_new = shuffle(cv_df.values)
test_indices = [x for x in range(i*size,size+(i*size))]
train_indices = range(0,num_dig)
#remove the test indices from the train set
train_indices = [x for x in train_indices if x not in test_indices]
train_cv = pd.DataFrame(cv_new[train_indices])
test_cv = pd.DataFrame(cv_new[test_indices])
##fit the model on training data
#split into intitial fetures and target vectors
feature_train,target_train = split_features_labels_cv(train_cv)
feature_val,target_val= split_features_labels_cv(test_cv)
#center the feature vectors for PCA
centered_train = center(feature_train)
centered_test = center(feature_val)
#find the coorelation matrix (240,240) matrix size
coorelation_matrix_train = coor_c(centered_train)
            # Find the eigenvectors and eigenvalues of the correlation matrix
eig_val,eig_vec = eigen(coorelation_matrix_train)
            # number of PCA features selected = dimnum (the value under tuning)
# compute the projections of original image vectors in the selected PC directions
feat,pc_eigen_val = pc_features(eig_vec,eig_val,centered_train,dimnum)
feat_val,pc_eig_v = pc_features(eig_vec,eig_val,centered_test,dimnum)
            ## run the ridge regression and compute MSEtrain and MISStrain
# ridge regression
reg_pred_train, reg_target_train = ridge_reg(feat,target_train,alpha)
#MSE
mse_train = meansq_error(reg_pred_train,reg_target_train)
mse_tr.append(mse_train)
#MISS
miss_train = misclass_rate(reg_pred_train,reg_target_train)
m_tr.append(miss_train)
#Predict for validation set
#fit the ridge reg model
reg_pred_val, reg_target_val = ridge_reg(feat_val,target_val,alpha)
#MSE
ms_val = meansq_error(reg_pred_val,reg_target_val)
mse_val.append(ms_val)
#MISS
miss_val = misclass_rate(reg_pred_val,reg_target_val)
m_val.append(miss_val)
mse_tr_a.append(np.mean(mse_tr))
m_tr_a.append(np.mean(m_tr))
mse_val_a.append(np.mean(mse_val))
m_val_a.append(np.mean(m_val))
return mse_tr_a,m_tr_a,mse_val_a,m_val_a
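# Hedged usage sketch (helper functions such as split_features_labels_cv,
# ridge_reg, meansq_error, misclass_rate and shuffle are assumed to be defined
# earlier in this project):
#   mse_tr, miss_tr, mse_val, miss_val = cv_features(df, no_fold=5,
#                                                    tune_grid=[5, 10, 20, 40],
#                                                    alpha=0.1)
# Each returned list holds one score per tried feature dimension, averaged
# over the k folds.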
### exploring with K-means features
def kmeans_algorithm (dataframe, n):
    # create a copy of the 2-d image vector array
data_copy = dataframe.copy()
    # shuffle the 2-d image vector array along the first axis (rows)
np.random.shuffle(data_copy)
    # take the first n image vectors from the shuffled 2-d array as the initial random codebook vectors
codebook = data_copy[:n]
    # Compute the Euclidean distance between the vectors in the dataset and the randomly selected codebook vectors
    # subtract each codebook vector from the dataset image vectors.
    # numpy broadcasting allows subtracting all dataset image vectors from the codebook array even though their shapes don't match.
    # Step 1: extend the codebook vector array by adding a new dimension between the two existing dimensions
    # the extended dimension has length 1, so the broadcasting rule applies: arrays of unequal rank are compatible if one of the mismatched dimensions is 1.
extend_codebook = codebook[:,np.newaxis]
    # Step 2: subtract the extended codebook array from the image vector array
    difference = data_copy - extend_codebook
    # find the absolute differences; summing these over the feature axis gives ||difference|| (see the sketch below)
    abs_dist_extended = np.sqrt((difference)**2)
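# Illustrative sketch (not part of the original function, which is cut off
# above): with the broadcast distances, one full k-means iteration assigns
# every vector to its nearest codebook entry and recomputes each codebook
# vector as the mean of its members. The helper name below is hypothetical.
def kmeans_iteration(data, codebook):
    # distances has shape (n_codebook, n_samples): norm over the feature axis
    distances = np.sqrt(((data - codebook[:, np.newaxis]) ** 2).sum(axis=2))
    # index of the closest codebook vector for every sample
    nearest = np.argmin(distances, axis=0)
    # new codebook: mean of the samples assigned to each entry
    # (assumes no cluster ends up empty)
    return np.array([data[nearest == k].mean(axis=0)
                     for k in range(codebook.shape[0])])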
import numpy as np
import matplotlib.pyplot as plt
class TriggersSubclass(object):
pass
class Triggers(object):
def __init__(self, TriggerSet=None, FlashSet=None, PockelsSet=None, IISet=None, CMOSSet=None, ADCSet=None, PlotOn=0):
"""Trigger = TriggersMPTS.Triggers([1,0], [1,100],[1,3e6,200,20,20,1000], [1,15, 10, 5,5,0], [1,0,10,10,5,2],[1,0,0.2e5], 1)
M.Kantor version on 28.07.2017
===================================
        1. Returns the pulse timing of the measurement cycle for the multipass TS diagnostic in AUG
2. Time sequences of the pulses are calculated for 8 channels from input data.
        3. The channels are activated in accordance with the operation mode
4. All times in the measurement cycle are counted from the first Pockels cell (PC)
trigger in laser burst
InTime - absolute times of unit input
OutTime - absolute times of unit output
Delay - time delay between units outputs (Start) and unit input trigger
========================
TriggerSet = [TriggerMode,AUGShotStart]
FlashSet = [FlashOn, FlashWidth (us)]
        PockelsSet = [PockelsOn,PockelsFirstTime (us),PockelsPeriod (us),PockelsN,
        PockelsWidth (us),PockelsRetard (us)]
        IISet = [IIOn,IIBeforeN, IIAfterN, IIGateLaserWidth (us), IIGatePlasmaWidth
        (us),IIGateLaserFiberDelay (us)]
        CMOSSet = [CMOSLaserOn,CMOSPlasmaOn, CMOSBeforeN, CMOSAfterN, CMOSTrigWidth (us), CMOSDeltaGate (us)]
================================
TriggerSet = [TriggerMode,AUGShotStart]
TriggerMode - 1 - Burst is started from AUG, T1 and T2 modes
0 - start from Simmer timer (manual start) (M1 and M2)
-1 - Start from Burst trigger (M2 mode without laser)
AUGShotStart, time of AUG T06 pulse (us) which triggers Burst timer
FlashSet - array.
FlashSet = [FlashOn, FlashWidth (us)]
FlashOn - Enable/disable of flash triggers
FlashWidth - the width of the flash trigger (us)
PockelsSet - array:
[PockelsOn,PockelsFirstTime (us),PockelsPeriod (us),PockelsN, PockelsWidth (us),PockelsRetard (us)]
PockelsOn - Enable/disable of Pockels triggers
PockelsFirstTime - time of the first PC pulse, us
PockelsPeriod period of this pulses (us),
PockelsN the number of PC pulses,
PockelsWidth - duration of PC pulses (us),
PockelsRetard - Delay of the 1st Pockels pulse after Flash
IISet - array
[IIOn,IIBeforeN, IIAfterN, IIGateLaserWidth (us), IIGatePlasmaWidth (us),IIGateLaserFiberDelay (us)]
IIOn - Enable/disable of the II triggers
IIBeforeN - the number of twin pulses before PC pulses
IIAfterN - the number of twin pulses after PC pulses
IIGateLaserWidth - GateLaser, duration of II gates during laser pulses(us),
the first pulse in twins
        IIGatePlasmaWidth - GatePlasma, duration of II gate between laser pulses for plasma light measurements (us)
the second pulse in twins
        IIGateLaserFiberDelay - fine delay of II gates triggered by the ruby laser via fiber for fine sync of laser pulses.
The delay is also applicable for triggers from IIGatePlasma
        CMOSSet - array, trigger settings for the CMOS cameras
[CMOSLaserOn,CMOSPlasmaOn, CMOSBeforeN, CMOSAfterN, CMOSTrigWidth (us), CMOSDeltaGate (us)]
CMOSLaserOn, CameraPlasmaOn - Enable/Disable switch of Camera1 and Camera2
CMOSBeforeN - the number of twin CMOS pulses before the first II gate
CMOSAfterN - the number of twin CMOS pulses after the last II gate
CMOSTrigWidth - duration of trigger pulses of CMOS cameras, (us)
CMOSDeltaGate - Difference between CMOS and II gates,CMOSDeltaGate> = 0, us
CMOSDeltaGate is set in the cameras
        If two cameras are active, then the CMOSLaser trigger is generated
        for the 1st camera and the CMOSPlasma is generated for the 2nd camera
Otherwise, all pulses are generated for a single camera
ADCSet array. The zero time is synchronized with the 1st PC pulses in the bursts
[ADCOn,StartADC,StopADC]
if 1 then ADCSet is calculated from the pulse sequence
        Trigger times of Charge and Simmer timers are defined in Timers.m
==========================================================================
Data range and defaults settings for timers [Min,Step,Max,Default]
==========================================================================
Operation modes:
T-modes - TS is triggered from AUG
        AUG triggers:
TZ60 - pretrigger at -60 ms comes to Charge timer
TS06 - start of plasma shot comes to Burst timer
        SimmerTimer is triggered by the delayed output of the Charge timer
T1 mode: All three timers are disabled.
        Operator manually enables the Charge timer before the plasma shot
        All timers are disabled after the plasma shot
T2 mode: Charge timer is enabled, Simmer and Burst are disabled
Simmer and Burst are enabled by Charge trigger and disabled after
plasma shot
        M-modes - TS is triggered by operator without any AUG triggers.
Laser charged by operator and Charge output does not trigger Simmer timer
        Operation with laser is started from Simmer timer which triggers Burst
timer after some delay
Operation without laser is started from Burst timer
which cannot be triggered by Simmer timer.
All timers are enabled manually
M1 mode: Manual trigger of the diagnostic without plasma
(Rayleigh calibration, stray light measurements)
M2 mode: Manual trigger of the diagnostic without laser
(spectral calibrations,alignment of the cameras)
M3 mode: Manual trigger of ruby laser
without CMOS cameras and image intensifier.
"""
# ====================================================
# Default values:
SimmerMinWait = 2e6 # us minimal delay between simmer and flash triggers
TriggerModeDef = 1 # 1 - Burst is started from AUG TS06 pulse,
# 0 - start from Simmer timer (manual start)
# Laser defaults:
FlashOnDef = 1
PockelsFirstTimeDef = 0 # us, Absolute time of the 1st Pockels pulse
PockelsWidthDef = 10 # us
PockelsRetardDef = 1000 # us, delay of the 1st Pockels pulse from the flash start
PockelsNDef = 10
PockelsPeriodDef = 200 # us
# image intensifier defaults:
IIBeforeNDef = 10
IIAfterNDef = 20
IIGateLaserWidthDef = 2
IIGatePlasmaWidthDef = 20
IIPlasmaRetardWidthDef = 5 # us
# CMOS defaults
CMOSBeforeNDef = 10
CMOSAfterNDef = 20
CMOSTrigWidthDef = 5 # us
CMOSDeltaGateDef = 2 # us
CMOSLaserGateDef = IIGateLaserWidthDef + CMOSDeltaGateDef
CMOSPlasmaGateDef = IIGatePlasmaWidthDef + CMOSDeltaGateDef
IIOnDef = 1
CMOSOnDef = [1, 1]
AUGShotStartDef = PockelsFirstTimeDef - (IIBeforeNDef + CMOSBeforeNDef) * PockelsPeriodDef
if TriggerSet is None:
AUGShotStart = AUGShotStartDef # trigger from AUG
TriggerModeRange = [-1, 1, 1, TriggerModeDef]
# Burst, is triggered from AUG or Simmer timer.
        # triggers Flash, IIGatePlasma, CMOSPlasma and ADC
# absolute time of output pulses from Burst timer:
if TriggerModeDef:
# Start from AUGShotStart
BurstInTimeDef = AUGShotStartDef
else:
# Start form Simmer
BurstInTimeDef = 0
BurstDelayDef = SimmerMinWait
BurstOutTimeDef = BurstInTimeDef + BurstDelayDef
BurstWidthDef = 100
BurstPeriodDef = 3e5
BurstNDef = 1
BurstPeriodRange = [1e5, 1e5, 1e7, BurstPeriodDef]
BurstWidthRange = [100, 100, 100, BurstWidthDef]
BurstNRange = [1, 1, 4, BurstNDef]
# Laser times
# Flash, pulses starting flash discharge. One per a pulse from Burst timer
# absolute time of output pulses from Burst trigger:
FlashOutTimeDef = PockelsFirstTimeDef - PockelsRetardDef
# Delay between output and input triggers:
FlashDelayDef = FlashOutTimeDef - BurstOutTimeDef
FlashInTimeDef = BurstOutTimeDef
FlashOnRange = [0, 1, 1, FlashOnDef]
FlashDelayRange = [0, 10, 10000, FlashDelayDef] # Delay of Flash output
FlashPeriodRange = [0, 0, 0, 0]
FlashWidthRange = [50, 50, 200, 100]
FlashNRange = [1, 1, 1, 1]
# FlashBool, Gate of all Pockels pulses. Triggered by Flash
# absolute time of output pulses from Flash trigger:
# Delay between output and input triggers:
# Pockels, Gates of Pockels cell. Reapeted in each burst. Triggered by Flash
# absolute time of the 1st output pulse from Pockels trigger:
PockelsOnRange = [0, 1, 1, FlashOnDef] # Delay between output and input triggers:
PockelsRetardDef = PockelsFirstTimeDef - FlashInTimeDef
PockelsRetardRange = [0, 1, 10000, PockelsRetardDef]
PockelsPeriodRange = [50, 25, 1000, PockelsPeriodDef]
PockelsWidthRange = [1, 1, 30, PockelsWidthDef]
PockelsNRange = [0, 1, 100, PockelsNDef]
# II times:
# IIPlasmaRetard, Trigger of IIGatesLaser. Triggered by IIGatePlasma
# absolute time of output pulse from IIPlasmaRetard:
# Delay between starts of plasma and laser II gates:
IIPlasmaRetardDelayDef = np.fix((PockelsPeriodDef + IIGatePlasmaWidthDef - IIGateLaserWidthDef) / 2) # us
IIPlasmaRetardDelayRange = [0, 0.1, 2000, IIPlasmaRetardDelayDef]
IIPlasmaRetardPeriodRange = [0, 0, 0, 0]
IIPlasmaRetardWidthRange = [1, 1, 5, IIPlasmaRetardWidthDef]
# IIGatePlasma, Gates of image intensifier between pockels gates. Triggered by Burst timer
# absolute time of output pulses from IIGatePlasma:
IIGatePlasmaOutTimeDef = PockelsFirstTimeDef + PockelsWidthDef - IIGateLaserWidthDef - IIBeforeNDef * PockelsPeriodDef - IIPlasmaRetardDelayDef
# Delay between output and input triggers:
IIGatePlasmaDelayDef = IIGatePlasmaOutTimeDef - BurstOutTimeDef
IIGatePlasmaPeriodRange = PockelsPeriodRange
IIGatePlasmaWidthRange = [1, 1, 100, IIGatePlasmaWidthDef] # formulas
IIGatePlasmaNRange = [0, 1, 100, PockelsNDef + IIBeforeNDef + IIAfterNDef]
# IIGateLaser, Gate of image intensifier triggered by IIPlasmaRetard or
# external laser trigger
# absolute time of output pulses from IIGateLaser:
# IIGateLaserOutTimeDef = IIPlasmaRetardOutTimeDef
# Delay between output and input triggers:
IIGateLaserPeriodRange = [0, 0, 0, 0]
IIGateLaserWidthRange = [0.1, 0.1, 10, IIGateLaserWidthDef]
# Sum of IIGateLaser and IIGatePlasma
# IIGate = IIGateLaser | IIGatePlasma
# CMOS times:
# CMOSLaser trigger of Laser CMOS
# absolute time of output pulse from CMOSLaser:
CMOSLaserOutTimeDef = PockelsFirstTimeDef + PockelsWidthDef - IIGateLaserWidthDef - IIBeforeNDef * PockelsPeriodDef
# Delay between output and input triggers:
CMOSLaserDelayDef = np.fix((PockelsPeriodDef + CMOSPlasmaGateDef - CMOSLaserGateDef) / 2) # us
CMOSLaserPeriodRange = [0, 0, 0, 0]
# CMOSPlasma, Pulses opening Plasma CMOS camera and triggered by Burst timer:
# The exposure time is set by the camera software
# absolute time of output pulse from CMOSPlasma:
CMOSPlasmaOutTimeDef = CMOSLaserOutTimeDef - CMOSLaserDelayDef # wrong?
CMOSPlasmaOutTimeDef = IIGatePlasmaOutTimeDef - IIGatePlasmaDelayDef - np.fix(CMOSDeltaGateDef / 2) - CMOSTrigWidthDef
# Delay between output and input triggers:
CMOSPlasmaNRange = [0, 1, 100, PockelsNDef + IIBeforeNDef + IIAfterNDef + CMOSBeforeNDef + CMOSAfterNDef]
# ADC
# absolute time of output pulses from ADC:
ADCStartDef = min([FlashInTimeDef, IIGatePlasmaOutTimeDef, CMOSPlasmaOutTimeDef])
ADCDelayDef = ADCStartDef - BurstOutTimeDef
ADCWidthDef = (PockelsNDef + IIBeforeNDef + IIAfterNDef + CMOSBeforeNDef + CMOSAfterNDef) * PockelsPeriodDef
ADCOnRange = [0, 1, 1, 1]
ADCStartRange = [-5000, 10, 0, ADCDelayDef]
ADCPeriodRange = [0, 0, 0, 0]
ADCWidthRange = [100, 100, 50000, ADCWidthDef]
ADCNRange = FlashNRange
# end Default values
# ==============================
# ==========================================================================
# Read input data:
# ==========================================================================
        # Readout Pockels =======================================
PockelsOn = PockelsOnRange[3]
PockelsFirstTime = PockelsFirstTimeDef
PockelsPeriod = PockelsPeriodRange[3]
PockelsWidth = PockelsWidthRange[3]
PockelsN = PockelsNRange[3]
PockelsRetard = PockelsRetardRange[3]
if PockelsSet is not None:
PockelsOn = PockelsSet[0]
PockelsFirstTime = PockelsSet[1]
PockelsPeriod = PockelsSet[2]
PockelsN = PockelsSet[3]
PockelsWidth = PockelsSet[4]
PockelsRetard = PockelsSet[5]
# Readout Flash ======================================
FlashOn = FlashOnRange[3]
FlashDelay = FlashDelayRange[3]
FlashPeriod = FlashPeriodRange[3]
FlashWidth = FlashWidthRange[3]
FlashN = FlashNRange[3]
if FlashSet is not None:
FlashOn = FlashSet[0]
FlashWidth = FlashSet[1]
        # Readout II =======================================
# IIGatePlasma:
IIOn = IIOnDef
IIGatePlasmaOn = IIOn
IIGatePlasmaPeriod = IIGatePlasmaPeriodRange[3]
IIGatePlasmaWidth = IIGatePlasmaWidthRange[3]
IIGatePlasmaN = IIGatePlasmaNRange[3]
IIBeforeN = IIBeforeNDef
IIAfterN = IIAfterNDef
IIPlasmaRetardOn = 1
IIPlasmaRetardWidth = IIPlasmaRetardWidthRange[3]
IIPlasmaRetardDelay = IIPlasmaRetardDelayRange[3]
IIPlasmaRetardN = 1
IIPlasmaRetardPeriod = IIPlasmaRetardPeriodRange[3]
IIGateLaserOn = IIOn
IIGateLaserWidth = IIGateLaserWidthRange[3]
IIGateLaserPeriod = IIGateLaserPeriodRange[3]
IIGateLaserN = 1
if IISet is not None:
IIOn = IISet[0]
IIGatePlasmaOn = IIOn
IIGateLaserOn = IIOn
IIBeforeN = IISet[1]
IIAfterN = IISet[2]
IIGateLaserWidth = IISet[3]
IIGatePlasmaWidth = IISet[4]
IIGateLaserFiberDelay = IISet[5]
IIGatePlasmaPeriod = PockelsPeriod
IIGatePlasmaN = IIBeforeN + IIAfterN + PockelsN
IIPlasmaRetardDelay = np.fix((PockelsPeriod + IIGatePlasmaWidth - IIGateLaserWidth) / 2) # us
# IIPlasmaRetard
# Readout CMOS =======================================
# CMOSPlasma
CMOSLaserOn = CMOSOnDef[0]
CMOSPlasmaOn = CMOSOnDef[1]
CMOSPlasmaPeriod = PockelsPeriodRange[3]
CMOSPlasmaN = CMOSPlasmaNRange[3]
CMOSLaserN = 1
CMOSLaserPeriod = CMOSLaserPeriodRange[3]
CMOSTrigWidth = CMOSTrigWidthDef
CMOSDeltaGate = CMOSDeltaGateDef
CMOSBeforeN = CMOSBeforeNDef
CMOSAfterN = CMOSAfterNDef
CMOSLaserGate = IIGateLaserWidth + CMOSDeltaGate
CMOSPlasmaGate = IIGatePlasmaWidth + CMOSDeltaGate
# Delay between output and input triggers:
if CMOSSet is not None:
CMOSLaserOn = CMOSSet[0]
CMOSPlasmaOn = CMOSSet[1]
CMOSBeforeN = CMOSSet[2]
CMOSAfterN = CMOSSet[3]
CMOSTrigWidth = CMOSSet[4]
CMOSDeltaGate = CMOSSet[5]
CMOSLaserGate = IIGateLaserWidth + CMOSDeltaGate
CMOSPlasmaGate = IIGatePlasmaWidth + CMOSDeltaGate
CMOSPlasmaN = CMOSBeforeN + CMOSAfterN + IIBeforeN + IIAfterN + PockelsN
CMOSPlasmaPeriod = PockelsPeriod
if not (CMOSLaserOn and CMOSPlasmaOn):
CMOSMaxWidth = max([CMOSLaserGate, CMOSPlasmaGate])
CMOSLaserGate = CMOSMaxWidth
CMOSPlasmaGate = CMOSMaxWidth
# Readout ADC ============================================
ADCOn = ADCOnRange[3]
ADCStart = ADCStartRange[3]
ADCPeriod = ADCPeriodRange[3]
ADCWidth = ADCWidthRange[3]
ADCN = ADCNRange[3]
ADCStart = ADCStartDef
ADCWidth = ADCWidthDef
if ADCSet is not None:
ADCOn = ADCSet[0]
if len(ADCSet) == 3:
ADCStart = ADCSet[1]
ADCWidth = ADCSet[2] - ADCSet[1]
else:
ADCStart = 0
ADCWidth = 0
        # Readout Burst ===========================================================
TriggerMode = TriggerModeRange[3]
AUGShotStart = AUGShotStartDef
BurstWidth = BurstWidthDef
BurstPeriod = BurstPeriodDef
BurstN = BurstNDef
if TriggerSet is not None:
TriggerMode = TriggerSet[0]
AUGShotStart = TriggerSet[1]
if TriggerMode == 1:
AUGShotStart = TriggerSet[1]
else:
AUGShotStart = SimmerMinWait + PockelsRetard
BurstPeriod = BurstPeriodRange[3]
BurstWidth = BurstWidthRange[3]
BurstN = BurstNRange[3]
BurstOn = 1
FlashBoolOn = FlashOn
FlashBoolN = BurstN
FlashBoolPeriod = BurstPeriod
FlashBoolWidth = np.fix(PockelsN * PockelsPeriod / 10) * 10
        # =======================================================
        # Absolute times of input triggers:
        # =======================================================
# FlashInTime = PockelsFirstTime-PockelsRetard
# FlashInTime = PockelsFirstTime-PockelsRetard-FlashDelay # ok
MaxBeforeInterval = max([(IIBeforeN + CMOSBeforeN + 1) * PockelsPeriod, PockelsRetard]) + PockelsPeriod
if TriggerMode: # start from AUG (T modes)
BurstInTime = AUGShotStart
BurstDelay = BurstInTime - AUGShotStart # wrong
else: # Start from Simmer (manual modes)
BurstInTime = 0
BurstDelay = SimmerMinWait
BurstOutTime = BurstInTime + BurstDelay
PockelsFirstTimeMin = BurstOutTime + MaxBeforeInterval
PockelsFirstTimeDelta = PockelsFirstTimeMin - PockelsFirstTime
PockelsFirstTime = max([PockelsFirstTimeMin, PockelsFirstTime])
PockelsFirstTimeDelta = max([PockelsFirstTimeDelta, 0])
FlashOutTime = PockelsFirstTime - PockelsRetard
FlashInTime = BurstOutTime
FlashDelay = FlashOutTime - FlashInTime
PockelsInTime = FlashOutTime
FlashBoolOutTime = np.fix(PockelsFirstTime - PockelsPeriod / 2)
FlashBoolInTime = FlashOutTime
FlashBoolDelay = FlashBoolOutTime - FlashBoolInTime
IIGatePlasmaInTime = BurstOutTime
IIGatePlasmaOutTime = PockelsFirstTime + PockelsWidth - IIGateLaserWidth - IIBeforeN * PockelsPeriod - IIPlasmaRetardDelay
IIGatePlasmaDelay = IIGatePlasmaOutTime - IIGatePlasmaInTime
IIPlasmaRetardInTime = IIGatePlasmaOutTime
IIPlasmaRetardOutTime = IIPlasmaRetardInTime + IIPlasmaRetardDelay
IIGateLaserInTime = IIPlasmaRetardOutTime
IIGateLaserOutTime = PockelsFirstTime + PockelsWidth - IIGateLaserWidth - IIBeforeN * PockelsPeriod
IIGateLaserDelay = IIGateLaserOutTime - IIGateLaserInTime
        CMOSLaserOutTime = IIGateLaserOutTime - CMOSTrigWidth - (PockelsWidth - IIGateLaserWidth) - np.fix(CMOSDeltaGate/2) - (CMOSBeforeN-1)*PockelsPeriod
        CMOSLaserDelay = np.fix((PockelsPeriod + CMOSPlasmaGate - CMOSLaserGate)/2)
        CMOSLaserInTime = CMOSLaserOutTime - CMOSLaserDelay
        CMOSPlasmaOutTime = CMOSLaserInTime
        CMOSPlasmaDelay = CMOSPlasmaOutTime - BurstOutTime
        CMOSPlasmaInTime = CMOSPlasmaOutTime - CMOSPlasmaDelay
# ==========================================================================
# settings basic pulses which are repeatedly generated
# There are four parent triggeres which are started by the Burst timer:
# Flash, IIGatePlasma, CMOSPlasma and ADC
# Other triggeres are started by the parent outputs
# times of parent triggers are counted from the output of the Burst timer.
# ==========================================================================
# Burst - grand parent:
BurstBase = np.array([[0, 0], [0, 1], [BurstWidth, 1], [BurstWidth, 0]])
Burst = BurstBase
BurstTimeWindow = [Burst[0, 0], Burst[-1, 0]]
FlashBase = np.array([[0, 0], [0, 1], [FlashWidth, 1], [FlashWidth, 0]])
FlashBoolBase = np.array([[0, 0], [0, 1], [FlashBoolWidth, 1], [FlashBoolWidth, 0]])
PockelsBase = np.array([[0, 0], [0, 1], [PockelsWidth, 1], [PockelsWidth, 0]])
IIGatePlasmaBase = np.array([[0, 0], [0, 1], [IIGatePlasmaWidth, 1], [IIGatePlasmaWidth, 0]])
IIPlasmaRetardBase = np.array([[0, 0], [0, 1], [IIPlasmaRetardWidth, 1], [IIPlasmaRetardWidth, 0]])
IIGateLaserBase = np.array([[0, 0], [0, 1], [IIGateLaserWidth, 1], [IIGateLaserWidth, 0]])
# ==========================================================================
# ==========================================================================
# Time sequences from triggers:
# ==========================================================================
# Flash is triggered by Burst:
Flash = FlashBase # single pulse
Flash[:, 0] = Flash[:, 0] + FlashInTime + FlashDelay
FlashBool = FlashBoolBase # single pulse
FlashBool[:, 0] = FlashBool[:, 0] + FlashBoolOutTime
Pockels = np.array([PockelsBase[:, 0], PockelsBase[:, 1]]).T
for i in range(1, PockelsN):
Pockels = np.append(Pockels, np.array([PockelsBase[:, 0] + i * PockelsPeriod, PockelsBase[:, 1]]).T, axis=0)
if PockelsFirstTime == FlashInTime + FlashDelay + PockelsRetard:
Pockels[:, 0] = Pockels[:, 0] + PockelsFirstTime
else:
            raise ValueError('PockelsFirstTime is inconsistent with the Flash timing')
LaserTimeWindow = [Flash[1, 1], Pockels[-1, 0] + PockelsWidth]
# IIGatePlasma is triggered by Flash:
IIGatePlasma = np.array([IIGatePlasmaBase[:, 0], IIGatePlasmaBase[:, 1]]).T
IIPlasmaRetard = np.array([IIPlasmaRetardBase[:, 0], IIPlasmaRetardBase[:, 1]]).T
IIGateLaser = np.array([IIGateLaserBase[:, 0], IIGateLaserBase[:, 1]]).T
for i in range(1, IIBeforeN + PockelsN + IIAfterN):
IIGatePlasma = np.append(IIGatePlasma, np.array([IIGatePlasmaBase[:, 0] + i * PockelsPeriod, IIGatePlasmaBase[:, 1]]).T, axis=0)
IIPlasmaRetard = np.append(IIPlasmaRetard, np.array([IIPlasmaRetardBase[:, 0] + i * PockelsPeriod, IIPlasmaRetardBase[:, 1]]).T, axis=0)
IIGateLaser = np.append(IIGateLaser, np.array([IIGateLaserBase[:, 0] + i * PockelsPeriod, IIGateLaserBase[:, 1]]).T, axis=0)
if IIGatePlasmaOutTime == BurstOutTime + IIGatePlasmaDelay:
IIGatePlasma[:, 0] = IIGatePlasma[:, 0] + IIGatePlasmaOutTime
else:
            raise ValueError('IIGatePlasmaOutTime is inconsistent with the Burst timing')
if IIPlasmaRetardOutTime == IIGatePlasmaOutTime + IIPlasmaRetardDelay:
IIPlasmaRetard[:, 0] = IIPlasmaRetard[:, 0] + IIPlasmaRetardOutTime
else:
            raise ValueError('IIPlasmaRetardOutTime is inconsistent with the IIGatePlasma timing')
if IIGateLaserOutTime == BurstOutTime + IIGatePlasmaDelay + IIPlasmaRetardDelay:
IIGateLaser[:, 0] = IIGateLaser[:, 0] + IIGateLaserOutTime
else:
            raise ValueError('IIGateLaserOutTime is inconsistent with the IIPlasmaRetard timing')
# IIGateLaser(IIGateLaser[:, 0] >FlashBool[0, 0]&IIGateLaser[:, 0] <FlashBool[-1, 0], 2) = 0
IITimeWindow = [IIGatePlasma[0, 0], IIGateLaser[-1, 0] + IIGateLaserWidth]
# CMOSPlasma is triggered by Flash:
        CMOSPlasmaBase = np.array([[0, 0], [0, 1], [CMOSTrigWidth, 1], [CMOSTrigWidth, 0]])
import numpy as np
from numpy import linalg as LA
class Duffing:
"""
    Create a Duffing object by specifying its parameter delta as input at initialization of the object.
    It is a bistable dynamical system with two stable steady states and one unstable steady state.
"""
def __init__(self, dl):
self.delta = dl # duffing parameter
self.tau = 0.1 # parameter for bin_classifier()
self.dt = 0.001 # time step
self.control = 0 # initialize control as 0
self.max_control = 4
self.seed = np.random.seed(0)
self.state = None
self.desired_state = [1.0, 0.0] # desired state, also a stable fixed point
self.fp2 = [0.0, 0.0] # unstable fixed point
self.fp3 = [-1.0, 0.0] # stable fixed point
self.X = None
self.U = None
def reset(self):
"""
:return: randomly initialized state of the duffing object
"""
        self.state = np.random.uniform(low=-4, high=4, size=(2,))
        return self.state
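# Hedged sketch (not from the original file, which is cut off above): a
# step/dynamics method is missing; the standard controlled Duffing form
# dx1/dt = x2, dx2/dt = -delta*x2 + x1 - x1**3 + u is consistent with the
# fixed points listed in __init__ ((+-1, 0) stable, (0, 0) unstable).
def duffing_euler_step(state, delta, control, dt):
    x, v = state
    dv = -delta * v + x - x ** 3 + control  # Duffing acceleration plus control input
    return np.array([x + dt * v, v + dt * dv])  # one explicit Euler step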
import sys
import re
import numpy as np
from tqdm import tqdm_notebook
# https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html
from sklearn.neural_network import MLPClassifier as SKL_MLPClassifier
from ..algorithm_utils import Algorithm
class MLPClassifier(Algorithm):
def __init__(self, *args, seed=42, epochs=200, **kwargs):
super().__init__('mlp', 'MLPClassifier', 'MLP', seed)
self._args = args
self._kwargs = kwargs
self._kwargs['hidden_layer_sizes'] = self._kwargs.get('hidden_layer_sizes', (32, 32, 32, 3))
self._kwargs['activation'] = self._kwargs.get('activation', 'tanh')
self._kwargs['solver'] = self._kwargs.get('solver', 'adam')
self._kwargs['batch_size'] = self._kwargs.get('batch_size', 256)
self._kwargs['random_state'] = seed
self._kwargs['max_iter'] = epochs
self._kwargs['verbose'] = True
# tol=1e-4, learning_rate_init=1e-4, alpha=0.0001
def can_handle_time_dim(self):
return False
def __call__(self):
model = SKL_MLPClassifier(*self._args, **self._kwargs)
return model
def transform(self, y):
        classes = np.unique(y)
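        # Hedged sketch of the cut-off continuation: map each label to its
        # index among the sorted unique classes, e.g.
        #     return np.searchsorted(classes, y)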
""" test_group_generator: test the GroupGenerator() object of StudyGroups. """
#pylint: disable=C0103,R0201,W0212
import unittest
import numpy as np
try:
import study_groups as sg
except ImportError:
import sys
sys.path.append('..')
import study_groups as sg
class TestInitialization(unittest.TestCase):
""" Tests that all initialization succeeds (or fails) as intended. """
def test_init(self):
""" Tests that missing filenames or bad input files fail, and that correct input files
are properly ingested.
"""
np.testing.assert_raises(TypeError, sg.GroupGenerator)
gg = sg.GroupGenerator('test0.yaml')
np.testing.assert_equal(gg.n, 12)
np.testing.assert_equal(gg.config['names'], ['Alice', 'Bob', 'Charisma', 'Dexter',
'Emily', 'Franklin', 'Greta', 'Hamlet',
'Ivy', 'Jasper', 'Katie', 'Louis'])
np.testing.assert_equal(gg.indices, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
np.testing.assert_equal(gg.matrix, np.zeros((12, 12)))
np.testing.assert_raises(TypeError, sg.GroupGenerator, 'test_failure1.yaml')
np.testing.assert_raises(TypeError, sg.GroupGenerator, 'test_failure2.yaml')
gg = sg.GroupGenerator('test3.yaml')
np.testing.assert_equal(gg.n, 6)
np.testing.assert_equal(gg.config['names'], ['Alice', 'Bob', 'Charisma',
'Dexter', 'Emily', 'Franklin'])
np.testing.assert_equal(gg.indices, [0, 1, 2, 3, 4, 5])
np.testing.assert_equal(gg.matrix, np.zeros((6, 6)))
def test_rng_seed(self):
""" Tests that the random number seed is actually seeding when set in a yaml file, to
produce replicable results.
"""
#pylint: disable=W0612
gg = sg.GroupGenerator('test0.yaml')
ri = sg.group_generator.np.random.rand()
np.random.seed(271)
        ri_truth = np.random.rand()
# coding:utf-8
import torch
import torchvision
import torch.utils.data as data
import torchvision.transforms as transforms
import numpy as np
import os
import cv2
# from collections import Counter
# import matplotlib.image as mpimg
# import matplotlib.pyplot as plt # plt is used to display images
import imageio
import pickle
from PIL import Image
class IMBALANCECIFAR10(torchvision.datasets.CIFAR10):
cls_num = 10
def __init__(self, root, imb_type='exp', imb_factor=0.01, rand_number=0, train=True,
download=True, target_transform=None, transform=None):
super(IMBALANCECIFAR10, self).__init__(root, train, transform,
target_transform, download)
        np.random.seed(rand_number)
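        # Hedged sketch of the usual continuation (cut off here): derive the
        # per-class sample counts for an exponential imbalance profile, e.g.
        #   img_max = len(self.data) / self.cls_num
        #   img_num_per_cls = [int(img_max * imb_factor ** (i / (self.cls_num - 1.0)))
        #                      for i in range(self.cls_num)]
        # and then keep only that many images of each class.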
import abc
from abc import abstractmethod
import numpy as np
from src.approx import FeatureMap
def egreedy(vals: np.ndarray, eps: float) -> int:
"""
Egreedy action selection
"""
if np.random.rand() < eps:
return np.random.choice(vals.size)
else:
# randomize over all actions with maximum qval - this prevents issues when all qvals are equal
max_q = np.max(vals)
return np.random.choice(np.arange(vals.size)[vals == max_q])
class Learner(abc.ABC):
@abstractmethod
def select_action(self, state: np.ndarray) -> int:
pass
@abstractmethod
def start(self, state: np.ndarray) -> int:
pass
@abstractmethod
def update(self, nstate: np.ndarray, reward: float, done: bool) -> int:
pass
def __call__(self, state: np.ndarray) -> int:
return self.select_action(state)
class Qlearner(Learner):
"""
Epsilon greedy Qlearner with linear qvalue approximation
"""
def __init__(self, feature_map: FeatureMap, n_actions: int, lr: float, discount: float, eps: float):
self.phi = feature_map
self.weights = np.zeros((feature_map.n_features, n_actions))
self.lr = lr
self.discount = discount
self.eps = eps
self.feat = None
self.action = None
def get_state_value(self, states):
features = self.phi(states)
return self.get_values(features)
def get_values(self, features):
return features @ self.weights
def select_action(self, state):
features = self.phi(state).flatten()
values = self.get_values(features).flatten()
return egreedy(values, self.eps)
def start(self, state):
self.feat = self.phi(state).flatten()
self.action = self.select_action(state)
return self.action
def update(self, nstate, reward, done):
qvals = self.get_values(self.feat)
nphi = self.phi(nstate).flatten()
nqvals = self.get_values(nphi)
delta = reward - qvals[self.action]
        if not done:
            # no bootstrap from terminal states
            delta += self.discount * np.max(nqvals)
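        # Hedged completion (the original is cut off at this point): apply the
        # semi-gradient update for the taken action, then advance the cached
        # features/action so the agent can continue the episode.
        self.weights[:, self.action] += self.lr * delta * self.feat
        self.feat = nphi
        self.action = egreedy(self.get_values(nphi).flatten(), self.eps)
        return self.action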
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009 <NAME> and <NAME> <<EMAIL>>
# Copyright (C) 2011-2013 <NAME> <<EMAIL>>
#
# License: MIT (see COPYING file)
import numpy as np
from ..histogram import fullhistogram
__all__ = [
'lbp',
'lbp_transform',
]
def lbp_transform(image, radius, points, ignore_zeros=False, preserve_shape=True):
'''
    transformed = lbp_transform(image, radius, points, ignore_zeros=False, preserve_shape=True)
    Compute Linear Binary Pattern Transform
    The return value contains the transformed pixel values. Binned, these give a
    **histogram** of feature counts, where position ``i`` corresponds to the
    number of pixels that had code ``i``. The codes are compressed so that
    impossible codes are not used. Therefore, this is the ``i``th feature, not
    just the feature with binary code ``i``.
Parameters
----------
image : ndarray
input image (2-D numpy ndarray)
radius : number (integer or floating point)
radius (in pixels)
points : integer
nr of points to consider
ignore_zeros : boolean, optional
whether to ignore zeros. Note that if you set this to ``True``, you
will need to set ``preserve_shape`` to False. (default: False)
preserve_shape : boolean, optional
whether to return an array with the same shape as ``image``. (default:
True)
Returns
-------
features : 1-D numpy ndarray
histogram of features. See above for a caveat on the interpretation of
these.
Reference
---------
Gray Scale and Rotation Invariant Texture Classification with Local Binary Patterns
Ojala, <NAME>, <NAME>, T. Lecture Notes in Computer Science (Springer)
2000, ISSU 1842, pages 404-420
'''
from ..interpolate import shift
from mahotas.features import _lbp
if ignore_zeros and preserve_shape:
raise ValueError('mahotas.features.lbp_transform: *ignore_zeros* and *preserve_shape* cannot both be used together')
    image = np.asanyarray(image, dtype=np.float64)
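# Hedged sketch of the transform body (the original continues past the cut,
# using mahotas' own interpolate.shift and _lbp helpers): sample `points`
# neighbours on a circle of the given radius, compare each against the centre
# pixel, and pack the comparisons into a binary code per pixel. The stand-in
# below uses scipy.ndimage.shift instead of mahotas' shift.
def _lbp_codes_sketch(image, radius, points):
    from scipy.ndimage import shift as nd_shift
    codes = np.zeros(image.shape, dtype=np.int64)
    for i in range(points):
        dy = radius * np.sin(2 * np.pi * i / points)
        dx = radius * np.cos(2 * np.pi * i / points)
        # interpolated neighbour image at the sampled circle offset
        neighbour = nd_shift(image, (dy, dx), order=1)
        # set bit i wherever the neighbour exceeds the centre pixel
        codes += (neighbour > image).astype(np.int64) << i
    return codes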
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 11 10:30:16 2018
@author: chrelli
"""
import time, os, sys, shutil
# for math and plotting
import pandas as pd
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import math
# small utilities
import csv
from colour import Color
from itertools import compress # for list selection with logical
from tqdm import tqdm
# for image manipulation
import cv2
# for recording and connecting to the intel realsense library
import pyrealsense as pyrs
#import multiprocessing
from multiprocessing import Process
# for cloud handling
#from pyntcloud import PyntCloud
# import handy Functions
from utils.common_utils import *
from utils.recording_utils import *
from utils.cloud_utils import *
#from fitting_utils import *
#from merge_and_filter_clouds import filter_and_downsample_cloud
# h5py for acessing data
import h5py
# ALLSO JIT STUFF
from numba import jit, njit
#%% read from processed frames
from utils.reading_utils import most_recent_recording_folders,read_shifted_stamps
# processed frames are kept here:
top_folder, _ = most_recent_recording_folders()
# the reference frames
frames,ts,n_dropped = read_shifted_stamps(0,top_folder)
def read_processed_frame(top_folder,frame,voxel = 0.003, n_padding_digits = 8):
raw = np.load(top_folder+'/npy/frame_'+str(frame).rjust(n_padding_digits,'0')+'.npy')
#todo make column order and split
positions = raw[:,0:3]*voxel
weights = raw[:,3]
return positions,weights
#%%
positions,weights = read_processed_frame(top_folder,15000)
color3d(positions,weights)
#%% Ask for clicks!
from utils.fitting_utils import click_one_mouse
#todo is it called head or nose, make up my mind!
hip_click,mid_click,nose_click = click_one_mouse(positions)
#%% Now, we make a function, which spits out the constants
@njit
def mouse_body_size_constants(body_scale = 1,use_old=False):
## HIP is a prolate ellipsoid, centered along the x axis
a_hip_min = 0.04/2 #m
a_hip_max = 0.065/2 #m
b_hip_min = 0.03/2 #m
b_hip_max = 0.046/2 #m
d_hip = 0.019 #m
# converting it to the new terminology
a_hip_0 = a_hip_min #m
a_hip_delta = a_hip_max - a_hip_min #m
b_hip_0 = b_hip_min #m
b_hip_delta = b_hip_max - b_hip_min #m
## NOSE is prolate ellipsoid, also along the head direction vector
# here, there is no re-scaling
a_nose = 0.040/2 #m
b_nose = 0.025/2 #m
d_nose = 0.016 #m
if use_old:
## HIP is a prolate ellipsoid, centered along the x axis
a_hip_min = 0.018 #m
a_hip_max = 0.025 #m
b_hip_min = 0.011 #m
b_hip_max = 0.02 #m
d_hip = 0.015 #m
# converting it to the new terminology
a_hip_0 = a_hip_min #m
a_hip_delta = a_hip_max - a_hip_min #m
b_hip_0 = b_hip_min #m
b_hip_delta = b_hip_max - b_hip_min #m
## NOSE is prolate ellipsoid, also along the head direction vector
# here, there is no re-scaling
a_nose = 0.020 #m
b_nose = 0.010 #m
d_nose = 0.014 #m
return a_hip_0,a_hip_delta,b_hip_0,b_hip_delta,d_hip,a_nose,b_nose,d_nose
# these are the constants!
a_hip_0,a_hip_delta,b_hip_0,b_hip_delta,d_hip,a_nose,b_nose,d_nose = mouse_body_size_constants()
#%% Make a function to declare the shape of the mouse
from utils.fitting_utils import rotate_body_model
@njit
def mouse_body_geometry(beta,gamma,s,theta,phi):
"""
    This function calculates the configuration of the mouse body.
    In this configuration, it has five free parameters: pitch and yaw of the hip
    and nose, plus the spine scaling s.
    Returns the points which define the model: center points and radii.
theta el is elevation of the head (in xz plane)
phi lr is head rotation in xy plane
beta,gamma,s is hip pitch,yaw and spine scaling
theta,phi is nose pitch,yaw (i.e. around y and z, respectively since major body is along x axis)
"""
# get the constants for the body model
a_hip_0,a_hip_delta,b_hip_0,b_hip_delta,d_hip,a_nose,b_nose,d_nose = mouse_body_size_constants()
# calculate the spine
a_hip = a_hip_0 + s * a_hip_delta
b_hip = b_hip_0 + (1-s)**2 * b_hip_delta
    # Calculate the necessary rotation matrices
R_body = rotate_body_model(0,beta,gamma)
R_head = rotate_body_model(0,theta,phi)
R_nose = R_body @ R_head
    # And now we get the center points of hip, mid and nose
c_hip = np.array([0,0,0])
c_mid = np.array([d_hip,0,0])
c_nose = c_mid + R_head @ np.array([d_nose,0,0])
# and the Q matrices
Q_hip = R_body @ np.diag(np.array([1/a_hip**2,1/b_hip**2,1/b_hip**2])) @ R_body.T
Q_nose = R_nose @ np.diag(np.array([1/a_nose**2,1/b_nose**2,1/b_nose**2])) @ R_nose.T
# now, just return the coordinates and the radii
return R_body,R_nose,c_mid,c_hip,c_nose,a_hip,b_hip,a_nose,b_nose,Q_hip,Q_nose
#R_body,R_nose,c_mid,c_hip,c_nose,a_hip,b_hip,a_nose,b_nose,Q_hip,Q_nose = mouse_body_geometry(beta,gamma,s,theta,phi)
#%% make a function to plot the mouse as a wireframe! (with points, if desired)
# add 2 cm as the center of the hip (to try is out)
#%% Now, generate a pretty good guess
#beta,gamma,s,theta,phi = -.1,.3,.9,.1,.4
from utils.fitting_utils import good_guess
beta,gamma,s,theta,phi,t_body = good_guess(hip_click,mid_click,nose_click)
s=.7
plot_mouse_body(beta,gamma,s,theta,phi,t_body,positions = positions,weights = None)
#%% for generating a good body model!
positions,weights = read_processed_frame(top_folder,8000)
#8000 has legs
color3d(positions)
hip_click,mid_click,nose_click = click_one_mouse(positions)
measure_dist(positions,weights,nose_click-hip_click,side = True)
beta,gamma,s,theta,phi,t_body = good_guess(hip_click,mid_click,nose_click)
s=.9
plot_mouse_body(beta,gamma,s,theta,phi,t_body,positions = positions,weights = None)
x0_guess = np.hstack((beta,gamma,s,theta,phi,t_body))
#%% Loss function for one mouse
@njit
def min_along_axis(raw):
N = raw.shape[0]
distances = np.empty(N)
for i in range(N):
distances[i] = np.min( np.abs(raw[i,:]) )
return distances
@jit
def min_along_axis2(raw):
return np.min(raw,1)
@njit
def jit_norm(positions):
return np.sqrt(np.sum(np.power(positions,2),0))
@njit
def distance_to_1_mouse(x0,posx,posy,posz):
"""
    this calculates the shortest distance from any point to the hull of the
    mouse, given the free parameters in x0.
The variables, packed into x0 are:
x0 = [beta,gamma,s,theta,phi,t_body]
= [beta,gamma,s,theta,phi,t_body[0],t_body[1],t_body[2]]
positions is a Nx3 numpy array of the cloud points!
"""
# and the positions have to be separate vectors for some tuple/scipy bullshit reason
    #TODO check if this is really true, could be more memory efficient
positions = np.column_stack((posx,posy,posz))
# first do the first mouse:
# x0 has the parameters of the function, need to unpack the angles, the translation and the angles
beta = x0[0]
gamma = x0[1]
s = x0[2]
theta = x0[3]
phi = x0[4]
t_body = x0[5:8]
    # get the coordinates c of the mouse body in its own reference frame
R_body,R_nose,c_mid,c_hip,c_nose,a_hip,b_hip,a_nose,b_nose,Q_hip,Q_nose = mouse_body_geometry(beta,gamma,s,theta,phi)
# Now, calculate the distance vectors from the origin of the hip, mid and head
p_hip = (positions - ( c_hip + t_body) ).T
p_nose = (positions - ( R_body @ c_nose + t_body) ).T
# and the distance to the body
delta_hip_0 = np.abs( 1 - 1 / np.sqrt(np.sum(p_hip*(Q_hip @ p_hip),0)) ) * jit_norm(p_hip)
delta_nose_0 = np.abs( 1 - 1 / np.sqrt(np.sum(p_nose*(Q_nose @ p_nose),0)) ) * jit_norm(p_nose)
#distances = np.vstack((delta_hip_0,delta_nose_0))
# njit trick to get the minimum distance along axis
# not sure if actually faster, though
    # 1) first calculate the difference between the two distances
    difference = delta_hip_0 - delta_nose_0
    # 2) ask where the difference is positive (i.e. the nose distance is smaller)
    logic = difference > 0
    # where the difference is negative, the hip distance is already the smallest
    # 3) subtracting the positive differences gives the elementwise minimum
    minimum_dist = delta_hip_0 - difference*logic
# return the minimum distance
return minimum_dist
@njit
def wrapped_loss(x0,posx,posy,posz):
return np.sum(distance_to_1_mouse(x0,posx,posy,posz))
#%% Try fitting test case of one mouse body!
# select frame
positions,weights = read_processed_frame(top_folder,15000)
#8000 has legs
#plot the frame
color3d(positions)
# click the mouse to generate starting positions
hip_click,mid_click,nose_click = click_one_mouse(positions)
#convert the clicks to a guess
beta,gamma,s,theta,phi,t_body = good_guess(hip_click,mid_click,nose_click)
s=.6
# plot the mouse body
plot_mouse_body(beta,gamma,s,theta,phi,t_body,positions = positions,weights = None)
#%%
x0_guess = np.hstack((beta,gamma,s,theta,phi,t_body))
from scipy.optimize import minimize,Bounds
opt = ({'maxiter': 1000})
res = minimize(wrapped_loss, x0_guess,
args=(positions[:,0],positions[:,1],positions[:,2]),
bounds = Bounds(hard_lo,hard_hi,keep_feasible=False),
method = 'SLSQP',options = opt)
x_fit = res.x
optimality = res.fun
plot_mouse_body(x_fit[0],x_fit[1],x_fit[2],x_fit[3],x_fit[4],x_fit[5:8],positions = positions,weights = None)
#%% Define the overall boundaries for the variables of the mouse body
@njit
def hard_limits():
"""
defines the absolute hard limits on the values
The sequence of variables is
alpha, beta, gamma, t_body, theta, phi
+-+-+-+-+ +-+-+-+-+-+-+
|H|a|r|d| |l|i|m|i|t|s|
+-+-+-+-+ +-+-+-+-+-+-+
"""
# Let's set the limits of the bounding box like this:
# we're dropping alpha, just beta, gamma, t and theta,phi
x_range = 0.3*np.array([-1,1]) #[m]
y_range = x_range
z_range = 0.1*np.array([0,1]) #[m]
# beta is the body pitch, from - pi/2 (vertical) to slightly more than 0 (horizontal)
    beta_range = np.pi * np.array([-.5,.1]) # [rad]
import time
import numpy as np
import collections
class PrettyTable():
'''
    For formatted output of the simplex table
'''
def __init__(self):
# all element is str
self.table = []
return None
def add_row(self, row):
self.table.append(row)
return None
    def pretty(self, hlines=None, vlines=None, col_width='c'):
        # avoid mutable default arguments and don't modify the caller's lists
        hlines = list(hlines) if hlines else []
        vlines = list(vlines) if vlines else []
        n_row, n_col = len(self.table), len(self.table[0])
        for i, e in enumerate(hlines):
            if e < 0: hlines[i] = n_row + e - 1
        for i, e in enumerate(vlines):
            if e < 0: vlines[i] = n_col + e - 1
        # column widths; the col_width argument selects the sizing policy,
        # so the measured widths go into a separate list
        widths = [0 for j in range(n_col)]
        for row in self.table:
            for j, e in enumerate(row):
                widths[j] = max(widths[j], len(e))
        if col_width in ['c', 'center']:
            widths = np.array(widths)
            widths[1:-1] = np.max(widths[1:-1])
        elif col_width in ['e', 'each']:
            pass
        elif col_width in ['a', 'all']:
            widths = np.array(widths)
            widths[:] = np.max(widths)
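        # Hedged sketch of the cut-off remainder: render each row by padding
        # every cell to its column width, inserting a vertical separator after
        # the columns listed in vlines and a horizontal rule after the rows
        # listed in hlines, e.g.
        #     for i, row in enumerate(self.table):
        #         cells = [e.rjust(w) for e, w in zip(row, widths)]
        #         ...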
#!/usr/bin/env python
import click
from functools import partial
from multiprocessing import Pool
import numpy as np
import os
import pandas as pd
import pickle
from pybedtools import BedTool
from pybedtools.helpers import cleanup
import re
import subprocess as sp
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")
bar_format = "{percentage:3.0f}%|{bar:20}{r_bar}"
from utils import get_file_handle
# Globals
scripts_dir = os.path.dirname(os.path.realpath(__file__))
CONTEXT_SETTINGS = {
"help_option_names": ["-h", "--help"],
}
@click.command(no_args_is_help=True, context_settings=CONTEXT_SETTINGS)
@click.argument(
"matrix_dir", type=click.Path(exists=True, resolve_path=True)
)
@click.argument(
"genome_file", type=click.Path(exists=True, resolve_path=True)
)
@click.argument(
"regions_idx", type=click.Path(exists=True, resolve_path=True)
)
@click.option(
"-d", "--dummy-dir",
help="Dummy directory.",
type=click.Path(resolve_path=True),
default="/tmp/",
show_default=True
)
@click.option(
"-o", "--output-dir",
help="Output directory.",
type=click.Path(resolve_path=True),
default="./",
show_default=True
)
@click.option(
"-t", "--threads",
help="Threads to use.",
type=int,
default=1,
show_default=True
)
def main(**args):
# Create output dir
if not os.path.exists(args["output_dir"]):
os.makedirs(args["output_dir"])
# Get already processed TFs
tfs = set()
for tsv_file in os.listdir(args["output_dir"]):
m = re.search("^(\S+).(train|validation|test).tsv.gz$", tsv_file)
tfs.add(m.group(1))
# Get matrix files
matrix_files = []
for matrix_file in os.listdir(args["matrix_dir"]):
m = re.search("^matrix2d.(\S+).ReMap.sparse.npz$", matrix_file)
if m.group(1) not in tfs:
matrix_files.append(os.path.join(args["matrix_dir"], matrix_file))
# Get regions idx
handle = get_file_handle(args["regions_idx"], mode="rb")
regions_idx = pickle.load(handle)
handle.close()
idx_regions = {v: k for k, v in regions_idx.items()}
# Get FASTA sequences
kwargs = {"total": len(matrix_files), "bar_format": bar_format}
pool = Pool(args["threads"])
p = partial(_to_ExplaiNN, genome_file=args["genome_file"],
idx_regions=idx_regions, dummy_dir=args["dummy_dir"],
output_dir=args["output_dir"])
for _ in tqdm(pool.imap(p, matrix_files), **kwargs):
pass
def _to_ExplaiNN(matrix_file, genome_file, idx_regions,
dummy_dir="/tmp/", output_dir="./"):
# Initialize
prefix = re.search("^matrix2d.(\S+).ReMap.sparse.npz$",
os.path.split(matrix_file)[1]).group(1)
# Load matrix 2D as numpy array
matrix2d = np.load(matrix_file)["arr_0"]
# Get ones and zeros
matrix1d = np.nanmax(matrix2d, axis=0)
ones = np.where(matrix1d == 1.)[0]
    zeros = np.where(matrix1d == 0.)[0]
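    # Hedged sketch of the usual continuation (cut off here): balance the
    # positive (ones) and negative (zeros) region indices, look up their
    # coordinates through idx_regions, extract sequences from genome_file
    # (e.g. via BedTool), and write <prefix>.{train,validation,test}.tsv.gz
    # files into output_dir.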
""" Basis terms Module
Author: <NAME>
Date: 7/10/2017
"""
__all__ = ['PolyBasis', 'MonicPoly', 'Hermite1d', 'Legendre1d', 'Laguerre1d']
import numpy as np
import math
import scipy.special as sp
import itertools as itls
class Hermite1d(object):
"""
Class that constructs 1-dimensional Hermite polynomials.
"""
_degree = None
def __init__(self, degree = 1):
"""
Ininializes the object
"""
assert isinstance(degree, int)
assert degree > 0
self._degree = degree
def eval(self, x):
H = np.zeros(self._degree + 1)
H[0] = 1.
H[1] = x
for i in range(2,H.shape[0]):
H[i] = (x * H[i-1] - (i-1) * H[i-2] )
H = H / [math.sqrt(math.factorial(i)) for i in range(H.shape[0])]
return H
def __call__(self, x):
N = x.shape[0]
H = np.zeros((N, self._degree + 1))
for i in range(N):
H[i,:] = self.eval(x[i])
return H
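# Hedged usage check (not in the original module): the columns produced by
# Hermite1d are normalized probabilists' Hermite polynomials He_n(x)/sqrt(n!),
# orthonormal under the standard normal weight, so a Monte Carlo estimate of
# the Gram matrix should be close to the identity:
#     x = np.random.randn(200000)
#     H = Hermite1d(degree=3)(x)
#     print(np.round(H.T.dot(H) / x.shape[0], 2))  # ~ 4x4 identity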
class Legendre1d(object):
"""
Class that contains 1-dimensional Legendre polynomials.
"""
_degree = None
def __init__(self, degree = 1):
"""
Initializes the object
"""
assert isinstance(degree, int)
assert degree > 0
self._degree = degree
def eval(self, x):
H = np.zeros(self._degree + 1)
H[0] = 1.
H[1] = x
for i in range(2, H.shape[0]):
H[i] = ( (2.*i-1.)* x * H[i-1] - (i-1.)* H[i-2] ) / i
H = H / [math.sqrt(1. / (2.*i+1.)) for i in range(H.shape[0])]
return H
def __call__(self, x):
N = x.shape[0]
H = np.zeros((N, self._degree + 1))
for i in range(N):
H[i,:] = self.eval(x[i])
return H
class Laguerre1d(object):
"""
Class that contains 1-dimensional Laguerre polynomials.
"""
_degree = None
def __init__(self, degree = 1):
"""
Initializes the object
"""
assert isinstance(degree, int)
assert degree > 0
self._degree = degree
def eval(self, x):
        H = np.zeros(self._degree + 1)
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
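# Hedged usage example (space_groups is populated below): under P -1 the
# reflection (1, 2, 3) maps to itself and to (-1, -2, -3), both with unit
# phase factors because the translations are zero:
#     sg = space_groups['P -1']
#     hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))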
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
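# In the centred copies the centring vector is added without reduction, so a
# component can sum to a whole lattice translation (e.g. trans_num [1,1,1]
# over trans_den [2,2,1] above gives z = 1/1 = 1, equivalent to 0 modulo the
# lattice).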
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
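# The d (diamond) glides of F d d 2 translate by quarter cells, hence the
# trans_den entries of [4,4,4] above.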
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
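# The ':2' suffix (here and in 'P b a n :2' and 'P m m n :2' below) marks
# the International Tables origin choice 2 setting, with the origin placed
# at an inversion centre.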
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
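# The C-centred groups (Nos. 63-68, 'C ...') each list their eight
# point-group operations twice: once as above and once with the centring
# translation (1/2, 1/2, 0) added. Translation sums are stored unreduced
# (e.g. a component of 1 or -1/2 is equivalent to 0 or 1/2 modulo a full
# lattice translation).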
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
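# 'F m m m' (No. 69) is face-centred: the eight base operations appear four
# times each, offset by the centring translations (0,0,0), (0,1/2,1/2),
# (1/2,0,1/2) and (1/2,1/2,0), giving 32 entries in total.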
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
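# 'I m m m' (No. 71) and the other I-centred groups are body-centred: the
# eight base operations repeat once more with (1/2, 1/2, 1/2) added,
# giving 16 entries.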
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
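# The tetragonal groups start here with 'P 4' (No. 75). The matrix
# [0,-1,0, 1,0,0, 0,0,1] is the fourfold rotation about the c axis,
# (x, y, z) -> (-y, x, z); applied four times it returns the identity.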
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
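# 'P 41' (No. 76) carries a 4_1 screw axis: the fourfold rotation is coupled
# with a translation of c/4, so its successive powers pick up translations
# c/4, c/2 and 3c/4, exactly as encoded in trans_num/trans_den above.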
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
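# 'P 42' (No. 77) carries a 4_2 screw axis: both fourfold operations gain a
# translation of c/2, while their square (the twofold about c) has none.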
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
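# 'P 43' (No. 78) is the enantiomorphic partner of 'P 41': the screw
# translations 3c/4 and c/4 are swapped relative to No. 76.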
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
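# 'I 41' (No. 80) combines the 4_1 screw with body centring; as elsewhere in
# this table the summed translations are stored unreduced (e.g. (1, 1/2, 5/4)
# above is equivalent to (0, 1/2, 1/4) modulo lattice translations).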
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
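# 'P -4' (No. 81) is generated by the rotoinversion -4: the matrix
# [0,1,0, -1,0,0, 0,0,-1] sends (x, y, z) -> (y, -x, -z), a fourfold rotation
# combined with inversion; its square is the twofold rotation about c.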
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
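# 'P 4/m' (No. 83) adds the inversion to 'P 4': the four rotational
# operations above are followed by their four inversion-related partners.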
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
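# Minimal usage sketch (illustrative only; whether SpaceGroup keeps its
# transformation triples, and under what attribute name, is an assumption):
#
#   sg = space_groups['P 4/m']
#   point = N.array([0.12, 0.34, 0.56])
#   images = [N.dot(rot, point) + num/den.astype(float)
#             for rot, num, den in sg.transformations]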
import unittest
import numpy as np
from batchgenerators.augmentations.normalizations import range_normalization, zero_mean_unit_variance_normalization, \
mean_std_normalization, cut_off_outliers
class TestNormalization(unittest.TestCase):
def setUp(self):
np.random.seed(1234)
def test_range_normalization_per_channel(self):
print('Test test_range_normalization_per_channel. [START]')
data = 10*np.random.random((32, 4, 64, 56, 48))
data[:,0,:,:] = 30*data[:,0,:,:]
data3 = 5 * np.ones((8, 2, 64, 56, 48))
data4 = np.array([])
rng1 = (0,1)
rng2 = (-2,2)
rng3 = (0,1)
data_normalized = range_normalization(data, rnge=rng1, per_channel=True)
data_normalized2 = range_normalization(data, rnge=rng2, per_channel=True)
data_normalized3 = range_normalization(data3, rnge=rng3, per_channel=True)
data_normalized4 = range_normalization(data4, rnge=rng1, per_channel=True)
print('Test normalization with range [0,1]. [START]')
for b in range(data.shape[0]):
for c in range(data.shape[1]):
self.assertAlmostEqual(data_normalized[b, c, :, :].max(), rng1[1], msg="not scaled to correct max range limit")
self.assertAlmostEqual(data_normalized[b, c, :, :].min(), rng1[0], msg="not scaled to correct min range limit")
print('Test normalization with range [0,1]. [DONE]')
print('Test normalization with range [-2,2]. [START]')
for b in range(data.shape[0]):
for c in range(data.shape[1]):
self.assertAlmostEqual(data_normalized2[b, c, :, :].max(), rng2[1], msg="not scaled to correct max range limit")
self.assertAlmostEqual(data_normalized2[b, c, :, :].min(), rng2[0], msg="not scaled to correct min range limit")
print('Test normalization with range [-2,2]. [DONE]')
print('Test normalization of constant data with range [0,1]. [START]')
for b in range(data3.shape[0]):
for c in range(data3.shape[1]):
self.assertAlmostEqual(data_normalized3[b, c, :, :].max(), rng3[0], msg="not scaled to correct max range limit")
self.assertAlmostEqual(data_normalized3[b, c, :, :].min(), rng3[0], msg="not scaled to correct min range limit")
print('Test normalization of constant data with range [0,1]. [DONE]')
print('Test normalization of empty data array with range [0,1]. [START]')
self.assertEqual(data_normalized4.size, 0, msg="not an empty array")
print('Test normalization of empty data array with range [0,1]. [DONE]')
# print('Test RuntimeWarning of constant data with zero eps. [START]')
# self.assertWarns(RuntimeWarning, range_normalization, data3, rnge = rng3, per_channel = True, eps = 0)
# print('Test RuntimeWarning of constant data with zero eps. [DONE]')
print('Test test_range_normalization_per_channel. [DONE]')
def test_range_normalization_whole_image(self):
print('Test test_range_normalization_whole_image. [START]')
data = 10 * np.random.random((32, 4, 64, 56, 48))
data[:, 0, :, :] = 3 * data[:, 0, :, :]
data3 = 5 * np.ones((8, 2, 64, 56, 48))
data4 = np.array([])
rng1 = (0, 1)
rng2 = (-2, 2)
rng3 = (0, 1)
data_normalized1 = range_normalization(data, rnge=rng1, per_channel=False, eps=0)
data_normalized2 = range_normalization(data, rnge=rng2, per_channel=False)
data_normalized3 = range_normalization(data3, rnge=rng3, per_channel=False)
data_normalized4 = range_normalization(data4, rnge=rng1, per_channel=False)
print('Test normalization with range [0,1]. [START]')
for b in range(data.shape[0]):
self.assertAlmostEqual(data_normalized1[b].min(), rng1[0], delta=1e-4, msg="not scaled to correct min range limit")
self.assertAlmostEqual(data_normalized1[b].max(), rng1[1], delta=1e-4, msg="not scaled to correct max range limit")
self.assertEqual(np.unravel_index(np.argmax(data_normalized1[b], axis=None), data_normalized1[b].shape)[0], 0, msg="max not in the right channel")
print('Test normalization with range [0,1]. [DONE]')
print('Test normalization with range [-2, 2]. [START]')
for b in range(data.shape[0]):
self.assertAlmostEqual(data_normalized2[b].min(), rng2[0], delta=1e-4, msg="not scaled to correct min range limit")
self.assertAlmostEqual(data_normalized2[b].max(), rng2[1], delta=1e-4, msg="not scaled to correct max range limit")
self.assertEqual(np.unravel_index(np.argmax(data_normalized2[b], axis=None), data_normalized1[b].shape)[0], 0, msg="max not in the right channel")
print('Test normalization with range [-2, 2]. [DONE]')
print('Test normalization of constant data with range [0,1]. [START]')
for b in range(data3.shape[0]):
self.assertAlmostEqual(data_normalized3[b].min(), rng3[0], delta=1e-4, msg="not scaled to correct min range limit")
self.assertAlmostEqual(data_normalized3[b].max(), rng3[0], delta=1e-4, msg="not scaled to correct max range limit")
# self.assertEqual(np.unravel_index(np.argmax(data_normalized3[b], axis=None), data_normalized1[b].shape)[0], 0, msg="max not in the right channel")
print('Test normalization of constant data with range [0,1]. [DONE]')
print('Test normalization of empty data array with range [0,1]. [START]')
self.assertEqual(data_normalized4.size, 0, msg="not an empty array")
print('Test normalization of empty data array with range [0,1]. [DONE]')
# print('Test RuntimeWarning of constant data with zero eps. [START]')
# self.assertWarns(RuntimeWarning, range_normalization, data3, rnge = rng3, per_channel = False, eps = 0)
# print('Test RuntimeWarning of constant data with zero eps. [DONE]')
print('Test test_range_normalization_whole_image. [DONE]')
def test_zero_mean_unit_variance_per_channel(self):
print('Test test_zero_mean_unit_variance_per_channel. [START]')
data = np.random.random((32, 4, 64, 56, 48))
data2 = 5 * np.ones((32, 4, 64, 56, 48))
data3 = np.array([])
data_normalized1 = zero_mean_unit_variance_normalization(data, per_channel=True, epsilon=0)
data_normalized2 = zero_mean_unit_variance_normalization(data2, per_channel=True)
data_normalized3 = zero_mean_unit_variance_normalization(data3, per_channel=True)
print('Test standard use-case. [START]')
for b in range(data.shape[0]):
for c in range(data.shape[1]):
self.assertAlmostEqual(data_normalized1[b, c, :, :].mean(), 0, msg="mean not zeros")
self.assertAlmostEqual(data_normalized1[b, c, :, :].std(), 1, msg="std not 1")
print('Test standard use-case. [DONE]')
print('Test constant input data. [START]')
for b in range(data2.shape[0]):
for c in range(data2.shape[1]):
self.assertAlmostEqual(data_normalized2[b, c, :, :].mean(), 0, msg="mean not zeros")
self.assertAlmostEqual(data_normalized2[b, c, :, :].std(), 0, msg="std not 1")
print('Test constant input data. [DONE]')
# print('Test RuntimeWarning of constant data with zero eps. [START]')
# self.assertWarns(RuntimeWarning, zero_mean_unit_variance_normalization, data2, per_channel=True, epsilon=0)
# print('Test RuntimeWarning of constant data with zero eps. [DONE]')
print('Test normalization of empty data array. [START]')
self.assertEqual(data_normalized3.size, 0, msg="not an empty array")
print('Test normalization of empty data array. [DONE]')
print('Test test_zero_mean_unit_variance_per_channel. [DONE]')
def test_zero_mean_unit_variance_whole_image(self):
print('Test test_zero_mean_unit_variance_whole_image. [START]')
data = np.random.random((32, 4, 64, 56, 48))
        data2 = 5 * np.ones((32, 4, 64, 56, 48))
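        # Assumed continuation (sketch): mirrors test_zero_mean_unit_variance_per_channel
        # above, with per_channel=False so each sample is normalized as a whole.
        data3 = np.array([])
        data_normalized1 = zero_mean_unit_variance_normalization(data, per_channel=False, epsilon=0)
        data_normalized2 = zero_mean_unit_variance_normalization(data2, per_channel=False)
        data_normalized3 = zero_mean_unit_variance_normalization(data3, per_channel=False)
        print('Test standard use-case. [START]')
        for b in range(data.shape[0]):
            self.assertAlmostEqual(data_normalized1[b].mean(), 0, msg="mean not zeros")
            self.assertAlmostEqual(data_normalized1[b].std(), 1, msg="std not 1")
        print('Test standard use-case. [DONE]')
        print('Test constant input data. [START]')
        for b in range(data2.shape[0]):
            self.assertAlmostEqual(data_normalized2[b].mean(), 0, msg="mean not zeros")
            self.assertAlmostEqual(data_normalized2[b].std(), 0, msg="std not 0")
        print('Test constant input data. [DONE]')
        print('Test normalization of empty data array. [START]')
        self.assertEqual(data_normalized3.size, 0, msg="not an empty array")
        print('Test normalization of empty data array. [DONE]')
        print('Test test_zero_mean_unit_variance_whole_image. [DONE]')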
"""Multivariate Normal Distribution."""
import numpy
from scipy import special
from chaospy.bertran import bindex
from .normal import normal
from ..baseclass import Dist
class MvNormal(Dist):
"""
Multivariate Normal Distribution.
Args:
loc (float, Dist): Mean vector
scale (float, Dist): Covariance matrix or variance vector if scale is a 1-d vector.
Examples:
>>> distribution = chaospy.MvNormal([1, 2], [[1, 0.6], [0.6, 1]])
>>> print(distribution)
MvNormal(loc=[1.0, 2.0], scale=[[1.0, 0.6], [0.6, 1.0]])
>>> print(chaospy.Cov(distribution))
[[1. 0.6]
[0.6 1. ]]
>>> mesh = numpy.meshgrid(*[numpy.linspace(0, 1, 5)[1:-1]]*2)
>>> print(numpy.around(distribution.inv(mesh), 4))
[[[0.3255 1. 1.6745]
[0.3255 1. 1.6745]
[0.3255 1. 1.6745]]
<BLANKLINE>
[[1.0557 1.4604 1.8651]
[1.5953 2. 2.4047]
[2.1349 2.5396 2.9443]]]
>>> print(numpy.around(distribution.fwd(distribution.inv(mesh)), 4))
[[[0.25 0.5 0.75]
[0.25 0.5 0.75]
[0.25 0.5 0.75]]
<BLANKLINE>
[[0.25 0.25 0.25]
[0.5 0.5 0.5 ]
[0.75 0.75 0.75]]]
>>> print(numpy.around(distribution.pdf(distribution.inv(mesh)), 4))
[[0.0991 0.146 0.1452]
[0.1634 0.1989 0.1634]
[0.1452 0.146 0.0991]]
>>> print(numpy.around(distribution.sample(4), 4))
[[ 1.395 -0.2003 2.6476 0.9553]
[ 3.1476 0.6411 1.5946 1.7647]]
>>> print(numpy.around(distribution.mom((1, 2)), 4))
7.4
"""
def __init__(self, loc=[0, 0], scale=[[1, .5], [.5, 1]]):
loc = numpy.asfarray(loc)
scale = numpy.asfarray(scale)
assert len(loc) == len(scale)
self._repr = {"loc": loc.tolist(), "scale": scale.tolist()}
C = numpy.linalg.cholesky(scale)
Ci = numpy.linalg.inv(C)
Dist.__init__(self, C=C, Ci=Ci, loc=loc)
def _cdf(self, x, C, Ci, loc):
return special.ndtr(numpy.dot(Ci, (x.T-loc.T).T))
def _ppf(self, q, C, Ci, loc):
return (numpy.dot(C, special.ndtri(q)).T+loc.T).T
def _pdf(self, x, C, Ci, loc):
det = numpy.linalg.det(numpy.dot(C,C.T))
x_ = numpy.dot(Ci.T, (x.T-loc.T).T)
out = numpy.ones(x.shape)
out[0] = numpy.e**(-.5*numpy.sum(x_*x_, 0))/numpy.sqrt((2*numpy.pi)**len(Ci)*det)
return out
def _bnd(self, x, C, Ci, loc):
scale = numpy.sqrt(numpy.diag(numpy.dot(C,C.T)))
lo,up = numpy.zeros((2,)+x.shape)
lo.T[:] = (-7.5*scale+loc)
up.T[:] = (7.5*scale+loc)
return lo, up
def _mom(self, k, C, Ci, loc):
scale = numpy.dot(C, C.T)
out = 0.
for idx, kdx in enumerate(bindex(k, dim=len(C), sort="G")):
coef = numpy.prod(special.comb(k.T, kdx).T, 0)
diff = k.T - kdx
pos = diff >= 0
diff = diff*pos
pos = numpy.all(pos)
loc_ = numpy.prod(loc**diff)
out += pos*coef*loc_*isserlis_moment(tuple(kdx), scale)
return float(out)
def __len__(self):
return len(self.prm["C"])
def isserlis_moment(k, scale):
"""
Raw statistical moments centralized Normal distribution using Isserlis'
theorem.
Args:
k (Tuple[int, ...]):
Moment orders.
scale (ndarray):
Covariance matrix defining dependencies between variables.
Returns:
Raw statistical moment of order ``k`` given covariance ``scale``.
Examples:
>>> scale = 0.5*numpy.eye(3)+0.5
>>> isserlis_moment((2, 2, 2), scale)
3.5
>>> isserlis_moment((0, 0, 0), scale)
1.0
>>> isserlis_moment((1, 0, 0), scale)
0.0
>>> isserlis_moment((0, 1, 1), scale)
0.5
>>> isserlis_moment((0, 0, 2), scale)
1.0
"""
if not isinstance(k, numpy.ndarray):
        k = numpy.asarray(k)
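    # Assumed continuation (sketch): the recursive pairwise reduction behind
    # Isserlis' theorem.  An odd total order has moment 0; otherwise one factor
    # of the first active variable is paired against every variable in turn
    # through the covariance matrix `scale`.
    if numpy.sum(k) % 2 == 1 or numpy.any(k < 0):
        return 0.
    idx = numpy.nonzero(k)[0]
    if not idx.size:
        return 1.
    idx = idx[0]
    eye = numpy.eye(len(k), dtype=int)
    # same-variable pairing: k[idx]-1 remaining copies of variable idx
    out = (k[idx] - 1)*scale[idx, idx]*isserlis_moment(k - 2*eye[idx], scale)
    for idj in range(idx+1, len(k)):
        # cross pairings with every later variable (earlier ones have k == 0)
        out += k[idj]*scale[idx, idj]*isserlis_moment(k - eye[idx] - eye[idj], scale)
    return float(out)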
"""
computes individual-level functional connectivity gradients
by aligning them to the group-level gradients
usage: $ python s02_hipp_fc_grads_indiv.py HCP_165840 LSUB
"""
import os, sys
import h5py
import numpy as np
from brainspace.gradient import GradientMaps
# data dir
ddir = '../data/tout_hippoc/'
odir = '../data/tout_hippoc_grad/'
# LEFT HIPPOCAMPUS
# read-in group-level fc gradient
group_gradient_file = '../data/tout_group/Hmean709connGradients_left.h5'
with h5py.File(group_gradient_file, 'r') as g:
    group_gradients = np.array(g['gradients_'])
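# Assumed continuation (sketch): load the subject-level FC matrix, compute its
# gradients, and Procrustes-align them to the group-level reference loaded
# above.  The subject file naming below is hypothetical.
subj = sys.argv[1]          # e.g. 'HCP_165840'
hemi = sys.argv[2]          # e.g. 'LSUB'
with h5py.File(os.path.join(ddir, '%s_%s.h5' % (subj, hemi)), 'r') as f:
    subj_fc = np.array(f[list(f.keys())[0]])
gm = GradientMaps(n_components=group_gradients.shape[1], approach='dm',
                  kernel='normalized_angle', alignment='procrustes',
                  random_state=0)
gm.fit(subj_fc, reference=group_gradients)
np.save(os.path.join(odir, '%s_%s_gradients_aligned.npy' % (subj, hemi)),
        gm.aligned_)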
'''
Image Restriction Function File
Description: This file contains all functions for the verifying image restrictions.
'''
import os
import re
import cv2
import yaml
from datetime import datetime
import exifread
import numpy as np
class imgRestFuncs(object):
'Class containing all image restriction functions'
def __init__(self, confPath):
restrict_conf = {'imgLengthMax': 6000, 'imgWidthMin': 100, 'acceptedFileTypes': ['.jpeg', '.jpg', '.png', '.JPG'], 'acceptedMobileDevices': ['iPhone 5', 'iPhone 5s', 'iPhone 6', 'iPhone 6s', 'DROIDX', 'SM-G730V', 'iPhone SE', 'SM-G920V'], 'imgMaxSizeNumber': 4000000, 'imgWidthMax': 6000, 'imgLengthMin': 100}
if os.path.exists(confPath + "/config/image_restriction_conf.yaml"):
self.criteria = self._import_yaml(confPath + "/config/image_restriction_conf.yaml")
else:
if not os.path.exists(confPath + "/config/"):
os.makedirs(confPath + "/config/")
with open(confPath + "/config/image_restriction_conf.yaml", 'w') as outfile:
yaml.dump(restrict_conf, outfile, default_flow_style=False)
self.criteria = self._import_yaml(confPath + "/config/image_restriction_conf.yaml")
def sigm(self, x):
'''
Purpose: sigmoid function that takes in a value and returns a value from 0 to 1
Inputs: float
Outputs: None
Returns: Float between 0, 1
Assumptions: N/A
'''
return 1 / (1 + np.exp(-x))
def is_device(self, device):
'''
Purpose: The purpose of this function is to determine whether or not the device the
image was taken on is an accepted mobile device.
Inputs: string device
Outputs: None
Returns: Boolean
Assumptions: N/A
'''
if device in self.criteria['acceptedMobileDevices']:
return True
else:
return False
def is_edited(self, modified, created):
'''
Purpose: The purpose of this function is to determine whether or not the image was
altered from its original form. I.e. do the modification and creation dates coincide.
Inputs: datetime created, datetime modified
Outputs: None
Returns: Boolean
Assumptions: N/A
'''
if (created == modified):
return True
else:
return False
def is_landscape(self, pathname):
'''
Purpose: The purpose of this function is to determine whether or not the image
contains a direct landscape with sky and view.
Inputs: pathname for image
Outputs: None
Returns: Boolean
Assumptions: N/A
'''
img = cv2.imread(pathname,1)
return self._is_sky(img)
def is_size(self, fileSize):
'''
        Purpose: The purpose of this function is to determine whether or not the size of
                 the image is within the configured maximum (imgMaxSizeNumber, 4000000 bytes by default).
        Inputs: int fileSize
Outputs: None
Returns: Boolean
Assumptions: N/A
'''
if(fileSize > self.criteria['imgMaxSizeNumber']):
return False
else:
return True
def is_type(self, fileType):
'''
Purpose: The purpose of this function is to determine whether or not the image is
an accepted file type.
Inputs: string fileType
Outputs: None
Returns: Boolean
Assumptions: N/A
'''
if fileType in self.criteria['acceptedFileTypes']:
return True
else:
return False
def is_res(self, imageWidth, imageLength):
'''
Purpose: The purpose of this function is to determine whether or not the image
exceeds the minimum resolution.
Inputs: int imageWidth, int imageLength
Outputs: None
Returns: Boolean
Assumptions: N/A
'''
        if (imageWidth >= self.criteria['imgWidthMin']) and (imageLength >= self.criteria['imgLengthMin']):
            if (imageWidth <= self.criteria['imgWidthMax']) and (imageLength <= self.criteria['imgLengthMax']):
                return True
            else:
                return False
        else:
            # below the minimum resolution
            return False
def _is_sky(self, img):
'''
Purpose: The purpose of this function is to determine whether or not the image contains
a valid sky or not.
Inputs: numpy.ndarray (loaded image information)
Outputs: None
Returns: Boolean (valid or invalid image)
Assumptions: N/A
'''
syn0 = np.array([[0.6106635051820115, -1.2018987127529588, -10.344605820189082, 1.1911213385074928, -6.818421664371254, 0.7888012143578024, 0.1930026599192343, 2.3468732267729644, -0.8629627172245428, -4.855127665505846, -8.782456796605247, -6.495787542595586, -1.42453153150294, -0.91145196348796, -0.34523737705411006],
[-1.3963274415314406, -1.4612339780784143, -2.9000212540397685, -3.9905541370795463, -3.4490261869089287, -4.30542395055999, -2.6069427860345145, 7.201038210239841, -2.205826668689026, -2.493364425571145, -1.9813891706545306, -2.235792731073901, -7.475941696773453, -2.68683663270719, 4.173252030927632],
[-0.5585916670209942, 0.3126863684210608, 2.142283443670229, 0.6422582372446218, 0.8699959804142926, 1.2677877625877656, 0.697665181045127, -4.116900256696914, 0.8735456225659666, -0.842712533453469, 1.1200739327640843, -0.703797233889045, 3.3491098693459187, 1.1383933429060538, -1.1608021413621255],
[-0.0272945986039962, 1.3810803094898392, -0.3000751044667501, 0.530598483693932, -0.25230337237162953, 1.227322205409595, 0.7475404385595492, -4.708759516668004, 1.5170799948290143, -1.309427991379729, 0.13045771401578515, -1.2421270434590852, 5.141812566546993, 1.7478932634716013, -1.230678486397662],
[-1.5471106279095554, -2.524731157065115, 1.0015792402542971, -3.649008251507766, -0.43193380458921354, -3.64779032623984, -1.2585955585366164, 7.075627752142407, -2.3434697661076553, -0.17324616725164094, 0.012324380796953634, 0.1201495802730507, -6.468182569926108, -1.0450745719122267, 3.1541002784637886],
[0.5316498085997584, 1.8187154828158774, 0.6800840386512677, 3.154341773471645, -0.633596948312113, 2.770528037922082, 0.22043514814321089, -7.246507554283216, 1.3361606503168058, -1.8011391721619912, -0.7156002807301286, -0.37783520885870486, 6.373115811402003, 0.22971478266471973, -2.857966397739584]])
syn1 = np.array([[5.177044095570317],
[6.5898220063556],
[-20.881638524287233],
[8.880383432994854],
[-14.676726398416983],
[9.192745916291782],
[5.80497325212264],
[-16.424434027307676],
[6.820380663953862],
[-9.664844259044122],
[-17.73177812938899],
[-11.809681114121691],
[14.747050641950713],
[6.009983025197835],
[-9.571035518824162]])
        mask = np.zeros(img.shape[:2], np.uint8)
from __future__ import division
import numpy as np
from data_tools import get_inputFileParameter
import matplotlib
matplotlib.use('PDF') # or Agg (for png), SVG, PS
import matplotlib.pyplot as plt
from data_tools import commentHdr
#--------------------------------------------------------------------------------------------
def uvel(DI, profName="uvel") :
mfile = DI['pdir']+"means_" + profName + ".dat"
sfile = DI['pdir']+"sig_" + profName + ".dat"
data = np.loadtxt(mfile, comments=commentHdr)
y = data[:,1:]
    data = np.loadtxt(sfile, comments=commentHdr)
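    # Assumed continuation (sketch): the sigma file shares the layout of the
    # means file, so the profile columns come off the same way; the plot and
    # output file name below are illustrative.
    sig = data[:,1:]
    fig, ax = plt.subplots()
    ax.plot(y, '-', color='k')
    ax.plot(y + sig, ':', color='gray')
    ax.plot(y - sig, ':', color='gray')
    ax.set_ylabel(profName)
    plt.savefig(DI['pdir'] + profName + '_mean_sig.pdf')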
import os
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV, cross_validate
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.linear_model import Lasso, LassoCV, LogisticRegressionCV, LogisticRegression
from sklearn.linear_model import ElasticNet, ElasticNetCV, enet_path
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import auc, roc_curve
from utils import kernel
from mics import classifier_mics
'''
Function names follow the scikit-learn naming as closely as possible, so each function's purpose is easy to understand.
No separate leave-one-out function is implemented: to use leave-one-out, set the fold count of the K-fold cross-validation parameter to the number of samples (the approach recommended by the scikit-learn documentation).
Using leave-one-out inside grid search is not recommended; with many candidate parameters the model becomes extremely expensive.
'''
class lasso():
'''LASSO特征选择的方法集锦,直接在class中选择是否进行交叉验证
输入:
X_train, X_test, y_train, y_test: 训练集和测试集的特征与标签
feature_name: 特征名称,顺序和X的列必须对应
path: 记录文件的存储路径,自行定义
cv_val:布尔型,是否进行网格搜索交叉验证
'''
def __init__(self, X_train, X_test, y_train, y_test, feature_name, path, cv_val=True):
self.X_train = X_train
self.X_test = X_test
self.y_train = y_train
self.y_test = y_test
self.name = feature_name
self.cv_val = cv_val
self.path = path
def lasso(self, alpha, cv):
        '''Feature selection with LASSO, run once; features whose coefficients are non-zero are kept.
        The result includes the feature-selected training and test sets, plus the feature names and weights; each feature name has one weight, in matching order.
        Inputs:
            alpha: the parameter alpha
            cv: int, number of folds if cross-validation is used
        Outputs:
            best_alpha (only when cross-validation is used): optimal lasso penalty parameter
            new_train_feature: feature matrix of the selected training set
            new_test_feature: feature matrix of the selected test set
            new_feature_name: names of the selected features
            feature_weight: coefficients of the selected features
'''
if self.cv_val is True:
model_lasso = LassoCV(alphas=alpha, cv=cv)
model_lasso.fit(self.X_train, self.y_train)
coef = pd.Series(model_lasso.coef_)
print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
img_path = os.path.join(self.path, 'lassoCV')
os.makedirs(img_path, exist_ok=True)
            # best lasso penalty parameter found by cross-validation
best_alpha = model_lasso.alpha_
print('-----------------------------')
print('Best LASSO alpha:')
print(best_alpha)
            # keep the features whose lasso weights are non-zero
model = SelectFromModel(model_lasso, prefit=True)
            # filter the training- and test-set features with the fitted lasso
X_new_train = model.transform(self.X_train)
X_new_test = model.transform(self.X_test)
            # mask over all features: True for kept features, False for dropped ones
mask = model.get_support()
new_feature_name = []
feature_weight = []
            # use the mask to collect the names and weights of the kept features
for bool, feature, coef in zip(mask, self.name, coef):
if bool:
new_feature_name.append(feature)
feature_weight.append(coef)
            # attach the feature names to the kept training- and test-set features
new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
feature_weight = pd.Series(feature_weight)
return best_alpha, new_train_feature, new_test_feature, new_feature_name, feature_weight
else:
model_lasso = Lasso(alpha=alpha)
model_lasso.fit(self.X_train, self.y_train)
coef = pd.Series(model_lasso.coef_)
print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
img_path = os.path.join(self.path, 'lasso_only')
os.makedirs(img_path, exist_ok=True)
            # keep the features whose lasso weights are non-zero
model = SelectFromModel(model_lasso, prefit=True)
            # filter the training- and test-set features with the fitted lasso
X_new_train = model.transform(self.X_train)
X_new_test = model.transform(self.X_test)
            # mask over all features: True for kept features, False for dropped ones
mask = model.get_support()
new_feature_name = []
feature_weight = []
            # use the mask to collect the names and weights of the kept features
for bool, feature, coef in zip(mask, self.name, coef):
if bool:
new_feature_name.append(feature)
feature_weight.append(coef)
            # attach the feature names to the kept training- and test-set features
new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
feature_weight = pd.Series(feature_weight)
return new_train_feature, new_test_feature, new_feature_name, feature_weight
def lasso_shuffle(self, shuffle_time, alpha_range, cv=10):
        '''Shuffle the data set on every iteration of a long loop, refit LASSO each time, and count how often each feature is selected.
        Inputs:
            shuffle_time: number of shuffle iterations
            alpha_range: the alpha value(s); an int without grid search, a list with grid search
            cv: number of folds if cross-validation is used
        Outputs:
            new_train_feature: training features after selection (this matrix and the next matter less; in the end the index is used to re-extract columns from the original feature matrix)
            new_test_feature: test features after selection
            select_feature_name: names of the selected features
            select_feature_name_freq: for each feature name, how many times it appeared over the shuffle loops
            feature_weight: for each feature name, its coefficient
            select_feature_index: for each feature name, its index in the original feature matrix, so the columns can be extracted directly once selection is done
'''
        # write the returned values to a txt file
lasso_txt = open(os.path.join(self.path, 'lasso_shuffle.txt'), 'w')
lasso_txt.write('LASSO parameters set:\n')
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('Grid search: % s' % self.cv_val)
lasso_txt.write('\nAlpha range: % s' % alpha_range)
lasso_txt.write('\nShuffle time: % s' % shuffle_time)
lasso_txt.write('\nGrid search cv-fold: % s' % cv)
lasso_txt.write('\n---------------------------------------------\n')
if self.cv_val is True:
            # initialize the summed weights to 0 and the feature list to empty
            coef_sum = 0
            select_list = []
            # initialize the list of best alpha values
            alpha_list = []
            # shuffle loop: store the selected feature names on every iteration
            for i in range(shuffle_time):
                # shuffle the data
X, y = shuffle(self.X_train, self.y_train)
kfold = StratifiedKFold(n_splits=cv, shuffle=False)
model_lasso = LassoCV(alphas=alpha_range, cv=cv)
model_lasso.fit(X, y)
coef = pd.Series(model_lasso.coef_)
print("% s th shuffle, Lasso picked " % i + str(
sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
                # best lasso penalty parameter found by cross-validation
alpha = model_lasso.alpha_
alpha_list.append(alpha)
print('best alpha value is % s' % alpha)
                # accumulate the coef of every iteration
                coef_sum += model_lasso.coef_
                # extract the mask of the non-zero features
                model = SelectFromModel(model_lasso, prefit=True)
                # mask over all features: True for kept features, False for dropped ones
                mask = model.get_support()
                # use the mask to store the names of the kept features in select_list
for bool, name in zip(mask, self.name):
if bool:
select_list.append(name)
            # average coef over all iterations
            coef_mean = coef_sum / shuffle_time
            # the features of every iteration are stored in select_list; count how often each feature appears and keep the counts in the dict feature_freq
            feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
            # the alpha of every iteration is stored in alpha_list; count how often each alpha appears and keep the counts in the dict alpha_freq
            alpha_freq = dict(zip(*np.unique(alpha_list, return_counts=True)))
            # sort the features by selection frequency, descending, and store names and counts separately
select_feature_name = []
select_feature_name_freq = []
for k in sorted(feature_freq, key=feature_freq.__getitem__, reverse=True):
                # in the order of the feature names, store each feature's appearance count in select_feature_name_freq
                select_feature_name_freq.append(feature_freq[k])
                # store the feature name in select_feature_name, as a list
                select_feature_name.append(k)
            # get the indices of the features kept by lasso
            select_feature_index = []
            # convert the kept feature names to a list
            name_list = list(select_feature_name)
            # convert all original feature names to a list
            all_name_list = list(self.name)
            # find each selected feature's position in the original feature list and store all positions in select_feature_index
for i in range(len(select_feature_name)):
index = all_name_list.index(name_list[i])
select_feature_index.append(index)
            # sort the alpha values by appearance frequency, descending, and store the values and counts separately
alpha_value = []
alpha_value_freq = []
for k in sorted(alpha_freq, key=alpha_freq.__getitem__, reverse=True):
                # in the order of the alpha values, store each value's appearance count in alpha_value_freq
                alpha_value_freq.append(alpha_freq[k])
                # store the alpha value in alpha_value, as a list
alpha_value.append(k)
print('alpha value % s appeared % s times in the loop' % (k, alpha_freq[k]))
            # use the indices to extract the selected feature matrices
new_train_feature = self.X_train[:, select_feature_index]
new_test_feature = self.X_test[:, select_feature_index]
feature_weight = coef_mean[select_feature_index]
            # write the outputs to the txt file
lasso_txt.write('\nSelected feature index:\n')
lasso_txt.write(str(select_feature_index))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature weight: \n')
lasso_txt.write(str(feature_weight))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature name:\n')
lasso_txt.write(str(select_feature_name))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature appearance frequency:\n')
lasso_txt.write(str(select_feature_name_freq))
lasso_txt.write('\n---------------------------------------------\n')
return new_train_feature, new_test_feature, select_feature_name, \
select_feature_name_freq, feature_weight, select_feature_index
else:
            # initialize the summed weights to 0 and the feature list to empty
            coef_sum = 0
            select_list = []
            # shuffle loop: store the selected feature names on every iteration
            for i in range(shuffle_time):
                # shuffle the data
X, y = shuffle(self.X_train, self.y_train)
model_lasso = Lasso(alpha=alpha_range)
model_lasso.fit(X, y)
coef = pd.Series(model_lasso.coef_)
print("% s th shuffle, Lasso picked " % i + str(
sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
                # accumulate the coef of every iteration
                coef_sum += model_lasso.coef_
                # extract the mask of the non-zero features
                model = SelectFromModel(model_lasso, prefit=True)
                # mask over all features: True for kept features, False for dropped ones
                mask = model.get_support()
                # use the mask to store the names of the kept features in select_list
for bool, name in zip(mask, self.name):
if bool:
select_list.append(name)
            # average coef over all iterations
            coef_mean = coef_sum / shuffle_time
            # the features of every iteration are stored in select_list; count how often each feature appears and keep the counts in the dict feature_freq
            feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
            # sort the features by selection frequency, descending, and store names and counts separately
select_feature_name = []
select_feature_name_freq = []
for k in sorted(feature_freq, key=feature_freq.__getitem__, reverse=True):
                # in the order of the feature names, store each feature's appearance count in select_feature_name_freq
                select_feature_name_freq.append(feature_freq[k])
                # store the feature name in select_feature_name, as a list
                select_feature_name.append(k)
            # get the indices of the features kept by lasso
            select_feature_index = []
            # convert the kept feature names to a list
            name_list = list(select_feature_name)
            # convert all original feature names to a list
            all_name_list = list(self.name)
            # find each selected feature's position in the original feature list and store all positions in select_feature_index
for i in range(len(select_feature_name)):
index = all_name_list.index(name_list[i])
select_feature_index.append(index)
            # use the indices to extract the selected feature matrices
new_train_feature = self.X_train[:, select_feature_index]
new_test_feature = self.X_test[:, select_feature_index]
feature_weight = coef_mean[select_feature_index]
            # write the outputs to the txt file
lasso_txt.write('\nSelected feature index:\n')
lasso_txt.write(str(select_feature_index))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature weight: \n')
lasso_txt.write(str(feature_weight))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature name:\n')
lasso_txt.write(str(select_feature_name))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature appearance frequency:\n')
lasso_txt.write(str(select_feature_name_freq))
lasso_txt.write('\n---------------------------------------------\n')
return new_train_feature, new_test_feature, select_feature_name, \
select_feature_name_freq, feature_weight, select_feature_index
def logis_lasso(self, alpha, cv):
        '''Feature selection with logistic LASSO; cross-validation for choosing the penalty parameter alpha is optional.
        The result includes the feature-selected training and test sets, plus the feature names and weights; each feature name has one weight, in matching order.
        Inputs:
            alpha: penalty parameter; since this is LASSO it plays the role of alpha
            cv: number of folds if cross-validation is used
        Outputs:
            best_alpha (only when cross-validation is used): optimal lasso penalty parameter
            new_train_feature: feature matrix of the selected training set
            new_test_feature: feature matrix of the selected test set
            new_feature_name: names of the selected features
            feature_weight: weight of each selected feature
'''
if self.cv_val is True:
            # the l1 penalty needs a solver that supports it (liblinear here)
            logis_lasso = LogisticRegressionCV(Cs=alpha, cv=cv, penalty='l1', solver='liblinear')
logis_lasso.fit(self.X_train, self.y_train)
coef = pd.Series(np.ravel(logis_lasso.coef_))
print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
img_path = os.path.join(self.path, 'lassoCV')
os.makedirs(img_path, exist_ok=True)
            # best penalty parameter found by cross-validation (C_, not the full Cs_ grid)
            best_alpha = logis_lasso.C_[0]
print('-----------------------------')
print('Best LASSO alpha:')
print(best_alpha)
            # keep the features whose lasso weights are non-zero
model = SelectFromModel(logis_lasso, prefit=True)
            # filter the training- and test-set features with the fitted lasso
X_new_train = model.transform(self.X_train)
X_new_test = model.transform(self.X_test)
            # mask over all features: True for kept features, False for dropped ones
mask = model.get_support()
new_feature_name = []
feature_weight = []
            # use the mask to collect the names and weights of the kept features
for bool, feature, coef in zip(mask, self.name, coef):
if bool:
new_feature_name.append(feature)
feature_weight.append(coef)
            # attach the feature names to the kept training- and test-set features
new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
feature_weight = pd.Series(feature_weight)
return best_alpha, new_train_feature, new_test_feature, new_feature_name, feature_weight
else:
            # liblinear supports the l1 penalty; ravel flattens the (1, n_features) coef_
            logis_lasso = LogisticRegression(C=alpha, penalty='l1', solver='liblinear')
            logis_lasso.fit(self.X_train, self.y_train)
            coef = pd.Series(np.ravel(logis_lasso.coef_))
print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
img_path = os.path.join(self.path, 'lasso_only')
os.makedirs(img_path, exist_ok=True)
            # keep the features whose lasso weights are non-zero
model = SelectFromModel(logis_lasso, prefit=True)
            # filter the training- and test-set features with the fitted lasso
X_new_train = model.transform(self.X_train)
X_new_test = model.transform(self.X_test)
            # mask over all features: True for kept features, False for dropped ones
mask = model.get_support()
new_feature_name = []
feature_weight = []
            # use the mask to collect the names and weights of the kept features
for bool, feature, coef in zip(mask, self.name, coef):
if bool:
new_feature_name.append(feature)
feature_weight.append(coef)
            # attach the feature names to the kept training- and test-set features
new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
feature_weight = pd.Series(feature_weight)
return new_train_feature, new_test_feature, new_feature_name, feature_weight
def logis_lasso_shuffle(self, alpha_range, shuffle_time=100, cv=10):
        '''Feature selection with logistic lasso: shuffle the data set on every iteration, refit each time, and count how often each feature is selected.
        Inputs:
            shuffle_time: number of shuffle iterations
            alpha_range: the alpha value(s); an int without grid search, a list with grid search
            cv: number of folds if cross-validation is used
        Outputs:
            new_train_feature: training features after selection (this matrix and the next matter less; in the end the index is used to re-extract columns from the original feature matrix)
            new_test_feature: test features after selection
            select_feature_name: names of the selected features
            select_feature_name_freq: for each feature name, how many times it appeared over the shuffle loops
            feature_weight: for each feature name, its coefficient
            select_feature_index: for each feature name, its index in the original feature matrix, so the columns can be extracted directly once selection is done
'''
        # write the returned values to a txt file
lasso_txt = open(os.path.join(self.path, 'logistic lasso_shuffle.txt'), 'w')
lasso_txt.write('LASSO parameters set:\n')
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('Grid search: % s' % self.cv_val)
lasso_txt.write('\nAlpha range: % s' % alpha_range)
lasso_txt.write('\nShuffle time: % s' % shuffle_time)
lasso_txt.write('\nGrid search cv-fold: % s' % cv)
lasso_txt.write('\n---------------------------------------------\n')
if self.cv_val is True:
            # initialize the summed weights to 0 and the feature list to empty
            coef_sum = 0
            select_list = []
            # initialize the list of best alpha values
            alpha_list = []
            # shuffle loop: store the selected feature names on every iteration
            for i in range(shuffle_time):
                # shuffle the data
X, y = shuffle(self.X_train, self.y_train)
kfold = StratifiedKFold(n_splits=cv, shuffle=False)
                # the l1 penalty needs a solver that supports it (liblinear here)
                model_lasso = LogisticRegressionCV(Cs=alpha_range, cv=cv, penalty='l1', solver='liblinear')
model_lasso.fit(X, y)
coef = pd.Series(np.ravel(model_lasso.coef_))
print("% s th shuffle, Lasso picked " % i + str(
sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
                # best penalty parameter found by cross-validation (C_, not the full Cs_ grid)
                alpha = model_lasso.C_[0]
alpha_list.append(alpha)
print('best alpha value is % s' % alpha)
                # accumulate the coef of every iteration (ravel to 1-D, since
                # logistic regression stores coef_ with shape (1, n_features))
                coef_sum += np.ravel(model_lasso.coef_)
                # extract the mask of the non-zero features
                model = SelectFromModel(model_lasso, prefit=True)
                # mask over all features: True for kept features, False for dropped ones
                mask = model.get_support()
                # use the mask to store the names of the kept features in select_list
for bool, name in zip(mask, self.name):
if bool:
select_list.append(name)
            # average coef over all iterations
            coef_mean = coef_sum / shuffle_time
            # the features of every iteration are stored in select_list; count how often each feature appears and keep the counts in the dict feature_freq
            feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
            # the alpha of every iteration is stored in alpha_list; count how often each alpha appears and keep the counts in the dict alpha_freq
            alpha_freq = dict(zip(*np.unique(alpha_list, return_counts=True)))
            # sort the features by selection frequency, descending, and store names and counts separately
select_feature_name = []
select_feature_name_freq = []
for k in sorted(feature_freq, key=feature_freq.__getitem__, reverse=True):
# 特征名相对应的顺序,将每个特征出现的次数存在select_feature_name_freq中
select_feature_name_freq.append(feature_freq[k])
# 将特征名存在select_feature_name中,list形式
select_feature_name.append(k)
# 获取lasso后特征的索引
select_feature_index = []
# 将lasso后特征的名字转为list
name_list = list(select_feature_name)
# 将原始所有特征的名字转为list
all_name_list = list(self.name)
# 获取特征选择后特征在原始特征list中的索引位置,将所有索引位置存在select_feature_index中
for i in range(len(select_feature_name)):
index = all_name_list.index(name_list[i])
select_feature_index.append(index)
# 按照alpha出现的频率,从大到小进行排序,分别存储alpha的大小和出现次数
alpha_value = []
alpha_value_freq = []
for k in sorted(alpha_freq, key=alpha_freq.__getitem__, reverse=True):
# alpha值相对应的顺序,将每个alpha值出现的次数存在alpha_value_freq中
alpha_value_freq.append(alpha_freq[k])
# 将alpha的值存在alpha_value中,list形式
alpha_value.append(k)
print('alpha value % s appeared % s times in the loop' % (k, alpha_freq[k]))
# 通过索引将选择后的特征矩阵赋值给select_feature
new_train_feature = self.X_train[:, select_feature_index]
new_test_feature = self.X_test[:, select_feature_index]
feature_weight = coef_mean[select_feature_index]
# 将输出值存入txt文件
lasso_txt.write('\nSelected feature index:\n')
lasso_txt.write(str(select_feature_index))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature weight: \n')
lasso_txt.write(str(feature_weight))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature name:\n')
lasso_txt.write(str(select_feature_name))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature appearance frequency:\n')
lasso_txt.write(str(select_feature_name_freq))
lasso_txt.write('\n---------------------------------------------\n')
return new_train_feature, new_test_feature, select_feature_name, \
select_feature_name_freq, feature_weight, select_feature_index
        else:
            # Initialize the summed weights to 0 and the selected-feature list to empty
            coef_sum = 0
            select_list = []
            # Start the shuffle loop; store the selected feature names in every iteration
            for i in range(shuffle_time):
                # Shuffle the data
                X, y = shuffle(self.X_train, self.y_train)
                model_lasso = LogisticRegression(C=alpha_range, penalty='l1')
                model_lasso.fit(X, y)
                coef = pd.Series(np.ravel(model_lasso.coef_))
                print("% s th shuffle, Lasso picked " % i + str(
                    sum(coef != 0)) + " variables and eliminated the other " + str(
                    sum(coef == 0)) + " variables")
                # Accumulate the coef of every iteration
                coef_sum += model_lasso.coef_
                # Mask of the non-zero features
                model = SelectFromModel(model_lasso, prefit=True)
                # Mask over all features: kept features are True, dropped ones are False
                mask = model.get_support()
                # Use the mask to store the names of the kept features in select_list
                for keep, name in zip(mask, self.name):
                    if keep:
                        select_list.append(name)
            # Mean coef over all features
            coef_mean = coef_sum / shuffle_time
            # The features of every iteration are stored in select_list; count how often each
            # feature appears and store the counts in feature_freq as a dict
            feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
            # Sort by selection frequency in descending order and store the feature names
            # and counts separately
            select_feature_name = []
            select_feature_name_freq = []
            for k in sorted(feature_freq, key=feature_freq.__getitem__, reverse=True):
                # In the order of the feature names, store each feature's count in select_feature_name_freq
                select_feature_name_freq.append(feature_freq[k])
                # Store the feature names in select_feature_name as a list
                select_feature_name.append(k)
            # Indices of the features kept by lasso
            select_feature_index = []
            # Names of the features kept by lasso, as a list
            name_list = list(select_feature_name)
            # Names of all original features, as a list
            all_name_list = list(self.name)
            # Find the position of every selected feature in the original feature list
            # and store all positions in select_feature_index
            for i in range(len(select_feature_name)):
                index = all_name_list.index(name_list[i])
                select_feature_index.append(index)
            # Use the indices to extract the selected feature matrices
            new_train_feature = self.X_train[:, select_feature_index]
            new_test_feature = self.X_test[:, select_feature_index]
            feature_weight = np.ravel(coef_mean)[select_feature_index]
            # Write the outputs into the txt file
            lasso_txt.write('\nSelected feature index:\n')
            lasso_txt.write(str(select_feature_index))
            lasso_txt.write('\n---------------------------------------------\n')
            lasso_txt.write('\nSelected feature weight: \n')
            lasso_txt.write(str(feature_weight))
            lasso_txt.write('\n---------------------------------------------\n')
            lasso_txt.write('\nSelected feature name:\n')
            lasso_txt.write(str(select_feature_name))
            lasso_txt.write('\n---------------------------------------------\n')
            lasso_txt.write('\nSelected feature appearance frequency:\n')
            lasso_txt.write(str(select_feature_name_freq))
            lasso_txt.write('\n---------------------------------------------\n')
            return new_train_feature, new_test_feature, select_feature_name, \
                   select_feature_name_freq, feature_weight, select_feature_index
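# A minimal usage sketch for the shuffle-based lasso selection above. The data names
# and the enclosing class name (`logistic_lasso`) are hypothetical, since the class
# header lies outside this excerpt; kept commented out so the module stays importable:
# selector = logistic_lasso(X_tr, X_te, y_tr, y_te, names, cv_val=True, path='./results')
# (X_tr_sel, X_te_sel, sel_names, sel_freq,
#  sel_weight, sel_index) = selector.logis_lasso_shuffle(alpha_range=[0.01, 0.1, 1, 10],
#                                                        shuffle_time=50, cv=10)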
class elastic_net():
    '''Elastic net for feature selection; unlike plain lasso it can select groups of features
    Inputs:
        X_train: training feature matrix
        X_test: test feature matrix
        y_train: training labels
        y_test: test labels
        feature_name: feature names matching the feature matrix
        cv_val: bool, whether to run a cross-validated grid search
        path: path where the results are stored
    '''
    def __init__(self, X_train, X_test, y_train, y_test, feature_name, cv_val, path):
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test
        self.name = feature_name
        self.cv_val = cv_val
        self.path = path
    def elastic_net(self, l1, alphas, cv):
        if self.cv_val is True:
            elas = ElasticNetCV(l1_ratio=l1, alphas=alphas, cv=cv)
            elas.fit(self.X_train, self.y_train)
            coef = pd.Series(elas.coef_)
            print("Elastic Net picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(
                sum(coef == 0)) + " variables")
            img_path = os.path.join(self.path, 'ElasticNetCV')
            os.makedirs(img_path, exist_ok=True)
            # Best elastic net penalty parameters found by cross validation
            best_alpha = elas.alpha_
            best_l1_ratio = elas.l1_ratio_
            best_coef = elas.coef_
            best_alphas = elas.alphas_
            best_mse_path = elas.mse_path_
            print('-----------------------------')
            print('Best Elastic Net alpha:')
            print(best_alpha)
            # Keep the features whose elastic net weights are non-zero
            model = SelectFromModel(elas, prefit=True)
            # Filter the training and test feature matrices with the fitted model
            X_new_train = model.transform(self.X_train)
            X_new_test = model.transform(self.X_test)
            # print(X_new_test.shape)
            # print(model.get_support())
            # Mask over all features: kept features are True, dropped ones are False
            mask = model.get_support()
            new_feature_name = []
            feature_weight = []
            # Use the mask to collect the names and weights of the kept features
            for keep, feature, w in zip(mask, self.name, coef):
                if keep:
                    new_feature_name.append(feature)
                    feature_weight.append(w)
            # Attach the feature names to the retained train/test features
            new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
            new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
            feature_weight = pd.Series(feature_weight)
            return best_alpha, new_train_feature, new_test_feature, new_feature_name, feature_weight
        else:
            elas = ElasticNet(l1_ratio=l1, alpha=alphas)
            elas.fit(self.X_train, self.y_train)
            coef = pd.Series(elas.coef_)
            print("Elastic Net picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(
                sum(coef == 0)) + " variables")
            img_path = os.path.join(self.path, 'ElasticNetCV')
            os.makedirs(img_path, exist_ok=True)
            coef1 = elas.coef_
            sparse = elas.sparse_coef_
            # Keep the features whose elastic net weights are non-zero
            model = SelectFromModel(elas, prefit=True)
            # Filter the training and test feature matrices with the fitted model
            X_new_train = model.transform(self.X_train)
            X_new_test = model.transform(self.X_test)
            # Mask over all features: kept features are True, dropped ones are False
            mask = model.get_support()
            new_feature_name = []
            feature_weight = []
            # Use the mask to collect the names and weights of the kept features
            for keep, feature, w in zip(mask, self.name, coef):
                if keep:
                    new_feature_name.append(feature)
                    feature_weight.append(w)
            # Attach the feature names to the retained train/test features
            new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
            new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
            feature_weight = pd.Series(feature_weight)
            return new_train_feature, new_test_feature, new_feature_name, feature_weight
    def elasticnet_shuffle(self, l1_range, alphas_range, shuffle_time=100, cv=10, freq_seq=False):
        '''Estimate feature weights over many shuffle loops; in the end features are selected
        by how often they were picked across the loops
        Inputs:
            freq_seq: whether to sort the features by how often they appear; if False the
                      original feature order is kept and only a subset of features is extracted
        '''
        # Write the returned values into a txt file
        elas_txt = open(os.path.join(self.path, 'elastic net_shuffle.txt'), 'w')
        elas_txt.write('Elastic Net parameters set:\n')
        elas_txt.write('\n---------------------------------------------\n')
        elas_txt.write('Grid search: % s' % self.cv_val)
        elas_txt.write('\nL1_ratio range: % s' % l1_range)
        elas_txt.write('\nAlpha range: % s' % alphas_range)
        elas_txt.write('\nShuffle time: % s' % shuffle_time)
        elas_txt.write('\nGrid search cv-fold: % s' % cv)
        elas_txt.write('\n---------------------------------------------\n')
        if self.cv_val is True:
            # Initialize the summed weights to 0 and the selected-feature list to empty
            coef_sum = 0
            select_list = []
            # Initialize the list of best alpha values
            alpha_list = []
            # Start the shuffle loop; store the selected feature names in every iteration
            for i in range(shuffle_time):
                # Shuffle the data
                X, y = shuffle(self.X_train, self.y_train)
                kfold = StratifiedKFold(n_splits=cv, shuffle=False)
                model_elas = ElasticNetCV(l1_ratio=l1_range, alphas=alphas_range, cv=cv)
                model_elas.fit(X, y)
                coef = pd.Series(model_elas.coef_)
                print("% s th shuffle, Elastic net picked " % i + str(
                    sum(coef != 0)) + " variables and eliminated the other " + str(
                    sum(coef == 0)) + " variables")
                # Best elastic net penalty parameters found by cross validation
                alpha = model_elas.alpha_
                l1_ratio = model_elas.l1_ratio_
                alphas = model_elas.alphas_
                mse_path = model_elas.mse_path_
                alpha_list.append(alpha)
                print('best alpha value is % s' % alpha)
                # Accumulate the coef of every iteration
                coef_sum += model_elas.coef_
                # Mask of the non-zero features
                model = SelectFromModel(model_elas, prefit=True)
                # Mask over all features: kept features are True, dropped ones are False
                mask = model.get_support()
                # Use the mask to store the names of the kept features in select_list
                for keep, name in zip(mask, self.name):
                    if keep:
                        select_list.append(name)
            # Mean coef over all features; the mean is only returned as the per-feature weight
            # and is not used during feature selection itself
            coef_mean = coef_sum / shuffle_time
            # The features of every iteration are stored in select_list; count how often each
            # feature appears and store the counts in feature_freq as a dict
            feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
            # The alpha of every iteration is stored in alpha_list; count how often each value
            # appears and store the counts in alpha_freq as a dict
            alpha_freq = dict(zip(*np.unique(alpha_list, return_counts=True)))
            # Store the feature names and their counts separately
            select_feature_name = []
            select_feature_name_freq = []
            # If freq_seq is True sort the features by how often they appear, otherwise keep
            # the original order
            if freq_seq is True:
                for k in sorted(feature_freq, key=feature_freq.__getitem__, reverse=True):
                    # In the order of the feature names, store each feature's count in select_feature_name_freq
                    select_feature_name_freq.append(feature_freq[k])
                    # Store the feature names in select_feature_name as a list
                    select_feature_name.append(k)
            elif freq_seq is False:
                for k in feature_freq:
                    # In the order of the feature names, store each feature's count in select_feature_name_freq
                    select_feature_name_freq.append(feature_freq[k])
                    # Store the feature names in select_feature_name as a list
                    select_feature_name.append(k)
            # Indices of the selected features
            select_feature_index = []
            # Names of the selected features, as a list
            name_list = list(select_feature_name)
            # Names of all original features, as a list
            all_name_list = list(self.name)
            # Find the position of every selected feature in the original feature list
            # and store all positions in select_feature_index
            for i in range(len(select_feature_name)):
                index = all_name_list.index(name_list[i])
                select_feature_index.append(index)
            # Sort the alpha values by frequency in descending order and store the values
            # and counts separately
            alpha_value = []
            alpha_value_freq = []
            for k in sorted(alpha_freq, key=alpha_freq.__getitem__, reverse=True):
                # In the order of the alpha values, store each value's count in alpha_value_freq
                alpha_value_freq.append(alpha_freq[k])
                # Store the alpha values in alpha_value as a list
                alpha_value.append(k)
                print('alpha value % s appeared % s times in the loop' % (k, alpha_freq[k]))
            # Use the indices to extract the selected feature matrices
            new_train_feature = self.X_train[:, select_feature_index]
            new_test_feature = self.X_test[:, select_feature_index]
            feature_weight = coef_mean[select_feature_index]
            # Write the outputs into the txt file
            elas_txt.write('\nSelected feature index:\n')
            elas_txt.write(str(select_feature_index))
            elas_txt.write('\n---------------------------------------------\n')
            elas_txt.write('\nSelected feature weight: \n')
            elas_txt.write(str(feature_weight))
            elas_txt.write('\n---------------------------------------------\n')
            elas_txt.write('\nSelected feature name:\n')
            elas_txt.write(str(select_feature_name))
            elas_txt.write('\n---------------------------------------------\n')
            elas_txt.write('\nSelected feature appearance frequency:\n')
            elas_txt.write(str(select_feature_name_freq))
            elas_txt.write('\n---------------------------------------------\n')
            return new_train_feature, new_test_feature, select_feature_name, \
                   select_feature_name_freq, feature_weight, select_feature_index
        else:
            # Initialize the summed weights to 0 and the selected-feature list to empty
            coef_sum = 0
            select_list = []
            # Start the shuffle loop; store the selected feature names in every iteration
            for i in range(shuffle_time):
                # Shuffle the data
                X, y = shuffle(self.X_train, self.y_train)
                model_elas = ElasticNet(l1_ratio=l1_range, alpha=alphas_range)
                model_elas.fit(X, y)
                coef = pd.Series(model_elas.coef_)
                print("% s th shuffle, Elastic net picked " % i + str(
                    sum(coef != 0)) + " variables and eliminated the other " + str(
                    sum(coef == 0)) + " variables")
                # Plot the elastic net path
                # from itertools import cycle
                # alphas_enet, coefs_enet, _ = enet_path(X, y, eps=5e-3, l1_ratio=l1_range,
                #                                        fit_intercept=False)
                # plt.figure(1)
                # colors = cycle(['b', 'r', 'g', 'c', 'k'])
                # neg_log_alphas_enet = -np.log10(alphas_enet)
                # for coef_e in coefs_enet:
                #     l2 = plt.plot(neg_log_alphas_enet, coef_e)
                #
                # plt.xlabel('-Log(alpha)')
                # plt.ylabel('coefficients')
                # plt.xlim((0, 2.2))
                # plt.ylim((-0.1, 0.1))
                # plt.axis('tight')
                # plt.show()
                # Accumulate the coef of every iteration
                coef_sum += model_elas.coef_
                # Mask of the non-zero features
                model = SelectFromModel(model_elas, prefit=True)
                # Mask over all features: kept features are True, dropped ones are False
                mask = model.get_support()
                # Use the mask to store the names of the kept features in select_list
                for keep, name in zip(mask, self.name):
                    if keep:
                        select_list.append(name)
            # Mean coef over all features
            coef_mean = coef_sum / shuffle_time
            # The features of every iteration are stored in select_list; count how often each
            # feature appears and store the counts in feature_freq as a dict
            feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
            # Store the feature names and their counts separately
            select_feature_name = []
            select_feature_name_freq = []
            # If freq_seq is True sort the features by how often they appear, otherwise keep
            # the original order
            if freq_seq is True:
                for k in sorted(feature_freq, key=feature_freq.__getitem__, reverse=True):
                    # In the order of the feature names, store each feature's count in select_feature_name_freq
                    select_feature_name_freq.append(feature_freq[k])
                    # Store the feature names in select_feature_name as a list
                    select_feature_name.append(k)
            elif freq_seq is False:
                for k in feature_freq:
                    # In the order of the feature names, store each feature's count in select_feature_name_freq
                    select_feature_name_freq.append(feature_freq[k])
                    # Store the feature names in select_feature_name as a list
                    select_feature_name.append(k)
            # Indices of the selected features
            select_feature_index = []
            # Names of the selected features, as a list
            name_list = list(select_feature_name)
            # Names of all original features, as a list
            all_name_list = list(self.name)
            # Find the position of every selected feature in the original feature list
            # and store all positions in select_feature_index
            for i in range(len(select_feature_name)):
                index = all_name_list.index(name_list[i])
                select_feature_index.append(index)
            # Use the indices to extract the selected feature matrices
            new_train_feature = self.X_train[:, select_feature_index]
            new_test_feature = self.X_test[:, select_feature_index]
            feature_weight = coef_mean[select_feature_index]
            # Write the outputs into the txt file
            elas_txt.write('\nSelected feature index:\n')
            elas_txt.write(str(select_feature_index))
            elas_txt.write('\n---------------------------------------------\n')
            elas_txt.write('\nSelected feature weight: \n')
            elas_txt.write(str(feature_weight))
            elas_txt.write('\n---------------------------------------------\n')
            elas_txt.write('\nSelected feature name:\n')
            elas_txt.write(str(select_feature_name))
            elas_txt.write('\n---------------------------------------------\n')
            elas_txt.write('\nSelected feature appearance frequency:\n')
            elas_txt.write(str(select_feature_name_freq))
            elas_txt.write('\n---------------------------------------------\n')
            return new_train_feature, new_test_feature, select_feature_name, \
                   select_feature_name_freq, feature_weight, select_feature_index
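# A minimal usage sketch for the elastic_net class above (the data names are
# hypothetical; kept commented out so the module stays importable):
# en = elastic_net(X_tr, X_te, y_tr, y_te, names, cv_val=True, path='./results')
# (X_tr_sel, X_te_sel, sel_names, sel_freq,
#  sel_weight, sel_index) = en.elasticnet_shuffle(l1_range=[0.1, 0.5, 0.9],
#                                                 alphas_range=[0.01, 0.1, 1.0],
#                                                 shuffle_time=50, cv=10)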
class SVM():
    '''A collection of SVM classification methods: plain SVM, shuffle SVM and nested SVM
    Inputs:
        X: feature matrix
        y: labels matching the feature matrix
        path: path where the results are stored
    Attributes:
        weight: SVM weight of every feature, so its length equals the number of features,
                stored as a list (note: this attribute is only meaningful with a linear kernel)
    '''
    # Initialize the class attribute holding the SVM feature weights
    weight = 0
    def __init__(self, X, y, path):
        self.X = X
        self.y = y
        self.path = path
    def svm_only(self, kernel='linear', ratio=0.5, gamma=0.1, C=10, cv=3, gridsearch=True):
        '''Run a single SVM; grid search can optionally be used to find the best parameters
        Inputs:
            kernel: kernel function
            ratio: train/test split ratio, 0.5 by default
            gamma: hyperparameter gamma (RBF only); a list with grid search, an int without
            C: hyperparameter C; a list with grid search, an int without
            cv: number of folds of the cross-validated grid search
            gridsearch: bool, whether to grid-search the best SVM hyperparameters
        Outputs:
            best_para: dict, the best parameters found by the grid search
            pred_train: predictions on the training set
            y_score_train: prediction probabilities on the training set
            pred_test: predictions on the test set
            y_score_test: prediction probabilities on the test set
        '''
        X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=ratio, stratify=self.y)
        if gridsearch is True:
            svm = SVC(kernel=kernel, gamma=gamma, C=C, probability=True)
            para = {
                'gamma': gamma,
                'C': C,
            }
            grid = GridSearchCV(svm, para, n_jobs=1, verbose=1, scoring='accuracy', cv=cv)
            grid.fit(X_train, y_train)
            pred_train = grid.predict(X_train)
            pred_test = grid.predict(X_test)
            y_score_train = grid.predict_proba(X_train)
            y_score_test = grid.predict_proba(X_test)
            best_para = grid.best_params_
            # Print the best SVM parameters
            print('SVM CV Best score: %0.3f' % grid.best_score_)
            print('SVM CV Best parameters set:')
            print('-------------------------------------------')
            for param_name in sorted(best_para.keys()):
                print('\t%s: %r' % (param_name, best_para[param_name]))
            # Store the SVM feature weights in the class attribute weight
            # (taken from the refit best estimator; the bare `svm` object is never fitted here)
            if kernel == 'linear':
                SVM.weight = grid.best_estimator_.coef_
            else:
                print('SVM coefficient is only available when using linear kernel function.')
        else:
            svm = SVC(kernel=kernel, gamma=gamma, C=C, probability=True)
            svm.fit(X_train, y_train)
            pred_train = svm.predict(X_train)
            pred_test = svm.predict(X_test)
            y_score_train = svm.predict_proba(X_train)
            y_score_test = svm.predict_proba(X_test)
            best_para = {'gamma': gamma, 'C': C}
            # Store the SVM feature weights in the class attribute weight
            if kernel == 'linear':
                SVM.weight = svm.coef_
            else:
                print('SVM coefficient is only available when using linear kernel function.')
        return pred_train, y_score_train, pred_test, y_score_test, best_para
    def svm_shuffle(self, outer, para, svm_metrics, shuffle_time=100):
        '''SVM without hyperparameter grid search: the hyperparameters are given directly, the
        data is shuffled many times with this model, and the results are averaged at the end.
        In the ROC plotting used here, fpr is not the true fpr but a user-defined arithmetic
        sequence; tpr is obtained by interpolating along the real tpr/fpr trend. The ROC drawn
        this way is correct, but the final AUC is computed from the interpolated ROC and so
        differs slightly from the true AUC, which is acceptable.
        Inputs:
            outer: number of folds of the cross validation run within every shuffle
            para: dict, SVM parameters, containing:
                kernel: only linear and rbf are currently supported
                C: penalty parameter, used by both linear and rbf
                gamma: only present when the rbf kernel is used
            svm_metrics: list, metrics to compute from the SVM output; currently supports
                         accuracy, precision, recall, f1, sensitivity, specificity.
                         Full names are required, no abbreviations.
            shuffle_time: number of shuffles
        Outputs:
            train_means: dict keyed by the metrics in svm_metrics; each value is a list holding,
                         for every shuffle, the cross-validation mean of that metric on the
                         training set. The overall mean can be obtained from the dict via mean.
            train_std: like the dict above, but holding the standard deviation over shuffles
                       instead of the mean
            test_means: like the dict above, but holding the test-set means
            test_std: like the dict above, but holding the test-set standard deviations
            roc_dict: dict with the lists needed to plot the ROC, containing:
                tpr_train: list, per-shuffle cross-validation mean tpr on the training set
                tpr_test: per-shuffle cross-validation mean tpr on the test set
                tpr_list_train: 2-D list; the tpr of every training fold of every shuffle,
                                one list per shuffle
                tpr_list_test: 2-D list; the tpr of every test fold of every shuffle,
                               one list per shuffle
                fpr_train: list, per-shuffle mean fpr on the training set (actually a
                           fixed-length arithmetic sequence)
                fpr_test: list, per-shuffle mean fpr on the test set (actually a fixed-length
                          arithmetic sequence)
                auc_list_train: list of the AUC computed for every shuffle on the training set
                auc_list_test: list of the AUC computed for every shuffle on the test set
                auc_train: float, mean AUC over all shuffles on the training set
                auc_test: float, mean AUC over all shuffles on the test set
            The first four dicts are mainly for plotting each metric against the shuffles: the
            means draw the metric curves and the stds draw the variability bands. The really
            useful parts of roc_dict are tpr_train, tpr_test, fpr_train and fpr_test; averaging
            each of them yields every parameter needed to draw the ROC. auc_list can be used to
            plot AUC against shuffles; the remaining values are rarely needed and are returned
            just in case.
        '''
        from mics import classifier_mics
        # Initialize the SVM weights to 0
        svm_weight = 0
        svm_weight_cv = 0
        # Write the SVM parameters into a txt file
        svm_shuffle_path = os.path.join(self.path, 'svm_shuffle')
        os.makedirs(svm_shuffle_path, exist_ok=True)
        svm_txt = open(os.path.join(self.path, 'svm_shuffle_result.txt'), 'w')
        svm_txt.write('Support Vector Machine Shuffle parameters set:\n')
        svm_txt.write('\n---------------------------------------------\n')
        svm_txt.write('Kernel type: % s' % para['kernel'])
        svm_txt.write('\nC value: % s' % para['C'])
        if para['kernel'] == 'rbf':
            svm_txt.write('\nGamma value: % s' % para['gamma'])
        svm_txt.write('\nShuffle time: % s' % shuffle_time)
        svm_txt.write('\nCross validation-fold: % s' % outer)
        svm_txt.write('\nsvm metrics: % s\n' % svm_metrics)
        svm_txt.write('\n---------------------------------------------\n')
        # For every metric passed in svm_metrics, initialize empty train/test mean and std lists
        metrics_num = len(svm_metrics)
        for name in svm_metrics:
            exec('train_{}_means = []'.format(name))
            exec('train_{}_std = []'.format(name))
            exec('test_{}_means = []'.format(name))
            exec('test_{}_std = []'.format(name))
        shuffle_path = os.path.join(self.path, 'svm', 'shuffle')
        os.makedirs(shuffle_path, exist_ok=True)
        # Define fpr directly as an arithmetic sequence
        meanfpr_outer_train = np.linspace(0, 1, 100)
        meanfpr_outer_test = np.linspace(0, 1, 100)
        # Initialize tpr and auc as empty; tpr_outer_test ends up with the same length as
        # meanfpr_outer_test, and auc with as many entries as there are shuffles
        tpr_outer_train = []
        auc_list_train = []
        tpr_outer_test = []
        auc_list_test = []
        for i in range(shuffle_time):
            # Reset the per-shuffle weight accumulator (without this, weights leak
            # from one shuffle into the next and skew the averaged weights)
            svm_weight_cv = 0
            # Fold assignment of the outer loop
            outer_cv = StratifiedKFold(n_splits=outer, shuffle=True, random_state=i)
            # Pick the concrete model according to the SVM kernel
            if para['kernel'] == 'rbf':
                svm = SVC(kernel=para['kernel'], C=para['C'], gamma=para['gamma'], probability=True)
            elif para['kernel'] == 'linear':
                svm = SVC(kernel=para['kernel'], C=para['C'], probability=True)
            # Inner loop: compute the mean tpr of every inner loop
            tpr_inner_train = []
            tpr_inner_test = []
            # Initialize the per-fold metrics, but only the ones requested in svm_metrics
            for name in svm_metrics:
                exec('{}_inner_train = []'.format(name))
                exec('{}_inner_test = []'.format(name))
            for train, test in outer_cv.split(self.X, self.y):
                svm.fit(self.X[train], self.y[train])
                # Collect the SVM outputs
                pred_train = svm.predict(self.X[train])
                pred_test = svm.predict(self.X[test])
                prob_train = svm.predict_proba(self.X[train])
                prob_test = svm.predict_proba(self.X[test])
                # With a linear kernel, accumulate the weights
                if para['kernel'] == 'linear':
                    svm_weight_cv += np.ravel(svm.coef_)
                else:
                    print('SVM coefficient is only available when using linear kernel function.')
                # Compute the four main metrics
                mics = classifier_mics(self.y[train], pred_train, prob_train,
                                       self.y[test], pred_test, prob_test, 'svm_shuffle_result')
                accuracy_train, precision_train, recall_train, f1_train = mics.mics_sum_train()
                accuracy_test, precision_test, recall_test, f1_test = mics.mics_sum_test()
                sensitivity_train, sensitivity_test = mics.sensitivity()
                specificity_train, specificity_test = mics.specificity()
                # All four metrics are computed, but only the ones requested in svm_metrics are appended
                for name in svm_metrics:
                    exec('{}_inner_train.append({}_train)'.format(name, name))
                    exec('{}_inner_test.append({}_test)'.format(name, name))
                # Compute fpr and tpr
                fpr_train, tpr_train, thres_train = roc_curve(self.y[train], prob_train[:, 1])
                fpr_test, tpr_test, thres_test = roc_curve(self.y[test], prob_test[:, 1])
                # Interpolate over the fpr/tpr range to the length of meanfpr_outer_test
                tpr_inner_train.append(np.interp(meanfpr_outer_train, fpr_train, tpr_train))
                tpr_inner_test.append(np.interp(meanfpr_outer_test, fpr_test, tpr_test))
                tpr_inner_train[-1][0] = 0.0
                tpr_inner_test[-1][0] = 0.0
            # Average the SVM weights over the folds of this shuffle
            svm_weight_cv /= outer
            # Add this shuffle's weights to the running total
            svm_weight += svm_weight_cv
            # The mean tpr over the folds serves as the tpr of this shuffle
            meantpr_inner_train = np.mean(tpr_inner_train, axis=0)
            meantpr_inner_test = np.mean(tpr_inner_test, axis=0)
            meantpr_inner_train[-1] = 1.0
            meantpr_inner_test[-1] = 1.0
            # Compute the AUC of this shuffle and store it in auc_list
            mean_auc_train = auc(meanfpr_outer_train, meantpr_inner_train)
            mean_auc_test = auc(meanfpr_outer_test, meantpr_inner_test)
            auc_list_train.append(mean_auc_train)
            auc_list_test.append(mean_auc_test)
            # After computing the AUC, store this shuffle's tpr in tpr_outer_test
            tpr_outer_train.append(meantpr_inner_train)
            tpr_outer_test.append(meantpr_inner_test)
            # Store every metric of the outer loop in its list
            for name in svm_metrics:
                # Store the cross-validation mean of each metric during training
                exec('{}_inner_train = np.array({}_inner_train)'.format(name, name))
                exec("train_{}_means.append({}_inner_train.mean())".format(name, name))
                # Store the cross-validation standard deviation of each metric during training
                exec("train_{}_std.append({}_inner_train.std())".format(name, name))
                # Store the cross-validation mean of each metric during testing
                exec('{}_inner_test = np.array({}_inner_test)'.format(name, name))
                exec("test_{}_means.append({}_inner_test.mean())".format(name, name))
                # Store the cross-validation standard deviation of each metric during testing
                exec("test_{}_std.append({}_inner_test.std())".format(name, name))
        meantpr_outer_train = np.mean(tpr_outer_train, axis=0)
        meantpr_outer_test = np.mean(tpr_outer_test, axis=0)
        final_auc_train = auc(meanfpr_outer_train, meantpr_outer_train)
        final_auc_test = auc(meanfpr_outer_test, meantpr_outer_test)
        # Average the SVM weights over all shuffles and assign the mean to the class attribute weight
        svm_weight /= shuffle_time
        SVM.weight = svm_weight
        # For brevity, collect the ROC-related variables in one dict
        roc_dict = {}
        roc_dict['tpr_train'] = meantpr_outer_train
        roc_dict['tpr_test'] = meantpr_outer_test
        roc_dict['tpr_list_train'] = tpr_outer_train
        roc_dict['tpr_list_test'] = tpr_outer_test
        roc_dict['fpr_train'] = meanfpr_outer_train
        roc_dict['fpr_test'] = meanfpr_outer_test
        roc_dict['auc_list_train'] = auc_list_train
        roc_dict['auc_list_test'] = auc_list_test
        roc_dict['auc_train'] = final_auc_train
        roc_dict['auc_test'] = final_auc_test
        # For brevity, store the per-metric means and stds of training and testing as dicts
        # before returning them
        train_means = {}
        train_std = {}
        test_means = {}
        test_std = {}
        for name in svm_metrics:
            exec("train_means['{}'] = train_{}_means".format(name, name))
            exec("train_std['{}'] = train_{}_std".format(name, name))
            exec("test_means['{}'] = test_{}_means".format(name, name))
            exec("test_std['{}'] = test_{}_std".format(name, name))
        # Write the outputs into the txt file
        for name in svm_metrics:
            svm_txt.write('\n---------------------------------------------\n')
            exec("svm_txt.write('Train set {} mean value: % s' % np.mean(train_means['{}']))".format(name, name))
            svm_txt.write('\n')
            exec("svm_txt.write('Train set {} max value: % s' % np.max(train_means['{}']))".format(name, name))
            svm_txt.write('\n')
            exec("svm_txt.write('Train set {} min value: % s' % np.min(train_means['{}']))".format(name, name))
            svm_txt.write('\n---------------------------------------------\n')
            exec("svm_txt.write('Test set {} mean value: % s' % np.mean(test_means['{}']))".format(name, name))
            svm_txt.write('\n')
            exec("svm_txt.write('Test set {} max value: % s' % np.max(test_means['{}']))".format(name, name))
            svm_txt.write('\n')
            exec("svm_txt.write('Test set {} min value: % s' % np.min(test_means['{}']))".format(name, name))
            svm_txt.write('\n---------------------------------------------\n')
        svm_txt.write('\nTrain set AUC mean value: % s' % np.mean(roc_dict['auc_list_train']))
        svm_txt.write('\nTrain set AUC max value: % s' % np.max(roc_dict['auc_list_train']))
        svm_txt.write('\nTrain set AUC min value: % s' % np.min(roc_dict['auc_list_train']))
        svm_txt.write('\n---------------------------------------------\n')
        svm_txt.write('\nTest set AUC mean value: % s' % np.mean(roc_dict['auc_list_test']))
        svm_txt.write('\nTest set AUC max value: % s' % np.max(roc_dict['auc_list_test']))
        svm_txt.write('\nTest set AUC min value: % s' % np.min(roc_dict['auc_list_test']))
        # Store the SVM weights
        svm_txt.write('\n---------------------------------------------\n')
        svm_txt.write('\nSVM weight: % s' % svm_weight)
        return train_means, train_std, test_means, test_std, roc_dict
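    # A minimal usage sketch for svm_shuffle (hypothetical data names; kept
    # commented out so the class definition stays importable):
    # clf = SVM(X, y, path='./results')
    # train_means, train_std, test_means, test_std, roc_dict = clf.svm_shuffle(
    #     outer=10, para={'kernel': 'linear', 'C': 1},
    #     svm_metrics=['accuracy', 'sensitivity', 'specificity'], shuffle_time=100)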
    def svm_nested(self, para, svm_metrics, shuffle_time=100, inner=5, outer=10, log=True):
        '''Nested (inner/outer) cross-validated SVM; the data is shuffled many times with this
        model and the results are averaged at the end.
        In the ROC plotting used here, fpr is not the true fpr but a user-defined arithmetic
        sequence; tpr is obtained by interpolating along the real tpr/fpr trend. The ROC drawn
        this way is correct, but the final AUC is computed from the interpolated ROC and so
        differs slightly from the true AUC, which is acceptable.
        Inputs:
            outer: number of folds of the cross validation run within every shuffle
            inner: number of folds of the inner cross-validated grid search
            para: dict, SVM parameters, containing:
                kernel: only linear and rbf are currently supported
                C: penalty parameter, used by both linear and rbf
                gamma: only present when the rbf kernel is used
            svm_metrics: list, metrics to compute from the SVM output; currently supports
                         accuracy, precision, recall, f1, sensitivity, specificity.
                         Full names are required, no abbreviations.
            shuffle_time: number of shuffles
            log: bool, whether to write the detailed grid-search results of every fold to a file
        Outputs:
            train_means: dict keyed by the metrics in svm_metrics; each value is a list holding,
                         for every shuffle, the cross-validation mean of that metric on the
                         training set. The overall mean can be obtained from the dict via mean.
            train_std: like the dict above, but holding the standard deviation over shuffles
                       instead of the mean
            test_means: like the dict above, but holding the test-set means
            test_std: like the dict above, but holding the test-set standard deviations
            roc_dict: dict with the lists needed to plot the ROC, containing:
                tpr_train: list, per-shuffle cross-validation mean tpr on the training set
                tpr_test: per-shuffle cross-validation mean tpr on the test set
                tpr_list_train: 2-D list; the tpr of every training fold of every shuffle,
                                one list per shuffle
                tpr_list_test: 2-D list; the tpr of every test fold of every shuffle,
                               one list per shuffle
                fpr_train: list, per-shuffle mean fpr on the training set (actually a
                           fixed-length arithmetic sequence)
                fpr_test: list, per-shuffle mean fpr on the test set (actually a fixed-length
                          arithmetic sequence)
                auc_list_train: list of the AUC computed for every shuffle on the training set
                auc_list_test: list of the AUC computed for every shuffle on the test set
                auc_train: float, mean AUC over all shuffles on the training set
                auc_test: float, mean AUC over all shuffles on the test set
            The first four dicts are mainly for plotting each metric against the shuffles: the
            means draw the metric curves and the stds draw the variability bands. The really
            useful parts of roc_dict are tpr_train, tpr_test, fpr_train and fpr_test; averaging
            each of them yields every parameter needed to draw the ROC. auc_list can be used to
            plot AUC against shuffles; the remaining values are rarely needed and are returned
            just in case.
        '''
        from mics import classifier_mics
        # Write the SVM parameters into a txt file
        svm_shuffle_path = os.path.join(self.path, 'svm_nested')
        os.makedirs(svm_shuffle_path, exist_ok=True)
        svm_txt = open(os.path.join(self.path, 'svm_nested_result.txt'), 'w')
        svm_txt.write('Nested Support Vector Machine parameters set:\n')
        svm_txt.write('\n---------------------------------------------\n')
        svm_txt.write('Kernel type: % s' % para['kernel'])
        svm_txt.write('\nC value: % s' % para['C'])
        if para['kernel'] == 'rbf':
            svm_txt.write('\nGamma value: % s' % para['gamma'])
        svm_txt.write('\nShuffle time: % s' % shuffle_time)
        svm_txt.write('\nGrid Search Cross validation-fold: % s' % inner)
        svm_txt.write('\nCross validation-fold: % s' % outer)
        svm_txt.write('\nsvm metrics: % s\n' % svm_metrics)
        svm_txt.write('\n---------------------------------------------\n')
        # For every metric passed in svm_metrics, initialize empty train/test mean and std lists
        metrics_num = len(svm_metrics)
        for name in svm_metrics:
            exec('train_{}_means = []'.format(name))
            exec('train_{}_std = []'.format(name))
            exec('test_{}_means = []'.format(name))
            exec('test_{}_std = []'.format(name))
        shuffle_path = os.path.join(self.path, 'svm', 'nest_cv')
        os.makedirs(shuffle_path, exist_ok=True)
        # Define fpr directly as an arithmetic sequence
        meanfpr_outer_train = np.linspace(0, 1, 100)
        meanfpr_outer_test = np.linspace(0, 1, 100)
        # Initialize tpr and auc as empty; tpr_outer_test ends up with the same length as
        # meanfpr_outer_test, and auc with as many entries as there are shuffles
        tpr_outer_train = []
        auc_list_train = []
        tpr_outer_test = []
        auc_list_test = []
        for i in range(shuffle_time):
            # Fold assignment of the inner and outer loops
            inner_cv = StratifiedKFold(n_splits=inner, shuffle=True, random_state=i)
            outer_cv = StratifiedKFold(n_splits=outer, shuffle=True, random_state=i)
            # Inner loop: compute the mean tpr of every inner loop
            tpr_inner_train = []
            tpr_inner_test = []
            # Initialize the per-fold metrics, but only the ones requested in svm_metrics
            for name in svm_metrics:
                exec('{}_inner_train = []'.format(name))
                exec('{}_inner_test = []'.format(name))
            for train, test in outer_cv.split(self.X, self.y):
                svm = SVC(probability=True)
                # Grid search
                grid = GridSearchCV(svm, para, scoring='accuracy', cv=inner_cv, refit=True,
                                    return_train_score=True)
                grid.fit(self.X[train], self.y[train])
                # Best parameters
                best_para = grid.best_params_
                # If the per-fold grid-search results should be logged, record these values
                if log is True:
                    test_means_log = grid.cv_results_['mean_test_score']
                    test_std_log = grid.cv_results_['std_test_score']
                    train_means_log = grid.cv_results_['mean_train_score']
                    train_std_log = grid.cv_results_['std_train_score']
                    cv_para_log = grid.cv_results_['params']
                    # Write the per-fold results into the log file
                    logfile_name = os.path.join(shuffle_path, 'log.txt')
                    file_log = open(logfile_name, mode='a', encoding='utf-8')
                    file_log.write('-------------------------------------\n')
                    file_log.write('cv results:\n')
                    file_log.write('best parameters: %s\n' % cv_para_log)
                    file_log.write('mean test score: %s\n' % test_means_log)
                    file_log.write('std_test_score: %s\n' % test_std_log)
                    file_log.write('mean_train_score: %s\n' % train_means_log)
                    file_log.write('train_std_log: %s\n' % train_std_log)
                # Print the best SVM parameters
                print('SVM CV Best score: %0.3f' % grid.best_score_)
                print('SVM CV Best parameters set:')
                print('-------------------------------------------')
                for param_name in sorted(best_para.keys()):
                    print('\t%s: %r' % (param_name, best_para[param_name]))
                # Write the result of every shuffle into the log file
                logfile_name = os.path.join(shuffle_path, 'log.txt')
                file_log = open(logfile_name, mode='a', encoding='utf-8')
                file_log.write('------------------------------------\n')
                file_log.write('%s th shuffle results: \n' % i)
                file_log.write('best parameters: %s\n' % best_para)
                # Collect the SVM outputs
                pred_train = grid.predict(self.X[train])
                pred_test = grid.predict(self.X[test])
                prob_train = grid.predict_proba(self.X[train])
                prob_test = grid.predict_proba(self.X[test])
                # Compute the four main metrics
                mics = classifier_mics(self.y[train], pred_train, prob_train,
                                       self.y[test], pred_test, prob_test, 'svm_nest_result')
                accuracy_train, precision_train, recall_train, f1_train = mics.mics_sum_train()
                accuracy_test, precision_test, recall_test, f1_test = mics.mics_sum_test()
                sensitivity_train, sensitivity_test = mics.sensitivity()
                specificity_train, specificity_test = mics.specificity()
                # All four metrics are computed, but only the ones requested in svm_metrics are appended
                for name in svm_metrics:
                    exec('{}_inner_train.append({}_train)'.format(name, name))
                    exec('{}_inner_test.append({}_test)'.format(name, name))
                # Compute fpr and tpr
                fpr_train, tpr_train, thres_train = roc_curve(self.y[train], prob_train[:, 1])
                fpr_test, tpr_test, thres_test = roc_curve(self.y[test], prob_test[:, 1])
                # Interpolate over the fpr/tpr range to the length of meanfpr_outer_test
                tpr_inner_train.append(np.interp(meanfpr_outer_train, fpr_train, tpr_train))
                tpr_inner_test.append(np.interp(meanfpr_outer_test, fpr_test, tpr_test))
"""
__author__: HashTagML
license: MIT
Created: Thursday, 8th April 2021
"""
import collections
import copy
import os
import random
import textwrap
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union, Any
import bounding_box.bounding_box as bb
import matplotlib.pyplot as plt
import mediapy as media
import numpy as np
from mpl_toolkits.axes_grid1 import ImageGrid
from PIL import Image, ImageDraw, ImageOps
from .config import COLORS, IMAGE_BORDER, IMAGE_EXT
def check_num_imgs(images_dir: Union[str, os.PathLike]) -> int:
"""Checks number of images in given directory"""
images_dir = Path(images_dir)
file_counts = collections.Counter(p.suffix for p in images_dir.iterdir())
return sum([file_counts.get(ext, 0) for ext in IMAGE_EXT])
def check_df_cols(df_cols: List, req_cols: List) -> bool:
"""Verifies whether input dataframe contains required columns or not.
Args:
df_cols (List): List of columns in the input dataframe.
req_cols (List): List of required columns.
Returns:
bool: ``True`` if all required columns are present, otherwise ``False``.
"""
for r_col in req_cols:
if r_col not in df_cols:
return False
return True
class Resizer(object):
"""Rescale the image in a sample to a given size.
Args:
        expected_size (tuple): Desired output size as ``(width, height)``; the image is
            letterbox-resized to fit it while keeping the aspect ratio unchanged.
"""
def __init__(self, expected_size: Tuple = (512, 512)):
assert isinstance(expected_size, tuple)
self.expected_size = expected_size
def __call__(self, sample):
img_path, anns = sample["image_path"], sample["anns"]
img = self._get_resized_img(img_path)
bboxes = self._regress_boxes(anns)
return img, bboxes
def _set_letterbox_dims(self):
""" Get letterbox resize dimensions of the images."""
iw, ih = self.orig_dim
ew, eh = self.expected_size
scale = min(eh / ih, ew / iw)
nh = int(ih * scale)
nw = int(iw * scale)
self.new_dim = (nw, nh)
offset_x, offset_y = (ew - nw) // 2, (eh - nh) // 2
self.offset = (offset_x, offset_y)
upsample_x, upsample_y = iw / nw, ih / nh
self.upsample = (upsample_x, upsample_y)
def _get_resized_img(self, img_path: str):
"""Resizes the image."""
img = Image.open(img_path)
self.orig_dim = img.size
self._set_letterbox_dims()
img = img.resize(self.new_dim)
new_img = Image.new("RGB", self.expected_size, color=(255, 255, 255))
new_img.paste(img, self.offset)
return new_img
def _regress_boxes(self, bboxes: np.ndarray):
"""Regress the bounding boxes based on resize."""
if not len(bboxes):
return []
if not hasattr(bboxes, "ndim"):
bboxes = np.array(bboxes)
# bboxes[:, 2] += bboxes[:, 0]
# bboxes[:, 3] += bboxes[:, 1]
bboxes[:, 0] = bboxes[:, 0] / self.upsample[0]
bboxes[:, 1] = bboxes[:, 1] / self.upsample[1]
bboxes[:, 2] = bboxes[:, 2] / self.upsample[0]
bboxes[:, 3] = bboxes[:, 3] / self.upsample[1]
bboxes[:, 0] += self.offset[0]
bboxes[:, 1] += self.offset[1]
bboxes[:, 2] += self.offset[0]
bboxes[:, 3] += self.offset[1]
return bboxes
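# A minimal usage sketch for Resizer (hypothetical file name and a single box in
# pixel xyxy coordinates; kept commented out so the module stays importable):
# resizer = Resizer(expected_size=(512, 512))
# img, boxes = resizer({"image_path": "sample.jpg", "anns": [[10, 20, 110, 220]]})
# img.save("sample_512.jpg")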
def plot_boxes(
img: Image,
bboxes: np.ndarray,
scores: Optional[List] = None,
class_map: Optional[Dict] = dict(),
class_color_map: Optional[Dict] = dict(),
**kwargs,
) -> Image:
"""Plots bounding boxes annotations on the images.
Args:
img (Image): Pillow image on which annotations to be drawn.
bboxes (np.ndarray): Bounding boxes of the input image.
scores (Optional[List], optional): Scores incase of simple json format. Defaults to None.
class_map (Optional[Dict], optional): mapping between categories and their ids. Defaults to dict().
class_color_map (Optional[Dict], optional): mapping between categories and their colors. Defaults to dict().
Returns:
Image: PIL images on which annotations are drawn.
"""
    draw_img = np.array(img)
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 23 19:45:18 2019
@author: CHaithcock
"""
import sys
sys.path.insert(1, 'C:/Users/chaithcock/Documents/repos/RushHour/RHGraph')
import itertools
import numpy as np
import RHGeneratorConstants as gen
import RHState
'''
Terms.
Topological Strip
There are 8 possible ways to arrange cars/trucks in a single row or column.
Type 0: empty (no cars or trucks)
Type 1: C (one car)
Type 2: CC (two cars)
Type 3: CCC (three cars)
Type 4: CT (one car one truck)
Type 5: TC (one truck one car)
Type 6: T (one truck)
Type 7: TT (two trucks)
Board Slot (or simply slot)
In this construction, we consider the rush hour board as consisting
of 6 rows and 6 columns. We oftern refer to the 12 slots without
distinguishing rows and columns. When important to distinguish,
we use the terms row slots and column slots as needed.
Topological Class
The topological class of a state is a 12-dim vector listing for each
row slot and column slot which topologial strip occupies that slot.
Note, every state wtihin a connected component belongs to the same
topological class.
Note, two states from different connected components in the RH Graph
may belong to the same topological class
Consider this pathological example. These two states have the same
arrangment of topological strips, but there is no sequence of moves
that will take us from one state to the other
C C 0 T 0 0 0 0 0 T C C
0 0 0 T 0 0 0 0 0 T 0 0
0 0 0 T C C 0 C C T 0 0
0 0 0 T 0 0 0 0 0 T 0 0
0 0 0 T 0 0 0 0 0 T 0 0
Using the terminology above, we now outline a procedure that given a number of
cars c and trucks t to construct all possible topological classes across all
RH States with c cars and t trucks.
We use combinatorial constructions to generate all possible topological
classes. And then for each topological class, we construct all states belonging
to that topological class.
1. For a given number of cars c and trucks t, generate all possible 7-dim
vectors <c1,c2,c3,ct,tc,t1,t2> where
c1 + 2*c2 + 3*c3 + ct + tc = c
ct + tc + t1 + 2*t2 = t
c1 is the number of type 1 topological strips
c2 is the number of type 2 topological strips
c3 is the number of type 3 topological strips
ct is the number of type 4 topological strips
tc is the number of type 5 topological strips
t1 is the number of type 6 topological strips
t2 is the number of type 7 topological strips
We will marry permutations of topological strips with subsets of slots.
2. For each vector <c1,c2,c3,ct,tc,t1,t2> in (1), create a bag of
associated topological strips.
For instance for vector <2,1,0,0,1,2,0> correspods to this bag of
topological strips: {C,C,CC,TC,T,T}.
Note, for a given c and t, we can get two vectors in 1 that give rise to two
very different bags in (2). Consider
c = 6 , t = 2
<6,0,0,0,0,2,0> ==> {C,C,C,C,C,C,T,T}
<0,0,2,0,0,0,1> ==> {CCC,CCC,TT}
3. For each bag of topological strips, calculate all subsets of the 12 slots
satisfying
i) each subset contains the exit row which contains the red car
ii) the number of slots equals the number of strips in the bag
4. For each subset in (3) calculate all distinct permutations of arranging
the strips in the bag across the selected subset of slots given that
the exit row is populated with a car. That is, the exit row may not be
populated by a T or TT strip.
Each of these arrangements that pairs a permutation of strips with a subset
of slots defines a topological class.
For each topological class, we construct all possible states belonging
to that class
5. Construct all states for a given topological class.
This procedure is non-combinatorial. Refer to the actual code
of this procedure for clarity on this step.
'''
# Procedure Inputs
c = 8
t = 3
'''
1. For a given number of cars c and trucks t, generate all possible 7-dim
vectors <c1,c2,c3,ct,tc,t1,t2>
Split this construction into two parts
i) all vectors with ct = tc = 0
ii) all vectors with ct > 0 or tc > 0
'''
# Step 1.i: all 7-dim vectors with ct = tc = 0.
# create candidate counts (0, 1, ...) up to the largest number of strips possible
# for each topological strip type except for CT and TC
c1 = np.arange(c//1 + 1) # strips with 1 car
c2 = np.arange(c//2 + 1) # strips with 2 cars
c3 = np.arange(c//3 + 1) # strips with 3 cars
t1 = np.arange(t//1 + 1) # strips with 1 truck
t2 = np.arange(t//2 + 1) # strips with 2 trucks
C = [x for x in itertools.product(c1,c2,c3) if np.dot(x,[1,2,3]) == c]
T = [x for x in itertools.product(t1,t2) if np.dot(x,[1,2]) == t]
stip_counts_no_ct = [ x + (0,) + (0,) + y for x in C for y in T]
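# For example, with c = 2 and t = 1 the construction above yields
# C = [(2, 0, 0), (0, 1, 0)] and T = [(1, 0)], so the ct = tc = 0 vectors are
# <2,0,0,0,0,1,0> (bag {C, C, T}) and <0,1,0,0,0,1,0> (bag {CC, T}).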
# Step 1.ii: all 7-dim vectors with ct > 0 or tc > 0
# must have at least one car and at least one truck to support topological strip
# types CT and TC
if c >= 1 and t >= 1:
# min(c,t) defines the largest number of strips permitted
# pairing a car with a truck
max_ct = min(c,t)
CT = []
TC = []
# Each instance of CT ripples into a recalculation of the C and T lists
for ct in range(1,max_ct + 1):
c1 = np.arange((c-ct)//1 + 1) # remaining strips with 1 car
        c2 = np.arange((c-ct)//2 + 1) # remaining strips with 2 cars
"""Wrapper for semi-empirical QM energies with XTB.
"""
__all__ = ["XTBEnergy", "XTBBridge"]
import warnings
import torch
import numpy as np
from .base import _BridgeEnergy, _Bridge
class XTBBridge(_Bridge):
"""Wrapper around XTB for semi-empirical QM energy calculations.
Parameters
----------
numbers : np.ndarray
Atomic numbers
temperature : float
Temperature in Kelvin.
method : str
The semi-empirical method that is used to compute energies.
solvent : str
The solvent. If empty string, perform a vacuum calculation.
verbosity : int
0 (muted), 1 (minimal), 2 (full)
err_handling : str
How to deal with exceptions inside XTB. One of `["ignore", "warning", "error"]`
Attributes
----------
n_atoms : int
The number of atoms in this molecules.
available_solvents : List[str]
The solvent models that are available for computations in xtb.
available_methods : List[str]
The semiempirical methods that are available for computations in xtb.
Examples
--------
Setting up an XTB energy for a small peptide from bgmol
>>> from bgmol.systems import MiniPeptide
>>> from bgflow import XTBEnergy, XTBBridge
>>> import numpy as np
>>> import torch
>>> system = MiniPeptide("G")
>>> numbers = np.array([atom.element.number for atom in system.mdtraj_topology.atoms])
>>> target = XTBEnergy(XTBBridge(numbers=numbers, temperature=300, solvent="water"))
>>> xyz = torch.tensor(system.positions)
>>> energy = target.energy(xyz)
Notes
-----
Requires the xtb-python program (installable with `conda install -c conda-forge xtb-python`).
"""
def __init__(
self,
numbers: np.ndarray,
temperature: float,
method: str = "GFN2-xTB",
solvent: str = "",
verbosity: int = 0,
err_handling: str = "warning"
):
self.numbers = numbers
self.temperature = temperature
self.method = method
self.solvent = solvent
self.verbosity = verbosity
self.err_handling = err_handling
super().__init__()
@property
def n_atoms(self):
return len(self.numbers)
@property
def available_solvents(self):
from xtb.utils import _solvents
return list(_solvents.keys())
@property
def available_methods(self):
from xtb.utils import _methods
return list(_methods.keys())
def _evaluate_single(
self,
positions: torch.Tensor,
evaluate_force=True,
evaluate_energy=True,
):
from xtb.interface import Calculator, XTBException
from xtb.utils import get_method, get_solvent
positions = _nm2bohr(positions)
energy, force = None, None
try:
calc = Calculator(get_method(self.method), self.numbers, positions)
calc.set_solvent(get_solvent(self.solvent))
calc.set_verbosity(self.verbosity)
calc.set_electronic_temperature(self.temperature)
try:
res = calc.singlepoint()
except XTBException:
# Try with higher temperature
calc.set_electronic_temperature(10 * self.temperature)
res = calc.singlepoint()
calc.set_electronic_temperature(self.temperature)
res = calc.singlepoint(res)
if evaluate_energy:
energy = _hartree2kbt(res.get_energy(), self.temperature)
if evaluate_force:
force = _hartree_per_bohr2kbt_per_nm(
-res.get_gradient(),
self.temperature
)
            # Guard against NaNs; skip the check for quantities that were not evaluated
            assert energy is None or not np.isnan(energy)
            assert force is None or not np.isnan(force).any()
except XTBException as e:
if self.err_handling == "error":
raise e
elif self.err_handling == "warning":
warnings.warn(
f"Caught exception in xtb. "
f"Returning infinite energy and zero force. "
f"Original exception: {e}"
)
                force = np.zeros_like(positions)
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
def common_test(points, my_points, func):
"""
Common test part for KS and Kuiper Test
:param points:
:return:
"""
number_of_bins = 100
values, bins = np.histogram(points, bins=number_of_bins)
my_values = my_points
return func(points, values, bins, my_values)
def box_muller(rand_gen, num_samples):
z1 = []
z2 = []
for _ in range(num_samples):
z1.append(next(rand_gen))
z2.append(next(rand_gen))
z1 = np.asarray(z1)
    z2 = np.asarray(z2)
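    # The source file breaks off here; a minimal sketch of the standard Box-Muller
    # transform that would normally follow (an assumption, not the original code):
    # two independent uniform(0, 1) arrays are mapped to two standard-normal arrays
    g1 = np.sqrt(-2.0 * np.log(z1)) * np.cos(2.0 * np.pi * z2)
    g2 = np.sqrt(-2.0 * np.log(z1)) * np.sin(2.0 * np.pi * z2)
    return g1, g2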
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 28 17:08:13 2015
@author: jordan
"""
import os
import sys
import glob
from datetime import datetime, timedelta
import warnings
import numpy as np
from astropy.table import Table, Column, hstack
from astropy.stats import sigma_clipped_stats, gaussian_fwhm_to_sigma
from astropy.wcs import WCS
from astropy.wcs.utils import proj_plane_pixel_area
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.modeling import models, fitting
from astroquery.vizier import Vizier
import scipy.odr as odr
from skimage.measure import moments
from matplotlib import pyplot as plt
from photutils import daofind, aperture_photometry, CircularAperture, CircularAnnulus
import emcee
import corner
import pdb
# Add the AstroImage class
from astroimage.astroimage import AstroImage
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\PRISM_data\\pyPol_data\\201501\\'
# Set the saturation limit for this image (a property of the detector)
satLimit = 12e3
satLimit = 16e3
# satLimit = 60e3
# Setup new directory for polarimetry data
polarimetryDir = os.path.join(pyPol_data, 'Polarimetry')
if (not os.path.isdir(polarimetryDir)):
os.mkdir(polarimetryDir, 0o755)
polAngDir = os.path.join(polarimetryDir, 'polAngImgs')
if (not os.path.isdir(polAngDir)):
os.mkdir(polAngDir, 0o755)
stokesDir = os.path.join(polarimetryDir, 'stokesImgs')
if (not os.path.isdir(stokesDir)):
os.mkdir(stokesDir, 0o755)
################################################################################
# Import the utility functions to be used here...
from utils_08b import *
# Build a dictionary containing references to these transformation functions
USNOBtransforms = dict(zip(['V', 'R' , 'V-R'],
[USNOB_V, USNOB_R, USNOB_VR]))
# Read in the indexFile data and select the filenames
print('\nReading file index from disk')
indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
fileIndex = Table.read(indexFile, format='ascii.csv')
fileList = fileIndex['Filename']
# Determine which parts of the fileIndex pertain to science images
useFiles = np.logical_and((fileIndex['Use'] == 1), (fileIndex['Dither'] == 'ABBA'))
# Cull the file index to only include files selected for use
fileIndex = fileIndex[np.where(useFiles)]
# Group the fileIndex by...
# 1. Target
# 2. Waveband
# 3. Dither (pattern)
fileIndexByTarget = fileIndex.group_by(['Target', 'Dither'])
# Use the following data for final calibration
# Bands and zero point flux [in Jy = 10^(-26) W /(m^2 Hz)]
# Following table from Bessl, Castelli & Plez (1998)
# Passband Effective wavelength (microns) Zero point (Jy)
# U 0.366 1790
# B 0.438 4063
# V 0.545 3636
# R 0.641 3064
# I 0.798 2416
# J 1.22 1589
# H 1.63 1021
# K 2.19 640
zeroFlux = dict(zip(['U', 'B', 'V', 'R' , 'I' ],
[1790.0, 4063.0, 3636.0, 3064.0, 2416.0]))
wavelength = dict(zip(['U',    'B',    'V',    'R'  ,  'I'   ],
                      [0.366,  0.438,  0.545,  0.641,  0.798 ]))
# Following table from Hu (2011)
# Data from Gaomeigu Observational Station
# Passband | K'(lambda) [mag/airmass] | K'' [mag/(color*airmass)]
# U 0.560 +/- 0.023 0.061 +/- 0.004
# B 0.336 +/- 0.021 0.012 +/- 0.003
# V 0.198 +/- 0.024 -0.015 +/- 0.004
# R 0.142 +/- 0.021 -0.067 +/- 0.005
# I 0.093 +/- 0.020 0.023 +/- 0.006
# Following table from Schmude (1994)
# Data from Texas A & M University Observatory
# Passband | K(lambda) [mag/airmass] | dispersion on K(lambda)
# U 0.60 +/- 0.05 0.120
# B 0.40 +/- 0.06 0.165
# V 0.26 +/- 0.03 0.084
# R 0.19 +/- 0.03 0.068
# I 0.16 +/- 0.02 0.055
kappa = dict(zip(['U', 'B', 'V', 'R' ],
[0.60, 0.40, 0.26, 0.19 ]))
# Loop through each group
groupKeys = fileIndexByTarget.groups.keys
for group in fileIndexByTarget.groups:
# Grab the current target information
thisTarget = str(np.unique(group['Target'].data)[0])
print('\nProcessing images for {0}'.format(thisTarget))
# Look for a photometric star catalog for this target
catFile = os.path.join(stokesDir, thisTarget + '_stars.csv')
if os.path.isfile(catFile):
starCatalog = Table.read(catFile, format='ascii.csv')
else:
print('Could not find catalog file for this target')
print('Please re-run script "08a_selectPhotStars.py"')
continue
# Search for all Stokes Intensity images
Ifile = os.path.join(stokesDir, thisTarget + '*I.fits')
Ifiles = glob.glob(Ifile)
# Search for all the Stokes U images
Ufile = os.path.join(stokesDir, thisTarget + '*U.fits')
Ufiles = glob.glob(Ufile)
# Search for all the Stokes Q images
Qfile = os.path.join(stokesDir, thisTarget + '*Q.fits')
Qfiles = glob.glob(Qfile)
# Read in all the Stokes Images found for this target, and strip the
# waveband from the header of each
stokesIimgs = [AstroImage(file1) for file1 in Ifiles]
waveBands = [img.header['FILTNME3'].strip() for img in stokesIimgs]
# Read in the Stokes U images
stokesUimgs = [AstroImage(file1) for file1 in Ufiles]
# Read in the Stokes Q images
stokesQimgs = [AstroImage(file1) for file1 in Qfiles]
# Compose a dictionary of stokes I, U, and Q images
stokesIdict = dict(zip(waveBands, stokesIimgs))
stokesUdict = dict(zip(waveBands, stokesUimgs))
stokesQdict = dict(zip(waveBands, stokesQimgs))
del stokesIimgs, stokesUimgs, stokesQimgs, waveBands
# Grab the WCS info from the header of the stokes Images
wcsDict = {}
yr2000 = datetime(2000,1,1)
deltaTime = timedelta(0)
for key, img in stokesIdict.items():
wcsDict[key] = WCS(img.header)
thisDate = img.header['DATE'].split('T')[0]
thisDate = datetime.strptime(thisDate, '%Y-%m-%d')
deltaTime += (thisDate - yr2000)
# Divide accumulated time vectors by number of measurements
secPerYr = 365.25*24*60*60
deltaTime = deltaTime.total_seconds()/(float(len(stokesIdict))*secPerYr)
# Form a "catalog" of position entries for matching
ra1 = starCatalog['_RAJ2000'].data.data*u.deg
dec1 = starCatalog['_DEJ2000'].data.data*u.deg
# Propagate proper motions into ra1 and dec1 positions
pmRA = starCatalog['pmRA'].data.data*(1e-3)*u.arcsec
    pmDE = starCatalog['pmDE'].data.data*(1e-3)*u.arcsec
ra = ra1 + pmRA*deltaTime
dec = dec1 + pmDE*deltaTime
# Determine PSF properties for each image
    # Initialize a 2D gaussian model and fitter
g_init = models.Gaussian2D(amplitude = 2e2,
x_mean = 8.0,
y_mean = 8.0,
x_stddev = 3.0,
y_stddev = 3.0,
theta = 0.0)
fit_g = fitting.LevMarLSQFitter()
#####
#####
# PERHAPS I NEED TO ALIGN THE IMAGES *BEFORE* I PERFORM PHOTOMETRY.
# THAT WAY, THERE ARE NO EXTRA TRANSFORMATIONS APPLIED TO THE IMAGE BETWEEN
# CALIBRATION AND SAVING TO DISK.
#####
#####
# 1) Loop through all the images
# 2) Determine more accurate star pixel positions (store in dictionary)
# 3) Determine star PSF properties (store in dictionary)
PSF_FWHMs = []
xyStarsDict = {}
keepStarsDict = {}
for key, img in stokesIdict.items():
# Convert the stellar celestial coordinates to pixel positions
xStars, yStars = wcsDict[key].wcs_world2pix(ra, dec, 0)
# Grab the image shape
ny, nx = img.arr.shape
# Loop through each star
starFWHMs = []
xStars1 = []
yStars1 = []
keepStars = []
for xs, ys in zip(xStars, yStars):
# Cut out a 16x16 pixel region around this estimated location
x0 = np.int(np.round(xs)) - 8 if np.int(np.round(xs)) - 8 >= 1 else 1
y0 = np.int(np.round(ys)) - 8 if np.int(np.round(ys)) - 8 >= 1 else 1
# Compute upper bounds based on lower bounds
x1 = x0 + 16
y1 = y0 + 16
# Double check that upper bounds don't break the rules either
if x1 > nx - 2:
x1 = nx - 2
x0 = x1 - 16
if y1 > ny - 2:
y1 = ny - 2
y0 = y1 - 16
# Cut out the actual star patch
patch = img.arr[y0:y1, x0:x1]
# Estimate the local sky value
bigPatch = img.arr[y0-1:y1+1, x0-1:x1+1]
padPatch = np.pad(patch, ((1,1), (1,1)), mode='constant')
skyPatch = bigPatch - padPatch
skyPix = (np.abs(skyPatch) > 1e-3)
if np.sum(skyPix) > 0:
skyInds = np.where(skyPix)
skyVals = skyPatch[skyInds]
else:
print('Cannot find sky')
pdb.set_trace()
skyVal = np.median(skyVals)
# Do a centroid estimate to find the star position
m = moments(patch - skyVal, 1)
xcen = (m[1, 0]/m[0, 0]) + x0
ycen = (m[0, 1]/m[0, 0]) + y0
# Re-cut a 16x16 pixel region around this corrected star position
x0 = np.int(np.round(xcen)) - 8 if np.int(np.round(xcen)) - 8 >= 0 else 0
y0 = np.int(np.round(ycen)) - 8 if np.int(np.round(ycen)) - 8 >= 0 else 0
# Compute upper bounds based on lower bounds
x1 = x0 + 16
y1 = y0 + 16
# Double check that upper bounds don't break the rules either
if x1 > nx - 1:
x1 = nx - 1
x0 = x1 - 16
if y1 > ny - 1:
y1 = ny - 1
y0 = y1 - 16
# Cut out the actual star patch
patch = img.arr[y0:y1, x0:x1]
# Redo a centroid estimate to find the star position.
# Use this value to test whether or not the Gaussian fit is good.
m = moments(patch - skyVal, 1)
xcen = (m[1, 0]/m[0, 0])
ycen = (m[0, 1]/m[0, 0])
xcen1 = xcen + x0
ycen1 = ycen + y0
# Fit a Gaussian to the star cutout
with warnings.catch_warnings():
# Ignore model linearity warning from the fitter
warnings.simplefilter('ignore')
yy, xx = np.mgrid[0:patch.shape[0], 0:patch.shape[1]]
g_fit = fit_g(g_init, xx, yy, patch - skyVal)
            # Test whether the fitted gaussian is close to the expected location
xFit, yFit = (g_fit.x_mean.value, g_fit.y_mean.value)
fitDist = np.sqrt((xcen - xFit)**2 + (ycen - yFit)**2)
# Test for fitting and saturation problems
fitBool = (fitDist < 2.5)
satBool = (patch.max() < satLimit) and (patch.min() > -100)
thisKeepBool = fitBool and satBool
if thisKeepBool == True:
keepStars.append(True)
xStars1.append(xcen1)
yStars1.append(ycen1)
else:
# Build the problem analysis string
probString = ''
if fitBool:
probString = probString + 'fitting '
if satBool:
if len(probString) > 0:
probString = probString + 'and saturation'
else:
probString = probString + 'saturation '
probString = probString + 'problems'
print('skipping star at ({0:4d}, {1:4d}): for {2}'.format(
np.int(xs.round()), np.int(ys.round()), probString))
keepStars.append(False)
xStars1.append(-1)
yStars1.append(-1)
continue
# Store the Gaussian fitted PSF properties in the starFWHMs list
thisSigma = np.sqrt(np.abs(g_fit.x_stddev.value*g_fit.y_stddev.value))
thisFWHM = thisSigma/gaussian_fwhm_to_sigma
starFWHMs.append(thisFWHM)
# Store the mean PSF value in the FWHMlist
mean, med, std = sigma_clipped_stats(starFWHMs)
# mode = 3.0*med - 2.0*mean
# mode = 2.5*med - 1.5*mean
# PSF_FWHMs.append(mode)
PSF_FWHMs.append(mean)
# Store star positions in the xyStarsDict
xyStars = np.array([(xs, ys) for xs, ys in zip(xStars1, yStars1)])
xyStarsDict[key] = xyStars
# Store the star test booleans in the keepStarDict
keepStarsDict[key] = keepStars
# Grab maximum stellar PSF width and use apRad = 2.5*FWHM for photometry
maxFWHM = np.max(PSF_FWHMs)
apRad = 2.5*maxFWHM
anInRad = apRad + 2.0
anOutRad = apRad + 4.0
# Cull the starCatalog entry to only include non-saturated stars.
# First check which stars passed the test in ALL bands.
keepStars = np.ones(len(starCatalog), dtype=bool)
for key, val in keepStarsDict.items():
# Make sure the keep tests are all passed
keepStars = np.logical_and(keepStars, val)
# Make sure the stars are also far enough from the edge using the newly
# determined aperture radius to compute the edge criterion.
ny, nx = stokesIdict[key].arr.shape
edgeCut = np.ceil(anOutRad)
edgeBool = xyStarsDict[key][:,0] > edgeCut
edgeBool = np.logical_and(edgeBool,
xyStarsDict[key][:,0] < nx - 1 - edgeCut)
edgeBool = np.logical_and(edgeBool,
xyStarsDict[key][:,1] > edgeCut)
edgeBool = np.logical_and(edgeBool,
xyStarsDict[key][:,1] < ny - 1 - edgeCut)
# Combine the edge test with the previously determined photometry test
keepStars = np.logical_and(keepStars, edgeBool)
# Test if at least 4 stars passed the tests in all bands
if np.sum(keepStars) >= 4:
# Cull the star catalog to only include detected stars
keepInds = np.where(keepStars)
starCatalog = starCatalog[keepInds]
# Also cull the list of star positions to match between all bands
xyStarsDict1 = xyStarsDict.copy()
for key, val in xyStarsDict.items():
xyStarsDict1[key] = val[keepInds]
# Delete temporary variables
xyStarsDict = xyStarsDict1.copy()
del xyStarsDict1
else:
print('Fewer than 4 stars passed the quality tests in all bands.')
print('Color photometry for this target is impossible')
pdb.set_trace()
# Separate out O, J, E, F magnitudes for predicting V and R bands
# Surveys used for USNO-B1.0:
# ----------------------------------------------------------
# # Name Emuls B/R Wavelen. Zones Fld Dates Epoch
# (nm) (Dec) Obs.
# ----------------------------------------------------------
# 0 = POSS-I 103a-O (B) 350-500 -30..+90 936 1949-1965 (1st)
# 1 = POSS-I 103a-E (R) 620-670 -30..+90 936 1949-1965 (1st)
# 2 = POSS-II IIIa-J (B) 385-540 +00..+87 897 1985-2000 (2nd)
# 3 = POSS-II IIIa-F (R) 610-690 +00..+87 897 1985-1999 (2nd)
# 4 = SERC-J IIIa-J (B) 385-540 -90..-05 606 1978-1990 (2nd)
# 5 = ESO-R IIIa-F (R) 630-690 -90..-05 624 1974-1994 (1st)
# 6 = AAO-R IIIa-F (R) 590-690 -90..-20 606 1985-1998 (2nd)
# 7 = POSS-II IV-N (I) 730-900 +05..+87 800 1989-2000 (N/A)
# 8 = SERC-I IV-N (I) 715-900 -90..+00 892 1978-2002 (N/A)
# 9 = SERC-I* IV-N (I) 715-900 +05..+20 25 1981-2002 (N/A)
# --------------------------------------------------
# Note: Check that the confirmed sources all come from the expected
# surveys. If not, then stop and re-evaluate.
# First grab all the sources used in this data (minus masked points)
B1src = np.unique(starCatalog['B1S'].data.filled(255))[-2::-1]
R1src = np.unique(starCatalog['R1S'].data.filled(255))[-2::-1]
B2src = np.unique(starCatalog['B2S'].data.filled(255))[-2::-1]
R2src = np.unique(starCatalog['R2S'].data.filled(255))[-2::-1]
# Now test if all the specified sources are the expected ones
B1eqO = all([src in [0] for src in B1src])
R1eqE = all([src in [1] for src in R1src])
B2eqJ = all([src in [2, 4] for src in B2src])
R2eqF = all([src in [3, 5, 6] for src in R2src])
if (B1eqO and R1eqE and B2eqJ and R2eqF):
# If the sources are all the expected ones, then parse the emulsions
Omags = starCatalog['B1mag'].data.data
Emags = starCatalog['R1mag'].data.data
Jmags = starCatalog['B2mag'].data.data
Fmags = starCatalog['R2mag'].data.data
# Build a dictionary of USNO-B1.0 magnitudes
USNOBmagDict = dict(zip(['O', 'E', 'J', 'F' ],
[Omags, Emags, Jmags, Fmags]))
else:
# Insert a pause if one of the sources is wrong...
print('There are some unexpected sources for the magnitudes')
print('...stopping...')
pdb.set_trace()
# 1) Loop through all the images.
# 2) Do aperture photometry on the stars
# 3) Store photometry in photDict
# Initialize a dictionary to store the airmass corrected (AMC) Stokes I imgs
stokesIdict_AMC = {}
# Initialize a dictionary to store the photometry tables
photDict = {}
for key, img in stokesIdict.items():
# Now that all the pre-requisites for photometry have been met, it is time
# to apply a waveband based airmass correction and normalize by the exposure
# time. The result, stored in the img1 variable, should be used for all
# subsequent photometry
atmExtMag = kappa[key]*img.header['AIRMASS']
expTime = img.header['EXPTIME']
img1 = img*((10.0**(0.4*atmExtMag))/expTime)
# Store corrected image in the stokesIdict_AMC dictionary
stokesIdict_AMC[key] = img1
# Grab the star positions
xyStars = xyStarsDict[key]
xStars, yStars = xyStars[:,0], xyStars[:,1]
# Establish circular apertures for photometry
apertures = CircularAperture(xyStars, r = apRad)
annulus_apertures = CircularAnnulus(xyStars,
r_in = anInRad, r_out = anOutRad)
# Perform the basic photometry
rawflux_table = aperture_photometry(img1.arr, apertures,
error=img1.sigma)
bkgflux_table = aperture_photometry(img1.arr, annulus_apertures,
error=img1.sigma)
phot_table = hstack([rawflux_table, bkgflux_table],
table_names=['raw', 'bkg'])
# Compute background contribution and subtract from raw photometry
bkg_mean = phot_table['aperture_sum_bkg'] / annulus_apertures.area()
bkg_sig = phot_table['aperture_sum_err_bkg'] / annulus_apertures.area()
bkg_sum = bkg_mean * apertures.area()
bkg_sig = bkg_sig * apertures.area()
# Compute the variance in the background pixels for each star
ny, nx = img1.arr.shape
yy, xx = np.mgrid[0:ny, 0:nx]
bkg_var = []
# Loop through each star and add the local variance to the uncertainty
for xy in xyStars:
xs, ys = xy
distFromStar = np.sqrt((xx - xs)**2 + (yy - ys)**2)
skyPixInds = np.where(np.logical_and(
(distFromStar > anInRad), (distFromStar < anOutRad)))
bkg_var.append(np.var(img1.arr[skyPixInds]))
# Convert the background variance into an array
bkg_var = np.array(bkg_var)
# Compute the final photometry and its uncertainty
final_sum = phot_table['aperture_sum_raw'] - bkg_sum
final_sig = np.sqrt(phot_table['aperture_sum_err_raw']**2
+ bkg_sig**2
+ bkg_var)
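    # (The total uncertainty adds in quadrature: the raw aperture error, the
    #  scaled uncertainty of the background mean, and the measured local sky
    #  variance around each star.)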
phot_table['residual_aperture_sum'] = final_sum
phot_table['residual_aperture_sum_err'] = final_sig
    # Compute the signal-to-noise ratio (stars with SNR < 3.0 could be flagged here)
SNR = final_sum/final_sig
# Now estimate the photometry from USNO-B1.0 and store it for later use
catMags, sigCatMags = USNOBtransforms[key](USNOBmagDict)
phot_table[key+'_catMag'] = catMags
phot_table[key+'_sigCatMag'] = sigCatMags
# Loop through all the stars and detect any duplicate entries. Mark each
# entry with a semi-unique 'Star ID'
# Extract the star positions from the photometry table
# (this is redundant but a nice confirmation that these will be right)
xStars = phot_table['xcenter_raw']
yStars = phot_table['ycenter_raw']
    # Initialize an array to store the starIDs (-1 means unassigned)
starIDs = -1*np.ones(len(phot_table), dtype=int)
for ind, row in enumerate(phot_table):
# Skip over any rows that have been previously treated
        if starIDs[ind] >= 0: continue
# Compute the distance between the current star and all other stars
xs, ys = row['xcenter_raw'], row['ycenter_raw']
dist = np.sqrt((xs - xStars)**2 + (ys - yStars)**2).value
if np.sum(dist < 2.0) > 0:
# Mark all stars within 2.0 pixels of the current star with an
# identical ID.
IDinds = np.where(dist < 2.0)
starIDs[IDinds] = ind
# Add the StarID column to the phot_table
phot_table.add_column(Column(name='star_id', data=starIDs), index=0)
# plt.ion()
# plt.imshow(stokesIdict[key].arr, vmin=0,vmax=800,cmap='gray_r')
# plt.scatter(phot_table['xcenter_raw'], phot_table['ycenter_raw'],
# marker='x', color='red')
# pdb.set_trace()
# Sort the phot_table by starID
sortInds = phot_table['star_id'].data.argsort()
phot_table = phot_table[sortInds]
# Store this photometry table in the dictionary for later use
photDict[key] = phot_table
# ###########################################################################
# # PRINT OUT THE PHOTOMETRY TO CHECK FOR CONSISTENCY
# ###########################################################################
# xFmtStr = '{x[0]:>6}.{x[1]:<3}'
# yFmtStr = '{y[0]:>6}.{y[1]:<3}'
# starFmtStr = '{star[0]:>9}.{star[1]:<3}'
# bkgFmtStr = '{bkg[0]:>9}.{bkg[1]:<3}'
# snrFmtStr = '{snr[0]:>9}.{snr[1]:<3}'
# print('final photometry is...')
# print(' x y Star Flux Bkg Flux SNR')
# print('===========================================================')
# printStr = xFmtStr + yFmtStr + starFmtStr + bkgFmtStr + snrFmtStr
# for i in range(len(SNR)):
# xVal = str(xStars[i]).split('.')
# xVal[1] = (xVal[1])[0:3]
# yVal = str(yStars[i]).split('.')
# yVal[1] = (yVal[1])[0:3]
# starVal = str(final_sum[i]).split('.')
# starVal[1] = (starVal[1])[0:3]
# bkgVal = str(bkg_sum[i]).split('.')
# bkgVal[1] = (bkgVal[1])[0:3]
# snrVal = str(SNR[i]).split('.')
# snrVal[1] = (snrVal[1])[0:3]
# print(printStr.format(x = xVal, y = yVal, star = starVal,
# bkg = bkgVal, snr = snrVal))
# I need to simultaneously solve a set of linear regressions for photometric
# zero-point magnitudes and color correction terms
#
# E.g.
# (V_corrected - V_apparent) = a_0 + a_1 * (V_apparent - R_apparent)
# and
# (R_corrected - R_apparent) = a_2 + a_3 * (V_apparent - R_apparent)
# and
# (V_corrected - R_corrected) = a_4 + a_5 * (V_apparent - R_apparent)
#
# Grab all the successfully measured bandpasses
bandKeys1 = [key for key in stokesIdict.keys()]
# Ensure that they're in wavelength order
# Start by constructing an array with
# Column 0: list of wavebands
# Column 1: list of wavelengths for those bands
bandLamArr = np.array([[key, val] for key, val in wavelength.items()])
# Figure out how to sort this array by increasing wavelength, and create a
# list of possible wavebands in that sorted order
sortArr = bandLamArr[:,1].argsort()
bandList = (bandLamArr[:,0].flatten())[sortArr]
# Loop through the wavebands and construct a wavelength ordered list of
# observed waveband keys in the stokesIdict dictionary.
bandKeys = []
for band in bandList:
if band in bandKeys1:
bandKeys.append(band)
# Loop through the bands and construct keys for a "color dictionary"
colorKeys = []
for ind, band1 in enumerate(bandKeys[:-1]):
# Only worry about colors from adjacent wavebands, one index over
band2 = bandKeys[ind+1]
colorKeys.append('-'.join([band1, band2]))
# Prepare for the linear regressions to be done on each band and color
# Define the model to be used in the fitting
def lineFunc(B, x):
return B[1] + B[0]*x
# Set up ODR with the model and data.
lineModel = odr.Model(lineFunc)
# loop through each linear regression
for colorKey in colorKeys:
print('Preparing the model outliers with MCMC')
# Setup the walker count, burn-in steps, and production steps
n_walkers = 100
n_burn_in_steps = 1000
n_steps = 2000
# Treat the photometric regressions for this set of bands...
# Establish the boundaries of acceptable parameters for the prior
labels = [
r"$\theta$",
r"$b_p$",
r"$P_b$",
r"$M_x$",
r"$\ln V_x$",
r"$M_y$",
r"$\ln V_y$"]
# Create a separate set of labels and indices for those parameters which
# will be plotted in the posterior distribution "corner plot"
plotLabels = [
r"$\theta$",
r"$b_p$",
r"$P_b$",
r"$\ln V_y$"]
plotInds = np.array([0,1,2,6])
bounds1 = [(-1.0, 1.0), # Theta (angle of the line slope)
(18.0, 28.0), # b_perp (min-dist(line-origin))
               (0.0, 1.0),    # Pb (Probability of sampling an outlier)
(-8.0, +8.0), # Mx (<x> of outlier distribution)
(-2.0, 5.0), # lnVx (log-x-variance of outlier distribution)
(-8.0, +8.0), # My (<y> of outlier distribution)
(-2.0, 5.0)] # lnVy (log-y-variance of outlier distribution)
bounds2 = [(+0.0, +1.5), # Theta (angle of the line slope)
(18.0, 28.0), # b_perp (min-dist(line-origin))
               (0.0, 1.0),    # Pb (Probability of sampling an outlier)
(-8.0, +8.0), # Mx (<x> of outlier distribution)
(-2.0, 5.0), # lnVx (log-x-variance of outlier distribution)
(-8.0, +8.0), # My (<y> of outlier distribution)
(-2.0, 5.0)] # lnVy (log-y-variance of outlier distribution)
boundsC = [(-0.5, +1.0), # Theta (angle of the line slope)
(-0.4, +0.75), # b_perp (min-dist(line-origin))
               (0.0, 1.0),    # Pb (Probability of sampling an outlier)
(-8.0, +8.0), # Mx (<x> of outlier distribution)
(-2.0, 5.0), # lnVx (log-x-variance of outlier distribution)
(-8.0, +8.0), # My (<y> of outlier distribution)
(-2.0, 5.0)] # lnVy (log-y-variance of outlier distribution)
# Parse the bands used in this color
band1, band2 = colorKey.split('-')
# Grab the photometry table for these two bands
phot_table1 = photDict[band1]
phot_table2 = photDict[band2]
# Double check that the star IDs are all matched up
if len(phot_table1) != len(phot_table2):
print('Photometry tables do not match!')
pdb.set_trace()
totalMatch = np.sum(phot_table1['star_id'].data == phot_table2['star_id'].data)
if totalMatch < len(phot_table1):
print('Photometry tables do not match!')
pdb.set_trace()
# Since we have confirmed that all the starIDs match up, we will store
# the values from the first phot_table
    starIDs = phot_table1['star_id'].data
# Grab the fluxes for the calibration stars for these two bands
flux1 = phot_table1['residual_aperture_sum'].data
flux2 = phot_table2['residual_aperture_sum'].data
sigFlux1 = phot_table1['residual_aperture_sum_err'].data
sigFlux2 = phot_table2['residual_aperture_sum_err'].data
# Compute the instrumental magnitudes for these two bands
instMags1 = -2.5*np.log10(flux1)
instMags2 = -2.5*np.log10(flux2)
sigInstMags1 = 2.5*np.abs(sigFlux1/(flux1*np.log(10)))
sigInstMags2 = 2.5*np.abs(sigFlux2/(flux2*np.log(10)))
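    # (Standard error propagation through m = -2.5*log10(F):
    #  sigma_m = (2.5/ln 10) * sigma_F/F ~= 1.0857 * sigma_F/F.)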
# Now grab the catalog magnitudes for the calibration stars
catMags1 = phot_table1[band1+'_catMag'].data
catMags2 = phot_table2[band2+'_catMag'].data
sigCatMags1 = phot_table1[band1+'_sigCatMag'].data
sigCatMags2 = phot_table2[band2+'_sigCatMag'].data
# Begin by culling any data from extremely unexpected regions
# Compute the catalog colors for these stars
catColors, sig_catColors = USNOBtransforms[colorKey](USNOBmagDict)
# Compute the band1 - band2 color
xTest = instMags1 - instMags2
yTest = catColors
# Set some boundaries for acceptable color-color data
# slope1, intercept1 = np.tan(0.561), 0.0055/np.cos(0.561)
# slope2, intercept2 = np.tan(0.658), 0.233/np.cos(0.658)
slope1, intercept1 = np.tan(0.45), 0.00/np.cos(0.45)
slope2, intercept2 = np.tan(0.70), 0.25/np.cos(0.70)
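    # (These cuts use a (theta, b_perp) line parameterization,
    #  y = tan(theta)*x + b_perp/cos(theta), where theta is the slope angle
    #  and b_perp the perpendicular distance of the line from the origin --
    #  the same convention as the MCMC line model below.)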
keepPts = (yTest > (slope1*xTest + intercept1 - 0.25))
keepPts = np.logical_and(keepPts,
(yTest < slope2*xTest + intercept2 + 0.25))
keepInds = np.where(keepPts)
# Now perform the actual data cuts
starIDs = starIDs[keepInds]
instMags1 = instMags1[keepInds]
instMags2 = instMags2[keepInds]
sigInstMags1 = sigInstMags1[keepInds]
sigInstMags2 = sigInstMags2[keepInds]
catMags1 = catMags1[keepInds]
catMags2 = catMags2[keepInds]
sigCatMags1 = sigCatMags1[keepInds]
sigCatMags2 = sigCatMags2[keepInds]
catColors = catColors[keepInds]
sig_catColors = sig_catColors[keepInds]
########################################################################
############################# COLOR-COLOR ##############################
########################################################################
print('Running initial Color-Color regression')
# Compute the colors for these stars
xC = instMags1 - instMags2
yC = catColors
sxC = np.sqrt(sigInstMags1**2 + sigInstMags2**2)
syC = sig_catColors
### THIS CODE SIMPLY DISPLAYS THE DATA TO THE USER TO SEE IF
### THE SELECTED "GOOD-DATA" REGION IS ACCEPTABLE.
###
# slope1, intercept1 = np.tan(0.45), 0.00/np.cos(0.45)
# slope2, intercept2 = np.tan(0.70), 0.25/np.cos(0.70)
# plt.errorbar(xC, yC, xerr=sxC, yerr=syC, fmt='None', ecolor='k')
# plt.plot(xC, slope1*xC + intercept1 - 0.25, color='k')
# plt.plot(xC, slope2*xC + intercept2 + 0.25, color='k')
# pdb.set_trace()
# plt.close('all')
# continue
# Perform the MCMC sampling of the posterior
data = (xC, yC, sxC, syC)
samplerC = MCMCfunc(data, boundsC,
n_walkers=n_walkers,
n_burn_in_steps=n_burn_in_steps,
n_steps=n_steps)
# Plot the posteriors to see if a reasonable result was obtained.
# plotSamples = samplerC.flatchain[:,plotInds]
# plotBounds = np.array(boundsC)[plotInds]
# corner.corner(plotSamples, bins=100,
# range=plotBounds,
# labels=plotLabels)
#
# # Save the figure to disk
# fname = os.path.join(stokesDir, thisTarget + '_MCMC.png')
# plt.savefig(fname, dpi=300)
# plt.close('all')
# Compute the posterior probability that each data-point is "good"
norm = 0.0
post_probC = np.zeros(len(data[0]))
for i in range(samplerC.chain.shape[1]):
for j in range(samplerC.chain.shape[0]):
ll_fg, ll_bg = samplerC.blobs[i][j]
post_probC += np.exp(ll_fg - np.logaddexp(ll_fg, ll_bg))
norm += 1
post_probC /= norm
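    # (Each sampler blob holds the foreground/background log-likelihoods of
    #  the good-data/outlier mixture; exp(ll_fg - logaddexp(ll_fg, ll_bg)) is
    #  the per-step posterior probability that a point belongs to the
    #  foreground line, averaged here over the whole chain.)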
# Loop through all entries and eliminate the less probable of all
# *PAIRED* entries.
keepBool = np.zeros(len(post_probC), dtype=bool)
for ind, idNum in enumerate(starIDs):
# Skip over already treated indices
        if keepBool[ind]: continue
# Test which starIDs equal *THIS* starID
testBool = (starIDs == idNum)
        if np.sum(testBool) > 1:
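            # NOTE: the source is truncated here; the lines below are a
            # plausible reconstruction that keeps, for each group of paired
            # entries, only the entry with the highest posterior probability.
            testInds = np.where(testBool)[0]
            keepBool[testInds[post_probC[testInds].argmax()]] = True
        else:
            keepBool[ind] = True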
# Utility functions for the course Robot Modelling
# <NAME> (<EMAIL>), sept. 2016
#
# Additional functions added for more functionality
# <NAME> (<EMAIL>), sept. 2018
# <NAME> (<EMAIL>), sept. 2018
###############################################################################
import numpy as np
from numpy import cos, sin
# Checks if a matrix is a valid rotation matrix.
def isRotationMatrix(R):
"""
Check if input is a correct matrix
:param R:
:return:
"""
Rt = np.transpose(R.copy())
shouldBeIdentity = np.dot(Rt, R)
I = np.identity(3, dtype = R.dtype)
n = np.linalg.norm(I - shouldBeIdentity)
return n < 1e-6
def inverse_kinematics_wrist(R):
"""
Calculates the inverse kinematics of the wrist of the robot
:param R:
:return:
"""
minplus = 1
t5 = np.arctan2(minplus * np.sqrt(1 - (R[2, 2]**2)), R[2, 2])
t4 = np.arctan2(minplus * R[1, 2], minplus * R[0, 2])
t6 = np.arctan2(minplus * R[2, 1], minplus * -R[2, 0])
R_check = np.array([[cos(t4) * cos(t5) * cos(t6) - sin(t4) * sin(t6) - R[0, 0], -cos(t4) * cos(t5) * sin(t6) - sin(t4) * cos(t6) - R[0, 1], cos(t4) * sin(t5) - R[0, 2]],
[sin(t4) * cos(t5) * cos(t6) + cos(t4) * sin(t6) - R[1, 0], -sin(t4) * cos(t5) * sin(t6) + cos(t4) * cos(t6) - R[1, 1], sin(t4) * sin(t5) - R[1, 2]],
[-sin(t5) * cos(t6) - R[2, 0], sin(t5) * sin(t6) - R[2, 1], cos(t5) - R[2, 2]]])
return np.array([t4, t5, t6]), R_check
def make_rotation_matrix(axis, angle):
"""
make a rotation matrix based on an angle and specified axis
:param axis: string that specifies over which axis will be rotated
:param angle: rotation angle in radians
:return: rotation matrix
"""
if axis == "x":
return np.array([[1, 0, 0],
[0, cos(angle), -sin(angle)],
[0, sin(angle), cos(angle)]])
elif axis == "y":
return np.array([[cos(angle), 0, -sin(angle)],
[0, 1, 0],
[sin(angle), 0, cos(angle)]])
elif axis == "z":
return np.array([[cos(angle), -sin(angle), 0],
[sin(angle), cos(angle), 0],
[0, 0, 1]])
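# Example usage sketch (hypothetical angle): build an elementary rotation and
# feed it to the wrist IK; R_check holds the elementwise residuals between
# the reconstructed and the input rotation.
#   angles, R_check = inverse_kinematics_wrist(make_rotation_matrix("z", 0.3))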
def make_DH_matrix(DH_parameters):
"""
make a homogenious matrix based on the Denavit Hartenberg Convention
:param DH_parameters: array of 4 with all DH parameters
:return: DH matrix
"""
from numpy import cos, sin
length = DH_parameters[0]
twist = DH_parameters[1]
offset = DH_parameters[2]
angle = DH_parameters[3]
return np.array([[cos(angle), -sin(angle) * cos(twist), sin(angle) * sin(twist), length * cos(angle)],
[sin(angle), cos(angle) * cos(twist), -cos(angle) * sin(twist), length * sin(angle)],
[0, sin(twist), cos(twist), offset],
[0, 0, 0, 1]])
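# Example usage sketch (hypothetical DH table): chaining per-joint DH
# matrices gives the forward-kinematics pose of the end effector in the
# base frame.
#   T01 = make_DH_matrix([0.5, 0.0, 0.0, 0.3])    # [length, twist, offset, angle]
#   T12 = make_DH_matrix([0.4, 0.0, 0.0, -0.2])
#   T02 = T01.dot(T12)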
def interpolate(values, precision):
"""Create positionvalues within the given trajectory
precision = amount of subvalues"""
nr_values = len(values)
solution = []
for nr in range(0, nr_values):
if nr < nr_values - 1:
delta_val = np.subtract(values[nr + 1], values[nr])
x_val = np.true_divide(delta_val, precision)
for x in range(0, precision):
solution.append(np.add(values[nr], np.multiply(x_val, x)))
else:
break
solution = np.array(solution)
return solution
def make_homogenious_matrix(rotation, translation):
return np.vstack((np.hstack((rotation, translation)), np.array([0, 0, 0, 1])))
# function for the inverse kinematics of a 3DOF robot
def inverse_algorithm_3DOF(arms, points, elbow_down=False):
"""Inverse kinematics of a scara robot.
Inputs:
arms: 3-element array/list with arm lengths
point2: 3-element array with (x,y,z) coordinate of end point
elbow_down (optional): True/False boolean to determine
which solution needs to be returned
Output:
angles: 3-element array/list with angles in radians(!)
"""
x = points[0]
y = points[1]
z = points[2]
d1 = arms[0]
d2 = arms[1]
d3 = arms[2]
s = z - d1
r = np.sqrt(x**2 + y**2)
c = np.sqrt(r**2 + s**2)
beta = np.arctan2(s, r)
alpha = np.arccos(np.minimum(1, ((-d3**2 + d2**2 + c**2) / (2 * d2 * c))))
theta1 = np.arctan2(y, x)
upper_cos = (-c**2 + d3**2 + d2**2)
lower_cos = (2 * d3 * d2)
if abs(upper_cos) > abs(lower_cos):
return [0, 0, 0], True
if elbow_down:
theta2 = beta - alpha
theta3 = np.radians(180) - np.arccos(np.minimum(1, (upper_cos / lower_cos)))
else:
theta2 = beta + alpha
theta3 = -(np.radians(180) - np.arccos(np.minimum(1, (upper_cos / lower_cos))))
angles = [theta1, theta2, theta3, 0]
return angles, False
def kin_planar_forward(arms, angles):
"""Forward kinematics of a 2-link planar robot.
Inputs:
arms: 2-element array/list with arm lengths
angles: 2-element array/list with angles in radians(!)
Output:
        points: 2-element numpy array with (x,y) coordinate of end point
"""
x1 = arms[0] * np.cos(angles[0])
y1 = arms[0] * np.sin(angles[0])
x2 = x1 + arms[1] * np.cos(angles[0] + angles[1])
y2 = y1 + arms[1] * np.sin(angles[0] + angles[1])
points = np.array([x2, y2])
return points
def kin_planar_inverse(arms, points, elbow_down=True):
"""Inverse kinematics of a 2-link planar robot.
Inputs:
arms: 2-element array/list with arm lengths
        points: 2-element array with (x,y) coordinate of end point
elbow_down (optional): True/False boolean to determine
which solution needs to be returned
Output:
angles: 2-element array/list with angles in radians(!)
"""
x = points[0]
y = points[1]
a1 = arms[0]
a2 = arms[1]
D = (x ** 2 + y ** 2 - a1 ** 2 - a2 ** 2) / (2 * a1 * a2)
f = np.sqrt(1 - (D ** 2))
if elbow_down:
theta2 = np.arctan2(f, D)
else:
theta2 = np.arctan2(-f, D)
theta1 = np.arctan2(y, x) - np.arctan2((a2 * np.sin(theta2)), (a1 + a2 * np.cos(theta2)))
angles = np.array([theta1, theta2])
return angles
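# Example usage sketch (hypothetical arm lengths): the planar forward and
# inverse kinematics should round-trip to the same end point.
#   angles = kin_planar_inverse([1.0, 0.8], [1.2, 0.6])
#   assert np.allclose(kin_planar_forward([1.0, 0.8], angles), [1.2, 0.6])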
def sphere():
import pyqtgraph.opengl as gl
sphere_data= gl.MeshData.sphere(rows=8,
cols=16)
obj = gl.GLMeshItem(meshdata=sphere_data,
smooth=False,
drawFaces=True,
faceColor=(0.2, 0.3, 0.4, 1),
drawEdges=False,
edgeColor=(0.2, 0.3, 0.4, 1))
return obj
# cylinder is a convenience function to create a cylinder shape in
# pyqtgraph/OpenGL, it gives you a number of vertices distributed over the
# surface of the cylinder and triangular shaped faces that cover the whole
# surface of the cylinder
# cylinders are being used to visualize joints
def cylinder(radius, height, N):
"""Calculates vertices and faces for a cylinder for visualisation in
pyqtgraph/OpenGL.
Inputs:
radius: radius of the cylinder
height: height of the cylinder
N: number of segments to approximate the circular shape of the cylinder
Outputs:
vertices: array with on each row the (x,y,z) coordinates of the vertices
faces: array with triangular faces of the cylinder
Note:
The cylinder is a circle in the x,y plane with center at (0,0) that is
extruded along the z-axis.
"""
import scipy.spatial
t = np.linspace(0, 2 * np.pi, N, endpoint=False).reshape(N, 1)
vertices = np.zeros((2 * N, 3))
vertices[0:N, :] = np.hstack((radius * np.cos(t), radius * np.sin(t), np.zeros((N, 1))))
vertices[N:2 * N, :] = vertices[0:N, :] + np.hstack((np.zeros((N, 2)), height * np.ones((N, 1))))
faces = np.zeros((N - 2 + 2 * N + N - 2, 3), dtype=np.uint)
# bottom, makes use of Delaunay triangulation contained in Scipy's
# submodule spatial (which on its turn makes use of the Qhull library)
    faces[0:N - 2, :] = scipy.spatial.Delaunay(
        vertices[0:N, 0:2], furthest_site=True,
        qhull_options='QJ').simplices[:, -1::-1]
# sides
for i in range(N - 1):
faces[N - 2 + 2 * i, :] = np.array([i, i + 1, N + i + 1], dtype=np.uint)
faces[N - 2 + 2 * i + 1, :] = np.array([i, N + i + 1, N + i], dtype=np.uint)
# final one between the last and the first:
faces[N - 2 + 2 * (N - 1), :] = np.array([N - 1, 0, N], dtype=np.uint)
faces[N - 2 + 2 * (N - 1) + 1, :] = np.array([N - 1, N, 2 * N - 1], dtype=np.uint)
# top
faces[N - 2 + 2 * N:N - 2 + 2 * N + N - 2, :] = N + faces[0:N - 2, -1::-1]
return vertices, faces
# similar to the cylinder, but now for creating a box-shaped object
# boxes are used to visualize links
def box(size=(1, 1, 1)):
"""Calculates vertices and faces for a box for visualisation in
pyqtgraph/OpenGL.
Inputs:
size: 3 element array/list with the width,depth,height, i.e.
the dimensions along the x, y and z-axis.
Outputs:
vertices: array with on each row the (x,y,z) coordinates of the vertices
faces: array with triangular faces of the box
Note:
The box is between (0,0,0) and (size[0],size[1],size[2]), note that
negative sizes are not prevented but result in strange artifacts because
it changes the orientation of the faces of the box (inside becomes
outside).
"""
vertices = np.zeros((8, 3))
faces = np.zeros((12, 3), dtype=np.uint)
xdim = size[0]
ydim = size[1]
zdim = size[2]
vertices[0, :] = np.array([0, ydim, 0])
vertices[1, :] = np.array([xdim, ydim, 0])
vertices[2, :] = np.array([xdim, 0, 0])
vertices[3, :] = np.array([0, 0, 0])
vertices[4, :] = np.array([0, ydim, zdim])
vertices[5, :] = np.array([xdim, ydim, zdim])
vertices[6, :] = np.array([xdim, 0, zdim])
vertices[7, :] = np.array([0, 0, zdim])
faces = np.array([
# bottom (clockwise, while looking from top)
[2, 1, 0],
[3, 2, 0],
# sides (counter-clock-wise)
[0, 1, 5],
[0, 5, 4],
[1, 2, 6],
[1, 6, 5],
[2, 3, 7],
[2, 7, 6],
[3, 0, 4],
[3, 4, 7],
# top (counter-clockwise)
[4, 5, 6],
[4, 6, 7]
], dtype=np.uint)
return vertices, faces
def rotate_xyz(angles):
"""
Calculates the rotations matrix for xyz angles
(x,y,z)
:param angles:
:return:
"""
x, y, z = angles
rotate_x = np.array([[1, 0, 0],
                         [0, np.cos(x), np.sin(x)],
                         [0, -np.sin(x), np.cos(x)]])
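    # NOTE: the source is truncated here; the remaining axes and the return
    # statement below follow the same sign convention as the reconstructed
    # x-rotation above.
    rotate_y = np.array([[np.cos(y), 0, -np.sin(y)],
                         [0, 1, 0],
                         [np.sin(y), 0, np.cos(y)]])
    rotate_z = np.array([[np.cos(z), np.sin(z), 0],
                         [-np.sin(z), np.cos(z), 0],
                         [0, 0, 1]])
    return rotate_x.dot(rotate_y).dot(rotate_z)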
from rdkit import Chem
from rdkit.Chem import AllChem, Descriptors
import numpy as np
from rdkit.ML.Descriptors import MoleculeDescriptors
from sklearn import preprocessing
import random
from hyperopt import tpe, fmin, Trials
from sklearn.metrics import average_precision_score, roc_auc_score
from sklearn.model_selection import StratifiedKFold
from datetime import datetime
from imxgboost.imbalance_xgb import imbalance_xgboost as imb_xgb
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
#%%
"DESCRIPTOR FUNCTIONS"
def getmorgan(mols, radius, bits):
"""
Calculates ECFP fingerprints of given radius and number of bits for a list N of rdkit molecules.
Returns a np.array (N, bits).
"""
Morgan = [AllChem.GetMorganFingerprintAsBitVect(x,radius,nBits=bits) for x in mols]
final = np.array(Morgan, dtype = np.float32)
return final
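# Example usage sketch (hypothetical SMILES): build molecules with RDKit and
# compute a radius-2, 2048-bit ECFP matrix.
#   mols = [Chem.MolFromSmiles(s) for s in ('CCO', 'c1ccccc1')]
#   fps = getmorgan(mols, radius=2, bits=2048)    # -> shape (2, 2048)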
def make2D(mols):
"""
Calculates RDKIT descriptors for a list N of rdkit molecules.
Returns a sanitized np.array.
"""
names = [x[0] for x in Descriptors._descList]
calc = MoleculeDescriptors.MolecularDescriptorCalculator(names)
descs = [0] * len(mols)
counter = 0
for i in range(len(descs)):
counter = counter + 1
if counter == 1000:
print(i)
counter = 0
temp = calc.CalcDescriptors(mols[i])
if temp is not None:
temp = temp[1:]
descs[i] = temp
descs = np.array(descs)
descs = np.nan_to_num(descs, posinf=10e10, neginf=-10e10)
return descs
#%%
"OPTIMIZATION HELPER FUNCTIONS"
def create_set(full_dataset, y, under):
"""
Input
full_dataset: np.array (N, D), molecular descriptors D for each compound N
y: np.array (N, 1), binary label for each compound N
under: dict value or int, defines how many inactive compounds to use
in each subset
Output
inbalanced: np.array (Nx, D), molecular descriptors D for each compound Nx
after resampling
y: np.array (Nx, 1), binary label for each compound Nx
after resampling
"""
#find idx of all 0s and 1s
y_pos = []
y_neg = []
under = int(under) #make sure parameter is in the right format
for i in range(len(y)):
if y[i] == 1:
y_pos.append(i)
else:
y_neg.append(i)
#divide dataset in respective classes
actives = [full_dataset[x] for x in y_pos]
inactives = [full_dataset[x] for x in y_neg]
#sample inactive class and reconstruct
subsample = random.sample(inactives, under)
inbalanced = np.array(actives+subsample)
y_pos = [1] * len(actives)
y_neg = [0] * len(subsample)
y = np.array(y_pos + y_neg)
return inbalanced, y
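# Design note: create_set keeps every active compound and draws a fresh
# random subset of 'under' inactives on each call, so repeated calls give
# each ensemble member a different view of the majority class
# (random under-sampling bagging).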
def crossval(dataset, y, params, norm):
"""
Input
dataset: np.array (Nt, D), molecular descriptors D for each compound N
y: np.array (N, 1), binary label for each compound N
params: dictionary (5 params), hyperparameters of the ensemble
Output
avg_pr: np.float, average area-under-curve for the Precision-Recall curve
in 5fold stratified cross-validation
avg_roc: np.float, average area-under-curve for the ROC curve
in 5fold stratified cross-validation
"""
#initialize placeholders, start timer
global_time = datetime.now()
box_PR = []
box_ROC = []
n_folds = 5
params['n_e'] = int(params['n_e']) #make sure the param is in the right format
kfold = StratifiedKFold(n_folds, shuffle=True, random_state=1) #set state for reproducibility
for train_index, test_index in kfold.split(dataset, y):
cv_time = datetime.now() #monitor time per fold
#get folds
X_t = np.array([dataset[i] for i in train_index])
X_v = np.array([dataset[i] for i in test_index])
y_t = np.array([y[i] for i in train_index])
y_v = np.array([y[i] for i in test_index])
#initialize box for ensemble predictions
preds_box = []
if norm == "yes":
scaler = preprocessing.StandardScaler().fit(X_t)
X_t = scaler.transform(X_t)
X_v = scaler.transform(X_v)
#iterate for each model in the ensemble
for i in range(params['n_e']):
#get dataset
Res, y_r = create_set(X_t, y_t,
params['under'])
#create model with right parameters
model = imb_xgb(
special_objective='focal',
focal_gamma = params['gamma'],
max_depth = params['depth'],
eta = params['eta']
)
#fit, predict and store in box
model.fit(Res, y_r)
preds = model.predict_sigmoid(X_v)
preds_box.append(preds)
#calculate mean of all predictions in the box
        preds_box = np.transpose(np.array(preds_box))
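        # NOTE: the source is truncated here; the lines below are a plausible
        # completion that averages the ensemble predictions and scores the fold.
        ens_preds = np.mean(preds_box, axis=1)
        box_PR.append(average_precision_score(y_v, ens_preds))
        box_ROC.append(roc_auc_score(y_v, ens_preds))
        print('Fold time:', datetime.now() - cv_time)
    avg_pr = np.mean(box_PR)
    avg_roc = np.mean(box_ROC)
    print('Total CV time:', datetime.now() - global_time)
    return avg_pr, avg_roc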
import numpy as np
from cs231n.layers import *
from cs231n.layer_utils import *
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network with ReLU nonlinearity and
softmax loss that uses a modular layer design. We assume an input dimension
of D, a hidden dimension of H, and perform classification over C classes.
    The architecture should be affine - relu - affine - softmax.
Note that this class does not implement gradient descent; instead, it
will interact with a separate Solver object that is responsible for running
optimization.
The learnable parameters of the model are stored in the dictionary
self.params that maps parameter names to numpy arrays.
"""
def __init__(self, input_dim=3*32*32, hidden_dim=100, num_classes=10,
weight_scale=1e-3, reg=0.0):
"""
Initialize a new network.
Inputs:
- input_dim: An integer giving the size of the input
- hidden_dim: An integer giving the size of the hidden layer
- num_classes: An integer giving the number of classes to classify
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- reg: Scalar giving L2 regularization strength.
"""
self.params = {}
self.reg = reg
############################################################################
# TODO: Initialize the weights and biases of the two-layer net. Weights #
# should be initialized from a Gaussian with standard deviation equal to #
# weight_scale, and biases should be initialized to zero. All weights and #
# biases should be stored in the dictionary self.params, with first layer #
# weights and biases using the keys 'W1' and 'b1' and second layer weights #
# and biases using the keys 'W2' and 'b2'. #
############################################################################
self.params['W1'] = weight_scale * np.random.randn(input_dim, hidden_dim)
self.params['b1'] = np.zeros(hidden_dim)
self.params['W2'] = weight_scale * np.random.randn(hidden_dim, num_classes)
self.params['b2'] = np.zeros(num_classes)
############################################################################
# END OF YOUR CODE #
############################################################################
def sigmoid(self, x):
return 1 / ( 1 + np.exp(-x))
def relu(self, x):
return np.maximum(0, x)
def loss(self, X, y=None):
"""
Compute loss and gradient for a minibatch of data.
Inputs:
- X: Array of input data of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,). y[i] gives the label for X[i].
Returns:
If y is None, then run a test-time forward pass of the model and return:
- scores: Array of shape (N, C) giving classification scores, where
scores[i, c] is the classification score for X[i] and class c.
If y is not None, then run a training-time forward and backward pass and
return a tuple of:
- loss: Scalar value giving the loss
- grads: Dictionary with the same keys as self.params, mapping parameter
names to gradients of the loss with respect to those parameters.
"""
scores = None
############################################################################
# TODO: Implement the forward pass for the two-layer net, computing the #
# class scores for X and storing them in the scores variable. #
############################################################################
# If X has more than 2 dimensions then reshape so that X has 2 dimensions
if len(X.shape) > 2:
        X = X.reshape(X.shape[0], -1)
# Unpack variables from the params dictionary
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
reg = self.reg
N = X.shape[0]
        a1 = np.dot(X, W1) + b1
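        # NOTE: the source is truncated here; the remainder below is a standard
        # completion of the two-layer net (forward pass, softmax loss, and
        # analytic gradients).
        h1 = self.relu(a1)
        scores = np.dot(h1, W2) + b2
        # Test-time forward pass: just return the class scores
        if y is None:
            return scores
        # Softmax loss (numerically stabilized) plus L2 regularization
        shifted = scores - np.max(scores, axis=1, keepdims=True)
        probs = np.exp(shifted) / np.sum(np.exp(shifted), axis=1, keepdims=True)
        loss = -np.sum(np.log(probs[np.arange(N), y])) / N
        loss += 0.5 * reg * (np.sum(W1 * W1) + np.sum(W2 * W2))
        # Backward pass: gradients of the loss w.r.t. all parameters
        grads = {}
        dscores = probs.copy()
        dscores[np.arange(N), y] -= 1
        dscores /= N
        grads['W2'] = np.dot(h1.T, dscores) + reg * W2
        grads['b2'] = np.sum(dscores, axis=0)
        dh1 = np.dot(dscores, W2.T)
        da1 = dh1 * (a1 > 0)  # ReLU gradient
        grads['W1'] = np.dot(X.T, da1) + reg * W1
        grads['b1'] = np.sum(da1, axis=0)
        return loss, grads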
# -*- coding: utf-8 -*-
"""
author: <NAME>, University of Bristol, <EMAIL>
"""
import numpy as np
from derivative import derivative
def nominator(F_x, F_y, F_z, F_xx, F_xy, F_yy, F_yz, F_zz, F_xz):
m = np.array([[F_xx, F_xy, F_xz, F_x],
[F_xy, F_yy, F_yz, F_y],
[F_xz, F_yz, F_zz, F_z],
[F_x, F_y, F_z, 0]])
    d = np.linalg.det(m)
    return d
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import tqdm
import torch
import pickle
import resource
import numpy as np
import matplotlib.pyplot as plt
from args import parse_args
from modelSummary import model_dict
from pytorchtools import load_from_file
from torch.utils.data import DataLoader
from helperfunctions import mypause, stackall_Dict
from loss import get_seg2ptLoss
from utils import get_nparams, get_predictions
from utils import getSeg_metrics, getPoint_metric, generateImageGrid, unnormPts
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), os.pardir)))
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2048*10, rlimit[1]))
#%%
if __name__ == '__main__':
args = parse_args()
device=torch.device("cuda")
torch.cuda.manual_seed(12)
if torch.cuda.device_count() > 1:
print('Moving to a multiGPU setup.')
args.useMultiGPU = True
else:
args.useMultiGPU = False
torch.backends.cudnn.deterministic=False
if args.model not in model_dict:
print("Model not found.")
print("valid models are: {}".format(list(model_dict.keys())))
exit(1)
LOGDIR = os.path.join(os.getcwd(), 'logs', args.model, args.expname)
path2model = os.path.join(LOGDIR, 'weights')
path2checkpoint = os.path.join(LOGDIR, 'checkpoints')
path2writer = os.path.join(LOGDIR, 'TB.lock')
path2op = os.path.join(os.getcwd(), 'op', str(args.curObj))
os.makedirs(LOGDIR, exist_ok=True)
os.makedirs(path2model, exist_ok=True)
os.makedirs(path2checkpoint, exist_ok=True)
os.makedirs(path2writer, exist_ok=True)
os.makedirs(path2op, exist_ok=True)
model = model_dict[args.model]
netDict = load_from_file([args.loadfile,
os.path.join(path2checkpoint, 'checkpoint.pt')])
startEp = netDict['epoch'] if 'epoch' in netDict.keys() else 0
if 'state_dict' in netDict.keys():
model.load_state_dict(netDict['state_dict'])
print('Parameters: {}'.format(get_nparams(model)))
model = model if not args.useMultiGPU else torch.nn.DataParallel(model)
model = model.to(device).to(args.prec)
f = open(os.path.join('curObjects',
'baseline',
'cond_'+str(args.curObj)+'.pkl'), 'rb')
_, _, testObj = pickle.load(f)
testObj.path2data = os.path.join(args.path2data, 'Datasets', 'All')
testObj.augFlag = False
testloader = DataLoader(testObj,
batch_size=args.batchsize,
shuffle=False,
num_workers=args.workers,
drop_last=False)
if args.disp:
fig, axs = plt.subplots(nrows=1, ncols=1)
#%%
accLoss = 0.0
imCounter = 0
ious = []
dists_pupil_latent = []
dists_pupil_seg = []
dists_iris_latent = []
dists_iris_seg = []
model.eval()
opDict = {'id':[], 'archNum': [], 'archName': [], 'code': [],
'scores':{'iou':[], 'lat_dst':[], 'seg_dst':[]},
'pred':{'pup_latent_c':[],
'pup_seg_c':[],
'iri_latent_c':[],
'iri_seg_c':[],
'mask':[]},
'gt':{'pup_c':[], 'mask':[]}}
with torch.no_grad():
for bt, batchdata in enumerate(tqdm.tqdm(testloader)):
img, labels, spatialWeights, distMap, pupil_center, iris_center, elNorm, cond, imInfo = batchdata
out_tup = model(img.to(device).to(args.prec),
labels.to(device).long(),
pupil_center.to(device).to(args.prec),
elNorm.to(device).to(args.prec),
spatialWeights.to(device).to(args.prec),
distMap.to(device).to(args.prec),
cond.to(device).to(args.prec),
imInfo[:, 2].to(device).to(torch.long),
0.5)
output, elOut, latent, loss = out_tup
latent_pupil_center = elOut[:, 0:2].detach().cpu().numpy()
latent_iris_center = elOut[:, 5:7].detach().cpu().numpy()
_, seg_pupil_center = get_seg2ptLoss(output[:, 2, ...].cpu(), pupil_center, temperature=4)
_, seg_iris_center = get_seg2ptLoss(-output[:, 0, ...].cpu(), iris_center, temperature=4)
loss = loss if args.useMultiGPU else loss.mean()
accLoss += loss.detach().cpu().item()
predict = get_predictions(output)
iou, iou_bySample = getSeg_metrics(labels.numpy(),
predict.numpy(),
cond[:, 1].numpy())[1:]
latent_pupil_dist, latent_pupil_dist_bySample = getPoint_metric(pupil_center.numpy(),
latent_pupil_center,
cond[:,0].numpy(),
img.shape[2:],
True) # Unnormalizes the points
seg_pupil_dist, seg_pupil_dist_bySample = getPoint_metric(pupil_center.numpy(),
seg_pupil_center,
cond[:,1].numpy(),
img.shape[2:],
True) # Unnormalizes the points
latent_iris_dist, latent_iris_dist_bySample = getPoint_metric(iris_center.numpy(),
latent_iris_center,
cond[:,1].numpy(),
img.shape[2:],
True) # Unnormalizes the points
seg_iris_dist, seg_iris_dist_bySample = getPoint_metric(iris_center.numpy(),
seg_iris_center,
cond[:,1].numpy(),
img.shape[2:],
True) # Unnormalizes the points
dists_pupil_latent.append(latent_pupil_dist)
dists_iris_latent.append(latent_iris_dist)
dists_pupil_seg.append(seg_pupil_dist)
dists_iris_seg.append(seg_iris_dist)
ious.append(iou)
pup_latent_c = unnormPts(latent_pupil_center,
img.shape[2:])
pup_seg_c = unnormPts(seg_pupil_center,
img.shape[2:])
iri_latent_c = unnormPts(latent_iris_center,
img.shape[2:])
iri_seg_c = unnormPts(seg_iris_center,
img.shape[2:])
dispI = generateImageGrid(img.numpy().squeeze(),
predict.numpy(),
elOut.detach().cpu().numpy().reshape(-1, 2, 5),
pup_seg_c,
cond.numpy(),
override=True,
heatmaps=False)
for i in range(0, img.shape[0]):
archNum = testObj.imList[imCounter, 1]
opDict['id'].append(testObj.imList[imCounter, 0])
opDict['code'].append(latent[i,...].detach().cpu().numpy())
opDict['archNum'].append(archNum)
opDict['archName'].append(testObj.arch[archNum])
opDict['pred']['pup_latent_c'].append(pup_latent_c[i, :])
opDict['pred']['pup_seg_c'].append(pup_seg_c[i, :])
opDict['pred']['iri_latent_c'].append(iri_latent_c[i, :])
opDict['pred']['iri_seg_c'].append(iri_seg_c[i, :])
if args.test_save_op_masks:
opDict['pred']['mask'].append(predict[i,...].numpy().astype(np.uint8))
opDict['scores']['iou'].append(iou_bySample[i, ...])
opDict['scores']['lat_dst'].append(latent_pupil_dist_bySample[i, ...])
opDict['scores']['seg_dst'].append(seg_pupil_dist_bySample[i, ...])
opDict['gt']['pup_c'].append(pupil_center[i,...].numpy())
if args.test_save_op_masks:
opDict['gt']['mask'].append(labels[i,...].numpy().astype(np.uint8))
imCounter+=1
if args.disp:
if bt == 0:
h_im = plt.imshow(dispI.permute(1, 2, 0))
plt.pause(0.01)
else:
h_im.set_data(dispI.permute(1, 2, 0))
mypause(0.01)
opDict = stackall_Dict(opDict)
ious = np.stack(ious, axis=0)
ious = np.nanmean(ious, axis=0)
print('mIoU: {}. IoUs: {}'.format(np.mean(ious), ious))
print('Latent space PUPIL dist. Med: {}, STD: {}'.format(np.nanmedian(dists_pupil_latent),
np.nanstd(dists_pupil_latent)))
print('Segmentation PUPIL dist. Med: {}, STD: {}'.format(np.nanmedian(dists_pupil_seg),
np.nanstd(dists_pupil_seg)))
print('Latent space IRIS dist. Med: {}, STD: {}'.format(np.nanmedian(dists_iris_latent),
                                                             np.nanstd(dists_iris_latent)))
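    # (A matching summary for the segmentation IRIS distances presumably
    #  followed here; reconstructed for symmetry with the pupil printouts.)
    print('Segmentation IRIS dist. Med: {}, STD: {}'.format(np.nanmedian(dists_iris_seg),
                                                            np.nanstd(dists_iris_seg)))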
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
import os
from fluids import *
import numpy as np
from math import pi, log10, log
from random import uniform
from numpy.testing import assert_allclose
from scipy.constants import *
from scipy.optimize import *
from scipy.interpolate import *
from fluids import fluids_data_dir
from fluids.core import Engauge_2d_parser
from fluids.optional.pychebfun import *
import pytest
def log_uniform(low, high):
return 10**uniform(log10(low), log10(high))
def test_fittings():
K = entrance_beveled_orifice(Di=0.1, do=.07, l=0.003, angle=45)
assert_allclose(K, 1.2987552913818574)
### Exits
assert_allclose(exit_normal(), 1.0)
K_helix = helix(Di=0.01, rs=0.1, pitch=.03, N=10, fd=.0185)
assert_allclose(K_helix, 14.525134924495514)
K_spiral = spiral(Di=0.01, rmax=.1, rmin=.02, pitch=.01, fd=0.0185)
assert_allclose(K_spiral, 7.950918552775473)
### Contractions
K_sharp = contraction_sharp(Di1=1, Di2=0.4)
assert_allclose(K_sharp, 0.5301269161591805)
K_beveled = contraction_beveled(Di1=0.5, Di2=0.1, l=.7*.1, angle=120)
assert_allclose(K_beveled, 0.40946469413070485)
### Expansions (diffusers)
K_sharp = diffuser_sharp(Di1=.5, Di2=1)
assert_allclose(K_sharp, 0.5625)
K = diffuser_curved(Di1=.25**0.5, Di2=1., l=2.)
assert_allclose(K, 0.2299781250000002)
K = diffuser_pipe_reducer(Di1=.5, Di2=.75, l=1.5, fd1=0.07)
assert_allclose(K, 0.06873244301714816)
K = diffuser_pipe_reducer(Di1=.5, Di2=.75, l=1.5, fd1=0.07, fd2=.08)
assert_allclose(K, 0.06952256647393829)
# Misc
K1 = Darby3K(NPS=2., Re=10000., name='Valve, Angle valve, 45°, full line size, β = 1')
K2 = Darby3K(NPS=12., Re=10000., name='Valve, Angle valve, 45°, full line size, β = 1')
K3 = Darby3K(NPS=12., Re=10000., K1=950, Ki=0.25, Kd=4)
Ks = [1.1572523963562353, 0.819510280626355, 0.819510280626355]
assert_allclose([K1, K2, K3], Ks)
with pytest.raises(Exception):
Darby3K(NPS=12., Re=10000)
with pytest.raises(Exception):
Darby3K(NPS=12., Re=10000, name='fail')
tot = sum([Darby3K(NPS=2., Re=1000, name=i) for i in Darby.keys()])
assert_allclose(tot, 67.96442287975898)
K1 = Hooper2K(Di=2., Re=10000., name='Valve, Globe, Standard')
K2 = Hooper2K(Di=2., Re=10000., K1=900, Kinfty=4)
assert_allclose([K1, K2], [6.15, 6.09])
tot = sum([Hooper2K(Di=2., Re=10000., name=i) for i in Hooper.keys()])
assert_allclose(tot, 46.18)
with pytest.raises(Exception):
Hooper2K(Di=2, Re=10000)
with pytest.raises(Exception):
Hooper2K(Di=2., Re=10000, name='fail')
K2 = change_K_basis(K1=32.68875692997804, D1=.01, D2=.02)
assert_allclose(K2, 523.0201108796487)
### Entrances
def test_entrance_distance_45_Miller():
from fluids.fittings import entrance_distance_45_Miller
K = entrance_distance_45_Miller(Di=0.1, Di0=0.14)
assert_allclose(K, 0.24407641818143339)
def test_entrance_distance():
K1 = entrance_distance(0.1, t=0.0005)
assert_allclose(K1, 1.0154100000000004)
assert_allclose(entrance_distance(Di=0.1, t=0.05), 0.57)
K = entrance_distance(Di=0.1, t=0.0005, method='Miller')
assert_allclose(K, 1.0280427936730414)
K = entrance_distance(Di=0.1, t=0.0005, method='Idelchik')
assert_allclose(K, 0.9249999999999999)
K = entrance_distance(Di=0.1, t=0.0005, l=.02, method='Idelchik')
assert_allclose(K, 0.8475000000000001)
K = entrance_distance(Di=0.1, t=0.0005, method='Harris')
assert_allclose(K, 0.8705806231290558, 3e-3)
K = entrance_distance(Di=0.1, method='Crane')
assert_allclose(K, 0.78)
with pytest.raises(Exception):
entrance_distance(Di=0.1, t=0.01, method='BADMETHOD')
def test_entrance_rounded():
K = entrance_rounded(Di=0.1, rc=0.0235)
assert_allclose(K, 0.09839534618360923)
assert_allclose(entrance_rounded(Di=0.1, rc=0.2), 0.03)
K = entrance_rounded(Di=0.1, rc=0.0235, method='Miller')
assert_allclose(K, 0.057734448458542094)
K = entrance_rounded(Di=0.1, rc=0.0235, method='Swamee')
assert_allclose(K, 0.06818838227156554)
K = entrance_rounded(Di=0.1, rc=0.01, method='Crane')
assert_allclose(K, .09)
K = entrance_rounded(Di=0.1, rc=0.01, method='Harris')
assert_allclose(K, 0.04864878230217168)
# Limiting condition
K = entrance_rounded(Di=0.1, rc=0.0235, method='Harris')
assert_allclose(K, 0.0)
K = entrance_rounded(Di=0.1, rc=0.01, method='Idelchik')
assert_allclose(K, 0.11328005177738182)
# Limiting condition
K = entrance_rounded(Di=0.1, rc=0.0235, method='Idelchik')
assert_allclose(K, 0.03)
with pytest.raises(Exception):
entrance_rounded(Di=0.1, rc=0.01, method='BADMETHOD')
def test_entrance_beveled():
K = entrance_beveled(Di=0.1, l=0.003, angle=45)
assert_allclose(K, 0.45086864221916984)
K = entrance_beveled(Di=0.1, l=0.003, angle=45, method='Idelchik')
assert_allclose(K, 0.3995000000000001)
def test_entrance_sharp():
assert_allclose(entrance_sharp(), 0.57)
with pytest.raises(Exception):
entrance_sharp(method='BADMETHOD')
for method in ['Swamee', 'Blevins', 'Idelchik', 'Crane']:
assert_allclose(0.5, entrance_sharp(method=method))
entrance_sharp(method='Miller') # Don't bother checking a value for the Miller method
def test_entrance_angled():
K_30_Idelchik = 0.9798076211353316
assert_allclose(entrance_angled(30), K_30_Idelchik)
assert_allclose(entrance_angled(30, method='Idelchik'), K_30_Idelchik)
with pytest.raises(Exception):
entrance_angled(30, method='BADMETHOD')
### Bends
def test_bend_rounded_Crane():
K = bend_rounded_Crane(Di=.4020, rc=.4*5, angle=30)
assert_allclose(K, 0.09321910015613409)
K_max = bend_rounded_Crane(Di=.400, rc=.4*25, angle=30)
K_limit = bend_rounded_Crane(Di=.400, rc=.4*20, angle=30)
assert_allclose(K_max, K_limit)
def test_bend_rounded_Miller():
# Miller examples - 9.12
D = .6
Re = Reynolds(V=4, D=D, nu=1.14E-6)
kwargs = dict(Di=D, bend_diameters=2, angle=90, Re=Re, roughness=.02E-3)
K = bend_rounded_Miller(L_unimpeded=30*D, **kwargs)
assert_allclose(K, 0.1513266131915296, rtol=1e-4)# 0.150 in Miller- 1% difference due to fd
K = bend_rounded_Miller(L_unimpeded=0*D, **kwargs)
assert_allclose(K, 0.1414607344374372, rtol=1e-4) # 0.135 in Miller - Difference mainly from Co interpolation method, OK with that
K = bend_rounded_Miller(L_unimpeded=2*D, **kwargs)
assert_allclose(K, 0.09343184457353562, rtol=1e-4) # 0.093 in miller
def test_bend_rounded():
### Bends
K_5_rc = [bend_rounded(Di=4.020, rc=4.0*5, angle=i, fd=0.0163) for i in [15, 30, 45, 60, 75, 90]]
K_5_rc_values = [0.07038212630028828, 0.10680196344492195, 0.13858204974134541, 0.16977191374717754, 0.20114941557508642, 0.23248382866658507]
assert_allclose(K_5_rc, K_5_rc_values)
K_10_rc = [bend_rounded(Di=34.500, rc=36*10, angle=i, fd=0.0106) for i in [15, 30, 45, 60, 75, 90]]
K_10_rc_values = [0.061075866683922314, 0.10162621862720357, 0.14158887563243763, 0.18225270014527103, 0.22309967045081655, 0.26343782210280947]
assert_allclose(K_10_rc, K_10_rc_values)
K = bend_rounded(Di=4.020, bend_diameters=5, angle=30, fd=0.0163)
assert_allclose(K, 0.106920213333191)
K = bend_rounded(Di=4.020, bend_diameters=5, angle=30, Re=1E5)
assert_allclose(K, 0.11532121658742862)
K = bend_rounded(Di=4.020, bend_diameters=5, angle=30, Re=1E5, method='Miller')
assert_allclose(K, 0.10276501180879682)
K = bend_rounded(Di=.5, bend_diameters=5, angle=30, Re=1E5, method='Crane')
assert_allclose(K, 0.08959057097762159)
K = bend_rounded(Di=.5, bend_diameters=5, angle=30, Re=1E5, method='Ito')
assert_allclose(K, 0.10457946464978755)
K = bend_rounded(Di=.5, bend_diameters=5, angle=30, Re=1E5, method='Swamee')
assert_allclose(K, 0.055429466248839564)
def test_bend_miter():
K_miters = [bend_miter(i) for i in [150, 120, 90, 75, 60, 45, 30, 15]]
K_miter_values = [2.7128147734758103, 2.0264994448555864, 1.2020815280171306, 0.8332188430731828, 0.5299999999999998, 0.30419633092708653, 0.15308822558050816, 0.06051389308126326]
assert_allclose(K_miters, K_miter_values)
K = bend_miter(Di=.6, angle=45, Re=1e6, roughness=1e-5, L_unimpeded=20, method='Miller')
assert_allclose(K, 0.2944060416245167)
K = bend_miter(Di=.05, angle=45, Re=1e6, roughness=1e-5, method='Crane')
assert_allclose(K, 0.28597953150073047)
K = bend_miter(angle=45, Re=1e6, method='Rennels')
assert_allclose(K, 0.30419633092708653)
with pytest.raises(Exception):
bend_miter(angle=45, Re=1e6, method='BADMETHOD')
def test_bend_miter_Miller():
K = bend_miter_Miller(Di=.6, angle=45, Re=1e6, roughness=1e-5, L_unimpeded=20)
assert_allclose(K, 0.2944060416245167)
K_default_L_unimpeded = bend_miter_Miller(Di=.6, angle=45, Re=1e6, roughness=1e-5)
assert_allclose(K, K_default_L_unimpeded)
K_high_angle = bend_miter_Miller(Di=.6, angle=120, Re=1e6, roughness=1e-5, L_unimpeded=20)
K_higher_angle = bend_miter_Miller(Di=.6, angle=150, Re=1e6, roughness=1e-5, L_unimpeded=20)
assert_allclose(K_high_angle, K_higher_angle)
@pytest.mark.slow
@pytest.mark.fuzz
def test_bend_rounded_Miller_fuzz():
# Tested for quite a while without problems
answers = []
for i in range(500):
Di = log_uniform(1e-5, 100)
rc = uniform(0, 100)
angle = uniform(0, 180)
Re = log_uniform(1e-5, 1E15)
roughness = uniform(1e-10, Di*.95)
L_unimpeded = log_uniform(1e-10, Di*1000)
ans = bend_rounded_Miller(Di=Di, rc=rc, angle=angle, Re=Re, roughness=roughness, L_unimpeded=L_unimpeded)
if np.isnan(ans) or np.isinf(ans):
raise Exception
answers.append(ans)
assert min(answers) >= 0
assert max(answers) < 1E10
@pytest.mark.slow
@pytest.mark.fuzz
def test_bend_miter_Miller_fuzz():
# Tested for quite a while without problems
answers = []
for i in range(10**3):
Di = log_uniform(1e-5, 100)
angle = uniform(0, 120)
Re = log_uniform(1e-5, 1E15)
roughness = uniform(1e-10, Di*.95)
L_unimpeded = log_uniform(1e-10, Di*1000)
ans = bend_miter_Miller(Di=Di, angle=angle, Re=Re, roughness=roughness, L_unimpeded=L_unimpeded)
if np.isnan(ans) or np.isinf(ans):
raise Exception
answers.append(ans)
assert min(answers) >= 0
assert max(answers) < 1E10
### Diffusers
def test_diffuser_conical():
K1 = diffuser_conical(Di1=.1**0.5, Di2=1, angle=10., fd=0.020)
K2 = diffuser_conical(Di1=1/3., Di2=1, angle=50, fd=0.03) # 2
K3 = diffuser_conical(Di1=2/3., Di2=1, angle=40, fd=0.03) # 3
K4 = diffuser_conical(Di1=1/3., Di2=1, angle=120, fd=0.0185) # #4
K5 = diffuser_conical(Di1=2/3., Di2=1, angle=120, fd=0.0185) # Last
K6 = diffuser_conical(Di1=.1**0.5, Di2=1, l=3.908, fd=0.020)
Ks = [0.12301652230915454, 0.8081340270019336, 0.32533470783539786, 0.812308728765127, 0.3282650135070033, 0.12300865396254032]
assert_allclose([K1, K2, K3, K4, K5, K6], Ks)
with pytest.raises(Exception):
diffuser_conical(Di1=.1, Di2=0.1, angle=1800., fd=0.020)
with pytest.raises(Exception):
diffuser_conical(Di1=.1, Di2=0.1, fd=0.020)
K1 = diffuser_conical_staged(Di1=1., Di2=10., DEs=[2,3,4,5,6,7,8,9], ls=[1,1,1,1,1,1,1,1,1], fd=0.01)
K2 = diffuser_conical(Di1=1., Di2=10.,l=9, fd=0.01)
Ks = [1.7681854713484308, 0.973137914861591]
assert_allclose([K1, K2], Ks)
# Idelchilk
Ks_Idelchik = [diffuser_conical(Di1=.1**0.5, Di2=1, l=l, method='Idelchik') for l in [.1, .5, 1, 2, 3, 4, 5, 20]]
Ks_Idelchik_expect = [0.8617385829640242, 0.9283647028367953, 0.7082429168951839, 0.291016580744589, 0.18504484868875992, 0.147705693811332, 0.12911637682462676, 0.17]
assert_allclose(Ks_Idelchik, Ks_Idelchik_expect, rtol=1e-2)
### Contractions
def test_contraction_conical_Crane():
K2 = contraction_conical_Crane(Di1=0.0779, Di2=0.0525, l=0)
assert_allclose(K2, 0.2729017979998056)
def test_contraction_round():
K_round = contraction_round(Di1=1, Di2=0.4, rc=0.04)
assert_allclose(K_round, 0.1783332490866574)
K = contraction_round(Di1=1, Di2=0.4, rc=0.04, method='Miller')
assert_allclose(K, 0.085659530512986387)
K = contraction_round(Di1=1, Di2=0.4, rc=0.04, method='Idelchik')
assert_allclose(K, 0.1008)
with pytest.raises(Exception):
contraction_round(Di1=1, Di2=0.4, rc=0.04, method='BADMETHOD')
def test_contraction_round_Miller():
K = contraction_round_Miller(Di1=1, Di2=0.4, rc=0.04)
assert_allclose(K, 0.085659530512986387)
def test_contraction_conical():
K_conical1 = contraction_conical(Di1=0.1, Di2=0.04, l=0.04, fd=0.0185)
K_conical2 = contraction_conical(Di1=0.1, Di2=0.04, angle=73.74, fd=0.0185)
assert_allclose([K_conical1, K_conical2], [0.15779041548350314, 0.15779101784158286])
with pytest.raises(Exception):
contraction_conical(Di1=0.1, Di2=0.04, fd=0.0185)
K = contraction_conical(Di1=0.1, Di2=.04, l=.004, Re=1E6, method='Rennels')
assert_allclose(K, 0.47462419839494946)
K = contraction_conical(Di1=0.1, Di2=.04, l=.004, Re=1E6, method='Idelchik')
assert_allclose(K, 0.391723)
K = contraction_conical(Di1=0.1, Di2=.04, l=.004, Re=1E6, method='Crane')
assert_allclose(K, 0.41815380146594)
K = contraction_conical(Di1=0.1, Di2=.04, l=.004, Re=1E6, method='Swamee')
assert_allclose(K, 0.4479863925376303)
K = contraction_conical(Di1=0.1, Di2=.04, l=.004, Re=1E6, method='Blevins')
assert_allclose(K, 0.365)
K = contraction_conical(Di1=0.1, Di2=0.04, l=0.04, Re=1E6, method='Miller')
assert_allclose(K, 0.0918289683812792)
# high l ratio rounding
K = contraction_conical(Di1=0.1, Di2=0.06, l=0.04, Re=1E6, method='Miller')
assert_allclose(K, 0.08651515699621345)
# low a ratio rounding
K = contraction_conical(Di1=0.1, Di2=0.099, l=0.04, Re=1E6, method='Miller')
assert_allclose(K, 0.03065262382984957)
# low l ratio
K = contraction_conical(Di1=0.1, Di2=0.04, l=0.001, Re=1E6, method='Miller')
assert_allclose(K, 0.5)
# high l ratio rounding
K = contraction_conical(Di1=0.1, Di2=0.05, l=1, Re=1E6, method='Miller')
assert_allclose(K, 0.04497085709551787)
with pytest.raises(Exception):
contraction_conical(Di1=0.1, Di2=.04, l=.004, Re=1E6, method='BADMETHOD')
### Valves
def test_valve_coefficients():
Cv = Kv_to_Cv(2)
assert_allclose(Cv, 2.3121984567073133)
Kv = Cv_to_Kv(2.312)
assert_allclose(Kv, 1.9998283393826013)
K = Kv_to_K(2.312, .015)
assert_allclose(K, 15.15337460039990)
Kv = K_to_Kv(15.15337460039990, .015)
assert_allclose(Kv, 2.312)
# Two way conversions
K = Cv_to_K(2.712, .015)
assert_allclose(K, 14.719595348352552)
assert_allclose(K, Kv_to_K(Cv_to_Kv(2.712), 0.015))
Cv = K_to_Cv(14.719595348352552, .015)
assert_allclose(Cv, 2.712)
assert_allclose(Cv, Kv_to_Cv(K_to_Kv(14.719595348352552, 0.015)))
# Code to generate the Kv Cv conversion factor
# Round 1 trip; randomly assume Kv = 12, rho = 900; they can be anything
    # and it still works
dP = 1E5
rho = 900.
Kv = 12.
Q = Kv/3600.
D = .01
V = Q/(pi/4*D**2)
K = dP/(.5*rho*V*V)
good_K = K
def to_solve(x):
from scipy.constants import gallon, minute, hour, psi
conversion = gallon/minute*hour # from gpm to m^3/hr
dP = 1*psi
Cv = Kv*x*conversion
Q = Cv/3600
D = .01
V = Q/(pi/4*D**2)
K = dP/(.5*rho*V*V)
return K - good_K
from scipy.optimize import newton
ans = newton(to_solve, 1.2)
assert_allclose(ans, 1.1560992283536566)
def test_K_gate_valve_Crane():
K = K_gate_valve_Crane(D1=.01, D2=.02, angle=45, fd=.015)
assert_allclose(K, 14.548553268047963)
K = K_gate_valve_Crane(D1=.1, D2=.1, angle=0, fd=.015)
assert_allclose(K, 0.12)
# non-smooth transition test
K = K_gate_valve_Crane(D1=.1, D2=.146, angle=45, fd=.015)
assert_allclose(K, 2.5577948931946746)
K = K_gate_valve_Crane(D1=.1, D2=.146, angle=45.01, fd=.015)
assert_allclose(K, 2.5719286772143595)
K = K_gate_valve_Crane(D1=.1, D2=.146, angle=13.115)
assert_allclose(K, 1.1466029421844073, rtol=1e-4)
def test_K_globe_valve_Crane():
K = K_globe_valve_Crane(.01, .02, fd=.015)
assert_allclose(K, 87.1)
assert_allclose(K_globe_valve_Crane(.01, .01, fd=.015), .015*340)
K = K_globe_valve_Crane(.01, .02)
assert_allclose(K, 135.9200548324305)
def test_K_angle_valve_Crane():
K = K_angle_valve_Crane(.01, .02, fd=.016)
assert_allclose(K, 19.58)
K = K_angle_valve_Crane(.01, .02, fd=.016, style=1)
assert_allclose(K, 43.9)
K = K_angle_valve_Crane(.01, .01, fd=.016, style=1)
assert_allclose(K, 2.4)
with pytest.raises(Exception):
K_angle_valve_Crane(.01, .02, fd=.016, style=-1)
K = K_angle_valve_Crane(.01, .02)
assert_allclose(K, 26.597361811128465)
def test_K_swing_check_valve_Crane():
K = K_swing_check_valve_Crane(D=.1, fd=.016)
assert_allclose(K, 1.6)
K = K_swing_check_valve_Crane(D=.1, fd=.016, angled=False)
assert_allclose(K, 0.8)
K = K_swing_check_valve_Crane(D=.02)
assert_allclose(K, 2.3974274785373257)
def test_K_lift_check_valve_Crane():
K = K_lift_check_valve_Crane(.01, .02, fd=.016)
assert_allclose(K, 21.58)
K = K_lift_check_valve_Crane(.01, .01, fd=.016)
assert_allclose(K, 0.88)
K = K_lift_check_valve_Crane(.01, .01, fd=.016, angled=False)
assert_allclose(K, 9.6)
K = K_lift_check_valve_Crane(.01, .02, fd=.016, angled=False)
assert_allclose(K, 161.1)
K = K_lift_check_valve_Crane(.01, .02)
assert_allclose(K, 28.597361811128465)
def test_K_tilting_disk_check_valve_Crane():
K = K_tilting_disk_check_valve_Crane(.01, 5, fd=.016)
assert_allclose(K, 0.64)
K = K_tilting_disk_check_valve_Crane(.25, 5, fd=.016)
assert_allclose(K, .48)
K = K_tilting_disk_check_valve_Crane(.9, 5, fd=.016)
assert_allclose(K, 0.32)
K = K_tilting_disk_check_valve_Crane(.01, 15, fd=.016)
assert_allclose(K, 1.92)
K = K_tilting_disk_check_valve_Crane(.25, 15, fd=.016)
assert_allclose(K, 1.44)
K = K_tilting_disk_check_valve_Crane(.9, 15, fd=.016)
assert_allclose(K, 0.96)
K = K_tilting_disk_check_valve_Crane(.01, 5)
assert_allclose(K, 1.1626516551826345)
def test_K_globe_stop_check_valve_Crane():
K = K_globe_stop_check_valve_Crane(.1, .02, .0165)
assert_allclose(K, 4.5225599999999995)
K = K_globe_stop_check_valve_Crane(.1, .02, .0165, style=1)
assert_allclose(K, 4.51992)
K = K_globe_stop_check_valve_Crane(.1, .02, .0165, style=2)
assert_allclose(K, 4.513452)
with pytest.raises(Exception):
K_globe_stop_check_valve_Crane(.1, .02, .0165, style=-1)
K = K_globe_stop_check_valve_Crane(.1, .1, .0165)
assert_allclose(K, 6.6)
K = K_globe_stop_check_valve_Crane(.1, .02, style=1)
assert_allclose(K, 4.5235076518969795)
def test_K_angle_stop_check_valve_Crane():
K = K_angle_stop_check_valve_Crane(.1, .02, .0165)
assert_allclose(K, 4.51728)
K = K_angle_stop_check_valve_Crane(.1, .02, .0165, style=1)
assert_allclose(K, 4.52124)
K = K_angle_stop_check_valve_Crane(.1, .02, .0165, style=2)
assert_allclose(K, 4.513452)
with pytest.raises(Exception):
K_angle_stop_check_valve_Crane(.1, .02, .0165, style=-1)
K = K_angle_stop_check_valve_Crane(.1, .1, .0165)
assert_allclose(K, 3.3)
K = K_angle_stop_check_valve_Crane(.1, .02, style=1)
assert_allclose(K, 4.525425593879809)
def test_K_ball_valve_Crane():
K = K_ball_valve_Crane(.01, .02, 50, .025)
assert_allclose(K, 14.100545785228675)
K = K_ball_valve_Crane(.01, .02, 40, .025)
assert_allclose(K, 12.48666472974707)
K = K_ball_valve_Crane(.01, .01, 0, .025)
assert_allclose(K, 0.07500000000000001)
K = K_ball_valve_Crane(.01, .02, 50)
assert_allclose(K, 14.051310974926592)
def test_K_diaphragm_valve_Crane():
K = K_diaphragm_valve_Crane(fd=0.015, style=0)
assert_allclose(2.235, K)
K = K_diaphragm_valve_Crane(fd=0.015, style=1)
assert_allclose(K, 0.585)
with pytest.raises(Exception):
K_diaphragm_valve_Crane(fd=0.015, style=-1)
K = K_diaphragm_valve_Crane(D=.1, style=0)
    assert_allclose(K, 2.4269804835982565)
# Copyright (c) 2020. <NAME>, <EMAIL>
import random, warnings, itertools, numpy as np, scipy.linalg as LA, pandas as pd
from numpy.linalg import LinAlgError
class NonPSDError(LinAlgError):
def __init__(self):
err_msg = "Matrix is not positive semidefinite (PSD)."
        super(NonPSDError, self).__init__(err_msg)
class LSML:
def __init__(self, tol=1e-5, max_iter=1000, sigma=0.01):
self.tol = tol
self.max_iter = max_iter
self.metric = None
self.sigma = sigma
def _comparison_loss(self, metric, comparisons):
loss = 0.
for xa, xb, xc, xd in comparisons:
vab = xa - xb
vcd = xc - xd
dab = np.dot(vab.T, np.dot(metric, vab))
dcd = np.dot(vcd.T, np.dot(metric, vcd))
if dab > dcd:
loss += (np.sqrt(dab) - np.sqrt(dcd)) ** 2
return loss
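    # The comparison loss above is a squared hinge on Mahalanobis distances:
    # each quadruple contributes (sqrt(d_M(a,b)) - sqrt(d_M(c,d)))**2 whenever
    # d_M(a,b) > d_M(c,d), where d_M(x,y) = (x-y)^T M (x-y); satisfied
    # comparisons contribute zero.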
def _regularization_loss(self, prior_inv):
return self.sigma * (np.trace(np.dot(self.metric, prior_inv))
- np.log(LA.det(self.metric + np.diag(np.ones(len(prior_inv))*1e-12))))
def _total_loss(self, prior_inv, new_metric, comparisons):
return self._comparison_loss(new_metric, comparisons) + \
self._regularization_loss(prior_inv)
def _gradient(self, prior_inv, comparisons):
dMetric = self.sigma * (prior_inv - LA.inv(self.metric))
for xa, xb, xc, xd in comparisons:
vab = xa - xb
vcd = xc - xd
dab = np.dot(vab.T, np.dot(self.metric, vab))
dcd = np.dot(vcd.T, np.dot(self.metric, vcd))
if dab <= dcd:
continue # comparison already satisfied.
if dab == 0 or dcd == 0:
continue # this is the setting for COMPAS
dMetric += (1 - np.sqrt(dcd / dab)) * np.outer(vab, vab) + \
(1 - np.sqrt(dab / dcd)) * np.outer(vcd, vcd)
return dMetric
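    # Gradient note: for a violated comparison, differentiating
    # (sqrt(dab) - sqrt(dcd))**2 with respect to the metric M gives
    # (1 - sqrt(dcd/dab)) * vab vab^T + (1 - sqrt(dab/dcd)) * vcd vcd^T,
    # which is the term the loop above accumulates on top of the prior term.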
def score(self, comparisons, h, dmetric=None):
count = 0
if dmetric is None:
metric = self.metric
else:
metric = dmetric
for xa, xb, xc, xd in comparisons:
vab, vcd = xa - xb, xc - xd
dab = np.dot(vab.T, np.dot(metric, vab))
dcd = np.dot(vcd.T, | np.dot(metric, vcd) | numpy.dot |
from corvus.structures import Handler, Exchange, Loop, Update
import corvutils.pyparsing as pp
import os, sys, subprocess, shutil #, resource
import re
import numpy as np
#from CifFile import ReadCif
#from cif2cell.uctools import *
# Debug: FDV
import pprint
pp_debug = pprint.PrettyPrinter(indent=4)
# Define dictionary of implemented calculations
implemented = {}
strlistkey = lambda L:','.join(sorted(L))
subs = lambda L:[{L[j] for j in range(len(L)) if 1<<j&k} for k in range(1,1<<len(L))]
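# `subs` enumerates all non-empty subsets of L via bitmasks, e.g.
# subs(['a', 'b']) -> [{'a'}, {'b'}, {'a', 'b'}]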
for s in subs(['potlist','atomlist']):
key = strlistkey(s)
autodesc = 'Basic FEFF ' + ', '.join(s) + ' from ABINIT cell definition'
input = ['acell','znucl','xred','rprim','natom']
cost = 1
implemented[key] = {'type':'Exchange','out':list(s),'req':input,
'desc':autodesc,'cost':cost}
implemented['feffAtomicData'] = {'type':'Exchange','out':['feffAtomicData'],'cost':1,
'req':['cluster','absorbing_atom'],'desc':'Calculate atomic data using FEFF.'}
implemented['feffSCFPotentials'] = {'type':'Exchange','out':['feffSCFPotentials'],'cost':1,
'req':['cluster','absorbing_atom','feffAtomicData'],'desc':'Calculate SCF potentials using FEFF.'}
implemented['feffCrossSectionsAndPhases'] = {'type':'Exchange','out':['feffCrossSectionsAndPhases'],'cost':1,
'req':['cluster','absorbing_atom','feffSCFPotentials'],'desc':'Calculate atomic cross sections and phases using FEFF.'}
implemented['feffGreensFunction'] = {'type':'Exchange','out':['feffGreensFunction'],'cost':1,
'req':['cluster','absorbing_atom','feffCrossSectionsAndPhases'],'desc':'Calculate Greens function using FEFF.'}
implemented['feffPaths'] = {'type':'Exchange','out':['feffPaths'],'cost':1,
'req':['cluster','absorbing_atom','feffGreensFunction'],'desc':'Calculate paths using FEFF.'}
implemented['feffFMatrices'] = {'type':'Exchange','out':['feffFMatrices'],'cost':1,
'req':['cluster','absorbing_atom','feffPaths'],'desc':'Calculate scattering matrices using FEFF.'}
implemented['xanes'] = {'type':'Exchange','out':['xanes'],'cost':1,
'req':['cluster','absorbing_atom'],'desc':'Calculate XANES using FEFF.'}
implemented['feffXES'] = {'type':'Exchange','out':['feffXES'],'cost':1,
'req':['cluster','absorbing_atom'],'desc':'Calculate XANES using FEFF.'}
implemented['feffRIXS'] = {'type':'Exchange','out':['feffRIXS'],'cost':1,
'req':['cluster','absorbing_atom'],'desc':'Calculate XANES using FEFF.'}
implemented['opcons'] = {'type':'Exchange','out':['opcons'],'cost':1,
'req':['cif_input'],'desc':'Calculate optical constants using FEFF.'}
# Added by FDV
# Trying to implement and EXAFS with optimized geometry and ab initio DW factors
implemented['opt_dynmat_s2_exafs'] = {'type':'Exchange',
'out':['opt_dynmat_s2_exafs'], 'cost':3,
'req':['opt_dynmat','absorbing_atom'],
'desc':'Calculate EXAFS with optimized geometry and ab initio DW factors from a dynamical matrix using FEFF.'}
class Feff(Handler):
def __str__(self):
return 'FEFF Handler'
@staticmethod
def canProduce(output):
if isinstance(output, list) and output and isinstance(output[0], str):
return strlistkey(output) in implemented
elif isinstance(output, str):
return output in implemented
else:
raise TypeError('Output should be token or list of tokens')
@staticmethod
def requiredInputFor(output):
if isinstance(output, list) and output and isinstance(output[0], str):
unresolved = {o for o in output if not Feff.canProduce(o)}
canProduce = (o for o in output if Feff.canProduce(o))
additionalInput = (set(implemented[o]['req']) for o in canProduce)
return list(set.union(unresolved,*additionalInput))
elif isinstance(output, str):
if output in implemented:
return implemented[output]['req']
else:
return [output]
else:
raise TypeError('Output should be token or list of tokens')
@staticmethod
def cost(output):
if isinstance(output, list) and output and isinstance(output[0], str):
key = strlistkey(output)
elif isinstance(output, str):
key = output
else:
raise TypeError('Output should be token or list of tokens')
if key not in implemented:
raise LookupError('Corvus cannot currently produce ' + key + ' using FEFF')
return implemented[key]['cost']
@staticmethod
def sequenceFor(output,inp=None):
if isinstance(output, list) and output and isinstance(output[0], str):
key = strlistkey(output)
elif isinstance(output, str):
key = output
else:
raise TypeError('Output should be token of list of tokens')
if key not in implemented:
raise LookupError('Corvus cannot currently produce ' + key + ' using FEFF')
f = lambda subkey : implemented[key][subkey]
        if f('type') == 'Exchange':
return Exchange(Feff, f('req'), f('out'), cost=f('cost'), desc=f('desc'))
@staticmethod
def prep(config):
if 'xcIndexStart' in config:
if config['xcIndexStart'] > 0:
subdir = config['pathprefix'] + str(config['xcIndex']) + '_FEFF'
xcDir = os.path.join(config['cwd'], subdir)
else:
xcDir = config['xcDir']
else:
subdir = config['pathprefix'] + str(config['xcIndex']) + '_FEFF'
xcDir = os.path.join(config['cwd'], subdir)
        # Make new output directory if it doesn't exist
if not os.path.exists(xcDir):
os.makedirs(xcDir)
# Store current Exchange directory in configuration
config['xcDir'] = xcDir
#@staticmethod
#def setDefaults(input,target):
# JJ Kas - run now performs all 3 methods, i.e., generateInput, run, translateOutput
# Maybe we should also include prep here. Is there a reason that we want to limit the directory names
# to automated Corvus_FEFFNN? Also if we have prep included here, we can decide on making a new directory
# or not.
@staticmethod
def run(config, input, output):
# Set os specific executable ending
if os.name == 'nt':
win_exe = '.exe'
else:
win_exe = ''
# set atoms and potentials
# Set directory to feff executables.
# Debug: FDV
# pp_debug.pprint(config)
feffdir = config['feff']
# Debug: FDV
# sys.exit()
# Copy feff related input to feffinput here. Later we will be overriding some settings,
# so we want to keep the original input intact.
feffInput = {key:input[key] for key in input if key.startswith('feff.')}
# Generate any data that is needed from generic input and populate feffInput with
# global data (needed for all feff runs.)
if 'feff.target' in input or 'cluster' not in input:
if 'cif_input' in input: # Prefer using cif for input, but still use REAL space
# Replace path with absolute path
feffInput['feff.cif'] = [[os.path.abspath(input['cif_input'][0][0])]]
if 'feff.reciprocal' not in input:
feffInput['feff.real'] = [[True]]
if 'cluster' in input:
atoms = getFeffAtomsFromCluster(input)
setInput(feffInput,'feff.atoms',atoms)
potentials = getFeffPotentialsFromCluster(input)
setInput(feffInput,'feff.potentials',potentials)
debyeOpts = getFeffDebyeOptions(input)
if 'feff.exchange' in feffInput:
exch = feffInput['feff.exchange']
else:
exch = [[0, 0.0, 0.0, 2]]
if 'spectral_broadening' in input:
exch[0][2] = input['spectral_broadening'][0][0]
if 'fermi_shift' in input:
exch[0][1] = input['fermi_shift'][0][0]
feffInput['feff.exchange'] = exch
if debyeOpts is not None:
setInput(feffInput,'feff.debye',debyeOpts)
# Set directory for this exchange
dir = config['xcDir']
# Set input file
inpf = os.path.join(dir, 'feff.inp')
# Loop over targets in output. Not sure if there will ever be more than one output target here.
for target in output:
if (target == 'feffAtomicData'):
# Set output and error files
with open(os.path.join(dir, 'corvus.FEFF.stdout'), 'w') as out, open(os.path.join(dir, 'corvus.FEFF.stderr'), 'w') as err:
# Write input file for FEFF.
writeAtomicInput(feffInput,inpf)
# Loop over executable: This is specific to feff. Other codes
# will more likely have only one executable.
# Run rdinp and atomic part of calculation
execs = ['rdinp','atomic','screen']
for exe in execs:
if 'feff.MPI.CMD' in feffInput:
executable = [feffInput.get('feff.MPI.CMD')[0] + win_exe]
args = feffInput.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe) + win_exe]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dir,executable,args,out,err)
# For this case, I am only passing the directory for now so
# that other executables in FEFF can use the atomic data.
output[target] = dir
elif (target == 'feffSCFPotentials'):
# Set output and error files
with open(os.path.join(dir, 'corvus.FEFF.stdout'), 'w') as out, open(os.path.join(dir, 'corvus.FEFF.stderr'), 'w') as err:
# Write input file for FEFF.
writeSCFInput(feffInput,inpf)
# Loop over executable: This is specific to feff. Other codes
# will more likely have only one executable. Here I am running
            # rdinp again since writeSCFInput may have different cards than the previous run.
# Run rdinp and atomic part of calculation
execs = ['rdinp','atomic', 'pot', 'screen']
for exe in execs:
if 'feff.MPI.CMD' in feffInput:
executable = feffInput.get('feff.MPI.CMD')[0]
args = feffInput.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe)]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dir,executable,args,out,err)
# For this case, I am only passing the directory for now so
# that other executables in FEFF can use the atomic data.
output[target] = dir
elif (target == 'feffCrossSectionsAndPhases'):
# Set output and error files
with open(os.path.join(dir, 'corvus.FEFF.stdout'), 'w') as out, open(os.path.join(dir, 'corvus.FEFF.stderr'), 'w') as err:
# Write input file for FEFF.
writeCrossSectionsInput(feffInput,inpf)
# Loop over executable: This is specific to feff. Other codes
# will more likely have only one executable. Here I am running
            # rdinp again since writeSCFInput may have different cards than the previous run.
# Run rdinp and atomic part of calculation
execs = ['rdinp','atomic','screen', 'pot', 'xsph']
for exe in execs:
if 'feff.MPI.CMD' in feffInput:
executable = feffInput.get('feff.MPI.CMD')[0]
args = feffInput.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe)]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dir,executable,args,out,err)
output[target] = dir
elif (target == 'feffGreensFunction'):
# Set output and error files
with open(os.path.join(dir, 'corvus.FEFF.stdout'), 'w') as out, open(os.path.join(dir, 'corvus.FEFF.stderr'), 'w') as err:
# Write input file for FEFF.
writeGreensFunctionInput(feffInput,inpf)
# Loop over executable: This is specific to feff. Other codes
# will more likely have only one executable. Here I am running
            # rdinp again since writeSCFInput may have different cards than the previous run.
execs = ['rdinp','atomic','pot','screen','xsph','fms','mkgtr']
for exe in execs:
if 'feff.MPI.CMD' in feffInput:
executable = feffInput.get('feff.MPI.CMD')[0]
args = feffInput.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe)]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dir,executable,args,out,err)
# For this case, I am only passing the directory for now so
# that other executables in FEFF can use the atomic data.
output[target] = dir
elif (target == 'feffPaths'):
# Set output and error files
with open(os.path.join(dir, 'corvus.FEFF.stdout'), 'w') as out, open(os.path.join(dir, 'corvus.FEFF.stderr'), 'w') as err:
# Write input file for FEFF.
writePathsInput(feffInput,inpf)
# Loop over executable: This is specific to feff. Other codes
# will more likely have only one executable. Here I am running
            # rdinp again since writeSCFInput may have different cards than the previous run.
execs = ['rdinp','atomic','pot','screen','xsph','fms','mkgtr','path']
for exe in execs:
if 'feff.MPI.CMD' in feffInput:
executable = feffInput.get('feff.MPI.CMD')[0]
args = feffInput.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe)]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dir,executable,args,out,err)
# For this case, I am only passing the directory for now so
# that other executables in FEFF can use the atomic data.
output[target] = dir
elif (target == 'feffFMatrices'):
# Set output and error files
with open(os.path.join(dir, 'corvus.FEFF.stdout'), 'w') as out, open(os.path.join(dir, 'corvus.FEFF.stderr'), 'w') as err:
# Write input file for FEFF.
writeFMatricesInput(feffInput,inpf)
# Loop over executable: This is specific to feff. Other codes
# will more likely have only one executable. Here I am running
            # rdinp again since writeSCFInput may have different cards than the previous run.
execs = ['rdinp','atomic','pot','screen','xsph','fms','mkgtr','path','genfmt']
for exe in execs:
if 'feff.MPI.CMD' in feffInput:
executable = feffInput.get('feff.MPI.CMD')[0]
args = feffInput.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe)]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dir,executable,args,out,err)
output[target] = dir
elif (target == 'xanes'):
# Loop over edges. For now just run in the same directory. Should change this later.
for i,edge in enumerate(input['feff.edge'][0]):
feffInput['feff.edge'] = [[edge]]
# Set output and error files
with open(os.path.join(dir, 'corvus.FEFF.stdout'), 'w') as out, open(os.path.join(dir, 'corvus.FEFF.stderr'), 'w') as err:
# Write input file for FEFF.
writeXANESInput(feffInput,inpf)
# Loop over executable: This is specific to feff. Other codes
# will more likely have only one executable. Here I am running
            # rdinp again since writeSCFInput may have different cards than the previous run.
execs = ['rdinp','atomic','pot','screen','opconsat','xsph','fms','mkgtr','path','genfmt','ff2x','sfconv']
for exe in execs:
if 'feff.MPI.CMD' in feffInput:
executable = [feffInput.get('feff.MPI.CMD')[0][0] + win_exe]
args = feffInput.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe) + win_exe]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dir,executable,args,out,err)
outFile=os.path.join(dir,'xmu.dat')
w,xmu = np.loadtxt(outFile,usecols = (0,3)).T
if i==0:
xmu_arr = [xmu]
w_arr = [w]
else:
xmu_arr = xmu_arr + [xmu]
w_arr = w_arr + [w]
# Now combine energy grids, interpolate files, and sum.
wtot = np.unique(np.append(w_arr[0],w_arr[1:]))
xmutot = np.zeros_like(wtot)
xmuterp_arr = []
for i,xmu_elem in enumerate(xmu_arr):
xmuterp_arr = xmuterp_arr + [np.interp(wtot,w_arr[i],xmu_elem)]
xmutot = xmutot + np.interp(wtot,w_arr[i],xmu_elem)
#output[target] = np.array([wtot,xmutot] + xmuterp_arr).tolist()
output[target] = np.array([wtot,xmutot]).tolist()
#print output[target]
elif (target == 'feffXES'):
# Set output and error files
with open(os.path.join(dir, 'corvus.FEFF.stdout'), 'w') as out, open(os.path.join(dir, 'corvus.FEFF.stderr'), 'w') as err:
# Write input file for FEFF.
writeXESInput(feffInput,inpf)
# Loop over executable: This is specific to feff. Other codes
# will more likely have only one executable.
execs = ['rdinp','atomic','pot','screen','opconsat','xsph','fms','mkgtr','path','genfmt','ff2x','sfconv']
for exe in execs:
if 'feff.MPI.CMD' in feffInput:
executable = feffInput.get('feff.MPI.CMD')[0]
args = feffInput.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe)]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dir,executable,args,out,err)
# For this case, I am only passing the directory for now so
# that other executables in FEFF can use the data.
outFile=os.path.join(dir,'xmu.dat')
output[target] = np.loadtxt(outFile,usecols = (0,3)).T.tolist()
elif (target == 'feffRIXS'):
# For RIXS, need to run multiple times as follows.
# Core-Core RIXS
# 1. Run for the deep core-level.
# 2. Run for the shallow core-level.
# 3. Collect files.
# 4. Run rixs executable.
# Core-valence RIXS
# 1. Run for the deep core-level.
# 2. Run for the valence level.
            # 3. Run an XES calculation.
# 4. Run rixs executable.
# Set global settings for all runs.
# Set default energy grid
setInput(feffInput,'feff.egrid',[['e_grid', -10, 10, 0.05],['k_grid', 'last', 4, 0.025]])
setInput(feffInput,'feff.exchange',[[0, 0.0, -20.0, 0]])
setInput(feffInput,'feff.corehole',[['RPA']],Force=True) # maybe not this one. Need 'NONE' for valence
setInput(feffInput,'feff.edge',[['K','VAL']])
edges = feffInput['feff.edge'][0]
# Save original state of input
savedInput = dict(feffInput)
# Loop over edges and run XANES for each edge:
# Save deep edge
edge0 = edges[0]
nEdge=0
for edge in edges:
nEdge = nEdge + 1
# Make directory
dirname = os.path.join(dir,edge)
if not os.path.exists(dirname):
os.mkdir(dirname)
if edge.upper() != "VAL":
outFileName='rixsET.dat'
# Delete XES input key
if 'feff.xes' in feffInput:
del feffInput['feff.xes']
# Set edge.
setInput(feffInput,'feff.edge',[[edge]],Force=True)
# Set icore to other edge
setInput(feffInput,'feff.icore',[[getICore(edge0)]],Force=True)
# Set corehole RPA
setInput(feffInput,'feff.corehole',[['RPA']],Force=True)
# Set default energy grid
setInput(feffInput,'feff.egrid',[['e_grid', -10, 10, 0.05],['k_grid', 'last', 4, 0.025]])
# Set RLPRINT
setInput(feffInput,'feff.rlprint',[[True]],Force=True)
feffinp = os.path.join(dirname, 'feff.inp')
# Write XANES input for this run
writeXANESInput(feffInput,feffinp)
else: # This is a valence calculation. Calculate NOHOLE and XES
# XANES calculation
outFileName='rixsET-sat.dat'
# Find out if we are using a valence hole for valence calculation.
# Set edge.
setInput(feffInput,'feff.edge',[[edge0]],Force=True)
if len(edges) == nEdge+1:
# Set corehole RPA
setInput(feffInput,'feff.corehole',[['RPA']],Force=True)
# We want to use this core-state as the core hole in place of the valence
# Set screen parameters
setInput(feffInput,'feff.screen',[['icore', getICore(edges[nEdge])]],Force=True)
else:
# Set corehole NONE
setInput(feffInput,'feff.corehole',[['NONE']],Force=True)
# Write wscrn.dat to VAL directory
wscrnFileW = os.path.join(dirname,'wscrn.dat')
writeList(wscrnLines,wscrnFileW)
# Set icore to deep edge
setInput(feffInput,'feff.icore',[[getICore(edge0)]],Force=True)
# Set default energy grid
setInput(feffInput,'feff.egrid',[['e_grid', -10, 10, 0.05],['k_grid', 'last', 4, 0.025]])
# Set RLPRINT
setInput(feffInput,'feff.rlprint',[[True]],Force=True)
#
# Run XES for the deep level
# Save XANES card, but delete from input
xanesInput = {}
if 'feff.xanes' in feffInput:
setInput(xanesInput, 'feff.xanes', feffInput['feff.xanes'])
del feffInput['feff.xanes']
# Set XES options
# Set default energy grid
del feffInput['feff.egrid']
setInput(feffInput,'feff.egrid',[['e_grid', -40, 10, 0.1]])
setInput(feffInput,'feff.xes', [[-20, 10, 0.1]])
xesdir = os.path.join(dir,'XES')
if not os.path.exists(xesdir):
os.mkdir(xesdir)
feffinp = os.path.join(xesdir,'feff.inp')
# Write XES input.
writeXESInput(feffInput,feffinp)
# Run executables to get XES
# Set output and error files
with open(os.path.join(xesdir, 'corvus.FEFF.stdout'), 'w') as out, open(os.path.join(xesdir, 'corvus.FEFF.stderr'), 'w') as err:
execs = ['rdinp','atomic','pot','screen','opconsat','xsph','fms','mkgtr','path','genfmt','ff2x','sfconv']
for exe in execs:
if 'feff.MPI.CMD' in feffInput:
executable = feffInput.get('feff.MPI.CMD')[0]
args = feffInput.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe)]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',xesdir,executable,args,out,err)
# Make xes.dat from xmu.dat
xmuFile = open(os.path.join(xesdir,'xmu.dat'))
xesFile = os.path.join(dir,'xes.dat')
xesLines=[]
for line in xmuFile:
if line.lstrip()[0] != '#':
fields=line.split()
xesLines = xesLines + [str(xmu - float(fields[1])) + ' ' + fields[3]]
elif 'Mu' in line:
fields = line.strip().split()
xmu = float(fields[2][3:len(fields[2])-3])
# Write lines in reverse order so that column 1 is sorted correctly.
writeList(xesLines[::-1],xesFile)
# Now make input file for XANES calculation.
if 'feff.xanes' in xanesInput:
feffInput['feff.xanes'] = xanesInput['feff.xanes']
del feffInput['feff.xes']
# Set default energy grid
setInput(feffInput,'feff.egrid',[['e_grid', -10, 10, 0.05],['k_grid', 'last', 4, 0.025]],Force=True)
feffinp = os.path.join(dirname, 'feff.inp')
# Write XANES input for this run
writeXANESInput(feffInput,feffinp)
# Run XANES for this edge
# Set output and error files
with open(os.path.join(dirname, 'corvus.FEFF.stdout'), 'w') as out, open(os.path.join(dirname, 'corvus.FEFF.stderr'), 'w') as err:
execs = ['rdinp','atomic','pot','screen','opconsat','xsph','fms','mkgtr','path','genfmt','ff2x','sfconv']
for exe in execs:
if 'feff.MPI.CMD' in feffInput:
executable = feffInput.get('feff.MPI.CMD')[0]
args = feffInput.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe)]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dirname,executable,args,out,err)
# Now copy files from this edge to main directory
shutil.copyfile(os.path.join(dirname,'wscrn.dat'), os.path.join(dir,'wscrn_' + str(nEdge) + '.dat'))
shutil.copyfile(os.path.join(dirname,'phase.bin'), os.path.join(dir,'phase_' + str(nEdge) + '.bin'))
shutil.copyfile(os.path.join(dirname,'gg.bin'), os.path.join(dir,'gg_' + str(nEdge) + '.bin'))
shutil.copyfile(os.path.join(dirname,'xsect.dat'), os.path.join(dir,'xsect_' + str(nEdge) + '.dat'))
shutil.copyfile(os.path.join(dirname,'xmu.dat'), os.path.join(dir,'xmu_' + str(nEdge) + '.dat'))
shutil.copyfile(os.path.join(dirname,'rl.dat'), os.path.join(dir,'rl_' + str(nEdge) + '.dat'))
shutil.copyfile(os.path.join(dirname,'.dimensions.dat'), os.path.join(dir,'.dimensions.dat'))
# If this is the first edge, get the screened potential.
if nEdge == 1:
wscrnLines = []
with open(os.path.join(dirname,'wscrn.dat'),'r') as wscrnFileR:
for wscrnLine in wscrnFileR.readlines():
if wscrnLine.lstrip()[0] == '#':
wscrnLines = wscrnLines + [wscrnLine.strip()]
else:
wscrnFields = wscrnLine.strip().split()
wscrnLines = wscrnLines + [wscrnFields[0] + ' 0.0 0.0']
# Finally, run rixs executable
feffInput = savedInput
setInput(feffInput,'feff.rixs', [[0.1, 0.1]])
feffinp = os.path.join(dir, 'feff.inp')
# Write XANES input for this run
writeXANESInput(feffInput,feffinp)
# Set output and error files
with open(os.path.join(dir, 'corvus.FEFF.stdout'), 'w') as out, open(os.path.join(dir, 'corvus.FEFF.stderr'), 'w') as err:
execs = ['rdinp','atomic','rixs']
for exe in execs:
if 'feff.MPI.CMD' in feffInput:
executable = feffInput.get('feff.MPI.CMD')[0]
args = feffInput.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe)]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dir,executable,args,out,err)
outFile=os.path.join(dir,outFileName)
output[target] = np.loadtxt(outFile).T.tolist()
## OPCONS BEGIN
elif (target == 'opcons'):
# Opcons imports
import copy
#import matplotlib.pyplot as plt
from corvus.controls import generateAndRunWorkflow
# Define some constants
hart = 2*13.605698
alpinv = 137.03598956
bohr = 0.529177249
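            # Unit notes: hart is the Hartree energy in eV (2 x 13.6057 eV),
            # alpinv is the inverse fine-structure constant (1/alpha), and
            # bohr is the Bohr radius in Angstrom; they are used below to
            # convert between eV and Hartree and between Angstrom and bohr.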
# Used in fixing element symbols
only_alpha = re.compile('[^a-zA-Z]')
            # Set prefix for stdout of feff runs.
runExecutable.prefix = '\t\t\t'
# Copy general input to local one
input2 = copy.deepcopy(input)
# Modify the common values of local input
input2['feff.setedge'] = input.get('feff.setedge',[[True]])
input2['feff.absolute'] = [[True]]
input2['feff.rgrid'] = [[0.01]]
# Copy general config to local one
config2 = copy.deepcopy(config)
# Set directory to run in.
config2['cwd'] = config['xcDir']
# Set xcIndexStart to -1 so that xcDir will be set below rather than in prep.
config2['xcIndexStart'] = -1
# Use absolute units for everything.
config2['feff.absolute'] = [[True]]
# Initialize variables that collect results (?)
NumberDensity = []
vtot = 0.0
xas_arr = []
xas0_arr = []
en_arr = []
component_labels = []
# The logic of the lines below is weird: In opcons calculations the absorber is chosen on the fly by looping over all unique atoms
if 'absorbing_atom' not in input:
absorbers = []
else:
absorbers = input['absorbing_atom'][0]
# Build a list of absorbers for the system
            # I think this also builds a fake cluster to go in the input
if 'cif_input' in input2:
cifFile = ReadCif(os.path.abspath(input2['cif_input'][0][0]))
cif_dict = cifFile[list(cifFile.keys())[0]]
cell_data = CellData()
cell_data.getFromCIF(cif_dict)
cell_data.primitive()
symmult = []
cluster = []
i=1
for ia,a in enumerate(cell_data.atomdata): # This loops over sites in the original cif
symmult = symmult + [len(a)]
element = list(a[0].species.keys())[0]
component_labels = component_labels + [element + str(i)]
if 'absorbing_atom' not in input:
absorbers = absorbers + [ia+1]
cluster = cluster + [['Cu', 0.0, 0.0, ia*2.0 ]]
i += 1
if 'cluster' not in input2:
input2['cluster'] = cluster
# Debug: FDV
# print('ABSORBERS')
# pp_debug.pprint(absorbers)
# OPCONS LOOP SETUP BEGIN -------------------------------------------------------------------------------------
# Added by FDV
# Creating a list to collect the inputs for delayed execution
WF_Params_Dict = {}
# For each atom in absorbing_atoms run a full-spectrum calculation (all edges, XANES + EXAFS)
for absorber in absorbers:
print('')
print("##########################################################")
print(" Component: " + component_labels[absorber-1])
print("##########################################################")
print('')
### BEGIN INPUT GEN --------------------------------------------------------------------------------------------
input2['absorbing_atom'] = [[absorber]]
input2['feff.target'] = [[absorber]]
if 'cif_input' in input2:
input2['feff.target'] = [[absorber]]
element = list(cell_data.atomdata[absorber-1][0].species.keys())[0]
if 'number_density' not in input:
NumberDensity = NumberDensity + [symmult[absorber - 1]]
else:
# This only works if all elements are treated as the same for our calculation
element = input['cluster'][absorber-1][0]
if 'number_density' not in input:
n_element = 0
for atm in input['cluster']:
if element in atm:
n_element += 1
NumberDensity = NumberDensity + [n_element]
print('Number in unit cell: ' + str(NumberDensity[-1]))
### END INPUT GEN --------------------------------------------------------------------------------------------
# For each edge for this atom, run a XANES and EXAFS run
# Commented out by FDV, unused, simplifying
# FirstEdge = True
Item_Absorber = {}
for edge in feff_edge_dict[only_alpha.sub('',element)]:
Item_Edge = {}
print("\t" + edge)
print("\t\t" + 'XANES')
### BEGIN INPUT GEN --------------------------------------------------------------------------------------------
input2['feff.edge'] = [[edge]]
# Run XANES
                input2['target_list'] = [['xanes']]
# Set energy grid for XANES.
input2['feff.egrid'] = [['e_grid', -10, 10, 0.1], ['k_grid','last',5,0.07]]
input2['feff.control'] = [[1,1,1,1,1,1]]
config2['xcDir'] = os.path.join(config2['cwd'],component_labels[absorber-1],edge,'XANES')
targetList = [['xanes']]
if 'feff.scf' in input:
input2['feff.scf'] = input['feff.scf']
else:
input2['feff.scf'] = [[4.0,0,100,0.1,0]]
if 'feff.fms' in input:
input2['feff.fms'] = input['feff.fms']
else:
input2['feff.fms'] = [[6.0]]
input2['feff.rpath'] = [[0.1]]
### END INPUT GEN --------------------------------------------------------------------------------------------
# Added by FDV
Item_xanes = { 'config2':copy.deepcopy(config2),
'input2':copy.deepcopy(input2),
'targetList':copy.deepcopy(targetList) }
# Commented out by FDV, unused, simplifying
# FirstEdge = False
### BEGIN INPUT GEN --------------------------------------------------------------------------------------------
print("\t\t" + 'EXAFS')
xanesDir = config2['xcDir']
exafsDir = os.path.join(config2['cwd'],component_labels[absorber-1],edge,'EXAFS')
config2['xcDir'] = exafsDir
input2['feff.control'] = [[0, 1, 1, 1, 1, 1]]
input2['feff.egrid'] = [['k_grid', -20, -2, 1], ['k_grid',-2,0,0.07], ['k_grid', 0, 40, 0.07],['exp_grid', 'last', 500000.0, 10.0]]
if 'feff.fms' in input:
input2['feff.rpath'] = [[max(input['feff.fms'][0][0],0.1)]]
else:
input2['feff.rpath'] = [[6.0]]
input2['feff.fms'] = [[0.0]]
### END INPUT GEN --------------------------------------------------------------------------------------------
# Added by FDV
Item_exafs = { 'config2':copy.deepcopy(config2),
'input2':copy.deepcopy(input2),
'targetList':copy.deepcopy(targetList) }
Item_Absorber[edge] = { 'xanes':Item_xanes,
'exafs':Item_exafs }
print('')
WF_Params_Dict[absorber] = Item_Absorber
print('')
# OPCONS LOOP SETUP END ---------------------------------------------------------------------------------------
# Debug: FDV
print('#### FDV ####')
print('#### All WF Params ####')
pp_debug.pprint(WF_Params_Dict)
            # monty has an issue on Python 2.7, so just use pickle
import pickle
pickle.dump(WF_Params_Dict,open('WF_Params_Dict.pickle','wb'))
# Debug
# sys.exit()
# OPCONS LOOP RUN BEGIN ---------------------------------------------------------------------------------------
# For each atom in absorbing_atoms run a full-spectrum calculation (all edges, XANES + EXAFS)
for absorber in absorbers:
print('')
print("##########################################################")
print(" Component: " + component_labels[absorber-1])
print("##########################################################")
print('')
print('--- FDV ---', 'absorber', absorber)
for edge in WF_Params_Dict[absorber].keys():
print("\t" + edge)
print("\t\t" + 'XANES')
# Added by FDV
# Modified by FDV
# Commented out and moved to an independent loop
print('--- FDV ---', 'edge', edge)
config2 = WF_Params_Dict[absorber][edge]['xanes']['config2']
input2 = WF_Params_Dict[absorber][edge]['xanes']['input2']
targetList = WF_Params_Dict[absorber][edge]['xanes']['targetList']
if 'opcons.usesaved' not in input:
generateAndRunWorkflow(config2, input2,targetList)
else:
# Run if xmu.dat doesn't exist.
if not os.path.exists(os.path.join(config2['xcDir'],'xmu.dat')):
generateAndRunWorkflow(config2, input2,targetList)
else:
print("\t\t\txmu.dat already calculated. Skipping.")
### BEGIN INPUT GEN --------------------------------------------------------------------------------------------
print("\t\t" + 'EXAFS')
xanesDir = config2['xcDir']
exafsDir = os.path.join(config2['cwd'],component_labels[absorber-1],edge,'EXAFS')
if not os.path.exists(exafsDir):
os.makedirs(exafsDir)
shutil.copyfile(os.path.join(xanesDir,'apot.bin'), os.path.join(exafsDir,'apot.bin'))
shutil.copyfile(os.path.join(xanesDir,'pot.bin'), os.path.join(exafsDir,'pot.bin'))
# Modified by FDV
# Commented out and moved to an independent loop
config2 = WF_Params_Dict[absorber][edge]['exafs']['config2']
input2 = WF_Params_Dict[absorber][edge]['exafs']['input2']
targetList = WF_Params_Dict[absorber][edge]['exafs']['targetList']
if 'opcons.usesaved' not in input2:
generateAndRunWorkflow(config2, input2,targetList)
else:
# Run if xmu.dat doesn't exist.
if not os.path.exists(os.path.join(config2['xcDir'],'xmu.dat')):
generateAndRunWorkflow(config2, input2,targetList)
print('')
print('')
# OPCONS LOOP RUN END -----------------------------------------------------------------------------------------
# OPCONS LOOP ANA BEGIN ---------------------------------------------------------------------------------------
# For each atom in absorbing_atoms run a full-spectrum calculation (all edges, XANES + EXAFS)
emin = 100000.0
for iabs,absorber in enumerate(absorbers):
print('')
print("##########################################################")
print(" Component: " + component_labels[absorber-1])
print("##########################################################")
print('')
# Commented out by FDV, unused, simplifying
# FirstEdge = True
for edge in WF_Params_Dict[absorber].keys():
print("\t" + edge)
print("\t\t" + 'XANES')
# Added by FDV
config2 = WF_Params_Dict[absorber][edge]['xanes']['config2']
input2 = WF_Params_Dict[absorber][edge]['xanes']['input2']
targetList = WF_Params_Dict[absorber][edge]['xanes']['targetList']
### BEGIN OUTPUT ANA --------------------------------------------------------------------------------------------
if 'cif_input' in input:
# Get total volume from cif in atomic units.
vtot = cell_data.volume()*(cell_data.lengthscale/bohr)**3
else:
# Get norman radii from xmu.dat
with open(os.path.join(config2['xcDir'],'xmu.dat')) as f:
for line in f: # Go through the lines one at a time
words = line.split()
if 'Rnm=' in words:
vtot = vtot + (float(words[words.index('Rnm=')+1])/bohr)**3*4.0/3.0*np.pi
break
f.close()
outFile = os.path.join(config2['xcDir'],'xmu.dat')
e1,k1,xanes = np.loadtxt(outFile,usecols = (0,2,3)).T
xanes = np.maximum(xanes,0.0)
### END OUTPUT ANA --------------------------------------------------------------------------------------------
# Added by FDV
config2 = WF_Params_Dict[absorber][edge]['exafs']['config2']
input2 = WF_Params_Dict[absorber][edge]['exafs']['input2']
targetList = WF_Params_Dict[absorber][edge]['exafs']['targetList']
### BEGIN OUTPUT ANA --------------------------------------------------------------------------------------------
outFile = os.path.join(config2['xcDir'],'xmu.dat')
e2,k2,exafs,mu0 = np.loadtxt(outFile,usecols = (0,2,3,4)).T
exafs = np.maximum(exafs,0.0)
mu0 = np.maximum(mu0,0.0)
e0 = e2[100] - (k2[100]*bohr)**2/2.0*hart
emin = min(e0/2.0/hart,emin)
                # Interpolate onto a union of the two energy grids and smoothly
                # go from one to the other between kstart and kfin below
e_tot = np.unique(np.append(e1,e2))
k_tot = np.where(e_tot > e0, np.sqrt(2.0*np.abs(e_tot-e0)/hart), -np.sqrt(2.0*np.abs(e0 - e_tot)/hart))/bohr
kstart = 3.0
kfin = 4.0
weight1 = np.cos((np.minimum(np.maximum(k_tot,kstart),kfin)-kstart)/(kfin-kstart)*np.pi/2)**2
weight2 = 1.0 - weight1
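                # Blend note: weight1 is a raised-cosine window that equals 1
                # for k <= kstart (pure XANES) and 0 for k >= kfin (pure
                # EXAFS), giving a smooth crossover over 3-4 inverse Angstrom.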
#NumberDensity[iabs] = NumberDensity[iabs]/2.0
print('Number density', NumberDensity[iabs], vtot, NumberDensity[iabs]/vtot)
xas_element = NumberDensity[iabs]*(np.interp(e_tot,e1,xanes)*weight1 + np.interp(e_tot,e2,exafs)*weight2)
xas0_element = NumberDensity[iabs]*np.interp(e_tot,e2,mu0)
xas_element[np.where(e_tot < e1[0])] = NumberDensity[iabs]*np.interp(e_tot[np.where(e_tot < e1[0])],e2,mu0)
xas_arr = xas_arr + [xas_element]
xas0_arr = xas0_arr + [xas0_element]
en_arr = en_arr + [e_tot]
#plt.plot(e_tot, xas_element)
#plt.show()
### END OUTPUT ANA --------------------------------------------------------------------------------------------
print('')
print('')
# OPCONS LOOP ANA END -----------------------------------------------------------------------------------------
        # POST LOOP ANALYSIS: If everything is correct we should not have to change anything below
# Interpolate onto common grid from 0 to 500000 eV
# Make common grid as union of all grids.
energy_grid = np.unique(np.concatenate(en_arr))
# Now loop through all elements and add xas from each element
xas_tot = np.zeros_like(energy_grid)
xas0_tot = np.zeros_like(energy_grid)
for i,en in enumerate(en_arr):
xas_tot = xas_tot + np.interp(energy_grid,en,xas_arr[i],left=0.0,right=0.0)
xas0_tot = xas0_tot + np.interp(energy_grid,en,xas0_arr[i],left=0.0,right=0.0)
xas_tot = xas_tot/vtot
xas0_tot = xas0_tot/vtot
        # transform to eps2: eps2 = xas_tot * 4*pi/(alpha*omega) * bohr**2
energy_grid = energy_grid/hart
eps2 = xas_tot*4*np.pi*alpinv*bohr**2/energy_grid
eps2 = eps2[np.where(energy_grid > emin)]
eps2_bg = xas0_tot*4*np.pi*alpinv*bohr**2/energy_grid
        eps2_bg = eps2_bg[np.where(energy_grid > emin)]
from builtins import range, str, object
import os
import re
import copy
import json
import numpy as np
import pickle
from functools import partial
from contextlib import contextmanager
from peri import util, comp, models
from peri.logger import log as baselog
log = baselog.getChild('states')
class UpdateError(Exception):
pass
def sample(field, inds=None, slicer=None, flat=True):
"""
Take a sample from a field given flat indices or a shaped slice
Parameters
-----------
inds : list of indices
One dimensional (raveled) indices to return from the field
slicer : slice object
A shaped (3D) slicer that returns a section of image
flat : boolean
Whether to flatten the sampled item before returning
"""
if inds is not None:
out = field.ravel()[inds]
elif slicer is not None:
out = field[slicer].ravel()
else:
out = field
if flat:
return out.ravel()
return out
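# Example: sample(field, inds=[0, 5]) returns field.ravel()[[0, 5]];
# sample(field, slicer=np.s_[2:4]) returns field[2:4].ravel() (note the
# slicer branch ravels the section before the `flat` check is reached).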
_graddoc = \
"""
Parameters
-----------
func : callable
        Function with respect to which to take the derivative; should return
        an ndarray that is the same shape for all params and values
params : string or list of strings
        Parameter(s) with respect to which to take the derivative
dl : float
Derivative step size for numerical deriv
rts : boolean
Return To Start. Return the state to how you found it when done,
        needs another update call, so can be omitted sometimes (small dl).
If True, functions return the final answer along with the final func
evaluation so that it may be passed onto other calls.
nout : Int, optional
How many objects the function returns. Allows for gradient of multiple
things (e.g. residuals + error) at the same time in a nice, contiguous
way. Default is 1
out : ndarray or None, optional
If set, the return array for the output. Performance feature. Does not
check for shape internally; will just raise an error. Default is None,
i.e. initialize the output internally.
**kwargs :
Arguments to `func`
"""
_sampledoc = \
"""
kwargs (supply only one):
-----------------------------
inds : list of indices
One dimensional (raveled) indices to return from the field
slicer : slice object
A shaped (3D) slicer that returns a section of image
flat : boolean
Whether to flatten the sampled item before returning
"""
#=============================================================================
# Super class of State, has all basic components and structure
#=============================================================================
class State(comp.ParameterGroup):
def __init__(self, params, values, logpriors=None, hyper_params=['sigma'],
hyper_values=[0.04], **kwargs):
"""
A model and corresponding functions to perform a fit to data using a
variety of optimization routines. A model takes parameters and values
(names and values) which determine the output of a model, which is then
compared with data.
Parameters
-----------
params : list of strings
The names of the parameters (should be a unique set)
values : list of numbers
The corresponding values of the parameters
logpriors : list of `peri.prior.Prior`
Priors (constraints) to apply to parameters
hyper_params : list of strings, optional
The names of any hyper-parameters (should be a unique set).
Stored as a `peri.comp.ParameterGroup`. Default is `['sigma']`,
the standard-deviation of the noise distribution.
hyper_values : list of numbers, optional
The corresponding values of the hyper-parameters. Stored as a
`peri.comp.ParameterGroup`. Default is `[0.04]`
kwargs :
Arguments to pass to super class :class:`peri.comp.ParameterGroup`
including `ordered` and `category`.
"""
self.stack = []
self.logpriors = logpriors
super(State, self).__init__(params, values, **kwargs)
self.hyper_parameters = comp.ParameterGroup(hyper_params, hyper_values)
self.build_funcs()
@property
def data(self):
"""
Class property: the raw data of the model fit. Should return a number
        (preferably float) or an ndarray (essentially any object which has
operands +-/...). This object is constant since it is data.
"""
pass
@property
def model(self):
"""
Class property: the current model fit to the data. Should return a
number or ndarray. Ideally this object should be an object updated by
the :func:`peri.states.State.update` function and simply returned in
this property
"""
pass
@property
def residuals(self):
"""
Class property: the model residuals wrt data, residuals = data - model,
:math:`R_i = D_i - M_i(\\theta)`
"""
return self.data - self.model
@property
def error(self):
"""
Class property: Sum of the squared errors,
:math:`E = \sum_i (D_i - M_i(\\theta))^2`
"""
r = self.residuals.ravel()
return np.dot(r,r) #faster than flatiter
@property
def loglikelihood(self):
"""
Class property: loglikelihood calculated by the model error,
:math:`\\mathcal{L} = - \\frac{1}{2} \\sum\\left[
\\left(\\frac{D_i - M_i(\\theta)}{\sigma}\\right)^2
+ \\log{(2\pi \sigma^2)} \\right]`
"""
sig = self.hyper_parameters.get_values('sigma')
err = self.error
N = np.size(self.data)
return -0.5*err/sig**2 - np.log(np.sqrt(2*np.pi)*sig)*N
@property
def logposterior(self):
"""
Class property: log of posterior prob (including likelihood
calculated by the model error and priors):
self.logprior + self.loglikelihood
"""
return self.logprior + self.loglikelihood
@property
def logprior(self):
"""
Class property: logprior calculated from the sum of all prior objects
"""
return 0 # FIXME should return the sum of the log priors
def update(self, params, values):
"""
Update a single parameter or group of parameters ``params``
with ``values``.
Parameters
----------
params : string or list of strings
Parameter names which to update
value : number or list of numbers
Values of those parameters which to update
"""
return super(State, self).update(params, values)
def update_hyper(self, params, values):
"""
Update any single hyper parameter or group of parameters ``params``
with ``values``.
Parameters
----------
params : string or list of strings
Parameter names which to update
value : number or list of numbers
Values of those parameters which to update
"""
self.hyper_parameters.update(params, values)
def update_sigma(self, sigma):
"""Updates the expected sigma of the noise distribution"""
self.update_hyper('sigma', sigma)
def push_update(self, params, values):
"""
Perform a parameter update and keep track of the change on the state.
Same call structure as :func:`peri.states.States.update`
"""
curr = self.get_values(params)
self.stack.append((params, curr))
self.update(params, values)
def pop_update(self):
"""
        Pop the last update from the stack pushed by
        :func:`peri.states.States.push_update` by undoing the change last
        performed.
"""
params, values = self.stack.pop()
self.update(params, values)
@contextmanager
def temp_update(self, params, values):
"""
Context manager to temporarily perform a parameter update (by using the
stack structure). To use:
with state.temp_update(params, values):
# measure the cost or something
state.error
"""
self.push_update(params, values)
yield
self.pop_update()
def param_all(self):
return self.params
def _grad_one_param(self, funct, p, dl=2e-5, rts=False, nout=1, **kwargs):
"""
Gradient of `func` wrt a single parameter `p`. (see _graddoc)
"""
vals = self.get_values(p)
f0 = funct(**kwargs)
self.update(p, vals+dl)
f1 = funct(**kwargs)
if rts:
self.update(p, vals)
if nout == 1:
return (f1 - f0) / dl
else:
return [(f1[i] - f0[i]) / dl for i in range(nout)]
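    # Finite-difference note: this is a one-sided (forward) difference,
    # f'(p) ~ (f(p + dl) - f(p)) / dl, accurate to first order in dl.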
def _hess_two_param(self, funct, p0, p1, dl=2e-5, rts=False, **kwargs):
"""
Hessian of `func` wrt two parameters `p0` and `p1`. (see _graddoc)
"""
vals0 = self.get_values(p0)
vals1 = self.get_values(p1)
f00 = funct(**kwargs)
self.update(p0, vals0+dl)
f10 = funct(**kwargs)
self.update(p1, vals1+dl)
f11 = funct(**kwargs)
self.update(p0, vals0)
f01 = funct(**kwargs)
if rts:
self.update(p0, vals0)
self.update(p1, vals1)
return (f11 - f10 - f01 + f00) / (dl**2)
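    # Mixed-partial stencil: (f(p0+dl, p1+dl) - f(p0+dl, p1) - f(p0, p1+dl)
    # + f(p0, p1)) / dl**2 approximates d^2 f / (dp0 dp1).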
def _grad(self, funct, params=None, dl=2e-5, rts=False, nout=1, out=None,
**kwargs):
"""
Gradient of `func` wrt a set of parameters params. (see _graddoc)
"""
if params is None:
params = self.param_all()
ps = util.listify(params)
f0 = funct(**kwargs)
# get the shape of the entire gradient to return and make an array
calc_shape = (
lambda ar: (len(ps),) + (ar.shape if isinstance(
ar, np.ndarray) else (1,)))
if out is not None:
grad = out # reference
elif nout == 1:
shape = calc_shape(f0)
grad = np.zeros(shape) # must be preallocated for mem reasons
else:
shape = [calc_shape(f0[i]) for i in range(nout)]
grad = [np.zeros(shp) for shp in shape]
for i, p in enumerate(ps):
if nout == 1:
grad[i] = self._grad_one_param(funct, p, dl=dl, rts=rts,
nout=nout, **kwargs)
else:
stuff = self._grad_one_param(funct, p, dl=dl, rts=rts,
nout=nout, **kwargs)
for a in range(nout): grad[a][i] = stuff[a]
return grad # was np.squeeze(grad)
def _jtj(self, funct, params=None, dl=2e-5, rts=False, **kwargs):
"""
        jTj of `func` with respect to parameters `params`. (see _graddoc)
"""
grad = self._grad(funct=funct, params=params, dl=dl, rts=rts, **kwargs)
return np.dot(grad, grad.T)
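    # J^T J is the Gauss-Newton approximation to the Hessian of the
    # least-squares cost 0.5*||f||^2; it is exact when f is linear in the
    # parameters.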
def _hess(self, funct, params=None, dl=2e-5, rts=False, **kwargs):
"""
        Hessian of `func` with respect to parameters `params`. (see _graddoc)
"""
if params is None:
params = self.param_all()
ps = util.listify(params)
f0 = funct(**kwargs)
# get the shape of the entire hessian, allocate an array
shape = f0.shape if isinstance(f0, np.ndarray) else (1,)
shape = (len(ps), len(ps)) + shape
hess = np.zeros(shape)
for i, pi in enumerate(ps):
for j, pj in enumerate(ps[i:]):
J = j + i
thess = self._hess_two_param(funct, pi, pj, dl=dl, rts=rts, **kwargs)
hess[i][J] = thess
hess[J][i] = thess
        return np.squeeze(hess)
import os, os.path, random
import json
import torch
from torch.nn import functional as F
from torchvision import transforms
import numpy as np
from PIL import Image
import cv2
import albumentations as A
def img_transform(img):
# 0-255 to 0-1
img = np.float32(np.array(img)) / 255.
img = img.transpose((2, 0, 1))
img = torch.from_numpy(img.copy())
return img
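# img_transform maps an HxWxC uint8 image in [0, 255] to a CxHxW float32
# torch tensor in [0, 1]; no mean/std normalization is applied here.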
def imresize(im, size, interp='bilinear'):
if interp == 'nearest':
resample = Image.NEAREST
elif interp == 'bilinear':
resample = Image.BILINEAR
elif interp == 'bicubic':
resample = Image.BICUBIC
else:
raise Exception('resample method undefined!')
return im.resize(size, resample)
def b_imresize(im, size, interp='bilinear'):
return F.interpolate(im, size, mode=interp)
# from HRnet
def multi_scale_aug(image, label=None):
# print('image_shape: ', image.shape)
# print('label_shape: ', label.shape)
rand_scale = 0.5 + random.randint(0, 16) / 10.0
    long_size = int(2048 * rand_scale + 0.5)  # plain int(): np.int was removed in NumPy >= 1.24
    w, h = image.shape[-2:]
    if h > w:
        new_h = long_size
        new_w = int(w * long_size / h + 0.5)
    else:
        new_w = long_size
        new_h = int(h * long_size / w + 0.5)
image = F.interpolate(image, (new_w, new_h), mode='bilinear')
if label is not None:
label = F.interpolate(label.unsqueeze(1).float(), (new_w, new_h), mode='nearest').squeeze(1).long()
else:
return image
return image, label
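# multi_scale_aug resizes so the longer side becomes 2048 * s for a random
# s in [0.5, 2.1], keeping the aspect ratio; labels are resized with
# nearest-neighbour interpolation so class ids are preserved.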
def patch_loader(X, Y, xi, yi, cfg, train_mode=False, select_scale=None):
X_batch = X.clone()
Y_batch = Y.clone()
if select_scale is not None:
select_scale_batch = select_scale.clone()
len_batch = len(select_scale_batch)
else:
len_batch = X.size(0)
# print('---------select_scale_batch-----------\n', select_scale_batch)
for b in range(len_batch):
X = X_batch[b].unsqueeze(0)
Y = Y_batch[b].unsqueeze(0)
if select_scale is not None:
select_scale = int(select_scale_batch[b])
# print('---------select_scale-----------\n', select_scale)
fov_map_scale = cfg.MODEL.fov_map_scale
ori_w, ori_h = X.shape[-2:]
if cfg.DATASET.multi_scale_aug and train_mode:
X, Y = multi_scale_aug(X, Y)
ori_cx_lr = xi*fov_map_scale
ori_cy_lr = yi*fov_map_scale*cfg.MODEL.patch_ap
scaled_cx_lr = ori_cx_lr * (X.shape[-2]/ori_w)
scaled_cy_lr = ori_cy_lr * (X.shape[-1]/ori_h)
xi, yi = scaled_cx_lr//fov_map_scale, scaled_cy_lr//(fov_map_scale*cfg.MODEL.patch_ap)
# X: b,c,w,h
xi_ori, yi_ori = xi, yi
if train_mode == False:
patch_bank = list((float(cfg.VAL.expand_prediection_rate_patch)*np.array(cfg.MODEL.patch_bank)).astype(int))
else:
patch_bank = cfg.MODEL.patch_bank
segm_downsampling_rate = cfg.DATASET.segm_downsampling_rate
fov_padding = cfg.MODEL.fov_padding
# single_gpu_size = str(torch.cuda.get_device_properties('cuda:0')).split('total_memory=')[1].split('MB')[0]
single_gpu_size = 10000
if int(single_gpu_size) < 1:
fov_padding_cpu = True
else:
fov_padding_cpu = False
if 'CITYSCAPES' in cfg.DATASET.root_dataset or 'CITYSCAPE' in cfg.DATASET.list_train:
ignore_label=20-1
elif 'Digest' in cfg.DATASET.root_dataset:
ignore_label=-2
else:
if cfg.DATASET.ignore_index != -2:
ignore_label=cfg.DATASET.ignore_index
else:
ignore_label=-2
X_patches = []
X_patches_cords = []
for s in range(len(patch_bank)):
if cfg.VAL.F_Xlr_only and s>0:
X_patches.append(X_patches[0])
continue
if select_scale != None and s != select_scale and s != 0:
continue
patch_size = patch_bank[s]
patch_size_x = patch_size
patch_size_y = patch_size*cfg.MODEL.patch_ap
        # TODO: debug; the current approach of adjusting xi, yi only works when X.shape is divisible by fov_map_scale
if cfg.DATASET.adjust_crop_range:
xi = int(xi_ori*(X.shape[-2]-patch_size_x)/X.shape[-2])
yi = int(yi_ori*(X.shape[-1]-patch_size_y)/X.shape[-1])
# correction on residual
if xi_ori >= round(ori_w/fov_map_scale)-1:
xi += 1
if yi_ori >= round(ori_h/(fov_map_scale*cfg.MODEL.patch_ap))-1:
yi += 1
# if X.shape[2] < patch_size_x or X.shape[3] < patch_size_y:
# raise Exception('Patch size {}x{} exceed image size {}'.format(patch_size_x, patch_size_y, X.shape))
if fov_padding:
if cfg.DATASET.adjust_crop_range:
p_h = max(patch_size_x-X.shape[2], 0)
p_w = max(patch_size_y-X.shape[3], 0)
# p = max(X_p_x, X_p_y)
p_y_h = max(patch_bank[0]-X.shape[2], 0)
p_y_w = max(patch_bank[0]*cfg.MODEL.patch_ap-X.shape[3], 0)
# p_y = max(Y_p_x, Y_p_y)
else:
p_w = patch_size_y-1
p_h = patch_size_x-1
p_y_w = patch_bank[0]*cfg.MODEL.patch_ap-1
p_y_h = patch_bank[0]-1
if cfg.DATASET.mirror_padding:
if fov_padding_cpu:
X_pad = F.pad(X, (p_w,p_w,p_h,p_h), mode='reflect').cpu()
else:
X_pad = F.pad(X, (p_w,p_w,p_h,p_h), mode='reflect')
if train_mode and segm_downsampling_rate != 1:
if fov_padding_cpu:
Y_pad = F.pad(Y.unsqueeze(1).float(), (p_y_w//segm_downsampling_rate,p_y_w//segm_downsampling_rate,p_y_h//segm_downsampling_rate,p_y_h//segm_downsampling_rate), mode='reflect').cpu()
else:
Y_pad = F.pad(Y.unsqueeze(1).float(), (p_y_w//segm_downsampling_rate,p_y_w//segm_downsampling_rate,p_y_h//segm_downsampling_rate,p_y_h//segm_downsampling_rate), mode='reflect')
else:
if fov_padding_cpu:
Y_pad = F.pad(Y.unsqueeze(1).float(), (p_y_w,p_y_w,p_y_h,p_y_h), mode='reflect').cpu()
else:
Y_pad = F.pad(Y.unsqueeze(1).float(), (p_y_w,p_y_w,p_y_h,p_y_h), mode='reflect')
Y_pad = Y_pad.squeeze(1).long()
else:
if fov_padding_cpu:
X_pad = F.pad(X, (p_w,p_w,p_h,p_h)).cpu()
else:
X_pad = F.pad(X, (p_w,p_w,p_h,p_h))
if train_mode and segm_downsampling_rate != 1:
if fov_padding_cpu:
Y_pad = F.pad(Y, (p_y_w//segm_downsampling_rate,p_y_w//segm_downsampling_rate,p_y_h//segm_downsampling_rate,p_y_h//segm_downsampling_rate), value=ignore_label).cpu()
else:
Y_pad = F.pad(Y, (p_y_w//segm_downsampling_rate,p_y_w//segm_downsampling_rate,p_y_h//segm_downsampling_rate,p_y_h//segm_downsampling_rate), value=ignore_label)
else:
if fov_padding_cpu:
Y_pad = F.pad(Y, (p_y_w,p_y_w,p_y_h,p_y_h), value=ignore_label).cpu()
else:
Y_pad = F.pad(Y, (p_y_w,p_y_w,p_y_h,p_y_h), value=ignore_label)
cx_lr = xi*fov_map_scale # upper left corner of current X_lr pixel
if cfg.DATASET.adjust_crop_range:
if cx_lr > (X.shape[2]-patch_size_x):
cx_lr = X.shape[2]-patch_size_x
cx = cx_lr + patch_bank[0]//2 - patch_size_x//2 # upper left corner of current patch size with same center of X_lr pixel
if cfg.DATASET.adjust_crop_range:
if cx < 0:
cx = 0
if fov_padding:
# cx_lr = cx_lr+p_y
cx_p = cx+p_h
if cfg.DATASET.multi_scale_aug:
cx_p_y = cx+p_y_h
else:
cx_p_y = cx_lr+p_y_h
if cfg.DATASET.multi_scale_aug and train_mode:
if cx_p < 0:
cx_p = 0
elif cx_p > (X.shape[2]-patch_size_x):
cx_p = X.shape[2]-patch_size_x
if cx_p_y < 0:
cx_p_y = 0
elif cx_p_y > (X.shape[2]-patch_size_x):
cx_p_y = X.shape[2]-patch_size_x
if cfg.DATASET.adjust_crop_range:
if cx_p_y < 0:
cx_p_y = 0
elif cx_p_y > (X.shape[2]-patch_size_x):
cx_p_y = X.shape[2]-patch_size_x
else:
if cx < 0:
cx = 0
elif cx > (X.shape[2]-patch_size_x):
cx = X.shape[2]-patch_size_x
cy_lr = yi*(fov_map_scale*cfg.MODEL.patch_ap) # upper left corner of current X_lr pixel
if cfg.DATASET.adjust_crop_range:
if cy_lr > (X.shape[3]-patch_size_y):
cy_lr = X.shape[3]-patch_size_y
cy = cy_lr + (patch_bank[0]*cfg.MODEL.patch_ap)//2 - patch_size_y//2 # upper left corner of current patch size with same center of X_lr pixel
if cfg.DATASET.adjust_crop_range:
if cy < 0:
cy = 0
if fov_padding:
# cy_lr = cy_lr+p_y
cy_p = cy+p_w
if cfg.DATASET.multi_scale_aug:
cy_p_y = cy+p_y_w
else:
cy_p_y = cy_lr+p_y_w
if cfg.DATASET.multi_scale_aug and train_mode:
if cy_p < 0:
cy_p = 0
elif cy_p > (X.shape[3]-patch_size_y):
cy_p = X.shape[3]-patch_size_y
if cy_p_y < 0:
cy_p_y = 0
elif cy_p_y > (X.shape[3]-patch_size_y):
cy_p_y = X.shape[3]-patch_size_y
if cfg.DATASET.adjust_crop_range:
if cy_p_y < 0:
cy_p_y = 0
elif cy_p_y > (X.shape[3]-patch_size_y):
cy_p_y = X.shape[3]-patch_size_y
else:
if cy < 0:
cy = 0
elif cy > (X.shape[3]-patch_size_y):
cy = X.shape[3]-patch_size_y
if fov_padding:
crop_patch = X_pad[:, :, cx_p:cx_p+patch_size_x, cy_p:cy_p+patch_size_y].to(X.device)
else:
crop_patch = X[:, :, cx:cx+patch_size_x, cy:cy+patch_size_y]
assert min(crop_patch.shape)!=0, "crop_patch size wrong ({}) cropped from X_pad ({}), X({}) at (cx_p={},cy_p={},xi={},yi={}, patch_size_x={}, patch_size_y={})".format(crop_patch.shape, X_pad.shape, X.shape, cx_p, cy_p, xi, yi, patch_size_x, patch_size_y)
if not (select_scale != None and select_scale != 0 and s == 0):
X_patches.append(b_imresize(crop_patch, (patch_bank[0],patch_bank[0]*cfg.MODEL.patch_ap), interp='bilinear'))
if not train_mode:
if not (select_scale != None and select_scale != 0 and s == 0):
X_patches_cords.append((cx, cy, patch_size, p_w, p_h)) # TODO: patch_size to be corrected in visualisation in eval/eval_multipro
if s == 0:
if segm_downsampling_rate != 1 and train_mode:
patch_size = patch_bank[0] // segm_downsampling_rate
cx_p_y = cx_p_y // segm_downsampling_rate
cy_p_y = cy_p_y // segm_downsampling_rate
else:
patch_size = patch_bank[0]
patch_size_x = patch_size
patch_size_y = patch_size*cfg.MODEL.patch_ap
if not train_mode:
Y_patch_cord = (cx_p_y, cy_p_y, patch_size, p_y_w, p_y_h)
# Y.shape b,w,h, NOTE Y has different size in train and val mode
if fov_padding:
Y_patch = Y_pad[:, cx_p_y:cx_p_y+patch_size_x, cy_p_y:cy_p_y+patch_size_y].to(Y.device)
else:
Y_patch = Y[:, cx_lr:cx_lr+patch_size_x, cy_lr:cy_lr+patch_size_y]
if b == 0:
X_patches_batch = X_patches
Y_patch_batch = Y_patch
else:
for p in range(len(X_patches_batch)):
X_patches_batch[p] = torch.cat([X_patches_batch[p], X_patches[p]])
Y_patch_batch = torch.cat([Y_patch_batch, Y_patch])
if train_mode:
return X_patches_batch, Y_patch_batch
else:
# print('Y_patch_cord: ', Y_patch_cord)
return X_patches_batch, Y_patch_cord, X_patches_cords, Y_patch_batch
class BaseDataset(torch.utils.data.Dataset):
def __init__(self, odgt, opt, **kwargs):
# remapping labels reflecting severity degree of GS (Gleason score)
if opt.root_dataset == '/scratch0/chenjin/GLEASON2019_DATA/Data/' or \
opt.root_dataset == '/home/chenjin/Chen_UCL/Histo-MRI-mapping/GLEASON2019_DATA/Data/' or \
opt.root_dataset == '/SAN/medic/Histo_MRI_GPU/chenjin/Data/GLEASON2019_DATA/Data/' or \
'GLEASON2019_DATA' in opt.root_dataset or 'Gleason' in opt.root_dataset:
# four class mapping
if opt.class_mapping == 0:
self.label_mapping = {0: 1,
1: 1, 2: 1,
3: 2, 4: 3,
5: 4, 6: 1,
}
# three class mapping exclude class5
elif opt.class_mapping == 30:
self.label_mapping = {0: 1,
1: 1, 2: 1,
3: 2, 4: 3,
5: 1, 6: 1,
}
# gs3 vs all
elif opt.class_mapping == 3:
self.label_mapping = {0: 1,
1: 1, 2: 1,
3: 2, 4: 1,
5: 1, 6: 1,
}
# gs4 vs all
elif opt.class_mapping == 4:
self.label_mapping = {0: 1,
1: 1, 2: 1,
3: 1, 4: 2,
5: 1, 6: 1,
}
# gs5 vs all
elif opt.class_mapping == 5:
self.label_mapping = {0: 1,
1: 1, 2: 1,
3: 1, 4: 1,
5: 2, 6: 1,
}
# benign vs all
elif opt.class_mapping == 6:
self.label_mapping = {0: 1,
1: 1, 2: 1,
3: 2, 4: 2,
5: 2, 6: 1,
}
# mean and std
self.normalize = transforms.Normalize(
# gleason2019 322 train mean and std applied
mean=[0.748, 0.611, 0.823],
std=[0.146, 0.245, 0.119])
elif opt.root_dataset == '/home/chenjin/Chen_UCL/Histo-MRI-mapping/DigestPath2019/' or 'Digest' in opt.list_train:
self.label_mapping = {0: 1,
255: 2,
}
# mean and std
self.normalize = transforms.Normalize(
# DigestPath2019 train mean and std applied
mean=[0.816, 0.697, 0.792],
std=[0.160, 0.277, 0.198])
elif 'ADE20K' in opt.root_dataset or 'ADE' in opt.list_train:
self.label_mapping = {}
# mean and std
self.normalize = transforms.Normalize(
# ImageNet mean and std applied
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
elif 'CITYSCAPES' in opt.root_dataset or 'CITYSCAPE' in opt.list_train:
# following HRNet-Semantic-Segmentation setting
# but starting from 1 instead of 0, since 0 seems to trigger a bug in the criterion.OhemCrossEntropy implementation
# debug note 24/12/19: labels must start from 1 and be contiguous, otherwise pred via view(-1) and seg_label become inconsistent
ignore_label=20
self.label_mapping = {-1: ignore_label, 0: ignore_label,
1: ignore_label, 2: ignore_label,
3: ignore_label, 4: ignore_label,
5: ignore_label, 6: ignore_label,
7: 1, 8: 2, 9: ignore_label,
10: ignore_label, 11: 3, 12: 4,
13: 5, 14: ignore_label, 15: ignore_label,
16: ignore_label, 17: 6, 18: ignore_label,
19: 7, 20: 8, 21: 9, 22: 10, 23: 11, 24: 12,
25: 13, 26: 14, 27: 15, 28: 16,
29: ignore_label, 30: ignore_label,
31: 17, 32: 18, 33: 19}
# mean and std
self.normalize = transforms.Normalize(
# ImageNet mean and std applied
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
elif 'Histo' in opt.root_dataset or 'histomri' in opt.list_train:
self.label_mapping = {}
# mean and std
self.normalize = transforms.Normalize(
# histomri train mean and std applied
mean=[0.8223, 0.7783, 0.7847],
std=[0.210, 0.216, 0.241])
elif 'DeepGlob' in opt.root_dataset or 'DeepGlob' in opt.list_train:
# ignore_label=7
if opt.ignore_index == 0:
self.label_mapping = {0: 2,
1: 3, 2: 4,
3: 5, 4: 6,
5: 7, 6: 1,
}
elif opt.ignore_index == 6:
self.label_mapping = {0: 1,
1: 2, 2: 3,
3: 4, 4: 5,
5: 6, 6: 7,
}
# mean and std
self.normalize = transforms.Normalize(
# DeepGlobe train mean and std applied
mean=[0.282, 0.379, 0.408],
std=[0.089, 0.101, 0.127])
else:
raise Exception('Unknown root for mapping and normalisation!')
# parse options
self.imgSizes = opt.imgSizes
self.imgMaxSize = opt.imgMaxSize
# max down sampling rate of network to avoid rounding during conv or pooling
self.padding_constant = opt.padding_constant
# parse the input list
self.parse_input_list(odgt, **kwargs)
def convert_label(self, label, inverse=False):
label = np.array(label)
temp = label.copy()
if inverse:
for v, k in self.label_mapping.items():
label[temp == k] = v
else:
for k, v in self.label_mapping.items():
label[temp == k] = v
return label
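# A worked example (hypothetical mapping, mirroring the four-class Gleason case above):
# with label_mapping = {0: 1, 1: 1, 2: 1, 3: 2, 4: 3, 5: 4, 6: 1},
# convert_label(np.array([0, 3, 4, 5])) yields array([1, 2, 3, 4]);
# inverse=True maps training ids back to raw annotation values, though not
# one-to-one here, since several raw labels share the same training id.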
def parse_input_list(self, odgt, max_sample=-1, start_idx=-1, end_idx=-1):
if isinstance(odgt, list):
self.list_sample = odgt
elif isinstance(odgt, str):
self.list_sample = [json.loads(x.rstrip()) for x in open(odgt, 'r')]
if max_sample > 0:
self.list_sample = self.list_sample[0:max_sample]
if start_idx >= 0 and end_idx >= 0: # divide file list
self.list_sample = self.list_sample[start_idx:end_idx]
self.num_sample = len(self.list_sample)
assert self.num_sample > 0
print('# samples: {}'.format(self.num_sample))
def img_transform(self, img):
# 0-255 to 0-1
img = np.float32(np.array(img)) / 255.
img = img.transpose((2, 0, 1))
img = self.normalize(torch.from_numpy(img.copy()))
return img
def img_transform_unnorm(self, img):
# 0-255 to 0-1
img = np.float32(np.array(img)) / 255.
img = img.transpose((2, 0, 1))
img = torch.from_numpy(img.copy())
return img
def img_transform_rev(self, img):
# 0-255 to 0-1
img = np.float32(np.array(img)) / 255.
img = img.transpose((2, 0, 1))
img = torch.from_numpy(img.copy())
return img
def segm_transform(self, segm):
# to tensor, -1 to 149
# !!!!! JC: This is why all data need to be mapped to 1..numClass,
# and because of this, ignore_index (in CrossEntropy/OhemCrossEntropy/IoU) = ignore_label (in dataset class_mapping) - 1
segm = torch.from_numpy(np.array(segm)).long() - 1
return segm
# Round x up to the nearest multiple of p, so that x' >= x
def round2nearest_multiple(self, x, p):
return ((x - 1) // p + 1) * p
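# A minimal, self-contained sketch of the round-up rule above; _round_up_demo is a
# standalone re-implementation for illustration only, not part of the dataset API.
def _round_up_demo(x, p):
    # smallest multiple of p that is >= x
    return ((x - 1) // p + 1) * p
assert _round_up_demo(513, 32) == 544  # 513 is padded up to the next multiple of 32
assert _round_up_demo(512, 32) == 512  # exact multiples are left unchanged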
class TrainDataset(BaseDataset):
def __init__(self, root_dataset, odgt, opt, batch_per_gpu=1, cal_REV=False, **kwargs):
super(TrainDataset, self).__init__(odgt, opt, **kwargs)
self.root_dataset = root_dataset
# down sampling rate of segm label
self.segm_downsampling_rate = opt.segm_downsampling_rate
self.batch_per_gpu = batch_per_gpu
# classify images into two classes: 1. h > w and 2. h <= w
self.batch_record_list = [[], []]
# override dataset length when training with batch_per_gpu > 1
self.cur_idx = 0
self.if_shuffled = False
# augmentation
self.augmentation = opt.random_flip
self.balance_sam_idx = 0
self.num_class = opt.num_class
self.cal_REV = cal_REV
def _get_sub_batch(self):
while True:
# get a sample record
this_sample = self.list_sample[self.cur_idx]
if self.augmentation == 'balance_sample' and self.balance_sam_idx > 2:
# every 3 steps, search for a rare-class patch and reset the idx, so that the
# severely rare high-grade class is represented alongside the other 3 balanced classes
search_rare_class = True
s_idx = self.cur_idx
while search_rare_class:
search_sample = self.list_sample[s_idx]
s_idx += 1
if s_idx >= self.num_sample:
s_idx = 0
segm_path = os.path.join(self.root_dataset, search_sample['fpath_segm'])
segm = self.convert_label(Image.open(segm_path))
hist, _ = np.histogram(segm, bins=self.num_class, range=(0, self.num_class-1))
if (hist[-1] / np.sum(hist)) > 0.25:
this_sample = search_sample
search_rare_class = False
self.balance_sam_idx = 0
self.balance_sam_idx += 1
if this_sample['height'] > this_sample['width']:
self.batch_record_list[0].append(this_sample) # h > w, go to 1st class
else:
self.batch_record_list[1].append(this_sample) # h <= w, go to 2nd class
# update current sample pointer
self.cur_idx += 1
if self.cur_idx >= self.num_sample:
self.cur_idx = 0
np.random.shuffle(self.list_sample)
if len(self.batch_record_list[0]) == self.batch_per_gpu:
batch_records = self.batch_record_list[0]
self.batch_record_list[0] = []
break
elif len(self.batch_record_list[1]) == self.batch_per_gpu:
batch_records = self.batch_record_list[1]
self.batch_record_list[1] = []
break
return batch_records
def __getitem__(self, index):
# NOTE: random shuffle for the first time. shuffle in __init__ is useless
if not self.if_shuffled:
np.random.seed(index)
np.random.shuffle(self.list_sample)
self.if_shuffled = True
# get sub-batch candidates
batch_records = self._get_sub_batch()
# resize all images' short edges to the chosen size
if isinstance(self.imgSizes, list) or isinstance(self.imgSizes, tuple):
this_short_size = np.random.choice(self.imgSizes)
else:
this_short_size = self.imgSizes
# calculate the BATCH's height and width
# since we concat more than one sample, the batch's h and w shall be larger than EACH sample
batch_widths = np.zeros(self.batch_per_gpu, np.int32)
batch_heights = np.zeros(self.batch_per_gpu, np.int32)
for i in range(self.batch_per_gpu):
img_height, img_width = batch_records[i]['height'], batch_records[i]['width']
if self.imgMaxSize == 1:
# discard 1st downsample in foveation model, i.e. create foveation map on the original image
this_scale = 1
else:
this_scale = min(
this_short_size / min(img_height, img_width), \
self.imgMaxSize / max(img_height, img_width))
batch_widths[i] = img_width * this_scale
batch_heights[i] = img_height * this_scale
# Here we must pad both input image and segmentation map to size h' and w' so that p | h' and p | w'
batch_width = np.max(batch_widths)
batch_height = np.max(batch_heights)
batch_width = int(self.round2nearest_multiple(batch_width, self.padding_constant))
batch_height = int(self.round2nearest_multiple(batch_height, self.padding_constant))
assert self.padding_constant >= self.segm_downsampling_rate, \
'padding constant must be equal to or larger than segm downsampling rate'
batch_images = torch.zeros(
self.batch_per_gpu, 3, batch_height, batch_width)
batch_segms = torch.zeros(
self.batch_per_gpu,
batch_height // self.segm_downsampling_rate,
batch_width // self.segm_downsampling_rate).long()
for i in range(self.batch_per_gpu):
this_record = batch_records[i]
# load image and label
image_path = os.path.join(self.root_dataset, this_record['fpath_img'])
segm_path = os.path.join(self.root_dataset, this_record['fpath_segm'])
# skip non-existent training samples
if not os.path.isfile(segm_path):
continue
img = Image.open(image_path).convert('RGB')
segm = Image.open(segm_path)
assert(segm.mode == "L")
assert(img.size[0] == segm.size[0])
assert(img.size[1] == segm.size[1])
# print(img.size)
# random_flip
if self.augmentation == 'Flip':
if np.random.choice([0, 1]):
img = img.transpose(Image.FLIP_LEFT_RIGHT)
segm = segm.transpose(Image.FLIP_LEFT_RIGHT)
elif self.augmentation == 'balance_sample' and (i+1) % 4 == 0:
aug = A.Compose([
A.RandomCrop(self.imgSizes[0], self.imgSizes[1]),
A.Flip()
],p=1)
img = np.array(img)
segm = np.array(segm)
search_rare = True
while search_rare:
augmented = aug(image=img, mask=segm)
segm_s = self.convert_label(augmented['mask'])
hist, _ = np.histogram(segm_s, bins=self.num_class, range=(0, self.num_class-1))
if (hist[-1] / np.sum(hist)) > 0.25:
img = Image.fromarray(augmented['image'])
segm = Image.fromarray(augmented['mask'])
search_rare = False
elif self.augmentation == 'fullFoV_balance_sample' and (i+1) % 4 == 0:
img = imresize(img, (batch_widths[i], batch_heights[i]), interp='bilinear')
segm = imresize(segm, (batch_widths[i], batch_heights[i]), interp='nearest')
aug = A.Compose([
A.RandomCrop(batch_widths[i], batch_heights[i]),
A.Flip()
],p=1)
img = np.array(img)
import xarray as xr
import numpy as np
class SplitAndStandardize:
"""Class instantiation of SplitAndStandardize:
Here we will be preprocessing data for deep learning model training.
This module includes methods for training and testing data splits and standardization.
Attributes:
climate (str): The climate period to derive deep learning data for; ``current`` or ``future``.
variable (str): Variable to run script the for, which can include ``TK``, ``EV``, ``EU``, ``QVAPOR``,
``PRESS``, ``W_vert``, ``WMAX``, ``DBZ``, ``CTT``, ``UH25``, ``UH03``, or ``MASK``.
percent_split (float): Percentage of total data to assign as training data. The remaining data will be
assigned as testing data. For example, 0.6 is 60% training data, 40% testing data.
working_directory (str): The directory path to where the produced files will be saved and worked from.
threshold1 (int): The threshold used for the chosen classification method (e.g., 75 for UH25).
mask (boolean): Whether the threshold was applied within the storm patch mask or not. Defaults to ``False``.
unbalanced (boolean): Whether training data will be artificially balanced (``False``) or left unbalanced (``True``). Defaults to ``False``.
currenttrain_futuretest (boolean): Whether to standardize with the current-climate training distribution when
training on current-climate data and testing on future-climate data. Defaults to ``False``.
Raises:
Exceptions: Checks whether correct values were input for climate, variable, and percent_split.
"""
def __init__(self, climate, variable, percent_split, working_directory, threshold1, mask=False, unbalanced=False,
currenttrain_futuretest=False, kfold_total=5, kfold_indx=None, use_kfold=False):
# assigning class attributes
if climate!='current' and climate!='future':
raise Exception("Please enter current or future for climate option.")
else:
self.climate=climate
# variable name checks and string automatic assignments
if variable!='TK' and variable!='EV' and variable!='EU' and variable!='QVAPOR' and variable!='PRESS' and variable!='W_vert' \
and variable!='WMAX' and variable!='DBZ' and variable!='CTT' and variable!='UH25' and variable!='UH03' and variable!='MASK':
raise Exception("Please enter TK, EV, EU, QVAPOR, PRESS, W_vert, UH25, UH03, MAXW, CTT, DBZ, or MASK as variable.")
else:
self.variable=variable
# temperature at 1, 3, 5, and 7 km
if self.variable=="TK":
self.choice_var1="temp_sev_1"
self.choice_var3="temp_sev_3"
self.choice_var5="temp_sev_5"
self.choice_var7="temp_sev_7"
self.attrs_array=np.array(["tk_1km", "tk_3km", "tk_5km", "tk_7km"])
self.single=False
# v-wind at 1, 3, 5, and 7 km
if self.variable=="EV":
self.choice_var1="evwd_sev_1"
self.choice_var3="evwd_sev_3"
self.choice_var5="evwd_sev_5"
self.choice_var7="evwd_sev_7"
self.attrs_array=np.array(["ev_1km", "ev_3km", "ev_5km", "ev_7km"])
self.single=False
# u-wind at 1, 3, 5, and 7 km
if self.variable=="EU":
self.choice_var1="euwd_sev_1"
self.choice_var3="euwd_sev_3"
self.choice_var5="euwd_sev_5"
self.choice_var7="euwd_sev_7"
self.attrs_array=np.array(["eu_1km", "eu_3km", "eu_5km", "eu_7km"])
self.single=False
# water vapor at 1, 3, 5, and 7 km
if self.variable=="QVAPOR":
self.choice_var1="qvap_sev_1"
self.choice_var3="qvap_sev_3"
self.choice_var5="qvap_sev_5"
self.choice_var7="qvap_sev_7"
self.attrs_array=np.array(["qv_1km", "qv_3km", "qv_5km", "qv_7km"])
self.single=False
# pressure at 1, 3, 5, and 7 km
if self.variable=="PRESS":
self.choice_var1="pres_sev_1"
self.choice_var3="pres_sev_3"
self.choice_var5="pres_sev_5"
self.choice_var7="pres_sev_7"
self.attrs_array=np.array(["pr_1km", "pr_3km", "pr_5km", "pr_7km"])
self.single=False
# w-wind at 1, 3, 5, and 7 km
if self.variable=="W_vert":
self.choice_var1="wwnd_sev_1"
self.choice_var3="wwnd_sev_3"
self.choice_var5="wwnd_sev_5"
self.choice_var7="wwnd_sev_7"
self.attrs_array=np.array(["ww_1km", "ww_3km", "ww_5km", "ww_7km"])
self.single=False
# max-w
if self.variable=="WMAX":
self.choice_var1="maxw_sev_1"
self.attrs_array=np.array(["maxw"])
self.single=True
# dbz
if self.variable=="DBZ":
self.choice_var1="dbzs_sev_1"
self.attrs_array=np.array(["dbzs"])
self.single=True
# cloud top temperature
if self.variable=="CTT":
self.choice_var1="ctts_sev_1"
self.attrs_array=np.array(["ctts"])
self.single=True
# 2-5 km updraft helicity
if self.variable=="UH25":
self.choice_var1="uh25_sev_1"
self.attrs_array=np.array(["uh25"])
self.single=True
# 0-3 km updraft helicity
if self.variable=="UH03":
self.choice_var1="uh03_sev_1"
self.attrs_array=np.array(["uh03"])
self.single=True
# storm masks
if self.variable=="MASK":
self.choice_var1="mask_sev_1"
self.attrs_array=np.array(["mask"])
self.single=True
# percent splitting for train and test sets
if percent_split>=1:
raise Exception("Percent split should be a float less than 1.")
if percent_split<1:
self.percent_split=percent_split
# assign class attributes
self.working_directory=working_directory
self.threshold1=threshold1
self.unbalanced=unbalanced
self.mask=mask
# mask option string naming for files
if not self.mask:
self.mask_str='nomask'
if self.mask:
self.mask_str='mask'
# boolean for training with current, testing with future, standardization
self.currenttrain_futuretest=currenttrain_futuretest
if self.currenttrain_futuretest:
if self.climate == 'current':
raise Exception("Set currenttrain_futuretest to False!")
# for k-fold cross validation
self.use_kfold=use_kfold
if self.use_kfold:
self.kfold_total=kfold_total
self.kfold_indx=kfold_indx
def variable_translate(self):
"""Variable name for the respective filenames.
Returns:
variable (str): The variable string used to save files.
Raises:
ValueError: Input variable must be from available list.
"""
var={
'EU':'EU',
'EV':'EV',
'TK':'TK',
'QVAPOR':'QVAPOR',
'WMAX':'MAXW',
'W_vert':'W',
'PRESS':'P',
'DBZ':'DBZ',
'CTT':'CTT',
'UH25':'UH25',
'UH03':'UH03',
'MASK':'MASK'
}
try:
out=var[self.variable]
return out
except:
raise ValueError("Please enter ``TK``, ``EU``, ``EV``, ``QVAPOR``, ``PRESS``, ``DBZ``, ``CTT``, ``UH25``, ``UH03``, ``W_vert``, ``WMAX``, or ``MASK`` as variable.")
def open_above_threshold(self):
"""Open and concat files for the six months of analysis (threshold exceedance).
Returns:
data (Xarray dataset): Concatenated six months of data.
"""
# opening monthly above threshold files
data_dec=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_uh{self.threshold1}_{self.mask_str}_12.nc",
parallel=True, combine='by_coords')
data_jan=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_uh{self.threshold1}_{self.mask_str}_01.nc",
parallel=True, combine='by_coords')
data_feb=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_uh{self.threshold1}_{self.mask_str}_02.nc",
parallel=True, combine='by_coords')
data_mar=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_uh{self.threshold1}_{self.mask_str}_03.nc",
parallel=True, combine='by_coords')
data_apr=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_uh{self.threshold1}_{self.mask_str}_04.nc",
parallel=True, combine='by_coords')
data_may=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_uh{self.threshold1}_{self.mask_str}_05.nc",
parallel=True, combine='by_coords')
# concatenating
data=xr.concat([data_dec, data_jan, data_feb, data_mar, data_apr, data_may], dim='patch')
# closing files (these are large files!)
data_dec=data_dec.close()
data_jan=data_jan.close()
data_feb=data_feb.close()
data_mar=data_mar.close()
data_apr=data_apr.close()
data_may=data_may.close()
return data
def open_below_threshold(self):
"""Open and concat files for six months of analysis (threshold non-exceedance).
Returns:
data (Xarray dataset): Concatenated six months of data.
"""
# opening monthly below threshold files
data_dec=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_nonuh{self.threshold1}_{self.mask_str}_12.nc",
parallel=True, combine='by_coords')
data_jan=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_nonuh{self.threshold1}_{self.mask_str}_01.nc",
parallel=True, combine='by_coords')
data_feb=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_nonuh{self.threshold1}_{self.mask_str}_02.nc",
parallel=True, combine='by_coords')
data_mar=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_nonuh{self.threshold1}_{self.mask_str}_03.nc",
parallel=True, combine='by_coords')
data_apr=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_nonuh{self.threshold1}_{self.mask_str}_04.nc",
parallel=True, combine='by_coords')
data_may=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_nonuh{self.threshold1}_{self.mask_str}_05.nc",
parallel=True, combine='by_coords')
# concatenating
data=xr.concat([data_dec, data_jan, data_feb, data_mar, data_apr, data_may], dim='patch')
# closing files (these are large files!)
data_dec=data_dec.close()
data_jan=data_jan.close()
data_feb=data_feb.close()
data_mar=data_mar.close()
data_apr=data_apr.close()
data_may=data_may.close()
return data
def grab_variables(self, data):
"""Eagerly load variable data. This function converts dask arrays into numpy arrays.
Args:
data (Xarray dataset): The original Xarray dataset containing dask arrays.
Returns:
data_1, data_2, data_3, data_4 or data_1 (numpy array(s)): Input data as numpy arrays.
"""
# if variable file contains 4 heights
if not self.single:
data_1=data[self.choice_var1].values
data_2=data[self.choice_var3].values
data_3=data[self.choice_var5].values
data_4=data[self.choice_var7].values
return data_1, data_2, data_3, data_4
# if variable file is single height
if self.single:
data_1=data[self.choice_var1].values
return data_1
def create_traintest_data(self, data_b, data_a, return_label=False):
"""This function performs balancing of above and below threshold data for training and testing data. Data is permuted
before being assigned to training and testing groups.
The training group sample size is computed using the assigned percentage (``self.percent_split``) from the above threshold population.
Then, the testing group sample size is computed using the leftover percentage (e.g., 1-``self.percent_split``) from a population
with a similar ratio of above and below threshold storm patches (e.g., ~5% above threshold to ~95% below threshold). This is done to
artificially balance the ratio of threshold-exceeding storms to that of non-exceeding storms, to ensure that the training data set
contains sufficient examples of above threshold storm patches, given that they are rare events. The testing data set is left with
a population of storms that resembles the original data's population.
Args:
data_b (numpy array): Concatenated six months of data below the threshold.
data_a (numpy array): Concatenated six months of data exceeding the threshold.
return_label (boolean): Whether to return the label data or not. Defaults to ``False``.
Returns:
train_data, test_data or train_data, test_data, train_label, test_label (numpy arrays): The training and testing data, and if
return_label=``True``, the training and testing data labels for supervised learning.
"""
# train above (stratified sampling)
np.random.seed(0)
select_data=np.random.permutation(data_a.shape[0])[:int(data_a.shape[0]*self.percent_split)]
train_above=data_a[select_data]
# train below (stratified sampling)
np.random.seed(0)
select_data=np.random.permutation(data_b.shape[0])[:int(data_a.shape[0]*self.percent_split)]
train_below=data_b[select_data]
# test above (stratified sampling)
np.random.seed(0)
select_data=np.random.permutation(data_a.shape[0])[int(data_a.shape[0]*self.percent_split):]
test_above=data_a[select_data]
# generate index for test below (stratified sampling)
indx_below=int((((data_a.shape[0]*(1-self.percent_split))*data_b.shape[0])/data_a.shape[0])+(data_a.shape[0]*(1-self.percent_split)))
# test below (stratified sampling)
np.random.seed(0)
select_data=np.random.permutation(data_b.shape[0])[int(data_a.shape[0] * self.percent_split):indx_below]
test_below=data_b[select_data]
train_data=np.vstack([train_above, train_below])
if return_label:
train_above_label=np.ones(train_above.shape[0])
train_below_label=np.zeros(train_below.shape[0])
train_label=np.hstack([train_above_label, train_below_label])
test_data=np.vstack([test_above, test_below])
if return_label:
test_above_label=np.ones(test_above.shape[0])
test_below_label=np.zeros(test_below.shape[0])
test_label=np.hstack([test_above_label, test_below_label])
# finally, permute the data that has been merged and properly balanced
np.random.seed(10)
train_data=np.random.permutation(train_data)
np.random.seed(10)
test_data=np.random.permutation(test_data)
if not return_label:
return train_data, test_data
if return_label:
np.random.seed(10)
train_label=np.random.permutation(train_label)
np.random.seed(10)
test_label=np.random.permutation(test_label)
return train_data, test_data, train_label, test_label
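# Worked example of the balancing arithmetic above (hypothetical counts): with
# 1,000 above-threshold patches (data_a), 19,000 below-threshold patches (data_b)
# and percent_split=0.6, training takes 600 above + 600 below (balanced), while
# indx_below = (1000*0.4*19000)/1000 + 1000*0.4 = 8000, so testing takes the 400
# leftover above-threshold patches plus below-threshold patches [600:8000]
# (7,400), i.e. a ~5%/95% split that matches the original population ratio.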
def create_traintest_unbalanced(self, data_b, data_a, return_label=False):
"""This function performs creates and permutes training and testing data.
Args:
data_b (numpy array): Concatenated six months of data exceeding the threshold.
data_a (numpy array): Concatenated six months of data below the threshold.
return_label (boolean): Whether to return the label data or not. Defaults to ``False``.
Returns:
train_data, test_data or train_data, test_data, train_label, test_label (numpy arrays): The training and testing data, and if
return_label=``True``, the training and testing data labels for supervised learning.
"""
# train above UH threshold (stratified sampling)
np.random.seed(0)
select_data=np.random.permutation(data_a.shape[0])[:int(data_a.shape[0]*self.percent_split)]
train_above=data_a[select_data]
# train below UH threshold (stratified sampling)
np.random.seed(0)
select_data=np.random.permutation(data_b.shape[0])[:int(data_b.shape[0]*self.percent_split)]
train_below=data_b[select_data]
# test above UH threshold (stratified sampling)
np.random.seed(0)
select_data=np.random.permutation(data_a.shape[0])[int(data_a.shape[0]*self.percent_split):]
test_above=data_a[select_data]
# test below UH threshold (stratified sampling)
np.random.seed(0)
select_data=np.random.permutation(data_b.shape[0])[int(data_b.shape[0]*self.percent_split):]
test_below=data_b[select_data]
train_data=np.vstack([train_above, train_below])
if return_label:
train_above_label=np.ones(train_above.shape[0])
train_below_label=np.zeros(train_below.shape[0])
train_label=np.hstack([train_above_label, train_below_label])
test_data=np.vstack([test_above, test_below])
if return_label:
test_above_label=np.ones(test_above.shape[0])
test_below_label=np.zeros(test_below.shape[0])
test_label=np.hstack([test_above_label, test_below_label])
# finally, permute the merged (unbalanced) data
np.random.seed(10)
train_data=np.random.permutation(train_data)
np.random.seed(10)
test_data=np.random.permutation(test_data)
if not return_label:
return train_data, test_data
if return_label:
np.random.seed(10)
train_label=np.random.permutation(train_label)
np.random.seed(10)
test_label=np.random.permutation(test_label)
return train_data, test_data, train_label, test_label
def create_traintest_unbalanced_kfold(self, data_b, data_a, return_label=False):
"""This function performs creates and permutes training and testing data for k-fold cross validation.
Args:
data_b (numpy array): Concatenated six months of data exceeding the threshold.
data_a (numpy array): Concatenated six months of data below the threshold.
return_label (boolean): Whether to return the label data or not. Defaults to ``False``.
Returns:
train_data, test_data or train_data, test_data, train_label, test_label (numpy arrays): The training and testing data, and if
return_label=``True``, the training and testing data labels for supervised learning.
"""
# helper functions for indices of k-fold cross validation
kgroups = np.arange(0, self.kfold_total, 1)
kgroups_leftover = np.delete(kgroups, self.kfold_indx)
# train above UH threshold (stratified sampling)
np.random.seed(0)
select_data=np.hstack(np.array(np.array_split(np.random.permutation(
data_a.shape[0]), self.kfold_total))[[kgroups_leftover[0],kgroups_leftover[1],kgroups_leftover[2],kgroups_leftover[3]]])
train_above=data_a[select_data]
# train below UH threshold (stratified sampling)
np.random.seed(0)
select_data=np.hstack(np.array(np.array_split(np.random.permutation(
data_b.shape[0]), self.kfold_total))[[kgroups_leftover[0],kgroups_leftover[1],kgroups_leftover[2],kgroups_leftover[3]]])
train_below=data_b[select_data]
# test above UH threshold (stratified sampling)
np.random.seed(0)
select_data=np.hstack(np.array(np.array_split(np.random.permutation(data_a.shape[0]), self.kfold_total))[[self.kfold_indx]])
test_above=data_a[select_data]
# test below UH threshold (stratified sampling)
np.random.seed(0)
select_data=np.hstack(np.array(np.array_split(np.random.permutation(data_b.shape[0]), self.kfold_total))[[self.kfold_indx]])
test_below=data_b[select_data]
train_data=np.vstack([train_above, train_below])
if return_label:
train_above_label=np.ones(train_above.shape[0])
train_below_label=np.zeros(train_below.shape[0])
train_label=np.hstack([train_above_label, train_below_label])
test_data=np.vstack([test_above, test_below])
if return_label:
test_above_label=np.ones(test_above.shape[0])
test_below_label=np.zeros(test_below.shape[0])
test_label=np.hstack([test_above_label, test_below_label])
# finally, permute the merged data
np.random.seed(10)
train_data=np.random.permutation(train_data)
np.random.seed(10)
test_data=np.random.permutation(test_data)
if not return_label:
return train_data, test_data
if return_label:
np.random.seed(10)
train_label=np.random.permutation(train_label)
np.random.seed(10)
test_label=np.random.permutation(test_label)
return train_data, test_data, train_label, test_label
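# Worked example of the k-fold bookkeeping above (hypothetical settings): with
# kfold_total=5 and kfold_indx=2, kgroups = [0, 1, 2, 3, 4] and
# kgroups_leftover = [0, 1, 3, 4]; np.array_split cuts the permuted indices into
# 5 near-equal chunks, so chunk 2 becomes the test split and the remaining four
# chunks are stacked via np.hstack into the training split.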
def standardize_scale_apply(self, data):
"""Z-score standardization of the training data.
Args:
data (numpy array): Input data to standardize.
Returns:
data (numpy array): Input data standardized.
"""
if self.variable=="UH03":
data[np.isinf(data)]=0.0
if not self.currenttrain_futuretest:
return np.divide((data - np.nanmean(data)), np.nanstd(data))
if self.currenttrain_futuretest:
if self.unbalanced:
if not self.use_kfold:
temp_stat = xr.open_dataset(
f"/glade/scratch/molina/DL_proj/current_conus_fields/dl_preprocess/current_{self.variable_translate().lower()}_{self.mask_str}_dldata_traindist_unbalanced.nc")
if self.use_kfold:
temp_stat = xr.open_dataset(
f"/glade/scratch/molina/DL_proj/current_conus_fields/dl_preprocess/current_{self.variable_translate().lower()}_{self.mask_str}_dldata_traindist_unbalanced_k{self.kfold_indx}.nc")
if not self.unbalanced:
temp_stat = xr.open_dataset(
f"/glade/scratch/molina/DL_proj/current_conus_fields/dl_preprocess/current_{self.variable_translate().lower()}_{self.mask_str}_dldata_traindist.nc")
return np.divide((data - temp_stat['train_mean'][self.tmpindex].values), temp_stat['train_std'][self.tmpindex].values)
def standardize_scale_apply_test(self, train, test):
"""Z-score standardization of the test data using the training data mean and standard deviation.
Args:
train (numpy array): Input training data for Z-score distribution values (mean and standard deviation).
test (numpy array): Input testing data to standardize.
Returns:
test (numpy array): Input testing data standardized.
"""
if self.variable=="UH03":
train[np.isinf(train)]=0.0
test[np.isinf(test)]=0.0
if not self.currenttrain_futuretest:
return np.divide((test - np.nanmean(train)), np.nanstd(train))
if self.currenttrain_futuretest:
if self.unbalanced:
if not self.use_kfold:
temp_stat = xr.open_dataset(
f"/glade/scratch/molina/DL_proj/current_conus_fields/dl_preprocess/current_{self.variable_translate().lower()}_{self.mask_str}_dldata_traindist_unbalanced.nc")
if self.use_kfold:
temp_stat = xr.open_dataset(
f"/glade/scratch/molina/DL_proj/current_conus_fields/dl_preprocess/current_{self.variable_translate().lower()}_{self.mask_str}_dldata_traindist_unbalanced_k{self.kfold_indx}.nc")
if not self.unbalanced:
temp_stat = xr.open_dataset(
f"/glade/scratch/molina/DL_proj/current_conus_fields/dl_preprocess/current_{self.variable_translate().lower()}_{self.mask_str}_dldata_traindist.nc")
return np.divide((test - temp_stat['train_mean'][self.tmpindex].values), temp_stat['train_std'][self.tmpindex].values)
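# A small numeric sketch of the test-time standardization above (synthetic
# values): with train = [1, 2, 3] (mean 2, std ~0.816) and test = [2, 4], the
# standardized test set is (test - 2) / 0.816 ~= [0.0, 2.45]; the test data is
# deliberately scaled with the *training* distribution, never its own.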
def split_data_to_traintest(self, below1=None, below2=None, below3=None, below4=None, above1=None, above2=None, above3=None, above4=None):
"""Function that applies ``create_traintest_data()`` to various variables.
Args:
below1 (numpy array): Storm patch data that does not exceed the threshold. If there are multiple ``below`` arrays, they are arranged
from low-to-high heights above ground level. Defaults to ``None``.
below2 (numpy array): Storm patch data that does not exceed the threshold. Defaults to ``None``.
below3 (numpy array): Storm patch data that does not exceed the threshold. Defaults to ``None``.
below4 (numpy array): Storm patch data that does not exceed the threshold. Defaults to ``None``.
above1 (numpy array): Storm patch data that exceeds the threshold. If there are multiple ``above`` arrays, they are arranged from
low-to-high heights above ground level. Defaults to ``None``.
above2 (numpy array): Storm patch data that exceeds the threshold. Defaults to ``None``.
above3 (numpy array): Storm patch data that exceeds the threshold. Defaults to ``None``.
above4 (numpy array): Storm patch data that exceeds the threshold. Defaults to ``None``.
Returns:
train1, train2, train3, train4, train_label, test1, test2, test3, test4, test_label or train1, test1, train_label, test_label (numpy arrays):
The input data variables assembled into training and testing datasets. The corresponding labels are also output. Number of output arrays
depends on the variable type (e.g., if interpolated across various heights or if a single variable).
"""
if not self.single:
if not self.unbalanced:
train1, test1, train_label, test_label=self.create_traintest_data(below1, above1, return_label=True)
train2, test2=self.create_traintest_data(below2, above2, return_label=False)
train3, test3=self.create_traintest_data(below3, above3, return_label=False)
train4, test4=self.create_traintest_data(below4, above4, return_label=False)
return train1, train2, train3, train4, train_label, test1, test2, test3, test4, test_label
if self.unbalanced:
if not self.use_kfold:
train1, test1, train_label, test_label=self.create_traintest_unbalanced(below1, above1, return_label=True)
train2, test2=self.create_traintest_unbalanced(below2, above2, return_label=False)
train3, test3=self.create_traintest_unbalanced(below3, above3, return_label=False)
train4, test4=self.create_traintest_unbalanced(below4, above4, return_label=False)
if self.use_kfold:
train1, test1, train_label, test_label=self.create_traintest_unbalanced_kfold(below1, above1, return_label=True)
train2, test2=self.create_traintest_unbalanced_kfold(below2, above2, return_label=False)
train3, test3=self.create_traintest_unbalanced_kfold(below3, above3, return_label=False)
train4, test4=self.create_traintest_unbalanced_kfold(below4, above4, return_label=False)
return train1, train2, train3, train4, train_label, test1, test2, test3, test4, test_label
if self.single:
if not self.unbalanced:
train1, test1, train_label, test_label=self.create_traintest_data(below1, above1, return_label=True)
return train1, test1, train_label, test_label
if self.unbalanced:
if not self.use_kfold:
train1, test1, train_label, test_label=self.create_traintest_unbalanced(below1, above1, return_label=True)
if self.use_kfold:
train1, test1, train_label, test_label=self.create_traintest_unbalanced_kfold(below1, above1, return_label=True)
return train1, test1, train_label, test_label
def standardize_training(self, func, data1, data2=None, data3=None, data4=None):
"""Function to standardize the training data.
Args:
func (class method): The choice of standardization.
data1 (numpy array): Data to be standardized.
data2 (numpy array): Data to be standardized. Defaults to ``None``.
data3 (numpy array): Data to be standardized. Defaults to ``None``.
data4 (numpy array): Data to be standardized. Defaults to ``None``.
Returns:
data_scaled1, data_scaled2, data_scaled3, data_scaled4 or data_scaled1 (numpy array(s)): The training data standardized.
"""
self.tmpindex=0
data_scaled1=func(data1)
self.tmpindex=[]
if not self.single:
self.tmpindex=1
data_scaled2=func(data2)
self.tmpindex=[]
self.tmpindex=2
data_scaled3=func(data3)
self.tmpindex=[]
self.tmpindex=3
data_scaled4=func(data4)
self.tmpindex=[]
return data_scaled1, data_scaled2, data_scaled3, data_scaled4
if self.single:
return data_scaled1
def standardize_testing(self, func, train1=None, train2=None, train3=None, train4=None,
test1=None, test2=None, test3=None, test4=None):
"""Function to standardize the testing data.
Args:
func (class method): The choice of standardization.
train1 (numpy array): Training data for standardization of testing data.
train2 (numpy array): Training data for standardization of testing data. Defaults to ``None``.
train3 (numpy array): Training data for standardization of testing data. Defaults to ``None``.
train4 (numpy array): Training data for standardization of testing data. Defaults to ``None``.
test1 (numpy array): Testing data for standardization.
test2 (numpy array): Testing data for standardization. Defaults to ``None``.
test3 (numpy array): Testing data for standardization. Defaults to ``None``.
test4 (numpy array): Testing data for standardization. Defaults to ``None``.
Returns:
data1, data2, data3, data4 or data1 (numpy array(s)): The testing data standardized.
"""
self.tmpindex=0
data1=func(train1, test1)
self.tmpindex=[]
if not self.single:
self.tmpindex=1
data2=func(train2, test2)
self.tmpindex=[]
self.tmpindex=2
data3=func(train3, test3)
self.tmpindex=[]
self.tmpindex=3
data4=func(train4, test4)
self.tmpindex=[]
return data1, data2, data3, data4
if self.single:
return data1
def stack_the_data(self, data1, data2, data3, data4):
"""Stack the numpy arrays before assembling final xarray netcdf file for saving.
Args:
data1 (numpy array): Data to be stacked. Arrange from lowest (``data1``) to highest (``data4``) vertical heights.
data2 (numpy array): Data to be stacked.
data3 (numpy array): Data to be stacked.
data4 (numpy array): Data to be stacked.
Returns:
totaldata (numpy array): Stacked data variables.
"""
if not self.single:
totaldata=np.stack([data1, data2, data3, data4])
return totaldata
def return_train_mean_and_std(self, traindata1, traindata2=None, traindata3=None, traindata4=None):
"""Extract mean and std data to record statistical distributions prior to standardization.
This data will be used during deep learning model interpretation.
Args:
traindata1 (numpy array): Input training data for mean and standard deviation. Arrange from lowest (``traindata1``)
to highest (``traindata4``) vertical heights.
traindata2 (numpy array): Input training data for mean and standard deviation.
traindata3 (numpy array): Input training data for mean and standard deviation.
traindata4 (numpy array): Input training data for mean and standard deviation.
Returns:
Input training data's mean and standard deviation values (float).
"""
if not self.currenttrain_futuretest:
if not self.single:
return np.array([np.nanmean(traindata1), np.nanmean(traindata2), np.nanmean(traindata3), np.nanmean(traindata4)]), \
    np.array([np.nanstd(traindata1), np.nanstd(traindata2), np.nanstd(traindata3), np.nanstd(traindata4)])
import math
import rospy
import numpy as np
from tf import transformations as tra
from geometry_graph_msgs.msg import Node, geometry_msgs
from tub_feasibility_check import srv as kin_check_srv
from tub_feasibility_check.msg import BoundingBoxWithPose, AllowedCollision
from tub_feasibility_check.srv import CheckKinematicsResponse
from shape_msgs.msg import SolidPrimitive
import GraspFrameRecipes
import planner_utils as pu
class AlternativeBehavior:
# TODO this class should be adapted if return value of the feasibility check changes (e.g. switch conditions)
def __init__(self, feasibility_check_result, init_conf):
self.number_of_joints = len(feasibility_check_result.final_configuration)
self.trajectory_steps = []
for i in range(0, len(feasibility_check_result.trajectory), self.number_of_joints):
self.trajectory_steps.append(feasibility_check_result.trajectory[i:i+self.number_of_joints])
if np.allclose(init_conf, self.trajectory_steps[0]):
rospy.logwarn("Initial configuration {0} is first point in trajectory".format(init_conf))
# :1 = skip the initial position (TODO: remove if relative motion is used!)
self.trajectory_steps = self.trajectory_steps[1:]
def assert_that_initial_config_not_included(self, init_conf):
if np.allclose(init_conf, self.trajectory_steps[0]):
raise ValueError("Initial configuration {0} is first point in trajectory".format(init_conf))
def get_trajectory(self):
print("get_trajectory LEN:", len(self.trajectory_steps))
return np.transpose(np.array(self.trajectory_steps))
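# A minimal sketch of the chunk-then-transpose layout used above (synthetic
# numbers: 2 joints, 3 trajectory points; the flat result lists all joint values
# of step 0, then step 1, and so on):
_flat_demo = [0.0, 0.1, 0.5, 0.2, 1.0, 0.3]
_steps_demo = [_flat_demo[i:i + 2] for i in range(0, len(_flat_demo), 2)]
# get_trajectory returns one row per joint and one column per trajectory step
assert np.transpose(np.array(_steps_demo)).shape == (2, 3)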
class FeasibilityQueryParameters:
def __init__(self, checked_motions, goals, allowed_collisions, goal_manifold_frames, goal_manifold_orientations):
# TODO change multiple dictionaries to one Motion class?
# This list includes the checked motions in order (They have to be sequential!)
self.checked_motions = checked_motions
# The goal poses of the respective motions in op-space (index has to match index of checked_motions)
self.goals = goals
# The collisions that are allowed in message format per motion
self.allowed_collisions = allowed_collisions
# TODO docu
self.goal_manifold_frames = goal_manifold_frames
# TODO docu
self.goal_manifold_orientations = goal_manifold_orientations
def get_matching_ifco_wall(ifco_in_base_transform, ec_frame):
# transforms points in base frame to ifco frame
base_in_ifco_transform = tra.inverse_matrix(ifco_in_base_transform)
# ec x axis in ifco frame
ec_x_axis = base_in_ifco_transform.dot(ec_frame)[0:3, 0]
ec_z_axis = base_in_ifco_transform.dot(ec_frame)[0:3, 2]
# we can't check for zero because of small errors in the frame (due to vision or numerical uncertainty)
space_thresh = 0.1
# one could also check for dot-product = 0 instead of using the x-axis but this is prone to the same errors
if ec_z_axis.dot(np.array([1, 0, 0])) > space_thresh and ec_x_axis.dot(np.array([0, 1, 0])) > space_thresh:
# print("GET MATCHING=SOUTH", tf_dbg_call_to_string(ec_frame, frame_name='ifco_south'))
return 'south'
elif ec_z_axis.dot(np.array([1, 0, 0])) < -space_thresh and ec_x_axis.dot(np.array([0, 1, 0])) < -space_thresh:
# print("GET MATCHING=NORTH", tf_dbg_call_to_string(ec_frame, frame_name='ifco_north'))
return 'north'
elif ec_z_axis.dot(np.array([0, 1, 0])) < -space_thresh and ec_x_axis.dot(np.array([1, 0, 0])) > space_thresh:
# print("GET MATCHING=WEST", tf_dbg_call_to_string(ec_frame, frame_name='ifco_west'))
return 'west'
elif ec_z_axis.dot(np.array([0, 1, 0])) > space_thresh and ec_x_axis.dot(np.array([1, 0, 0])) < -space_thresh:
# print("GET MATCHING=EAST", tf_dbg_call_to_string(ec_frame, frame_name='ifco_east'))
return 'east'
else:
# This should never be reached. Just here to prevent bugs
raise ValueError("ERROR: Could not identify matching ifco wall. Check frames!")
def get_matching_ifco_corner(ifco_in_base_transform, ec_frame):
# transforms points in base frame to ifco frame
base_in_ifco_transform = tra.inverse_matrix(ifco_in_base_transform)
# ec (corner) z-axis in ifco frame
ec_z_axis = base_in_ifco_transform.dot(ec_frame)[0:3, 2]
# we can't check for zero because of small errors in the frame (due to vision or numerical uncertainty)
space_thresh = 0.0 # 0.1
if ec_z_axis.dot(np.array([1, 0, 0])) > space_thresh and ec_z_axis.dot(np.array([0, 1, 0])) > space_thresh:
print("GET MATCHING=SOUTH_EAST", pu.tf_dbg_call_to_string(ec_frame, frame_name='ifco_southeast'))
return 'south', 'east'
elif ec_z_axis.dot(np.array([1, 0, 0])) > space_thresh and ec_z_axis.dot(np.array([0, 1, 0])) < -space_thresh:
print("GET MATCHING=SOUTH_WEST", pu.tf_dbg_call_to_string(ec_frame, frame_name='ifco_southwest'))
return 'south', 'west'
elif ec_z_axis.dot(np.array([1, 0, 0])) < -space_thresh and ec_z_axis.dot(np.array([0, 1, 0])) < -space_thresh:
print("GET MATCHING=NORTH_WEST", pu.tf_dbg_call_to_string(ec_frame, frame_name='ifco_northwest'))
return 'north', 'west'
elif ec_z_axis.dot(np.array([1, 0, 0])) < -space_thresh and ec_z_axis.dot(np.array([0, 1, 0])) > space_thresh:
print("GET MATCHING=NORTH_EAST", pu.tf_dbg_call_to_string(ec_frame, frame_name='ifco_northeast'))
return 'north', 'east'
else:
# This should never be reached. Just here to prevent bugs
raise ValueError("ERROR: Could not identify matching ifco wall. Check frames!")
# Checks if the Y-Axis of the ifco frame points towards the robot (origin of base frame)
# The base frame is assumed to be the following way:
# x points to the robots front
# y points to the robots left (if you are behind the robot)
# z points upwards
def ifco_transform_needs_to_be_flipped(ifco_in_base_transform):
# we can't check for zero because of small errors in the frame (due to vision or numerical uncertainty)
space_thresh = 0.05
x_of_yaxis = ifco_in_base_transform[0, 1]
x_of_translation = ifco_in_base_transform[0, 3]
print(ifco_in_base_transform)
print(space_thresh, x_of_yaxis, x_of_translation)
if x_of_translation > space_thresh:
# ifco is in front of robot
return x_of_yaxis > 0
elif x_of_translation < -space_thresh:
# ifco is behind the robot
return x_of_yaxis < 0
else:
y_of_translation = ifco_in_base_transform[1, 3]
y_of_yaxis = ifco_in_base_transform[1, 1]
if y_of_translation < 0:
# ifco is to the right of the robot
return y_of_yaxis < 0
else:
# ifco is to the left of the robot
return y_of_yaxis > 0
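# Minimal illustration (synthetic transform): an ifco 1m in front of the robot
# whose y-axis also points away from the robot (along base +x) needs the
# 180-degree z-flip that check_kinematic_feasibility below applies.
_flip_demo = np.eye(4)
_flip_demo[0, 3] = 1.0           # ifco sits 1m in front of the robot
_flip_demo[0:3, 1] = [1, 0, 0]   # ifco y-axis points along base +x
assert ifco_transform_needs_to_be_flipped(_flip_demo)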
# This function will call TUB's feasibility checker to check a motion.
# If the motion is not feasible it will try to generate an alternative joint trajectory and place it into
# the given stored_trajectories argument (dictionary).
def check_kinematic_feasibility(current_object_idx, objects, object_params, current_ec_index, strategy, all_ec_frames,
ifco_in_base_transform, handarm_params, stored_trajectories):
if handarm_params is None:
raise ValueError("HandArmParameters can't be None, check callstack!")
print("IFCO_BEFORE", pu.tf_dbg_call_to_string(ifco_in_base_transform, frame_name='ifco_before'))
if ifco_transform_needs_to_be_flipped(ifco_in_base_transform):
# flip the ifco transform such that it fulfills the requirements of the feasibilty checker
# (y-axis of ifco points towards the robot)
rospy.loginfo("Flip ifco transform for tub feasibilty checker")
zflip_transform = tra.rotation_matrix(math.radians(180.0), [0, 0, 1])
ifco_in_base_transform = ifco_in_base_transform.dot(zflip_transform)
print("IFCO_AFTER", pu.tf_dbg_call_to_string(ifco_in_base_transform, frame_name='ifco_after'))
object = objects[current_object_idx]
ec_frame = all_ec_frames[current_ec_index]
if object['type'] in handarm_params[strategy]:
params = handarm_params[strategy][object['type']]
else:
params = handarm_params[strategy]['object']
# The initial joint configuration (goToView config)
# curr_start_config = rospy.get_param('planner_gui/robot_view_position') # TODO use current joint state instead?
# TODO also check goToView -> params['initial_goal'] (requires forward kinematics, or change to op-space)
curr_start_config = params['initial_goal']
if strategy == 'SurfaceGrasp':
call_params = prepare_surface_grasp_parameter(objects, current_object_idx, object_params, params)
elif strategy == "WallGrasp":
selected_wall_name = get_matching_ifco_wall(ifco_in_base_transform, ec_frame)
print("FOUND_EC: ", selected_wall_name)
blocked_ecs = ['north', 'east', 'west'] # TODO move to config file?
if selected_wall_name in blocked_ecs:
rospy.loginfo("Skipped wall " + selected_wall_name + " (Blacklisted)")
return 0
call_params = prepare_wall_grasp_parameter(ec_frame, selected_wall_name, objects, current_object_idx,
object_params, ifco_in_base_transform, params)
elif strategy == "CornerGrasp":
selected_wall_names = get_matching_ifco_corner(ifco_in_base_transform, ec_frame)
print("FOUND_EC: ", selected_wall_names)
blocked_ecs = [('north', 'east'), ('north', 'west'), ('south', 'west')] # TODO move to config file?
if selected_wall_names in blocked_ecs:
rospy.loginfo("Skipped corner " + selected_wall_names[0] + selected_wall_names[1] + " (Blacklisted)")
return 0
call_params = prepare_corner_grasp_parameter(ec_frame, selected_wall_names, objects, current_object_idx,
object_params, ifco_in_base_transform, params)
else:
raise ValueError("Kinematics checks are currently only supported for surface, wall and corner grasps, "
"but strategy was " + strategy)
# initialize stored trajectories for the given object
stored_trajectories[(current_object_idx, current_ec_index)] = {}
# The pose of the ifco (in base frame) in message format
ifco_pose = pu.transform_to_pose_msg(ifco_in_base_transform)
print("IFCO_POSE", ifco_pose)
# The bounding boxes of all objects in message format
bounding_boxes = []
for obj in objects:
obj_pose = pu.transform_to_pose_msg(obj['frame'])
obj_bbox = SolidPrimitive(type=SolidPrimitive.BOX,
dimensions=[obj['bounding_box'].x, obj['bounding_box'].y, obj['bounding_box'].z])
bounding_boxes.append(BoundingBoxWithPose(box=obj_bbox, pose=obj_pose))
print("BOUNDING_BOXES", bounding_boxes)
all_steps_okay = True
# perform the actual checks
for motion, curr_goal in zip(call_params.checked_motions, call_params.goals):
manifold_name = motion + '_manifold'
goal_pose = pu.transform_to_pose_msg(curr_goal)
print("GOAL_POSE", goal_pose)
print("INIT_CONF", curr_start_config)
goal_manifold_frame = pu.transform_to_pose_msg(call_params.goal_manifold_frames[motion])
goal_manifold_orientation = geometry_msgs.msg.Quaternion(x=call_params.goal_manifold_orientations[motion][0],
y=call_params.goal_manifold_orientations[motion][1],
z=call_params.goal_manifold_orientations[motion][2],
w=call_params.goal_manifold_orientations[motion][3])
check_feasibility = rospy.ServiceProxy('/check_kinematics', kin_check_srv.CheckKinematics)
print("allowed", call_params.allowed_collisions[motion])
print("Call check kinematics for " + motion + " (" + strategy + ")\nGoal:\n" + str(curr_goal))
res = check_feasibility(initial_configuration=curr_start_config,
goal_pose=goal_pose,
ifco_pose=ifco_pose,
bounding_boxes_with_poses=bounding_boxes,
goal_manifold_frame=goal_manifold_frame,
min_position_deltas=params[manifold_name]['min_position_deltas'],
max_position_deltas=params[manifold_name]['max_position_deltas'],
goal_manifold_orientation=goal_manifold_orientation,
min_orientation_deltas=params[manifold_name]['min_orientation_deltas'],
max_orientation_deltas=params[manifold_name]['max_orientation_deltas'],
allowed_collisions=call_params.allowed_collisions[motion]
)
print("check feasibility result was: " + str(res.status))
if res.status == CheckKinematicsResponse.FAILED:
# trajectory is not feasible and no alternative was found, directly return 0
return 0
elif res.status == CheckKinematicsResponse.REACHED_SAMPLED:
# original trajectory is not feasible, but alternative was found => save it
stored_trajectories[(current_object_idx, current_ec_index)][motion] = AlternativeBehavior(res, curr_start_config)
curr_start_config = res.final_configuration
all_steps_okay = False
print("FOUND ALTERNATIVE. New Start: ", curr_start_config)
elif res.status == CheckKinematicsResponse.REACHED_INITIAL:
# original trajectory is feasible, we save the alternative in case a later motion is not possible.
stored_trajectories[(current_object_idx, current_ec_index)][motion] = AlternativeBehavior(res, curr_start_config)
curr_start_config = res.final_configuration
print("USE NORMAL. Start: ", curr_start_config)
else:
raise ValueError(
"check_kinematics: No handler for result status of {} implemented".format(res.status))
if all_steps_okay:
# if all steps are okay use original trajectory TODO only replace preceding steps!
stored_trajectories[(current_object_idx, current_ec_index)] = {}
pass
# Either the initial trajectory was possible or an alternative behavior was generated
return 1.0
def prepare_surface_grasp_parameter(objects, current_object_idx, object_params, params):
# use kinematic checks
# TODO create proxy; make it a persistent connection?
# Code duplication from planner.py TODO put at a shared location
# Set the initial pose above the object
goal_ = np.copy(object_params['frame']) # TODO: this should be support_surface_frame
goal_[:3, 3] = tra.translation_from_matrix(object_params['frame'])
goal_ = goal_.dot(params['hand_transform'])
# the grasp frame is symmetrical - check which side is nicer to reach
# this is a hacky first version for our WAM
zflip_transform = tra.rotation_matrix(math.radians(180.0), [0, 0, 1])
if goal_[0][0] < 0:
goal_ = goal_.dot(zflip_transform)
# hand pose above object
pre_grasp_pose = goal_.dot(params['pre_approach_transform'])
# down_dist = params['down_dist'] # dist lower than ifco bottom: behavior of the high level planner
# dist = z difference to object centroid (both transformations are w.r.t. the world frame)
# (more realistic behavior since we have to touch the object for a successful grasp)
down_dist = pre_grasp_pose[2, 3] - object_params['frame'][2, 3] # get z-translation difference
# goal pose for go down movement
go_down_pose = tra.translation_matrix([0, 0, -down_dist]).dot(pre_grasp_pose)
post_grasp_pose = params['post_grasp_transform'].dot(
go_down_pose) # TODO it would be better to allow relative motion as goal frames
checked_motions = ["pre_approach",
"go_down"] # , "post_grasp_rot"] ,go_up, go_drop_off # TODO what about remaining motions? (see wallgrasp)
goals = [pre_grasp_pose, go_down_pose] # , post_grasp_pose]
# TODO what about using the bounding boxes as for automatic goal manifold calculation?
# Take orientation of object but translation of pre grasp pose
pre_grasp_pos_manifold = np.copy(object_params['frame'])
pre_grasp_pos_manifold[:3, 3] = tra.translation_from_matrix(pre_grasp_pose)
goal_manifold_frames = {
'pre_approach': pre_grasp_pos_manifold,
# Use object frame for resampling
'go_down': np.copy(object_params['frame']) # TODO change that again to go_down_pose!?
}
goal_manifold_orientations = {
# use hand orientation
'pre_approach': tra.quaternion_from_matrix(pre_grasp_pose),
# Use object orientation
'go_down': tra.quaternion_from_matrix(go_down_pose),
# tra.quaternion_from_matrix(object_params['frame']) # TODO use hand orientation instead?
}
# The collisions that are allowed per motion in message format
allowed_collisions = {
# no collisions are allowed during going to pre_grasp pose
'pre_approach': [],
'go_down': [AllowedCollision(type=AllowedCollision.BOUNDING_BOX, box_id=current_object_idx,
terminating=True, required=True),
AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT,
constraint_name='bottom', terminating=False)] +
[AllowedCollision(type=AllowedCollision.BOUNDING_BOX, box_id=obj_idx, terminating=False)
for obj_idx, o in enumerate(objects) if obj_idx != current_object_idx and
params['go_down_allow_touching_other_objects']
],
# TODO also account for the additional object in a way?
'post_grasp_rot': [AllowedCollision(type=AllowedCollision.BOUNDING_BOX, box_id=current_object_idx,
terminating=True),
AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT,
constraint_name='bottom', terminating=False)]
}
print("ALLOWED COLLISIONS:", allowed_collisions)
return FeasibilityQueryParameters(checked_motions, goals, allowed_collisions, goal_manifold_frames,
goal_manifold_orientations)
def prepare_wall_grasp_parameter(ec_frame, selected_wall_name, objects, current_object_idx, object_params,
ifco_in_base_transform, params):
# hand pose above and behind the object
pre_approach_transform = params['pre_approach_transform']
wall_frame = np.copy(ec_frame)
wall_frame[:3, 3] = tra.translation_from_matrix(object_params['frame'])
# apply hand transformation
ec_hand_frame = wall_frame.dot(params['hand_transform'])
# ec_hand_frame = (ec_frame.dot(params['hand_transform']))
pre_approach_pose = ec_hand_frame.dot(pre_approach_transform)
# down_dist = params['down_dist'] # dist lower than ifco bottom: behavior of the high level planner
# dist = z difference to ifco bottom minus hand frame offset (dist from hand frame to collision point)
# (more realistic behavior since we have a force threshold when going down to the bottom)
bounded_down_dist = pre_approach_pose[2, 3] - ifco_in_base_transform[2, 3]
hand_frame_to_bottom_offset = 0.07 # 7cm TODO maybe move to handarm_parameters.py
bounded_down_dist = min(params['down_dist'], bounded_down_dist - hand_frame_to_bottom_offset)
# goal pose for go down movement
go_down_pose = tra.translation_matrix([0, 0, -bounded_down_dist]).dot(pre_approach_pose)
# pose after lifting. This is somewhat fake, since the real go_down_pose will be determined by
# the FT-Switch during go_down and the actual lifted distance by the TimeSwitch (or a pose switch in case
# the robot allows precise small movements) TODO better solution?
fake_lift_up_dist = np.min([params['lift_dist'], 0.01]) # 1cm
corrective_lift_pose = tra.translation_matrix([0, 0, fake_lift_up_dist]).dot(go_down_pose)
dir_wall = tra.translation_matrix([0, 0, -params['sliding_dist']])
# TODO sliding_distance should be computed from wall and hand frame.
# slide direction is given by the normal of the wall
wall_frame = np.copy(ec_frame)
dir_wall[:3, 3] = wall_frame[:3, :3].dot(dir_wall[:3, 3])
# normal goal pose behind the wall
slide_to_wall_pose = dir_wall.dot(corrective_lift_pose)
# now project it into the wall plane!
z_projection = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
to_wall_plane_transform = wall_frame.dot(z_projection.dot(tra.inverse_matrix(wall_frame).dot(slide_to_wall_pose)))
slide_to_wall_pose[:3, 3] = tra.translation_from_matrix(to_wall_plane_transform)
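# In effect: the slide goal is re-expressed in wall coordinates via
# inverse_matrix(wall_frame), z_projection zeroes its component along the
# wall normal, and wall_frame maps the result back to the base frame;
# only the translation of this projected pose is kept.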
# TODO remove code duplication with planner.py (refactor code snippets to function calls) !!!!!!!
checked_motions = ['pre_approach', 'go_down', 'corrective_lift',
'slide_to_wall'] # TODO overcome problem of FT-Switch after go_down
goals = [pre_approach_pose, go_down_pose, corrective_lift_pose, slide_to_wall_pose] # TODO see checked_motions
# Take orientation of object but translation of pre grasp pose
pre_grasp_pos_manifold = np.copy(object_params['frame'])
pre_grasp_pos_manifold[:3, 3] = tra.translation_from_matrix(pre_approach_pose)
slide_pos_manifold = np.copy(slide_to_wall_pose)
goal_manifold_frames = {
'pre_approach': pre_grasp_pos_manifold,
# Use object frame for sampling
'go_down': np.copy(go_down_pose),
'corrective_lift': np.copy(corrective_lift_pose),
# should always be the same frame as go_down # TODO use world orientation?
# Use wall frame for sampling. Keep in mind that the wall frame has different orientation, than world.
'slide_to_wall': slide_pos_manifold,
}
goal_manifold_orientations = {
# use hand orientation
'pre_approach': tra.quaternion_from_matrix(pre_approach_pose),
# Use object orientation
'go_down': tra.quaternion_from_matrix(go_down_pose), # TODO use hand orientation instead?
# should always be the same orientation as go_down
'corrective_lift': tra.quaternion_from_matrix(corrective_lift_pose),
# use wall orientation
'slide_to_wall': tra.quaternion_from_matrix(wall_frame),
}
allowed_collisions = {
# 'init_joint': [],
# no collisions are allowed during going to pre_grasp pose
'pre_approach': [],
# Only allow touching the bottom of the ifco
'go_down': [AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT, constraint_name='bottom',
terminating=False),
],
'corrective_lift': [AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT, constraint_name='bottom',
terminating=False),
],
# TODO also allow all other objects to be touched during sliding motion
'slide_to_wall': [
# Allow all other objects to be touched as well
# (since hand will go through them in simulation) TODO desired behavior?
AllowedCollision(type=AllowedCollision.BOUNDING_BOX, box_id=obj_idx,
terminating=False, required=obj_idx == current_object_idx)
for obj_idx in range(0, len(objects))
] + [
AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT,
constraint_name=selected_wall_name, terminating=False),
AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT, constraint_name='bottom',
terminating=False),
],
}
return FeasibilityQueryParameters(checked_motions, goals, allowed_collisions, goal_manifold_frames,
goal_manifold_orientations)
def prepare_corner_grasp_parameter(ec_frame, selected_wall_names, objects, current_object_idx, object_params,
ifco_in_base_transform, params):
# hand pose above and behind the object
pre_approach_transform = params['pre_approach_transform']
corner_frame = np.copy(ec_frame)
# Copyright (c) Microsoft Corporation.
import os
from torch.utils.data import Dataset, DataLoader
import copy
from typing import Text, Union
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Function
from qlib.contrib.model.pytorch_utils import count_parameters
from qlib.data.dataset import DatasetH
from qlib.data.dataset.handler import DataHandlerLP
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.utils import get_or_create_path
class ADARNN(Model):
"""ADARNN Model
Parameters
----------
d_feat : int
input dimension for each time step
metric: str
the evaluate metric used in early stop
optimizer : str
optimizer name
GPU : str
the GPU ID(s) used for training
"""
def __init__(
self,
d_feat=6,
hidden_size=64,
num_layers=2,
dropout=0.0,
n_epochs=200,
pre_epoch=40,
dw=0.5,
loss_type="cosine",
len_seq=60,
len_win=0,
lr=0.001,
metric="mse",
batch_size=2000,
early_stop=20,
loss="mse",
optimizer="adam",
n_splits=2,
GPU=0,
seed=None,
**kwargs
):
# Set logger.
self.logger = get_module_logger("ADARNN")
self.logger.info("ADARNN pytorch version...")
os.environ["CUDA_VISIBLE_DEVICES"] = str(GPU)
# set hyper-parameters.
self.d_feat = d_feat
self.hidden_size = hidden_size
self.num_layers = num_layers
self.dropout = dropout
self.n_epochs = n_epochs
self.pre_epoch = pre_epoch
self.dw = dw
self.loss_type = loss_type
self.len_seq = len_seq
self.len_win = len_win
self.lr = lr
self.metric = metric
self.batch_size = batch_size
self.early_stop = early_stop
self.optimizer = optimizer.lower()
self.loss = loss
self.n_splits = n_splits
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
self.seed = seed
self.logger.info(
"ADARNN parameters setting:"
"\nd_feat : {}"
"\nhidden_size : {}"
"\nnum_layers : {}"
"\ndropout : {}"
"\nn_epochs : {}"
"\nlr : {}"
"\nmetric : {}"
"\nbatch_size : {}"
"\nearly_stop : {}"
"\noptimizer : {}"
"\nloss_type : {}"
"\nvisible_GPU : {}"
"\nuse_GPU : {}"
"\nseed : {}".format(
d_feat,
hidden_size,
num_layers,
dropout,
n_epochs,
lr,
metric,
batch_size,
early_stop,
optimizer.lower(),
loss,
GPU,
self.use_gpu,
seed,
)
)
if self.seed is not None:
np.random.seed(self.seed)
torch.manual_seed(self.seed)
n_hiddens = [hidden_size for _ in range(num_layers)]
self.model = AdaRNN(
use_bottleneck=False,
bottleneck_width=64,
n_input=d_feat,
n_hiddens=n_hiddens,
n_output=1,
dropout=dropout,
model_type="AdaRNN",
len_seq=len_seq,
trans_loss=loss_type,
)
self.logger.info("model:\n{:}".format(self.model))
self.logger.info("model size: {:.4f} MB".format(count_parameters(self.model)))
if optimizer.lower() == "adam":
self.train_optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
elif optimizer.lower() == "gd":
self.train_optimizer = optim.SGD(self.model.parameters(), lr=self.lr)
else:
raise NotImplementedError("optimizer {} is not supported!".format(optimizer))
self.fitted = False
self.model.to(self.device)
@property
def use_gpu(self):
return self.device != torch.device("cpu")
def train_AdaRNN(self, train_loader_list, epoch, dist_old=None, weight_mat=None):
self.model.train()
criterion = nn.MSELoss()
dist_mat = torch.zeros(self.num_layers, self.len_seq).to(self.device)
len_loader = np.inf
for loader in train_loader_list:
if len(loader) < len_loader:
len_loader = len(loader)
for data_all in zip(*train_loader_list):
self.train_optimizer.zero_grad()
list_feat = []
list_label = []
for data in data_all:
# feature :[36, 24, 6]
feature, label_reg = data[0].to(self.device).float(), data[1].to(self.device).float()
list_feat.append(feature)
list_label.append(label_reg)
flag = False
index = get_index(len(data_all) - 1)
for temp_index in index:
s1 = temp_index[0]
s2 = temp_index[1]
if list_feat[s1].shape[0] != list_feat[s2].shape[0]:
flag = True
break
if flag:
continue
total_loss = torch.zeros(1).to(self.device)
for i, n in enumerate(index):
feature_s = list_feat[n[0]]
feature_t = list_feat[n[1]]
label_reg_s = list_label[n[0]]
label_reg_t = list_label[n[1]]
feature_all = torch.cat((feature_s, feature_t), 0)
if epoch < self.pre_epoch:
pred_all, loss_transfer, out_weight_list = self.model.forward_pre_train(
feature_all, len_win=self.len_win
)
else:
pred_all, loss_transfer, dist, weight_mat = self.model.forward_Boosting(feature_all, weight_mat)
dist_mat = dist_mat + dist
pred_s = pred_all[0 : feature_s.size(0)]
pred_t = pred_all[feature_s.size(0) :]
loss_s = criterion(pred_s, label_reg_s)
loss_t = criterion(pred_t, label_reg_t)
total_loss = total_loss + loss_s + loss_t + self.dw * loss_transfer
self.train_optimizer.zero_grad()
total_loss.backward()
torch.nn.utils.clip_grad_value_(self.model.parameters(), 3.0)
self.train_optimizer.step()
if epoch >= self.pre_epoch:
if epoch > self.pre_epoch:
weight_mat = self.model.update_weight_Boosting(weight_mat, dist_old, dist_mat)
return weight_mat, dist_mat
else:
weight_mat = self.transform_type(out_weight_list)
return weight_mat, None
def calc_all_metrics(self, pred):
"""pred is a pandas dataframe that has two attributes: score (pred) and label (real)"""
res = {}
ic = pred.groupby(level="datetime").apply(lambda x: x.label.corr(x.score))
rank_ic = pred.groupby(level="datetime").apply(lambda x: x.label.corr(x.score, method="spearman"))
res["ic"] = ic.mean()
res["icir"] = ic.mean() / ic.std()
res["ric"] = rank_ic.mean()
res["ricir"] = rank_ic.mean() / rank_ic.std()
res["mse"] = -(pred["label"] - pred["score"]).mean()
res["loss"] = res["mse"]
return res
def test_epoch(self, df):
self.model.eval()
preds = self.infer(df["feature"])
label = df["label"].squeeze()
preds = pd.DataFrame({"label": label, "score": preds}, index=df.index)
metrics = self.calc_all_metrics(preds)
return metrics
def log_metrics(self, mode, metrics):
metrics = ["{}/{}: {:.6f}".format(k, mode, v) for k, v in metrics.items()]
metrics = ", ".join(metrics)
self.logger.info(metrics)
def fit(
self,
dataset: DatasetH,
evals_result=dict(),
save_path=None,
):
df_train, df_valid = dataset.prepare(
["train", "valid"],
col_set=["feature", "label"],
data_key=DataHandlerLP.DK_L,
)
# splits = ['2011-06-30']
days = df_train.index.get_level_values(level=0).unique()
train_splits = np.array_split(days, self.n_splits)
train_splits = [df_train[s[0] : s[-1]] for s in train_splits]
train_loader_list = [get_stock_loader(df, self.batch_size) for df in train_splits]
save_path = get_or_create_path(save_path)
stop_steps = 0
best_score = -np.inf
best_epoch = 0
evals_result["train"] = []
evals_result["valid"] = []
# train
self.logger.info("training...")
self.fitted = True
best_score = -np.inf
best_epoch = 0
weight_mat, dist_mat = None, None
for step in range(self.n_epochs):
self.logger.info("Epoch%d:", step)
self.logger.info("training...")
weight_mat, dist_mat = self.train_AdaRNN(train_loader_list, step, dist_mat, weight_mat)
self.logger.info("evaluating...")
train_metrics = self.test_epoch(df_train)
valid_metrics = self.test_epoch(df_valid)
self.log_metrics("train: ", train_metrics)
self.log_metrics("valid: ", valid_metrics)
valid_score = valid_metrics[self.metric]
train_score = train_metrics[self.metric]
evals_result["train"].append(train_score)
evals_result["valid"].append(valid_score)
if valid_score > best_score:
best_score = valid_score
stop_steps = 0
best_epoch = step
best_param = copy.deepcopy(self.model.state_dict())
else:
stop_steps += 1
if stop_steps >= self.early_stop:
self.logger.info("early stop")
break
self.logger.info("best score: %.6lf @ %d" % (best_score, best_epoch))
self.model.load_state_dict(best_param)
torch.save(best_param, save_path)
if self.use_gpu:
torch.cuda.empty_cache()
return best_score
def predict(self, dataset: DatasetH, segment: Union[Text, slice] = "test"):
if not self.fitted:
raise ValueError("model is not fitted yet!")
x_test = dataset.prepare(segment, col_set="feature", data_key=DataHandlerLP.DK_I)
return self.infer(x_test)
def infer(self, x_test):
index = x_test.index
self.model.eval()
x_values = x_test.values
sample_num = x_values.shape[0]
x_values = x_values.reshape(sample_num, self.d_feat, -1).transpose(0, 2, 1)
preds = []
for begin in range(sample_num)[:: self.batch_size]:
if sample_num - begin < self.batch_size:
end = sample_num
else:
end = begin + self.batch_size
x_batch = torch.from_numpy(x_values[begin:end]).float().to(self.device)
with torch.no_grad():
pred = self.model.predict(x_batch).detach().cpu().numpy()
preds.append(pred)
return pd.Series(np.concatenate(preds), index=index)
def transform_type(self, init_weight):
weight = torch.ones(self.num_layers, self.len_seq).to(self.device)
for i in range(self.num_layers):
for j in range(self.len_seq):
weight[i, j] = init_weight[i][j].item()
return weight
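# --- Usage sketch (illustrative only). `dataset` is an assumed, pre-built
# qlib DatasetH with "train"/"valid"/"test" segments; the hyper-parameter
# values below are placeholders, not tuned recommendations. ---
def _demo_adarnn(dataset):
    model = ADARNN(d_feat=6, hidden_size=64, num_layers=2, n_epochs=10, GPU=-1)
    evals_result = {}
    best_valid_score = model.fit(dataset, evals_result=evals_result)
    predictions = model.predict(dataset, segment="test")  # pd.Series of scores
    return best_valid_score, predictions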
class data_loader(Dataset):
def __init__(self, df):
self.df_feature = df["feature"]
self.df_label_reg = df["label"]
self.df_index = df.index
self.df_feature = torch.tensor(
self.df_feature.values.reshape(-1, 6, 60).transpose(0, 2, 1), dtype=torch.float32
)
self.df_label_reg = torch.tensor(self.df_label_reg.values.reshape(-1), dtype=torch.float32)
def __getitem__(self, index):
sample, label_reg = self.df_feature[index], self.df_label_reg[index]
return sample, label_reg
def __len__(self):
return len(self.df_feature)
def get_stock_loader(df, batch_size, shuffle=True):
train_loader = DataLoader(data_loader(df), batch_size=batch_size, shuffle=shuffle)
return train_loader
def get_index(num_domain=2):
index = []
for i in range(num_domain):
for j in range(i + 1, num_domain + 1):
index.append((i, j))
return index
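# For example, get_index(2) enumerates every ordered (source, target) pair
# over 3 temporal splits: [(0, 1), (0, 2), (1, 2)].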
class AdaRNN(nn.Module):
"""
model_type: 'Boosting', 'AdaRNN'
"""
def __init__(
self,
use_bottleneck=False,
bottleneck_width=256,
n_input=128,
n_hiddens=[64, 64],
n_output=6,
dropout=0.0,
len_seq=9,
model_type="AdaRNN",
trans_loss="mmd",
GPU=0,
):
super(AdaRNN, self).__init__()
self.use_bottleneck = use_bottleneck
self.n_input = n_input
self.num_layers = len(n_hiddens)
self.hiddens = n_hiddens
self.n_output = n_output
self.model_type = model_type
self.trans_loss = trans_loss
self.len_seq = len_seq
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
in_size = self.n_input
features = nn.ModuleList()
for hidden in n_hiddens:
rnn = nn.GRU(input_size=in_size, num_layers=1, hidden_size=hidden, batch_first=True, dropout=dropout)
features.append(rnn)
in_size = hidden
self.features = nn.Sequential(*features)
if use_bottleneck is True: # finance
self.bottleneck = nn.Sequential(
nn.Linear(n_hiddens[-1], bottleneck_width),
nn.Linear(bottleneck_width, bottleneck_width),
nn.BatchNorm1d(bottleneck_width),
nn.ReLU(),
nn.Dropout(),
)
self.bottleneck[0].weight.data.normal_(0, 0.005)
self.bottleneck[0].bias.data.fill_(0.1)
self.bottleneck[1].weight.data.normal_(0, 0.005)
self.bottleneck[1].bias.data.fill_(0.1)
self.fc = nn.Linear(bottleneck_width, n_output)
torch.nn.init.xavier_normal_(self.fc.weight)
else:
self.fc_out = nn.Linear(n_hiddens[-1], self.n_output)
if self.model_type == "AdaRNN":
gate = nn.ModuleList()
for i in range(len(n_hiddens)):
gate_weight = nn.Linear(len_seq * self.hiddens[i] * 2, len_seq)
gate.append(gate_weight)
self.gate = gate
bnlst = nn.ModuleList()
for i in range(len(n_hiddens)):
bnlst.append(nn.BatchNorm1d(len_seq))
self.bn_lst = bnlst
self.softmax = torch.nn.Softmax(dim=0)
self.init_layers()
def init_layers(self):
for i in range(len(self.hiddens)):
self.gate[i].weight.data.normal_(0, 0.05)
self.gate[i].bias.data.fill_(0.0)
def forward_pre_train(self, x, len_win=0):
out = self.gru_features(x)
fea = out[0] # [2N,L,H]
if self.use_bottleneck is True:
fea_bottleneck = self.bottleneck(fea[:, -1, :])
fc_out = self.fc(fea_bottleneck).squeeze()
else:
fc_out = self.fc_out(fea[:, -1, :]).squeeze() # [N,]
out_list_all, out_weight_list = out[1], out[2]
out_list_s, out_list_t = self.get_features(out_list_all)
loss_transfer = torch.zeros((1,)).to(self.device)
for i, n in enumerate(out_list_s):
criterion_transfer = TransferLoss(loss_type=self.trans_loss, input_dim=n.shape[2])
h_start = 0
for j in range(h_start, self.len_seq, 1):
i_start = j - len_win if j - len_win >= 0 else 0
i_end = j + len_win if j + len_win < self.len_seq else self.len_seq - 1
for k in range(i_start, i_end + 1):
weight = (
out_weight_list[i][j]
if self.model_type == "AdaRNN"
else 1 / (self.len_seq - h_start) * (2 * len_win + 1)
)
loss_transfer = loss_transfer + weight * criterion_transfer.compute(
n[:, j, :], out_list_t[i][:, k, :]
)
return fc_out, loss_transfer, out_weight_list
def gru_features(self, x, predict=False):
x_input = x
out = None
out_lis = []
out_weight_list = [] if (self.model_type == "AdaRNN") else None
for i in range(self.num_layers):
out, _ = self.features[i](x_input.float())
x_input = out
out_lis.append(out)
if self.model_type == "AdaRNN" and predict is False:
out_gate = self.process_gate_weight(x_input, i)
out_weight_list.append(out_gate)
return out, out_lis, out_weight_list
def process_gate_weight(self, out, index):
x_s = out[0 : int(out.shape[0] // 2)]
x_t = out[out.shape[0] // 2 : out.shape[0]]
x_all = torch.cat((x_s, x_t), 2)
x_all = x_all.view(x_all.shape[0], -1)
weight = torch.sigmoid(self.bn_lst[index](self.gate[index](x_all.float())))
weight = torch.mean(weight, dim=0)
res = self.softmax(weight).squeeze()
return res
def get_features(self, output_list):
fea_list_src, fea_list_tar = [], []
for fea in output_list:
fea_list_src.append(fea[0 : fea.size(0) // 2])
fea_list_tar.append(fea[fea.size(0) // 2 :])
return fea_list_src, fea_list_tar
# For Boosting-based
def forward_Boosting(self, x, weight_mat=None):
out = self.gru_features(x)
fea = out[0]
if self.use_bottleneck:
fea_bottleneck = self.bottleneck(fea[:, -1, :])
fc_out = self.fc(fea_bottleneck).squeeze()
else:
fc_out = self.fc_out(fea[:, -1, :]).squeeze()
out_list_all = out[1]
out_list_s, out_list_t = self.get_features(out_list_all)
loss_transfer = torch.zeros((1,)).to(self.device)
if weight_mat is None:
weight = (1.0 / self.len_seq * torch.ones(self.num_layers, self.len_seq)).to(self.device)
else:
weight = weight_mat
dist_mat = torch.zeros(self.num_layers, self.len_seq).to(self.device)
for i, n in enumerate(out_list_s):
criterion_transfer = TransferLoss(loss_type=self.trans_loss, input_dim=n.shape[2])
for j in range(self.len_seq):
loss_trans = criterion_transfer.compute(n[:, j, :], out_list_t[i][:, j, :])
loss_transfer = loss_transfer + weight[i, j] * loss_trans
dist_mat[i, j] = loss_trans
return fc_out, loss_transfer, dist_mat, weight
# For Boosting-based
def update_weight_Boosting(self, weight_mat, dist_old, dist_new):
epsilon = 1e-5
dist_old = dist_old.detach()
dist_new = dist_new.detach()
ind = dist_new > dist_old + epsilon
weight_mat[ind] = weight_mat[ind] * (1 + torch.sigmoid(dist_new[ind] - dist_old[ind]))
weight_norm = torch.norm(weight_mat, dim=1, p=1)
weight_mat = weight_mat / weight_norm.t().unsqueeze(1).repeat(1, self.len_seq)
return weight_mat
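# In effect: time steps whose transfer loss grew since the last epoch
# (dist_new > dist_old + epsilon) have their weight boosted by the factor
# 1 + sigmoid(dist_new - dist_old); each layer's weights are then
# L1-renormalized so they sum to 1 again.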
def predict(self, x):
out = self.gru_features(x, predict=True)
fea = out[0]
if self.use_bottleneck is True:
fea_bottleneck = self.bottleneck(fea[:, -1, :])
fc_out = self.fc(fea_bottleneck).squeeze()
else:
fc_out = self.fc_out(fea[:, -1, :]).squeeze()
return fc_out
class TransferLoss:
def __init__(self, loss_type="cosine", input_dim=512, GPU=0):
"""
Supported loss_type: mmd(mmd_lin), mmd_rbf, coral, cosine, kl, js, mine, adv
"""
self.loss_type = loss_type
self.input_dim = input_dim
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
def compute(self, X, Y):
"""Compute adaptation loss
Arguments:
X {tensor} -- source matrix
Y {tensor} -- target matrix
Returns:
[tensor] -- transfer loss
"""
if self.loss_type in ("mmd_lin", "mmd"):
mmdloss = MMD_loss(kernel_type="linear")
loss = mmdloss(X, Y)
elif self.loss_type == "coral":
loss = CORAL(X, Y, self.device)
elif self.loss_type in ("cosine", "cos"):
loss = 1 - cosine(X, Y)
elif self.loss_type == "kl":
loss = kl_div(X, Y)
elif self.loss_type == "js":
loss = js(X, Y)
elif self.loss_type == "mine":
mine_model = Mine_estimator(input_dim=self.input_dim, hidden_dim=60).to(self.device)
loss = mine_model(X, Y)
elif self.loss_type == "adv":
loss = adv(X, Y, self.device, input_dim=self.input_dim, hidden_dim=32)
elif self.loss_type == "mmd_rbf":
mmdloss = MMD_loss(kernel_type="rbf")
loss = mmdloss(X, Y)
elif self.loss_type == "pairwise":
pair_mat = pairwise_dist(X, Y)
loss = torch.norm(pair_mat)
return loss
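# --- Usage sketch (illustrative): computing a linear-MMD transfer loss
# between a source batch and a slightly shifted target batch. ---
def _demo_transfer_loss():
    X = torch.randn(64, 16)
    Y = torch.randn(64, 16) + 0.5  # shifted target distribution
    return TransferLoss(loss_type="mmd", input_dim=16).compute(X, Y)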
def cosine(source, target):
source, target = source.mean(), target.mean()
cos = nn.CosineSimilarity(dim=0)
loss = cos(source, target)
return loss.mean()
class ReverseLayerF(Function):
@staticmethod
def forward(ctx, x, alpha):
ctx.alpha = alpha
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
output = grad_output.neg() * ctx.alpha
return output, None
class Discriminator(nn.Module):
def __init__(self, input_dim=256, hidden_dim=256):
super(Discriminator, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dis1 = nn.Linear(input_dim, hidden_dim)
self.dis2 = nn.Linear(hidden_dim, 1)
def forward(self, x):
x = F.relu(self.dis1(x))
x = self.dis2(x)
x = torch.sigmoid(x)
return x
def adv(source, target, device, input_dim=256, hidden_dim=512):
domain_loss = nn.BCELoss()
# !!! Pay attention to .cuda !!!
adv_net = Discriminator(input_dim, hidden_dim).to(device)
domain_src = torch.ones(len(source)).to(device)
domain_tar = torch.zeros(len(target)).to(device)
domain_src, domain_tar = domain_src.view(domain_src.shape[0], 1), domain_tar.view(domain_tar.shape[0], 1)
reverse_src = ReverseLayerF.apply(source, 1)
reverse_tar = ReverseLayerF.apply(target, 1)
pred_src = adv_net(reverse_src)
pred_tar = adv_net(reverse_tar)
loss_s, loss_t = domain_loss(pred_src, domain_src), domain_loss(pred_tar, domain_tar)
loss = loss_s + loss_t
return loss
def CORAL(source, target, device):
d = source.size(1)
ns, nt = source.size(0), target.size(0)
# source covariance
tmp_s = torch.ones((1, ns)).to(device) @ source
cs = (source.t() @ source - (tmp_s.t() @ tmp_s) / ns) / (ns - 1)
# target covariance
tmp_t = torch.ones((1, nt)).to(device) @ target
ct = (target.t() @ target - (tmp_t.t() @ tmp_t) / nt) / (nt - 1)
# frobenius norm
loss = (cs - ct).pow(2).sum()
loss = loss / (4 * d * d)
return loss
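# i.e. CORAL = ||C_s - C_t||_F^2 / (4 d^2): the squared Frobenius distance
# between the source and target feature covariance matrices.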
class MMD_loss(nn.Module):
def __init__(self, kernel_type="linear", kernel_mul=2.0, kernel_num=5):
super(MMD_loss, self).__init__()
self.kernel_num = kernel_num
self.kernel_mul = kernel_mul
self.fix_sigma = None
self.kernel_type = kernel_type
def gaussian_kernel(self, source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
n_samples = int(source.size()[0]) + int(target.size()[0])
total = torch.cat([source, target], dim=0)
total0 = total.unsqueeze(0).expand(int(total.size(0)), int(total.size(0)), int(total.size(1)))
total1 = total.unsqueeze(1).expand(int(total.size(0)), int(total.size(0)), int(total.size(1)))
L2_distance = ((total0 - total1) ** 2).sum(2)
if fix_sigma:
bandwidth = fix_sigma
else:
bandwidth = torch.sum(L2_distance.data) / (n_samples**2 - n_samples)
bandwidth /= kernel_mul ** (kernel_num // 2)
bandwidth_list = [bandwidth * (kernel_mul**i) for i in range(kernel_num)]
kernel_val = [torch.exp(-L2_distance / bandwidth_temp) for bandwidth_temp in bandwidth_list]
return sum(kernel_val)
def linear_mmd(self, X, Y):
delta = X.mean(axis=0) - Y.mean(axis=0)
loss = delta.dot(delta.T)
return loss
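# linear_mmd reduces to the squared Euclidean distance between batch means,
# ||mean(X) - mean(Y)||^2, a first-moment approximation of the full MMD.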
def forward(self, source, target):
if self.kernel_type == "linear":
return self.linear_mmd(source, target)
elif self.kernel_type == "rbf":
batch_size = int(source.size()[0])
kernels = self.gaussian_kernel(
source, target, kernel_mul=self.kernel_mul, kernel_num=self.kernel_num, fix_sigma=self.fix_sigma
)
with torch.no_grad():
XX = torch.mean(kernels[:batch_size, :batch_size])
YY = torch.mean(kernels[batch_size:, batch_size:])
XY = torch.mean(kernels[:batch_size, batch_size:])
YX = torch.mean(kernels[batch_size:, :batch_size])
loss = torch.mean(XX + YY - XY - YX)
return loss
class Mine_estimator(nn.Module):
def __init__(self, input_dim=2048, hidden_dim=512):
super(Mine_estimator, self).__init__()
self.mine_model = Mine(input_dim, hidden_dim)
def forward(self, X, Y):
Y_shuffle = Y[torch.randperm(len(Y))]
loss_joint = self.mine_model(X, Y)
loss_marginal = self.mine_model(X, Y_shuffle)
ret = torch.mean(loss_joint) - torch.log(torch.mean(torch.exp(loss_marginal)))
loss = -ret
return loss
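# This is the Donsker-Varadhan bound used by MINE:
# I(X; Y) >= E_joint[T(x, y)] - log E_marginal[exp(T(x, y'))], with y' a
# shuffled (marginal) sample; the forward pass returns its negative as a loss.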
class Mine(nn.Module):
def __init__(self, input_dim=2048, hidden_dim=512):
super(Mine, self).__init__()
self.fc1_x = nn.Linear(input_dim, hidden_dim)
self.fc1_y = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, 1)
def forward(self, x, y):
h1 = F.leaky_relu(self.fc1_x(x) + self.fc1_y(y))
h2 = self.fc2(h1)
return h2
def pairwise_dist(X, Y):
n, d = X.shape
m, _ = Y.shape
assert d == Y.shape[1]
a = X.unsqueeze(1).expand(n, m, d)
b = Y.unsqueeze(0).expand(n, m, d)
return torch.pow(a - b, 2).sum(2)
def pairwise_dist_np(X, Y):
n, d = X.shape
m, _ = Y.shape
assert d == Y.shape[1]
a = np.expand_dims(X, 1)
b = np.expand_dims(Y, 0)
a = np.tile(a, (1, m, 1))
b = np.tile(b, (n, 1, 1))
return np.power(a - b, 2).sum(2)
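# --- Consistency sketch (illustrative): the torch and numpy variants agree. ---
def _demo_pairwise_dist():
    X = np.random.rand(4, 3).astype(np.float32)
    Y = np.random.rand(5, 3).astype(np.float32)
    d_np = pairwise_dist_np(X, Y)
    d_torch = pairwise_dist(torch.from_numpy(X), torch.from_numpy(Y)).numpy()
    assert np.allclose(d_np, d_torch, atol=1e-5)
    return d_np  # shape (4, 5): squared distances between all row pairs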
def pa(X, Y):
XY = np.dot(X, Y.T)
XX = np.sum(np.square(X), axis=1)
XX = np.transpose([XX])
YY = np.sum(np.square(Y), axis=1)
dist = XX + YY - 2 * XY
return dist
import sys
import numpy as np
MOD = 10 ** 9 + 7
def cumprod(a, p):
l = len(a); sql = int(np.sqrt(l) + 1)
a = np.resize(a, sql ** 2).reshape(sql, sql)
for i in range(sql - 1): a[:, i+1] *= a[:, i]; a[:, i+1] %= p
for i in range(sql - 1): a[i+1] *= a[i, -1]; a[i+1] %= p
return np.ravel(a)[:l]
def make_tables(n=10 ** 9, r=10 ** 7, p=MOD):
fac = np.arange(r + 1); fac[0] = 1; fac = cumprod(fac, p)
ifac = np.arange(r + 1, 0, -1); ifac[0] = pow(int(fac[-1]), p - 2, p)
ifac = cumprod(ifac, p)[n::-1]
n_choose = np.arange(n + 1, n - r, -1); n_choose[0] = 1;
n_choose[1:] = cumprod(n_choose[1:], p) * ifac[1:r+1] % p
return fac, ifac, n_choose
fac, ifac, n_choose = make_tables()
def choose(n, r, p=MOD):
return fac[n] * ifac[r] % p * ifac[n-r] % p
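# For example, choose(5, 2) returns 10; n_choose[r] caches C(n, r) mod p for
# the fixed n passed to make_tables.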
n, k = map(int, sys.stdin.readline().split())
a = np.array(sys.stdin.readline().split(), dtype=np.int64)
a.sort()
def main():
res = (a[k-1:] * choose(np.arange(k-1, n), k - 1) % MOD).sum() % MOD
res -= (a[:n-(k-1)] * choose(np.arange(n-1, k-2, -1), k - 1) % MOD).sum() % MOD
print(res % MOD)
main()
import copy
from datetime import datetime
import dask.array as da
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from pathlib import Path
from act.io.armfiles import read_netcdf
from act.qc.arm import add_dqr_to_qc
from act.qc.qcfilter import parse_bit, set_bit, unset_bit
from act.qc.radiometer_tests import fft_shading_test
from act.qc.sp2 import SP2ParticleCriteria, PYSP2_AVAILABLE
from act.tests import (
EXAMPLE_CEIL1,
EXAMPLE_CO2FLX4M,
EXAMPLE_MET1,
EXAMPLE_METE40,
EXAMPLE_MFRSR,
EXAMPLE_IRT25m20s,
EXAMPLE_BRS,
EXAMPLE_MET_YAML
)
from act.qc.bsrn_tests import _calculate_solar_parameters
from act.qc.add_supplemental_qc import read_yaml_supplemental_qc, apply_supplemental_qc
def test_fft_shading_test():
obj = read_netcdf(EXAMPLE_MFRSR)
obj.clean.cleanup()
obj = fft_shading_test(obj)
qc_data = obj['qc_diffuse_hemisp_narrowband_filter4']
assert np.nansum(qc_data.values) == 7164
def test_global_qc_cleanup():
ds_object = read_netcdf(EXAMPLE_MET1)
ds_object.load()
ds_object.clean.cleanup()
assert ds_object['qc_wdir_vec_mean'].attrs['flag_meanings'] == [
'Value is equal to missing_value.',
'Value is less than the fail_min.',
'Value is greater than the fail_max.',
]
assert ds_object['qc_wdir_vec_mean'].attrs['flag_masks'] == [1, 2, 4]
assert ds_object['qc_wdir_vec_mean'].attrs['flag_assessments'] == [
'Bad',
'Bad',
'Bad',
]
assert ds_object['qc_temp_mean'].attrs['flag_meanings'] == [
'Value is equal to missing_value.',
'Value is less than the fail_min.',
'Value is greater than the fail_max.',
'Difference between current and previous values exceeds fail_delta.',
]
assert ds_object['qc_temp_mean'].attrs['flag_masks'] == [1, 2, 4, 8]
assert ds_object['qc_temp_mean'].attrs['flag_assessments'] == [
'Bad',
'Bad',
'Bad',
'Indeterminate',
]
ds_object.close()
del ds_object
def test_qc_test_errors():
ds_object = read_netcdf(EXAMPLE_MET1)
var_name = 'temp_mean'
assert ds_object.qcfilter.add_less_test(var_name, None) is None
assert ds_object.qcfilter.add_greater_test(var_name, None) is None
assert ds_object.qcfilter.add_less_equal_test(var_name, None) is None
assert ds_object.qcfilter.add_equal_to_test(var_name, None) is None
assert ds_object.qcfilter.add_not_equal_to_test(var_name, None) is None
def test_arm_qc():
# Test DQR Webservice using known DQR
variable = 'wspd_vec_mean'
qc_variable = 'qc_' + variable
obj = read_netcdf(EXAMPLE_METE40)
# DQR webservice does go down, so ensure it
# properly runs first before testing
try:
obj = add_dqr_to_qc(obj, variable=variable)
ran = True
obj.attrs['_datastream'] = obj.attrs['datastream']
del obj.attrs['datastream']
obj2 = add_dqr_to_qc(obj, variable=variable)
obj3 = add_dqr_to_qc(obj)
add_dqr_to_qc(obj, variable=variable, exclude=['D190529.4'])
add_dqr_to_qc(obj, variable=variable, include=['D400101.1'])
with np.testing.assert_raises(ValueError):
del obj.attrs['_datastream']
add_dqr_to_qc(obj, variable=variable)
except ValueError:
ran = False
if ran:
assert qc_variable in obj
dqr = [True for d in obj[qc_variable].attrs['flag_meanings'] if 'D190529.4' in d]
assert dqr[0] is True
assert 'Suspect' not in obj[qc_variable].attrs['flag_assessments']
assert 'Incorrect' not in obj[qc_variable].attrs['flag_assessments']
assert qc_variable in obj2
dqr = [True for d in obj2[qc_variable].attrs['flag_meanings'] if 'D190529.4' in d]
assert dqr[0] is True
assert 'Suspect' not in obj2[qc_variable].attrs['flag_assessments']
assert 'Incorrect' not in obj2[qc_variable].attrs['flag_assessments']
assert qc_variable in obj3
dqr = [True for d in obj3[qc_variable].attrs['flag_meanings'] if 'D190529.4' in d]
assert dqr[0] is True
assert 'Suspect' not in obj3[qc_variable].attrs['flag_assessments']
assert 'Incorrect' not in obj3[qc_variable].attrs['flag_assessments']
def test_qcfilter():
ds_object = read_netcdf(EXAMPLE_IRT25m20s)
var_name = 'inst_up_long_dome_resist'
expected_qc_var_name = 'qc_' + var_name
ds_object.qcfilter.check_for_ancillary_qc(
var_name, add_if_missing=True, cleanup=False, flag_type=False
)
assert expected_qc_var_name in list(ds_object.keys())
del ds_object[expected_qc_var_name]
# Perform adding of quality control variables to object
result = ds_object.qcfilter.add_test(var_name, test_meaning='Birds!')
assert isinstance(result, dict)
qc_var_name = result['qc_variable_name']
assert qc_var_name == expected_qc_var_name
# Check that new linking and describing attributes are set
assert ds_object[qc_var_name].attrs['standard_name'] == 'quality_flag'
assert ds_object[var_name].attrs['ancillary_variables'] == qc_var_name
# Check that CF attributes are set including new flag_assessments
assert 'flag_masks' in ds_object[qc_var_name].attrs.keys()
assert 'flag_meanings' in ds_object[qc_var_name].attrs.keys()
assert 'flag_assessments' in ds_object[qc_var_name].attrs.keys()
# Check that the values of the attributes are set correctly
assert ds_object[qc_var_name].attrs['flag_assessments'][0] == 'Bad'
assert ds_object[qc_var_name].attrs['flag_meanings'][0] == 'Birds!'
assert ds_object[qc_var_name].attrs['flag_masks'][0] == 1
# Set some test values
index = [0, 1, 2, 30]
ds_object.qcfilter.set_test(var_name, index=index, test_number=result['test_number'])
# Add a new test and set values
index2 = [6, 7, 8, 50]
ds_object.qcfilter.add_test(
var_name,
index=index2,
test_number=9,
test_meaning='testing high number',
test_assessment='Suspect',
)
# Retrieve data from object as numpy masked array. Count number of masked
# elements and ensure equal to size of index array.
data = ds_object.qcfilter.get_masked_data(var_name, rm_assessments='Bad')
assert np.ma.count_masked(data) == len(index)
data = ds_object.qcfilter.get_masked_data(
var_name, rm_assessments='Suspect', return_nan_array=True
)
assert np.sum(np.isnan(data)) == len(index2)
data = ds_object.qcfilter.get_masked_data(
var_name, rm_assessments=['Bad', 'Suspect'], ma_fill_value=np.nan
)
assert np.ma.count_masked(data) == len(index + index2)
# Test internal function for returning the index array of where the
# tests are set.
assert (
np.sum(
ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
- np.array(index, dtype=int)
)
== 0
)
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
import math
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from itertools import permutations
import scipy.sparse as sp
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
def normalize(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
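# For example, normalize(sp.csr_matrix(np.array([[1., 1.], [0., 2.]])))
# yields [[0.5, 0.5], [0., 1.]]: each non-empty row is rescaled to sum to 1.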
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
if torch.cuda.is_available():
return torch.sparse.FloatTensor(indices, values, shape).cuda()
else:
return torch.sparse.FloatTensor(indices, values, shape)
class GraphFunc(nn.Module):
def __init__(self, z_dim):
super(GraphFunc, self).__init__()
"""
DeepSets Function
"""
self.gc1 = GraphConvolution(z_dim, z_dim * 4)
self.gc2 = GraphConvolution(z_dim * 4, z_dim)
self.z_dim = z_dim
def forward(self, graph_input_raw, graph_label):
"""
set_input, seq_length, set_size, dim
"""
set_length, set_size, dim = graph_input_raw.shape
assert(dim == self.z_dim)
set_output_list = []
for g_index in range(set_length):
graph_input = graph_input_raw[g_index, :]
# construct the adj matrix
unique_class = np.unique(graph_label)
edge_set = []
for c in unique_class:
current_index = np.where(graph_label == c)[0].tolist()
if len(current_index) > 1:
edge_set.append(np.array(list(permutations(current_index, 2))))
if len(edge_set) == 0:
adj = sp.coo_matrix((np.array([0]), (np.array([0]), np.array([0]))),
shape=(set_size, set_size), dtype=np.float32)
import os
import numpy as np
import pandas as pd
from scipy.optimize import least_squares
from prereise.gather.demanddata.bldg_electrification import const
def calculate_r2(endogenous, residuals):
"""Calculate r2 value of fit.
:param iterable endogenous: vector of observations of endogenous variable.
:param iterable residuals: vector of residuals between modeled fit and observations.
:return: (*float*) -- r-squared value of fit.
"""
sumres = np.square(np.array(residuals)).sum()
sumtot = np.square(np.array(endogenous) - np.array(endogenous).mean()).sum()
r2 = 1 - (sumres / sumtot)
return r2
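# A perfect fit has zero residuals, so e.g.
# calculate_r2([1.0, 2.0, 3.0], [0.0, 0.0, 0.0]) returns 1.0.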
def calculate_state_slopes(puma_data, year=const.base_year):
"""Estimate regression parameters per-state for residential and commercial fuel use.
:param pandas.DataFrame puma_data: data frame of per-puma data.
:param int/str year: year of data to use for analysis.
:return: (*tuple*) -- a pair of pandas.DataFrame objects for per-state residential
and commercial slopes, respectively.
"""
dti = pd.date_range(start=f"{year}-01-01", end=f"{year}-12-31 23:00:00", freq="H")
hours_in_month = dti.month.value_counts()
# Load in historical fossil fuel usage data for input/base year
data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
ng_usage_data = {
clas: pd.read_csv(
os.path.join(data_dir, f"ng_monthly_mmbtu_{year}_{clas}.csv"), index_col=0
)
for clas in {"res", "com"}
}
fok_usage_data = pd.read_csv(
os.path.join(data_dir, f"fok_data_bystate_{year}.csv"), index_col="state"
)
othergas_usage_data = pd.read_csv(
os.path.join(data_dir, f"propane_data_bystate_{year}.csv"), index_col="state"
)
# Initialize dataframes to store state heating slopes
state_slopes_res = pd.DataFrame(
columns=(["state", "r2", "sh_slope", "dhw_const", "dhw_slope", "other_const"])
)
state_slopes_com = pd.DataFrame(
columns=(
[
"state",
"r2",
"sh_slope",
"dhw_const",
"cook_const",
"other_const",
"other_slope",
]
)
)
for state in const.state_list:
# Load puma data
puma_data_it = puma_data.query("state == @state")
# Load puma temperatures
temps_pumas = pd.read_csv(
f"https://besciences.blob.core.windows.net/datasets/bldg_el/pumas/temps/temps_pumas_{state}_{year}.csv"
)
temps_pumas_transpose = temps_pumas.T
for clas in const.classes:
# puma area * percentage of puma area that uses fossil fuel
areas_ff_sh_it = (
puma_data_it[f"{clas}_area_{year}_m2"]
* puma_data_it[f"frac_ff_sh_{clas}_{year}"]
)
areas_ff_dhw_it = (
puma_data_it[f"{clas}_area_{year}_m2"]
* puma_data_it[f"frac_ff_dhw_{clas}_{year}"]
)
areas_ff_cook_it = (
puma_data_it[f"{clas}_area_{year}_m2"]
* puma_data_it[f"frac_ff_cook_com_{year}"]
)
if clas == "res":
areas_ff_other_it = (
puma_data_it[f"{clas}_area_{year}_m2"]
* puma_data_it[f"frac_ff_other_res_{year}"]
)
else:
areas_ff_other_it = (
puma_data_it[f"{clas}_area_{year}_m2"]
* puma_data_it[f"frac_ff_sh_com_{year}"]
)
# sum of previous areas to be used in fitting
sum_areaff_dhw = sum(areas_ff_dhw_it)
sum_areaff_other = sum(areas_ff_other_it)
sum_areaff_cook = sum(areas_ff_cook_it)
# Load monthly natural gas usage for the state
natgas = ng_usage_data[clas][state]
# Load annual fuel oil/kerosene and other gas/propane usage for the state
fok = fok_usage_data.loc[state, f"fok.{clas}.mmbtu"]
other = othergas_usage_data.loc[state, f"propane.{clas}.mmbtu"]
totfuel = fok + other + natgas.sum()
# Scale total fossil fuel usage by monthly natural gas
ff_usage_data_it = totfuel * natgas / natgas.sum()
# Fossil fuel average monthly mmbtu, normalized by hours in month
ff_monthly_it = ff_usage_data_it / hours_in_month
# Hourly heating degrees for all pumas in a given state, multiplied by their corresponding area and percent fossil fuel, summed up to one hourly list
hd_hourly_it_sh = (
temps_pumas_transpose.applymap(
lambda x: max(const.temp_ref[clas] - x, 0)
)
.mul(areas_ff_sh_it, axis=0)
.sum(axis=0)
)
hd_monthly_it_sh = hd_hourly_it_sh.groupby(dti.month).mean()
if clas == "res":
hd_hourly_it_dhw = (
temps_pumas_transpose.applymap(lambda x: const.temp_ref[clas] - x)
.mul(areas_ff_dhw_it, axis=0)
.sum(axis=0)
)
hd_monthly_it_dhw = hd_hourly_it_dhw.groupby(dti.month).mean()
# Fitting function: Returns difference between fitted equation and actual fossil fuel usage for the least_squares function to minimize
def func_r(par, sh, dhw, ff):
err = hours_in_month ** (1 / 2) * (
ff
- (
par[0] * sh
+ par[1] * (sum_areaff_dhw + const.dhw_lin_scalar * dhw)
+ par[2] * sum_areaff_other
)
)
return err
# Least squares solver
lm_it = least_squares(
func_r,
const.bounds_lower_res,
args=(hd_monthly_it_sh, hd_monthly_it_dhw, ff_monthly_it),
bounds=(const.bounds_lower_res, const.bounds_upper_res),
)
# Solved coefficients for slopes and constants
par_sh_l = lm_it.x[0]
par_dhw_c = lm_it.x[1]
par_dhw_l = lm_it.x[1] * const.dhw_lin_scalar
par_other_c = lm_it.x[2]
corrected_residuals = np.array(lm_it.fun) / hours_in_month ** (1 / 2)
r2 = calculate_r2(ff_monthly_it, corrected_residuals)
# Add coefficients to output dataframe
df_i = len(state_slopes_res)
state_slopes_res.loc[df_i] = [
state,
r2,
par_sh_l,
par_dhw_c,
par_dhw_l,
par_other_c,
]
else:
hd_hourly_it_other = (
temps_pumas_transpose.applymap(
lambda x: max(x - const.temp_ref[clas], 0)
)
.mul(areas_ff_other_it, axis=0)
.sum(axis=0)
)
hd_monthly_it_other = hd_hourly_it_other.groupby(dti.month).mean()
bound_lower_consts_par = (
const.dhw_low_bound_com * sum_areaff_dhw
+ const.cook_c_scalar * const.dhw_low_bound_com * sum_areaff_cook
) / (sum_areaff_dhw + sum_areaff_cook + sum_areaff_other)
bound_upper_consts_par = (
const.dhw_high_bound_com * sum_areaff_dhw
+ const.cook_c_scalar * const.dhw_high_bound_com * sum_areaff_cook
+ const.other_high_bound_com * sum_areaff_other
) / (sum_areaff_dhw + sum_areaff_cook + sum_areaff_other)
bounds_lower_com = [0, bound_lower_consts_par, 0]
bounds_upper_com = [np.inf, bound_upper_consts_par, np.inf]
# Fitting function: Returns difference between fitted equation and actual fossil fuel usage for the least_squares function to minimize
def func_c(par, sh, other, ff):
err = hours_in_month ** (1 / 2) * (
ff
- (
par[0] * sh
+ par[1]
* (sum_areaff_dhw + sum_areaff_cook + sum_areaff_other)
+ par[2] * other
)
)
return err
# Least squares solver
lm_it = least_squares(
func_c,
bounds_lower_com,
args=(hd_monthly_it_sh, hd_monthly_it_other, ff_monthly_it),
bounds=(bounds_lower_com, bounds_upper_com),
)
# Solved dhw/cook/other constants
consts_par = lm_it.x[1]
bound_decision_point = (
consts_par
* (sum_areaff_dhw + sum_areaff_cook + sum_areaff_other)
/ (sum_areaff_dhw + const.cook_c_scalar * sum_areaff_cook)
)
if bound_decision_point <= const.dhw_high_bound_com:
par_dhw_c = bound_decision_point
par_other_c = 0
else:
par_dhw_c = const.dhw_high_bound_com
par_other_c = (
consts_par
* (sum_areaff_dhw + sum_areaff_cook + sum_areaff_other)
- (
const.dhw_high_bound_com * sum_areaff_dhw
+ const.cook_c_scalar
* const.dhw_high_bound_com
* sum_areaff_cook
)
) / sum_areaff_other
par_cook_c = const.cook_c_scalar * par_dhw_c
# Solved coefficients for slopes
par_sh_l = lm_it.x[0]
par_other_l = lm_it.x[2]
corrected_residuals = np.array(lm_it.fun) / hours_in_month ** (1 / 2)
r2 = calculate_r2(ff_monthly_it, corrected_residuals)
# Add coefficients to output dataframe
df_i = len(state_slopes_com)
state_slopes_com.loc[df_i] = [
state,
r2,
par_sh_l,
par_dhw_c,
par_cook_c,
par_other_c,
par_other_l,
]
# Export heating/hot water/cooking coefficients for each state
return state_slopes_res, state_slopes_com
def adjust_puma_slopes(
puma_data, state_slopes_res, state_slopes_com, year=const.base_year
):
"""Create per-puma slopes from per-state slopes.
:param pandas.DataFrame puma_data: puma data.
:param pandas.DataFrame state_slopes_res: residential state slopes.
:param pandas.DataFrame state_slopes_com: commercial state slopes.
:param int year: year of temperatures to download.
:return: (*tuple*) -- a pair of pandas.DataFrame objects for per-puma residential
and commercial slopes, respectively.
"""
# Minimize error between actual slopes and fitted function
# Note for fitting to converge, hdd must be divided by 1000 and slopes in btu
def model(par, hdd_div1000, slope_btu):
err = (
slope_btu
- (par[0] + par[1] * (1 - np.exp(-par[2] * hdd_div1000))) / hdd_div1000
)
return err
# Functions with solved coefficients for res and com - produces slopes in btu/m2-C for inputs of HDD
def func_slope_exp(x, a, b, c):
return (a + b * (1 - np.exp(-c * (x / 1000))) / (x / 1000)) * 1e-6
classes = ["res", "com"]
hd_col_names = {"res": f"hd_183C_{year}", "com": f"hd_167C_{year}"}
state_slopes = {
"res": state_slopes_res.set_index("state"),
"com": state_slopes_com.set_index("state"),
}
puma_slopes = {clas: puma_data["state"].to_frame() for clas in classes}
# Create data frames to hold output
adj_slopes = {clas: puma_data["state"].to_frame() for clas in classes}
for state in const.state_list:
# Load puma temperatures
temps_pumas = pd.read_csv(
f"https://besciences.blob.core.windows.net/datasets/bldg_el/pumas/temps/temps_pumas_{state}_{year}.csv"
)
# Hourly temperature difference below const.temp_ref_res/com for each puma
for clas in classes:
temp_diff = temps_pumas.applymap(lambda x: max(const.temp_ref[clas] - x, 0))
puma_data.loc[temp_diff.columns, hd_col_names[clas]] = temp_diff.sum()
# Load in state groups consistent with building area scale adjustments
data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
area_scale = {
clas: pd.read_csv(
os.path.join(data_dir, f"area_scale_{clas}.csv"), index_col=False
)
for clas in classes
}
# Compute target year areas from the two survey years provided
area_scale["res"][f"{year}"] = area_scale["res"][f"RECS{const.recs_date_1}"] * (
(
area_scale["res"][f"RECS{const.recs_date_2}"]
/ area_scale["res"][f"RECS{const.recs_date_1}"]
)
** (
(const.base_year - const.recs_date_1)
/ (const.recs_date_2 - const.recs_date_1)
)
)
area_scale["com"][f"{year}"] = area_scale["com"][f"CBECS{const.cbecs_date_1}"] * (
(
area_scale["com"][f"CBECS{const.cbecs_date_2}"]
/ area_scale["com"][f"CBECS{const.cbecs_date_1}"]
)
** (
(const.base_year - const.cbecs_date_1)
/ (const.cbecs_date_2 - const.cbecs_date_1)
)
)
for clas in classes:
puma_slopes[clas]["htg_slope_mmbtu_m2_degC"] = puma_data["state"].map(
state_slopes[clas]["sh_slope"]
)
# Extract state groups from area_scale
state_to_group = {
elem: i
for i, row in area_scale[clas].iterrows()
for elem in row
if isinstance(elem, str)
}
# Calculate population-weighted HDD and slopes
state_puma_groupby = puma_data.groupby(puma_data["state"].map(state_to_group))
state_puma_slope_groupby = puma_slopes[clas].groupby(
puma_data["state"].map(state_to_group)
)
area_scale[clas]["hdd_normals_popwtd"] = [
(sum(data["hdd65_normals"] * data["pop"]) / data["pop"].sum())
for group, data in state_puma_groupby
]
area_scale[clas]["htg_slope_mmbtu_m2_degC_pophddwtd"] = [
sum(
state_puma_slope_groupby.get_group(group)["htg_slope_mmbtu_m2_degC"]
* data["hdd65_normals"]
* data["pop"]
)
/ sum(data["hdd65_normals"] * data["pop"])
for group, data in state_puma_groupby
]
ls_args = (
# Divide by 1000 for robust solver
np.array(area_scale[clas]["hdd_normals_popwtd"]) / 1000,
# Convert slopes from mmbtu to btu, also for solver robustness
np.array(area_scale[clas]["htg_slope_mmbtu_m2_degC_pophddwtd"]) * 1e6,
)
# Copyright 2020, by the California Institute of Technology. ALL RIGHTS
# RESERVED. United States Government Sponsorship acknowledged. Any
# commercial use must be negotiated with the Office of Technology Transfer
# at the California Institute of Technology.
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Name:
# habex.m
#
# Purpose:
# Representation of the Habex telescope and coronagraph. To be called using
# the PROPER library procedure "proper.prop_run".
#
# Inputs:
# lambda_m
# The wavelength of propagation in meters (note that the wavelength is provided
# to proper.prop_run in microns and is converted to meters in there).
# gridsize
# Size of the computational grid (gridsize by gridsize elements). Must be
# a power of 2.
#
# Outputs:
# wavefront
# Variable in which the computed E-field at the final image plane is returned.
# The field is sampled by "final_sampling_lam0" lambda_m/D over "nout" by "nout"
# pixels.
# sampling_m
# The sampling at the final image plane in meters per pixel
#
# Optional keywords or switches:
# optval
# (Optional) Structure whose fields are values
# that are passed to the prescription for use as the prescription desires.
#
# Revision history:
# Written by <NAME> (Jet Propulsion Laboratory, California Inst. Technology), January 2020
# Translated to Python by <NAME> (JPL, CIT), February 2020. Added an option,
# use_pr, to retrieve the E-field at the pupil before the focal plane mask. Also
# added the vortex as the focal plane mask.
##----------------------------------------------------------------------------------
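# Usage sketch: this prescription is normally executed through PROPER, e.g.
#   (wavefront, sampling_m) = proper.prop_run('habex', 0.5, 1024,
#                                             PASSVALUE={'use_errors': 0})
# where 0.5 is the wavelength in microns and 1024 the power-of-2 grid size.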
import numpy as np
#import matplotlib.pyplot as plt # For Debugging
#from astropy.io import fits # For Debugging
import proper # Use v3.2 or higher
import falco # FALCO needed for propagation to/from vortex
def habex(lambda_m, gridsize, PASSVALUE={'dummy':0}):
nact = 64; #-- number of actuators across DM
nact_across_pupil = 62; #-- number of actuators across pupil
dm_xc = 31.5; #-- wavefront centered at corner of DM actuator (0,0 is center of 1st actuator)
dm_yc = 31.5;
dm_sampling = 0.4e-3; #-- DM actuator spacing (BMC)
#-- default settings (override with optval)
map_dir = '../maps/'; #-- directory containing optical surface error maps
lambda0_um = 0.5; #-- default reference wavelength (center of bandpass) for star offsets & field stop size
use_errors = 1; #-- 1 = use optical surface errors, 0 = none
zindex = np.array([0,]) #-- vector of Zernike indices (Noll ordered)
zval = np.array([0,]) #-- vector of Zernike coefficients (unobscured RMS wavefront in meters)
xoffset = 0; #-- star X offset in lambda0/D units (must then provide lambda0_um)
yoffset = 0; #-- star Y offset in lambda0/D units
use_dm1 = 0; #-- use DM1 (if non-zero, must then provide pokes (meters) in "dm1" array)
use_dm2 = 0; #-- use DM2 (if non-zero, must then provide pokes (meters) in "dm2" array)
use_fpm = 1; #-- use focal plane mask (0 = no FPM)
use_lyot_stop = 1; #-- use Lyot stop (0 = no stop)
use_field_stop = 1; #-- use field stop (0 = no stop)
field_stop_radius = 25.0; #-- field stop radius in lam0/D
final_sampling_lam0 = 0.2; #-- sampling at final image plane in lam0/D
nout = 300; #-- output field size (nout x nout pixels)
normLyotDiam = 0.95; #-- Lyot stop outer diameter normalized to the beam diameter
vortexCharge = 6; #-- charge of the vortex focal plane mask
pupil_diam_pix = nact_across_pupil * 7 #-- define sampling of pupil based on having 7 pixels across each DM actuator
pr_pupil_diam_pix = pupil_diam_pix; #-- define sampling of pupil used for flattening phase with the DMs
use_pr = False #-- whether to return a fake phase retrieval of the pupil rather than the focal plane
#-- override defaults using values passed using optval structure
if 'PASSVALUE' in locals():
if 'lam0' in PASSVALUE: lambda0_um = PASSVALUE['lam0']
if 'lambda0_um' in PASSVALUE: lambda0_um = PASSVALUE['lambda0_um']
if 'use_errors' in PASSVALUE: use_errors = PASSVALUE['use_errors']
if 'zindex' in PASSVALUE: zindex = PASSVALUE['zindex']
if 'zval' in PASSVALUE: zval = PASSVALUE['zval']
if 'xoffset' in PASSVALUE: xoffset = PASSVALUE['xoffset']
if 'yoffset' in PASSVALUE: yoffset = PASSVALUE['yoffset']
if 'use_dm1' in PASSVALUE: use_dm1 = PASSVALUE['use_dm1']
if 'dm1' in PASSVALUE: dm1 = PASSVALUE['dm1']
if 'use_dm2' in PASSVALUE: use_dm2 = PASSVALUE['use_dm2']
if 'dm2' in PASSVALUE: dm2 = PASSVALUE['dm2']
if 'use_fpm' in PASSVALUE: use_fpm = PASSVALUE['use_fpm']
if 'use_lyot_stop' in PASSVALUE: use_lyot_stop = PASSVALUE['use_lyot_stop']
if 'use_field_stop' in PASSVALUE: use_field_stop = PASSVALUE['use_field_stop']
if 'field_stop_radius' in PASSVALUE: field_stop_radius = PASSVALUE['field_stop_radius']
if 'final_sampling_lam0' in PASSVALUE: final_sampling_lam0 = PASSVALUE['final_sampling_lam0']
if 'nout' in PASSVALUE: nout = PASSVALUE['nout']
if 'normLyotDiam' in PASSVALUE: normLyotDiam = PASSVALUE['normLyotDiam']
if 'vortexCharge' in PASSVALUE: vortexCharge = PASSVALUE['vortexCharge']
if 'map_dir' in PASSVALUE: map_dir = PASSVALUE['map_dir']
if 'pupil_diam_pix' in PASSVALUE: pupil_diam_pix = PASSVALUE['pupil_diam_pix']
if 'pr_pupil_diam_pix' in PASSVALUE: pr_pupil_diam_pix = PASSVALUE['pr_pupil_diam_pix']
if 'use_pr' in PASSVALUE: use_pr = PASSVALUE['use_pr']
# Convert 0 and 1 to False and True
use_errors = bool(use_errors)
use_dm1 = bool(use_dm1)
use_dm2 = bool(use_dm2)
use_fpm = bool(use_fpm)
use_lyot_stop = bool(use_lyot_stop)
use_field_stop = bool(use_field_stop)
use_pr = bool(use_pr)
if(np.isscalar(zindex)):
zindex = np.asarray((zindex,))
else: # Check if iterable. If not, then make an array containing 0
try:
temp = zindex[0]
except:
zindex = np.array([0])
'''
'''
import keras
import tensorflow as tf
from keras.models import Model
from keras import backend as K
from keras.layers import Input, merge, Conv2D, ZeroPadding2D, UpSampling2D, Dense, concatenate, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D, GlobalAveragePooling2D, MaxPooling2D
from keras.layers.core import Dense, Dropout, Activation
from keras.layers import BatchNormalization, Dropout, Flatten, Lambda
from keras.layers.advanced_activations import ELU, LeakyReLU
from keras.optimizers import Adam, RMSprop, SGD
from keras.regularizers import l2
from keras.layers.noise import GaussianDropout
import numpy as np
smooth = 1.
dropout_rate = 0.5
act = "relu"
def mean_iou(y_true, y_pred):
prec = []
for t in np.arange(0.5, 1.0, 0.05):
y_pred_ = tf.to_int32(y_pred > t)
score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
K.get_session().run(tf.local_variables_initializer())
with tf.control_dependencies([up_opt]):
score = tf.identity(score)
prec.append(score)
return K.mean(K.stack(prec), axis=0)
# Custom loss function
def dice_coef(y_true, y_pred):
smooth = 1.
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
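# For a binary mask y with sum s, dice_coef(y, y) gives
# (2*s + smooth) / (2*s + smooth) = 1, i.e. identical masks score 1.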
def bce_dice_loss(y_true, y_pred):
return 0.5 * keras.losses.binary_crossentropy(y_true, y_pred) - dice_coef(y_true, y_pred)
# Evaluation metric: IoU
def compute_iou(img1, img2):
img1 = np.array(img1)
img2 = np.array(img2)
if img1.shape[0] != img2.shape[0]:
raise ValueError("Shape mismatch: the number of images mismatch.")
IoU = np.zeros( (img1.shape[0],), dtype=np.float32)
for i in range(img1.shape[0]):
im1 = np.squeeze(img1[i]>0.5)
im2 = np.squeeze(img2[i]>0.5)
if im1.shape != im2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
# Compute IoU
intersection = np.logical_and(im1, im2)
union = np.logical_or(im1, im2)
IoU[i] = intersection.sum() / union.sum()
return IoU
"""
created on Jan 29, 2014
@author: <NAME>, jajcay(at)cs.cas.cz
based on class by <NAME> -- https://github.com/vejmelkam/ndw-climate --
last update on Sep 26, 2017
"""
import csv
from datetime import date, timedelta, datetime
import numpy as np
from dateutil.relativedelta import relativedelta
from pyclits.functions import detrend_with_return
class DataField:
"""
Class holds the time series of a geophysical field. The fields for reanalysis data are
3-dimensional - two spatial and one temporal dimension. The fields for station data contains
temporal dimension and location specification.
"""
def __init__(self, data_folder='', data=None, lons=None, lats=None, time=None, verbose=False):
"""
Initializes either an empty data set or with given values.
"""
self.data_folder = data_folder
self.data = data
self.lons = lons
self.lats = lats
self.time = time
self.location = None # for station data
self.missing = None # for station data where could be some missing values
self.station_id = None # for station data
self.station_elev = None # in metres, for station data
self.var_name = None
self.nans = False
self.cos_weights = None
self.data_mask = None
self.verbose = verbose
def __str__(self):
"""
String representation.
"""
if self.data is not None:
return ("Geo data of shape %s as time x lat x lon." % str(self.data.shape))
else:
return("Empty DataField instance.")
def shape(self):
"""
Prints shape of data field.
"""
if self.data is not None:
return self.data.shape
else:
raise Exception("DataField is empty.")
def __getitem__(self, key):
"""
getitem representation.
"""
if self.data is not None:
return self.data[key]
else:
raise Exception("DataField is empty.")
def load(self, filename=None, variable_name=None, dataset='ECA-reanalysis', print_prog=True):
"""
Loads geophysical data from netCDF file for reanalysis or from text file for station data.
Now supports following datasets: (dataset - keyword passed to function)
ECA&D E-OBS gridded dataset reanalysis - 'ECA-reanalysis'
ECMWF gridded reanalysis - 'ERA'
NCEP/NCAR Reanalysis 1 - 'NCEP'
"""
from netCDF4 import Dataset
if dataset == 'ECA-reanalysis':
d = Dataset(self.data_folder + filename, 'r')
v = d.variables[variable_name]
data = v[:] # masked array - only land data, not ocean/sea
self.data = data.data.copy() # get only data, not mask
self.data[data.mask] = np.nan # filled masked values with NaNs
self.lons = d.variables['longitude'][:]
self.lats = d.variables['latitude'][:]
self.time = d.variables['time'][:] # days since 1950-01-01 00:00
self.time += date.toordinal(date(1950, 1, 1))
self.var_name = variable_name
if np.any(np.isnan(self.data)):
self.nans = True
if print_prog:
print("Data saved to structure. Shape of the data is %s" % (str(self.data.shape)))
print("Lats x lons saved to structure. Shape is %s x %s" % (str(self.lats.shape[0]), str(self.lons.shape[0])))
print("Time stamp saved to structure as ordinal values where Jan 1 of year 1 is 1")
print("The first data value is from %s and the last is from %s" % (str(self.get_date_from_ndx(0)), str(self.get_date_from_ndx(-1))))
print("Default temporal sampling in the data is %.2f day(s)" % (np.nanmean(np.diff(self.time))))
if np.any(np.isnan(self.data)):
print("The data contains NaNs! All methods are compatible with NaNs, just to let you know!")
d.close()
elif dataset == 'ERA':
d = Dataset(self.data_folder + filename, 'r')
v = d.variables[variable_name]
data = v[:]
if isinstance(data, np.ma.masked_array):
self.data = data.data.copy() # get only data, not mask
self.data[data.mask] = np.nan # filled masked values with NaNs
else:
self.data = data
self.lons = d.variables['longitude'][:]
self.lats = d.variables['latitude'][:]
if 'level' in d.variables.keys():
self.level = d.variables['level'][:]
self.time = d.variables['time'][:] # hours since 1900-01-01 00:00
self.time = self.time / 24.0 + date.toordinal(date(1900, 1, 1))
self.var_name = variable_name
if np.any(np.isnan(self.data)):
self.nans = True
if print_prog:
print("Data saved to structure. Shape of the data is %s" % (str(self.data.shape)))
print("Lats x lons saved to structure. Shape is %s x %s" % (str(self.lats.shape[0]), str(self.lons.shape[0])))
print("Time stamp saved to structure as ordinal values where Jan 1 of year 1 is 1")
print("The first data value is from %s and the last is from %s" % (str(self.get_date_from_ndx(0)), str(self.get_date_from_ndx(-1))))
print("Default temporal sampling in the data is %.2f day(s)" % (np.nanmean(np.diff(self.time))))
if np.any(np.isnan(self.data)):
print("The data contains NaNs! All methods are compatible with NaNs, just to let you know!")
d.close()
elif dataset == 'NCEP':
d = Dataset(self.data_folder + filename, 'r')
v = d.variables[variable_name]
data = v[:] # masked array - only land data, not ocean/sea
if isinstance(data, np.ma.masked_array):
self.data = data.data.copy() # get only data, not mask
self.data[data.mask] = np.nan # filled masked values with NaNs
else:
self.data = data
self.lons = d.variables['lon'][:]
if np.any(self.lons < 0):
self._shift_lons_to_360()
self.lats = d.variables['lat'][:]
if 'level' in d.variables.keys():
self.level = d.variables['level'][:]
self.time = d.variables['time'][:] # hours or days since some date
date_since = self._parse_time_units(d.variables['time'].units)
if "hours" in d.variables['time'].units:
self.time = self.time / 24.0 + date.toordinal(date_since)
elif "days" in d.variables['time'].units:
self.time += date.toordinal(date_since)
elif "months" in d.variables['time'].units:
from dateutil.relativedelta import relativedelta
for t in range(self.time.shape[0]):
self.time[t] = date.toordinal(date_since + relativedelta(months=+int(self.time[t])))
self.var_name = variable_name
if np.any(np.isnan(self.data)):
self.nans = True
if print_prog:
print("Data saved to structure. Shape of the data is %s" % (str(self.data.shape)))
print("Lats x lons saved to structure. Shape is %s x %s" % (str(self.lats.shape[0]), str(self.lons.shape[0])))
print("Time stamp saved to structure as ordinal values where Jan 1 of year 1 is 1")
print("The first data value is from %s and the last is from %s" % (str(self.get_date_from_ndx(0)), str(self.get_date_from_ndx(-1))))
print("Default temporal sampling in the data is %.2f day(s)" % (np.nanmean(np.diff(self.time))))
if np.any(np.isnan(self.data)):
print("The data contains NaNs! All methods are compatible with NaNs, just to let you know!")
d.close()
elif dataset == 'arbitrary':
d = Dataset(self.data_folder + filename, 'r')
v = d.variables[variable_name]
data = v[:] # masked array - only land data, not ocean/sea
if isinstance(data, np.ma.masked_array):
self.data = data.data.copy() # get only data, not mask
self.data[data.mask] = np.nan # filled masked values with NaNs
self.data_mask = data.mask.copy()
else:
self.data = data.copy()
self.data = np.squeeze(self.data)
for key in d.variables.keys():
if key == variable_name:
continue
if 'lat' in str(d.variables[key].name):
self.lats = d.variables[key][:]
if 'lon' in str(d.variables[key].name):
self.lons = d.variables[key][:]
if np.any(self.lons < 0):
self._shift_lons_to_360()
                try: # handle the case when a netCDF variable has no units assigned
if 'since' in d.variables[key].units:
self.time = d.variables[key][:]
date_since = self._parse_time_units(d.variables[key].units)
if "hours" in d.variables[key].units:
self.time = self.time / 24.0 + date.toordinal(date_since)
elif "seconds" in d.variables[key].units:
self.time = self.time / 86400. + date.toordinal(date_since)
elif "days" in d.variables[key].units:
self.time += date.toordinal(date_since)
elif "months" in d.variables[key].units:
from dateutil.relativedelta import relativedelta
for t in range(self.time.shape[0]):
self.time[t] = date.toordinal(date_since + relativedelta(months = +int(self.time[t])))
except AttributeError:
pass
self.var_name = variable_name
if np.any(np.isnan(self.data)):
self.nans = True
if print_prog:
print("Data saved to structure. Shape of the data is %s" % (str(self.data.shape)))
print("Lats x lons saved to structure. Shape is %s x %s" % (str(self.lats.shape[0]), str(self.lons.shape[0])))
print("Time stamp saved to structure as ordinal values where Jan 1 of year 1 is 1")
print("The first data value is from %s and the last is from %s" % (str(self.get_date_from_ndx(0)), str(self.get_date_from_ndx(-1))))
print("Default temporal sampling in the data is %.2f day(s)" % (np.nanmean(np.diff(self.time))))
if np.any(np.isnan(self.data)):
print("The data contains NaNs! All methods are compatible with NaNs, just to let you know!")
d.close()
else:
raise Exception("Unknown or unsupported dataset!")
def _shift_lons_to_360(self):
"""
Shifts lons to 0-360 degree east.
"""
self.lons[self.lons < 0] += 360
ndx = np.argsort(self.lons)
self.lons = self.lons[ndx]
self.data = self.data[..., ndx]
@staticmethod
def _parse_time_units(time_string):
"""
Parses time units from netCDF file, returns date since the record.
"""
date_split = time_string.split('-')
y = ("%04d" % int(date_split[0][-4:]))
m = ("%02d" % int(date_split[1]))
d = ("%02d" % int(date_split[2][:2]))
return datetime.strptime("%s-%s-%s" % (y, m, d), '%Y-%m-%d')
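    # Hedged example of what this parser accepts (illustrative unit string):
    #   DataField._parse_time_units("hours since 1900-01-01 00:00:0.0")
    #   -> datetime(1900, 1, 1); load() then converts the offsets to date ordinals.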
def load_station_data(self, filename, dataset='ECA-station', print_prog=True, offset_in_file=0):
"""
Loads station data, usually from text file. Uses numpy.loadtxt reader.
"""
if dataset == 'Klem_day':
            raw_data = np.loadtxt(self.data_folder + filename) # first column is continuous year and second is actual data
self.data = np.array(raw_data[:, 1])
time = []
# use time iterator to go through the dates
y = int(np.modf(raw_data[0, 0])[1])
if np.modf(raw_data[0, 0])[0] == 0:
start_date = date(y, 1, 1)
delta = timedelta(days = 1)
d = start_date
while len(time) < raw_data.shape[0]:
time.append(d.toordinal())
d += delta
self.time = np.array(time)
self.location = 'Praha-Klementinum, Czech Republic'
print("Station data from %s saved to structure. Shape of the data is %s" % (self.location, str(self.data.shape)))
print("Time stamp saved to structure as ordinal values where Jan 1 of year 1 is 1")
if dataset == 'ECA-station':
            with open(self.data_folder + filename, 'r') as f:  # text mode, as csv.reader on Python 3 needs str rows
time = []
data = []
missing = []
i = 0 # line-counter
reader = csv.reader(f)
for row in reader:
i += 1
if i == 16 + offset_in_file: # line with location
c_list = filter(None, row[1].split(" "))
del c_list[-2:]
country = ' '.join(c_list).lower()
station = ' '.join(row[0].split(" ")[7:]).lower()
self.location = station.title() + ', ' + country.title()
if i > 20 + offset_in_file: # actual data - len(row) = 5 as STAID, SOUID, DATE, TG, Q_TG
staid = int(row[0])
value = float(row[3])
year = int(row[2][:4])
month = int(row[2][4:6])
day = int(row[2][6:])
time.append(date(year, month, day).toordinal())
if value == -9999.:
missing.append(date(year, month, day).toordinal())
data.append(np.nan)
else:
data.append(value/10.)
self.station_id = staid
self.data = np.array(data)
self.time = np.array(time)
self.missing = np.array(missing)
if print_prog:
print("Station data from %s saved to structure. Shape of the data is %s" % (self.location, str(self.data.shape)))
print("Time stamp saved to structure as ordinal values where Jan 1 of year 1 is 1")
if self.missing.shape[0] != 0 and self.verbose:
print("** WARNING: There were some missing values! To be precise, %d missing values were found!" % (self.missing.shape[0]))
def copy_data(self):
"""
Returns the copy of data.
"""
return self.data.copy()
def copy(self, temporal_ndx=None):
"""
Returns a copy of DataField with data, lats, lons and time fields.
If temporal_ndx is not None, copies only selected temporal part of data.
"""
copied = DataField()
copied.data = self.data.copy()
copied.time = self.time.copy()
if temporal_ndx is not None:
copied.data = copied.data[temporal_ndx]
copied.time = copied.time[temporal_ndx]
if self.lats is not None:
copied.lats = self.lats.copy()
if self.lons is not None:
copied.lons = self.lons.copy()
if self.location is not None:
copied.location = self.location
if self.missing is not None:
copied.missing = self.missing.copy()
if self.station_id is not None:
copied.station_id = self.station_id
if self.station_elev is not None:
copied.station_elev = self.station_elev
if self.var_name is not None:
copied.var_name = self.var_name
if self.cos_weights is not None:
copied.cos_weights = self.cos_weights
if self.data_mask is not None:
copied.data_mask = self.data_mask
copied.nans = self.nans
return copied
def select_date(self, date_from, date_to, apply_to_data=True, exclusive=True):
"""
        Selects the date range - date_from is inclusive, date_to is exclusive (inclusive when exclusive=False). Input is date(year, month, day).
"""
d_start = date_from.toordinal()
d_to = date_to.toordinal()
if exclusive:
ndx = np.logical_and(self.time >= d_start, self.time < d_to)
else:
ndx = np.logical_and(self.time >= d_start, self.time <= d_to)
if apply_to_data:
self.time = self.time[ndx] # slice time stamp
self.data = self.data[ndx, ...] # slice data
if self.data_mask is not None and self.data_mask.ndim > 2:
                self.data_mask = self.data_mask[ndx, ...] # slice the data mask if it exists
if self.missing is not None:
missing_ndx = np.logical_and(self.missing >= d_start, self.missing < d_to)
self.missing = self.missing[missing_ndx] # slice missing if exists
return ndx
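    # Usage sketch (hypothetical, already-loaded daily field `g`):
    #   ndx = g.select_date(date(1980, 1, 1), date(1990, 1, 1))
    # keeps only the 1980s; with exclusive=False the end date itself is kept as well.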
def get_sliding_window_indexes(self, window_length, window_shift, unit='m', return_half_dates=False):
"""
Returns list of indices for sliding window analysis.
If return_half_dates is True, also returns dates in the middle of the interval for reference.
"""
from dateutil.relativedelta import relativedelta
if unit == 'm':
length = relativedelta(months = +window_length)
shift = relativedelta(months = +window_shift)
elif unit == 'd':
length = relativedelta(days = +window_length)
shift = relativedelta(days = +window_shift)
elif unit == 'y':
length = relativedelta(years = +window_length)
shift = relativedelta(years = +window_shift)
else:
raise Exception("Unknown time unit! Please, use one of the 'd', 'm', 'y'!")
ndxs = []
if return_half_dates:
half_dates = []
window_start = self.get_date_from_ndx(0)
window_end = window_start + length
while window_end <= self.get_date_from_ndx(-1):
ndx = self.select_date(window_start, window_end, apply_to_data=False)
ndxs.append(ndx)
if return_half_dates:
half_dates.append(window_start + (window_end - window_start) / 2)
window_start += shift
window_end = window_start + length
# add last
ndxs.append(self.select_date(window_start, window_end, apply_to_data=False))
if return_half_dates:
half_dates.append(window_start + (self.get_date_from_ndx(-1) - window_start) / 2)
if np.sum(ndxs[-1]) != np.sum(ndxs[-2]) and self.verbose:
print("**WARNING: last sliding window is shorter than others! (%d vs. %d in others)"
% (np.sum(ndxs[-1]), np.sum(ndxs[-2])))
if return_half_dates:
return ndxs, half_dates
else:
return ndxs
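    # Usage sketch (hypothetical monthly field `g`): 30-year windows shifted by one year,
    # e.g. for running-window statistics:
    #   ndxs, half_dates = g.get_sliding_window_indexes(360, 12, unit='m', return_half_dates=True)
    #   for ndx in ndxs:
    #       window = g.data[ndx, ...]  # analyse each window separately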
def create_time_array(self, date_from, sampling='m'):
"""
Creates time array for already saved data in 'self.data'.
From date_from to date_from + data length. date_from is inclusive.
Sampling:
'm' for monthly, could be just 'm' or '3m' as three-monthly
'd' for daily
'xh' where x = {1, 6, 12} for sub-daily.
"""
if 'm' in sampling:
if 'm' != sampling:
n_months = int(sampling[:-1])
timedelta = relativedelta(months=+n_months)
elif 'm' == sampling:
timedelta = relativedelta(months=+1)
elif sampling == 'd':
timedelta = relativedelta(days=+1)
elif sampling in ['1h', '6h', '12h']:
hourly_data = int(sampling[:-1])
timedelta = relativedelta(hours=+hourly_data)
elif sampling == 'y':
timedelta = relativedelta(years=+1)
else:
raise Exception("Unknown sampling.")
d_now = date_from
self.time = np.zeros((self.data.shape[0],))
for t in range(self.data.shape[0]):
self.time[t] = d_now.toordinal()
d_now += timedelta
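    # Usage sketch: attach a monthly time axis starting January 1950 to data that was
    # loaded without time information (hypothetical field `g`, one value per month):
    #   g.create_time_array(date(1950, 1, 1), sampling='m')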
def get_date_from_ndx(self, ndx):
"""
Returns the date of the variable from given index.
"""
return date.fromordinal(np.int(self.time[ndx]))
def get_spatial_dims(self):
"""
Returns the spatial dimensions of the data as list.
"""
return list(self.data.shape[-2:])
def find_date_ndx(self, date):
"""
Returns index which corresponds to the date. Returns None if the date is not contained in the data.
"""
d = date.toordinal()
pos = np.nonzero(self.time == d)
if not np.all(np.isnan(pos)):
return int(pos[0])
else:
return None
def get_closest_lat_lon(self, lat, lon):
"""
Returns closest lat, lon index in the data.
"""
return [np.abs(self.lats - lat).argmin(), np.abs(self.lons - lon).argmin()]
def select_months(self, months, apply_to_data=True):
"""
Subselects only certain months. Input as a list of months number.
"""
        ndx = [i for i in range(len(self.time)) if date.fromordinal(int(self.time[i])).month in months]  # list needed for fancy indexing on Python 3
if apply_to_data:
self.time = self.time[ndx]
self.data = self.data[ndx, ...]
return ndx
def select_lat_lon(self, lats, lons, apply_to_data = True):
"""
Selects region in lat/lon. Input is for both [from, to], both are inclusive. If None, the dimension is not modified.
"""
if self.lats is not None and self.lons is not None:
if lats is not None:
lat_ndx = np.nonzero(np.logical_and(self.lats >= lats[0], self.lats <= lats[1]))[0]
else:
lat_ndx = np.arange(len(self.lats))
if lons is not None:
if lons[0] < lons[1]:
lon_ndx = np.nonzero(np.logical_and(self.lons >= lons[0], self.lons <= lons[1]))[0]
elif lons[0] > lons[1]:
l1 = list(np.nonzero(np.logical_and(self.lons >= lons[0], self.lons <= 360))[0])
l2 = list(np.nonzero(np.logical_and(self.lons >= 0, self.lons <= lons[1]))[0])
lon_ndx = np.array(l1 + l2)
else:
lon_ndx = np.arange(len(self.lons))
if apply_to_data:
if self.data.ndim >= 3:
d = self.data.copy()
d = d[..., lat_ndx, :]
self.data = d[..., lon_ndx].copy()
self.lats = self.lats[lat_ndx]
self.lons = self.lons[lon_ndx]
if self.data_mask is not None:
d = self.data_mask
d = d[..., lat_ndx, :]
self.data_mask = d[..., lon_ndx]
elif self.data.ndim == 2: # multiple stations data
d = self.data.copy()
d = d[:, lat_ndx]
self.lons = self.lons[lat_ndx]
self.lats = self.lats[lat_ndx]
if lons is not None:
if lons[0] < lons[1]:
lon_ndx = np.nonzero(np.logical_and(self.lons >= lons[0], self.lons <= lons[1]))[0]
elif lons[0] > lons[1]:
l1 = list(np.nonzero(np.logical_and(self.lons >= lons[0], self.lons <= 360))[0])
l2 = list(np.nonzero(np.logical_and(self.lons >= 0, self.lons <= lons[1]))[0])
lon_ndx = np.array(l1 + l2)
else:
lon_ndx = np.arange(len(self.lons))
self.data = d[:, lon_ndx].copy()
self.lons = self.lons[lon_ndx]
self.lats = self.lats[lon_ndx]
if np.any(np.isnan(self.data)):
self.nans = True
else:
self.nans = False
return lat_ndx, lon_ndx
else:
raise Exception('Slicing data with no spatial dimensions, probably station data.')
def cut_lat_lon(self, lats_to_cut, lons_to_cut):
"""
Cuts region in lats/lons (puts NaNs in the selected regions).
Input is for both [from, to], both are inclusive. If None, the dimension is not modified.
"""
if self.lats is not None and self.lons is not None:
if lats_to_cut is not None:
lat_ndx = np.nonzero(np.logical_and(self.lats >= lats_to_cut[0], self.lats <= lats_to_cut[1]))[0]
if lons_to_cut is None:
self.data[..., lat_ndx, :] = np.nan
if lons_to_cut is not None:
if lons_to_cut[0] < lons_to_cut[1]:
lon_ndx = np.nonzero(np.logical_and(self.lons >= lons_to_cut[0], self.lons <= lons_to_cut[1]))[0]
elif lons_to_cut[0] > lons_to_cut[1]:
l1 = list(np.nonzero(np.logical_and(self.lons >= lons_to_cut[0], self.lons <= 360))[0])
l2 = list(np.nonzero(np.logical_and(self.lons >= 0, self.lons <= lons_to_cut[1]))[0])
lon_ndx = np.array(l1 + l2)
if lats_to_cut is None:
self.data[..., lon_ndx] = np.nan
if lats_to_cut is not None and lons_to_cut is not None:
for lat in lat_ndx:
for lon in lon_ndx:
self.data[..., lat, lon] = np.nan
else:
raise Exception('Slicing data with no spatial dimensions, probably station data.')
def select_level(self, level):
"""
Selects the proper level from the data. Input should be integer >= 0.
"""
if self.data.ndim > 3:
self.data = self.data[:, level, ...]
self.level = self.level[level]
else:
raise Exception('Slicing level in single-level data.')
def extract_day_month_year(self):
"""
        Extracts the self.time field into three fields containing days, months and years.
"""
n_days = len(self.time)
days = np.zeros((n_days,), dtype = np.int)
months = np.zeros((n_days,), dtype = np.int)
years = np.zeros((n_days,), dtype = np.int)
for i,d in zip(range(n_days), self.time):
dt = date.fromordinal(int(d))
days[i] = dt.day
months[i] = dt.month
years[i] = dt.year
return days, months, years
def latitude_cos_weights(self):
"""
Returns a grid with scaling weights based on cosine of latitude.
"""
        # note: shape is a tuple while get_spatial_dims() returns a list, hence the cast
        if (self.cos_weights is not None) and (list(self.cos_weights.shape) == self.get_spatial_dims()):
return self.cos_weights
cos_weights = np.zeros(self.get_spatial_dims())
for ndx in range(self.lats.shape[0]):
cos_weights[ndx, :] = np.cos(self.lats[ndx] * np.pi/180.) ** 0.5
self.cos_weights = cos_weights
return cos_weights
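    # Usage sketch: area-weight a field before e.g. PCA (hypothetical field `g`):
    #   w = g.latitude_cos_weights()          # lat x lon grid of sqrt(cos(latitude))
    #   weighted = g.data * w[np.newaxis, ...]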
def missing_day_month_year(self):
"""
Extracts the self.missing field (if exists and is non-empty) into three fields containing days, months and years.
"""
if (self.missing is not None) and (self.missing.shape[0] != 0):
n_days = len(self.missing)
days = np.zeros((n_days,), dtype = np.int)
months = np.zeros((n_days,), dtype = np.int)
years = np.zeros((n_days,), dtype = np.int)
for i,d in zip(range(n_days), self.missing):
dt = date.fromordinal(int(d))
days[i] = dt.day
months[i] = dt.month
years[i] = dt.year
return days, months, years
else:
            raise Exception('Luckily for you, there are no missing values!')
def flatten_field(self, f = None):
"""
        Reshape the field to 2 dimensions such that axis 0 is temporal and axis 1 is spatial.
If f is None, reshape the self.data field, else reshape the f field.
Should only be used with single-level data.
"""
if f is None:
if self.data.ndim == 3:
self.data = np.reshape(self.data, (self.data.shape[0], np.prod(self.data.shape[1:])))
else:
raise Exception('Data field is already flattened, multi-level or only temporal (e.g. station)!')
elif f is not None:
if f.ndim == 3:
f = np.reshape(f, (f.shape[0], np.prod(f.shape[1:])))
return f
else:
raise Exception('The field f is already flattened, multi-level or only temporal (e.g. station)!')
def reshape_flat_field(self, f = None):
"""
Reshape flattened field to original time x lat x lon shape.
If f is None, reshape the self.data field, else reshape the f field.
Supposes single-level data.
"""
if f is None:
if self.data.ndim == 2:
new_shape = [self.data.shape[0]] + list((self.lats.shape[0], self.lons.shape[0]))
self.data = np.reshape(self.data, new_shape)
else:
raise Exception('Data field is not flattened, is multi-level or is only temporal (e.g. station)!')
elif f is not None:
if f.ndim == 2:
new_shape = [f.shape[0]] + list((self.lats.shape[0], self.lons.shape[0]))
f = np.reshape(f, new_shape)
return f
else:
raise Exception('The field f is not flattened, is multi-level or is only temporal (e.g. station)!')
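    # flatten_field and reshape_flat_field are inverses of each other -- a typical
    # round trip (hypothetical field `g` of shape time x lat x lon):
    #   g.flatten_field()                     # -> time x (lat*lon), e.g. for PCA/SVD
    #   g.data = some_processing(g.data)      # hypothetical processing step
    #   g.reshape_flat_field()                # -> back to time x lat x lon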
def get_data_of_precise_length(self, length = '16k', start_date = None, end_date = None, apply_to_data = False):
"""
Selects the data such that the length of the time series is exactly length.
If apply_to_data is True, it will replace the data and time, if False it will return them.
If end_date is defined, it is exclusive.
"""
if isinstance(length, int):
ln = length
elif 'k' in length:
order = int(length[:-1])
pow2list = np.array([np.power(2,n) for n in range(10,22)])
            ln = pow2list[np.where(order == pow2list // 1000)[0][0]]  # integer division so e.g. '16k' matches 16384
else:
raise Exception('Could not understand the length! Please type length as integer or as string like "16k".')
if start_date is not None and self.find_date_ndx(start_date) is None:
start_date = self.get_date_from_ndx(0)
if end_date is not None and self.find_date_ndx(end_date) is None:
end_date = self.get_date_from_ndx(-1)
if end_date is None and start_date is not None:
# from start date until length
idx = self.find_date_ndx(start_date)
data_temp = self.data[idx : idx + ln, ...].copy()
time_temp = self.time[idx : idx + ln, ...].copy()
idx_tuple = (idx, idx+ln)
elif start_date is None and end_date is not None:
idx = self.find_date_ndx(end_date)
data_temp = self.data[idx - ln + 1 : idx + 1, ...].copy()
time_temp = self.time[idx - ln + 1 : idx + 1, ...].copy()
idx_tuple = (idx - ln, idx)
else:
raise Exception('You messed start / end date selection! Pick only one!')
if apply_to_data:
self.data = data_temp.copy()
self.time = time_temp.copy()
return idx_tuple
else:
return data_temp, time_temp, idx_tuple
def _shift_index_by_month(self, current_idx):
"""
Returns the index in data shifted by month.
"""
dt = date.fromordinal(np.int(self.time[current_idx]))
if dt.month < 12:
mi = dt.month + 1
y = dt.year
else:
mi = 1
y = dt.year + 1
return self.find_date_ndx(date(y, mi, dt.day))
def get_annual_data(self, means = True, ts = None):
"""
Converts the data to annual means or sums.
If ts is None, uses self.data.
if means is True, computes annual means, otherwise computes sums.
"""
yearly_data = []
yearly_time = []
_, _, year = self.extract_day_month_year()
for y in range(year[0], year[-1]+1, 1):
year_ndx = np.where(year == y)[0]
if ts is None:
if means:
yearly_data.append(np.squeeze(np.nanmean(self.data[year_ndx, ...], axis = 0)))
else:
yearly_data.append(np.squeeze(np.nansum(self.data[year_ndx, ...], axis = 0)))
else:
if means:
yearly_data.append(np.squeeze(np.nanmean(ts[year_ndx, ...], axis = 0)))
else:
yearly_data.append(np.squeeze(np.nansum(ts[year_ndx, ...], axis = 0)))
yearly_time.append(date(y, 1, 1).toordinal())
if ts is None:
self.data = np.array(yearly_data)
self.time = np.array(yearly_time)
else:
return np.array(yearly_data)
def get_monthly_data(self, means = True):
"""
Converts the daily data to monthly means or sums.
"""
delta = self.time[1] - self.time[0]
if delta == 1:
# daily data
day, mon, year = self.extract_day_month_year()
monthly_data = []
monthly_time = []
# if first day of the data is not the first day of month - shift month
# by one to start with the full month
if day[0] != 1:
mi = mon[0]+1 if mon[0] < 12 else 1
y = year[0] if mon[0] < 12 else year[0] + 1
else:
mi = mon[0]
y = year[0]
start_idx = self.find_date_ndx(date(y, mi, 1))
end_idx = self._shift_index_by_month(start_idx)
while end_idx <= self.data.shape[0] and end_idx is not None:
if means:
monthly_data.append(np.nanmean(self.data[start_idx : end_idx, ...], axis = 0))
else:
monthly_data.append(np.nansum(self.data[start_idx : end_idx, ...], axis = 0))
monthly_time.append(self.time[start_idx])
start_idx = end_idx
end_idx = self._shift_index_by_month(start_idx)
if end_idx is None: # last piece, then exit the loop
if means:
monthly_data.append(np.nanmean(self.data[start_idx : , ...], axis = 0))
else:
monthly_data.append(np.nansum(self.data[start_idx : , ...], axis = 0))
monthly_time.append(self.time[start_idx])
self.data = np.array(monthly_data)
self.time = np.array(monthly_time)
elif abs(delta - 30) < 3.0:
            # monthly data
            print('The data are already monthly values. Nothing happened.')
else:
raise Exception('Unknown temporal sampling in the field.')
def average_to_daily(self):
"""
Averages the sub-daily values (e.g. ERA-40 basic sampling is 6 hours) into daily.
"""
delta = self.time[1] - self.time[0]
if delta < 1:
n_times = int(1 / delta)
d = np.zeros_like(self.data)
            d = np.delete(d, slice(0, (n_times-1) * d.shape[0] // n_times), axis = 0)  # integer division needed on Python 3
            t = np.zeros(self.time.shape[0] // n_times)
for i in range(d.shape[0]):
d[i, ...] = np.nanmean(self.data[n_times*i : n_times*i+(n_times-1), ...], axis = 0)
t[i] = self.time[n_times*i]
self.data = d
self.time = t.astype(np.int)
else:
raise Exception('No sub-daily values, you can average to daily only values with finer time sampling.')
@staticmethod
def _interp_temporal(a):
"""
Helper function for temporal interpolation
"""
import scipy.interpolate as si
i, j, old_time, data, new_time, kind = a
f = si.interp1d(old_time, data, kind = kind)
new_data = f(new_time)
return i, j, new_data
def interpolate_to_finer_temporal_resolution(self, to_resolution = 'm', kind = 'linear', use_to_data = False,
pool = None):
"""
Interpolates data to finer temporal resolution, e.g. yearly to monthly.
Uses scipy's interp1d, for 'kind' keyword see the scipy's documentation.
If use_to_data is True, rewrites data in the class, else returns data.
"""
if self.data.ndim > 2:
num_lats = self.lats.shape[0]
num_lons = self.lons.shape[0]
elif self.data.ndim == 2: # lot of station data
num_lats = self.lats.shape[0]
num_lons = 1
self.data = self.data[:, :, np.newaxis]
else:
num_lats = 1
num_lons = 1
self.data = self.data[:, np.newaxis, np.newaxis]
if 'm' in to_resolution:
if 'm' != to_resolution:
n_months = int(to_resolution[:-1])
timedelta = relativedelta(months = +n_months)
elif 'm' == to_resolution:
timedelta = relativedelta(months = +1)
elif to_resolution == 'd':
timedelta = relativedelta(days = +1)
elif to_resolution in ['1h', '6h', '12h']:
hourly_data = int(to_resolution[:-1])
timedelta = relativedelta(hours = +hourly_data)
elif to_resolution == 'y':
timedelta = relativedelta(years = +1)
else:
raise Exception("Unknown to_resolution.")
new_time = []
first_date = self.get_date_from_ndx(0)
last_day = self.get_date_from_ndx(-1)
current_date = first_date
while current_date <= last_day:
new_time.append(current_date.toordinal())
current_date += timedelta
new_time = np.array(new_time)
job_args = [ (i, j, self.time, self.data[:, i, j], new_time, kind) for i in range(num_lats) for j in range(num_lons) ]
interp_data = np.zeros([new_time.shape[0]] + list(self.get_spatial_dims()))
if pool is None:
job_result = map(self._interp_temporal, job_args)
elif pool is not None:
job_result = pool.map(self._interp_temporal, job_args)
del job_args
for i, j, res in job_result:
interp_data[:, i, j] = res
interp_data = np.squeeze(interp_data)
self.data = np.squeeze(self.data)
if use_to_data:
self.time = new_time.copy()
self.data = interp_data.copy()
else:
return interp_data, new_time
def _ascending_descending_lat_lons(self, lats = True, lons = False, direction = 'asc'):
"""
Transforms the data (and lats and lons) so that they have strictly ascending (direction = 'asc')
or descending (direction = 'des') order. (Needed for interpolation).
Returns True if manipulation took place.
"""
lat_flg, lon_flg = False, False
if np.all(np.diff(self.lats) < 0) and lats and direction == 'asc':
self.lats = self.lats[::-1]
self.data = self.data[..., ::-1, :]
lat_flg = True
elif np.all(np.diff(self.lats) > 0) and lats and direction == 'des':
self.lats = self.lats[::-1]
self.data = self.data[..., ::-1, :]
lat_flg = True
if np.all(np.diff(self.lons) < 0) and lons and direction == 'asc':
self.lons = self.lons[::-1]
self.data = self.data[..., ::-1]
lon_flg = True
elif np.all(np.diff(self.lons) > 0) and lons and direction == 'des':
self.lons = self.lons[::-1]
self.data = self.data[..., ::-1]
lon_flg = True
return lat_flg, lon_flg
def subsample_spatial(self, lat_to, lon_to, start, average = False):
"""
Subsamples the data in the spatial sense to grid "lat_to" x "lon_to" in degress.
Start is starting point for subsampling in degrees as [lat, lon]
If average is True, the subsampling is due to averaging the data -- using SciPy's spline
interpolation on the rectangle. The interpolation is done for each time step and level
independently.
If average is False, the subsampling is just subsampling certain values.
"""
if self.lats is not None and self.lons is not None:
delta_lats = np.abs(self.lats[1] - self.lats[0])
delta_lons = np.abs(self.lons[1] - self.lons[0])
if lat_to % delta_lats == 0 and lon_to % delta_lons == 0:
lat_ndx = int(lat_to // delta_lats)
lon_ndx = int(lon_to // delta_lons)
lat_flg, lon_flg = self._ascending_descending_lat_lons(lats = True, lons = True, direction = 'asc')
start_lat_ndx = np.where(self.lats == start[0])[0]
start_lon_ndx = np.where(self.lons == start[1])[0]
if start_lon_ndx.size == 1 and start_lat_ndx.size == 1:
start_lat_ndx = start_lat_ndx[0]
start_lon_ndx = start_lon_ndx[0]
if not average:
self.lats = self.lats[start_lat_ndx::lat_ndx]
self.lons = self.lons[start_lon_ndx::lon_ndx]
d = self.data
d = d[..., start_lat_ndx::lat_ndx, :]
self.data = d[..., start_lon_ndx::lon_ndx]
else:
nan_flag = False
if self.nans:
if self.check_NaNs_only_spatial():
# for interpolation purposes, fill NaNs with 0.
msk = np.isnan(self.data)
self.data[msk] = 0.
msk = msk[0, ...]
nan_flag = True
else:
raise Exception("NaNs in the data are not only spatial, cannot interpolate!")
from scipy.interpolate import RectBivariateSpline
# if data is single-level - create additional dummy dimension
if self.data.ndim == 3:
self.data = self.data[:, np.newaxis, :, :]
# fields for new lats / lons
new_lats = np.arange(start[0], self.lats[-1]+lat_to, lat_to)
new_lons = np.arange(start[1], self.lons[-1], lon_to)
d = np.zeros((list(self.data.shape[:2]) + [new_lats.shape[0], new_lons.shape[0]]))
# interpolate using Bivariate spline
for t in range(self.time.shape[0]):
for lvl in range(self.data.shape[1]):
int_scheme = RectBivariateSpline(self.lats, self.lons, self.data[t, lvl, ...])
d[t, lvl, ...] = int_scheme(new_lats, new_lons)
if nan_flag:
# subsample mask to new grid
msk_temp = msk[start_lat_ndx::lat_ndx, :]
msk = msk_temp[..., start_lon_ndx::lon_ndx]
# return back NaNs
for t in range(self.time.shape[0]):
for lvl in range(self.data.shape[1]):
d[t, lvl, msk] = np.nan
self.lats = new_lats
self.lons = new_lons
self.data = np.squeeze(d)
if np.any(np.isnan(self.data)):
self.nans = True
else:
self.nans = False
else:
raise Exception("Start lat and / or lon for subsampling does not exist in the data!")
self._ascending_descending_lat_lons(lats = lat_flg, lons = lon_flg, direction = 'des')
else:
raise Exception("Subsampling lats only to multiples of %.2f and lons of %.2f" % (delta_lats, delta_lons))
else:
raise Exception("Cannot subsample station data, or data from one grid point!")
def smoothing_running_avg(self, points, cut_edges = False, use_to_data = False, ts = None):
"""
Smoothing of time series using running average over points.
If use_to_data is False, returns the data, otherwise rewrites the data in class.
"""
if ts is None:
ts = self.data.copy()
if cut_edges:
d = np.zeros(([ts.shape[0] - points + 1] + list(ts.shape[1:])))
else:
d = np.zeros_like(ts)
window = points//2
for i in range(d.shape[0]):
if cut_edges:
d[i, ...] = np.nanmean(ts[i : i+points, ...], axis = 0)
else:
d[i, ...] = np.nanmean(ts[max(i-window,1) : min(i+window,d.shape[0]), ...], axis = 0)
if use_to_data and ts is None:
self.data = d.copy()
if cut_edges:
if points % 2 == 1:
# time slicing when points is odd -- cut points//2 from the beginning and from the end
self.time = self.time[points//2 : -points//2 + 1]
else:
# time slicing when points is even -- not sure where to cut
pass
else:
return d
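    # Usage sketch: 12-point running mean of monthly data, trimming the edges
    # (hypothetical field `g`):
    #   smoothed = g.smoothing_running_avg(12, cut_edges=True)
    # The window is (roughly) centred, so with cut_edges the series shortens by points-1.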
def plot_FFT_spectrum(self, ts = None, log = True, vlines = np.arange(1,11), fname = None):
"""
Estimates power spectrum using Welch method.
if ts is None, plots spectrum of the data.
ts should have same sampling frequency as data!
y axis is log by default, if log is True, also x axis is log.
"""
import matplotlib.pyplot as plt
delta = self.time[1] - self.time[0]
if delta == 1:
# daily time series
fs = 1./86400 # Hz
elif abs(delta - 30) < 3.0:
# monthly time series
fs = 1./2.628e+6
elif abs(delta - 365) < 2.0:
# yearly time series
fs = 1./3.154e+7
plt.figure(figsize = (15,7))
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['left'].set_visible(False)
ts = ts if ts is not None else self.data.copy()
if isinstance(ts, list):
ts = np.array(ts).T
if ts.ndim > 2:
ts = ts.reshape([ts.shape[0], np.prod(ts.shape[1:])])
fft = np.abs(np.fft.rfft(ts, axis = 0))
freqs = np.fft.rfftfreq(ts.shape[0], d = 1./fs)
freqs *= 3.154e+7
if log:
plt.semilogx(freqs, 20*np.log10(fft), linewidth = 0.8) # in dB hopefully...
plt.xlabel('FREQUENCY [log 1/year]', size = 25)
else:
plt.plot(freqs, 20*np.log10(fft), linewidth = 0.8)
plt.xlabel('FREQUENCY [1/year]', size = 25)
for vline in vlines:
plt.axvline(1./vline, 0, 1, linestyle = ':',linewidth = 0.6, color = "#333333")
plt.xlim([freqs[0], freqs[-1]])
plt.ylabel('FFT SPECTRUM [dB]', size = 25)
if fname is None:
plt.show()
else:
plt.savefig(fname, bbox_inches = 'tight')
def temporal_filter(self, cutoff, btype, ftype = 'butter', order = 2, cut = 1, pool = None, cut_time = False,
rp = None, rs = None, cut_data = False):
"""
Filters data in temporal sense.
Uses Butterworth filter of order order.
btype:
lowpass
highpass
bandpass
bandstop
cutoff:
for low/high pass one frequency in months
for band* list of frequencies in months
ftype:
butter - for Butterworth filter
cheby1 - for Chebyshev type I filter
cheby2 - for Chebyshev type II filter
ellip - for Cauer/elliptic filter
bessel - for Bessel/Thomson filter
cut in years
"""
from scipy.signal import iirfilter
delta = self.time[1] - self.time[0]
if delta == 1:
# daily time series
fs = 1./86400 # Hz
y = 365.25
elif abs(delta - 30) < 3.0:
# monthly time series
fs = 1./2.628e+6 # Hz
y = 12
nyq = 0.5 * fs # Nyquist frequency
if 'cheby' in ftype or 'ellip' == ftype:
rp = rp if rp is not None else 60
if type(cutoff) == list and btype in ['bandpass', 'bandstop']:
low = cutoff[0] if cutoff[0] > cutoff[1] else cutoff[1]
high = cutoff[1] if cutoff[0] > cutoff[1] else cutoff[0]
low = 1./(low*2.628e+6) # in months
high = 1./(high*2.628e+6)
# get coefficients
b, a = iirfilter(order, [low/nyq, high/nyq], rp = rp, rs = rs, btype = btype, analog = False, ftype = ftype)
elif btype in ['lowpass', 'highpass']:
cutoff = 1./(cutoff*2.628e+6)
b, a = iirfilter(order, cutoff/nyq, rp = rp, rs = rs, btype = btype, analog = False, ftype = ftype)
else:
raise Exception("For band filter cutoff must be a list of [low,high] for low/high-pass cutoff must be a integer!")
if pool is None:
map_func = map
elif pool is not None:
map_func = pool.map
if self.data.ndim > 1:
num_lats = self.lats.shape[0]
num_lons = self.lons.shape[0]
else:
num_lats = 1
num_lons = 1
self.data = self.data[:, np.newaxis, np.newaxis]
self.filtered_data = np.zeros_like(self.data)
job_args = [ (i, j, self.data[:, i, j], b, a) for i in range(num_lats) for j in range(num_lons) ]
job_result = map_func(self._get_filtered_data, job_args)
del job_args
for i, j, res in job_result:
self.filtered_data[:, i, j] = res
del job_result
if cut is not None:
to_cut = int(y*cut)
if cut_time:
self.time = self.time[to_cut:-to_cut]
if cut_data:
self.data = self.data[to_cut:-to_cut]
self.data = np.squeeze(self.data)
self.filtered_data = np.squeeze(self.filtered_data) if cut is None else np.squeeze(self.filtered_data[to_cut:-to_cut, ...])
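    # Usage sketch: keep the 5--7 year band of monthly data (hypothetical field `g`;
    # cutoffs are given in months, hence 60 and 84):
    #   g.temporal_filter(cutoff=[60, 84], btype='bandpass', order=3,
    #                     cut=1, cut_time=True, cut_data=True)
    #   band = g.filtered_data   # filtered series with one year cut from each edge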
def spatial_filter(self, filter_weights = [1, 2, 1], use_to_data = False):
"""
Filters the data in spatial sense with weights filter_weights.
If use_to_data is False, returns the data, otherwise rewrites the data in class.
"""
if self.data.ndim == 3:
self.data = self.data[:, np.newaxis, :, :]
mask = np.zeros(self.data.shape[-2:])
filt = np.outer(filter_weights, filter_weights)
mask[:filt.shape[0], :filt.shape[1]] = filt
d = np.zeros((list(self.data.shape[:-2]) + [self.lats.shape[0] - len(filter_weights) + 1, self.lons.shape[0] - len(filter_weights) + 1]))
for i in range(d.shape[-2]):
for j in range(d.shape[-1]):
avg_mask = np.array([[mask for _ in range(d.shape[1])] for _ in range(d.shape[0])])
d[:, :, i, j] = np.average(self.data, axis = (2, 3), weights = avg_mask)
mask = np.roll(mask, 1, axis = 1)
# return mask to correct y position
mask = np.roll(mask, len(filter_weights)-1, axis = 1)
mask = np.roll(mask, 1, axis = 0)
if use_to_data:
self.data = np.squeeze(d).copy()
# space slicing when length of filter is odd -- cut length//2 from the beginning and from the end
if len(filter_weights) % 2 == 1:
self.lats = self.lats[len(filter_weights)//2 : -len(filter_weights)//2 + 1]
self.lons = self.lons[len(filter_weights)//2 : -len(filter_weights)//2 + 1]
else:
# space slicing when length of filter is even -- not sure where to cut
pass
else:
return np.squeeze(d)
@staticmethod
def _interp_spatial(a):
"""
Helper function for spatial interpolation.
"""
import scipy.interpolate as si
t, d, points, msk, grid_lat, grid_lon, method = a
new_data = si.griddata(points, d[~msk], (grid_lat, grid_lon), method = method)
return t, new_data
def interpolate_spatial_nans(self, method = 'cubic', apply_to_data = True, pool = None):
"""
Interpolates data with spatial NaNs in them.
Method is one of the following:
nearest, linear, cubic
If apply to data, interpolation is done in-place, if False, data field is returned.
Uses scipy's griddata.
"""
if self.nans:
if self.check_NaNs_only_spatial():
import scipy.interpolate as si
if self.data.ndim < 4:
self.data = self.data[:, np.newaxis, ...]
new_data = np.zeros_like(self.data)
for lvl in range(self.data.shape[1]):
msk = np.isnan(self.data[0, lvl, ...]) # nan mask
grid_lat, grid_lon = np.meshgrid(self.lats, self.lons, indexing = 'ij') # final grids
points = np.zeros((grid_lat[~msk].shape[0], 2))
points[:, 0] = grid_lat[~msk]
points[:, 1] = grid_lon[~msk]
args = [(t, self.data[t, lvl, ...], points, msk, grid_lat, grid_lon, method) for t in range(self.time.shape[0])]
if pool is None:
job_res = map(self._interp_spatial, args)
else:
job_res = pool.map(self._interp_spatial, args)
for t, i_data in job_res:
new_data[t, lvl, ...] = i_data
new_data = np.squeeze(new_data)
if apply_to_data:
self.data = new_data.copy()
else:
self.data = np.squeeze(self.data)
return new_data
else:
raise Exception("NaNs are also temporal, no way to filter them out!")
else:
print("No NaNs in the data, nothing happened!")
def check_NaNs_only_spatial(self, field = None):
"""
        Returns True if the NaNs contained in the data are of a purely spatial nature, e.g.
        masked land in a sea dataset and so on.
        Returns False if there are also NaNs in the temporal sense.
        E.g. with only spatial NaNs, PCA can still be done after filtering out the NaNs.
"""
if self.nans or field is not None:
field = self.data.copy() if field is None else field
cnt = 0
nangrid0 = np.isnan(field[0, ...])
for t in range(1, field.shape[0]):
if np.all(nangrid0 == np.isnan(field[t, ...])):
cnt += 1
if field.shape[0] - cnt == 1:
return True
else:
return False
else:
pass
# print("No NaNs in the data, nothing happened!")
def filter_out_NaNs(self, field = None):
"""
Returns flattened version of 3D data field without NaNs (e.g. for computational purposes).
The data is just returned, self.data is still full 3D version. Returned data has first axis
temporal and second combined spatial.
Mask is saved for internal purposes (e.g. PCA) but also returned.
"""
if (field is None and self.nans) or (field is not None and np.any(np.isnan(field))):
if self.check_NaNs_only_spatial(field = field):
d = self.data.copy() if field is None else field
d = self.flatten_field(f = d)
mask = np.isnan(d)
spatial_mask = mask[0, :]
d_out_shape = (d.shape[0], d.shape[1] - np.sum(spatial_mask))
d_out = d[~mask].reshape(d_out_shape)
self.spatial_mask = spatial_mask
return d_out, spatial_mask
else:
raise Exception("NaNs are also temporal, no way to filter them out!")
else:
print("No NaNs in the data, nothing happened!")
def return_NaNs_to_data(self, field, mask = None):
"""
Returns NaNs to the data and reshapes it to the original shape.
Field has first axis temporal and second combined spatial.
"""
if self.nans:
if mask is not None or self.spatial_mask is not None:
mask = mask if mask is not None else self.spatial_mask
d_out = np.zeros((field.shape[0], mask.shape[0]))
ndx = np.where(mask == False)[0]
d_out[:, ndx] = field
d_out[:, mask] = np.nan
return self.reshape_flat_field(f = d_out)
else:
raise Exception("No mask given!")
else:
print("No NaNs in the data, nothing happened!")
@staticmethod
    def _rotate_varimax(U, rtol=np.finfo(np.float32).eps):
#!/usr/bin/env python
# coding: utf-8
# In this notebook, I delete a triple from the neighbourhood of the target triple based on the **IJCAI deletion scores**
#
# - neighbourhood refers to the triples that share entities with the target's entities
# - I get a deletion candidate from the neighbourhood of s and of o, then choose the one with the higher score
#
#
# In[1]:
import pickle
from typing import Dict, Tuple, List
import os
import numpy as np
import pandas as pd
from collections import defaultdict
import operator
import json
import logging
import argparse
import math
from pprint import pprint
import errno
import time
import torch
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
import torch.autograd as autograd
from evaluation import evaluation
from model import Distmult, Complex, Conve, Transe
import utils
def get_nghbr_s_deletion(per_tr, test_trip, model, epsilon, lambda1):
# get neighbours of s
sub = test_trip[0]
nghbr_mask = (np.isin(per_tr[:,0], [sub]) | np.isin(per_tr[:,2], [sub]))
test_neighbours = np.where(nghbr_mask)[0] # this is index of neighbours in training data
nghbr_trip = per_tr[test_neighbours] # actual neighbour triples
# --- Get the perturbed embedding ---
test_trip = torch.from_numpy(test_trip).to(device)[None, :]
s,r,o = test_trip[:,0], test_trip[:,1], test_trip[:,2]
# get embeddings
emb_s = model.emb_e(s)
emb_r = model.emb_rel(r)
emb_o = model.emb_e(o)
score = model.score_emb(emb_s, emb_r, emb_o)
emb_s_grad = autograd.grad(score, emb_s)
epsilon_star = -epsilon * emb_s_grad[0]
perturbed_emb_s = emb_s + epsilon_star
# get scores for each neighbour
b_begin = 0
nghbr_scores = []
if args.attack_batch_size == -1:
nghbr_batch = nghbr_trip.shape[0]
else:
nghbr_batch = args.attack_batch_size
while b_begin < nghbr_trip.shape[0]:
b_nghbr_trip = nghbr_trip[b_begin : b_begin+nghbr_batch]
b_nghbr_trip = torch.from_numpy(b_nghbr_trip).to(device)
b_nghbr_s, b_nghbr_r, b_nghbr_o = b_nghbr_trip[:,0], b_nghbr_trip[:,1], b_nghbr_trip[:,2]
#emb_nghbr_s = model.emb_e(b_nghbr_s)
emb_nghbr_r = model.emb_rel(b_nghbr_r)
emb_nghbr_o = model.emb_e(b_nghbr_o)
perturbed_emb_e = perturbed_emb_s.repeat(b_nghbr_s.shape[0],1)
emb_e = emb_s.repeat(b_nghbr_s.shape[0],1)
#print(emb_s.shape, emb_nghbr_r.shape)
s1 = model.score_emb(emb_e, emb_nghbr_r, emb_nghbr_o) #nghbr score
s2 = model.score_emb(perturbed_emb_e,
emb_nghbr_r, emb_nghbr_o) #nghbr score after perturbed s
score = s1 - lambda1*s2
score = score.detach().cpu().numpy().tolist()
nghbr_scores += score
b_begin += nghbr_batch
nghbr_scores = np.array(nghbr_scores)
nghbr_scores = torch.from_numpy(nghbr_scores).to(device)
# we want to remove the neighbour with maximum score
max_values, argsort = torch.sort(nghbr_scores, -1, descending=True)
del_idx = argsort[0].item() # index of neighbour to delete
max_val = max_values[0].item() # score of the nghbr to delete
trip_idx = test_neighbours[del_idx] # index of neighbour in train data
del_trip = nghbr_trip[del_idx] # actual triple to delete == per_tr[trip_idx]
return del_idx, max_val, trip_idx, del_trip
def get_nghbr_o_deletion(per_tr, test_trip, model, epsilon, lambda1):
# get neighbours of o
obj = test_trip[2]
nghbr_mask = (np.isin(per_tr[:,0], [obj]) | np.isin(per_tr[:,2], [obj]))
test_neighbours = np.where(nghbr_mask)[0] # this is index of neighbours in training data
nghbr_trip = per_tr[test_neighbours] # actual neighbour triples
# --- Get the perturbed embedding ---
test_trip = torch.from_numpy(test_trip).to(device)[None, :]
s,r,o = test_trip[:,0], test_trip[:,1], test_trip[:,2]
# get embeddings
emb_s = model.emb_e(s)
emb_r = model.emb_rel(r)
emb_o = model.emb_e(o)
score = model.score_emb(emb_s, emb_r, emb_o)
emb_o_grad = autograd.grad(score, emb_o)
epsilon_star = -epsilon * emb_o_grad[0]
perturbed_emb_o = emb_o + epsilon_star
# get scores for each neighbour
b_begin = 0
nghbr_scores = []
if args.attack_batch_size == -1:
nghbr_batch = nghbr_trip.shape[0]
else:
nghbr_batch = args.attack_batch_size
while b_begin < nghbr_trip.shape[0]:
b_nghbr_trip = nghbr_trip[b_begin : b_begin+nghbr_batch]
b_nghbr_trip = torch.from_numpy(b_nghbr_trip).to(device)
b_nghbr_s, b_nghbr_r, b_nghbr_o = b_nghbr_trip[:,0], b_nghbr_trip[:,1], b_nghbr_trip[:,2]
emb_nghbr_s = model.emb_e(b_nghbr_s)
emb_nghbr_r = model.emb_rel(b_nghbr_r)
#emb_nghbr_o = model.emb_e(b_nghbr_o)
perturbed_emb_e = perturbed_emb_o.repeat(b_nghbr_s.shape[0],1)
emb_e = emb_o.repeat(b_nghbr_s.shape[0],1)
s1 = model.score_emb(emb_nghbr_s, emb_nghbr_r, emb_e) #nghbr score
s2 = model.score_emb(emb_nghbr_s,
                             emb_nghbr_r, perturbed_emb_e) #nghbr score after perturbed o
score = s1 - lambda1*s2
score = score.detach().cpu().numpy().tolist()
nghbr_scores += score
b_begin += nghbr_batch
nghbr_scores = np.array(nghbr_scores)
nghbr_scores = torch.from_numpy(nghbr_scores).to(device)
# we want to remove the neighbour with maximum score
max_values, argsort = torch.sort(nghbr_scores, -1, descending=True)
del_idx = argsort[0].item() # index of neighbour to delete
max_val = max_values[0].item() # score of the nghbr to delete
trip_idx = test_neighbours[del_idx] # index of neighbour in train data
del_trip = nghbr_trip[del_idx] # actual triple to delete == per_tr[trip_idx]
return del_idx, max_val, trip_idx, del_trip
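# Hedged sketch of how the two sides could be combined, following the note at the top
# of this notebook: score a candidate deletion from each neighbourhood and keep the
# higher-scoring one. `choose_deletion` is a hypothetical helper, not from the paper.
def choose_deletion(per_tr, test_trip, model, epsilon, lambda1):
    s_del = get_nghbr_s_deletion(per_tr, test_trip, model, epsilon, lambda1)
    o_del = get_nghbr_o_deletion(per_tr, test_trip, model, epsilon, lambda1)
    # element 1 of each tuple is the deletion score; larger means a better candidate
    return s_del if s_del[1] >= o_del[1] else o_del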
def generate_nghbrs(test_set, train_set):
'''
    For every triple in the test set, return the indices of its
    neighbouring triples in the training set,
    i.e. indices into the training set are returned
'''
n_dict = {}
for t, triple in enumerate(test_set):
sub = triple[0]
obj = triple[2]
mask = (np.isin(train_set[:,0], [sub, obj]) | np.isin(train_set[:,2], [sub, obj]))
#nghbrs_dict[t] = pro_train[mask]
        mask_idx = np.where(mask)[0]
        n_dict[t] = mask_idx
    return n_dict
"""Model generic, a dual polarized antenna element."""
# TobiaC 2015-12-02
import numpy
import matplotlib.pyplot as plt
import matplotlib.dates
from antpat.reps.sphgridfun import pntsonsphere, tvecfun
from antpat.io.feko_ffe import FEKOffe
from antpat.radfarfield import RadFarField
class DualPolElem(object):
"""Main class for a dual-pol antenna element. It can be constructed
from two generic representations: two radiation far-fields (two
single pol antennas) or a tangential (Jones) matrix.
"""
def __init__(self, *args):
if len(args) == 0:
self.tmfd = None
elif len(args) == 1:
#"tmfd" stands for tangential matrix field on directions
self.tmfd = args[0]
self.radFFp = None
self.radFFq = None
elif len(args) == 2:
self.tmfd = None
self.radFFp = args[0]
self.radFFq = args[1]
else:
raise RuntimeError("Not more than two arguments")
self.basis = None
def getfreqs(self):
"""Get Frequencies"""
if self.tmfd is None:
return self.radFFp.getfreqs()
else:
return self.tmfd.getfreqs()
def getJonesPat(self,freqval):
"""Return the dual-pol antenna elements Jones pattern for a
given frequency."""
THETA, PHI, p_E_th, p_E_ph=self.radFFp.getFFongrid(freqval)
THETA, PHI, q_E_th, q_E_ph=self.radFFq.getFFongrid(freqval)
Jones=numpy.zeros(p_E_th.shape+(2,2), dtype=complex)
Jones[...,0,0]=p_E_th
Jones[...,0,1]=p_E_ph
Jones[...,1,0]=q_E_th
Jones[...,1,1]=q_E_ph
return THETA, PHI, Jones
def getJonesAlong(self, freqval, theta_phi_view):
theta_view, phi_view = theta_phi_view
(theta_build, phi_build) = self.view2build_coords(theta_view, phi_view)
if self.tmfd is None:
p_E_th, p_E_ph = self.radFFp.getFFalong_build(freqval,
(theta_build, phi_build) )
q_E_th, q_E_ph = self.radFFq.getFFalong_build(freqval,
(theta_build, phi_build) )
Jones=numpy.zeros(p_E_th.shape+(2,2), dtype=complex)
Jones[...,0,0] = p_E_th
Jones[...,0,1] = p_E_ph
Jones[...,1,0] = q_E_th
Jones[...,1,1] = q_E_ph
else:
Jones = self.tmfd.getJonesAlong(freqval,
(theta_build, phi_build) )
if self.basis is not None:
p_E_th = Jones[...,0,0]
p_E_ph = Jones[...,0,1]
q_E_th = Jones[...,1,0]
q_E_ph = Jones[...,1,1]
p_E_th, p_E_ph=tvecfun.transfVecField2RotBasis(self.basis,
(theta_build, phi_build),
(p_E_th, p_E_ph))
q_E_th, q_E_ph=tvecfun.transfVecField2RotBasis(self.basis,
(theta_build, phi_build),
(q_E_th, q_E_ph))
Jones[...,0,0] = p_E_th
Jones[...,0,1] = p_E_ph
Jones[...,1,0] = q_E_th
Jones[...,1,1] = q_E_ph
return Jones
def getFFalong(self, freqval, theta_phi_view, polchan=0):
jones = self.getJonesAlong(freqval, theta_phi_view)
E_th = jones[..., polchan, 0].squeeze()
E_ph = jones[..., polchan, 1].squeeze()
return E_th, E_ph
def view2build_coords(self, theta_view, phi_view):
"""Get the corresponding directions in the build frame."""
if self.basis is not None:
(theta_build, phi_build) = pntsonsphere.rotToFrame(
numpy.transpose(self.basis),
theta_view, phi_view)
else:
(theta_build, phi_build) = (theta_view, phi_view)
return (theta_build, phi_build)
def rotateframe(self, rotMat):
"""Rotate the frame of antenna. This is a 'passive' rotation: it
does not rotate the field, but when evaluated in some
direction the direction given will be rotated to the frame so
as to appear as if it were rotated.
The basis or rotation matrix is to be considered as acting on
the antenna, i.e.
view_crds=rotMat*build_crds
        assuming the antenna has not been rotated already. If it has, then
the inputted rotation is added to the current rotation, so that
view_crds=rotMat*rotMat_0*build_crds
where rotMat_0 is previous rotation state (could be aggregate of many).
"""
if self.basis is None:
self.basis = rotMat
else:
self.basis = numpy.matmul(rotMat, self.basis)
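    # Usage sketch (hypothetical element `elem`): rotate 45 degrees about the z axis.
    # Any proper 3x3 rotation matrix works; repeated calls compose via matrix product.
    #   ang = numpy.deg2rad(45.0)
    #   rot_z = numpy.array([[numpy.cos(ang), -numpy.sin(ang), 0.0],
    #                        [numpy.sin(ang),  numpy.cos(ang), 0.0],
    #                        [0.0,             0.0,            1.0]])
    #   elem.rotateframe(rot_z)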
def load_ffes(self, filename_p, filename_q):
"""Load a pair of FFE and make them correspond to this DualPolElem
object. First file will be pol-channel p and second q."""
ffefile_p = FEKOffe(filename_p)
tvf_p = tvecfun.TVecFields()
tvf_q = tvecfun.TVecFields()
tvf_p.load_ffe(filename_p)
tvf_q.load_ffe(filename_q)
self.radFFp = RadFarField(tvf_p)
self.radFFq = RadFarField(tvf_q)
def load_ffe(self, filename, request_p=None, request_q=None):
        # FIXME: this is not the most efficient approach, as it makes two passes over the FEKO file.
ffefile = FEKOffe(filename)
if request_p is None and request_q is None :
if len(ffefile.Requests) == 2:
requests = list(ffefile.Requests)
                requests.sort()  # FIXME: not sure how to order requests
request_p = requests[0]
request_q = requests[1]
else:
raise RuntimeError(
"File contains multiple FFs (specify one): "
+ ','.join(ffefile.Requests))
print("Request_p= "+request_p)
print("Request_q= "+request_q)
tvf_p = tvecfun.TVecFields()
tvf_q = tvecfun.TVecFields()
tvf_p.load_ffe(filename, request_p)
tvf_q.load_ffe(filename, request_q)
self.radFFp = RadFarField(tvf_p)
self.radFFq = RadFarField(tvf_q)
def plotJonesPat3D(self, freq=0.0, vcoord='sph',
projection='equirectangular', cmplx_rep='AbsAng'):
"""Plot the Jones pattern as two single pol antenna patterns."""
theta_rad, phi_rad, JonesPat=self.getJonesPat(freq)
Ep = numpy.squeeze(JonesPat[...,0,:])
Eq = numpy.squeeze(JonesPat[...,1,:])
tvecfun.plotvfonsph(theta_rad, phi_rad, numpy.squeeze(Ep[...,0]),
numpy.squeeze(Ep[...,1]), freq, vcoord,
projection, cmplx_rep, vfname='p-chan:'+self.radFFp.name)
tvecfun.plotvfonsph(theta_rad, phi_rad, numpy.squeeze(Eq[...,0]),
numpy.squeeze(Eq[...,1]), freq, vcoord,
projection, cmplx_rep, vfname='q-chan:'+self.radFFp.name)
def plot_polcomp_dynspec(tims, frqs, jones):
"""Plot dynamic power spectra of each polarization component."""
#fig, (ax0, ax1) = plt.subplots(nrows=2)
p_ch = numpy.abs(jones[:,:,0,0].squeeze())**2+numpy.abs(jones[:,:,0,1].squeeze())**2
q_ch = numpy.abs(jones[:,:,1,1].squeeze())**2+numpy.abs(jones[:,:,1,0].squeeze())**2
ftims=matplotlib.dates.date2num(tims)
dynspecunit = 'flux arb.'
# In dB
dBunit = False
if dBunit:
        p_ch = 10*numpy.log10(p_ch)
        q_ch = 10*numpy.log10(q_ch)
import math
import Terminal
import numpy as np
type_factor_sigma = {'UWB' : 0, 'BlueTooth' : 0.3}
def get_distance(terminalA : 'Terminal.CartesianPoint', terminalB : 'Terminal.CartesianPoint'):
#print(terminalA._x, terminalB._x, terminalA._y, terminalB._y)
dis = math.sqrt( ((terminalA._x-terminalB._x)**2)+((terminalA._y-terminalB._y)**2) )
print("[DATA] Get Distance :" + terminalA._terminal_name + ": " , terminalA._x , terminalA._y , terminalB._terminal_name + ": " , terminalB._x , terminalB._y , "Dis: ", dis)
return dis
def get_distance_sq(terminalA : 'Terminal.CartesianPoint', terminalB : 'Terminal.CartesianPoint'):
#print(terminalA._x, terminalB._x, terminalA._y, terminalB._y)
print("[DATA] Get Squared Distance :" + terminalA._terminal_name + ": " , terminalA._x , terminalA._y , terminalB._terminal_name + ": " , terminalB._x , terminalB._y , "Dis: ", ((terminalA._x-terminalB._x)**2)+((terminalA._y-terminalB._y)**2))
return ((terminalA._x-terminalB._x)**2)+((terminalA._y-terminalB._y)**2)
def get_distance_from_origin(terminal : 'Terminal.CartesianPoint'):
return math.sqrt((terminal._x**2)+(terminal._y**2))
def get_distance_from_origin_by_coord(x : 'float', y : 'float'):
return math.sqrt((x**2)+(y**2))
def get_distance_by_set(set_a : 'np.array', set_b : 'np.array') -> 'float':
print("[PARM] Get Distance By ", set_a, set_b)
return math.sqrt((set_a[0]-set_b[0])**2 + (set_a[1]-set_b[1])**2) #done
def get_sigma(distance, terminal_type = 'UWB') -> 'float' :
print("[PARM] Get Sigma ", np.sqrt((1.0 + (distance / 7) ** 2) * (1 + type_factor_sigma[terminal_type]) + 1 * type_factor_sigma[terminal_type]), "By ", terminal_type, " In ", distance)
return np.sqrt((1.0 + (distance / 7) ** 2) * (1 + type_factor_sigma[terminal_type]) + 1 * type_factor_sigma[terminal_type])
def get_distance_from_origin_by_set(set : 'np.array'):
return math.sqrt((set[0]) ** 2 + (set[1]** 2))
def get_distance_from_weak_terminal_by_coord(x : 'float', y : 'float', weak_terminal :'Terminal.CartesianPoint') -> 'float':
return math.sqrt(((x-weak_terminal._x)**2)+((y-weak_terminal._y)**2))
def get_modified_distance_by_set_and_type(normal_set : 'np.array', terminal_type : "str" = "UWB"):
dis_set = np.zeros((len(normal_set), 1))
mod_set = np.zeros((len(normal_set), 1))
for x in range(normal_set.shape[0]):
dis_set[x] = get_distance_from_origin_by_coord(normal_set[x, 0], normal_set[x, 1])
#dis_set[x] = math.sqrt((normal_set[x, 0] - terminal._x)**2 + (normal_set[x, 1] - terminal._y)**2)
mod_set[x] = round(float(dis_set[x]*(1-(get_sigma(dis_set[x], terminal_type)**3/15))), 2)
print("[DATA] Get Mod Distance By Point {2}, {3} Distance: {0}, Mod Distance: {1}".format(dis_set[x], mod_set[x], normal_set[x, 0], normal_set[x, 1]))
return mod_set, dis_set
def get_modified_coord_by_nor_set_and_terminal(distribute_set : 'np.array', terminal :'Terminal.CartesianPoint'):
ratio_set = get_modified_distance_by_set_and_type(distribute_set)[0]
mod_coord_set = np.zeros((len(distribute_set), 2))
for x in range(distribute_set.shape[0]):
distance = get_distance_from_origin_by_coord(distribute_set[x, 0], distribute_set[x, 1])
ratio_set[x] /= distance
mod_coord_set[x, 0] = ((distribute_set[x, 0] - terminal._x) * ratio_set[x]) + terminal._x
mod_coord_set[x, 1] = ((distribute_set[x, 1] - terminal._y) * ratio_set[x]) + terminal._y
print("[DATA] Raw Point:({0}, {1}), Distance: {2}, Mod Ratio: {3}, Mod Coordinate: ({4}, {5})".format(distribute_set[x, 0], distribute_set[x, 1], distance, ratio_set[x], mod_coord_set[x, 0], mod_coord_set[x, 1]))
return mod_coord_set
def get_ideal_coord_by_set(set : 'np.array') -> 'np.array':
ideal_set = np.zeros((2, 1))
x_sum = 0
y_sum = 0
for x in range(set.shape[0]):
x_sum += set[x, 0]
y_sum += set[x, 1]
ideal_set[0] = x_sum / len(set)
ideal_set[1] = y_sum / len(set)
print("[DATA] Ideal Point Set Generated: ({0}, {1})".format(ideal_set[0], ideal_set[1]))
return ideal_set
def get_shift_coord_by_radius_and_degree(coord_set : 'np.array', radius : 'float' = 1, angle : 'float' = 0, origin : 'np.array' = None) -> 'np.array':
# return ([round(math.cos(np.arctan(coord_set[1]/coord_set[0]) + math.radians(angle)), 4) * radius,
# round(math.cos(np.arctan(coord_set[0, 1]/coord_set[0, 0]) + math.radians(angle)), 4) * radius])
origin = ([0, 0]) if origin is None else origin
print("[PARM] Get Shift Point By Radius ", radius, " And Degree ", angle, " In Coordination ", coord_set, " From ", origin)
    theta = np.arctan2(coord_set[1] - origin[1], coord_set[0] - origin[0]) + math.radians(angle)
    return ([round(math.cos(theta), 4) * radius + origin[0],
             round(math.sin(theta), 4) * radius + origin[1]])
import copy
import numpy as np
import numpy.random as rng
from .loading import *
def logsumexp(values):
biggest = np.max(values)
x = values - biggest
result = np.log(np.sum(np.exp(x))) + biggest
return result
def logdiffexp(x1, x2):
biggest = x1
xx1 = x1 - biggest
xx2 = x2 - biggest
result = np.log(np.exp(xx1) - np.exp(xx2)) + biggest
return result
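# Illustration of the max-shift trick used above (assumed inputs): where the
# naive np.log(np.sum(np.exp(values))) overflows,
#
#     logsumexp(np.array([1000.0, 1000.0]))   # -> 1000.6931... (= 1000 + log(2))
#
# stays finite, because only values - max(values) is ever exponentiated.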
def postprocess(temperature=1., numResampleLogX=1, plot=True, loaded=[], \
cut=0., save=True, zoom_in=True, compression_bias_min=1., verbose=True,\
compression_scatter=0., moreSamples=1., compression_assert=None, single_precision=False, rng_seed=None):
if rng_seed is not None:
rng.seed(rng_seed)
if len(loaded) == 0:
levels_orig = np.atleast_2d(my_loadtxt("levels.txt"))
sample_info = np.atleast_2d(my_loadtxt("sample_info.txt"))
else:
levels_orig, sample_info = loaded[0], loaded[1]
# Remove regularisation from levels_orig if we asked for it
if compression_assert is not None:
levels_orig[1:,0] = -np.cumsum(compression_assert*np.ones(levels_orig.shape[0] - 1))
cut = int(cut*sample_info.shape[0])
sample_info = sample_info[cut:, :]
if plot:
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(sample_info[:,0], "k")
plt.xlabel("Iteration")
plt.ylabel("Level")
plt.figure(2)
plt.subplot(2,1,1)
plt.plot(np.diff(levels_orig[:,0]), "k")
plt.ylabel("Compression")
plt.xlabel("Level")
xlim = plt.gca().get_xlim()
plt.axhline(-1., color='g')
plt.axhline(-np.log(10.), color='g', linestyle="--")
plt.ylim(top=0.05)
plt.subplot(2,1,2)
good = np.nonzero(levels_orig[:,4] > 0)[0]
plt.plot(levels_orig[good,3]/levels_orig[good,4], "ko-")
plt.xlim(xlim)
plt.ylim([0., 1.])
plt.xlabel("Level")
plt.ylabel("MH Acceptance")
# Convert to lists of tuples
logl_levels = [(levels_orig[i,1], levels_orig[i, 2]) for i in range(0, levels_orig.shape[0])] # logl, tiebreaker
logl_samples = [(sample_info[i, 1], sample_info[i, 2], i) for i in range(0, sample_info.shape[0])] # logl, tiebreaker, id
logx_samples = np.zeros((sample_info.shape[0], numResampleLogX))
logp_samples = np.zeros((sample_info.shape[0], numResampleLogX))
    logP_samples = np.zeros((sample_info.shape[0], numResampleLogX))
"""
differential_correlation.py
Code for performing differential correlation with single cell RNA data.
"""
from statsmodels.stats.multitest import multipletests
from scipy.stats import pearsonr, spearmanr
import numpy as np
def _differential_correlation_statistic(x1, y1, x2, y2, which='spearman'):
""" Computes a differential correlation statistic for 4 1d samples. """
corr_1, pval_1 = correlation(x1, y1, which=which)
corr_2, pval_2 = correlation(x2, y2, which=which)
    # Fisher z-transformed difference of the two correlations, scaled by the
    # standard error sqrt(1/(n1 - 3) + 1/(n2 - 3)) of that difference.
    return (np.arctanh(corr_1) - np.arctanh(corr_2)) / np.sqrt(1.0 / (len(x1) - 3) + 1.0 / (len(x2) - 3))
def _null_distribution(x1, y1, x2, y2, which='spearman', num_null=100):
""" Generates null p values by shuffling the labels. """
n_1, n_2 = len(x1), len(x2)
x_all, y_all = np.concatenate([x1, x2]), np.concatenate([y1, y2])
null_stats = []
for i in range(num_null):
idx1 = np.random.permutation(n_1+n_2)[:n_1]
idx2 = np.random.permutation(n_1+n_2)[:n_2]
null_stats.append(
_differential_correlation_statistic(
x_all[idx1],
y_all[idx1],
x_all[idx2],
y_all[idx2], which=which))
return np.array(null_stats)
def correlation(x, y, which='spearman'):
""" Measures a metric of correlation between two 1d samples. """
if which == 'spearman':
return spearmanr(x, y)
elif which == 'pearson':
return pearsonr(x, y)
else:
        raise NotImplementedError("correlation '%s' is not implemented" % which)
def differential_correlation(x1, y1, x2, y2, which='spearman', num_null=200, test='!='):
"""
Performs differential correlation between 4 1d samples, 2 in each condition.
Returns a p-value based on random shuffling of data.
"""
statistic = _differential_correlation_statistic(x1, y1, x2, y2, which=which)
null_stats = _null_distribution(x1, y1, x2, y2, which=which, num_null=num_null)
if test == '!=':
        # Two-sided permutation p-value: the fraction of null statistics at
        # least as extreme (in absolute value) as the observed statistic.
        return statistic, (np.absolute(null_stats) > np.absolute(statistic)).mean()
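if __name__ == '__main__':
    # Minimal usage sketch on synthetic data (illustrative values only): x and
    # y are correlated in condition 1 but independent in condition 2, so the
    # permutation p-value should come out small.
    state = np.random.RandomState(0)
    x1 = state.randn(100)
    y1 = x1 + 0.5 * state.randn(100)
    x2 = state.randn(100)
    y2 = state.randn(100)
    stat, pval = differential_correlation(x1, y1, x2, y2, num_null=200)
    print('statistic = %.3f, permutation p-value = %.3f' % (stat, pval))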
import numpy as np
import os
import multiprocessing
from PIL import Image
from skimage import io
pi = np.pi
"""
Run this program to perform the reconstruction of a MCRICM stack of images.
The stack should contain the different images grouped by color and increasing INA.
The simulations should previously done with Main_simu.py.
"""
###################################################
##################PARAMETERS#######################
###################################################
#The name of the MCRICM stack
filename = 'MCRICM_image_small.tif'
#List of refractive indices for the cytosplasm in green
#Decimal part only for simplicity (i.e. 340 -> 1.340)
ninG = [340, 345, 350, 355, 360, 365, 370, 375, 380, 385, 390, 395, 400]
#List of INA
#Decimal part only for simplicity (i.e. 45 -> 0.45)
INA = [50, 56, 68, 86, 105]
#Folder containing the simulations
SimuFolder = '../Simulation/Simu'
#Number of processors used
#If multiprocessing does not work, try NbProc = 1
NbProc = 4
#Define min, max, step of d and h in nm
#Use the same parameters as your simulations for simplicity
dmin = 0; dmax = 1000; dstep = 10 #d range
hmin = 0; hmax = 500; hstep = 2 #h range
#Number of different illuminations
NbIllum = 3*len(INA)
#Rescaled h and d for calculations
imax_d = int((dmax-dmin)/dstep)
jmax_h = int((hmax-hmin)/hstep)
def worker(rri_sub, q, out_q):
"""
Function called for multiprocessing.
Reconstruct the image stripe by stripe.
"""
outdict = {}
hh_dict_sub, dd_dict_sub, chi2_dict_sub = LoopNin(rri_sub)
hh_sub, dd_sub, chi2_sub, nin_sub \
= BestSol(hh_dict_sub, dd_dict_sub, chi2_dict_sub)
outdict = [hh_sub, dd_sub, chi2_sub, nin_sub, hh_dict_sub, dd_dict_sub, chi2_dict_sub]
out_q.put((q, outdict))
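# Typical driving pattern for `worker` (a sketch; `rri_stripes` is an assumed
# name for the per-process image stripes, not defined in this file):
#
#     out_q = multiprocessing.Queue()
#     jobs = [multiprocessing.Process(target=worker, args=(rri_stripes[q], q, out_q))
#             for q in range(NbProc)]
#     for j in jobs: j.start()
#     results = dict(out_q.get() for _ in jobs)   # {stripe index: outputs}
#     for j in jobs: j.join()                     # drain the queue before joining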
def Initialize(ninstring):
"""
Create the maps of reflectivity vs conditions (i.e. illuminations),
through selective extraction of the simulations previously done.
"""
nmax = 3*len(INA)
color_list = len(INA)*['B']+len(INA)*['G']+len(INA)*['R']
I_vs_cd_Map = np.ndarray( shape = (imax_d, jmax_h, nmax))
for n in range(nmax):
folder = SimuFolder + '/INA' + str(INA[n%len(INA)]) + '_nin' + str(ninstring)
I_vs_cd_Map[:,:,n] = np.load(folder+'/SimuMap'+color_list[n]+'.npy')
return I_vs_cd_Map
def LoopNin(rri_dict):
"""
Reconstruction for each nin independently (i.e. fixes nin, find best h and d).
"""
nmax = len(ninG)
#Each elem at position n contains the reconstruction for the index ninG[n]
#For h
hh_dict = {}
#For d
dd_dict = {}
#Corresponding chi2
chi2_dict = {}
for n in range(nmax):
I_vs_cd_Map = Initialize(ninG[n])
ddtemp, hhtemp, chi2temp\
= BestSol_FixNin(I_vs_cd_Map, rri_dict, ninG[n])
#Rescale the results
dd_dict[n] = ddtemp*dstep
hh_dict[n] = hhtemp*hstep
chi2_dict[n] = chi2temp
return hh_dict, dd_dict, chi2_dict
def BestSol(hh_dict, dd_dict, chi2_dict):
"""
    Find best nin within the dictionaries of best solutions for each nin,
by minimizing chi2 pixel by pixel.
"""
nmax = len(hh_dict)
imax = np.shape(hh_dict[0])[0]
jmax = np.shape(hh_dict[0])[1]
chi2_Map, hh_Map, dd_Map = (np.ndarray( shape = (imax, jmax, nmax)) for i in range(3))
nin_loc = [ninG[i%len(ninG)] for i in range(nmax)]
for n in range(nmax):
chi2_Map[:,:,n] = chi2_dict[n]
hh_Map[:,:,n] = hh_dict[n]
dd_Map[:,:,n] = dd_dict[n]
nin = np.zeros((imax, jmax))
hh, dd, chi2 = (np.ndarray( shape = (imax, jmax)) for i in range(3))
for i in range(0, imax):
for j in range(0, jmax):
V_minloc = chi2_Map[i,j,:].argmin()
nin[i,j] = 1+0.001*nin_loc[V_minloc]
hh[i,j] = hh_Map[i,j,V_minloc]
dd[i,j] = dd_Map[i,j,V_minloc]
chi2[i,j] = chi2_Map[i,j,V_minloc]
return hh, dd, chi2, nin
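# The pixelwise argmin in BestSol can also be written as one vectorized pass;
# an equivalent NumPy formulation (same inputs and outputs assumed):
#
#     idx = chi2_Map.argmin(axis=2)            # (imax, jmax) index of best nin
#     ii, jj = np.indices(idx.shape)
#     hh, dd, chi2 = hh_Map[ii, jj, idx], dd_Map[ii, jj, idx], chi2_Map[ii, jj, idx]
#     nin = 1 + 0.001 * np.asarray(nin_loc)[idx]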
def BestSol_FixNin(I_vs_cd_Map, rri_dict, nin_n):
"""
Find the best solution, pixel by pixel, for a fix nin.
"""
imax = np.shape(rri_dict[0])[0]
jmax = np.shape(rri_dict[0])[1]
nmax = 3*len(INA)
dd_fixnin, hh_fixnin, chi2_fixnin = (np.zeros((imax, jmax)) for i in range(3))
#Store the MCRICM images in one array
rr = [np.array(rri_dict[n]) for n in range(nmax)]
#Reconstruction pixel by pixel
for i in range(imax):
for j in range(jmax):
I_vs_cd = [rr[n][i,j] for n in range(nmax)]
ImapDiff_illum = np.divide(np.square(I_vs_cd_Map-I_vs_cd),I_vs_cd_Map)
#ImapDiff is the map of Chi2 for all conditions. The min of ImapDiff gives the best solution.
ImapDiff = np.sum(ImapDiff_illum, 2)
            ImapDiff = np.round(ImapDiff, 8)
            # The chi2 minimum over the (d, h) grid gives this pixel's solution
            i_min, j_min = np.unravel_index(ImapDiff.argmin(), ImapDiff.shape)
            dd_fixnin[i, j] = i_min
            hh_fixnin[i, j] = j_min
            chi2_fixnin[i, j] = ImapDiff[i_min, j_min]
    return dd_fixnin, hh_fixnin, chi2_fixnin
#!/usr/bin/env python2
import numpy as np
from . import setup
class Qsys:
"""
Base Class for Quantum System simulation
Constructor Parameters:
-----------------------
dim (int)
psi_naught (list)
delta_t (float)
omega (float)
Attributes:
-----------
dim: integer
dimension of the system
current_state: list_like
current wavefunction for the system
time: float
current time for system's evolution
spectrum: list_like ###STILL NEED TO REIMPLEMENT###
relative weights for pitches
chord1: list_like
first chord in oscillation
chord2: list_like
second chord in oscillation
omega: float
frequency for oscillation
hamiltonian: array_like
matrix representation of hamiltonian operator; optional kwarg to override spectrum generation of hamiltonian
"""
def __init__(self, argDim, argPsi_naught, argDelta_t, argOmega, argBareSpectrum, argStableSpectrum, argUnstableSpectrum, argRoot1, argRoot2, argHamiltonian = None):
self.bareSpectrum = argBareSpectrum
self.dim = argDim
self.current_state = argPsi_naught
self.current_probs = self.probabilities()
self.time = 0.
self.delta_t = argDelta_t
self.spectrum = np.array(argStableSpectrum)
self.unstable = np.array(argUnstableSpectrum)
#Normalize spectrum if it isn't normalized
self.spectrum = self.spectrum / np.sum(self.spectrum)
# self.atom = setup.atom(self.spectrum)
self.recombinedSpectrum = self.spectrum + self.unstable
self.recombinedSpectrum = self.recombinedSpectrum / np.sum(self.recombinedSpectrum)
self.omega = argOmega
self.root1 = argRoot1
self.root2 = argRoot2
self.lastOutput = 0
self.lastKey = 0
self.conditionalProbs = setup.conditional_probs(self.recombinedSpectrum)
self.POVMs = setup.POVMs(self.spectrum, self.conditionalProbs)
        if argHamiltonian is None:  # identity check; argHamiltonian may be an ndarray
self.hamiltonian = setup.tune_H(self.root1, self.root2, self.bareSpectrum, self.omega)
else:
self.hamiltonian = argHamiltonian
self.conditional_probabilities = setup.conditional_probs(self.spectrum)
self.POVMs = setup.POVMs(self.spectrum, self.conditional_probabilities)
def normalize(self, state):
"""
Takes current state and normalizes
Parameters:
-----------
state: complex vector
state to normalize
"""
norm = np.linalg.norm(state)
return state / norm
def schrodinger(self, psi, t):
"""
Schrodinger Equation
Parameters:
-----------
psi: vector
initial wavefunction to evolve
t: scalar, np.float
time for evaluation
Returns:
--------
vector
differential time step for wavefunction evolution
"""
return 1 / (1j) * np.dot(self.hamiltonian, psi)
def rk4_step(self, u, t, du, delta_t):
"""
Implementation of the Runge-Kutta 4th order approximation for solving a system of coupled ODEs
Parameters:
-----------
u: array-like
initial values
delta_t: float
time step size
t: float
current time
du: lambda
vector-valued function for differential equation
Returns:
--------
tuple of floats
vector of values for the function at the next time step
"""
K1 = delta_t * du(u, t)
K2 = delta_t * du(u + K1 / 2, t + delta_t / 2)
K3 = delta_t * du(u + K2 / 2, t + delta_t / 2)
K4 = delta_t * du(u + K3, t + delta_t)# 4 intermediate approximations
return u + (K1 + 2 * K2 + 2 * K3 + K4) / 6
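    # Example (hypothetical values): one rk4_step of du/dt = -u from u(0) = 1
    # with delta_t = 0.1 reproduces exp(-0.1) to roughly 1e-7:
    #
    #     step = self.rk4_step(np.array([1.0]), 0.0, lambda u, t: -u, 0.1)
    #     # step -> array([0.90483742]), vs np.exp(-0.1) = 0.90483742...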
def probabilities(self):
"""
Returns mod^2 of the current state. This function normalizes probabilities
"""
        probs = np.absolute(self.current_state) ** 2  # Born-rule weights |psi_i|^2
norm = np.sum(probs) # Calculates the norm of the current wavefunction
return probs / norm
def get_probs(self):
return self.current_probs
def measure(self, key):
"""
Measures current state and initiates collapse
"""
#Randomly Sample to find "internal state"
wf_probability = self.probabilities()
internal_state = self.sample(wf_probability) #This is a hidden state--The User never sees or knows this
conditional_probability = self.conditional_probabilities[key, internal_state, :]
self.lastOutput = self.sample(conditional_probability)
self.lastKey = key
#Now, need to collapse state and output pitch
self.collapse()
return self.output()
def collapse(self):
POVM = self.POVMs[self.lastOutput][self.lastKey]
        # Standard POVM update psi' = sqrt(E) psi / sqrt(<psi|E|psi>); vdot
        # conjugates the bra, and the norm is the square root of the outcome
        # probability.
        prob = np.real(np.vdot(self.current_state, np.dot(POVM, self.current_state)))
        new_state = np.dot(np.sqrt(POVM), self.current_state) / np.sqrt(prob)
self.current_state = new_state
def sample(self, probabilities):
"""
Randomly sample over a set of probabilities using a threshold calculation. Returns the location of the sampled state in ordered pitch space
"""
        thresholds = np.cumsum(probabilities)  # cumulative distribution over states
        r = np.random.rand()
        # Return the first state whose cumulative probability exceeds the draw
        return int(np.argmax(thresholds > r))
r"""
Pore-scale models related to topology of the network.
"""
from numpy.linalg import norm as _norm
import numpy as _np
def coordination_number(target):
r"""
Find the number of neighbors for each pore
"""
network = target.network
N = network.num_neighbors(pores=network.Ps, flatten=False)
return N
def pore_to_pore_distance(target):
r"""
Find the center to center distance between each pair of pores
"""
network = target.project.network
cn = network['throat.conns']
C1 = network['pore.coords'][cn[:, 0]]
C2 = network['pore.coords'][cn[:, 1]]
values = _norm(C1 - C2, axis=1)
return values
def distance_to_nearest_neighbor(target):
r"""
Find the distance between each pore and its closest topological neighbor
"""
network = target.project.network
cn = network['throat.conns']
C1 = network['pore.coords'][cn[:, 0]]
C2 = network['pore.coords'][cn[:, 1]]
D = _norm(C1 - C2, axis=1)
im = network.create_incidence_matrix()
values = _np.ones((network.Np, ))*_np.inf
_np.minimum.at(values, im.row, D[im.col])
return _np.array(values)
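# _np.minimum.at performs an unbuffered scatter-reduce: repeated indices keep
# the running minimum instead of the last write. A minimal illustration:
#
#     vals = _np.ones(3) * _np.inf
#     _np.minimum.at(vals, [0, 0, 2], [5.0, 3.0, 7.0])
#     # vals -> [3., inf, 7.]   (index 0 keeps the smaller of 5 and 3)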
def distance_to_furthest_neighbor(target):
r"""
Find the distance between each pore and its furthest topological neighbor
"""
network = target.project.network
throats = network.map_throats(throats=target.Ts, origin=target)
cn = network['throat.conns'][throats]
C1 = network['pore.coords'][cn[:, 0]]
C2 = network['pore.coords'][cn[:, 1]]
D = _norm(C1 - C2, axis=1)
im = network.create_incidence_matrix()
values = _np.zeros((network.Np, ))
_np.maximum.at(values, im.row, D[im.col])
    return _np.array(values)
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
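if __name__ == '__main__':
    # Minimal usage sketch: under P 1 the only reflection equivalent to
    # (1, 2, 3) is itself, with a unit phase factor.
    hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
    print(hkls)    # [[1 2 3]]
    print(phases)  # approximately [1.+0.j]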
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
import numpy as np
from random import random, uniform
import matplotlib.pyplot as plt
def ackley(x, a=20, b=0.2, c=2*np.pi):
# n dimensional
x = np.asarray_chkfinite(x) # ValueError if any NaN or Inf
n = len(x)
s1 = sum(x**2)
s2 = sum(np.cos(c * x))
return -a*np.exp(-b*np.sqrt(s1 / n)) - np.exp(s2 / n) + a + np.exp(1)
def bukin(x):
# 2 dimensional
return 100 * np.sqrt(np.abs(x[1] - 0.01 * x[0] ** 2)) + 0.01 * np.abs(x[0] + 10)
def griewank(x):
# n dimensional
dim = len(x)
j = np.arange(1, dim + 1)
sq_list = [i ** 2 for i in x]
s = sum(sq_list)
p = np.prod(np.cos(x / np.sqrt(j)))
return s / 4000 - p + 1
def rosenbrock(x):
# n dimensional
x = np.asarray_chkfinite(x)
x0 = x[:-1]
x1 = x[1:]
return sum(100 * (x1 - x0 ** 2) ** 2 + (x0 - 1) ** 2)
def zakharov(x):
# n dimensional
x = np.asarray_chkfinite(x)
dim = len(x)
j = np.arange(1, dim + 1)
s1 = sum(0.5 * j * x)
return sum(x ** 2) + s1 ** 2 + s1 ** 4
def levy(x):
# n dimensional
x = np.asarray_chkfinite(x)
w = 1 + (x - 1) / 4
return (np.sin(np.pi * w[0])) ** 2 \
+ sum((w[:-1] - 1) ** 2 * (1 + 10 * np.sin(np.pi * w[:-1] + 1) ** 2)) \
+ (w[-1] - 1) ** 2 * (1 + (np.sin(2 * np.pi * w[-1]) ** 2))
def rastrigin(x):
# n dimensional
x = np.asarray_chkfinite(x)
dim = len(x)
return 10 * dim + sum(x ** 2 - 10 * np.cos(2 * np.pi * x))
def schwefel(x):
# n dimensional
x = np.asarray_chkfinite(x)
dim = len(x)
return 418.9829 * dim - sum(x * np.sin(np.sqrt(np.abs(x))))
def sphere(x):
# n dimensional
x = np.asarray_chkfinite(x)
return sum(x ** 2)
def sum_diff_powers(x):
# n dimensional
x = np.asarray_chkfinite(x)
dim = len(x)
j = np.arange(1, dim + 1)
return sum(np.abs(x) ** (j + 1))
def sum_of_squares(x):
# n dimensional
    x = np.asarray_chkfinite(x)
    dim = len(x)
    j = np.arange(1, dim + 1)
    # Weighted sum-of-squares benchmark: sum_i i * x_i**2
    return sum(j * x ** 2)
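if __name__ == '__main__':
    # Quick sanity check (illustrative): most of these benchmarks attain their
    # global minimum of 0 at the origin; levy and rosenbrock at x_i = 1, bukin
    # at (-10, 1), and schwefel near x_i = 420.9687.
    z = np.zeros(4)
    for f in (ackley, griewank, zakharov, rastrigin, sphere, sum_diff_powers, sum_of_squares):
        print(f.__name__, f(z))
    print('levy', levy(np.ones(4)))
    print('rosenbrock', rosenbrock(np.ones(4)))
    print('bukin', bukin(np.array([-10.0, 1.0])))
    print('schwefel', schwefel(420.9687 * np.ones(4)))  # ~0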
from fractions import Fraction
import numpy as np
from util import cvimage as Image
from util.richlog import get_logger
from . import imgops
from . import resources
from . import common
logger = get_logger(__name__)
def check_main(img):
vw, vh = common.get_vwvh(img.size)
gear1 = img.crop((3.148 * vh, 2.037 * vh, 9.907 * vh, 8.796 * vh)).convert('L')
gear2 = resources.load_image_cached('main/gear.png', 'L')
gear1, gear2 = imgops.uniform_size(gear1, gear2)
result = imgops.compare_ccoeff(gear1, gear2)
# result = np.corrcoef(np.asarray(gear1).flat, np.asarray(gear2).flat)[0, 1]
logger.logimage(gear1)
logger.logtext('ccoeff=%f' % result)
return result > 0.9
def get_ballte_corners(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = common.get_vwvh(img)
if aspect == Fraction(16, 9):
return (
(61.120 * vw, 16.944 * vh), (82.213 * vw, 15.139 * vh), (82.213 * vw, 37.083 * vh), (61.120 * vw, 38.333 * vh))
elif aspect == Fraction(18, 9):
return (
(64.693 * vw, 16.852 * vh), (82.378 * vw, 14.352 * vh), (82.378 * vw, 37.500 * vh), (64.693 * vw, 37.963 * vh))
else:
return imgops.match_feature(resources.load_image_cached('main/terminal.png', 'L'), img).template_corners
def get_task_corners(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = common.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((55.602 * vw, 75.880 * vh)), np.array((70.367 * vw, 78.241 * vh)),
np.array((70.367 * vw, 91.991 * vh)), np.array((55.602 * vw, 88.518 * vh)))
elif aspect == Fraction(18, 9):
return (np.array((58.489 * vw, 76.296 * vh)), np.array((72.008 * vw, 78.611 * vh)),
np.array((72.008 * vw, 92.685 * vh)), np.array((58.489 * vw, 89.167 * vh)))
else:
return imgops.match_feature(resources.load_image_cached('main/quest.png', 'L'), img).template_corners
# The following functions are used for visiting friends' bases
def get_friend_corners(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = common.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((22.734*vw, 76.667*vh)), np.array((33.203*vw, 76.667*vh)), np.array((33.203*vw, 82.083*vh)), np.array((22.734*vw, 82.083*vh)))
else:
return [x[0] for x in imgops.match_feature(resources.load_image_cached('main/friends.png', 'L'), img).template_corners]
def get_friend_list(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = common.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((1.484*vw, 25.694*vh)), np.array((16.797*vw, 25.694*vh)), np.array((16.797*vw, 36.111*vh)), np.array((1.484*vw, 36.111*vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_friend_build(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = common.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((74.065*vw, 17.134*vh)), np.array((79.967*vw, 17.134*vh)), np.array((79.967*vw, 28.065*vh)), np.array((74.065*vw, 28.065*vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_next_friend_build(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = common.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((85.625*vw, 79.444*vh)), np.array((99.531*vw, 79.444*vh)), np.array((99.531*vw, 93.750*vh)), np.array((85.625*vw, 93.750*vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_back_my_build(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = common.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((72.266*vw, 81.528*vh)), np.array((88.750*vw, 81.528*vh)), np.array((88.750*vw, 92.500*vh)), np.array((72.266*vw, 92.500*vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
# Click the hint at the top-right corner of the base main screen (to highlight one-tap collection)
def get_my_build_task(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = common.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((92.031*vw, 10.417*vh)), np.array((99.688*vw, 10.417*vh)), np.array((99.688*vw, 15.417*vh)), np.array((92.031*vw, 15.417*vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
# Collect the items from manufacturing stations in one tap
def get_my_build_task_clear(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = common.get_vwvh(img)
if aspect == Fraction(16, 9):
        return (np.array((12.500*vw, 91.667*vh)), np.array((16.797*vw, 91.667*vh)),
"""
miscelallaneous functions and classes to extract connectivity metrics
Author: <NAME>, PhD [<EMAIL>], https://twitter.com/davemomi
"""
import numpy as np
import pandas as pd
from math import pi
import glob
import seaborn as sns
import matplotlib.pyplot as plt
import bct as bct
class Connectivity_metrics(object):
def __init__(self, matrices_files, net_label_txt, labels_dic):
self.matrices_files = matrices_files
self.net_label_txt = net_label_txt
self.labels_dic = labels_dic
def nodes_overall_conn(self, make_symmetric=True, upper_threshold=None,
lower_threshold=None):
'''
computing the overall connectivity of each node
regardless of network affiliation
Parameters
----------
        make_symmetric: Boolean|
                        True indicates that the matrix is upper or lower
                        triangular and needs to be symmetrized;
                        False indicates that the matrix is already full
        upper_threshold: int |
                        an integer from 0 to 100 representing a percentage
                        of the maximum value; entries below that threshold
                        are set to 0 (Default is None)
        lower_threshold: int |
                        an integer from 0 to 100 representing a percentage
                        of the maximum value; entries above that threshold
                        are set to 0 (Default is None)
Returns
-------
float data : numpy array |
numpy array (dim number of subject X number of node)
representing the connectivity of each node regardless
of network affiliation
'''
self.nodes_conn = []
for subj in range(len(self.matrices_files)):
self.matrix = pd.read_csv(self.matrices_files[subj], sep= ' ', header=None)
self.matrix = np.array(self.matrix)
if make_symmetric==True:
self.matrix = self.matrix + self.matrix.T - np.diag(self.matrix.diagonal())
else:
self.matrix = self.matrix
self.max=np.max(self.matrix.flatten())
if upper_threshold==None:
self.matrix= self.matrix
else:
self.matrix[self.matrix < upper_threshold*self.max/100 ] = 0
if lower_threshold==None:
self.matrix= self.matrix
else:
self.matrix[self.matrix > lower_threshold*self.max/100 ] = 0
np.fill_diagonal(self.matrix,0)
for nodes in range(self.matrix.shape[0]):
self._node_conn = np.sum(self.matrix[nodes])
self.nodes_conn.append(self._node_conn)
self.nodes_conn = np.array(self.nodes_conn)
self.nodes_conn = self.nodes_conn.reshape(len(self.matrices_files), self.matrix.shape[0])
return self.nodes_conn
def node_inner_conn(self, sbj_number, nodes_number, make_symmetric=True,
upper_threshold=None, lower_threshold=None):
'''
computing the connectivity of each node with its own network
Parameters
----------
sbj_number: int |
number of subjects
nodes_number: int|
number of nodes
        make_symmetric: Boolean|
                        True indicates that the matrix is upper or lower
                        triangular and needs to be symmetrized;
                        False indicates that the matrix is already full
        upper_threshold: int |
                        an integer from 0 to 100 representing a percentage
                        of the maximum value; entries below that threshold
                        are set to 0 (Default is None)
        lower_threshold: int |
                        an integer from 0 to 100 representing a percentage
                        of the maximum value; entries above that threshold
                        are set to 0 (Default is None)
Returns
-------
float data : numpy array |
numpy array (dim number of subject X number of node)
representing the connectivity of each node with its own
network
'''
with open(self.net_label_txt) as f:
net=f.read().splitlines()
self.all_conn = np.zeros([sbj_number, nodes_number])
for subj in range(len(self.matrices_files)):
self.matrix = pd.read_csv(self.matrices_files[subj], sep= ' ', header=None)
self.matrix = np.array(self.matrix)
if make_symmetric==True:
self.matrix = self.matrix + self.matrix.T - np.diag(self.matrix.diagonal())
else:
self.matrix = self.matrix
self.max=np.max(self.matrix.flatten())
if upper_threshold==None:
self.matrix= self.matrix
else:
self.matrix[self.matrix < upper_threshold*self.max/100 ] = 0
if lower_threshold==None:
self.matrix= self.matrix
else:
self.matrix[self.matrix > lower_threshold*self.max/100 ] = 0
np.fill_diagonal(self.matrix,0)
for network in net:
for nodes in self.labels_dic[network]:
self.sub_matrix =self.matrix[nodes]
self.streamlines_sum = np.sum(self.sub_matrix[self.labels_dic[network]])
self.all_conn[subj, nodes] = self.streamlines_sum/self.labels_dic[network].shape[0]
return self.all_conn
def node_outer_conn(self, sbj_number, nodes_number, make_symmetric=True,
upper_threshold=None, lower_threshold=None):
'''
computing the connectivity of each node with the other nodes
which don't belong to the same network
Parameters
----------
sbj_number: int |
number of subjects
nodes_number: int|
number of nodes
        make_symmetric: Boolean|
                        True indicates that the matrix is upper or lower
                        triangular and needs to be symmetrized;
                        False indicates that the matrix is already full
        upper_threshold: int |
                        an integer from 0 to 100 representing a percentage
                        of the maximum value; entries below that threshold
                        are set to 0 (Default is None)
        lower_threshold: int |
                        an integer from 0 to 100 representing a percentage
                        of the maximum value; entries above that threshold
                        are set to 0 (Default is None)
Returns
-------
float data : numpy array |
numpy array (dim number of subject X number of node)
representing the connectivity of each node with regions that
are outsite the node's network
'''
with open(self.net_label_txt) as f:
net=f.read().splitlines()
self.all_conn = np.zeros([sbj_number, nodes_number])
for subj in range(len(self.matrices_files)):
self.matrix = pd.read_csv(self.matrices_files[subj], sep= ' ', header=None)
self.matrix = np.array(self.matrix)
if make_symmetric==True:
self.matrix = self.matrix + self.matrix.T - np.diag(self.matrix.diagonal())
else:
self.matrix = self.matrix
self.max=np.max(self.matrix.flatten())
if upper_threshold==None:
self.matrix= self.matrix
else:
self.matrix[self.matrix < upper_threshold*self.max/100 ] = 0
if lower_threshold==None:
self.matrix= self.matrix
else:
self.matrix[self.matrix > lower_threshold*self.max/100 ] = 0
np.fill_diagonal(self.matrix,0)
self.nodes_ranges = np.arange(len(self.labels_dic['nodes']))
for network in net:
self.outer_idx = np.setdiff1d(self.nodes_ranges, self.labels_dic[network])
for nodes in self.outer_idx:
self.sub_matrix =self.matrix[nodes]
self.streamlines_sum = np.sum(self.sub_matrix[self.outer_idx])
self.all_conn[subj, nodes] = self.streamlines_sum/self.outer_idx.shape[0]
return self.all_conn
def node_ranking(self, sbj_number, nodes_number, networks_number,
make_symmetric=True, upper_threshold=None, lower_threshold=None):
'''
        computing how much each node is connected with each network
Parameters
----------
sbj_number: int |
number of subjects
nodes_number: int|
number of nodes
networks_number: int|
number of networks
        make_symmetric: Boolean|
                        True indicates that the matrix is upper or lower
                        triangular and needs to be symmetrized;
                        False indicates that the matrix is already full
        upper_threshold: int |
                        an integer from 0 to 100 representing a percentage
                        of the maximum value; entries below that threshold
                        are set to 0 (Default is None)
        lower_threshold: int |
                        an integer from 0 to 100 representing a percentage
                        of the maximum value; entries above that threshold
                        are set to 0 (Default is None)
Returns
-------
float data : numpy array |
                    a 3D numpy array (dim number of subject X number of node X number of network)
representing the connectivity of each node with all the networks
'''
with open(self.net_label_txt) as f:
net=f.read().splitlines()
self.all_conn = np.zeros([sbj_number, nodes_number, networks_number])
for subj in range(len(self.matrices_files)):
self.matrix = pd.read_csv(self.matrices_files[subj], sep= ' ', header=None)
self.matrix = np.array(self.matrix)
if make_symmetric==True:
self.matrix = self.matrix + self.matrix.T - np.diag(self.matrix.diagonal())
else:
self.matrix = self.matrix
self.max=np.max(self.matrix.flatten())
if upper_threshold==None:
self.matrix= self.matrix
else:
self.matrix[self.matrix < upper_threshold*self.max/100 ] = 0
if lower_threshold==None:
self.matrix= self.matrix
else:
self.matrix[self.matrix > lower_threshold*self.max/100 ] = 0
np.fill_diagonal(self.matrix,0)
for nodes in range(self.matrix.shape[0]):
self.node_conn = self.matrix[nodes]
for network in net:
self.streamlines_sum = np.sum(self.node_conn[self.labels_dic[network]])
self.all_conn[subj, nodes, net.index(network)] = self.streamlines_sum/self.labels_dic[network].shape[0]
return self.all_conn
def net_inner_conn(self, make_symmetric=True, upper_threshold=None,
lower_threshold=None):
'''
        computing how much each network is connected with itself
Parameters
----------
        make_symmetric: Boolean|
                        True indicates that the matrix is upper or lower
                        triangular and needs to be symmetrized;
                        False indicates that the matrix is already full
        upper_threshold: int |
                        an integer from 0 to 100 representing a percentage
                        of the maximum value; entries below that threshold
                        are set to 0 (Default is None)
        lower_threshold: int |
                        an integer from 0 to 100 representing a percentage
                        of the maximum value; entries above that threshold
                        are set to 0 (Default is None)
Returns
-------
float data : numpy array |
numpy array (dim number of subject X number of network)
representing the connectivity of each network with itself
'''
with open(self.net_label_txt) as f:
net=f.read().splitlines()
self.all_conn = []
for subj in range(len(self.matrices_files)):
self.matrix = pd.read_csv(self.matrices_files[subj], sep= ' ', header=None)
self.matrix = np.array(self.matrix)
if make_symmetric==True:
self.matrix = self.matrix + self.matrix.T - np.diag(self.matrix.diagonal())
else:
self.matrix = self.matrix
self.max=np.max(self.matrix.flatten())
if upper_threshold==None:
self.matrix= self.matrix
else:
self.matrix[self.matrix < upper_threshold*self.max/100 ] = 0
if lower_threshold==None:
self.matrix= self.matrix
else:
self.matrix[self.matrix > lower_threshold*self.max/100 ] = 0
np.fill_diagonal(self.matrix,0)
for network in net:
self.subj_matrix = self.matrix[self.labels_dic[network]]
self.subj_matrix = self.subj_matrix[:,self.labels_dic[network]]
                self.streamlines_sum = np.sum(self.subj_matrix)
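# A hypothetical usage sketch (added; the file names and labels dictionary
# below are placeholders, not part of the original module):
if __name__ == '__main__':
    matrices = sorted(glob.glob('subjects/*_connectome.txt'))  # one matrix per subject
    labels = {'DMN': np.array([0, 1, 2]), 'FPN': np.array([3, 4])}
    cm = Connectivity_metrics(matrices, 'net_labels.txt', labels)
    overall = cm.nodes_overall_conn(make_symmetric=True, upper_threshold=10)
    print(overall.shape)  # (n_subjects, n_nodes)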
from dataclasses import replace
import numpy as np
import pandas as pd
from sklearn import metrics, ensemble
from datasets.make_datasets import Dataset, val_split, Food101
def gradient_boosting_results(ds: Dataset, ood: Dataset) -> dict:
from conf import conf, normal
result_collection = dict()
# load model
conf = replace(conf, strategy=normal, in_distribution_data=ds, out_of_distribution_data=None)
model = conf.make_model()
model.load_weights(conf.checkpoint_filepath)
# true labels and predictions
y_true = np.hstack([y.numpy() for (x, y, w) in ds.load()])
pred_ds = model.predict(ds.load(), verbose=1)
pred_ood = model.predict(ood.load(), verbose=1)
# fit supervised classifier
val_ds_ = replace(ds, split=val_split).load()
val_ood_ = Food101(split=val_split).load()
X = model.predict(val_ds_, verbose=1)
y = [0] * X.shape[0]
X_out = model.predict(val_ood_, verbose=1)
y += [1] * X_out.shape[0]
X = np.vstack((X, X_out))
clf = ensemble.GradientBoostingClassifier(random_state=29)
clf.fit(X, y)
from sklearn.model_selection import cross_val_score
scores = cross_val_score(clf, X, y, cv=5, n_jobs=5)
print(f'Gradient Boosting: {ds.__class__.__name__} vs. {ood.__class__.__name__}')
print(f'Average CV Accuracy {ds.__class__.__name__} vs. Food101: {scores.mean().round(2)}. Full scores:', scores)
class_error = 1. - metrics.accuracy_score(y_true, pred_ds.argmax(1))
result_collection['classification error'] = class_error
print('Classification Error on dataset:', class_error)
pred = np.vstack((pred_ood, pred_ds))
ood_labels = [1] * len(pred_ood) + [0] * len(pred_ds)
r = pd.DataFrame({'pred': clf.predict(pred), 'scores': clf.predict_proba(pred)[:, 1]})
ood_error = 1. - metrics.accuracy_score(ood_labels, r.pred)
print('OOD Error:', ood_error)
result_collection['OOD error'] = ood_error
scores = r.scores
ood_auc = metrics.roc_auc_score(ood_labels, scores)
result_collection['OOD AUC'] = ood_auc
print('OOD Area under Curve:', ood_auc)
# comparison of anomaly score and misclassification
erroneous_prediction = y_true != pred_ds.argmax(1)
ood_labels = np.array(ood_labels)
clf_scores = r[ood_labels == 0]
try:
auc = metrics.roc_auc_score(erroneous_prediction, clf_scores['scores'].values)
except ValueError:
auc = -123456789
result_collection['AUC anomaly score and misclassification'] = auc
    def fpr95(y_true, y_pred):
        fpr, tpr, thresholds = metrics.roc_curve(y_true, y_pred)
        ix = np.argwhere(tpr >= 0.95)
        # assumed completion (the listing is truncated here): FPR at the
        # first threshold where TPR reaches 0.95
        return fpr[ix[0, 0]] if len(ix) > 0 else 1.0
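# Worked values for fpr95 (added note): with y_true = [0, 0, 1, 1] and
# scores = [0.1, 0.6, 0.4, 0.9], roc_curve first reaches TPR >= 0.95 at a
# threshold where FPR = 0.5, so the function would report 0.5.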
'''
<NAME>
15863
Exercise 8
'''
from math import exp
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
blur = pd.read_csv("blur.txt", sep=' ', header=None)
def G(x, y):
return exp(-(x ** 2 + y ** 2) / (2 * 25 ** 2))
gauss = np.ones((1024, 1024))
for i in range(1024):
for j in range(1024):
gauss[i - 512][j - 512] = G(i - 512, j - 512)
plt.imshow(blur)
plt.title('Blur')
plt.figure()
plt.imshow(gauss)
plt.title('Gauss')
plt.figure()
blur_fft = np.fft.rfft2(blur)
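# A possible continuation (a sketch, not the original solution): divide the two
# transforms, guarding against (near-)zero Fourier coefficients of the point
# spread function, then invert to recover the unblurred image. The cutoff eps
# is an illustrative modelling choice.
eps = 1e-3
gauss_fft = np.fft.rfft2(gauss)
deblur_fft = blur_fft / np.where(np.abs(gauss_fft) > eps, gauss_fft, 1.0)
deblurred = np.fft.irfft2(deblur_fft)
plt.imshow(deblurred)
plt.title('Deblurred (sketch)')
plt.show()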
## ############################################################### ##
## RC_FIAP (Reinforced Concrete Frame Inelastic Analysis Platform) ##
## ##
## Developed by: ##
## <NAME> (<EMAIL>) ##
## <NAME> (<EMAIL> ##
## RCFIAPMain.py : this is the main script that calls ##
## GUIFrameNonLinearACI.py : graphical environment ##
## mplwidget.py : cript to help plot the plastic hinge projector ##
## ############################################################### ##
import sys
from math import pi, sqrt, ceil, floor
from scipy import interpolate
import openseespy.opensees as op
from PyQt5.QtWidgets import *
# from PyQt5.uic import loadUi
from PyQt5.QtGui import QDoubleValidator, QIntValidator
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialog, QApplication
from GUIFrameNonLinearACI1d import *
import numpy as np # load the numpy module, calling it np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import colors, colorbar
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
matplotlib.use("Qt5Agg")
import pandas as pd
import os
import subprocess
import runpy
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Definition of units
m = 1. # define basic units -- output units
kN = 1. # define basic units -- output units
sec = 1. # define basic units -- output units
mm = m / 1000. # define engineering units
cm = m / 100.
N = kN / 1000.
MPa = N / mm ** 2
GPa = MPa * 1000
m2 = m ** 2 # m^2
m3 = m ** 3 # m^3
m4 = m ** 4 # m^4
inch = cm * 2.54
ft = 12. * inch
g = 9.81 * m / sec ** 2 # gravitational acceleration
kip = 4.448 * kN
ksi = kip / inch ** 2
psi = ksi / 1000.
lbf = psi * inch ** 2 # pounds force
pcf = lbf / ft ** 3 # pounds per cubic foot
psf = lbf / ft ** 2  # pounds per square foot
in2 = inch ** 2 # inch^2
in4 = inch ** 4 # inch^4
GConc = 24. * kN / m ** 3  # unit weight of concrete
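# For reference (added note): with these definitions every quantity is stored
# in kN-m-s units, e.g. 28 * MPa == 28000.0 (kN/m**2) and 1 * inch == 0.0254 (m).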
cbar = False
np.set_printoptions(precision=6)
class RegistroBeams:
def __init__(self, tbl_data_design_beams, id_, b, h, L_As_top, L_As_bot, L_Leg_n, L_Sstirrup, R_As_top, R_As_bot,
R_Leg_n, R_Sstirrup):
fila = tbl_data_design_beams.rowCount()
tbl_data_design_beams.insertRow(fila)
self.spx_id = QLineEdit(tbl_data_design_beams) # setattr(self, 'spx_id', QLineEdit(tbl_data_design_beams))
self.spx_id.setValidator(QIntValidator(0, 100))
self.spx_id.setText(f'B{id_}')
# self.spx_id.setStyleSheet('border-top: none; border-right: none; border-bottom: none')
# self.spx_id.setFont(('Times', 10))
self.spx_b = QLineEdit(tbl_data_design_beams)
self.spx_b.setValidator(QIntValidator(20, 1000))
self.spx_b.setText('{:d}'.format(int(b)))
# self.spx_b.setStyleSheet('border-top: none; border-right: none; border-bottom: none')
self.spx_h = QLineEdit(tbl_data_design_beams)
self.spx_h.setValidator(QIntValidator(20, 1000))
self.spx_h.setText('{:d}'.format(int(h)))
# self.spx_h.setStyleSheet('border-top: none; border-right: none; border-bottom: none')
self.spx_L_As_top = QLineEdit(tbl_data_design_beams)
self.spx_L_As_top.setValidator(QDoubleValidator(2., 400., 2))
self.spx_L_As_top.setText('{:.2f}'.format(L_As_top))
# self.spx_L_As_top.setStyleSheet('border-top: none; border-right: none; border-bottom: none')
self.spx_L_As_bot = QLineEdit(tbl_data_design_beams)
self.spx_L_As_bot.setValidator(QDoubleValidator(2., 400., 2))
self.spx_L_As_bot.setText('{:.2f}'.format(L_As_bot))
self.spx_L_Leg_n = QLineEdit(tbl_data_design_beams)
self.spx_L_Leg_n.setValidator(QIntValidator(2, 10))
self.spx_L_Leg_n.setText('{:d}'.format(int(L_Leg_n)))
self.spx_L_Sstirrup = QLineEdit(tbl_data_design_beams)
self.spx_L_Sstirrup.setValidator(QIntValidator(4, 30))
self.spx_L_Sstirrup.setText('{:d}'.format(int(L_Sstirrup)))
self.spx_R_As_top = QLineEdit(tbl_data_design_beams)
self.spx_R_As_top.setValidator(QDoubleValidator(2., 400., 2))
self.spx_R_As_top.setText('{:.2f}'.format(R_As_top))
self.spx_R_As_bot = QLineEdit(tbl_data_design_beams)
self.spx_R_As_bot.setValidator(QDoubleValidator(2., 400., 2))
self.spx_R_As_bot.setText('{:.2f}'.format(R_As_bot))
self.spx_R_Leg_n = QLineEdit(tbl_data_design_beams)
self.spx_R_Leg_n.setValidator(QIntValidator(2, 10))
self.spx_R_Leg_n.setText('{:d}'.format(int(R_Leg_n)))
self.spx_R_Sstirrup = QLineEdit(tbl_data_design_beams)
self.spx_R_Sstirrup.setValidator(QIntValidator(4, 30))
self.spx_R_Sstirrup.setText('{:d}'.format(int(R_Sstirrup)))
tbl_data_design_beams.setCellWidget(fila, 0, self.spx_id)
tbl_data_design_beams.setCellWidget(fila, 1, self.spx_b)
tbl_data_design_beams.setCellWidget(fila, 2, self.spx_h)
tbl_data_design_beams.setCellWidget(fila, 3, self.spx_L_As_top)
tbl_data_design_beams.setCellWidget(fila, 4, self.spx_L_As_bot)
tbl_data_design_beams.setCellWidget(fila, 5, self.spx_L_Leg_n)
tbl_data_design_beams.setCellWidget(fila, 6, self.spx_L_Sstirrup)
tbl_data_design_beams.setCellWidget(fila, 7, self.spx_R_As_top)
tbl_data_design_beams.setCellWidget(fila, 8, self.spx_R_As_bot)
tbl_data_design_beams.setCellWidget(fila, 9, self.spx_R_Leg_n)
tbl_data_design_beams.setCellWidget(fila, 10, self.spx_R_Sstirrup)
tbl_data_design_beams.setColumnWidth(0, 40)
tbl_data_design_beams.setColumnWidth(1, 40)
tbl_data_design_beams.setColumnWidth(2, 40)
tbl_data_design_beams.setColumnWidth(3, 60)
tbl_data_design_beams.setColumnWidth(4, 60)
tbl_data_design_beams.setColumnWidth(5, 60)
tbl_data_design_beams.setColumnWidth(6, 60)
tbl_data_design_beams.setColumnWidth(7, 60)
tbl_data_design_beams.setColumnWidth(8, 60)
tbl_data_design_beams.setColumnWidth(9, 60)
tbl_data_design_beams.setColumnWidth(10, 60)
class RegistroColumns:
def __init__(self, tbl_data_design_columns, id_, b, h, db, nbH, nbB, Leg_n_H, Leg_n_B, Sstirrup):
fila = tbl_data_design_columns.rowCount()
tbl_data_design_columns.insertRow(fila)
self.spx_id = QLineEdit(tbl_data_design_columns)
self.spx_id.setValidator(QIntValidator(0, 1000))
self.spx_id.setText(f'C{id_}')
self.spx_b = QLineEdit(tbl_data_design_columns)
self.spx_b.setValidator(QIntValidator(20, 1000))
self.spx_b.setText('{:d}'.format(int(b)))
self.spx_h = QLineEdit(tbl_data_design_columns)
self.spx_h.setValidator(QIntValidator(20, 1000))
self.spx_h.setText('{:d}'.format(int(h)))
self.spx_db = QLineEdit(tbl_data_design_columns)
self.spx_db.setValidator(QDoubleValidator(1., 10., 2))
self.spx_db.setText('{:.2f}'.format(db))
self.spx_nbH = QLineEdit(tbl_data_design_columns)
self.spx_nbH.setValidator(QIntValidator(2, 100))
self.spx_nbH.setText('{:d}'.format(int(nbH)))
self.spx_nbB = QLineEdit(tbl_data_design_columns)
self.spx_nbB.setValidator(QIntValidator(2, 100))
self.spx_nbB.setText('{:d}'.format(int(nbB)))
self.spx_Leg_n_H = QLineEdit(tbl_data_design_columns)
self.spx_Leg_n_H.setValidator(QIntValidator(2, 100))
self.spx_Leg_n_H.setText('{:d}'.format(int(Leg_n_H)))
self.spx_Leg_n_B = QLineEdit(tbl_data_design_columns)
self.spx_Leg_n_B.setValidator(QIntValidator(2, 100))
self.spx_Leg_n_B.setText('{:d}'.format(int(Leg_n_B)))
self.spx_Sstirrup = QLineEdit(tbl_data_design_columns)
self.spx_Sstirrup.setValidator(QIntValidator(2, 100))
self.spx_Sstirrup.setText('{:d}'.format(int(Sstirrup)))
tbl_data_design_columns.setCellWidget(fila, 0, self.spx_id)
tbl_data_design_columns.setCellWidget(fila, 1, self.spx_b)
tbl_data_design_columns.setCellWidget(fila, 2, self.spx_h)
tbl_data_design_columns.setCellWidget(fila, 3, self.spx_db)
tbl_data_design_columns.setCellWidget(fila, 4, self.spx_nbH)
tbl_data_design_columns.setCellWidget(fila, 5, self.spx_nbB)
tbl_data_design_columns.setCellWidget(fila, 6, self.spx_Leg_n_H)
tbl_data_design_columns.setCellWidget(fila, 7, self.spx_Leg_n_B)
tbl_data_design_columns.setCellWidget(fila, 8, self.spx_Sstirrup)
tbl_data_design_columns.setColumnWidth(0, 40)
tbl_data_design_columns.setColumnWidth(1, 40)
tbl_data_design_columns.setColumnWidth(2, 40)
tbl_data_design_columns.setColumnWidth(3, 60)
tbl_data_design_columns.setColumnWidth(4, 40)
tbl_data_design_columns.setColumnWidth(5, 40)
tbl_data_design_columns.setColumnWidth(6, 60)
tbl_data_design_columns.setColumnWidth(7, 60)
tbl_data_design_columns.setColumnWidth(8, 60)
class BeamElasticElement:
def __init__(self, EleTag, Nod_ini, Nod_end, AEle, EcEle, IzEle, LEle, BEle, HEle, ElegTr, RZi, RZe):
self.EleTag = EleTag
self.Nod_ini = Nod_ini
self.Nod_end = Nod_end
self.AEle = AEle
self.EcEle = EcEle
self.IzEle = IzEle
self.LEle = LEle
self.BEle = BEle
self.HEle = HEle
self.ElegTr = ElegTr
self.RZi = RZi
self.RZe = RZe
class BeamDesing:
def __init__(self, EleTag, b, h, Ast1, dt1, Mn_n1, Asb1, db1, Mn_p1, ns1, ss1, Ast2, dt2, Mn_n2, Asb2, db2, Mn_p2,
ns2, ss2, Nod_ini, Nod_end, db_t1, db_b1, db_t2, db_b2, Vpr, VU1, VU2):
self.EleTag = EleTag
self.b = b
self.h = h
self.Ast1 = Ast1
self.dt1 = dt1
self.Mn_n1 = Mn_n1
self.Asb1 = Asb1
self.db1 = db1
self.Mn_p1 = Mn_p1
self.ns1 = ns1
self.ss1 = ss1
self.Ast2 = Ast2
self.dt2 = dt2
self.Mn_n2 = Mn_n2
self.Asb2 = Asb2
self.db2 = db2
self.Mn_p2 = Mn_p2
self.ns2 = ns2
self.ss2 = ss2
self.Nod_ini = Nod_ini
self.Nod_end = Nod_end
self.db_t1 = db_t1
self.db_b1 = db_b1
self.db_t2 = db_t2
self.db_b2 = db_b2
self.Vpr = Vpr
self.VU1 = VU1
self.VU2 = VU2
class ColDesing:
def __init__(self, EleTag, b, h, nbH, nbB, db, As, Pu_v, Mu_v, fiPn, fiMn, Mn_i, d, dist, ro, Mu_i,
sst, nsB, nsH, Nod_ini, Nod_end, NUD1, NUD2, NUG1, NUG2, MUD1, MUD2, VUD1, VUD2, ColBeamStr):
self.EleTag = EleTag
self.b = b
self.h = h
self.nbH = nbH
self.nbB = nbB
self.db = db
self.As = As
self.Pu_v = Pu_v
self.Mu_v = Mu_v
self.fiPn = fiPn
self.fiMn = fiMn
self.Mn_i = Mn_i
self.d = d
self.dist = dist
self.ro = ro
self.Mu_i = Mu_i
self.sst = sst
self.nsB = nsB
self.nsH = nsH
self.Nod_ini = Nod_ini
self.Nod_end = Nod_end
self.NUD1 = NUD1
self.NUD2 = NUD2
self.NUG1 = NUG1
self.NUG2 = NUG2
self.MUD1 = MUD1
self.MUD2 = MUD2
self.VUD1 = VUD1
self.VUD2 = VUD2
self.ColBeamStr = ColBeamStr
class DuctilityCurve:
def __init__(self, xi, xe, yi, ye, CD_i, CD_e):
self.xi = xi
self.xe = xe
self.yi = yi
self.ye = ye
self.CD_i = CD_i
self.CD_e = CD_e
class PlasticRotationAngle:
def __init__(self, xi, xe, yi, ye, PRA_i, PRA_e):
self.xi = xi
self.xe = xe
self.yi = yi
self.ye = ye
self.PRA_i = PRA_i
self.PRA_e = PRA_e
class AcceptanceCriteria:
def __init__(self, IO_1, LS_1, CP_1, IO_2, LS_2, CP_2):
self.IO_1 = IO_1
self.LS_1 = LS_1
self.CP_1 = CP_1
self.IO_2 = IO_2
self.LS_2 = LS_2
self.CP_2 = CP_2
class PlasticHingeLength:
def __init__(self, phl1, phl2):
self.phl1 = phl1
self.phl2 = phl2
class MyForm(QDialog):
def __init__(self):
super().__init__()
self.ui = Ui_NonLinearFrameDialog()
#self.setStyleSheet("QLineEdit {border: none}")
self.ui.setupUi(self)
self.ui.Design.clicked.connect(self.Design)
self.ui.CreateNLM.clicked.connect(self.CreateNLM)
self.ui.Pushover.clicked.connect(self.Pushover)
self.ui.IDA.clicked.connect(self.IDA)
self.ui.progressBarPushover.hide()
self.ui.progressBarIDA.hide()
self.ui.progressBarBeamDesign.hide()
self.ui.progressBarColumnDesign.hide()
self.ui.Exit.clicked.connect(self.Exit)
self.show()
def Exit(self):
self.close()
def Design(self):
# global Loc_span, Loc_heigth, ListNodes, Elements, DataBeamDesing, DataColDesing, WDL, WLL, WDLS, Wtotal, \
# cover, num_elems, Beta1B, Beta1C, fcB, fcC
CodeDesign = self.ui.comboBoxDesignCode.currentText()
if CodeDesign == 'ACI 318S-19 IMF':
exec(open("Design_ACI_318S_19_IFM.py").read())
elif CodeDesign == 'ACI 318S-19 SMF':
exec(open("Design_ACI_318S_19_SFM.py").read())
# Creation of the nonlinear model
def CreateNLM(self):
global T1m, T2m, EleCol, EleBeam, MG_ElemsForceS1, MG_ElemsDeforS1, MG_ElemsForceS6, MG_ElemsDeforS6,\
DataBeamPhl, DataColPhl
# Validation of beam and column design table data
def validate_data(self):
cover = 4*cm
dst = 3/8*inch
for (r, DB) in zip(self.registros_beams, DataBeamDesing):
DB.b = float(r.spx_b.text()) * cm
DB.h = float(r.spx_h.text()) * cm
DB.Ast1 = float(r.spx_L_As_top.text()) * cm ** 2
DB.Asb1 = float(r.spx_L_As_bot.text()) * cm ** 2
DB.Ast2 = float(r.spx_R_As_top.text()) * cm ** 2
DB.Asb2 = float(r.spx_R_As_bot.text()) * cm ** 2
DB.ns1 = int(r.spx_L_Leg_n.text())
DB.ns2 = int(r.spx_R_Leg_n.text())
DB.ss1 = float(r.spx_L_Sstirrup.text()) * cm
DB.ss2 = float(r.spx_R_Sstirrup.text()) * cm
for (r, DC) in zip(self.registros_cols, DataColDesing):
DC.b = float(r.spx_b.text()) * cm
DC.h = float(r.spx_h.text()) * cm
DC.db = float(r.spx_db.text()) * mm
DC.nbH = int(r.spx_nbH.text())
DC.nbB = int(r.spx_nbB.text())
DC.nsH = int(r.spx_Leg_n_H.text())
DC.nsB = int(r.spx_Leg_n_B.text())
DC.sst = float(r.spx_Sstirrup.text()) * cm
dp = cover + dst + 0.5 * DC.db
DC.dist = np.linspace(dp, DC.h - dp, DC.nbH)
Ab = pi * DC.db ** 2. / 4.
DC.As = np.hstack([DC.nbB * Ab, np.ones(DC.nbH - 2) * 2 * Ab, DC.nbB * Ab])
# Function: Parameters of regularized unconfined concrete
def con_inconf_regu():
fpc = -fc
epsc0 = 2 * fpc / Ec
Gfc = max(2.0 * (-fpc / MPa) * N / mm, 25.0 * N / mm)
epscu = Gfc / (0.6 * fpc * phl) - 0.8 * fpc / Ec + epsc0
fcu = 0.2 * fpc
lambdaU = 0.10
ft = 0.33 * sqrt(-fpc * MPa)
Ets = ft / 0.002
return fpc, epsc0, fcu, epscu, lambdaU, ft, Ets
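        # Rough worked example (illustrative numbers, not from the source): for
        # fc = 28 MPa, Ec ~ 24.9 GPa and phl = 0.25 m this yields epsc0 ~ -0.0023,
        # Gfc = 56 N/mm and epscu ~ -0.015; the descending branch is stretched so
        # the dissipated energy stays independent of the hinge-length discretization.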
# Function: Parameters of regularized confined concrete
def con_conf_regu(b, h, nsB, nsH, sst):
fpc = -fc
bcx = h - 2. * cover - dst
bcy = b - 2. * cover - dst
Asx = nsB * Ast
Asy = nsH * Ast
Asvt = Asx + Asy
flx = Asvt * fy / sst / bcx
fly = Asvt * fy / sst / bcy
slx = bcx / (nsB - 1)
sly = bcy / (nsH - 1)
k2x = min(0.26 * sqrt((bcx / sst) * (bcx / slx) * (1000. / flx)), 1)
k2y = min(0.26 * sqrt((bcy / sst) * (bcy / sly) * (1000. / fly)), 1)
flex = k2x * flx
fley = k2y * fly
fle = (flex * bcx + fley * bcy) / (bcx + bcy)
k1 = 6.7 * (fle / 1000.) ** (-0.17)
fcc = fc + k1 * fle
fpcc = -fcc
Ecc = Ec
Gfc = max(2.0 * (-fpc / MPa) * N / mm, 25.0 * N / mm)
K = k1 * fle / fc
epscc0 = eo1 * (1. + 5. * K)
Gfcc = 1.7 * Gfc
epsccu = Gfcc / (0.6 * fpcc * phl) - 0.8 * fpcc / Ecc + epscc0
fccu = 0.2 * fpcc
lambdaC = 0.10
ft = 0.33 * sqrt(-fpc * MPa)
Ets = ft / 0.002
return fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets
# Function: Regularized steel parameters
def steel_mat_regu():
FyTestN4 = 490.0 * MPa
FsuTestN4 = 630.0 * MPa
epsuTestN4 = 0.10
LgageTestN4 = 200.0 * mm
Es = 200.0 * GPa
FyPosN4 = FyTestN4
epsyPosN4 = FyPosN4 / Es
FyNegN4 = FyTestN4
epsyNegN4 = FyNegN4 / Es
FsuPosN4 = FsuTestN4
epsuPosN4 = epsyPosN4 + LgageTestN4 / phl * (epsuTestN4 - epsyPosN4)
bPosN4 = (FsuPosN4 - FyPosN4) / (Es * (epsuPosN4 - epsyPosN4))
epsuNegN4 = min(-epsccu, epsuPosN4)
bNegN4 = bPosN4
# FsuNegN4 = FsuTestN4
FsuNegN4 = FyNegN4 + bNegN4 * (Es * (epsuNegN4 - epsyNegN4))
FsrPosN4 = 0.2 * FyPosN4
epsrPosN4 = (FsuPosN4 - FsrPosN4) / Es + 1.05 * epsuPosN4
FsrNegN4 = 0.2 * FsuNegN4
epsrNegN4 = (FsuNegN4 - FsrNegN4) / Es + 1.05 * epsuNegN4
pinchX = 0.2
pinchY = 0.8
damage1 = 0.0
damage2 = 0.0
beta = 0.0
# op.uniaxialMaterial('Hysteretic', Ele.EleTag * 6 + 4 + pos, FyPosN4, epsyPosN4, FsuPosN4, epsuPosN4
# , FsrPosN4, epsrPosN4, -FyNegN4, -epsyNegN4, -FsuNegN4, -epsuNegN4, -FsrNegN4
# , -epsrNegN4, pinchX, pinchY, damage1, damage2, beta)
SteelN4Mat = Ele.EleTag * 6 + 4 + pos
SteelMPFTag = int(1e6 * SteelN4Mat)
R0 = 20.0
cR1 = 0.925
cR2 = 0.15
a1 = 0.0
a2 = 1.0
a3 = 0.0
a4 = 0.0
print(Ele.EleTag, 'SteelMPF', int(SteelMPFTag), FyPosN4/MPa, FyNegN4/MPa, Es/GPa, bPosN4, bNegN4, R0, cR1, cR2, a1, a2, a3, a4)
op.uniaxialMaterial('SteelMPF', SteelMPFTag, FyPosN4, FyNegN4, Es, bPosN4, bNegN4, R0, cR1, cR2, a1, a2, a3,
a4)
print('MinMax', int(SteelN4Mat), int(SteelMPFTag), '-min', -epsuNegN4, '-max', epsuPosN4)
op.uniaxialMaterial('MinMax', SteelN4Mat, SteelMPFTag, '-min', -epsuNegN4, '-max', epsuPosN4)
# Function: Parameters of non-regularized confined concrete
def con_conf(b, h, nsB, nsH, sst):
fpc = -fc
bcx = h - 2. * cover - dst
bcy = b - 2. * cover - dst
Asx = nsB * Ast
Asy = nsH * Ast
Asvt = Asx + Asy
flx = Asvt * fy / sst / bcx
fly = Asvt * fy / sst / bcy
slx = bcx / (nsB - 1)
sly = bcy / (nsH - 1)
k2x = min(0.26 * sqrt((bcx / sst) * (bcx / slx) * (1000. / flx)), 1)
k2y = min(0.26 * sqrt((bcy / sst) * (bcy / sly) * (1000. / fly)), 1)
flex = k2x * flx
fley = k2y * fly
fle = (flex * bcx + fley * bcy) / (bcx + bcy)
k1 = 6.7 * (fle / 1000.) ** (-0.17)
fcc = fc + k1 * fle
fpcc = -fcc
K = k1 * fle / fc
epscc0 = eo1 * (1. + 5. * K)
rov = Asvt / sst / (bcx + bcy)
e85 = 260 * rov * epscc0 + eo85
epsccu = (e85 - epscc0) * (0.2 * fcc - fcc) / (0.85 * fcc - fcc) + epscc0
fccu = 0.2 * fpcc
lambdaC = 0.10
ft = 0.33 * sqrt(-fpc * MPa)
Ets = ft / 0.002
return fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets
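        # The k1/k2 lateral-pressure factors above follow a Saatcioglu-Razvi-type
        # confinement model; here the ultimate strain is tied to the 85%-strength
        # strain e85 rather than to a fracture-energy regularization.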
# Function: Parameters of non-regularized steel
def steel_mat():
FyTestN4 = 490.0 * MPa
FsuTestN4 = 630.0 * MPa
epsuTestN4 = 0.10
LgageTestN4 = phl
Es = 200.0 * GPa
FyPosN4 = FyTestN4
epsyPosN4 = FyPosN4 / Es
FyNegN4 = FyTestN4
epsyNegN4 = FyNegN4 / Es
FsuPosN4 = FsuTestN4
epsuPosN4 = epsyPosN4 + LgageTestN4 / phl * (epsuTestN4 - epsyPosN4)
bPosN4 = (FsuPosN4 - FyPosN4) / (Es * (epsuPosN4 - epsyPosN4))
epsuNegN4 = min(-epsccu, epsuPosN4)
bNegN4 = bPosN4
# FsuNegN4 = FsuTestN4
FsuNegN4 = FyNegN4 + bNegN4 * (Es * (epsuNegN4 - epsyNegN4))
FsrPosN4 = 0.2 * FyPosN4
epsrPosN4 = (FsuPosN4 - FsrPosN4) / Es + 1.05 * epsuPosN4
FsrNegN4 = 0.2 * FsuNegN4
epsrNegN4 = (FsuNegN4 - FsrNegN4) / Es + 1.05 * epsuNegN4
pinchX = 0.2
pinchY = 0.8
damage1 = 0.0
damage2 = 0.0
beta = 0.0
# op.uniaxialMaterial('Hysteretic', Ele.EleTag * 6 + 4 + pos, FyPosN4, epsyPosN4, FsuPosN4, epsuPosN4
# , FsrPosN4, epsrPosN4, -FyNegN4, -epsyNegN4, -FsuNegN4, -epsuNegN4, -FsrNegN4
# , -epsrNegN4, pinchX, pinchY, damage1, damage2, beta)
SteelN4Mat = Ele.EleTag * 6 + 4 + pos
            SteelMPFTag = int(1e6 * SteelN4Mat)  # material tags must be integers (cf. steel_mat_regu)
R0 = 20.0
cR1 = 0.925
cR2 = 0.15
a1 = 0.0
a2 = 1.0
a3 = 0.0
a4 = 0.0
# print('SteelMPF', int(SteelMPFTag), FyPosN4, FyNegN4, Es, bPosN4, bNegN4, R0, cR1, cR2, a1, a2, a3, a4)
op.uniaxialMaterial('SteelMPF', SteelMPFTag, FyPosN4, FyNegN4, Es, bPosN4, bNegN4, R0, cR1, cR2, a1, a2, a3,
a4)
# print('MinMax', int(SteelN4Mat), int(SteelMPFTag), '-min', -epsuNegN4, '-max', epsuPosN4)
op.uniaxialMaterial('MinMax', SteelN4Mat, SteelMPFTag, '-min', -epsuNegN4, '-max', epsuPosN4)
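        # Note (added): wrapping SteelMPF in a MinMax material caps the usable
        # strain range; once a fiber strain leaves [-epsuNegN4, epsuPosN4] the
        # material is flagged as failed (zero stress), emulating bar fracture.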
# Function: Creation of fibers in beams
def fiber_beam(Ast, Asb, pos):
op.section('Fiber', Ele.EleTag * 2 + pos)
op.patch('rect', Ele.EleTag * 6 + 2 + pos, 10, 1, -y2 + dp, -z2 + dp, y2 - dp, z2 - dp)
op.patch('rect', Ele.EleTag * 6 + pos, 10, 1, -y2 + dp, z2 - dp, y2 - dp, z2)
op.patch('rect', Ele.EleTag * 6 + pos, 10, 1, -y2 + dp, -z2, y2 - dp, -z2 + dp)
op.patch('rect', Ele.EleTag * 6 + pos, 2, 1, -y2, -z2, -y2 + dp, z2)
op.patch('rect', Ele.EleTag * 6 + pos, 2, 1, y2 - dp, -z2, y2, z2)
print(Ele.EleTag * 6 + 4 + pos, 1, Ast/cm**2, y2 - dp, z2 - dp, y2 - dp, -z2 + dp)
op.layer('straight', Ele.EleTag * 6 + 4 + pos, 1, Ast, y2 - dp, z2 - dp, y2 - dp, -z2 + dp)
print(Ele.EleTag * 6 + 4 + pos, 1, Asb/cm**2, -y2 + dp, z2 - dp, -y2 + dp, -z2 + dp)
op.layer('straight', Ele.EleTag * 6 + 4 + pos, 1, Asb, -y2 + dp, z2 - dp, -y2 + dp, -z2 + dp)
validate_data(self)
op.wipe() # The models is restarted in opensees
op.model('Basic', '-ndm', 2, '-ndf', 3)
for node in ListNodes:
op.node(int(node[0]), int(node[1]), int(node[2]))
if node[2] == 0.:
op.fix(int(node[0]), 1, 1, 1)
cover = 4 * cm
dst = 3 / 8 * inch
        Ast = pi * dst ** 2 / 4.  # cross-sectional area of one stirrup bar
        # creation of the columns
HBeam = float(self.ui.HBeam.text())
        HColi = float(self.ui.HColi.text())  # Column inside Depth
        # if node[2] > 0 and node[1] == 0:
        #     MasterNode = node[0]
        # if node[2] > 0 and node[1] != 0:
        #     op.equalDOF(int(MasterNode), int(node[0]), 1)
HCole = float(self.ui.HCole.text()) # Column outside Depth
fy = float(self.ui.fy.text()) * MPa
Es = 200.0 * GPa
fcB = float(self.ui.fcB.text()) * MPa
fcC = float(self.ui.fcC.text()) * MPa
op.geomTransf('PDelta', 1, '-jntOffset', 0, 0, 0, -HBeam / 2)
op.geomTransf('PDelta', 2, '-jntOffset', 0, HBeam / 2, 0, -HBeam / 2)
op.geomTransf('Corotational', 3, '-jntOffset', HColi / 2., 0, -HColi / 2., 0)
op.geomTransf('Corotational', 4, '-jntOffset', HCole / 2., 0, -HColi / 2., 0)
op.geomTransf('Corotational', 5, '-jntOffset', HColi / 2., 0, -HCole / 2., 0)
EleCol = []
EleBeam = []
for Ele in Elements:
if ListNodes[Ele.Nod_ini, 1] == ListNodes[Ele.Nod_end, 1]:
EleCol.append(Ele)
else:
EleBeam.append(Ele)
# Creation of non-linear elements (beams and columns)
eo1, eo85, eo20, lambdaU = -0.002, -0.0038, -0.006, 0.1
DataColPhl = []
for (Ele, DC) in zip(EleCol, DataColDesing):
fc, Ec = fcC, Ele.EcEle
if self.ui.radioButton05H.isChecked() == True:
phl = 0.5 * DC.h
if self.ui.radioButtonPark.isChecked() == True:
                phl = 0.08 * Ele.LEle + 0.022 * fy / MPa * DC.db  # DC.db is already a length in model units
if self.ui.radioButtonBerry.isChecked() == True:
phl = 0.05 * Ele.LEle + 0.1 * fy / MPa * DC.db / mm / sqrt(fc * MPa)
DataColPhl.append(PlasticHingeLength(phl, phl))
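            # Illustrative magnitudes (assumed inputs): for L = 3 m, h = 0.5 m,
            # fy = 420 MPa, db = 16 mm and fc = 28 MPa the three options give
            # roughly 0.25 m (0.5h), 0.39 m (Park) and 0.28 m (Berry).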
if self.ui.radioButtonYesRegu.isChecked() == True:
fpc, epsc0, fcu, epscu, lambdaU, ft, Ets = con_inconf_regu()
print('fpc, epsc0, fcu, epscu, lambdaU, ft, Ets', fpc/MPa, epsc0, fcu/MPa, epscu, lambdaU, ft/MPa, Ets/MPa)
op.uniaxialMaterial('Concrete02', Ele.EleTag * 6, fpc, epsc0, fcu, epscu, lambdaU, ft, Ets)
op.uniaxialMaterial('Concrete02', Ele.EleTag * 6 + 1, fpc, epsc0, fcu, epscu, lambdaU, ft, Ets)
fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets = con_conf_regu(DC.b, DC.h, DC.nsB, DC.nsH, DC.sst)
print('fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets', fpcc/MPa, epscc0, fccu/MPa, epsccu, lambdaC, ft/MPa, Ets/MPa)
op.uniaxialMaterial('Concrete02', Ele.EleTag * 6 + 2, fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets)
op.uniaxialMaterial('Concrete02', Ele.EleTag * 6 + 3, fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets)
pos = 0
steel_mat_regu()
pos = 1
steel_mat_regu()
if self.ui.radioButtonNoRegu.isChecked() == True:
ft = 0.33 * sqrt(fcC * MPa)
Ets = ft / 0.002
# print('Concrete02', Ele.EleTag * 6, -fcC, eo1, -0.2 * fcC, eo20, lambdaU, ft, Ets)
op.uniaxialMaterial('Concrete02', Ele.EleTag * 6, -fcC, eo1, -0.2 * fcC, eo20, lambdaU, ft, Ets)
op.uniaxialMaterial('Concrete02', Ele.EleTag * 6 + 1, -fcC, eo1, -0.2 * fcC, eo20, lambdaU, ft, Ets)
fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets = con_conf(DC.b, DC.h, DC.nsB, DC.nsH, DC.sst)
op.uniaxialMaterial('Concrete02', Ele.EleTag * 6 + 2, fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets)
op.uniaxialMaterial('Concrete02', Ele.EleTag * 6 + 3, fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets)
pos = 0
steel_mat()
pos = 1
steel_mat()
dp = DC.dist[0]
y1 = DC.h / 2.0
z1 = DC.b / 2.0
op.section('Fiber', Ele.EleTag)
op.patch('rect', Ele.EleTag * 6 + 2, 10, 1, -y1 + dp, -z1 + dp, y1 - dp, z1 - dp)
op.patch('rect', Ele.EleTag * 6, 10, 1, -y1 + dp, z1 - dp, y1 - dp, z1)
op.patch('rect', Ele.EleTag * 6, 10, 1, -y1 + dp, -z1, y1 - dp, -z1 + dp)
op.patch('rect', Ele.EleTag * 6, 2, 1, -y1, -z1, -y1 + dp, z1)
op.patch('rect', Ele.EleTag * 6, 2, 1, y1 - dp, -z1, y1, z1)
for dist, As in zip(DC.dist, DC.As):
# print('Col ', Ele.EleTag * 6 + 4, 1, As, -y1 + dist, z1 - dp, -y1 + dist, -z1 + dp)
op.layer('straight', Ele.EleTag * 6 + 4, 1, As, -y1 + dist, z1 - dp, -y1 + dist, -z1 + dp)
MassDens = Ele.AEle * GConc / g
op.beamIntegration('HingeRadau', Ele.EleTag, Ele.EleTag, phl, Ele.EleTag, phl, Ele.EleTag)
op.element('forceBeamColumn', Ele.EleTag, Ele.Nod_ini, Ele.Nod_end, Ele.ElegTr, Ele.EleTag
, '-mass', MassDens)
# print('DataColPhl =', DataColPhl)
DataBeamPhl = []
for (Ele, DB) in zip(EleBeam, DataBeamDesing):
fc, Ec, nsH = fcB, Ele.EcEle, 2
if self.ui.radioButton05H.isChecked() == True:
phl1 = 0.5 * DB.h
phl2 = 0.5 * DB.h
            if self.ui.radioButtonPark.isChecked() == True:
                phl1 = 0.08 * Ele.LEle + 0.022 * fy / MPa * DB.db_t1  # db_t is already a length in model units
                phl2 = 0.08 * Ele.LEle + 0.022 * fy / MPa * DB.db_t2
            if self.ui.radioButtonBerry.isChecked() == True:
                phl1 = 0.05 * Ele.LEle + 0.1 * fy / MPa * DB.db_t1 / mm / sqrt(fc * MPa)
                phl2 = 0.05 * Ele.LEle + 0.1 * fy / MPa * DB.db_t2 / mm / sqrt(fc * MPa)
DataBeamPhl.append(PlasticHingeLength(phl1, phl2))
if self.ui.radioButtonYesRegu.isChecked() == True:
phl = phl1
fpc, epsc0, fcu, epscu, lambdaU, ft, Ets = con_inconf_regu()
print('fpc, epsc0, fcu, epscu, lambdaU, ft, Ets', fpc/MPa, epsc0, fcu/MPa, epscu, lambdaU, ft/MPa, Ets/MPa)
op.uniaxialMaterial('Concrete02', Ele.EleTag * 6, fpc, epsc0, fcu, epscu, lambdaU, ft, Ets)
phl = phl2
fpc, epsc0, fcu, epscu, lambdaU, ft, Ets = con_inconf_regu()
print('fpc, epsc0, fcu, epscu, lambdaU, ft, Ets', fpc/MPa, epsc0, fcu/MPa, epscu, lambdaU, ft/MPa, Ets/MPa)
op.uniaxialMaterial('Concrete02', Ele.EleTag * 6 + 1, fpc, epsc0, fcu, epscu, lambdaU, ft, Ets)
phl, pos = phl1, 0
fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets = con_conf_regu(DB.b, DB.h, DB.ns1, nsH, DB.ss1)
print('fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets', fpcc/MPa, epscc0, fccu/MPa, epsccu, lambdaC, ft/MPa, Ets/MPa)
op.uniaxialMaterial('Concrete02', Ele.EleTag * 6 + 2, fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets)
steel_mat_regu()
phl, pos = phl2, 1
fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets = con_conf_regu(DB.b, DB.h, DB.ns2, nsH, DB.ss2)
print('fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets', fpcc/MPa, epscc0, fccu/MPa, epsccu, lambdaC, ft/MPa, Ets/MPa)
op.uniaxialMaterial('Concrete02', Ele.EleTag * 6 + 3, fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets)
steel_mat_regu()
if self.ui.radioButtonNoRegu.isChecked() == True:
ft = 0.33 * sqrt(fcB * MPa)
Ets = ft / 0.002
op.uniaxialMaterial('Concrete02', Ele.EleTag * 6, -fcB, eo1, -0.2 * fcB, eo20, lambdaU, ft, Ets)
op.uniaxialMaterial('Concrete02', Ele.EleTag * 6 + 1, -fcB, eo1, -0.2 * fcB, eo20, lambdaU, ft, Ets)
fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets = con_conf(DB.b, DB.h, DB.ns1, nsH, DB.ss1)
op.uniaxialMaterial('Concrete02', Ele.EleTag * 6 + 2, fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets)
fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets = con_conf(DB.b, DB.h, DB.ns2, nsH, DB.ss2)
op.uniaxialMaterial('Concrete02', Ele.EleTag * 6 + 3, fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets)
pos = 0
steel_mat()
pos = 1
steel_mat()
y2 = DB.h / 2.0
z2 = DB.b / 2.0
dp = DB.h - min(DB.db1, DB.dt1)
print('dp1 =', dp)
pos = 0
fiber_beam(DB.Ast1, DB.Asb1, pos)
dp = DB.h - min(DB.db2, DB.dt2)
print('dp2 =', dp)
pos = 1
fiber_beam(DB.Ast2, DB.Asb2, pos)
MassDens = Ele.AEle * GConc / g + WDLS / g
print('phl1', phl1, 'phl2', phl2)
op.beamIntegration('HingeRadau', Ele.EleTag, Ele.EleTag * 2, phl1, Ele.EleTag * 2 + 1, phl2, Ele.EleTag * 2)
op.element('forceBeamColumn', Ele.EleTag, Ele.Nod_ini, Ele.Nod_end, Ele.ElegTr, Ele.EleTag
, '-mass', MassDens)
# print('DataBeamPhl =', DataBeamPhl)
list_beams = [Ele.EleTag for Ele in EleBeam]
list_cols = [Ele.EleTag for Ele in EleCol]
print('list_beams =', list_beams)
print('list_cols =', list_cols)
if not os.path.exists("Pushover"):
os.mkdir("Pushover")
# Recording of forces and deformations from nonlinear analysis
op.recorder('Element', '-file', 'Pushover/beams_force_1.out',
'-time', '-ele', *list_beams, 'section', 1, 'force')
op.recorder('Element', '-file', 'Pushover/beams_def_1.out',
'-time', '-ele', *list_beams, 'section', 1, 'deformation')
op.recorder('Element', '-file', 'Pushover/beams_force_6.out',
'-time', '-ele', *list_beams, 'section', 6, 'force')
op.recorder('Element', '-file', 'Pushover/beams_def_6.out',
'-time', '-ele', *list_beams, 'section', 6, 'deformation')
op.recorder('Element', '-file', 'Pushover/cols_force_1.out',
'-time', '-ele', *list_cols, 'section', 1, 'force')
op.recorder('Element', '-file', 'Pushover/cols_def_1.out',
'-time', '-ele', *list_cols, 'section', 1, 'deformation')
op.recorder('Element', '-file', 'Pushover/cols_force_6.out',
'-time', '-ele', *list_cols, 'section', 6, 'force')
op.recorder('Element', '-file', 'Pushover/cols_def_6.out',
'-time', '-ele', *list_cols, 'section', 6, 'deformation')
op.recorder('Node', '-file', 'Pushover/HoriNodes.out',
'-time', '-node', *ListNodes, '-dof', 1, 'disp')
op.recorder('Node', '-file', 'Pushover/VertNodes.out',
'-time', '-node', *ListNodes, '-dof', 2, 'disp')
# Create a Plain load pattern for gravity loading with a Linear TimeSeries
op.timeSeries('Linear', 1)
op.pattern('Plain', 1, 1)
for Ele in EleCol:
op.eleLoad('-ele', Ele.EleTag, '-type', '-beamUniform', 0, -Ele.AEle * GConc)
for Ele in EleBeam:
op.eleLoad('-ele', Ele.EleTag, '-type', '-beamUniform', -Ele.AEle * GConc - WDL - 0.25*WLL)
Tol = 1.0e-6 # convergence tolerance for test
op.constraints('Plain') # how it handles boundary conditions
op.numberer('Plain') # renumber dof to minimize band-width (optimization), if you want to
op.system('BandGeneral') # how to store and solve the system of equations in the analysis
op.test('NormDispIncr', Tol, 100) # determine if convergence has been achieved at the end of an iteration step
op.algorithm('KrylovNewton') # use Newton solution algorithm: updates tangent stiffness at every iteration
NstepGravity = 10 # apply gravity in 10 steps
DGravity = 1. / NstepGravity # first load increment;
op.integrator('LoadControl', DGravity) # determine the next time step for an analysis
op.analysis('Static') # define type of analysis static or transient
nele = num_elems-1
MG_ElemsForceS1, MG_ElemsDeforS1, MG_ElemsForceS6, MG_ElemsDeforS6 = np.zeros(2*nele), np.zeros(2*nele), \
np.zeros(2*nele), np.zeros(2*nele)
step = 1
loadf = 1.0
while step <= NstepGravity and loadf > 0:
ElemsForceS1, ElemsDeforS1, ElemsForceS6, ElemsDeforS6 = [], [], [], []
op.analyze(1)
for Element in Elements:
ForcesS1 = np.array(op.eleResponse(Element.EleTag, 'section', 1, 'force'))
ForcesS6 = np.array(op.eleResponse(Element.EleTag, 'section', 6, 'force'))
DeforsS1 = np.array(op.eleResponse(Element.EleTag, 'section', 1, 'deformation'))
DeforsS6 = np.array(op.eleResponse(Element.EleTag, 'section', 6, 'deformation'))
ElemsForceS1 = np.append(ElemsForceS1, ForcesS1)
ElemsDeforS1 = np.append(ElemsDeforS1, DeforsS1)
ElemsForceS6 = np.append(ElemsForceS6, ForcesS6)
ElemsDeforS6 = np.append(ElemsDeforS6, DeforsS6)
MG_ElemsForceS1 = np.vstack((MG_ElemsForceS1, ElemsForceS1))
MG_ElemsDeforS1 = np.vstack((MG_ElemsDeforS1, ElemsDeforS1))
MG_ElemsForceS6 = np.vstack((MG_ElemsForceS6, ElemsForceS6))
MG_ElemsDeforS6 = np.vstack((MG_ElemsDeforS6, ElemsDeforS6))
loadf = op.getTime()
step += 1
op.loadConst('-time', 0.0)
print("Model Nonlinear Built")
xi = 0.02 # damping ratio
MpropSwitch = 1.0
KcurrSwitch = 0.0
KcommSwitch = 1.0
KinitSwitch = 0.0
floors_num = len(Loc_heigth) - 1
print('floors_num =', floors_num)
if floors_num >= 2:
nEigenI = 1 # mode 1
nEigenI2 = 2 # mode 2
nEigenJ = 3 # mode 3
lambdaN = op.eigen(nEigenJ) # eigenvalue analysis for nEigenJ modes
lambdaI = lambdaN[nEigenI - 1] # eigenvalue mode i
lambdaI2 = lambdaN[nEigenI2 - 1] # eigenvalue mode i2
# lambdaJ = lambdaN[nEigenJ - 1] # eigenvalue mode j
print('lambdaN ', lambdaN)
omegaI = pow(lambdaI, 0.5)
omegaI2 = pow(lambdaI2, 0.5)
# omegaJ = pow(lambdaJ, 0.5)
T1m = 2. * pi / omegaI
T2m = 2. * pi / omegaI2
print('Ta1=', T1m, 'seg', ' Ta2=', T2m, ' seg')
# alphaM = MpropSwitch * xi * (2. * omegaI * omegaJ) / (omegaI + omegaJ) # M-prop. damping D = alphaM*M
# betaKcurr = KcurrSwitch * 2. * xi / (omegaI + omegaJ) # current-K +beatKcurr*KCurrent
# betaKcomm = KcommSwitch * 2. * xi / (omegaI + omegaJ) # last-committed K +betaKcomm*KlastCommitt
# betaKinit = KinitSwitch * 2. * xi / (omegaI + omegaJ) # initial-K +beatKinit*Kini
# op.rayleigh(alphaM, betaKcurr, betaKinit, betaKcomm) # RAYLEIGH damping
elif floors_num == 1:
nEigenI = 1 # mode 1
# nEigenI2 = 2 # mode 2
lambdaN = op.eigen(nEigenI) # eigenvalue analysis for nEigenJ modes
lambdaI = lambdaN[nEigenI - 1] # eigenvalue mode i
# lambdaI2 = lambdaN[nEigenI2 - 1] # eigenvalue mode i2
print('lambdaN ', lambdaN)
omegaI = pow(lambdaI, 0.5)
# omegaI2 = pow(lambdaI2, 0.5)
T1m = 2. * pi / omegaI
# T2m = 2. * pi / omegaI2
print('Ta1=', T1m, 'seg')
self.ui.tabWidget.setCurrentIndex(2)
# Pushover function
def Pushover(self):
global cbar
def singlePush1(dref, mu, ctrlNode, dispDir, nSteps):
IOflag = 2
testType = 'RelativeNormDispIncr'
# set testType EnergyIncr; # Dont use with Penalty constraints
# set testType RelativeNormUnbalance; # Dont use with Penalty constraints
# set testType RelativeNormDispIncr; # Dont use with Lagrange constraints
# set testType RelativeTotalNormDispIncr; # Dont use with Lagrange constraints
# set testType RelativeEnergyIncr; # Dont use with Penalty constraints
tolInit = 1.0e-6 # the initial Tolerance, so it can be referred back to
iterInit = 50 # the initial Max Number of Iterations
algorithmType = 'KrylovNewton' # the algorithm type
op.test(testType, tolInit
, iterInit) # determine if convergence has been achieved at the end of an iteration step
op.algorithm(algorithmType) # use Newton solution algorithm: updates tangent stiffness at every iteration
disp = dref * mu
dU = disp / (1.0 * nSteps)
print('dref ', dref, 'mu ', mu, 'dU ', dU, 'disp ', disp)
op.integrator('DisplacementControl', ctrlNode, dispDir, dU) # determine the next time step for an analysis
op.analysis('Static') # define type of analysis static or transient
# Print values
if IOflag >= 1:
print('singlePush: Push ', ctrlNode, ' to ', mu)
# the initial values to start the while loop
nele = num_elems - 1
MP_ElemsForceS1, MP_ElemsDeforS1, MP_ElemsForceS6, MP_ElemsDeforS6 = np.zeros(2 * nele), np.zeros(2 * nele), \
np.zeros(2 * nele), np.zeros(2 * nele)
ok = 0
step = 1
loadf = 1.0
            # Negative load factors are disallowed: the analysis stops once the load factor drops below zero.
            # This has been adapted from a similar script by <NAME>
htot = op.nodeCoord(ctrlNode, 2)
maxDriftPiso = 0.0
VBasal_v = []
DriftTecho_v = []
while step <= nSteps and ok == 0 and loadf > 0:
self.ui.progressBarPushover.setValue(100 * step / nSteps)
ElemsForceS1, ElemsDeforS1, ElemsForceS6, ElemsDeforS6 = [], [], [], []
ok = op.analyze(1)
loadf = op.getTime()
temp = op.nodeDisp(ctrlNode, dispDir)
# Print the current displacement
if IOflag >= 2:
print('Pushed ', ctrlNode, ' in ', dispDir, ' to ', temp, ' with ', loadf, 'step', step)
# If the analysis fails, try the following changes to achieve convergence
# Analysis will be slower in here though...
if ok != 0:
print('Trying relaxed convergence..')
op.test(testType, tolInit * 0.01,
iterInit * 50) # determine if convergence has been achieved at the end of an iteration step
ok = op.analyze(1)
op.test(testType, tolInit,
iterInit) # determine if convergence has been achieved at the end of an iteration step
if ok != 0:
print('Trying Newton with initial then current .')
op.test(testType, tolInit * 0.01,
iterInit * 50) # determine if convergence has been achieved at the end of an iteration step
op.algorithm('Newton', '-initialThenCurrent')
ok = op.analyze(1)
op.algorithm(algorithmType)
op.test(testType, tolInit,
iterInit) # determine if convergence has been achieved at the end of an iteration step
if ok != 0:
print('Trying ModifiedNewton with initial ..')
op.test(testType, tolInit * 0.01,
iterInit * 50) # determine if convergence has been achieved at the end of an iteration step
op.algorithm('ModifiedNewton', '-initial')
ok = op.analyze(1)
op.algorithm(algorithmType)
op.test(testType, tolInit,
iterInit) # determine if convergence has been achieved at the end of an iteration step
if ok != 0:
print('Trying KrylovNewton ..')
op.test(testType, tolInit * 0.01,
iterInit * 50) # determine if convergence has been achieved at the end of an iteration step
op.algorithm('KrylovNewton')
ok = op.analyze(1)
op.algorithm(algorithmType)
op.test(testType, tolInit,
iterInit) # determine if convergence has been achieved at the end of an iteration step
if ok != 0:
print('Trying FixedNumIter .. ....')
op.test('FixedNumIter',
iterInit) # determine if convergence has been achieved at the end of an iteration step
ok = op.analyze(1)
for (nod_ini, nod_end) in zip(ListNodesDrift[:-1, 0], ListNodesDrift[1:, 0]):
# print('nod_ini ', nod_ini, 'nod_end', nod_end)
nod_ini = int(nod_ini)
nod_end = int(nod_end)
pos_i = op.nodeCoord(nod_ini, 2)
pos_s = op.nodeCoord(nod_end, 2)
hpiso = pos_s - pos_i
desp_i = op.nodeDisp(nod_ini, 1)
desp_s = op.nodeDisp(nod_end, 1)
desp_piso = abs(desp_s - desp_i)
drift_piso = desp_piso / hpiso
if drift_piso >= maxDriftPiso:
maxDriftPiso = drift_piso
VBasal = 0.
op.reactions()
for node in ListNodesBasal:
# print('ind Basal ', node[0])
VBasal = VBasal + op.nodeReaction(node[0], 1)
VBasal_v = np.append(VBasal_v, VBasal)
DriftTecho = op.nodeDisp(ctrlNode, dispDir) / htot
DriftTecho_v = np.append(DriftTecho_v, DriftTecho)
for Element in Elements:
ForcesS1 = np.array(op.eleResponse(Element.EleTag, 'section', 1, 'force'))
ForcesS6 = np.array(op.eleResponse(Element.EleTag, 'section', 6, 'force'))
DeforsS1 = np.array(op.eleResponse(Element.EleTag, 'section', 1, 'deformation'))
DeforsS6 = np.array(op.eleResponse(Element.EleTag, 'section', 6, 'deformation'))
ElemsForceS1 = np.append(ElemsForceS1, ForcesS1)
ElemsDeforS1 = np.append(ElemsDeforS1, DeforsS1)
ElemsForceS6 = np.append(ElemsForceS6, ForcesS6)
ElemsDeforS6 = np.append(ElemsDeforS6, DeforsS6)
MP_ElemsForceS1 = np.vstack((MP_ElemsForceS1, ElemsForceS1))
MP_ElemsDeforS1 = np.vstack((MP_ElemsDeforS1, ElemsDeforS1))
MP_ElemsForceS6 = np.vstack((MP_ElemsForceS6, ElemsForceS6))
MP_ElemsDeforS6 = np.vstack((MP_ElemsDeforS6, ElemsDeforS6))
loadf = op.getTime()
step += 1
# print('MP_ElemsForceS1 =', MP_ElemsForceS1)
            # print('Shape of MP_ElemsForceS1 =', np.shape(MP_ElemsForceS1))
maxDriftTecho = dU * step / htot
maxDriftTecho2 = op.nodeDisp(ctrlNode, dispDir) / htot
if ok != 0:
print('DispControl Analysis FAILED')
else:
print('DispControl Analysis SUCCESSFUL')
if loadf <= 0:
print('Stopped because of Load factor below zero: ', loadf)
# if PrintFlag == 0:
# os.remove("singlePush.txt")
# print singlePush.txt
return maxDriftPiso, maxDriftTecho, maxDriftTecho2, VBasal_v, DriftTecho_v, MP_ElemsForceS1, MP_ElemsDeforS1, MP_ElemsForceS6, MP_ElemsDeforS6
# Pushover function varying tests and algorithms
def singlePush(dref, mu, ctrlNode, dispDir, nSteps):
# --------------------------------------------------
# Description of Parameters
# --------------------------------------------------
            # dref: Reference displacement to which cycles are run. Corresponds to the yield
            #       displacement or an equivalent reference value, such as 1 mm.
            # mu: Multiple of dref to which the push is run, so the pushover can be run to a
            #     specified ductility or displacement.
            # ctrlNode: Node to control with the displacement integrator.
            # dispDir: DOF in which the loading is applied.
            # nSteps: Number of steps.
            # IOflag: Option to print details on screen. 2 prints each step, 1 basic info (default), 0 off.
# ---------------------------------------------------
test = {1: 'NormDispIncr', 2: 'RelativeEnergyIncr', 3: 'EnergyIncr',
4: 'RelativeNormUnbalance', 5: 'RelativeNormDispIncr',
6: 'NormUnbalance', 7: 'FixedNumIter'}
alg = {1: 'KrylovNewton', 2: 'SecantNewton', 3: 'ModifiedNewton',
4: 'RaphsonNewton', 5: 'PeriodicNewton', 6: 'BFGS',
7: 'Broyden', 8: 'NewtonLineSearch'}
# test = {1:'NormDispIncr', 2: 'RelativeEnergyIncr', 3:'EnergyIncr'}
# alg = {1:'KrylovNewton', 2:'ModifiedNewton'}
IOflag = 2
PrintFlag = 0
testType = 'RelativeNormDispIncr' # Dont use with Penalty constraints
tolInit = 1.0e-7 # the initial Tolerance, so it can be referred back to
iterInit = 50 # the initial Max Number of Iterations
algorithmType = 'KrylovNewton' # the algorithm type
            # algorithmType Newton; # the algorithm type
# op.constraints('Transformation') # how it handles boundary conditions
# op.numberer('RCM') # renumber dof to minimize band-width (optimization), if you want to
# op.system('BandGeneral') # how to store and solve the system of equations in the analysis
op.test(testType, tolInit,
iterInit) # determine if convergence has been achieved at the end of an iteration step
op.algorithm(algorithmType) # use Newton solution algorithm: updates tangent stiffness at every iteration
disp = dref * mu
dU = disp / (1.0 * nSteps)
print('dref ', dref, 'mu ', mu, 'dU ', dU, 'disp ', disp, 'nSteps ', nSteps)
op.integrator('DisplacementControl', ctrlNode, dispDir, dU) # determine the next time step for an analysis
            op.analysis('Static')  # define type of analysis: static or transient
# Print values
if IOflag >= 1:
print('singlePush: Push ', ctrlNode, ' to ', mu)
nele = num_elems - 1
MP_ElemsForceS1, MP_ElemsDeforS1, MP_ElemsForceS6, MP_ElemsDeforS6 = np.zeros(2 * nele), np.zeros(2 * nele), \
np.zeros(2 * nele), np.zeros(2 * nele)
# the initial values to start the while loop
ok = 0
step = 1
loadf = 1.0
# The loop condition below disables the possibility of a negative load factor.
# This has been adapted from a similar script by <NAME>
maxDriftPiso = 0.0
htot = op.nodeCoord(ctrlNode, 2)
VBasal_v = []
DriftTecho_v = []
# factor_v = np.array([1,0.75,0.5,0.25,0.1,2,3,5,10])
# fact_v = np.array([50,100,500])
# factor = 100
# fact = 1.
while step <= nSteps and ok == 0 and loadf > 0:
self.ui.progressBarPushover.setValue(int(100 * step / nSteps))
ElemsForceS1, ElemsDeforS1, ElemsForceS6, ElemsDeforS6 = [], [], [], []
ok = op.analyze(1)
loadf = op.getTime()
temp = op.nodeDisp(ctrlNode, dispDir)
if IOflag >= 2:
print('Pushed ', ctrlNode, ' in ', dispDir, ' to ', temp, ' with ', loadf, 'step ', step)
# for factor in factor_v:
# op.integrator('DisplacementControl',ctrlNode,dispDir,factor*dU) # determine the next time step for an analysis
# for fact in fact_v:
for j in alg:
for i in test:
for fact in [1, 20, 50]:
if ok != 0 and j >= 4 and i != 7:
# print('Trying ',str(alg[j]))
op.test(test[i], tolInit * .01, iterInit * fact)
op.algorithm(alg[j])
ok = op.analyze(1)
op.algorithm(algorithmType)
op.test(testType, tolInit, iterInit)
elif ok != 0 and j < 4 and i != 7:
# print('Trying ',str(alg[j]))
op.test(test[i], tolInit, iterInit * fact)
op.algorithm(alg[j], '-initial')
ok = op.analyze(1)
op.algorithm(algorithmType)
op.test(testType, tolInit, iterInit)
if ok == 0:
break
if ok != 0 and i == 7:
op.test(test[i], iterInit)
op.algorithm(alg[j])
ok = op.analyze(1)
if ok == 0:
break
if ok == 0:
break
# if ok == 0:
# break
# if ok == 0:
# break
# op.integrator('DisplacementControl',ctrlNode,dispDir,dU) # determine the next time step for an analysis
# Calculation of maximum Drift between floors
for (nod_ini, nod_end) in zip(ListNodesDrift[:-1, 0], ListNodesDrift[1:, 0]):
# print('nod_ini ', nod_ini, 'nod_end', nod_end)
nod_ini = int(nod_ini)
nod_end = int(nod_end)
pos_i = op.nodeCoord(nod_ini, 2)
pos_s = op.nodeCoord(nod_end, 2)
hpiso = pos_s - pos_i
desp_i = op.nodeDisp(nod_ini, 1)
desp_s = op.nodeDisp(nod_end, 1)
desp_piso = abs(desp_s - desp_i)
drift_piso = desp_piso / hpiso
if drift_piso >= maxDriftPiso:
maxDriftPiso = drift_piso
VBasal = 0.
op.reactions()
for node in ListNodesBasal:
# print('ind Basal ', node[0])
VBasal = VBasal + op.nodeReaction(node[0], 1)
VBasal_v = np.append(VBasal_v, VBasal)
DriftTecho = op.nodeDisp(ctrlNode, dispDir) / htot
DriftTecho_v = np.append(DriftTecho_v, DriftTecho)
for Element in Elements:
ForcesS1 = np.array(op.eleResponse(Element.EleTag, 'section', 1, 'force'))
ForcesS6 = np.array(op.eleResponse(Element.EleTag, 'section', 6, 'force'))
DeforsS1 = np.array(op.eleResponse(Element.EleTag, 'section', 1, 'deformation'))
DeforsS6 = np.array(op.eleResponse(Element.EleTag, 'section', 6, 'deformation'))
ElemsForceS1 = np.append(ElemsForceS1, ForcesS1)
ElemsDeforS1 = np.append(ElemsDeforS1, DeforsS1)
ElemsForceS6 = np.append(ElemsForceS6, ForcesS6)
ElemsDeforS6 = np.append(ElemsDeforS6, DeforsS6)
MP_ElemsForceS1 = np.vstack((MP_ElemsForceS1, ElemsForceS1))
MP_ElemsDeforS1 = np.vstack((MP_ElemsDeforS1, ElemsDeforS1))
MP_ElemsForceS6 = np.vstack((MP_ElemsForceS6, ElemsForceS6))
MP_ElemsDeforS6 = np.vstack((MP_ElemsDeforS6, ElemsDeforS6))
loadf = op.getTime()
step += 1
maxDriftTecho = dU * step / htot
maxDriftTecho2 = op.nodeDisp(ctrlNode, dispDir) / htot
if ok != 0:
print('DispControl Analysis FAILED')
else:
print('DispControl Analysis SUCCESSFUL')
if loadf <= 0:
print('Stopped because of Load factor below zero: ', loadf)
# if PrintFlag == 0:
# os.remove("singlePush.txt")
# print singlePush.txt
return maxDriftPiso, maxDriftTecho, maxDriftTecho2, VBasal_v, DriftTecho_v, MP_ElemsForceS1, MP_ElemsDeforS1, MP_ElemsForceS6, MP_ElemsDeforS6
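# A minimal, hypothetical sketch of the convergence-retry pattern used inside
# singlePush above, factored out for clarity. The helper name and arguments
# are illustrative, not part of the original script.
def try_alternatives(tests, algorithms, tol, iters):
    for algo in algorithms.values():
        for tst in tests.values():
            op.test(tst, tol, iters)   # switch convergence test
            op.algorithm(algo)         # switch solution algorithm
            if op.analyze(1) == 0:     # retry the failed step
                return 0               # converged
    return -1                          # every combination failed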
ListNodesDrift = ListNodes[np.where(ListNodes[:, 1] == 0.)]
ListNodesBasal = ListNodes[np.where(ListNodes[:, 2] == 0.)]
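# Period-dependent exponent k for the lateral load shape (an ASCE 7-style
# vertical distribution: k = 1 for short periods, k = 2 for long periods,
# linear in between).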
if T1m <= 0.5:
k = 1.
elif T1m <= 2.5:
k = 0.75 + 0.5 * T1m
else:
k = 2.
sumH = np.sum(np.power(Loc_heigth, k))
floors_num = len(Loc_heigth)
# Defining the pushover lateral distribution type
if self.ui.radioButtonTriangular.isChecked() == True:
Fp = np.power(Loc_heigth, k) / sumH
if self.ui.radioButtonUniform.isChecked() == True:
Fp = 1. / floors_num * np.ones(floors_num + 1)
print('Fp =', Fp)
op.loadConst('-time', 0.0)
op.timeSeries('Linear', 2)
op.pattern('Plain', 2, 1)
for (node, fp, ind) in zip(ListNodesDrift, Fp, range(floors_num)):
op.load(int(node[0]), fp, 0.0, 0.0)
Htotal = Loc_heigth[-1]
Der_obj = float(self.ui.Der_obj.text())
Des_obj = Der_obj * Htotal  # Target displacement
nSteps = int(self.ui.nSteps.text())
dref = Des_obj / nSteps
mu = nSteps
IDctrlNode = int(ListNodesDrift[-1, 0]) # Node where displacement is read
print('IDctrlNode =', IDctrlNode)
IDctrlDOF = 1 # DOF x=1, y=2
Tol = 1.0e-4 # Tolerance
self.ui.progressBarPushover.show()
if self.ui.radioButtonFast.isChecked() == True:
maxDriftPiso, maxDriftTecho, maxDriftTecho2, VBasal_v, DriftTecho_v, MP_ElemsForceS1, MP_ElemsDeforS1,\
MP_ElemsForceS6, MP_ElemsDeforS6 = singlePush1(dref, mu, IDctrlNode, IDctrlDOF, nSteps)
if self.ui.radioButtonForced.isChecked() == True:
maxDriftPiso, maxDriftTecho, maxDriftTecho2, VBasal_v, DriftTecho_v, MP_ElemsForceS1, MP_ElemsDeforS1,\
MP_ElemsForceS6, MP_ElemsDeforS6 = singlePush(dref, mu, IDctrlNode, IDctrlDOF, nSteps)
self.ui.progressBarPushover.hide()
op.wipe()
# Plot pushover curve
fig = self.ui.PushCurve.canvas.axes
fig.clear()
ax = fig.add_axes([0.1, 0.2, 0.85, 0.7])
ax.plot(DriftTecho_v*100, -VBasal_v / Wtotal, '.-')
ax.set_ylabel('Vb/Ws')
ax.set_xlabel('Roof Drift %')
ax.set_title('Pushover Curve')
ax.grid(True)
self.ui.PushCurve.canvas.draw()
self.ui.PushCurve.canvas.show()
num_beams = len(EleBeam)
num_cols = len(EleCol)
print('num_cols =', num_cols)
# Reading of forces and deflections of beams and columns from recorders
M_ElemsForceS1 = np.vstack((MG_ElemsForceS1, MP_ElemsForceS1))
M_ElemsDeforS1 = np.vstack((MG_ElemsDeforS1, MP_ElemsDeforS1))
M_ElemsForceS6 = np.vstack((MG_ElemsForceS6, MP_ElemsForceS6))
M_ElemsDeforS6 = np.vstack((MG_ElemsDeforS6, MP_ElemsDeforS6))
M_BeamsForceS1 = M_ElemsForceS1[:, 2*num_cols:]
M_BeamsDeforS1 = M_ElemsDeforS1[:, 2*num_cols:]
M_BeamsForceS6 = M_ElemsForceS6[:, 2*num_cols:]
M_BeamsDeforS6 = M_ElemsDeforS6[:, 2*num_cols:]
M_ColsForceS1 = M_ElemsForceS1[:, :2*num_cols]
M_ColsDeforS1 = M_ElemsDeforS1[:, :2*num_cols]
M_ColsForceS6 = M_ElemsForceS6[:, :2*num_cols]
M_ColsDeforS6 = M_ElemsDeforS6[:, :2*num_cols]
print('M_BeamsForceS1', M_BeamsForceS1)
print('Size of M_BeamsForceS1 =', np.shape(M_BeamsForceS1))
print('M_ColsForceS1', M_ColsForceS1)
print('Size of M_ColsForceS1 =', np.shape(M_ColsForceS1))
# beams_force_1 = np.loadtxt('Pushover/beams_force_1.out')
# beams_def_1 = np.loadtxt('Pushover/beams_def_1.out')
# beams_force_6 = np.loadtxt('Pushover/beams_force_6.out')
# beams_def_6 = np.loadtxt('Pushover/beams_def_6.out')
# cols_force_1 = np.loadtxt('Pushover/cols_force_1.out')
# cols_def_1 = np.loadtxt('Pushover/cols_def_1.out')
# cols_force_6 = np.loadtxt('Pushover/cols_force_6.out')
# cols_def_6 = np.loadtxt('Pushover/cols_def_6.out')
# print('cols_def_1', cols_def_1)
fy = float(self.ui.fy.text()) * MPa
Es = 200.0 * GPa
ey = fy / Es
CD_Beams = np.zeros([num_beams, 2]) # Curvature Ductility - Beams
PRA_Beams = np.zeros([num_beams, 2]) # Plastic Rotation Angle - Beams
My_Beams = np.zeros([num_beams, 2])
# Calculation of curvature ductility of beams and columns
for (ind, DB, DBPhl) in zip(range(num_beams), DataBeamDesing, DataBeamPhl):
ets_beam_1 = M_BeamsDeforS1[:-2, 2*ind] + M_BeamsDeforS1[:-2, 2*ind+1]*(DB.dt1-DB.h/2)
ebs_beam_1 = M_BeamsDeforS1[:-2, 2*ind] + M_BeamsDeforS1[:-2, 2*ind+1]*(DB.h/2-DB.db1)
ets_beam_6 = M_BeamsDeforS6[:-2, 2*ind] + M_BeamsDeforS6[:-2, 2*ind+1]*(DB.dt2-DB.h/2)
ebs_beam_6 = M_BeamsDeforS6[:-2, 2*ind] + M_BeamsDeforS6[:-2, 2*ind+1]*(DB.h/2-DB.db1)
fi_1 = np.absolute(M_BeamsDeforS1[:-2, 2 * ind + 1])
M_beam_1 = np.absolute(M_BeamsForceS1[:-2, 2 * ind + 1])
fi_6 = np.absolute(M_BeamsDeforS6[:-2, 2 * ind + 1])
M_beam_6 = np.absolute(M_BeamsForceS6[:-2, 2 * ind + 1])
# es_beam_1 = np.maximum(np.absolute(ets_beam_1), np.absolute(ebs_beam_1))
# es_beam_6 = np.maximum(np.absolute(ets_beam_6), np.absolute(ebs_beam_6))
# print('es_beam_1', es_beam_1, 'es_beam_6', es_beam_6)
if np.max(ets_beam_1) <= ey and np.max(ebs_beam_1) <= ey:
CD_1 = 0
My_1 = 0
PRA1 = DBPhl.phl1*np.max(fi_1)
else:
if np.max(ets_beam_1) >= ey:
ft = interpolate.interp1d(ets_beam_1, M_beam_1, kind='nearest')
My_1t = ft(ey)
else:
My_1t = float('inf')
if np.max(ebs_beam_1) >= ey:
fb = interpolate.interp1d(ebs_beam_1, M_beam_1, kind='nearest')
My_1b = fb(ey)
else:
My_1b = float('inf')
print('ind', ind, 'My_1t', My_1t, 'My_1b', My_1b)
My_1 = min(My_1t, My_1b)
f = interpolate.interp1d(M_beam_1, fi_1, kind='nearest')
fiy_1 = f(My_1)
CD_1 = np.max(fi_1) / fiy_1
PRA1 = DBPhl.phl1*np.max(fi_1)
if np.max(ets_beam_6) <= ey and np.max(ebs_beam_6) <= ey:
CD_6 = 0
My_6 = 0
PRA6 = DBPhl.phl2*np.max(fi_6)
else:
if np.max(ets_beam_6) >= ey:
ft = interpolate.interp1d(ets_beam_6, M_beam_6, kind='nearest')
My_6t = ft(ey)
else:
My_6t = float('inf')
if np.max(ebs_beam_6) >= ey:
fb = interpolate.interp1d(ebs_beam_6, M_beam_6, kind='nearest')
My_6b = fb(ey)
else:
My_6b = float('inf')
My_6 = min(My_6t, My_6b)
f = interpolate.interp1d(M_beam_6, fi_6, kind='nearest')
fiy_6 = f(My_6)
CD_6 = np.max(fi_6) / fiy_6
PRA6 = DBPhl.phl2*np.max(fi_6)
CD_Beams[ind, :] = [CD_1, CD_6]
PRA_Beams[ind, :] = [PRA1, PRA6]
My_Beams[ind, :] = [My_1, My_6]
# print('CD_Beams =', CD_Beams)
CD_Cols = np.zeros([num_cols, 2])
PRA_Cols = np.zeros([num_cols, 2])
My_Cols = np.zeros([num_cols, 2])
for (ind, DC, DCPhl) in zip(range(num_cols), DataColDesing, DataColPhl):
ets_col_1 = np.absolute(M_ColsDeforS1[:-2, 2*ind] + M_ColsDeforS1[:-2, 2*ind+1]*(DC.d-DC.h/2))
ebs_col_1 = np.absolute(M_ColsDeforS1[:-2, 2*ind] + M_ColsDeforS1[:-2, 2*ind+1]*(DC.h/2-DC.d))
import numpy as np
import time
import copy
import math
import scipy
import scipy.integrate
import scipy.signal
import logging
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.FileHandler("debug.log"),
logging.StreamHandler()
]
)
# included modules
from pynumdiff.finite_difference import first_order as finite_difference
import pynumdiff.smooth_finite_difference
from pynumdiff.utils import utility as utility
from pynumdiff import smooth_finite_difference
__friedrichs_kernel__ = utility.__friedrichs_kernel__
__gaussian_kernel__ = utility.__gaussian_kernel__
KERNELS = {'friedrichs': __friedrichs_kernel__,
'gaussian': __gaussian_kernel__}
# optional packages
warned = False
try:
import pychebfun
except:
logging.info('Import Error\nCould not import pychebfun.\nInstall pychebfun (https://github.com/pychebfun/pychebfun/) to use chebfun derivatives.\n')
warned = True
try:
import pydmd.dmdc
except:
logging.info('Import Error\nCould not import pydmd.\nInstall pydmd (florisvb fork: https://github.com/florisvb/PyDMD) to use dmd derivatives.\n')
warned = True
try:
import cvxpy
except:
logging.info('Import Error\nCould not import cvxpy.\nInstall cvxpy (http://www.cvxpy.org/install/index.html) to use lineardiff.\nRecommended solver: MOSEK, free academic license available: https://www.mosek.com/products/academic-licenses/ \n')
warned = True
if warned == True:
logging.info('Import Error\nDespite these import errors, you can still use many of the methods without additional installations.\n')
####################################################################################################################################################
# Helper functions
####################################################################################################################################################
def __slide_function__(func, x, dt, params, window_size, step_size, kernel_name):
'''
Slide a smoothing derivative function across a timeseries with specified window size.
Inputs
------
func : (function) name of the function to slide
x : (np.array of floats, 1xN) time series to differentiate
dt : (float) time step
Parameters
----------
params : (list) see func for requirements
window_size : (int) size of the sliding window
step_size : (int) step size for slide (e.g. 1 means slide by 1 step)
kernel_name : (string) name of the smoothing kernel
(e.g. 'friedrichs' or 'gaussian')
Outputs
-------
x_hat : estimated (smoothed) x
dxdt_hat : estimated derivative of x
'''
# get smoothing kernel
if not window_size%2: # then make odd
window_size += 1
ker = KERNELS[kernel_name](window_size)
x_hat_list = []
dxdt_hat_list = []
weight_list = []
for p in range(0, len(x), step_size):
# deal with end points
start = p- int((window_size-1)/2)
end = p+ int((window_size-1)/2)+1
ker_start = 0
ker_end = window_size
ker_middle = int((window_size-1)/2)
if start < 0:
ker_start = np.abs(start)
ker_middle = ker_middle - np.abs(start)
start = 0
if end > len(x):
ker_end = window_size - (end-len(x))
end = len(x)
# weights
w = ker[ker_start:ker_end]
w = w/np.sum(w)
# run the function on the window
_x = x[start:end]
x_hat, dxdt_hat = func(_x, dt, params, options={'weights': w})
# stack results
z_x_hat = np.zeros([len(x)])
z_x_hat[start:end] = x_hat
x_hat_list.append(z_x_hat)
z_dxdt_hat = np.zeros([len(x)])
z_dxdt_hat[start:end] = dxdt_hat
dxdt_hat_list.append(z_dxdt_hat)
z_weights = np.zeros([len(x)])
z_weights[start:end] = w
weight_list.append(z_weights)
# column norm weights
weights = np.vstack(weight_list)
for col in range(weights.shape[1]):
weights[:, col] = weights[:, col] / np.sum(weights[:, col])
# stack and weight x_hat and dxdt_hat
x_hat = np.vstack(x_hat_list)
dxdt_hat = np.vstack(dxdt_hat_list)
x_hat = np.sum(weights*x_hat, axis=0)
dxdt_hat = np.sum(weights*dxdt_hat, axis=0)
return x_hat, dxdt_hat
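# Illustrative usage (a sketch, not part of the library API): slide the
# polynomial differentiator __polydiff__ (defined below) over a noisy sine
# wave. All parameter values here are arbitrary examples.
def _demo_slide_function():
    t = np.linspace(0, 10, 500)
    x = np.sin(t) + 0.05 * np.random.randn(len(t))
    dt = t[1] - t[0]
    # params=[3] -> cubic fit per window; 51-point window, slide in steps of 5
    return __slide_function__(__polydiff__, x, dt, [3], 51, 5, 'friedrichs')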
####################################################################################################################################################
# Savitzky-Golay filter
####################################################################################################################################################
def savgoldiff(x, dt, params, options={'smooth': True}):
'''
Use the Savitzky-Golay to smooth the data and calculate the first derivative.
Uses scipy.signal.savgol_filter
The Savitzky-Golay is very similar to the sliding polynomial fit, but slightly noisier, and much faster.
Inputs
------
x : (np.array of floats, 1xN) time series to differentiate
dt : (float) time step
Parameters
----------
params : (list) [N, : (int) order of the polynomial
window_size, : (int) size of the sliding window, must be odd (if not, 1 is added)
smoothing_win] : (int) size of the window used for gaussian smoothing, a good default is = window_size, but smaller for high freq data
Outputs
-------
x_hat : estimated (smoothed) x
dxdt_hat : estimated derivative of x
'''
N, window_size, smoothing_win = params
if window_size > len(x)-1:
window_size = len(x)-1
if smoothing_win > len(x)-1:
smoothing_win = len(x)-1
if window_size <= N:
window_size = N+1
if not window_size%2: # then make odd
window_size += 1
dxdt_hat = scipy.signal.savgol_filter(x, window_size, N, deriv=1) / dt
if options.get('smooth', True):
kernel = __gaussian_kernel__(smoothing_win)
dxdt_hat = pynumdiff.smooth_finite_difference.__convolutional_smoother__(dxdt_hat, kernel, 1)
x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
x0 = utility.estimate_initial_condition(x, x_hat)
x_hat = x_hat + x0
return x_hat, dxdt_hat
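# Example call (values are illustrative): smooth a noisy cosine and recover
# its derivative with a 3rd-order Savitzky-Golay fit.
def _demo_savgoldiff():
    t = np.arange(0, 2 * np.pi, 0.01)
    x = np.cos(t) + 0.01 * np.random.randn(len(t))
    # params = [poly order, window_size, smoothing_win]
    return savgoldiff(x, dt=0.01, params=[3, 21, 21])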
####################################################################################################################################################
# Polynomial fitting
####################################################################################################################################################
def __polydiff__(x, dt, params, options={}):
'''
Fit polynomials to the timeseries, and differentiate the polynomials.
Inputs
------
x : (np.array of floats, 1xN) time series to differentiate
dt : (float) time step
Parameters
----------
params : (list) [N] : (int) order of the polynomial
options : (dict) {'weights'} : (np.array, optional) weights applied to each point in calculating the polynomial fit. Defaults to 1s if missing.
Outputs
-------
x_hat : estimated (smoothed) x
dxdt_hat : estimated derivative of x
'''
if 'weights' in options.keys():
w = options['weights']
else:
w = np.ones_like(x)
if type(params) is list:
order = params[0]
else:
order = params
t = np.arange(1, len(x)+1)*dt
# polyfit
r = np.polyfit(t, x, order, w=w)[::-1]
# derivative coefficients
dr = copy.copy(r[1:])
for i in range(len(dr)):
dr[i] = dr[i]*(i+1)
## evaluate dxdt_hat
dxdt_hat = 0
for i in range(len(dr)):
dxdt_hat += dr[i]*t**(i)
## evaluate smooth x
x_hat = 0
for i in range(len(r)):
x_hat += r[i]*t**(i)
return x_hat, dxdt_hat
def polydiff(x, dt, params, options={'sliding': True, 'step_size': 1, 'kernel_name': 'friedrichs'}):
'''
Fit polynomials to the timeseries, and differentiate the polynomials.
Inputs
------
x : (np.array of floats, 1xN) time series to differentiate
dt : (float) time step
Parameters
----------
params : (list) [N, : (int) order of the polynomial
window_size], : (int) size of the sliding window (ignored if not sliding)
options : (dict) {'sliding' : (bool) slide the method (True or False)
'step_size' : (int) step size for sliding (smaller value is more accurate and more time consuming)
'kernel_name'} : (string) kernel to use for weighting and smoothing windows ('gaussian' or 'friedrichs')
Outputs
-------
x_hat : estimated (smoothed) x
dxdt_hat : estimated derivative of x
'''
if 'sliding' in options.keys() and options['sliding'] is True:
window_size = copy.copy(params[-1])
if window_size < params[0]*3:
window_size = params[0]*3+1
params[1] = window_size
return __slide_function__(__polydiff__, x, dt, params, window_size, options['step_size'], options['kernel_name'])
else:
return __polydiff__(x, dt, params, options={})
####################################################################################################################################################
# Chebychev
####################################################################################################################################################
def __chebydiff__(x, dt, params, options={}):
'''
Fit the timeseries with chebyshev polynomials, and differentiate this model.
Inputs
------
x : (np.array of floats, 1xN) time series to differentiate
dt : (float) time step
Parameters
----------
params : (list) [N] : (int) order of the polynomial
options : (dict) {}
Outputs
-------
x_hat : estimated (smoothed) x
dxdt_hat : estimated derivative of x
'''
if type(params) is list:
N = params[0]
else:
N = params
mean = np.mean(x)
x = x - mean
def f(y):
t = np.linspace(-1, 1, len(x))
return np.interp(y, t, x)
# Chebychev polynomial
poly = pychebfun.chebfun(f, N=N, domain=[-1, 1])
ts = np.linspace(poly.domain()[0], poly.domain()[-1], len(x) )
x_hat = poly(ts) + mean
dxdt_hat = poly.differentiate()(ts)*(2/len(x))/dt
return x_hat, dxdt_hat
def chebydiff(x, dt, params, options={'sliding': True, 'step_size': 1, 'kernel_name': 'friedrichs'}):
'''
Fit the timeseries with chebyshev polynomials (optionally in a sliding window), and differentiate this model.
Inputs
------
x : (np.array of floats, 1xN) time series to differentiate
dt : (float) time step
Parameters
----------
params : (list) [N, : (int) order of the polynomial
window_size], : (int) size of the sliding window (ignored if not sliding)
options : (dict) {'sliding' : (bool) slide the method (True or False)
'step_size' : (int) step size for sliding (smaller value is more accurate and more time consuming)
'kernel_name'} : (string) kernel to use for weighting and smoothing windows ('gaussian' or 'friedrichs')
Outputs
-------
x_hat : estimated (smoothed) x
dxdt_hat : estimated derivative of x
'''
if 'sliding' in options.keys() and options['sliding'] is True:
window_size = copy.copy(params[-1])
if window_size < params[0]*2:
window_size = params[0]*2+1
params[1] = window_size
return __slide_function__(__chebydiff__, x, dt, params, window_size, options['step_size'], options['kernel_name'])
else:
return __chebydiff__(x, dt, params, options={})
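# Example call (illustrative; requires the optional pychebfun package):
def _demo_chebydiff():
    t = np.linspace(0, 1, 200)
    x = np.exp(-t) + 0.01 * np.random.randn(len(t))
    # params = [N, window_size]: 20 Chebyshev coefficients, 61-point window
    return chebydiff(x, t[1] - t[0], [20, 61])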
####################################################################################################################################################
# Dynamic Mode Decomposition
####################################################################################################################################################
def __dmddiff__(X, dt, params, options={}):
'''
Fit the timeseries with optimized dynamic mode decomposition model.
Inputs
------
X : (np.matrix of floats, NxM) hankel matrix of time delay embedded time series to differentiate.
N = time steps, M = time delay embedding
dt : (float) time step
Parameters
----------
params : (list) [delay_embedding, : (int) amount of delay_embedding
svd_rank] : (int) rank truncation
options : (dict) {}
Outputs
-------
x_hat : estimated (smoothed) x
dxdt_hat : estimated derivative of x
'''
delay_embedding = params[0]
svd_rank = params[1]
if len(params) == 2: # not sliding:
L = X.shape[0] + delay_embedding
else: # sliding
L = X.shape[0]
X = X.T
Ue = np.zeros_like(X)
# DMD
dmdc = pydmd.dmdc.DMDc(svd_rank=svd_rank, opt=True)
dmdc.fit(np.array(X), np.array(Ue[:,1:]), B=np.eye(X.shape[0]))
# DMD reconstruction
x0 = X[:, 0]
Xr = dmdc.forward_backward_reconstruct(x0, L, 0, overlap=2)
integral_x_hat = np.real(np.ravel( Xr[0,:] ))
integral_x_hat, x_hat = finite_difference(integral_x_hat, dt)
x_hat, dxdt_hat = finite_difference(x_hat, dt)
return x_hat, dxdt_hat
def dmddiff(x, dt, params, options={'sliding': True, 'step_size': 10, 'kernel_name': 'gaussian'}):
'''
Fit the timeseries with optimized dynamic mode decomposition model.
The DMD runs on the integral of x, to reduce effects of noise.
Inputs
------
x : (np.array of floats, 1xN) time series to differentiate
dt : (float) time step
Parameters
----------
params : (list) [delay_embedding, : (int) amount of delay_embedding
svd_rank, : (int) rank truncation
window_size], : (int) size of the sliding window (ignored if not sliding)
options : (dict) {'sliding' : (bool) slide the method (True or False)
'step_size' : (int) step size for sliding (smaller value is more accurate and more time consuming)
'kernel_name'} : (string) kernel to use for weighting and smoothing windows ('gaussian' or 'friedrichs')
Outputs
-------
x_hat : estimated (smoothed) x
dxdt_hat : estimated derivative of x
'''
delay_embedding = params[0]
svd_rank = params[1]
if delay_embedding >= len(x) - 1:
    delay_embedding = len(x) - 1
if delay_embedding < svd_rank:
delay_embedding = svd_rank
if 'sliding' in options.keys() and options['sliding'] is True:
window_size = copy.copy(params[2])
if window_size <= svd_rank + 1:
window_size = svd_rank + 1
# forward
integral_x = utility.integrate_dxdt_hat(x, dt)
X = utility.hankel_matrix(np.matrix(integral_x), delay_embedding).T
if 'sliding' in options.keys() and options['sliding'] is True:
x_hat_forward, dxdt_hat_forward = __slide_function__(__dmddiff__, X, dt, params, window_size, options['step_size'], options['kernel_name'])
else:
x_hat_forward, dxdt_hat_forward = __dmddiff__(X, dt, params, options={})
# backward
integral_x = utility.integrate_dxdt_hat(x[::-1], dt)
X = utility.hankel_matrix(np.matrix(integral_x), delay_embedding).T
if 'sliding' in options.keys() and options['sliding'] is True:
x_hat_backward, dxdt_hat_backward = __slide_function__(__dmddiff__, X, dt, params, window_size, options['step_size'], options['kernel_name'])
else:
x_hat_backward, dxdt_hat_backward = __dmddiff__(X, dt, params, options={})
# weights
w = np.arange(1,len(x_hat_forward)+1,1)[::-1]
w = np.pad(w, [0, len(x)-len(w)], mode='constant')
wfb = np.vstack((w, w[::-1]))
norm = np.sum(wfb, axis=0)
# orient and pad
x_hat_forward = np.pad(x_hat_forward, [0, len(x)-len(x_hat_forward)], mode='constant')
x_hat_backward = np.pad(x_hat_backward[::-1], [len(x)-len(x_hat_backward), 0], mode='constant')
# merge
x_hat = x_hat_forward*w/norm + x_hat_backward*w[::-1]/norm
x_hat, dxdt_hat = finite_difference(x_hat, dt)
return x_hat, dxdt_hat
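# The forward/backward blending used above, shown in isolation (an
# illustrative sketch): each pass is weighted linearly toward its own
# well-conditioned end before the two estimates are summed.
def _demo_forward_backward_merge(fwd, bwd):
    w = np.arange(1, len(fwd) + 1, 1, dtype=float)[::-1]
    norm = w + w[::-1]
    return fwd * w / norm + bwd * w[::-1] / norm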
####################################################################################################################################################
# Integral formulation
####################################################################################################################################################
class DeWhiten(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def dewhiten(self, m):
return (m+self.mean)*self.std
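# NB: as written, dewhiten is not the algebraic inverse of Whiten.whiten
# below; the exact inverse would be m*self.std + self.mean.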
class Whiten(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def whiten(self, m):
return (m-self.mean)/self.std
def whiten_library(library):
white_library = []
dewhiten = []
whiten = []
for m in library:
m += 2*(np.random.random(len(m))-0.5)*1e-16 # in case we have a pure constant
std_m = np.std(m)
mean_m = np.mean(m)
w = Whiten(mean_m, std_m)
dw = DeWhiten(mean_m, std_m)
white_library.append(w.whiten(m))
whiten.append(w)
dewhiten.append(dw)
return white_library, whiten, dewhiten
def __solve_for_A_and_C_given_X_and_Xdot__(X, Xdot, num_integrations, dt, gammaC=1e-1, gammaA=1e-6, solver='MOSEK', A_known=None, epsilon=1e-6, rows_of_interest='all'):
assert type(X) == np.matrix
assert type(Xdot) == np.matrix
if rows_of_interest == 'all':
rows_of_interest = np.arange(0, X.shape[0])
# Set up the variables
A = cvxpy.Variable((X.shape[0], X.shape[0]))
C = cvxpy.Variable((X.shape[0], num_integrations))
# Integrate the integration constants
Csum = 0
t = np.arange(0, X.shape[1])*dt
for n in range(num_integrations):
C_subscript = n
t_exponent = num_integrations - n -1
den = math.factorial(t_exponent)
Cn = cvxpy.vstack((1/den*C[i, C_subscript]*t**t_exponent for i in range(X.shape[0])))
Csum = Csum + Cn
# Define the objective function
error = cvxpy.sum_squares(Xdot[rows_of_interest, :] - (A@X + Csum)[rows_of_interest, :])
C_regularization = gammaC*cvxpy.sum(cvxpy.abs(C))
A_regularization = gammaA*cvxpy.sum(cvxpy.abs(A))
obj = cvxpy.Minimize(error + C_regularization + A_regularization)
# constraints
constraints = []
if A_known is not None:
for i in range(A_known.shape[0]):
for j in range(A_known.shape[1]):
if not np.isnan(A_known[i,j]):
constraint_lo = A[i,j] >= A_known[i,j]-epsilon
constraint_hi = A[i,j] <= A_known[i,j]+epsilon
constraints.extend([constraint_lo, constraint_hi])
# Solve the problem
prob = cvxpy.Problem(obj, constraints)
prob.solve(solver=solver) # MOSEK does not take max_iters
A = np.matrix(A.value)
return A, np.matrix(C.value)
def __integrate_dxdt_hat_matrix__(dxdt_hat, dt):
assert type(dxdt_hat) == np.matrix
x = np.matrix(scipy.integrate.cumtrapz(dxdt_hat, axis=1))
first_value = x[:,0] - np.mean(dxdt_hat[:, 0:1], axis=1)
x = np.hstack((first_value, x))*dt
return x
def __lineardiff__(x, dt, params, options={}):
'''
Estimate the parameters for a system xdot = Ax, and use that to calculate the derivative
Inputs
------
x : (np.array of floats, 1xN) time series to differentiate
dt : (float) time step
Parameters
----------
params : (list) [N, : (int, >1) order (e.g. 2: velocity; 3: acceleration)
gamma] : (float) regularization term
options : (dict) {}
Outputs
-------
x_hat : estimated (smoothed) x
dxdt_hat : estimated derivative of x
'''
N, gamma = params
mean = np.mean(x)
x = x - mean
# Generate the matrix of integrals of x
X = [x]
for n in range(1,N):
X.append(utility.integrate_dxdt_hat(X[-1], dt) )
X = np.matrix(np.vstack(X[::-1]))
integral_Xdot = X
integral_X = __integrate_dxdt_hat_matrix__(X, dt)
# Solve for A and the integration constants
A, C = __solve_for_A_and_C_given_X_and_Xdot__(integral_X, integral_Xdot, N, dt, gamma)
# Add the integration constants
Csum = 0
t = np.arange(0, X.shape[1])
import numpy as np
import pytest
import autolamella.data
from autolamella.data.mocktypes import MockAdornedImage
from autolamella.conversions import (
realspace_to_pixel_coordinate,
pixel_to_realspace_coordinate,
realspace_to_relative_coordinate,
relative_to_realspace_coordinate,
pixel_to_relative_coordinate,
relative_to_pixel_coordinate,
)
@pytest.fixture
def image():
image_array = np.random.random((10, 10))
return MockAdornedImage(image_array, pixelsize_x=1e-6, pixelsize_y=1e-6)
def test_conversion_types():
pytest.importorskip("autoscript_sdb_microscope_client",
reason="Autoscript is not available.")
image = autolamella.data.adorned_image()
input_coord = [0, 0]
assert isinstance(realspace_to_pixel_coordinate(input_coord, image), list)
assert isinstance(pixel_to_realspace_coordinate(input_coord, image), list)
assert isinstance(realspace_to_relative_coordinate(input_coord, image), list)
assert isinstance(relative_to_realspace_coordinate(input_coord, image), list)
assert isinstance(pixel_to_relative_coordinate(input_coord, image), list)
assert isinstance(relative_to_pixel_coordinate(input_coord, image), list)
@pytest.mark.parametrize(
"coord, expected_output",
[
([0, 0], [5, 5]),
([1e-6, 0], [6, 5]),
([0, 1e-6], [5, 4]),
([1e-6, 1e-6], [6, 4]),
([-1e-6, -1e-6], [4, 6]),
([-1e-6, 1e-6], [4, 4]),
([1e-6, -1e-6], [6, 6]),
],
)
def test_realspace_to_pixel_coordinate(coord, image, expected_output):
result = realspace_to_pixel_coordinate(coord, image)
assert np.allclose(np.array(result), np.array(expected_output))
@pytest.mark.parametrize(
"coord, expected_output",
[
([5, 5], [0, 0]),
([6, 5], [1e-6, 0]),
([5, 4], [0, 1e-6]),
([6, 4], [1e-6, 1e-6]),
([4, 6], [-1e-6, -1e-6]),
([4, 4], [-1e-6, 1e-6]),
([6, 6], [1e-6, -1e-6]),
],
)
def test_pixel_to_realspace_coordinate(coord, image, expected_output):
result = pixel_to_realspace_coordinate(coord, image)
assert np.allclose(np.array(result), np.array(expected_output))
@pytest.mark.parametrize(
"coord, expected_output",
[
([0, 0], [0.5, 0.5]),
([5e-6, 0], [1.0, 0.5]),
([0, 5e-6], [0.5, 0.0]),
([-5e-6, 0], [0.0, 0.5]),
([0, -5e-6], [0.5, 1.0]),
([5e-6, 5e-6], [1.0, 0.0]),
([-5e-6, -5e-6], [0.0, 1.0]),
],
)
def test_realspace_to_relative_coordinate(coord, image, expected_output):
result = realspace_to_relative_coordinate(coord, image)
assert np.allclose(np.array(result), np.array(expected_output))
@pytest.mark.parametrize(
"coord, expected_output",
[
([0.5, 0.5], [0, 0]),
([1.0, 0.5], [5e-6, 0]),
([0.5, 0.0], [0, 5e-6]),
([0.0, 0.5], [-5e-6, 0]),
([0.5, 1.0], [0, -5e-6]),
([1.0, 0.0], [5e-6, 5e-6]),
([0.0, 1.0], [-5e-6, -5e-6]),
],
)
def test_relative_to_realspace_coordinate(coord, image, expected_output):
result = relative_to_realspace_coordinate(coord, image)
assert np.allclose(np.array(result), np.array(expected_output))
@pytest.mark.parametrize(
"coord, expected_output",
[
([0, 0], [0, 0]),
([5, 5], [0.5, 0.5]),
([10, 10], [1.0, 1.0]),
([0, 10], [0, 1.0]),
([10, 0], [1.0, 0]),
],
)
def test_pixel_to_relative_coordinate(coord, image, expected_output):
result = pixel_to_relative_coordinate(coord, image)
assert np.allclose(np.array(result), np.array(expected_output))
@pytest.mark.parametrize(
"coord, expected_output",
[
([0, 0], [0, 0]),
([0.5, 0.5], [5, 5]),
([1.0, 1.0], [10, 10]),
([0, 1.0], [0, 10]),
([1.0, 0], [10, 0]),
],
)
def test_relative_to_pixel_coordinate(coord, image, expected_output):
result = relative_to_pixel_coordinate(coord, image)
assert np.allclose(np.array(result), np.array(expected_output))
import numpy as np
try:
import shapefile
except ImportError:
print('warning: shapefile package not installed')
try:
import pyproj
except ImportError:
print('warning: pyproj package not installed')
def shapefile_latlon(es_shapefile,thresh=500):
"""
Return latitude and longitude points from a GIS shapefile downloaded from
http://www.elkhornslough.org/gis/index.htm
Inputs:
es_shapefile: file path/prefix (for example, if the coastline data files
(cz.dbf, cz.shp, cz.shx) are in a directory
called CZ, this would be 'CZ/cz')
thresh: defines a threshold for gaps between points (in m), gaps more than
this distance apart separated by NaN values to make lines look
better when they are plotted
Output:
A dictionary with keys 'lon' and 'lat'
Required packages:
pyproj - https://pypi.python.org/pypi/pyproj
pyshp - https://pypi.python.org/pypi/pyshp
"""
"""
<NAME>, MLML
"""
sf = shapefile.Reader(es_shapefile)
lons = np.array(np.nan)
lats = np.array(np.nan)
for shape in sf.shapes():
points = np.asarray(shape.points)
x = points[:,0]
y = points[:,1]
dist = (np.diff(x)**2+np.diff(y)**2)**0.5
ii = np.where(dist>thresh)
#!/usr/bin/env python
__author__ = "<NAME>"
__email__ = "mncosta(at)isr(dot)tecnico(dot)ulisboa(dot)pt"
import numpy as np
from sklearn.linear_model import HuberRegressor
import math
from random import randint
import cvxopt as cvx
from RiskPerception.OpticalFlow import getWeightFromOFDistance, calcDistance
from RiskPerception.Objects import getOFWeightFromObjects
from RiskPerception.CONFIG import CVX_SUPRESS_PRINT,\
HUBER_LOSS_EPSILON,\
RANSAC_MINIMUM_DATAPOINTS,\
RANSAC_NUMBER_ITERATIONS, \
RANSAC_MINIMUM_RATIO_INLIERS,\
RANSAC_MINIMUM_ERROR_ANGLE,\
RANSAC_RATIO_INCREASE_ETA,\
ITERATIVE_OBJECT_WEIGHTS_ITERATIONS,\
MAXIMUM_INLIERS_ANGLE,\
EXPONENTIAL_DECAY_NBR_WEIGHTS,\
EXPONENTIAL_DECAY_INITIAL,\
EXPONENTIAL_DECAY_TAU
def l1_norm_optimization(a_i, b_i, c_i, w_i=None):
"""Solve l1-norm optimization problem."""
cvx.solvers.options['show_progress'] = not CVX_SUPRESS_PRINT
# Non-Weighted optimization:
if w_i is None:
# Problem must be formulated as sum |P*x - q|
P = cvx.matrix([[cvx.matrix(a_i)], [cvx.matrix(b_i)]])
q = cvx.matrix(c_i * -1)
# Solve the l1-norm problem
u = cvx.l1.l1(P, q)
# Get results
x0, y0 = u[0], u[1]
# Weighted optimization:
else:
# Problem must be formulated as sum |P*x - q|
P = cvx.matrix([[cvx.matrix(np.multiply(a_i, w_i))],
[cvx.matrix(np.multiply(b_i, w_i))]])
q = cvx.matrix(np.multiply(w_i, c_i * -1))
# Solve the l1-norm problem
u = cvx.l1.l1(P, q)
# Get results
x0, y0 = u[0], u[1]
# return resulting point
return (x0, y0)
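# Illustrative check (a sketch with synthetic coefficients): the returned
# point should land near (2, 3), which nearly zeroes every residual
# a_i*x0 + b_i*y0 + c_i.
def _demo_l1_norm_optimization():
    rng = np.random.RandomState(0)
    a_i, b_i = rng.randn(50), rng.randn(50)
    c_i = -(2.0 * a_i + 3.0 * b_i) + 0.01 * rng.randn(50)
    return l1_norm_optimization(a_i, b_i, c_i)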
def l2_norm_optimization(a_i, b_i, c_i, w_i=None):
"""Solve l2-norm optimization problem."""
# Non-Weighted optimization:
if w_i is None:
aux1 = -2 * ((np.sum(np.multiply(b_i, b_i))) * (
np.sum(np.multiply(a_i, a_i))) / float(
np.sum(np.multiply(a_i, b_i))) - (np.sum(np.multiply(a_i, b_i))))
aux2 = 2 * ((np.sum(np.multiply(b_i, b_i))
# -*- coding: utf-8 -*-
"""
pytests for resource handlers
"""
from datetime import datetime
import h5py
import numpy as np
import os
import pandas as pd
import pytest
from rex import TESTDATADIR
from rex.multi_file_resource import (MultiH5, MultiH5Path, MultiFileNSRDB,
MultiFileWTK)
from rex.renewable_resource import (NSRDB, WindResource)
from rex.utilities.exceptions import ResourceKeyError, ResourceRuntimeError
def NSRDB_res():
"""
Init NSRDB resource handler
"""
path = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_2012.h5')
return NSRDB(path)
def NSRDB_2018():
"""
Init NSRDB resource handler
"""
path = os.path.join(TESTDATADIR, 'nsrdb', 'nsrdb*2018.h5')
return MultiFileNSRDB(path)
def NSRDB_2018_list():
"""
Init NSRDB resource handler
"""
path = os.path.join(TESTDATADIR, 'nsrdb/nsrdb*2018.h5')
path, h5_files = MultiH5Path._get_h5_files(path)
return MultiFileNSRDB(h5_files)
def WindResource_res():
"""
Init WindResource resource handler
"""
path = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012.h5')
return WindResource(path)
def FiveMinWind_res():
"""
Init WindResource resource handler
"""
path = os.path.join(TESTDATADIR, 'wtk', 'wtk*m.h5')
return MultiFileWTK(path)
def FiveMinWind_list():
"""
Init WindResource resource handler
"""
path = os.path.join(TESTDATADIR, 'wtk/wtk*m.h5')
path, h5_files = MultiH5Path._get_h5_files(path)
return MultiFileWTK(h5_files)
def wind_group():
"""
Init WindResource resource handler
"""
path = os.path.join(TESTDATADIR, 'wtk/ri_wtk_2012_group.h5')
return WindResource(path, group='group')
def check_res(res_cls):
"""
Run test on len and shape methods
"""
time_index = res_cls.time_index
meta = res_cls.meta
res_shape = (len(time_index), len(meta))
assert len(res_cls) == len(time_index)
assert res_cls.shape == res_shape
assert np.all(np.isin(['meta', 'time_index'],
res_cls.datasets))
assert np.all(~np.isin(['meta', 'time_index', 'coordinates'],
res_cls.resource_datasets))
def check_attrs(res_cls, dset):
"""
Check dataset attributes extraction
"""
truth = res_cls.get_attrs(dset=dset)
test = res_cls.attrs[dset]
msg = "{} attributes do not match!".format(dset)
assert truth == test, msg
truth = res_cls.get_scale_factor(dset)
test = res_cls.scale_factors[dset]
msg = "{} scale factors do not match!".format(dset)
assert truth == test, msg
truth = res_cls.get_units(dset)
test = res_cls.units[dset]
msg = "{} units do not match!".format(dset)
assert truth == test, msg
def check_properties(res_cls, dset):
"""
Check dataset properties extraction
"""
shape, dtype, chunks = res_cls.get_dset_properties(dset)
test = res_cls.shapes[dset]
msg = "{} shape does not match!".format(dset)
assert shape == test, msg
test = res_cls.dtypes[dset]
msg = "{} dtype does not match!".format(dset)
assert dtype == test, msg
test = res_cls.chunks[dset]
msg = "{} chunks do not match!".format(dset)
assert chunks == test, msg
def check_meta(res_cls):
"""
Run tests on meta data
"""
with h5py.File(res_cls.h5_file, 'r') as f:
ds_name = 'meta'
if res_cls._group:
ds_name = '{}/{}'.format(res_cls._group, ds_name)
baseline = pd.DataFrame(f[ds_name][...])
sites = slice(0, len(baseline))
meta = res_cls['meta', sites]
cols = ['latitude', 'longitude', 'elevation', 'timezone']
assert np.allclose(baseline[cols].values[sites], meta[cols].values)
sites = len(baseline)
sites = slice(int(sites / 3), int(sites / 2))
meta = res_cls['meta', sites]
cols = ['latitude', 'longitude', 'elevation', 'timezone']
assert np.allclose(baseline[cols].values[sites], meta[cols].values)
sites = 5
meta = res_cls['meta', sites]
cols = ['latitude', 'longitude', 'elevation', 'timezone']
assert np.allclose(baseline[cols].values[sites], meta[cols].values)
sites = sorted(np.random.choice(len(baseline), 5, replace=False))
meta = res_cls['meta', sites]
cols = ['latitude', 'longitude', 'elevation', 'timezone']
assert np.allclose(baseline[cols].values[sites], meta[cols].values)
meta = res_cls['meta']
cols = ['latitude', 'longitude', 'elevation', 'timezone']
assert np.allclose(baseline[cols].values, meta[cols].values)
assert isinstance(meta, pd.DataFrame)
meta_shape = meta.shape
max_sites = int(meta_shape[0] * 0.8)
# single site
meta = res_cls['meta', max_sites]
assert isinstance(meta, pd.DataFrame)
assert meta.shape == (1, meta_shape[1])
# site slice
meta = res_cls['meta', :max_sites]
assert isinstance(meta, pd.DataFrame)
assert meta.shape == (max_sites, meta_shape[1])
# site list
sites = sorted(np.random.choice(meta_shape[0], max_sites, replace=False))
meta = res_cls['meta', sites]
assert isinstance(meta, pd.DataFrame)
assert meta.shape == (len(sites), meta_shape[1])
# select columns
meta = res_cls['meta', :, ['latitude', 'longitude']]
assert isinstance(meta, pd.DataFrame)
assert meta.shape == (meta_shape[0], 2)
lat_lon = res_cls.lat_lon
assert np.allclose(baseline[['latitude', 'longitude']].values, lat_lon)
def check_time_index(res_cls):
"""
Run tests on time_index
"""
time_index = res_cls['time_index']
time_shape = time_index.shape
assert isinstance(time_index, pd.DatetimeIndex)
assert str(time_index.tz) == 'UTC'
# single timestep
time_index = res_cls['time_index', 50]
assert isinstance(time_index, datetime)
# time slice
time_index = res_cls['time_index', 100:200]
assert isinstance(time_index, pd.DatetimeIndex)
assert time_index.shape == (100,)
# list of timesteps
steps = sorted(np.random.choice(time_shape[0], 50, replace=False))
time_index = res_cls['time_index', steps]
assert isinstance(time_index, pd.DatetimeIndex)
assert time_index.shape == (50,)
def check_dset(res_cls, ds_name):
"""
Run tests on dataset ds_name
"""
ds_shape = res_cls.shape
max_sites = int(ds_shape[1] * 0.8)
arr = res_cls[ds_name]
ds = res_cls[ds_name]
assert isinstance(ds, np.ndarray)
assert ds.shape == ds_shape
assert np.allclose(arr, ds)
# single site all time
ds = res_cls[ds_name, :, 1]
assert isinstance(ds, np.ndarray)
assert ds.shape == (ds_shape[0],)
# single time all sites
ds = res_cls[ds_name, 10]
assert isinstance(ds, np.ndarray)
assert ds.shape == (ds_shape[1],)
assert np.allclose(arr[10], ds)
# single value
ds = res_cls[ds_name, 10, max_sites]
assert isinstance(ds, (np.integer, np.floating))
assert np.allclose(arr[10, max_sites], ds)
# site slice
sites = slice(int(max_sites / 2), max_sites)
ds = res_cls[ds_name, :, sites]
assert isinstance(ds, np.ndarray)
assert ds.shape == (ds_shape[0], sites.stop - sites.start)
assert np.allclose(arr[:, sites], ds)
# time slice
ds = res_cls[ds_name, 10:20]
assert isinstance(ds, np.ndarray)
assert ds.shape == (10, ds_shape[1])
assert np.allclose(arr[10:20], ds)
# slice in time and space
ds = res_cls[ds_name, 100:200, sites]
assert isinstance(ds, np.ndarray)
assert ds.shape == (100, sites.stop - sites.start)
assert np.allclose(arr[100:200, sites], ds)
import unittest
import numpy
from cqcpy import test_utils
class TestTest(unittest.TestCase):
def setUp(self):
self.thresh = 1e-14
def test_framework(self):
self.assertTrue(True)
def test_Isym(self):
I = test_utils.make_random_Itot(5)
test = I - I.transpose((1, 0, 3, 2))
s1 = numpy.linalg.norm(test) < self.thresh
test = I - I.transpose((3, 2, 1, 0))
s2 = numpy.linalg.norm(test) < self.thresh
test = I - I.transpose((2, 3, 0, 1))
s3 = numpy.linalg.norm(test) < self.thresh
err = "Bad symmetry in full I"
self.assertTrue(s1 and s2 and s3, err)
def test_int_sym(self):
F, I = test_utils.make_random_integrals(2, 3)
test = F.oo - F.oo.transpose((1, 0))
sym = numpy.linalg.norm(test)
# code to calculate fundamental stellar parameters and distances using
# a "direct method", i.e. adopting a fixed reddening map and bolometric
# corrections
import astropy.units as units
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.interpolate import RegularGridInterpolator
import pdb
def distance_likelihood(plx, plxe, ds):
"""Distance Likelihood
Likelihood of distance given measured parallax
Args:
plx (float): parallax
plxe (float): parallax uncertainty
ds (array): distance in parsecs
Returns:
array: likelihood (not log-likelihood)
"""
lh = ((1.0/(np.sqrt(2.0*np.pi)*plxe))
* np.exp( (-1.0/(2.0*plxe**2))*(plx - 1.0/ds)**2))
return lh
def distance_prior(ds, L):
"""Distance prior
Exponetial decreasing vol density prior
Returns:
array: prior probability (not log-prior)
"""
prior = ds**2/(2.0*L**3.0)*np.exp(-ds/L)
return prior
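# A minimal sketch (not part of the original pipeline) of how the two
# functions above combine into a discrete distance posterior; the parallax
# values are made up for illustration.
def _demo_distance_posterior(plx=5e-3, plxe=5e-4, L=1350.0, nsample=1000):
    ds = np.arange(1.0, 1e4, 1.0)
    post = distance_likelihood(plx, plxe, ds) * distance_prior(ds, L)
    post /= np.sum(post)  # normalize to a probability mass function
    return np.random.choice(ds, p=post, size=nsample)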
def stparas(input, dnumodel=-99, bcmodel=-99, dustmodel=-99, dnucor=-99,
useav=-99, plot=0, band='k', ext=-99):
# IAU XXIX Resolution, Mamajek et al. (2015)
r_sun = 6.957e10
gconst = 6.67408e-8
gm = 1.3271244e26
m_sun = gm/gconst
rho_sun = m_sun/(4./3.*np.pi*r_sun**3)
g_sun = gconst*m_sun/r_sun**2.
# solar constants
numaxsun = 3090.
dnusun = 135.1
teffsun = 5777.
Msun = 4.74 # NB this is fixed to MESA BCs!
# assumed uncertainty in bolometric corrections
err_bc=0.02
# assumed uncertainty in extinction
err_ext=0.02
# object containing output values
out = resdata()
## extinction coefficients
extfactors=ext
if (len(band) == 4):
bd=band[0:1]
else:
bd=band[0:2]
######################################
# case 1: input is parallax + colors #
######################################
#with h5py.File(bcmodel,'r') as h5:
teffgrid = bcmodel['teffgrid'][:]
logggrid = bcmodel['logggrid'][:]
fehgrid = bcmodel['fehgrid'][:]
avgrid = bcmodel['avgrid'][:]
bc_band = bcmodel['bc_'+bd][:]
if ((input.plx > 0.)):
# load up bolometric correction grid
# only K-band for now
points = (teffgrid,logggrid,fehgrid,avgrid)
values = bc_band
interp = RegularGridInterpolator(points,values)
### Monte Carlo starts here
# number of samples
nsample = int(1e5)
# length scale for exp decreasing vol density prior in pc
L = 1350.0
# maximum distance to sample (in pc)
maxdis = 1e5
# get a rough maximum and minimum distance
tempdis = 1.0/input.plx
tempdise = input.plxe/input.plx**2
maxds = tempdis + 5.0*tempdise
minds = tempdis - 5.0*tempdise
ds = np.arange(1.0, maxdis, 1.0)
lh = distance_likelihood(input.plx, input.plxe, ds)
prior = distance_prior(ds, L)
dis = lh*prior
dis2 = dis/np.sum(dis)
norm = dis2/np.max(dis2)
# Deal with negative and positive parallaxes differently:
if tempdis > 0:
# Determine maxds based on posterior:
um = np.where((ds > tempdis) & (norm < 0.001))[0]
# Determine minds just like maxds:
umin = np.where((ds < tempdis) & (norm < 0.001))[0]
else:
# Determine maxds based on posterior, taking argmax
# instead of tempdis which is wrong:
um = np.where((ds > np.argmax(norm)) & (norm < 0.001))[0]
# Determine minds just like maxds:
umin = np.where((ds < np.argmax(norm)) & (norm < 0.001))[0]
if (len(um) > 0):
maxds = np.min(ds[um])
else:
maxds = 1e5
if (len(umin) > 0):
minds = np.max(ds[umin])
else:
minds = 1.0
print('using max distance:', maxds)
print('using min distance:', minds)
ds = np.linspace(minds,maxds,nsample)
lh = distance_likelihood(input.plx, input.plxe, ds)
prior = distance_prior(ds, L)
dis = lh*prior
dis2=dis/np.sum(dis)
# sample distances following the discrete distance posterior
np.random.seed(seed=10)
dsamp = np.random.choice(ds, p=dis2, size=nsample)
# interpolate dustmodel dataframe to determine values of reddening.
if (isinstance(dustmodel,pd.DataFrame) == False):
ebvs = np.zeros(len(dsamp))
avs = ebvs
else:
xp = np.concatenate(
([0.0], np.array(dustmodel.columns[2:].str[3:], dtype='float'))
)
fp = np.concatenate(([0.0],np.array(dustmodel.iloc[0][2:])))
ebvs=np.interp(x=dsamp, xp=xp, fp=fp)
avs = extfactors['av']*ebvs
# NB the next line means that useav is not actually working yet
if (useav > -99):
ebvs = np.zeros(len(dsamp)) + useav
ext = extfactors['a'+bd]*ebvs
map = input.mag
mape = input.mage
np.random.seed(seed=12)
map_samp = map + np.random.randn(nsample)*mape
# NB no extinction correction here yet since it is either:
# - already taken into account in ATLAS BCs below
# - corrected for M dwarfs further below
absmag = -5.0*np.log10(dsamp) + map_samp + 5.
# assume solar metallicity if no input feh is provided
if (input.feh == -99.0):
feh = 0.0
else:
feh = input.feh
# if no Teff is provided, use color-Teff relations:
### A and earlier: Flower et al.
### FGK dwarfs: Casagrande et al. 2010
### M dwarfs: Mann et al. 2015
if (input.teff == -99.0):
if ((input.bmag > -99.0) & (input.vmag > -99.0)):
bvmag = ((input.bmag-np.median(ebvs*extfactors['ab']))
- (input.vmag-np.median(ebvs*extfactors['av'])))
print(bvmag)
col=((input.bmag-ebvs*extfactors['ab'])-(input.vmag-ebvs*extfactors['av']))
#pdb.set_trace()
if ((bvmag >= 0.18) & (bvmag <= 1.29)):
input.teff=casagrande_bv(bvmag,feh)
print('using Casagrande B-V for Teff')
if (bvmag < 0.19):
input.teff=torres_bv(bvmag,feh)
print('using Flower/Torres B-V for Teff')
print(input.teff)
if ((input.btmag > -99.0) & (input.vtmag > -99.0)):
bvtmag = ((input.btmag-np.median(ebvs*extfactors['abt']))
- (input.vtmag-np.median(ebvs*extfactors['avt'])))
if ((bvtmag >= 0.19) & (bvtmag <= 1.49)):
input.teff = casagrande_bvt(bvtmag, feh)
print('using Casagrande Bt-Vt for Teff')
if ((input.jmag > -99.0) & (input.kmag > -99.0)):
jkmag = ((input.jmag-np.median(ebvs*extfactors['aj']))
- (input.kmag-np.median(ebvs*extfactors['ak'])))
if ((jkmag >= 0.07) & (jkmag <= 0.8)):
input.teff=casagrande_jk(jkmag,feh)
print('using Casagrande J-K for Teff')
if (jkmag > 0.8):
input.teff=mist_jk(jkmag)
print('using MIST J-K for Teff')
input.teffe = input.teff*0.02
# M dwarfs
if ((input.jmag > -99.0) & (input.bpmag > -99.0) & (input.hmag > -99.0)):
if (input.bpmag-input.rpmag > 1.5) & (np.median(absmag - ext) > 3.):
bprpmag=input.bpmag-input.rpmag
jhmag=((input.jmag-np.median(ebvs*extfactors['aj']))
- (input.hmag-np.median(ebvs*extfactors['ah'])))
input.teff = mann_bprpjh(bprpmag, jhmag)
input.teffe = np.sqrt(49.**2 + 60.**2)
print('using Mann Bp-Rp,J-H for Teff')
if ((input.jmag > -99.0) & (input.vmag > -99.0) & (input.hmag > -99.0)):
if (input.vmag-input.jmag > 2.7) & (np.median(absmag - ext) > 3.):
vjmag=((input.vmag-np.median(ebvs*extfactors['av']))
- (input.jmag-np.median(ebvs*extfactors['aj'])))
jhmag=((input.jmag-np.median(ebvs*extfactors['aj']))
- (input.hmag-np.median(ebvs*extfactors['ah'])))
input.teff = mann_vjh(vjmag, jhmag)
input.teffe = np.sqrt(48.**2 + 60.**2)
print('using Mann V-J,J-H for Teff')
if ((input.jmag > -99.0) & (input.rmag > -99.0) & (input.hmag > -99.0)):
if (input.rmag-input.jmag > 2.0) & (np.median(absmag - ext) > 3.):
rjmag=((input.rmag-np.median(ebvs*extfactors['ar']))
- (input.jmag-np.median(ebvs*extfactors['aj'])))
jhmag=((input.jmag-np.median(ebvs*extfactors['aj']))
- (input.hmag-np.median(ebvs*extfactors['ah'])))
input.teff = mann_rjh(rjmag, jhmag)
input.teffe = np.sqrt(52.**2 + 60.**2)
print('using Mann r-J,J-H for Teff')
if (input.teff == -99.0):
print('no valid Teff provided or calculated, skipping')
return out
np.random.seed(seed=11)
teffsamp = input.teff + np.random.randn(nsample)*input.teffe
# hack to avoid crazy Teff samples
teffsamp[teffsamp < 1000.0] = 1000.0
# if no logg is provided, take guess from absolute mag-logg
# fit to solar-metallicity MIST isochrones NB these coeffs are
# dodgy in Mv, but pretty good in Mk
if (input.logg == -99.):
if ((band == 'vmag') | (band == 'vtmag')):
fitv = np.poly1d(
[ 0.00255731, -0.07991211, 0.85140418, 1.82465197]
)
input.logg = fitv(np.median(absmag-ext))
print('no input logg provided, guessing (using Mv):', input.logg)
#pdb.set_trace()
# should really be done filter by filter with a dictionary; TODO
else:
fitk = np.poly1d([-0.01234736, 0.36684517, 3.1477089 ])
input.logg = fitk(np.median(absmag-ext))
msg = 'no input logg provided, guessing (using Mk): {}'.format(
input.logg
)
print(msg)
# ATLAS BCs are inaccurate for M dwarfs; use Mann et al. 2015
# Mks-R relation instead
if ((input.teff < 4100.) & (np.median(absmag-ext) > 3.)):
sampMabs = absmag - ext
if (input.feh > -99.):
rad = ((1.9305 - 0.3466*(absmag-ext) + 0.01647*(absmag-ext)**2)
* (1.+0.04458*input.feh))
else:
rad = 1.9515 - 0.3520*(absmag - ext) + 0.01680*(absmag - ext)**2
# add 3% scatter in Mks-R relation
rad = rad + np.random.randn(len(rad))*np.median(rad)*0.03
lum = rad**2 * (teffsamp/teffsun)**4
# Also compute M-dwarf masses:
sampMabsZP = sampMabs - 7.5 #7.5 is the ZP defined in Mann et al. (2019)
if (input.feh > -99.):
mass = (1. - 0.0035*input.feh) * 10.**(-0.647 - 0.207 * (sampMabsZP)
- 6.53*10**(-4) * (sampMabsZP)**2
+ 7.13*10**(-3) * (sampMabsZP)**3
+ 1.84*10**(-4) * (sampMabsZP)**4
- 1.60*10**(-4) * (sampMabsZP)**5)
else:
mass = 10.**(-0.647 - 0.207 * (sampMabsZP)
- 6.53*10**(-4) * (sampMabsZP)**2
+ 7.13*10**(-3) * (sampMabsZP)**3
+ 1.84*10**(-4) * (sampMabsZP)**4
- 1.60*10**(-4) * (sampMabsZP)**5)
# Add 4% scatter in Mks-M relation
mass = mass + np.random.randn(len(mass))*np.median(mass)*0.04
# Now compute density with the mass and radius relations given here:
rho = mass/rad**3
# Output mass and densities:
out.mass,out.massep,out.massem = getstat(mass)
out.rho,out.rhoep,out.rhoem = getstat(rho)
# for everything else, interpolate ATLAS BCs
else:
if (input.teff < np.min(teffgrid)):
return out
if (input.teff > np.max(teffgrid)):
return out
if ((input.logg > -99.0) & (input.logg < np.min(logggrid))):
return out
if ((input.logg > -99.0) & (input.logg > np.max(logggrid))):
return out
if ((input.feh > -99.0) & (input.feh < np.min(fehgrid))):
return out
if ((input.feh > -99.0) & (input.feh > np.max(fehgrid))):
return out
fix = np.where(avs > np.max(avgrid))[0]
avs[fix] = np.max(avgrid)
fix =np.where(avs < np.min(avgrid))[0]
avs[fix] = np.min(avgrid)
"""
Copyright (c) 2016-2020 The scikit-optimize developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Inspired by https://github.com/jonathf/chaospy/blob/master/chaospy/
distributions/sampler/sequences/halton.py
"""
import numpy as np
from entmoot.sampler.base import InitialPointGenerator
from entmoot.space.space import Space
from sklearn.utils import check_random_state
class Halton(InitialPointGenerator):
"""Creates `Halton` sequence samples.
In statistics, Halton sequences are sequences used to generate
points in space for numerical methods such as Monte Carlo simulations.
Although these sequences are deterministic, they are of low discrepancy,
that is, appear to be random
for many purposes. They were first introduced in 1960 and are an example
of a quasi-random number sequence. They generalise the one-dimensional
van der Corput sequences.
For ``dim == 1`` the sequence falls back to Van Der Corput sequence.
Parameters
----------
min_skip : int
minimum skipped seed number. When `min_skip != max_skip`
a random number is picked.
max_skip : int
maximum skipped seed number. When `min_skip != max_skip`
a random number is picked.
primes : tuple, default=None
The (non-)prime base to calculate values along each axis. If
empty or None, growing prime values starting from 2 will be used.
"""
def __init__(self, min_skip=-1, max_skip=-1, primes=None):
self.primes = primes
self.min_skip = min_skip
self.max_skip = max_skip
def generate(self, dimensions, n_samples, random_state=None):
"""Creates samples from Halton set.
Parameters
----------
dimensions : list, shape (n_dims,)
List of search space dimensions.
Each search dimension can be defined either as
- a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
dimensions),
- a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
n_samples : int
The order of the Halton sequence. Defines the number of samples.
random_state : int, RandomState instance, or None (default)
Set random state to something other than None for reproducible
results.
Returns
-------
np.array, shape=(n_dim, n_samples)
Halton set
"""
rng = check_random_state(random_state)
if self.primes is None:
primes = []
else:
primes = list(self.primes)
space = Space(dimensions)
n_dim = space.n_dims
transformer = space.get_transformer()
space.set_transformer("normalize")
if len(primes) < n_dim:
prime_order = 10 * n_dim
while len(primes) < n_dim:
primes = _create_primes(prime_order)
prime_order *= 2
primes = primes[:n_dim]
assert len(primes) == n_dim, "not enough primes"
if self.min_skip < 0 and self.max_skip < 0:
skip = max(primes)
elif self.min_skip == self.max_skip:
skip = self.min_skip
elif self.min_skip < 0 or self.max_skip < 0:
skip = max(self.min_skip, self.max_skip)
else:
skip = rng.randint(self.min_skip, self.max_skip)
out = np.empty((n_dim, n_samples))
indices = [idx + skip for idx in range(n_samples)]
for dim_ in range(n_dim):
out[dim_] = _van_der_corput_samples(
indices, number_base=primes[dim_])
out = space.inverse_transform(np.transpose(out))
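# The helpers _van_der_corput_samples and _create_primes referenced above are
# truncated from this excerpt. Below is a minimal stand-in sketch of the van
# der Corput radical inverse (illustrative; not necessarily identical to the
# original implementation).
def _van_der_corput_sketch(indices, number_base=2):
    out = np.zeros(len(indices))
    for k, idx in enumerate(indices):
        denom, value = 1.0, 0.0
        while idx > 0:
            denom *= number_base
            idx, remainder = divmod(idx, number_base)
            value += remainder / denom
        out[k] = value
    return out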
import json
import glob
import pickle as pkl
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
from sklearn import svm, tree
from sklearn.metrics import precision_recall_fscore_support
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import normalize, scale
from scipy.cluster.vq import whiten
from sklearn.manifold import TSNE
import re
import os
from transformers import BertTokenizer, BertForSequenceClassification, BertConfig
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
import torch
import math
import time
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from sklearn.model_selection import StratifiedKFold
def get_optimizers(model, learning_rate, adam_epsilon, weight_decay, num_training_steps):
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": weight_decay},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate, eps=adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0,
num_training_steps=num_training_steps)
return optimizer, scheduler
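# Hedged usage sketch (illustrative hyperparameters, not from the original
# source): the grouping above applies weight decay to all parameters except
# biases and LayerNorm weights, per the usual BERT fine-tuning recipe.
#
#     model = BertForSequenceClassification.from_pretrained("bert-base-uncased")
#     optimizer, scheduler = get_optimizers(
#         model, learning_rate=2e-5, adam_epsilon=1e-8,
#         weight_decay=0.01, num_training_steps=1000)
#     # per training step: loss.backward(); optimizer.step(); scheduler.step()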
def align_tokenized_features(tokenizer, text_list, visual_features, audio_features):
max_seq_len = max([len(tokenizer.tokenize(t)) for t in text_list])
n_samples = len(text_list)
new_visual = np.zeros((n_samples, max_seq_len, visual_features[0].shape[-1]))
new_audio = np.zeros((n_samples, max_seq_len, audio_features[0].shape[-1]))
n = 0 # iterator of samples
for t in text_list:
        # try to rebuild the original sentence (split on spaces) from the tokenized list
# t_len = len(t.split())
i = 0 # iterator of tokens, new_visual and new_audio
j = 0 # iterator of visual_features, audio_features
tk = tokenizer.tokenize(t)
old_len = len(visual_features[n])
check = []
while i < len(tk):
if j >= old_len:
i_end = i
while i_end < len(tk):
new_visual[n][i_end][:] += visual_features[n][j - 1][:]
new_audio[n][i_end][:] += audio_features[n][j - 1][:]
i_end += 1
break
check.append((tk[i], text_list[n].split()[j]))
if tk[i] in ['\'', ':']:
                if (tk[i-1] == 'so' and tk[i+1] == 'on') or (tk[i-1] == 'up' and tk[i+1] == 'to'):
new_visual[n][i][:] += visual_features[n][j-1][:]
new_audio[n][i][:] += audio_features[n][j-1][:]
i += 1
else:
new_visual[n][i][:] += visual_features[n][j-1][:]
new_visual[n][i+1][:] += visual_features[n][j-1][:]
new_audio[n][i][:] += audio_features[n][j - 1][:]
new_audio[n][i + 1][:] += audio_features[n][j - 1][:]
i += 2
elif tk[i] == '-':
if i == 0 or tk[i+1] in ['and','again','because'] or (tk[i-1] == 'oil' and tk[i+1] == 'oil') or (tk[i-1] == 'oil' and tk[i+1] == 'like'):
new_visual[n][i][:] = visual_features[n][j][:]
new_audio[n][i][:] = audio_features[n][j][:]
i += 1
j += 1
else:
new_visual[n][i][:] += visual_features[n][j - 1][:]
new_visual[n][i + 1][:] += visual_features[n][j - 1][:]
new_audio[n][i][:] += audio_features[n][j - 1][:]
new_audio[n][i + 1][:] += audio_features[n][j - 1][:]
i += 2
elif tk[i] == '/':
if re.match('[0-9]+', tk[i-1]) or tk[i-1] in ['medium', 'afternoon', 'either']:
new_visual[n][i][:] += visual_features[n][j - 1][:]
new_visual[n][i + 1][:] += visual_features[n][j - 1][:]
new_audio[n][i][:] += audio_features[n][j - 1][:]
new_audio[n][i + 1][:] += audio_features[n][j - 1][:]
i += 2
else:
new_visual[n][i][:] = visual_features[n][j][:]
new_audio[n][i][:] = audio_features[n][j][:]
i += 1
j += 1
elif tk[i] == '+':
if tk[i+1] in ['anti', 'fields']:
new_visual[n][i][:] = visual_features[n][j][:]
new_audio[n][i][:] = audio_features[n][j][:]
i += 1
j += 1
else:
new_visual[n][i][:] += visual_features[n][j - 1][:]
new_audio[n][i][:] += audio_features[n][j - 1][:]
i += 1
elif tk[i][0] == '#' or tk[i] == '%':
new_visual[n][i][:] += visual_features[n][j - 1][:]
new_audio[n][i][:] += audio_features[n][j - 1][:]
i += 1
elif tk[i] == '$':
new_visual[n][i][:] += visual_features[n][j][:]
new_visual[n][i+1][:] += visual_features[n][j][:]
new_audio[n][i][:] += audio_features[n][j][:]
new_audio[n][i + 1][:] += audio_features[n][j][:]
i += 2
j += 1
else:
new_visual[n][i][:] = visual_features[n][j][:]
new_audio[n][i][:] = audio_features[n][j][:]
i += 1
j += 1
n += 1
# print(j, old_len)
return new_visual, new_audio
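# Hedged usage sketch (`texts`, `visual_feats` and `audio_feats` are
# hypothetical names): the function redistributes per-word visual/audio
# features onto BERT WordPiece tokens, duplicating a word's features when
# the tokenizer splits it into several pieces.
#
#     tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#     vis_aligned, aud_aligned = align_tokenized_features(
#         tokenizer, texts, visual_feats, audio_feats)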
def seq2array(list_in, max_len):
n_samples = len(list_in)
feature_dim = list_in[0].shape[-1]
print(feature_dim)
out_feat = np.zeros((n_samples, max_len, feature_dim))
token_ids = np.zeros((n_samples, max_len))
attention_mask = np.zeros((n_samples, max_len))
for i in range(n_samples):
for j in range(min(max_len, len(list_in[i]))):
out_feat[i][j][:] += list_in[i][j][:]
attention_mask[i][j] = 1
# print(out_feat)
return out_feat, token_ids, attention_mask
def select_balanced_id(ids, labels):
labels_used = labels[ids]
neg, neu, pos = [], [], []
for i in range(len(labels_used)):
if labels_used[i] == 0:
neg.append(ids[i])
elif labels_used[i] == 1:
neu.append(ids[i])
else:
pos.append(ids[i])
neg, neu, pos = np.array(neg), np.array(neu), np.array(pos)
print(len(neg), len(neu), len(pos))
n_sample = len(neg)
perm = np.random.permutation(len(neg))
neg = neg[perm]
perm = np.random.permutation(len(neu))
neu = neu[perm][:n_sample]
perm = np.random.permutation(len(pos))
pos = pos[perm][:n_sample]
    concat_all = np.concatenate([neg, neu, pos], axis=-1)
    return concat_all
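# Hedged usage sketch: undersample a three-class label vector (0=negative,
# 1=neutral, 2=positive) so each class contributes equally many ids. The
# negative class is assumed to be the smallest, matching the slicing above.
#
#     balanced_ids = select_balanced_id(np.arange(len(labels)), labels)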
#! /usr/bin/env python
#
"""
This code uses matplotlib and numpy to produce a window within which a FITS
image can be displayed. The reason for having this and not using the usual
packages already in existence is that I will want specific functions on the
image for data reduction.
Usage:
fits_image_display.py imagename.fits
or just
fits_image_display.py
In the first case the image name given is loaded (if possible) and displayed.
In the second case the widget comes up and one can read in an image.
Note that if the image is of dimension larger than 2 then the first "plane"
is used. There is no mechanism here for using other planes.
"""
import math
import sys
import tkinter as Tk
import tkinter.ttk
import tkinter.filedialog
import tkinter.simpledialog
import tkinter.messagebox
import numpy
from astropy.io import fits
# import matplotlib
# import matplotlib.lines as mlines
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
# from matplotlib.colors import LogNorm
import matplotlib.pyplot as pyplot
import general_utilities
import mpfitexpr
class ImageGUI(Tk.Frame):
"""
This class brings up a separate image display window.
Parameters
----------
Tk.Frame: The base class of the object, matching a Tkinter root or
Toplevel variable
Returns
-------
The class variable is returned, effectively.
"""
# The following section of code concerns the image display functionality.
#
def __init__(self, parent=None, **args):
self.image = None
self.imagefilename = None
self.zscale_flag = False
self.root = None
self.indpi = 100
self.zoom = [1, 0, 0]
self.xposition = None
self.yposition = None
self.angle = None
self.colourBarVariable = None
self.showImageAxes = None
self.imagePosLabel = None
self.imagePosLabelText = None
self.mplfig1 = None
self.mplsubplot1 = None
self.canvas1 = None
self.plotFrame = None
self.imagename = None
self.imagexpos = None
self.imageypos = None
self.transvalues = None
self.p1 = None
self.p2 = None
self.p3 = None
self.yscaleType = None
self.imageHistogramLabel = None
self.imageHistogramLabelText = None
self.rangeType = None
self.scaleType = None
self.minField = None
self.maxField = None
self.zsminField = None
self.zsmaxField = None
self.bin_field = None
self.colourScheme = None
self.colourLabels = None
self.barLabel = None
self.colourBar = None
if parent is not None:
# initialize the window and make the plot area.
Tk.Frame.__init__(self, parent, args)
self.root = parent
def make_image_window(self):
"""
Make the main image display window.
Returns
-------
None.
"""
# make the window
BGCOL = '#F8F8FF'
if self.root is not None:
imagewindow = self.root
else:
imagewindow = Tk.Toplevel()
imagewindow.config(bg=BGCOL)
self.showImageAxes = True
imageLabelFrame = Tk.Frame(imagewindow)
imageLabelFrame.pack(side=Tk.TOP)
self.imagePosLabelText = Tk.StringVar()
self.imagePosLabel = Tk.Label(imageLabelFrame,
textvariable=self.imagePosLabelText,
anchor=Tk.N, width=70)
self.imagePosLabel.pack(side=Tk.LEFT)
self.imagePosLabelText.set("Position: Value:")
controlFrame = Tk.Frame(imagewindow)
controlFrame.pack(side=Tk.LEFT, fill=Tk.Y, expand=1)
self.plotFrame = Tk.Frame(imagewindow)
self.plotFrame.pack()
self.mplfig1 = Figure(figsize=(6, 6), dpi=self.indpi)
self.mplsubplot1 = self.mplfig1.add_subplot(1, 1, 1)
self.canvas1 = FigureCanvasTkAgg(self.mplfig1, master=self.plotFrame)
self.canvas1.draw()
self.canvas1.get_tk_widget().pack(side=Tk.LEFT, fill=Tk.BOTH,
expand=Tk.YES)
self.canvas1.mpl_connect("motion_notify_event", self.setPlotPosition)
self.canvas1.mpl_connect("button_press_event", self.buttonPress)
self.canvas1.mpl_connect("button_release_event", self.buttonRelease)
self.canvas1.mpl_connect("key_press_event", self.keyPress)
newframe = Tk.Frame(controlFrame)
newframe.pack(side=Tk.TOP)
lb = Tk.Label(newframe, text='Colour Scheme')
lb.pack(side=Tk.TOP)
self.colourScheme = tkinter.ttk.Combobox(newframe, width=15)
self.colourLabels = ['jet', 'rainbow', 'gist_ncar', 'viridis',
'gnuplot', 'gist_gray', 'nipy_spectral']
self.colourScheme['values'] = self.colourLabels
self.colourScheme.pack()
self.colourScheme.current(0)
#
lb = Tk.Label(newframe, text='Show Colour Bar')
lb.pack()
selectFrame = Tk.Frame(newframe)
selectFrame.pack()
self.colourBar = Tk.IntVar()
t1 = Tk.Radiobutton(selectFrame, text='vertical',
variable=self.colourBar, value=0,
command=self.displayImage)
t1.pack(side=Tk.LEFT)
t2 = Tk.Radiobutton(selectFrame, text='horizontal',
variable=self.colourBar, value=1,
command=self.displayImage)
t2.pack(side=Tk.LEFT)
t3 = Tk.Radiobutton(selectFrame, text='none', variable=self.colourBar,
value=2, command=self.displayImage)
t3.pack(side=Tk.LEFT)
self.colourBar.set(2)
lb = Tk.Label(newframe, text='Colour Bar Label')
lb.pack()
self.barLabel = Tk.Entry(newframe, width=30)
self.barLabel.pack()
rangeframe = Tk.Frame(newframe)
rangeframe.pack()
fr1 = Tk.Frame(rangeframe)
fr1.pack(side=Tk.LEFT)
lb = Tk.Label(fr1, text='Display Minimum')
lb.pack(side=Tk.TOP)
self.minField = Tk.Entry(fr1, width=10)
self.minField.pack()
fr1 = Tk.Frame(rangeframe)
fr1.pack(side=Tk.LEFT)
Tk.Label(fr1, text=' ').pack()
fr1 = Tk.Frame(rangeframe)
fr1.pack(side=Tk.LEFT)
lb = Tk.Label(fr1, text='Display Maximum')
lb.pack(side=Tk.TOP)
self.maxField = Tk.Entry(fr1, width=10)
self.maxField.pack()
zmin = numpy.min(self.image)
zmax = numpy.max(self.image)
general_utilities.put_value(zmin, self.minField)
general_utilities.put_value(zmax, self.maxField)
rangeframe = Tk.Frame(newframe)
rangeframe.pack()
fr1 = Tk.Frame(rangeframe)
fr1.pack(side=Tk.LEFT)
lb = Tk.Label(fr1, text='Zscale Minimum')
lb.pack(side=Tk.TOP)
self.zsminField = Tk.Entry(fr1, width=10)
self.zsminField.pack()
fr1 = Tk.Frame(rangeframe)
fr1.pack(side=Tk.LEFT)
Tk.Label(fr1, text=' ').pack()
fr1 = Tk.Frame(rangeframe)
fr1.pack(side=Tk.LEFT)
lb = Tk.Label(fr1, text='Zscale Maximum')
lb.pack(side=Tk.TOP)
self.zsmaxField = Tk.Entry(fr1, width=10)
self.zsmaxField.pack()
try:
zmin1, zmax1 = self.get_limits(self.image)
ratio = abs(zmax1/zmin1)
if ratio < 1.2:
if zmin1 < 0.:
zmax1 = zmin1
zmin1 = 3.*zmin1
else:
zmax1 = 3.*zmin1
        except Exception:
zmin1 = 0.
zmax1 = 1.
general_utilities.put_value(zmin1, self.zsminField)
general_utilities.put_value(zmax1, self.zsmaxField)
lb = Tk.Label(newframe, text='Image Scaling')
lb.pack()
selectFrame = Tk.Frame(newframe)
selectFrame.pack()
self.scaleType = Tk.IntVar()
t1 = Tk.Radiobutton(selectFrame, text='linear',
variable=self.scaleType, value=0,
command=self.displayImage)
t1.pack(side=Tk.LEFT)
t2 = Tk.Radiobutton(selectFrame, text='log', variable=self.scaleType,
value=1, command=self.displayImage)
t2.pack(side=Tk.LEFT)
t3 = Tk.Radiobutton(selectFrame, text='sqrt',
variable=self.scaleType, value=2,
command=self.displayImage)
t3.pack(side=Tk.LEFT)
self.scaleType.set(0)
lb = Tk.Label(newframe, text='Image Range')
lb.pack()
selectFrame = Tk.Frame(newframe)
selectFrame.pack()
self.rangeType = Tk.IntVar()
t1 = Tk.Radiobutton(
selectFrame, text='full', variable=self.rangeType,
value=0, command=self.toggle_zscale)
t1.pack(side=Tk.LEFT)
t2 = Tk.Radiobutton(
selectFrame, text='zscale', variable=self.rangeType,
value=1, command=self.toggle_zscale)
t2.pack(side=Tk.LEFT)
self.rangeType.set(0)
buttonFrame = Tk.Frame(controlFrame)
buttonFrame.pack(side=Tk.TOP)
subFrame = Tk.Frame(buttonFrame)
subFrame.pack(side=Tk.TOP)
side1 = Tk.Frame(subFrame)
side1.pack(side=Tk.LEFT)
b1 = Tk.Button(side1, text='Toggle Axes',
command=self.toggleAxes)
b1.pack(side=Tk.TOP)
b1 = Tk.Button(side1, text='Auto Scale',
command=self.imageAutoscale)
b1.pack(side=Tk.TOP)
side2 = Tk.Frame(subFrame)
side2.pack(side=Tk.LEFT)
b1 = Tk.Button(side2, text='Image Histogram',
command=self.imageHistogram)
b1.pack(side=Tk.TOP)
b1 = Tk.Button(side2, text='Set Zoom',
command=self.set_zoom)
b1.pack(side=Tk.TOP)
bin_frame = Tk.Frame(buttonFrame)
bin_frame.pack(side=Tk.TOP)
label = Tk.Label(bin_frame, text='bin size/number')
label.grid(row=0, column=0)
self.bin_field = Tk.Entry(bin_frame, width=10)
self.bin_field.grid(row=0, column=1)
self.bin_field.insert(0, '100')
label = Tk.Label(
bin_frame, text='Positive for bin number, negative for \nbin size')
label.grid(row=1, column=0, columnspan=2)
label = Tk.Label(buttonFrame, text='Histogram y scaling:')
label.pack()
yscaleFrame = Tk.Frame(buttonFrame)
yscaleFrame.pack(side=Tk.TOP)
self.yscaleType = Tk.IntVar()
t1 = Tk.Radiobutton(
yscaleFrame, text='linear', variable=self.yscaleType,
value=0)
t1.pack(side=Tk.LEFT)
t2 = Tk.Radiobutton(
yscaleFrame, text='hybrid log', variable=self.yscaleType,
value=1)
t2.pack(side=Tk.LEFT)
        self.yscaleType.set(0)
b1 = Tk.Button(buttonFrame, text='Save Image as FITS',
command=lambda: general_utilities.save_fits(self.image))
b1.pack(side=Tk.TOP)
b1 = Tk.Button(buttonFrame, text='Save as PNG',
command=lambda: general_utilities.save_png_figure(
self.mplfig1))
b1.pack(side=Tk.TOP)
b1 = Tk.Button(buttonFrame, text='Save as PS',
command=lambda: general_utilities.save_ps_figure(
self.mplfig1))
b1.pack(side=Tk.TOP)
b1 = Tk.Button(buttonFrame, text='Redisplay',
command=self.displayImage)
b1.pack(side=Tk.TOP)
# b1 = Tk.Button(buttonFrame, text='Close',
# command=lambda: self.imageExit(imagewindow))
# b1.pack(side=Tk.TOP)
self.displayImage()
def zoom_corner(self, sh1, zoom, x1, y1):
"""
Given the zoom parameters find the array lower left corner.
Parameters
----------
sh1: A two-element list of the shape of the input image, values being
integers
zoom: A positive integer zoom function to be applied to the image
x1: The x pixel value for the centre of the field to display
(float or integer)
y1: The y pixel value for the centre of the field to display
(float or integer)
Returns
-------
xmin: An integer value for the lower left corner x pixel index
ymin: An integer value for the lower left corner y pixel index
"""
nxpixel = sh1[1] // zoom
nypixel = sh1[0] // zoom
xmin = x1 - nxpixel/2.
ymin = y1 - nypixel/2.
xmin = int(xmin)
ymin = int(ymin)
if xmin < 0:
xmin = 0
if ymin < 0:
ymin = 0
xmax = xmin + nxpixel
ymax = ymin + nypixel
if ymax > sh1[0]:
ymax = sh1[0]
ymin = ymax - nypixel
if xmax > sh1[1]:
xmax = sh1[1]
xmin = xmax - nxpixel
return xmin, ymin
def set_zoom(self):
"""
Bring up a window to set the zoom parameter.
No values are passed to this routine or returned from it. The
self.zoom variable is changed by the routine.
"""
sh1 = self.image.shape
npixel = min(sh1[0], sh1[1])
zoommax = int(npixel/64.)
if zoommax <= 1:
tkinter.messagebox.showinfo(
"Error",
"Zoom is disabled for minimum image size < 128 pixels.")
return
if self.xposition is None:
x1 = sh1[1]/2.
y1 = sh1[0]/2.
else:
x1 = self.xposition
y1 = self.yposition
zoom = tkinter.simpledialog.askinteger(
'Input',
'Set the integer zoom value (1 to %d)' % (zoommax))
if zoom is None:
return
else:
xmin, ymin = self.zoom_corner(sh1, zoom, x1, y1)
self.zoom[0] = zoom
self.zoom[1] = int(xmin)
self.zoom[2] = int(ymin)
self.displayImage()
def toggle_zscale(self):
"""
Toggle the zscale option in the image display
This routine is called in response to the "Image Range" radio button.
It turns the zscale display option on or off via the self.zscale_flag
boolean variable.
No values are passed to this routine or returned form the routine.
"""
ind = self.rangeType.get()
if ind == 1:
self.zscale_flag = True
else:
self.zscale_flag = False
self.displayImage()
def readNewImage(self):
"""
Read a FITS image from a file and display it.
Routine to read a FITS files and extract a two-dimensional image if
possible. The image is then displayed. This routine will only work
if the image display window exists.
No parameters are passed to this routine or returned from this routine.
"""
try:
filename = tkinter.filedialog.askopenfilename(
filetypes=[('FITS', '*.fits')])
if filename is not None:
self.imagefilename = filename
self.image = self.get_image()
if self.image is None:
self.imagefilename = None
return
sh1 = self.image.shape
self.xposition = sh1[1] // 2
self.yposition = sh1[0] // 2
print('centre position: ', self.xposition, self.yposition)
self.displayImage()
self.canvas1.draw()
except Exception:
pass
def get_limits(self, values, nsamples=1000, contrast=0.25, max_reject=0.5,
min_npixels=5, krej=2.5, max_iterations=5):
"""
Find the IRAF-like "zscale" signal limits for an image.
This routine is copied from astropy.visualization.
Aside from a change to the passing of the arguments the code has
not been changed. The original code is part of ZScaleInterval.
It is a recoding of the IRAF zscale algorithm in python.
All parameters except the input image array are optional.
Parameters
----------
values : a two-dimensional numpy array for which the zscale limit
values are to be calculated. Can be float or integer values.
nsamples : the number of pixels to use to estimate the median and the
range (integer).
contrast : The constrast parameter from IRAF imexam which controls the
range of values considered to estimate the minimum and
maximum values to use in the display, a real value between
0.0 and 1.0.
max_reject : Parameter for the maximum fraction of rejected pixels,
a real values between 0.0 and 1.0; if more than this
fraction of pixels are rejected then the full range
of the data values is returned.
min_npixels : An integer value for the minimum number of pixels that
are rejected by the iterative algorithm; if less than
this number of pixels is rejected the full data range is
returned.
krej : A float value, The number of standard deviations used for
rejection. It must be positive.
max_iterations : An integer value giving the maximum number of
rejection iterations to use.
Returns
-------
vmin : the minimum value for the zscale range, a real number
vmax : the maximum value for the zscale range, a real number
"""
# Sample the image
values = numpy.asarray(values)
values = values[numpy.isfinite(values)]
stride = int(max(1.0, values.size / nsamples))
samples = values[::stride][:nsamples]
samples.sort()
npix = len(samples)
vmin = samples[0]
vmax = samples[-1]
# Fit a line to the sorted array of samples
minpix = max(min_npixels, int(npix * max_reject))
xvalues = numpy.arange(npix)
ngoodpix = npix
last_ngoodpix = npix + 1
# Bad pixels mask used in k-sigma clipping
badpix = numpy.zeros(npix, dtype=bool)
# Kernel used to dilate the bad pixels mask
ngrow = max(1, int(npix * 0.01))
kernel = numpy.ones(ngrow, dtype=bool)
for niter in range(max_iterations):
if ngoodpix >= last_ngoodpix or ngoodpix < minpix:
break
fit = numpy.polyfit(xvalues, samples, deg=1,
w=(~badpix).astype(int))
fitted = numpy.poly1d(fit)(xvalues)
# Subtract fitted line from the data array
flat = samples - fitted
# Compute the k-sigma rejection threshold
threshold = krej * flat[~badpix].std()
# Detect and reject pixels further than k*sigma from the
# fitted line
badpix[(flat < - threshold) | (flat > threshold)] = True
# Convolve with a kernel of length ngrow
badpix = numpy.convolve(badpix, kernel, mode='same')
last_ngoodpix = ngoodpix
ngoodpix = numpy.sum(~badpix)
slope, intercept = fit
if ngoodpix >= minpix:
if contrast > 0:
slope = slope / contrast
center_pixel = (npix - 1) // 2
median = numpy.median(samples)
vmin = max(vmin, median - (center_pixel - 1) * slope)
vmax = min(vmax, median + (npix - center_pixel) * slope)
return vmin, vmax
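    # Hedged usage sketch (`gui` is a hypothetical ImageGUI instance): the
    # limits are meant for display stretching, as in IRAF's zscale.
    #
    #     vmin, vmax = gui.get_limits(image)
    #     pyplot.imshow(image, vmin=vmin, vmax=vmax, origin='lower')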
def get_image(self):
"""
Read a FITS image from the 0th or 1st extension.
This routine tries to read a FITS file and returns the image, or None
if there is an issue:
Parameters
----------
None
Returns
-------
image : a numpy two-dimensional array of image values, or None
if there is an issue.
"""
try:
image = fits.getdata(self.imagefilename)
except IndexError:
image = fits.getdata(self.imagefilename, ext=1)
sh1 = image.shape
if len(sh1) < 2:
print('Bad image dimensions in file %s.' %
(self.imagefilename))
return None
if len(sh1) == 3:
image = numpy.squeeze(image[0, :, :])
if len(sh1) == 4:
image = numpy.squeeze(image[0, 0, :, :])
if len(sh1) == 5:
image = numpy.squeeze(image[0, 0, 0, :, :])
if len(sh1) == 6:
image = numpy.squeeze(image[0, 0, 0, 0, :, :])
zmin = numpy.min(image)
zmax = numpy.max(image)
general_utilities.put_value(zmin, self.minField)
general_utilities.put_value(zmax, self.maxField)
return image
def imageHistogram(self):
"""
Plot an IRAF-like image histogram for the current image.
This routine plots a histogram of the image pixel values in
a new window. No values are passed to this routine or returned from
this routine.
"""
if self.image is None:
return
BGCOL = '#F8F8FF'
try:
histogramwindow = Tk.Toplevel()
histogramwindow.config(bg=BGCOL)
if self.zscale_flag:
xmin = float(self.zsminField.get())
xmax = float(self.zsmaxField.get())
else:
xmin = float(self.minField.get())
xmax = float(self.maxField.get())
yscale_option = self.yscaleType.get()
try:
value = float(self.bin_field.get())
if value == 0:
nbins = 100
if value < 0.:
xstep = abs(value)
xmin = xmin - xstep
xmax = xmax + 2.0*xstep
nbins = int((xmax - xmin)/xstep)
xmax = xmin + nbins*xstep
else:
nbins = int(value)
nbins = max(nbins, 10)
except ValueError:
nbins = 100
xstep = (xmax - xmin)/nbins
xmin = xmin - xstep
xmax = xmax + 2.0*xstep
nbins = int((xmax - xmin)/xstep)
xmax = xmin + nbins*xstep
self.imageHistogramLabelText = Tk.StringVar()
self.imageHistogramLabel = Tk.Label(
histogramwindow, textvariable=self.imageHistogramLabelText,
anchor=Tk.N, width=70)
self.imageHistogramLabel.pack()
self.imageHistogramLabelText.set("Value:")
self.p3 = Figure(figsize=(6, 6), dpi=100)
sp1 = self.p3.add_subplot(1, 1, 1)
c1 = FigureCanvasTkAgg(self.p3, master=histogramwindow)
c1.mpl_connect("motion_notify_event", self.imageHistogramPosition)
histogramy, hxedges = numpy.histogram(
self.image.flatten(), nbins, range=[xmin, xmax])
histogramx = (hxedges[1:]+hxedges[0:-1])/2.
if yscale_option == 1:
newyvalues = general_utilities.hybrid_transform(histogramy)
sp1.plot(histogramx, newyvalues, color='blue')
else:
sp1.plot(histogramx, histogramy, color='blue')
sp1.set_xlabel('Signal')
sp1.set_ylabel('Number of points per bin')
if yscale_option == 1:
tickmarks, ticklabels = general_utilities.hybrid_labels(
newyvalues)
sp1.set_yticks(tickmarks)
sp1.set_yticklabels(ticklabels)
label = 'Bin size: %.5g\nNumber of Bins: %d' % (xstep, nbins)
xpos = xmin + 0.01*(xmax - xmin)
ymin, ymax = sp1.get_ybound()
ypos = ymax + (ymax - ymin)*0.02
if self.imagefilename is None:
outstring = None
else:
outstring = '# Histogram from file ' + self.imagefilename
sp1.text(xpos, ypos, label)
c1.draw()
c1.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=Tk.YES)
h1 = Tk.Frame(histogramwindow)
h1.pack(side=Tk.TOP)
h1.config(bg=BGCOL)
button = Tk.Button(
h1, text="Save values",
command=lambda: general_utilities.save_data_set_values(
histogramx, histogramy, outstring))
button.pack(side=Tk.LEFT)
button.config(bg=BGCOL)
button = Tk.Button(
h1, text="Save as PS",
command=lambda: general_utilities.save_ps_figure(self.p3))
button.pack(side=Tk.LEFT)
button.config(bg=BGCOL)
button = Tk.Button(
h1, text="Save as PNG",
command=lambda: general_utilities.save_png_figure(self.p3))
button.pack(side=Tk.LEFT)
button.config(bg=BGCOL)
button = Tk.Button(h1, text="Close",
command=histogramwindow.destroy)
button.pack()
button.config(bg=BGCOL)
except Exception:
pass
def imageHistogramPosition(self, event):
"""
Post mouse position on image to the status line.
When a normal histogram plot exists, this routine takes the mouse
position events and updates the position values at the top of the
window.
Parameters
----------
event a standard Tkinter event variable.
Returns
-------
No values are returned by this routine.
"""
try:
xpos = float(event.xdata)
ypos = float(event.ydata)
if self.yscaleType.get() == 1:
ypos = general_utilities.inverse_hybrid_transform(ypos)
s1 = 'Value: [%g, %g]' % (xpos, ypos)
self.imageHistogramLabelText.set(s1)
except Exception:
pass
def put_value(self, value, field):
"""
        Place a value in a widget text field.
Any current contents of the field are deleted.
Parameters
----------
value : the string value to be placed in the text field
field : the tkinter text field variable where the string is to
be put
No values are returned from this routine.
"""
try:
s1 = field.get()
field.delete(0, last=len(s1))
field.insert(0, str(value))
except Exception:
pass
def toggleAxes(self):
"""
Toggle the axis display variable.
Each call to this routine toggles the logical variable determining
whether the axes are plotted with the image. No values are passed
to this routine or returned from it.
"""
self.showImageAxes = not self.showImageAxes
self.displayImage()
def imageAutoscale(self):
"""
Autoscale the image display.
This routine resets the minimum and maximum image display values to
the full range of the current image.
No values are passed to this routine or returned from this routine.
"""
zmin = numpy.min(self.image)
zmax = numpy.max(self.image)
general_utilities.put_value(zmin, self.minField)
general_utilities.put_value(zmax, self.maxField)
zmin1, zmax1 = self.get_limits(self.image)
general_utilities.put_value(zmin1, self.zsminField)
general_utilities.put_value(zmax1, self.zsmaxField)
self.displayImage()
def imageExit(self, window):
"""
Close a Tkinter window.
This routine closes the window for the image display (or
whichever top level window variable is passed into the routine).
Parameters
----------
window : A tkinter Toplevel variable (or equivalent), the window
to be closed.
No values are returned by this routine.
"""
window.destroy()
def keyPress(self, event):
"""
Routine for applying imaging key press events.
Currently the routine sets the image center at the event position.
This does nothing if the zoom is not applied.
"""
if (event.xdata is None) or (event.ydata is None):
return
xpixel = int(self.zoom[1]+event.xdata+0.5)
ypixel = int(self.zoom[2]+event.ydata+0.5)
if (xpixel is None) or (ypixel is None):
return
imshape = self.image.shape
if event.key == 'l':
yvalues = numpy.squeeze(self.image[ypixel, :])
            xvalues = numpy.arange(imshape[1])+1
self.plotxy(xvalues, yvalues, symbol='-', colour='blue',
xlabel='Column (Pixels)', ylabel='Pixel Value',
title='Line %d' % (ypixel))
if event.key == 'c':
yvalues = numpy.squeeze(self.image[:, xpixel])
            xvalues = numpy.arange(imshape[0])+1
self.plotxy(xvalues, yvalues, symbol='-', colour='blue',
xlabel='Line (Pixels)', ylabel='Pixel Value',
title='Column %d' % (xpixel))
if event.key == 'j':
x0 = xpixel-10
x0 = max(x0, 0)
x1 = x0 + 22
if x1 > imshape[1]:
x1 = imshape[1]
x0 = x1 - 22
y0 = ypixel-2
y0 = max(y0, 0)
y1 = y0 + 5
if y1 > imshape[0]:
y1 = imshape[0]
y0 = y1 - 5
subim = numpy.copy(self.image[y0:y1, x0:x1])
vector = numpy.mean(subim, axis=0)
xvalues = numpy.arange(len(vector))+x0
            ind = numpy.argmax(vector)
import nengo_spa as spa
import nengo
import nengolib
import scipy.linalg
import numpy as np
import matplotlib.pyplot as plt
from nengo_spa.semantic_pointer import SemanticPointer
from nengo.utils.compat import is_array, is_array_like, is_number
from nengo_spa.algebras.hrr_algebra import HrrAlgebra
from nengo_spa.ast.base import Fixed, infer_types, TypeCheckedBinaryOp
from nengo_spa.types import TAnyVocab, TScalar, TVocabulary
from nengo_spa.algebras.hrr_algebra import HrrAlgebra
from nengo.params import (
NdarrayParam,
FrozenObject,
)
from nengo.dists import Distribution, UniformHypersphere
from nengo.exceptions import ValidationError
# The SemanticPointer class, copied from nengo-spa, with fractional binding via ``**`` added
class SemanticPointer(Fixed):
"""A Semantic Pointer, based on Holographic Reduced Representations.
Operators are overloaded so that ``+`` and ``-`` are addition,
``*`` is circular convolution, ``**`` is fractional circular convolution,
and ``~`` is the inversion operator.
Parameters
----------
data : array_like
The vector constituting the Semantic Pointer.
vocab : Vocabulary, optional
Vocabulary that the Semantic Pointer is considered to be part of.
Mutually exclusive with the *algebra* argument.
algebra : AbstractAlgebra, optional
Algebra used to perform vector symbolic operations on the Semantic
Pointer. Defaults to `.CircularConvolutionAlgebra`. Mutually exclusive
with the *vocab* argument.
name : str, optional
A name for the Semantic Pointer.
Attributes
----------
v : array_like
The vector constituting the Semantic Pointer.
algebra : AbstractAlgebra
Algebra that defines the vector symbolic operations on this Semantic
Pointer.
vocab : Vocabulary or None
The vocabulary the this Semantic Pointer is considered to be part of.
name : str or None
Name of the Semantic Pointer.
"""
def __init__(self, data, vocab=None, algebra=None, name=None):
super(SemanticPointer, self).__init__(
TAnyVocab if vocab is None else TVocabulary(vocab))
self.algebra = self._get_algebra(vocab, algebra)
self.v = np.array(data, dtype=complex)
if len(self.v.shape) != 1:
raise ValidationError("'data' must be a vector", 'data', self)
self.v.setflags(write=False)
self.vocab = vocab
self.name = name
def _get_algebra(cls, vocab, algebra):
if algebra is None:
if vocab is None:
algebra = HrrAlgebra()
else:
algebra = vocab.algebra
elif vocab is not None and vocab.algebra is not algebra:
raise ValueError(
"vocab and algebra argument are mutually exclusive")
return algebra
def _get_unary_name(self, op):
return "{}({})".format(op, self.name) if self.name else None
def _get_method_name(self, method):
return "({}).{}()".format(self.name, method) if self.name else None
def _get_binary_name(self, other, op, swap=False):
if isinstance(other, SemanticPointer):
other_name = other.name
else:
other_name = str(other)
self_name = self.name
if self_name and other_name:
if swap:
self_name, other_name = other_name, self.name
return "({}){}({})".format(self_name, op, other_name)
else:
return None
def evaluate(self):
return self
def connect_to(self, sink, **kwargs):
return nengo.Connection(self.construct(), sink, **kwargs)
def construct(self):
return nengo.Node(self.v, label=str(self).format(len(self)))
def normalized(self):
"""Normalize the Semantic Pointer and return it as a new object.
If the vector length is zero, the Semantic Pointer will be returned
unchanged.
The original object is not modified.
"""
nrm = np.linalg.norm(self.v)
if nrm <= 0.:
nrm = 1.
return SemanticPointer(
self.v / nrm, vocab=self.vocab, algebra=self.algebra,
name=self._get_method_name("normalized"))
def unitary(self):
"""Make the Semantic Pointer unitary and return it as a new object.
The original object is not modified.
A unitary Semantic Pointer has the property that it does not change
the length of Semantic Pointers it is bound with using circular
convolution.
"""
return SemanticPointer(
self.algebra.make_unitary(self.v), vocab=self.vocab,
algebra=self.algebra, name=self._get_method_name("unitary"))
def copy(self):
"""Return another semantic pointer with the same data."""
return SemanticPointer(
data=self.v, vocab=self.vocab, algebra=self.algebra,
name=self.name)
def length(self):
"""Return the L2 norm of the vector."""
return np.linalg.norm(self.v)
def __len__(self):
"""Return the number of dimensions in the vector."""
return len(self.v)
def __str__(self):
if self.name:
return "SemanticPointer<{}>".format(self.name)
else:
return repr(self)
def __repr__(self):
return (
"SemanticPointer({!r}, vocab={!r}, algebra={!r}, name={!r}".format(
self.v, self.vocab, self.algebra, self.name))
@TypeCheckedBinaryOp(Fixed)
def __add__(self, other):
return self._add(other, swap=False)
@TypeCheckedBinaryOp(Fixed)
def __radd__(self, other):
return self._add(other, swap=True)
def _add(self, other, swap=False):
type_ = infer_types(self, other)
vocab = None if type_ == TAnyVocab else type_.vocab
if vocab is None:
self._ensure_algebra_match(other)
other_pointer = other.evaluate()
a, b = self.v, other_pointer.v
if swap:
a, b = b, a
return SemanticPointer(
data=self.algebra.superpose(a, b), vocab=vocab,
algebra=self.algebra,
name=self._get_binary_name(other_pointer, "+", swap))
def __neg__(self):
return SemanticPointer(
data=-self.v, vocab=self.vocab, algebra=self.algebra,
name=self._get_unary_name("-"))
def __sub__(self, other):
return self + (-other)
def __rsub__(self, other):
return (-self) + other
def __mul__(self, other):
"""Multiplication of two SemanticPointers is circular convolution.
If multiplied by a scalar, we do normal multiplication.
"""
return self._mul(other, swap=False)
def __rmul__(self, other):
"""Multiplication of two SemanticPointers is circular convolution.
If multiplied by a scalar, we do normal multiplication.
"""
return self._mul(other, swap=True)
def _mul(self, other, swap=False):
if is_array(other):
raise TypeError(
"Multiplication of Semantic Pointers with arrays in not "
"allowed.")
elif is_number(other):
return SemanticPointer(
data=self.v * other, vocab=self.vocab, algebra=self.algebra,
name=self._get_binary_name(other, "*", swap))
elif isinstance(other, Fixed):
if other.type == TScalar:
return SemanticPointer(
data=self.v * other.evaluate(), vocab=self.vocab,
algebra=self.algebra,
name=self._get_binary_name(other, "*", swap))
else:
return self._bind(other, swap=swap)
else:
return NotImplemented
def __invert__(self):
"""Return a reorganized vector that acts as an inverse for convolution.
This reorganization turns circular convolution into circular
correlation, meaning that ``A*B*~B`` is approximately ``A``.
For the vector ``[1, 2, 3, 4, 5]``, the inverse is ``[1, 5, 4, 3, 2]``.
"""
return SemanticPointer(
data=self.algebra.invert(self.v), vocab=self.vocab,
algebra=self.algebra, name=self._get_unary_name("~"))
def bind(self, other):
"""Return the binding of two SemanticPointers."""
return self._bind(other, swap=False)
def rbind(self, other):
"""Return the binding of two SemanticPointers."""
return self._bind(other, swap=True)
def _bind(self, other, swap=False):
type_ = infer_types(self, other)
vocab = None if type_ == TAnyVocab else type_.vocab
if vocab is None:
self._ensure_algebra_match(other)
other_pointer = other.evaluate()
a, b = self.v, other_pointer.v
if swap:
a, b = b, a
return SemanticPointer(
data=self.algebra.bind(a, b), vocab=vocab, algebra=self.algebra,
name=self._get_binary_name(other_pointer, "*", swap))
def get_binding_matrix(self, swap_inputs=False):
"""Return the matrix that does a binding with this vector.
This should be such that
``A*B == dot(A.get_binding_matrix(), B.v)``.
"""
return self.algebra.get_binding_matrix(self.v, swap_inputs=swap_inputs)
def dot(self, other):
"""Return the dot product of the two vectors."""
if isinstance(other, Fixed):
infer_types(self, other)
other = other.evaluate().v
if is_array_like(other):
return np.vdot(self.v, other)
else:
return other.vdot(self)
def __matmul__(self, other):
return self.dot(other)
def compare(self, other):
"""Return the similarity between two SemanticPointers.
This is the normalized dot product, or (equivalently), the cosine of
the angle between the two vectors.
"""
if isinstance(other, SemanticPointer):
infer_types(self, other)
other = other.evaluate().v
scale = np.linalg.norm(self.v) * np.linalg.norm(other)
if scale == 0:
return 0
return np.dot(self.v, other) / scale
def reinterpret(self, vocab):
"""Reinterpret the Semantic Pointer as part of vocabulary *vocab*.
The *vocab* parameter can be set to *None* to clear the associated
vocabulary and allow the *source* to be interpreted as part of the
vocabulary of any Semantic Pointer it is combined with.
"""
return SemanticPointer(self.v, vocab=vocab, name=self.name)
def translate(self, vocab, populate=None, keys=None, solver=None):
"""Translate the Semantic Pointer to vocabulary *vocab*.
The translation of a Semantic Pointer uses some form of projection to
convert the Semantic Pointer to a Semantic Pointer of another
vocabulary. By default the outer products of terms in the source and
target vocabulary are used, but if *solver* is given, it is used to
find a least squares solution for this projection.
Parameters
----------
vocab : Vocabulary
Target vocabulary.
populate : bool, optional
Whether the target vocabulary should be populated with missing
keys. This is done by default, but with a warning. Set this
explicitly to *True* or *False* to silence the warning or raise an
error.
keys : list, optional
All keys to translate. If *None*, all keys in the source vocabulary
will be translated.
solver : nengo.Solver, optional
If given, the solver will be used to solve the least squares
problem to provide a better projection for the translation.
"""
tr = self.vocab.transform_to(vocab, populate, solver)
return SemanticPointer(
np.dot(tr, self.evaluate().v), vocab=vocab, name=self.name)
def distance(self, other):
"""Return a distance measure between the vectors.
This is ``1-cos(angle)``, so that it is 0 when they are identical, and
the distance gets larger as the vectors are farther apart.
"""
return 1 - self.compare(other)
def mse(self, other):
"""Return the mean-squared-error between two vectors."""
if isinstance(other, SemanticPointer):
infer_types(self, other)
other = other.evaluate().v
return np.sum((self.v - other)**2) / len(self.v)
def _ensure_algebra_match(self, other):
"""Check the algebra of the *other*.
If the *other* parameter is a `SemanticPointer` and uses a different
algebra, a `TypeError` will be raised.
"""
if isinstance(other, SemanticPointer):
if self.algebra is not other.algebra:
raise TypeError(
"Operation not supported for SemanticPointer with "
"different algebra.")
def __pow__(self, other):
"""Exponentiation of a SemanticPointer is fractional binding."""
if is_number(other):
return self.fractional_bind(other)
else:
return NotImplemented
def fractional_bind(self, other):
"""Return the fractional binding of a SemanticPointer."""
type_ = infer_types(self)
vocab = None if type_ == TAnyVocab else type_.vocab
a, b = self.v, other
return SemanticPointer(
data=self.algebra.fractional_bind(a, b), vocab=vocab, algebra=self.algebra,
name=self._get_binary_name(other, "**", False))
class Identity(SemanticPointer):
"""Identity element.
Parameters
----------
n_dimensions : int
Dimensionality of the identity vector.
vocab : Vocabulary, optional
Vocabulary that the Semantic Pointer is considered to be part of.
Mutually exclusive with the *algebra* argument.
algebra : AbstractAlgebra, optional
Algebra used to perform vector symbolic operations on the Semantic
Pointer. Defaults to `.CircularConvolutionAlgebra`. Mutually exclusive
with the *vocab* argument.
"""
def __init__(self, n_dimensions, vocab=None, algebra=None):
data = self._get_algebra(vocab, algebra).identity_element(n_dimensions)
super(Identity, self).__init__(
data, vocab=vocab, algebra=algebra, name="Identity")
class AbsorbingElement(SemanticPointer):
r"""Absorbing element.
If :math:`z` denotes the absorbing element, :math:`v \circledast z = c z`,
where :math:`v` is a Semantic Pointer and :math:`c` is a real-valued
scalar. Furthermore :math:`\|z\| = 1`.
Parameters
----------
n_dimensions : int
Dimensionality of the identity vector.
vocab : Vocabulary, optional
Vocabulary that the Semantic Pointer is considered to be part of.
Mutually exclusive with the *algebra* argument.
algebra : AbstractAlgebra, optional
Algebra used to perform vector symbolic operations on the Semantic
Pointer. Defaults to `.CircularConvolutionAlgebra`. Mutually exclusive
with the *vocab* argument.
"""
def __init__(self, n_dimensions, vocab=None, algebra=None):
data = self._get_algebra(vocab, algebra).absorbing_element(
n_dimensions)
super(AbsorbingElement, self).__init__(
data, vocab=vocab, algebra=algebra, name="AbsorbingElement")
class Zero(SemanticPointer):
"""Zero element.
Parameters
----------
n_dimensions : int
Dimensionality of the identity vector.
vocab : Vocabulary, optional
Vocabulary that the Semantic Pointer is considered to be part of.
Mutually exclusive with the *algebra* argument.
algebra : AbstractAlgebra, optional
Algebra used to perform vector symbolic operations on the Semantic
Pointer. Defaults to `.CircularConvolutionAlgebra`. Mutually exclusive
with the *vocab* argument.
"""
def __init__(self, n_dimensions, vocab=None, algebra=None):
data = self._get_algebra(vocab, algebra).zero_element(n_dimensions)
super(Zero, self).__init__(
data, vocab=vocab, algebra=algebra, name="Zero")
# HrrAlgebra with fractional binding added
class HrrAlgebra(HrrAlgebra):
def fractional_bind(self, A, b):
"""Fractional circular convolution."""
if not is_number(b):
raise ValueError("b must be a scalar.")
return np.fft.ifft(np.fft.fft(A, axis=0)**b, axis=0)#.real
def bind(self, a, b):
n = len(a)
if len(b) != n:
raise ValueError("Inputs must have same length.")
return np.fft.ifft(np.fft.fft(a) * np.fft.fft(b), n=n)
#return np.fft.irfft(np.fft.rfft(a) * np.fft.rfft(b), n=n)
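# Hedged sketch of fractional binding (not in the original source): for a
# pointer whose Fourier coefficients have unit modulus, exponents compose
# additively, so (X ** 0.3) * (X ** 0.7) reproduces X up to numerical error.
#
#     fcoefs = np.exp(1j * np.random.uniform(-np.pi, np.pi, 64))
#     X = SemanticPointer(np.fft.ifft(fcoefs), algebra=HrrAlgebra())
#     np.allclose(((X ** 0.3) * (X ** 0.7)).v, X.v)  # True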
def ssp(X, Y, x, y, alg=HrrAlgebra()):
# Return a ssp
if ((type(X) == SemanticPointer) & (type(Y) == SemanticPointer)):
return (X**x) * (Y**y)
else:
return (SemanticPointer(data=X,algebra=alg)**x) * (SemanticPointer(data=Y,algebra=alg)**y)
def ssp_vectorized(basis, positions):
    # Given a matrix of basis vectors, d by n (d = dimension of the semantic
    # pointer basis vectors, n = number of basis vectors), and a matrix of
    # positions, N by n (N = number of points),
# Return a matrix of N ssp vectors
# Assuming the circular convolution defn for fractional binding
positions = positions.reshape(-1,basis.shape[1])
    S_list = np.zeros((basis.shape[0], positions.shape[0]))
    for i in np.arange(positions.shape[0]):
        # Take .real: SSPs built from conjugate-symmetric bases are real up to
        # numerical error, and assigning complex values into a float array
        # would otherwise drop the imaginary part with a ComplexWarning.
        S_list[:, i] = np.fft.ifft(np.prod(np.fft.fft(basis, axis=0)**positions[i, :], axis=1), axis=0).real
return S_list
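# Hedged usage sketch (uses ssp_plane_basis, defined further below): encode a
# batch of 2D positions against plane-wave bases X and Y.
#
#     X, Y = ssp_plane_basis(np.array([[0.0, 1.0], [1.0, 0.0]]))
#     basis = np.vstack([X.v, Y.v]).T           # shape (5, 2)
#     S = ssp_vectorized(basis, np.array([[0.0, 0.0], [1.0, 2.0]]))  # (5, 2)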
def similarity_values(basis, positions, position0 = None, S0 = None, S_list = None):
if position0 is None:
position0 = np.zeros(basis.shape[1])
if S0 is None:
S0 = ssp_vectorized(basis, position0)
if S_list is None:
S_list = ssp_vectorized(basis, positions)
sim_dots = S_list.T @ S0
return(sim_dots, S_list)
def similarity_plot(X, Y, xs, ys, x=0, y=0, S_list = None, S0 = None, check_mark= False, **kwargs):
# Heat plot of SSP similarity of x and y values of xs and ys
# Input:
# X, Y - SSP basis vectors
# x, y - A single point to compare SSPs over the space with
# xs, ys - The x, y points to make the space tiling
# S_list - (optional) A list of the SSPs at all xs, ys tiled points (useful for high dim X,Y so that these do not
# have to recomputed every time this function is called)
# S0 - (optional) The SSP representing the x, y point (useful if for some reason you want a similarity plot
# of tiled SSPs with a non-SSP vector or a SSP with a different basis)
    # check_mark - (default False) Whether or not to put a black check mark at the x, y location
xx,yy = np.meshgrid(xs,ys)
basis = np.vstack([X.v, Y.v]).T
positions = np.vstack([xx.reshape(-1), yy.reshape(-1)]).T
position0 = np.array([x,y])
sim_dots, S_list = similarity_values(basis, positions, position0 = position0, S0 = S0, S_list = S_list)
plt.pcolormesh(xx, yy, sim_dots.reshape(xx.shape), **kwargs)
if check_mark:
plt.plot(x,y, 'k+')
return(sim_dots, S_list)
def add_item_pts(item_locations, items_markers, items_cols):
# Add items to plot at locations with marker symbols and colors given
for i in np.arange(item_locations.shape[0]):
plt.scatter(item_locations[i,0],item_locations[i,1],
marker=items_markers[i],s=60,c=items_cols[i],edgecolors='w')
def similarity_items_plot(M, Objs, X, Y, xs, ys, S_list = None, S0 = None, check_mark= False, **kwargs):
# Unbind each object from memory and add together the results - will be a sum of approximate SSPs
# representing the location of each object - and plot heat map
# Run add_item_pts after to get item positions marked
xx,yy = np.meshgrid(xs,ys)
basis = np.vstack([X.v, Y.v]).T
positions = np.vstack([xx.reshape(-1), yy.reshape(-1)]).T
position0 = np.array([0,0])
sim_dots, S_list = similarity_values(basis, positions, position0 = position0, S0 = M * ~Objs[0], S_list = S_list)
for i in np.arange(1,len(Objs)):
obj_dots, _ = similarity_values(basis, positions, position0 = position0, S0 = M * ~Objs[i], S_list = S_list)
sim_dots += obj_dots
plt.pcolormesh(xx, yy, sim_dots.reshape(xx.shape), cmap='viridis')
def ssp_plane_basis(K):
# Create the bases vectors X,Y as described in the paper with the wavevectors
    # (k_i = (u_i,v_i)) given in a matrix K. To get hexagonal patterns use 3 K vectors 120 degs apart
    # To get multi-scale/orientation patterns, give many such sets of 3 K vectors
# K is _ by 2
d = K.shape[0]
FX = np.ones((d*2 + 1,), dtype="complex")
FX[0:d] = np.exp(1.j*K[:,0])
FX[-d:] = np.flip(np.conj(FX[0:d]))
FX = np.fft.ifftshift(FX)
FY = np.ones((d*2 + 1,), dtype="complex")
FY[0:d] = np.exp(1.j*K[:,1])
FY[-d:] = np.flip(np.conj(FY[0:d]))
FY = np.fft.ifftshift(FY)
X = SemanticPointer(data=np.fft.ifft(FX), algebra=HrrAlgebra())
Y = SemanticPointer(data=np.fft.ifft(FY), algebra=HrrAlgebra())
return X, Y
def ssp_hex_basis(n_rotates,n_scales,scale_min=0.8, scale_max=3):
    # Create basis vectors X, Y consisting of multiple sets of hexagonal bases
K_hex = np.array([[0,1], [np.sqrt(3)/2,-0.5], [-np.sqrt(3)/2,-0.5]])
    scales = np.linspace(scale_min, scale_max, n_scales)
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Commonly used utility functions."""
import re
import copy
import warnings
from collections.abc import Iterable
from copy import deepcopy
import numpy as np
from scipy.spatial.distance import cdist
from astropy.time import Time
from astropy.coordinates import Angle
from astropy.utils import iers
from astropy.coordinates import SkyCoord, Distance, EarthLocation
from astropy import units
import erfa
from . import _utils
__all__ = [
"POL_STR2NUM_DICT",
"POL_NUM2STR_DICT",
"CONJ_POL_DICT",
"JONES_STR2NUM_DICT",
"JONES_NUM2STR_DICT",
"LatLonAlt_from_XYZ",
"XYZ_from_LatLonAlt",
"rotECEF_from_ECEF",
"ECEF_from_rotECEF",
"ENU_from_ECEF",
"ECEF_from_ENU",
"phase_uvw",
"unphase_uvw",
"uvcalibrate",
"apply_uvflag",
"get_lst_for_time",
"polstr2num",
"polnum2str",
"jstr2num",
"jnum2str",
"parse_polstr",
"parse_jpolstr",
"conj_pol",
"reorder_conj_pols",
"baseline_to_antnums",
"antnums_to_baseline",
"baseline_index_flip",
"get_baseline_redundancies",
"get_antenna_redundancies",
"collapse",
"mean_collapse",
"absmean_collapse",
"quadmean_collapse",
"or_collapse",
"and_collapse",
]
# fmt: off
# polarization constants
# maps polarization strings to polarization integers
POL_STR2NUM_DICT = {"pI": 1, "pQ": 2, "pU": 3, "pV": 4,
"I": 1, "Q": 2, "U": 3, "V": 4, # support straight stokes names
"rr": -1, "ll": -2, "rl": -3, "lr": -4,
"xx": -5, "yy": -6, "xy": -7, "yx": -8}
# maps polarization integers to polarization strings
POL_NUM2STR_DICT = {1: "pI", 2: "pQ", 3: "pU", 4: "pV",
-1: "rr", -2: "ll", -3: "rl", -4: "lr",
-5: "xx", -6: "yy", -7: "xy", -8: "yx"}
# maps how polarizations change when antennas are swapped
CONJ_POL_DICT = {"xx": "xx", "yy": "yy", "xy": "yx", "yx": "xy",
"ee": "ee", "nn": "nn", "en": "ne", "ne": "en",
"rr": "rr", "ll": "ll", "rl": "lr", "lr": "rl",
"I": "I", "Q": "Q", "U": "U", "V": "V",
"pI": "pI", "pQ": "pQ", "pU": "pU", "pV": "pV"}
# maps jones matrix element strings to jones integers
# Add entries that don't start with "J" to allow shorthand versions
JONES_STR2NUM_DICT = {"Jxx": -5, "Jyy": -6, "Jxy": -7, "Jyx": -8,
"xx": -5, "x": -5, "yy": -6, "y": -6, "xy": -7, "yx": -8,
"Jrr": -1, "Jll": -2, "Jrl": -3, "Jlr": -4,
"rr": -1, "r": -1, "ll": -2, "l": -2, "rl": -3, "lr": -4}
# maps jones integers to jones matrix element strings
JONES_NUM2STR_DICT = {-1: "Jrr", -2: "Jll", -3: "Jrl", -4: "Jlr",
-5: "Jxx", -6: "Jyy", -7: "Jxy", -8: "Jyx"}
# maps uvdata pols to input feed polarizations
POL_TO_FEED_DICT = {"xx": ["x", "x"], "yy": ["y", "y"],
"xy": ["x", "y"], "yx": ["y", "x"],
"ee": ["e", "e"], "nn": ["n", "n"],
"en": ["e", "n"], "ne": ["n", "e"],
"rr": ["r", "r"], "ll": ["l", "l"],
"rl": ["r", "l"], "lr": ["l", "r"]}
# fmt: on
def _get_iterable(x):
"""Return iterable version of input."""
if isinstance(x, Iterable):
return x
else:
return (x,)
def _fits_gethduaxis(hdu, axis):
"""
Make axis arrays for fits files.
Parameters
----------
hdu : astropy.io.fits HDU object
The HDU to make an axis array for.
axis : int
The axis number of interest (1-based).
Returns
-------
ndarray of float
Array of values for the specified axis.
"""
ax = str(axis)
axis_num = hdu.header["NAXIS" + ax]
val = hdu.header["CRVAL" + ax]
delta = hdu.header["CDELT" + ax]
index = hdu.header["CRPIX" + ax] - 1
return delta * (np.arange(axis_num) - index) + val
def _fits_indexhdus(hdulist):
"""
Get a dict of table names and HDU numbers from a FITS HDU list.
Parameters
----------
hdulist : list of astropy.io.fits HDU objects
List of HDUs to get names for
Returns
-------
dict
dictionary with table names as keys and HDU number as values.
"""
tablenames = {}
for i in range(len(hdulist)):
try:
tablenames[hdulist[i].header["EXTNAME"]] = i
except (KeyError):
continue
return tablenames
def _get_fits_extra_keywords(header, keywords_to_skip=None):
"""
Get any extra keywords and return as dict.
Parameters
----------
header : FITS header object
header object to get extra_keywords from.
keywords_to_skip : list of str
list of keywords to not include in extra keywords in addition to standard
FITS keywords.
Returns
-------
dict
dict of extra keywords.
"""
# List standard FITS header items that are still should not be included in
# extra_keywords
# These are the beginnings of FITS keywords to ignore, the actual keywords
# often include integers following these names (e.g. NAXIS1, CTYPE3)
std_fits_substrings = [
"HISTORY",
"SIMPLE",
"BITPIX",
"EXTEND",
"BLOCKED",
"GROUPS",
"PCOUNT",
"BSCALE",
"BZERO",
"NAXIS",
"PTYPE",
"PSCAL",
"PZERO",
"CTYPE",
"CRVAL",
"CRPIX",
"CDELT",
"CROTA",
"CUNIT",
]
if keywords_to_skip is not None:
std_fits_substrings.extend(keywords_to_skip)
extra_keywords = {}
# find all the other header items and keep them as extra_keywords
for key in header:
# check if key contains any of the standard FITS substrings
if np.any([sub in key for sub in std_fits_substrings]):
continue
if key == "COMMENT":
extra_keywords[key] = str(header.get(key))
elif key != "":
extra_keywords[key] = header.get(key)
return extra_keywords
def _check_history_version(history, version_string):
"""Check if version_string is present in history string."""
if version_string.replace(" ", "") in history.replace("\n", "").replace(" ", ""):
return True
else:
return False
def _check_histories(history1, history2):
"""Check if two histories are the same."""
if history1.replace("\n", "").replace(" ", "") == history2.replace(
"\n", ""
).replace(" ", ""):
return True
else:
return False
def _combine_history_addition(history1, history2):
"""
Find extra history to add to have minimal repeats.
Parameters
----------
history1 : str
First history.
history2 : str
Second history
Returns
-------
str
Extra history to add to first history.
"""
# first check if they're the same to avoid more complicated processing.
if _check_histories(history1, history2):
return None
hist2_words = history2.split(" ")
add_hist = ""
test_hist1 = " " + history1 + " "
for i, word in enumerate(hist2_words):
if " " + word + " " not in test_hist1:
add_hist += " " + word
keep_going = i + 1 < len(hist2_words)
while keep_going:
if (hist2_words[i + 1] == " ") or (
" " + hist2_words[i + 1] + " " not in test_hist1
):
add_hist += " " + hist2_words[i + 1]
del hist2_words[i + 1]
keep_going = i + 1 < len(hist2_words)
else:
keep_going = False
if add_hist == "":
add_hist = None
return add_hist
def baseline_to_antnums(baseline, Nants_telescope):
"""
Get the antenna numbers corresponding to a given baseline number.
Parameters
----------
baseline : int or array_like of ints
baseline number
Nants_telescope : int
number of antennas
Returns
-------
int or array_like of int
first antenna number(s)
int or array_like of int
second antenna number(s)
"""
if Nants_telescope > 2048:
raise Exception(
"error Nants={Nants}>2048 not supported".format(Nants=Nants_telescope)
)
return_array = isinstance(baseline, (np.ndarray, list, tuple))
ant1, ant2 = _utils.baseline_to_antnums(
np.ascontiguousarray(baseline, dtype=np.int64)
)
if return_array:
return ant1, ant2
else:
return ant1.item(0), ant2.item(0)
def antnums_to_baseline(ant1, ant2, Nants_telescope, attempt256=False):
"""
Get the baseline number corresponding to two given antenna numbers.
Parameters
----------
ant1 : int or array_like of int
first antenna number
ant2 : int or array_like of int
second antenna number
Nants_telescope : int
number of antennas
attempt256 : bool
Option to try to use the older 256 standard used in
many uvfits files (will use 2048 standard if there are more
than 256 antennas). Default is False.
Returns
-------
int or array of int
baseline number corresponding to the two antenna numbers.
"""
if Nants_telescope is not None and Nants_telescope > 2048:
raise Exception(
"cannot convert ant1, ant2 to a baseline index "
"with Nants={Nants}>2048.".format(Nants=Nants_telescope)
)
return_array = isinstance(ant1, (np.ndarray, list, tuple))
baseline = _utils.antnums_to_baseline(
np.ascontiguousarray(ant1, dtype=np.int64),
np.ascontiguousarray(ant2, dtype=np.int64),
attempt256=attempt256,
)
if return_array:
return baseline
else:
return baseline.item(0)
def baseline_index_flip(baseline, Nants_telescope):
"""Change baseline number to reverse antenna order."""
ant1, ant2 = baseline_to_antnums(baseline, Nants_telescope)
return antnums_to_baseline(ant2, ant1, Nants_telescope)
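# Hedged usage sketch: round-trip between antenna pairs and baseline numbers
# (the >256-antenna encoding is used since attempt256 defaults to False).
#
#     bl = antnums_to_baseline(1, 2, Nants_telescope=128)
#     ant1, ant2 = baseline_to_antnums(bl, Nants_telescope=128)  # -> (1, 2)
#     flipped = baseline_index_flip(bl, Nants_telescope=128)     # swaps 1 and 2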
def _x_orientation_rep_dict(x_orientation):
"""Create replacement dict based on x_orientation."""
if x_orientation.lower() == "east" or x_orientation.lower() == "e":
return {"x": "e", "y": "n"}
elif x_orientation.lower() == "north" or x_orientation.lower() == "n":
return {"x": "n", "y": "e"}
else:
raise ValueError("x_orientation not recognized.")
def polstr2num(pol, x_orientation=None):
"""
Convert polarization str to number according to AIPS Memo 117.
Prefer 'pI', 'pQ', 'pU' and 'pV' to make it clear that these are pseudo-Stokes,
not true Stokes, but also supports 'I', 'Q', 'U', 'V'.
Parameters
----------
pol : str
polarization string
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization ("east" or "north") to allow for
        converting from E/N strings. See corresponding parameter on UVData
for more details.
Returns
-------
int
Number corresponding to string
Raises
------
ValueError
If the pol string cannot be converted to a polarization number.
Warns
-----
UserWarning
If the x_orientation not recognized.
"""
dict_use = copy.deepcopy(POL_STR2NUM_DICT)
if x_orientation is not None:
try:
rep_dict = _x_orientation_rep_dict(x_orientation)
for key, value in POL_STR2NUM_DICT.items():
new_key = key.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
dict_use[new_key] = value
except ValueError:
warnings.warn("x_orientation not recognized.")
poldict = {k.lower(): v for k, v in dict_use.items()}
if isinstance(pol, str):
out = poldict[pol.lower()]
elif isinstance(pol, Iterable):
out = [poldict[key.lower()] for key in pol]
else:
raise ValueError(
"Polarization {p} cannot be converted to a polarization number.".format(
p=pol
)
)
return out
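# Hedged usage sketch, following the AIPS Memo 117 numbering defined above:
#
#     polstr2num("xx")                          # -> -5
#     polstr2num(["pI", "pQ"])                  # -> [1, 2]
#     polstr2num("ee", x_orientation="east")    # -> -5 ("e" maps to "x")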
def polnum2str(num, x_orientation=None):
"""
Convert polarization number to str according to AIPS Memo 117.
Uses 'pI', 'pQ', 'pU' and 'pV' to make it clear that these are pseudo-Stokes,
not true Stokes
Parameters
----------
num : int
polarization number
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization ("east" or "north") to convert to
        E/N strings. See corresponding parameter on UVData for more details.
Returns
-------
str
String corresponding to polarization number
Raises
------
ValueError
If the polarization number cannot be converted to a polarization string.
Warns
-----
UserWarning
If the x_orientation not recognized.
"""
dict_use = copy.deepcopy(POL_NUM2STR_DICT)
if x_orientation is not None:
try:
rep_dict = _x_orientation_rep_dict(x_orientation)
for key, value in POL_NUM2STR_DICT.items():
new_val = value.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
dict_use[key] = new_val
except ValueError:
warnings.warn("x_orientation not recognized.")
if isinstance(num, (int, np.int32, np.int64)):
out = dict_use[num]
elif isinstance(num, Iterable):
out = [dict_use[i] for i in num]
else:
raise ValueError(
"Polarization {p} cannot be converted to string.".format(p=num)
)
return out
def jstr2num(jstr, x_orientation=None):
"""
Convert jones polarization str to number according to calfits memo.
Parameters
----------
jstr : str
antenna (jones) polarization string
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization ("east" or "north") to allow for
converting from E/N strings. See corresponding parameter on UVData
for more details.
Returns
-------
int
antenna (jones) polarization number corresponding to string
Raises
------
ValueError
If the jones string cannot be converted to a polarization number.
Warns
-----
UserWarning
If the x_orientation is not recognized.
"""
dict_use = copy.deepcopy(JONES_STR2NUM_DICT)
if x_orientation is not None:
try:
rep_dict = _x_orientation_rep_dict(x_orientation)
for key, value in JONES_STR2NUM_DICT.items():
new_key = key.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
dict_use[new_key] = value
except ValueError:
warnings.warn("x_orientation not recognized.")
jdict = {k.lower(): v for k, v in dict_use.items()}
if isinstance(jstr, str):
out = jdict[jstr.lower()]
elif isinstance(jstr, Iterable):
out = [jdict[key.lower()] for key in jstr]
else:
raise ValueError(
"Jones polarization {j} cannot be converted to index.".format(j=jstr)
)
return out
def jnum2str(jnum, x_orientation=None):
"""
Convert jones polarization number to str according to calfits memo.
Parameters
----------
jnum : int
antenna (jones) polarization number
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization ("east" or "north") to convert to
E/N strings. See corresponding parameter on UVData for more details.
Returns
-------
str
antenna (jones) polarization string corresponding to number
Raises
------
ValueError
If the jones polarization number cannot be converted to a jones
polarization string.
Warns
-----
UserWarning
If the x_orientation is not recognized.
"""
dict_use = copy.deepcopy(JONES_NUM2STR_DICT)
if x_orientation is not None:
try:
rep_dict = _x_orientation_rep_dict(x_orientation)
for key, value in JONES_NUM2STR_DICT.items():
new_val = value.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
dict_use[key] = new_val
except ValueError:
warnings.warn("x_orientation not recognized.")
if isinstance(jnum, (int, np.int32, np.int64)):
out = dict_use[jnum]
elif isinstance(jnum, Iterable):
out = [dict_use[i] for i in jnum]
else:
raise ValueError(
"Jones polarization {j} cannot be converted to string.".format(j=jnum)
)
return out
def parse_polstr(polstr, x_orientation=None):
"""
Parse a polarization string and return pyuvdata standard polarization string.
See utils.POL_STR2NUM_DICT for options.
Parameters
----------
polstr : str
polarization string
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization ("east" or "north") to allow for
converting from E/N strings. See corresponding parameter on UVData
for more details.
Returns
-------
str
AIPS Memo 117 standard string
Raises
------
ValueError
If the pol string cannot be converted to a polarization number.
Warns
-----
UserWarning
If the x_orientation is not recognized.
"""
return polnum2str(
polstr2num(polstr, x_orientation=x_orientation), x_orientation=x_orientation
)
def parse_jpolstr(jpolstr, x_orientation=None):
"""
Parse a Jones polarization string and return pyuvdata standard jones string.
See utils.JONES_STR2NUM_DICT for options.
Parameters
----------
jpolstr : str
Jones polarization string
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization ("east" or "north") to allow for
converting from E/N strings. See corresponding parameter on UVData
for more details.
Returns
-------
str
calfits memo standard string
Raises
------
ValueError
If the jones string cannot be converted to a polarization number.
Warns
-----
UserWarning
If the x_orientation is not recognized.
"""
return jnum2str(
jstr2num(jpolstr, x_orientation=x_orientation), x_orientation=x_orientation
)
def conj_pol(pol):
"""
Return the polarization for the conjugate baseline.
For example, (1, 2, 'xy') = conj(2, 1, 'yx').
The returned polarization is determined by assuming the antenna pair is
reversed in the data, and finding the correct polarization correlation
which will yield the requested baseline when conjugated. Note this means
changing the polarization for linear cross-pols, but keeping auto-pol
(e.g. xx) and Stokes the same.
Parameters
----------
pol : str or int
Polarization string or integer.
Returns
-------
cpol : str or int
Polarization as if antennas are swapped (type matches input)
"""
cpol_dict = {k.lower(): v for k, v in CONJ_POL_DICT.items()}
if isinstance(pol, str):
cpol = cpol_dict[pol.lower()]
elif isinstance(pol, Iterable):
cpol = [conj_pol(p) for p in pol]
elif isinstance(pol, (int, np.int32, np.int64)):
cpol = polstr2num(cpol_dict[polnum2str(pol).lower()])
else:
raise ValueError("Polarization not recognized, cannot be conjugated.")
return cpol
def reorder_conj_pols(pols):
"""
Reorder multiple pols, swapping pols that are conjugates of one another.
For example ('xx', 'xy', 'yx', 'yy') -> ('xx', 'yx', 'xy', 'yy')
This is useful for the _key2inds function in the case where an antenna
pair is specified but the conjugate pair exists in the data. The conjugated
data should be returned in the order of the polarization axis, so after
conjugating the data, the pols need to be reordered.
For example, if a file contains antpair (0, 1) and pols 'xy' and 'yx', but
the user requests antpair (1, 0), they should get:
[(1x, 0y), (1y, 0x)] = [conj(0y, 1x), conj(0x, 1y)]
Parameters
----------
pols : array_like of str or int
Polarization array (strings or ints).
Returns
-------
conj_order : ndarray of int
Indices to reorder polarization array.
"""
if not isinstance(pols, Iterable):
raise ValueError("reorder_conj_pols must be given an array of polarizations.")
cpols = np.array([conj_pol(p) for p in pols]) # Array needed for np.where
conj_order = [np.where(cpols == p)[0][0] if p in cpols else -1 for p in pols]
if -1 in conj_order:
raise ValueError(
"Not all conjugate pols exist in the polarization array provided."
)
return conj_order
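# A short sketch: conjugating swaps the linear cross-pols but leaves autos and
# (pseudo-)Stokes alone, and reorder_conj_pols returns the matching index order.
#
#   conj_pol('xy')                               # -> 'yx'
#   conj_pol('xx')                               # -> 'xx'
#   reorder_conj_pols(['xx', 'xy', 'yx', 'yy'])  # -> [0, 2, 1, 3]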
def LatLonAlt_from_XYZ(xyz, check_acceptability=True):
"""
Calculate lat/lon/alt from ECEF x,y,z.
Parameters
----------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
check_acceptability : bool
Flag to check XYZ coordinates are reasonable.
Returns
-------
latitude : ndarray or float
latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
longitude : ndarray or float
longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
altitude : ndarray or float
altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters
"""
# convert to a numpy array
xyz = np.asarray(xyz)
if xyz.ndim > 1 and xyz.shape[1] != 3:
raise ValueError("The expected shape of ECEF xyz array is (Npts, 3).")
squeeze = xyz.ndim == 1
if squeeze:
xyz = xyz[np.newaxis, :]
xyz = np.ascontiguousarray(xyz.T, dtype=np.float64)
# checking for acceptable values
if check_acceptability:
norms = np.linalg.norm(xyz, axis=0)
if not all(np.logical_and(norms >= 6.35e6, norms <= 6.39e6)):
raise ValueError("xyz values should be ECEF x, y, z coordinates in meters")
# this helper function returns one 2D array because it is less overhead for cython
lla = _utils._lla_from_xyz(xyz)
if squeeze:
return lla[0, 0], lla[1, 0], lla[2, 0]
return lla[0], lla[1], lla[2]
def XYZ_from_LatLonAlt(latitude, longitude, altitude):
"""
Calculate ECEF x,y,z from lat/lon/alt values.
Parameters
----------
latitude : ndarray or float
latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
longitude : ndarray or float
longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
altitude : ndarray or float
altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters
Returns
-------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
"""
latitude = np.ascontiguousarray(latitude, dtype=np.float64)
longitude = np.ascontiguousarray(longitude, dtype=np.float64)
altitude = np.ascontiguousarray(altitude, dtype=np.float64)
n_pts = latitude.size
if longitude.size != n_pts:
raise ValueError(
"latitude, longitude and altitude must all have the same length"
)
if altitude.size != n_pts:
raise ValueError(
"latitude, longitude and altitude must all have the same length"
)
xyz = _utils._xyz_from_latlonalt(latitude, longitude, altitude)
xyz = xyz.T
if n_pts == 1:
return xyz[0]
return xyz
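# A round-trip sketch (assumes the compiled ``_utils`` extension is built);
# the site values below are illustrative, roughly a MeerKAT-like location.
#
#   lat, lon, alt = np.radians(-30.71), np.radians(21.44), 1050.0
#   xyz = XYZ_from_LatLonAlt(lat, lon, alt)   # shape (3,)
#   LatLonAlt_from_XYZ(xyz)                   # ~(lat, lon, alt), to numerical precision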
def rotECEF_from_ECEF(xyz, longitude):
"""
Get rotated ECEF positions such that the x-axis goes through the longitude.
Miriad and uvfits expect antenna positions in this frame
(with longitude of the array center/telescope location)
Parameters
----------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
longitude : float
longitude in radians to rotate coordinates to
(usually the array center/telescope location).
Returns
-------
ndarray of float
Rotated ECEF coordinates, shape (Npts, 3).
"""
angle = -1 * longitude
rot_matrix = np.array(
[
[np.cos(angle), -1 * np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1],
]
)
return rot_matrix.dot(xyz.T).T
def ECEF_from_rotECEF(xyz, longitude):
"""
Calculate ECEF from a rotated ECEF (Inverse of rotECEF_from_ECEF).
Parameters
----------
xyz : ndarray of float
numpy array, shape (Npts, 3), with rotated ECEF x,y,z coordinates.
longitude : float
longitude in radians giving the x direction of the rotated coordinates
(usually the array center/telescope location).
Returns
-------
ndarray of float
ECEF coordinates, shape (Npts, 3).
"""
angle = longitude
rot_matrix = np.array(
[
[np.cos(angle), -1 * np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1],
]
)
return rot_matrix.dot(xyz.T).T
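# A quick self-consistency sketch: the two rotations are exact inverses for
# any longitude.
#
#   pos = np.array([[6378137.0, 0.0, 0.0]])
#   rot = rotECEF_from_ECEF(pos, 0.5)
#   np.allclose(ECEF_from_rotECEF(rot, 0.5), pos)  # -> True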
def ENU_from_ECEF(xyz, latitude, longitude, altitude):
"""
Calculate local ENU (east, north, up) coordinates from ECEF coordinates.
Parameters
----------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
latitude : float
Latitude of center of ENU coordinates in radians.
longitude : float
Longitude of center of ENU coordinates in radians.
altitude : float
Altitude of center of ENU coordinates in meters.
Returns
-------
ndarray of float
numpy array, shape (Npts, 3), with local ENU coordinates
"""
xyz = np.asarray(xyz)
if xyz.ndim > 1 and xyz.shape[1] != 3:
raise ValueError("The expected shape of ECEF xyz array is (Npts, 3).")
squeeze = False
if xyz.ndim == 1:
squeeze = True
xyz = xyz[np.newaxis, :]
xyz = np.ascontiguousarray(xyz.T, dtype=np.float64)
# check that these are sensible ECEF values -- their magnitudes need to be
# on the order of Earth's radius
ecef_magnitudes = np.linalg.norm(xyz, axis=0)
sensible_radius_range = (6.35e6, 6.39e6)
if np.any(ecef_magnitudes <= sensible_radius_range[0]) or np.any(
ecef_magnitudes >= sensible_radius_range[1]
):
raise ValueError(
"ECEF vector magnitudes must be on the order of the radius of the earth"
)
# the cython utility expects (3, Npts) for faster manipulation
# transpose after we get the array back to match the expected shape
enu = _utils._ENU_from_ECEF(
xyz,
np.ascontiguousarray(latitude, dtype=np.float64),
np.ascontiguousarray(longitude, dtype=np.float64),
np.ascontiguousarray(altitude, dtype=np.float64),
)
enu = enu.T
if squeeze:
enu = np.squeeze(enu)
return enu
def ECEF_from_ENU(enu, latitude, longitude, altitude):
"""
Calculate ECEF coordinates from local ENU (east, north, up) coordinates.
Parameters
----------
enu : ndarray of float
numpy array, shape (Npts, 3), with local ENU coordinates.
latitude : float
Latitude of center of ENU coordinates in radians.
longitude : float
Longitude of center of ENU coordinates in radians.
altitude : float
Altitude of center of ENU coordinates in meters.
Returns
-------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
"""
enu = np.asarray(enu)
if enu.ndim > 1 and enu.shape[1] != 3:
raise ValueError("The expected shape of the ENU array is (Npts, 3).")
squeeze = False
if enu.ndim == 1:
squeeze = True
enu = enu[np.newaxis, :]
enu = np.ascontiguousarray(enu.T, dtype=np.float64)
# the cython utility expects (3, Npts) for faster manipulation
# transpose after we get the array back to match the expected shape
xyz = _utils._ECEF_from_ENU(
enu,
np.ascontiguousarray(latitude, dtype=np.float64),
np.ascontiguousarray(longitude, dtype=np.float64),
np.ascontiguousarray(altitude, dtype=np.float64),
)
xyz = xyz.T
if squeeze:
xyz = np.squeeze(xyz)
return xyz
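# A round-trip sketch (assumes the compiled ``_utils`` extension is built):
# a short east-pointing offset survives ENU -> ECEF -> ENU unchanged.
#
#   lat, lon, alt = np.radians(-30.71), np.radians(21.44), 1050.0
#   enu = np.array([[10.0, 0.0, 0.0]])
#   xyz = ECEF_from_ENU(enu, lat, lon, alt)
#   np.allclose(ENU_from_ECEF(xyz, lat, lon, alt), enu)  # -> True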
def phase_uvw(ra, dec, initial_uvw):
"""
Calculate phased uvws/positions from unphased ones in an icrs or gcrs frame.
This code expects input uvws or positions relative to the telescope
location in the same frame that ra/dec are in (e.g. icrs or gcrs) and
returns phased ones in the same frame.
Note that this code is nearly identical to ENU_from_ECEF, except that it
uses an arbitrary phasing center rather than a coordinate center.
Parameters
----------
ra : float
Right ascension of phase center.
dec : float
Declination of phase center.
initial_uvw : ndarray of float
Unphased uvws or positions relative to the array center,
shape (Nlocs, 3).
Returns
-------
uvw : ndarray of float
uvw array in the same frame as initial_uvws, ra and dec.
"""
if initial_uvw.ndim == 1:
initial_uvw = initial_uvw[np.newaxis, :]
return _utils._phase_uvw(
np.float64(ra),
np.float64(dec),
np.ascontiguousarray(initial_uvw.T, dtype=np.float64),
).T
def unphase_uvw(ra, dec, uvw):
"""
Calculate unphased uvws/positions from phased ones in an icrs or gcrs frame.
This code expects phased uvws or positions in the same frame that ra/dec
are in (e.g. icrs or gcrs) and returns unphased ones in the same frame.
Parameters
----------
ra : float
Right ascension of phase center.
dec : float
Declination of phase center.
uvw : ndarray of float
Phased uvws or positions relative to the array center,
shape (Nlocs, 3).
Returns
-------
unphased_uvws : ndarray of float
Unphased uvws or positions relative to the array center,
shape (Nlocs, 3).
"""
if uvw.ndim == 1:
uvw = uvw[np.newaxis, :]
return _utils._unphase_uvw(
np.float64(ra), np.float64(dec), np.ascontiguousarray(uvw.T, dtype=np.float64),
).T
def polar2_to_cart3(lon_array, lat_array):
"""
Convert 2D polar coordinates into 3D cartesian coordinates.
This is a simple routine for converting a set of spherical angular coordinates
into a 3D cartesian vectors, where the x-direction is set by the position (0, 0).
Parameters
----------
lon_array : float or ndarray
Longitude coordinates, which increase in the counter-clockwise direction.
Units of radians. Can either be a float or ndarray -- if the latter, must have
the same shape as lat_array.
lat_array : float or ndarray
Latitude coordinates, where 0 falls on the equator of the sphere. Units of
radians. Can either be a float or ndarray -- if the latter, must have the same
shape as lon_array.
Returns
-------
xyz_array : ndarray of float
Cartesian coordinates of the given longitude and latitude on a unit sphere.
Shape is (3, coord_shape), where coord_shape is the shape of lon_array and
lat_array if they were provided as type ndarray, otherwise (3,).
"""
# Check to make sure that we are not playing with mixed types
if type(lon_array) is not type(lat_array):
raise ValueError(
"lon_array and lat_array must either both be floats or ndarrays."
)
if isinstance(lon_array, np.ndarray):
if lon_array.shape != lat_array.shape:
raise ValueError("lon_array and lat_array must have the same shape.")
# Once we know that lon_array and lat_array are of the same shape,
# time to create our 3D set of vectors!
xyz_array = np.array(
[
np.cos(lon_array) * np.cos(lat_array),
np.sin(lon_array) * np.cos(lat_array),
np.sin(lat_array),
],
dtype=float,
)
return xyz_array
def cart3_to_polar2(xyz_array):
"""
Convert 3D cartesian coordinates into 2D polar coordinates.
This is a simple routine for converting a set of 3D cartesian vectors into
spherical coordinates, where the position (0, 0) lies along the x-direction.
Parameters
----------
xyz_array : ndarray of float
Cartesian coordinates, need not be of unit vector length. Shape is
(3, coord_shape).
Returns
-------
lon_array : ndarray of float
Longitude coordinates, which increase in the counter-clockwise direction.
Units of radians, shape is (coord_shape,).
lat_array : ndarray of float
Latitude coordinates, where 0 falls on the equator of the sphere. Units of
radians, shape is (coord_shape,).
"""
if not isinstance(xyz_array, np.ndarray):
raise ValueError("xyz_array must be an ndarray.")
if xyz_array.ndim == 0:
raise ValueError("xyz_array must have ndim > 0")
if xyz_array.shape[0] != 3:
raise ValueError("xyz_array must be length 3 across the zeroth axis.")
# The longitude coord is relatively easy to calculate, just take the X and Y
components and find the arctan of the pair.
lon_array = np.mod(np.arctan2(xyz_array[1], xyz_array[0]), 2.0 * np.pi, dtype=float)
# If we _knew_ that xyz_array was always of length 1, then this call could be a much
# simpler one to arcsin. But to make this generic, we'll use the length of the XY
# component along with arctan2.
lat_array = np.arctan2(
xyz_array[2], np.sqrt((xyz_array[0:2] ** 2.0).sum(axis=0)), dtype=float
)
# Return the two arrays
return lon_array, lat_array
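# A small sketch of the conventions: (lon, lat) = (pi/2, 0) maps to the y unit
# vector, and cart3_to_polar2 inverts the mapping.
#
#   xyz = polar2_to_cart3(np.array([np.pi / 2]), np.array([0.0]))  # ~[[0], [1], [0]]
#   lon, lat = cart3_to_polar2(xyz)                                # ~(pi/2, 0.0)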
def _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot):
"""
Apply a rotation matrix to a series of vectors.
This is a simple convenience function which wraps numpy's matmul function for use
with various vector rotation functions in this module. This code could, in
principle, be replaced by a cythonized piece of code, although the matmul function
is _pretty_ well optimized already. This function is not meant to be called by
users, but is instead used by multiple higher-level utility functions (namely those
that perform rotations).
Parameters
----------
xyz_array : ndarray of floats
Array of vectors to be rotated. When n_rot > 1, shape may be (n_rot, 3, n_vec)
or (1, 3, n_vec); the latter is useful when performing multiple rotations
on a fixed set of vectors. If n_rot = 1, shape may be (1, 3, n_vec), (3, n_vec),
or (3,).
rot_matrix : ndarray of floats
Series of rotation matrices to be applied to the stack of vectors. Must be
of shape (n_rot, 3, 3).
n_rot : int
Number of individual rotation matrices to be applied.
Returns
-------
rotated_xyz : ndarray of floats
Array of vectors that have been rotated, of shape (n_rot, 3, n_vectors,).
"""
# Do a quick check to make sure that things look sensible
if rot_matrix.shape != (n_rot, 3, 3):
raise ValueError(
"rot_matrix must be of shape (n_rot, 3, 3), where n_rot=%i." % n_rot
)
if (xyz_array.ndim == 3) and (
(xyz_array.shape[0] not in [1, n_rot]) or (xyz_array.shape[-2] != 3)
):
raise ValueError("Misshaped xyz_array - expected shape (n_rot, 3, n_vectors).")
if (xyz_array.ndim < 3) and (xyz_array.shape[0] != 3):
raise ValueError("Misshaped xyz_array - expected shape (3, n_vectors) or (3,).")
rotated_xyz = np.matmul(rot_matrix, xyz_array)
return rotated_xyz
def _rotate_one_axis(xyz_array, rot_amount, rot_axis):
"""
Rotate an array of 3D positions around a single axis (x, y, or z).
This function performs a basic rotation of 3D vectors about one of the principal
axes -- the x-axis, the y-axis, or the z-axis.
Note that the rotations here obey the right-hand rule -- that is to say, from the
perspective of the positive side of the axis of rotation, a positive rotation will
cause points on the plane intersecting this axis to move in a counter-clockwise
fashion.
Parameters
----------
xyz_array : ndarray of float
Set of 3-dimensional vectors to be rotated, in typical right-handed cartesian
order, e.g. (x, y, z). Shape is (Nrot, 3, Nvectors).
rot_amount : float or ndarray of float
Amount (in radians) to rotate the given set of coordinates. Can either be a
single float (or ndarray of shape (1,)) if rotating all vectors by the same
amount, otherwise expected to be shape (Nrot,).
rot_axis : int
Axis around which the rotation is applied. 0 is the x-axis, 1 is the y-axis,
and 2 is the z-axis.
Returns
-------
rotated_xyz : ndarray of float
Set of rotated 3-dimensional vectors, shape (Nrot, 3, Nvector).
"""
# If rot_amount is None or all zeros, then this is just one big old no-op.
if (rot_amount is None) or np.all(rot_amount == 0.0):
if np.ndim(xyz_array) == 1:
return deepcopy(xyz_array[np.newaxis, :, np.newaxis])
elif np.ndim(xyz_array) == 2:
return deepcopy(xyz_array[np.newaxis, :, :])
else:
return deepcopy(xyz_array)
# Check and see how big of a rotation matrix we need
n_rot = 1 if (not isinstance(rot_amount, np.ndarray)) else (rot_amount.shape[0])
n_vec = xyz_array.shape[-1]
# The promotion of values to float64 is to suppress numerical precision issues,
# since the matrix math can - in limited circumstances - introduce precision errors
# of order 10x the limiting numerical precision of the float. For a float32/single,
# that's a part in 1e6 (~arcsec-level errors), but for a float64 it translates to
# a part in 1e15.
rot_matrix = np.zeros((3, 3, n_rot), dtype=np.float64)
# Figure out which pieces of the matrix we need to update
temp_jdx = (rot_axis + 1) % 3
temp_idx = (rot_axis + 2) % 3
# Fill in the rotation matrices accordingly
rot_matrix[rot_axis, rot_axis] = 1
rot_matrix[temp_idx, temp_idx] = np.cos(rot_amount, dtype=np.float64)
rot_matrix[temp_jdx, temp_jdx] = rot_matrix[temp_idx, temp_idx]
rot_matrix[temp_idx, temp_jdx] = np.sin(rot_amount, dtype=np.float64)
rot_matrix[temp_jdx, temp_idx] = -rot_matrix[temp_idx, temp_jdx]
# The rot matrix was shape (3, 3, n_rot) to help speed up filling in the elements
# of each matrix, but now we want to flip it into its proper shape of (n_rot, 3, 3)
rot_matrix = np.transpose(rot_matrix, axes=[2, 0, 1])
if (n_rot == 1) and (n_vec == 1) and (xyz_array.ndim == 3):
# This is a special case where we allow the rotation axis to "expand" along
# the 0th axis of the rot_amount arrays. For xyz_array, if n_vectors = 1
# but n_rot !=1, then it's a lot faster (by about 10x) to "switch it up" and
# swap the n_vector and n_rot axes, and then swap them back once everything
# else is done.
return np.transpose(
_rotate_matmul_wrapper(
np.transpose(xyz_array, axes=[2, 1, 0]), rot_matrix, n_rot,
),
axes=[2, 1, 0],
)
else:
return _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot)
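# A quick sanity sketch of the right-hand-rule convention: rotating the x unit
# vector by +90 degrees about the z-axis yields the y unit vector.
#
#   vec = np.array([[1.0], [0.0], [0.0]])         # shape (3, 1)
#   out = _rotate_one_axis(vec, np.pi / 2, 2)     # shape (1, 3, 1)
#   np.allclose(out[0, :, 0], [0.0, 1.0, 0.0])    # -> True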
def _rotate_two_axis(xyz_array, rot_amount1, rot_amount2, rot_axis1, rot_axis2):
"""
Rotate an array of 3D positions sequentially around a pair of axes (x, y, or z).
This function performs a sequential pair of basic rotations of 3D vectors about
the principal axes -- the x-axis, the y-axis, or the z-axis.
Note that the rotations here obey the right-hand rule -- that is to say, from the
perspective of the positive side of the axis of rotation, a positive rotation will
cause points on the plane intersecting this axis to move in a counter-clockwise
fashion.
Parameters
----------
xyz_array : ndarray of float
Set of 3-dimensional vectors to be rotated, in typical right-handed cartesian
order, e.g. (x, y, z). Shape is (Nrot, 3, Nvectors).
rot_amount1 : float or ndarray of float
Amount (in radians) of rotation to apply during the first rotation of the
sequence, to the given set of coordinates. Can either be a single float (or
ndarray of shape (1,)) if rotating all vectors by the same amount, otherwise
expected to be shape (Nrot,).
rot_amount2 : float or ndarray of float
Amount (in radians) of rotation to apply during the second rotation of the
sequence, to the given set of coordinates. Can either be a single float (or
ndarray of shape (1,)) if rotating all vectors by the same amount, otherwise
expected to be shape (Nrot,).
rot_axis1 : int
Axis around which the first rotation is applied. 0 is the x-axis, 1 is the
y-axis, and 2 is the z-axis.
rot_axis2 : int
Axis around which the second rotation is applied. 0 is the x-axis, 1 is the
y-axis, and 2 is the z-axis.
Returns
-------
rotated_xyz : ndarray of float
Set of rotated 3-dimensional vectors, shape (Nrot, 3, Nvector).
"""
# Capture some special cases upfront, where we can save ourselves a bit of work
no_rot1 = (rot_amount1 is None) or np.all(rot_amount1 == 0.0)
no_rot2 = (rot_amount2 is None) or np.all(rot_amount2 == 0.0)
if no_rot1 and no_rot2:
# If rot_amount is None, then this is just one big old no-op.
return deepcopy(xyz_array)
elif no_rot1:
# If rot_amount1 is None, then ignore it and just work w/ the 2nd rotation
return _rotate_one_axis(xyz_array, rot_amount2, rot_axis2)
elif no_rot2:
# If rot_amount2 is None, then ignore it and just work w/ the 1st rotation
return _rotate_one_axis(xyz_array, rot_amount1, rot_axis1)
elif rot_axis1 == rot_axis2:
# Capture the case where someone wants to do a sequence of rotations on the same
# axis. Also known as just rotating a single axis.
return _rotate_one_axis(xyz_array, rot_amount1 + rot_amount2, rot_axis1)
# Figure out how many individual rotation matrices we need, accounting for the
# fact that these can either be floats or ndarrays.
n_rot = max(
rot_amount1.shape[0] if isinstance(rot_amount1, np.ndarray) else 1,
rot_amount2.shape[0] if isinstance(rot_amount2, np.ndarray) else 1,
)
n_vec = xyz_array.shape[-1]
# The promotion of values to float64 is to suppress numerical precision issues,
# since the matrix math can - in limited circumstances - introduce precision errors
# of order 10x the limiting numerical precision of the float. For a float32/single,
# that's a part in 1e6 (~arcsec-level errors), but for a float64 it translates to
# a part in 1e15.
rot_matrix = np.empty((3, 3, n_rot), dtype=np.float64)
# There are two permutations per pair of axes -- when the pair is right-hand
# oriented vs left-hand oriented. Check here which one it is. For example,
# rotating first on the x-axis, second on the y-axis is considered a
# "right-handed" pair, whereas z-axis first, then y-axis would be considered
# a "left-handed" pair.
lhd_order = np.mod(rot_axis2 - rot_axis1, 3) != 1
temp_idx = [
np.mod(rot_axis1 - lhd_order, 3),
np.mod(rot_axis1 + 1 - lhd_order, 3),
np.mod(rot_axis1 + 2 - lhd_order, 3),
]
# We're using lots of sin and cos calculations -- doing them once upfront saves
# quite a bit of time by eliminating redundant calculations
sin_lo = np.sin(rot_amount2 if lhd_order else rot_amount1, dtype=np.float64)
cos_lo = np.cos(rot_amount2 if lhd_order else rot_amount1, dtype=np.float64)
sin_hi = np.sin(rot_amount1 if lhd_order else rot_amount2, dtype=np.float64)
cos_hi = np.cos(rot_amount1 if lhd_order else rot_amount2, dtype=np.float64)
# Take care of the diagonal terms first, since they aren't actually affected by the
# order of rotational operations
rot_matrix[temp_idx[0], temp_idx[0]] = cos_hi
rot_matrix[temp_idx[1], temp_idx[1]] = cos_lo
rot_matrix[temp_idx[2], temp_idx[2]] = cos_lo * cos_hi
# Now time for the off-diagonal terms, as a set of 3 pairs. The rotation matrix
# for a left-hand oriented pair of rotation axes (e.g., x-rot, then y-rot) is just
# a transpose of the right-hand orientation of the same pair (e.g., y-rot, then
# x-rot).
rot_matrix[temp_idx[0 + lhd_order], temp_idx[1 - lhd_order]] = sin_lo * sin_hi
rot_matrix[temp_idx[0 - lhd_order], temp_idx[lhd_order - 1]] = (
cos_lo * sin_hi * ((-1.0) ** lhd_order)
)
rot_matrix[temp_idx[1 - lhd_order], temp_idx[0 + lhd_order]] = 0.0
rot_matrix[temp_idx[1 + lhd_order], temp_idx[2 - lhd_order]] = sin_lo * (
(-1.0) ** (1 + lhd_order)
)
rot_matrix[temp_idx[lhd_order - 1], temp_idx[0 - lhd_order]] = sin_hi * (
(-1.0) ** (1 + lhd_order)
)
rot_matrix[temp_idx[2 - lhd_order], temp_idx[1 + lhd_order]] = (
sin_lo * cos_hi * ((-1.0) ** (lhd_order))
)
# The rot matrix was shape (3, 3, n_rot) to help speed up filling in the elements
# of each matrix, but now we want to flip it into its proper shape of (n_rot, 3, 3)
rot_matrix = np.transpose(rot_matrix, axes=[2, 0, 1])
if (n_rot == 1) and (n_vec == 1) and (xyz_array.ndim == 3):
# This is a special case where we allow the rotation axis to "expand" along
# the 0th axis of the rot_amount arrays. For xyz_array, if n_vectors = 1
# but n_rot !=1, then it's a lot faster (by about 10x) to "switch it up" and
# swap the n_vector and n_rot axes, and then swap them back once everything
# else is done.
return np.transpose(
_rotate_matmul_wrapper(
np.transpose(xyz_array, axes=[2, 1, 0]), rot_matrix, n_rot,
),
axes=[2, 1, 0],
)
else:
return _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot)
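# A consistency sketch (per the docstring, rot_amount1/rot_axis1 is applied
# first): the fused two-axis rotation should match two chained single-axis
# rotations.
#
#   vec = np.array([[1.0], [0.0], [0.0]])
#   chained = _rotate_one_axis(_rotate_one_axis(vec, 0.3, 2), 0.5, 1)
#   fused = _rotate_two_axis(vec, 0.3, 0.5, 2, 1)
#   np.allclose(chained, fused)  # -> True, to floating-point precision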
def calc_uvw(
app_ra=None,
app_dec=None,
frame_pa=None,
lst_array=None,
use_ant_pos=True,
uvw_array=None,
antenna_positions=None,
antenna_numbers=None,
ant_1_array=None,
ant_2_array=None,
old_app_ra=None,
old_app_dec=None,
old_frame_pa=None,
telescope_lat=None,
telescope_lon=None,
from_enu=False,
to_enu=False,
):
"""
Calculate an array of baseline coordinates, in either uvw or ENU.
This routine is meant as a convenience function for producing baseline coordinates
under a few different circumstances:
1) Calculating ENU coordinates using antenna positions
2) Calculating uvw coordinates at a given sky position using antenna positions
3) Converting from ENU coordinates to uvw coordinates
4) Converting from uvw coordinates to ENU coordinates
5) Converting from uvw coordinates at one sky position to another sky position
Different conversion pathways have different parameters that are required.
Parameters
----------
app_ra : ndarray of float
Apparent RA of the target phase center, required if calculating baseline
coordinates in uvw-space (vs ENU-space). Shape is (Nblts,), units are
radians.
app_dec : ndarray of float
Apparent declination of the target phase center, required if calculating
baseline coordinates in uvw-space (vs ENU-space). Shape is (Nblts,),
units are radians.
frame_pa : ndarray of float
Position angle between the great circle of declination in the apparent frame
versus that of the reference frame, used for making sure that "North" on
the derived maps points towards a particular celestial pole (not just the
topocentric one). Required if not deriving baseline coordinates from antenna
positions, from_enu=False, and a value for old_frame_pa is given. Shape is
(Nblts,), units are radians.
old_app_ra : ndarray of float
Apparent RA of the previous phase center, required if not deriving baseline
coordinates from antenna positions and from_enu=False. Shape is (Nblts,),
units are radians.
old_app_dec : ndarray of float
Apparent declination of the previous phase center, required if not deriving
baseline coordinates from antenna positions and from_enu=False. Shape is
(Nblts,), units are radians.
old_frame_pa : ndarray of float
Frame position angle of the previous phase center, required if not deriving
baseline coordinates from antenna positions, from_enu=False, and a value
for frame_pa is supplied. Shape is (Nblts,), units are radians.
lst_array : ndarray of float
Local apparent sidereal time, required if deriving baseline coordinates from
antenna positions, or converting to/from ENU coordinates. Shape is (Nblts,).
use_ant_pos : bool
Switch to determine whether to derive uvw values from the antenna positions
(if set to True), or to use the previously calculated uvw coordinates to derive
the new baseline vectors (if set to False). Default is True.
uvw_array : ndarray of float
Array of previous baseline coordinates (in either uvw or ENU), required if
not deriving new coordinates from antenna positions. Shape is (Nblts, 3).
antenna_positions : ndarray of float
List of antenna positions relative to array center in ECEF coordinates,
required if not providing `uvw_array`. Shape is (Nants, 3).
antenna_numbers: ndarray of int
List of antenna numbers, ordered in the same way as `antenna_positions` (e.g.,
`antenna_numbers[0]` should give the number of the antenna that resides at the
ECEF position given by `antenna_positions[0]`). Shape is (Nants,), required if
not providing `uvw_array`. Contains all unique entries of the joint set of
`ant_1_array` and `ant_2_array`.
ant_1_array : ndarray of int
Antenna number of the first antenna in the baseline pair, for all baselines.
Required if not providing `uvw_array`, shape is (Nblts,).
ant_2_array : ndarray of int
Antenna number of the second antenna in the baseline pair, for all baselines.
Required if not providing `uvw_array`, shape is (Nblts,).
telescope_lat : float
Latitude of the phase center, units radians, required if deriving baseline
coordinates from antenna positions, or converting to/from ENU coordinates.
telescope_lon : float
Longitude of the phase center, units radians, required if deriving baseline
coordinates from antenna positions, or converting to/from ENU coordinates.
from_enu : boolean
Set to True if uvw_array is expressed in ENU coordinates. Default is False.
to_enu : boolean
Set to True if you would like the output expressed in ENU coordinates. Default
is False.
Returns
-------
new_coords : ndarray of float64
Set of baseline coordinates, shape (Nblts, 3).
"""
if to_enu:
if lst_array is None and not use_ant_pos:
raise ValueError(
"Must include lst_array to calculate baselines in ENU coordinates!"
)
if telescope_lat is None:
raise ValueError(
"Must include telescope_lat to calculate baselines "
"in ENU coordinates!"
)
else:
if ((app_ra is None) or (app_dec is None)) and frame_pa is None:
raise ValueError(
"Must include both app_ra and app_dec, or frame_pa to calculate "
"baselines in uvw coordinates!"
)
if use_ant_pos:
# Assume at this point we are dealing w/ antenna positions
if antenna_positions is None:
raise ValueError("Must include antenna_positions if use_ant_pos=True.")
if (ant_1_array is None) or (ant_2_array is None) or (antenna_numbers is None):
raise ValueError(
"Must include ant_1_array, ant_2_array, and antenna_numbers "
"setting use_ant_pos=True."
)
if lst_array is None and not to_enu:
raise ValueError(
"Must include lst_array if use_ant_pos=True and not calculating "
"baselines in ENU coordinates."
)
if telescope_lon is None:
raise ValueError("Must include telescope_lon if use_ant_pos=True.")
ant_dict = {ant_num: idx for idx, ant_num in enumerate(antenna_numbers)}
ant_1_index = np.array([ant_dict[idx] for idx in ant_1_array], dtype=int)
ant_2_index = np.array([ant_dict[idx] for idx in ant_2_array], dtype=int)
N_ants = antenna_positions.shape[0]
# Use the app_ra, app_dec, and lst_array arrays to figure out how many unique
# rotations are actually needed. If the ratio of Nblts to number of unique
# entries is favorable, we can just rotate the antenna positions and save
# ourselves a bit of work.
if to_enu:
# If to_enu, skip all this -- there's only one unique ha + dec combo
unique_mask = np.zeros(len(ant_1_index), dtype=np.bool_)
unique_mask[0] = True
else:
unique_mask = np.append(
True,
(
((lst_array[:-1] - app_ra[:-1]) != (lst_array[1:] - app_ra[1:]))
| (app_dec[:-1] != app_dec[1:])
),
)
# GHA -> Hour Angle as measured at Greenwich (because antenna coords are
# centered such that x-plane intersects the meridian at longitude 0).
if to_enu:
# Unphased coordinates appear to be stored in ENU coordinates -- that's
# equivalent to calculating uvw's based on zenith. We can use that to our
# advantage and spoof the gha and dec based on telescope lon and lat
unique_gha = np.zeros(1) - telescope_lon
unique_dec = np.zeros(1) + telescope_lat
unique_pa = None
else:
unique_gha = (lst_array[unique_mask] - app_ra[unique_mask]) - telescope_lon
unique_dec = app_dec[unique_mask]
unique_pa = 0.0 if frame_pa is None else frame_pa[unique_mask]
# Transpose the ant vectors so that they are in the proper shape
ant_vectors = np.transpose(antenna_positions)[np.newaxis, :, :]
# Apply rotations, and then reorganize the ndarray so that you can access
# individual antenna vectors quickly.
ant_rot_vectors = np.reshape(
np.transpose(
_rotate_one_axis(
_rotate_two_axis(ant_vectors, unique_gha, unique_dec, 2, 1),
unique_pa,
0,
),
axes=[0, 2, 1],
),
(-1, 3),
)
unique_mask[0] = False
unique_map = np.cumsum(unique_mask) * N_ants
new_coords = (
ant_rot_vectors[unique_map + ant_2_index]
- ant_rot_vectors[unique_map + ant_1_index]
)
else:
if uvw_array is None:
raise ValueError("Must include uvw_array if use_ant_pos=False.")
if from_enu:
if to_enu:
# Well this was pointless... returning your uvws unharmed
return uvw_array
# Unphased coordinates appear to be stored in ENU coordinates -- that's
# equivalent to calculating uvw's based on zenith. We can use that to our
# advantage and spoof old_app_ra and old_app_dec based on lst_array and
# telescope_lat
if telescope_lat is None:
raise ValueError(
"Must include telescope_lat if moving between "
'ENU (i.e., "unphased") and uvw coordinates!'
)
if lst_array is None:
raise ValueError(
'Must include lst_array if moving between ENU (i.e., "unphased") '
"and uvw coordinates!"
)
else:
if (old_frame_pa is None) and not (frame_pa is None or to_enu):
raise ValueError(
"Must include old_frame_pa values if data are phased and "
"applying new position angle values (frame_pa)."
)
if ((old_app_ra is None) and not (app_ra is None or to_enu)) or (
(old_app_dec is None) and not (app_dec is None or to_enu)
):
raise ValueError(
"Must include old_app_ra and old_app_dec values when data are "
"already phased and phasing to a new position."
)
# For this operation, all we need is the delta-ha coverage, which _should_ be
# entirely encapsulated by the change in RA.
if (app_ra is None) and (old_app_ra is None):
gha_delta_array = 0.0
else:
gha_delta_array = (lst_array if from_enu else old_app_ra) - (
lst_array if to_enu else app_ra
)
# Notice below there's an axis re-orientation here, to go from uvw -> XYZ,
# where X is pointing in the direction of the source. This is mostly here
# for convenience and code legibility -- a slightly different pair of
# rotations would give you the same result w/o needing to cycle the axes.
# Up front, we want to trap the corner-case where the sky position you are
# phasing up to hasn't changed, just the position angle (i.e., which way is
# up on the map). This is a much easier transform to handle.
if np.all(gha_delta_array == 0.0) and np.all(old_app_dec == app_dec):
new_coords = _rotate_one_axis(
uvw_array[:, [2, 0, 1], np.newaxis],
frame_pa - (0.0 if old_frame_pa is None else old_frame_pa),
0,
)[:, :, 0]
else:
new_coords = _rotate_two_axis(
_rotate_two_axis( # Yo dawg, I heard you like rotation matrices...
uvw_array[:, [2, 0, 1], np.newaxis],
0.0 if (from_enu or old_frame_pa is None) else (-old_frame_pa),
(-telescope_lat) if from_enu else (-old_app_dec),
0,
1,
),
gha_delta_array,
telescope_lat if to_enu else app_dec,
2,
1,
)
# One final rotation applied here, to compensate for the fact that we want
# the Dec-axis of our image (Fourier dual to the v-axis) to be aligned with
# the chosen frame, if we are not in ENU coordinates
if not to_enu:
new_coords = _rotate_one_axis(new_coords, frame_pa, 0)
# Finally drop the now-vestigial last axis of the array
new_coords = new_coords[:, :, 0]
# There's one last task to do, which is to re-align the axes from projected
# XYZ -> uvw, where X (which points towards the source) falls on the w axis,
# and Y and Z fall on the u and v axes, respectively.
return new_coords[:, [1, 2, 0]]
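# A minimal usage sketch with hypothetical values: ENU baseline vectors for a
# two-antenna array derived from antenna positions (positions are ECEF relative
# to the array center, lat/lon in radians).
#
#   enu_bls = calc_uvw(
#       use_ant_pos=True, to_enu=True,
#       antenna_positions=np.array([[0.0, 0.0, 0.0], [10.0, 0.0, 0.0]]),
#       antenna_numbers=np.array([0, 1]),
#       ant_1_array=np.array([0]), ant_2_array=np.array([1]),
#       telescope_lat=np.radians(-30.71), telescope_lon=np.radians(21.44),
#   )  # shape (1, 3)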
def transform_sidereal_coords(
lon,
lat,
in_coord_frame,
out_coord_frame,
in_coord_epoch=None,
out_coord_epoch=None,
time_array=None,
):
"""
Transform a given set of coordinates from one sidereal coordinate frame to another.
Uses astropy to convert coordinates from one sidereal frame into another.
This function will support transforms from several frames, including GCRS,
FK5 (i.e., J2000), FK4 (i.e., B1950), Galactic, Supergalactic, CIRS, HCRS, and
a few others (basically anything that doesn't require knowing the observer's
location on Earth/other celestial body).
Parameters
----------
lon : float or ndarray of floats
Longitudinal coordinate to be transformed, typically expressed as the right
ascension, in units of radians. Can either be a float, or an ndarray of
floats with shape (Ncoords,). Must agree with lat.
lat : float or ndarray of floats
Latitudinal coordinate to be transformed, typically expressed as the
declination, in units of radians. Can either be a float, or an ndarray of
floats with shape (Ncoords,). Must agree with lon.
in_coord_frame : string
Reference frame for the provided coordinates. Expected to match a list of
those supported within the astropy SkyCoord object. An incomplete list includes
'gcrs', 'fk4', 'fk5', 'galactic', 'supergalactic', 'cirs', and 'hcrs'.
out_coord_frame : string
Reference frame to output coordinates in. Expected to match a list of
those supported within the astropy SkyCoord object. An incomplete list includes
'gcrs', 'fk4', 'fk5', 'galactic', 'supergalactic', 'cirs', and 'hcrs'.
in_coord_epoch : float
Epoch for the input coordinate frame. Optional parameter, only required
when using either the FK4 (B1950) or FK5 (J2000) coordinate systems. Units are
in fractional years.
out_coord_epoch : float
Epoch for the output coordinate frame. Optional parameter, only required
when using either the FK4 (B1950) or FK5 (J2000) coordinate systems. Units are
in fractional years.
time_array : float or ndarray of floats
Julian date(s) to which the coordinates correspond, only used in frames
with annual motion terms (e.g., aberration in GCRS). Can either be a float,
or an ndarray of floats with shape (Ntimes,), assuming that either lat
and lon are floats, or that Ntimes == Ncoords.
Returns
-------
new_lon : float or ndarray of floats
Longitudinal coordinates, in units of radians. Output will be an ndarray
if any inputs were, with shape (Ncoords,) or (Ntimes,), depending on inputs.
new_lat : float or ndarray of floats
Latitudinal coordinates, in units of radians. Output will be an ndarray
if any inputs were, with shape (Ncoords,) or (Ntimes,), depending on inputs.
"""
lon_coord = lon * units.rad
lat_coord = lat * units.rad
# Check here to make sure that lat_coord and lon_coord are the same length,
# either 1 or len(time_array)
if lat_coord.shape != lon_coord.shape:
raise ValueError("lon and lat must be the same shape.")
if lon_coord.ndim == 0:
lon_coord.shape += (1,)
lat_coord.shape += (1,)
# Check to make sure that we have a properly formatted epoch for our in-bound
# coordinate frame
in_epoch = None
if isinstance(in_coord_epoch, str) or isinstance(in_coord_epoch, Time):
# If its a string or a Time object, we don't need to do anything more
in_epoch = Time(in_coord_epoch)
elif in_coord_epoch is not None:
if in_coord_frame.lower() in ["fk4", "fk4noeterms"]:
in_epoch = Time(in_coord_epoch, format="byear")
else:
in_epoch = Time(in_coord_epoch, format="jyear")
# Now do the same for the outbound frame
out_epoch = None
if isinstance(out_coord_epoch, str) or isinstance(out_coord_epoch, Time):
# If its a string or a Time object, we don't need to do anything more
out_epoch = Time(out_coord_epoch)
elif out_coord_epoch is not None:
if out_coord_frame.lower() in ["fk4", "fk4noeterms"]:
out_epoch = Time(out_coord_epoch, format="byear")
else:
out_epoch = Time(out_coord_epoch, format="jyear")
# Make sure that time array matched up with what we expect. Thanks to astropy
# weirdness, time_array has to be the same length as lat/lon coords
rep_time = False
rep_crds = False
if time_array is None:
time_obj_array = None
else:
if isinstance(time_array, Time):
time_obj_array = time_array
else:
time_obj_array = Time(time_array, format="jd", scale="utc")
if (time_obj_array.size != 1) and (lon_coord.size != 1):
if time_obj_array.shape != lon_coord.shape:
raise ValueError(
"Shape of time_array must be either that of "
" lat_coord/lon_coord if len(time_array) > 1."
)
else:
rep_crds = (time_obj_array.size != 1) and (lon_coord.size == 1)
rep_time = (time_obj_array.size == 1) and (lon_coord.size != 1)
if rep_crds:
lon_coord = np.repeat(lon_coord, len(time_array))
lat_coord = np.repeat(lat_coord, len(time_array))
if rep_time:
time_obj_array = Time(
np.repeat(time_obj_array.jd, len(lon_coord)), format="jd", scale="utc",
)
coord_object = SkyCoord(
lon_coord,
lat_coord,
frame=in_coord_frame,
equinox=in_epoch,
obstime=time_obj_array,
)
# Easiest, most general way to transform to the new frame is to create a dummy
# SkyCoord with all the attributes needed -- note that we particularly need this
# in order to use a non-standard equinox/epoch
new_coord = coord_object.transform_to(
SkyCoord(0, 0, unit="rad", frame=out_coord_frame, equinox=out_epoch)
)
return new_coord.spherical.lon.rad, new_coord.spherical.lat.rad
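# A usage sketch (illustrative coordinates): converting a J2000/FK5 position to
# Galactic coordinates; no time_array is needed for frames without annual terms.
#
#   gl, gb = transform_sidereal_coords(
#       np.radians(150.0), np.radians(2.0),
#       "fk5", "galactic", in_coord_epoch=2000.0,
#   )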
def transform_icrs_to_app(
time_array,
ra,
dec,
telescope_loc,
epoch=2000.0,
pm_ra=None,
pm_dec=None,
vrad=None,
dist=None,
astrometry_library="erfa",
):
"""
Transform a set of coordinates in ICRS to topocentric/apparent coordinates.
This utility uses one of three libraries (astropy, NOVAS, or ERFA) to calculate
the apparent (i.e., topocentric) coordinates of a source at a given time and
location, given a set of coordinates expressed in the ICRS frame. These coordinates
are most typically used for defining the phase center of the array (i.e., calculating
baseline vectors).
As of astropy v4.2, the agreement between the three libraries is consistent down to
the level of better than 1 mas, with the values produced by astropy and pyERFA
consistent to better than 10 µas (this is not surprising, given that astropy uses
pyERFA under the hood for astrometry). ERFA is the default because it outputs
coordinates natively in the apparent frame (whereas NOVAS and astropy do not), and
because it is the fastest of the three libraries.
Parameters
----------
time_array : float or array-like of float
Julian dates to calculate coordinate positions for. Can either be a single
float, or an array-like of shape (Ntimes,).
ra : float or array-like of float
ICRS RA of the celestial target, expressed in units of radians. Can either
be a single float or array of shape (Ntimes,), although this must be consistent
with other parameters (with the exception of telescope location parameters).
dec : float or array-like of float
ICRS Dec of the celestial target, expressed in units of radians. Can either
be a single float or array of shape (Ntimes,), although this must be consistent
with other parameters (with the exception of telescope location parameters).
telescope_loc : array-like of floats or EarthLocation
ITRF latitude, longitude, and altitude (relative to sea level) of the phase
center of the array. Can either be provided as an astropy EarthLocation, or a
tuple of shape (3,) containing (in order) the latitude, longitude, and altitude,
in units of radians, radians, and meters, respectively.
epoch : int or float or str or Time object
Epoch of the coordinate data supplied, only used when supplying proper motion
values. If supplying a number, it will be assumed to be in Julian years. Default
is J2000.0.
pm_ra : float or array-like of float
Proper motion in RA of the source, expressed in units of milliarcsec / year.
Proper motion values are applied relative to the J2000 (i.e., RA/Dec ICRS
values should be set to their expected values when the epoch is 2000.0).
Can either be a single float or array of shape (Ntimes,), although this must
be consistent with other parameters (namely ra_coord and dec_coord). Note that
units are in dRA/dt, not cos(Dec)*dRA/dt. Not required.
pm_dec : float or array-like of float
Proper motion in Dec of the source, expressed in units of milliarcsec / year.
Proper motion values are applied relative to the J2000 (i.e., RA/Dec ICRS
values should be set to their expected values when the epoch is 2000.0).
Can either be a single float or array of shape (Ntimes,), although this must
be consistent with other parameters (namely ra_coord and dec_coord). Not
required.
vrad : float or array-like of float
Radial velocity of the source, expressed in units of km / sec. Can either be
a single float or array of shape (Ntimes,), although this must be consistent
with other parameters (namely ra_coord and dec_coord). Not required.
dist : float or array-like of float
Distance of the source, expressed in units of parsecs. Can either be a single
float or array of shape (Ntimes,), although this must be consistent with other
parameters (namely ra_coord and dec_coord). Not required.
astrometry_library : str
Library used for running the coordinate conversions. Allowed options are
'erfa' (which uses the pyERFA library), 'novas' (which uses the python-novas library),
and 'astropy' (which uses the astropy utilities). Default is erfa.
Returns
-------
app_ra : ndarray of floats
Apparent right ascension coordinates, in units of radians, of shape (Ntimes,).
app_dec : ndarray of floats
Apparent declination coordinates, in units of radians, of shape (Ntimes,).
"""
# Make sure that the library requested is actually permitted
if astrometry_library not in ["erfa", "novas", "astropy"]:
raise ValueError(
"Requested coordinate transformation library is not supported, please "
"select either 'erfa', 'novas', or 'astropy' for astrometry_library."
)
ra_coord = ra * units.rad
dec_coord = dec * units.rad
# Check here to make sure that ra_coord and dec_coord are the same length,
# either 1 or len(time_array)
multi_coord = ra_coord.size != 1
if ra_coord.shape != dec_coord.shape:
raise ValueError("ra and dec must be the same shape.")
pm_ra_coord = None if pm_ra is None else pm_ra * (units.mas / units.yr)
pm_dec_coord = None if pm_dec is None else pm_dec * (units.mas / units.yr)
d_coord = (
None if (dist is None or np.all(dist == 0.0)) else Distance(dist * units.pc)
)
v_coord = None if vrad is None else vrad * (units.km / units.s)
opt_list = [pm_ra_coord, pm_dec_coord, d_coord, v_coord]
opt_names = ["pm_ra", "pm_dec", "dist", "vrad"]
# Check the optional inputs, make sure that they're sensible
for item, name in zip(opt_list, opt_names):
if item is not None:
if ra_coord.shape != item.shape:
raise ValueError("%s must be the same shape as ra and dec." % name)
if isinstance(telescope_loc, EarthLocation):
site_loc = telescope_loc
else:
site_loc = EarthLocation.from_geodetic(
telescope_loc[1] * (180.0 / np.pi),
telescope_loc[0] * (180.0 / np.pi),
height=telescope_loc[2],
)
# Useful for both astropy and novas methods, the latter of which gives easy
# access to the IERS data that we want.
if isinstance(time_array, Time):
time_obj_array = time_array
else:
time_obj_array = Time(time_array, format="jd", scale="utc")
if time_obj_array.size != 1:
if (time_obj_array.shape != ra_coord.shape) and multi_coord:
raise ValueError(
"time_array must be of either of length 1 (single "
"float) or same length as ra and dec."
)
elif time_obj_array.ndim == 0:
# Make the array at least 1-dimensional so we don't run into indexing
# issues later.
time_obj_array = Time([time_obj_array])
# Check to make sure that we have a properly formatted epoch for our in-bound
# coordinate frame
coord_epoch = None
if isinstance(epoch, str) or isinstance(epoch, Time):
# If its a string or a Time object, we don't need to do anything more
coord_epoch = Time(epoch)
elif epoch is not None:
coord_epoch = Time(epoch, format="jyear")
# Note if time_array is a single element
multi_time = time_obj_array.size != 1
# Get IERS data, which is needed for NOVAS and ERFA
polar_motion_data = iers.earth_orientation_table.get()
pm_x_array, pm_y_array = polar_motion_data.pm_xy(time_obj_array)
delta_x_array, delta_y_array = polar_motion_data.dcip_xy(time_obj_array)
pm_x_array = pm_x_array.to_value("arcsec")
pm_y_array = pm_y_array.to_value("arcsec")
delta_x_array = delta_x_array.to_value("marcsec")
delta_y_array = delta_y_array.to_value("marcsec")
# Catch the case where we don't have CIP delta values yet (they don't typically have
# predictive values like the polar motion does)
delta_x_array[np.isnan(delta_x_array)] = 0.0
delta_y_array[np.isnan(delta_y_array)] = 0.0
# If the source was instantiated w/ floats, it'll be a 0-dim object, which will
# throw errors if we try to treat it as an array. Reshape to a 1D array of len 1
# so that all the calls can be uniform
if ra_coord.ndim == 0:
ra_coord.shape += (1,)
dec_coord.shape += (1,)
if pm_ra_coord is not None:
pm_ra_coord.shape += (1,)
if pm_dec_coord is not None:
pm_dec_coord.shape += (1,)
if d_coord is not None:
d_coord.shape += (1,)
if v_coord is not None:
v_coord.shape += (1,)
# If there is an epoch and a proper motion, apply that motion now
if astrometry_library == "astropy":
# Astropy doesn't have (oddly enough) a way of getting at the apparent RA/Dec
# directly, but we can cheat this by going to AltAz, and then converting back
# to apparent RA/Dec using the telescope lat and LAST.
if (epoch is not None) and (pm_ra is not None) and (pm_dec is not None):
# astropy is a bit weird in how it handles proper motion, so rather than
# fight with it to do it all in one step, we separate it into two: first
# apply proper motion to ICRS, then transform to topocentric.
sky_coord = SkyCoord(
ra=ra_coord,
dec=dec_coord,
pm_ra_cosdec=pm_ra_coord * np.cos(dec_coord),
pm_dec=pm_dec_coord,
frame="icrs",
)
sky_coord = sky_coord.apply_space_motion(dt=(time_obj_array - coord_epoch))
ra_coord = sky_coord.ra
dec_coord = sky_coord.dec
if d_coord is not None:
d_coord = d_coord.repeat(ra_coord.size)
if v_coord is not None:
v_coord = v_coord.repeat(ra_coord.size)
sky_coord = SkyCoord(
ra=ra_coord,
dec=dec_coord,
distance=d_coord,
radial_velocity=v_coord,
frame="icrs",
)
azel_data = sky_coord.transform_to(
SkyCoord(
# NOTE: the source breaks off mid-call here; the arguments below are an
# assumed completion (a dummy AltAz coordinate at the site and times).
np.zeros_like(time_obj_array) * units.rad,
np.zeros_like(time_obj_array) * units.rad,
location=site_loc,
obstime=time_obj_array,
frame="altaz",
)
)
import numpy as np
def color_libs(index=0):
clibs = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(0,255,255),(255,0,255)]
index = index % len(clibs)  # wrap around so all defined colors are reachable
return clibs[index]
def xyxy2xywh(bbox):
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0] + 1,
_bbox[3] - _bbox[1] + 1,
]
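# A quick sketch: the +1 terms follow the inclusive-pixel (VOC/COCO-style)
# convention, so a box spanning pixels 0..9 has width 10.
#
#   xyxy2xywh(np.array([0, 0, 9, 19]))  # -> [0, 0, 10, 20]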
def py_cpu_nms(dets,scores, thresh):
"""Pure Python NMS baseline."""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
#scores = dets[:, 4]  # bbox scores
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
# sort scores from high to low and take the indices
order = scores.argsort()[::-1]
# keep holds the indices of the boxes retained after suppression
keep = []
while order.size > 0:
# order[0] is the highest-scoring remaining box; it is always kept
i = order[0]
keep.append(i)
# compute the intersection area between box i and all remaining boxes
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
# intersection / union gives the IoU
ovr = inter / (areas[i] + areas[order[1:]] - inter)
# inds are the indices of boxes whose IoU with box i is below the threshold;
# all other boxes are suppressed by box i in this round
inds = np.where(ovr <= thresh)[0]
# offset the indices by 1, since ovr was computed against order[1:]
order = order[inds + 1]
return keep
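# A usage sketch: two heavily-overlapping boxes plus one separate box; NMS keeps
# the higher-scoring overlapping box and the isolated one.
#
#   boxes = np.array([[0, 0, 10, 10], [1, 1, 10, 10], [20, 20, 30, 30]], dtype=float)
#   scores = np.array([0.9, 0.8, 0.7])
#   py_cpu_nms(boxes, scores, thresh=0.5)  # -> [0, 2]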
"""Classes and functions for accessing and manipulating tabulated EOS data."""
### Module for accessing and manipulating tabulated EOS data
### STS 09/2019
###
##
import numpy as np
#
#
# calculate the structure for one planet
# make a class to hold the PREM data
class isentrope_class:
"""Class to isentrope data extracted from EOS table.""" # this is a documentation string for this class
def __init__(self): # self is the default name of the object for internal referencing of the variables in the class
"""A function to initialize the class object.""" # this is a documentation string for this function
self.ND = 0 # number of radius points
self.density = []
self.pressure = []
self.temperature = []
self.soundspeed = []
self.energy = []
self.partvel = []
self.region = [] # Tillotson region flag
# not going to use all the variables in the file
self.units = '' # I like to keep a text note in a structure about the units
#
class EOShugoniot:
"""Class for Hugoniot array from extEOStable."""
def __init__(self):
self.NH = 0
self.rho = np.zeros(self.NH)
self.T = np.zeros(self.NH)
self.P = np.zeros(self.NH)
self.U = np.zeros(self.NH)
self.S = np.zeros(self.NH)
self.up = np.zeros(self.NH)
self.us = np.zeros(self.NH)
self.cs = np.zeros(self.NH)
self.units = ''
#
class EOSvaporcurve:
"""Class for vapor curve from ANEOS."""
def __init__(self):
self.NT = 0
self.NV = 0
self.T = np.zeros(self.NT)
self.rl = np.zeros(self.NT)
self.rv = np.zeros(self.NT)
self.Pl = np.zeros(self.NT)
self.Pv = np.zeros(self.NT)
self.Ul = np.zeros(self.NT)
self.Uv = np.zeros(self.NT)
self.Sl = np.zeros(self.NT)
self.Sv = np.zeros(self.NT)
self.Gl = np.zeros(self.NT)
self.Gv = np.zeros(self.NT)
self.units = ''
#
class EOSmeltcurve:
"""Class for melt curve from ANEOS."""
def __init__(self):
self.NT = 0
self.NV = 0
self.T = np.zeros(self.NT)
self.rl = np.zeros(self.NT)
self.rs = np.zeros(self.NT)
self.Pl = np.zeros(self.NT)
self.Ps = np.zeros(self.NT)
self.Ul = np.zeros(self.NT)
self.Us = np.zeros(self.NT)
self.Sl = np.zeros(self.NT)
self.Ss = np.zeros(self.NT)
self.units = ''
#
class EOS1barcurve:
"""Class for 1bar curve from the EOS."""
def __init__(self):
self.NT = 0
self.S = np.zeros(self.NT)
self.T = np.zeros(self.NT)
self.rho = np.zeros(self.NT)
self.Tvap = 0.
self.Tmelt = 0.
self.Sim = 0.
self.Scm = 0.
self.Siv = 0.
self.Scv = 0.
self.rhoiv = 0.
self.rhocv = 0.
self.rhocm = 0.
self.rhoim = 0.
self.units = ''
#
class EOScriticalpoint:
"""Class for critical point state from the EOS."""
def __init__(self):
self.P = 0
self.S = 0
self.T = 0
self.rho = 0
self.U = 0
self.units = ''
#
class EOStriplepoint:
"""Class for triple point state from the EOS."""
def __init__(self):
self.P = 0
self.T = 0
self.Sim = 0.
self.Scm = 0.
self.Siv = 0.
self.Scv = 0.
self.rhol = 0.
self.units = ''
#
class EOSaneoshugoniot:
"""Class for Hugoniot calculated in ANEOS."""
def __init__(self):
self.ND = 0
self.NV = 0
#self.all = np.zeros((self.ND,self.NV))
self.rho = 0
self.T = 0
self.P = 0
self.U = 0
self.S = 0
self.us = 0
self.up = 0
self.units = ''
#
class extEOStable:
"""Class for accessing EXTENDED SESAME-STYLE EOS tables output from ANEOS"""
# ANEOS KPA FLAG
# TABLE ANEOS
# KPAQQ=STATE INDICATOR =1, 1p =1, 1p (eos without melt)
# =2, 2p lv =2, 2p liquid/solid plus vapor
# =4, 1p solid (eos with melt)
# =5, 2p melt (eos with melt)
# =6, 1p liquid (eos with melt)
# =-1 bad value of temperature
# =-2 bad value of density
# =-3 bad value of material number
#
def __init__(self):
self.ND = 0 # integer; number of density points in grid
self.NT = 0 # integer; number of temperature points in grid
self.rho = np.zeros(self.ND) # g/cm3, density values
self.T = np.zeros(self.NT) # K, temperature values
self.P = np.zeros(self.ND*self.NT) # GPA, pressure(T,rho)
self.U = np.zeros(self.ND*self.NT) # MJ/kg, sp. internal energy(T,rho)
self.A = np.zeros(self.ND*self.NT) # MJ/kg, Helmholtz free energy(T,rho)
self.S = np.zeros(self.ND*self.NT) # MJ/K/kg, sp. entropy(T,rho)
self.cs = np.zeros(self.ND*self.NT) # cm/s, sound speed(T,rho)
self.cv = np.zeros(self.ND*self.NT) # MJ/K/kg, sp. heat capacity(T,rho)
self.KPA = np.zeros(self.ND*self.NT) # integer, ANEOS KPA flag(T,rho)
self.MDQ = np.zeros(self.ND*self.NT) # integer, Model Development Quality Flag(T,rho)
self.units = ''
self.hug = EOShugoniot()
self.vc = EOSvaporcurve()
self.mc = EOSmeltcurve()
self.cp = EOScriticalpoint()
self.tp = EOStriplepoint()
self.onebar = EOS1barcurve()
self.anhug = EOSaneoshugoniot()
# these are variables needed for the sesame header
self.MATID = 0.
self.DATE = 0.
self.VERSION = 0.
self.FMN = 0.
self.FMW = 0.
self.R0REF = 0.
self.K0REF = 0.
self.T0REF = 0.
self.P0REF = 0.
# variables needed for the ANEOS gamma function
self.gamma0 = 0.
self.theta0 = 0.
self.C24 = 0.
self.C60 = 0.
self.C61 = 0.
self.beta = 0.
# model name/version string
self.MODELNAME = ''
def loadstdsesame(self, fname, unitstxt=None):
"""Function for loading STD SESAME-STYLE EOS table output from ANEOS"""
        data = []
if unitstxt is None:
self.units = 'Units: rho g/cm3, T K, P GPa, U MJ/kg, A MJ/kg, S MJ/K/kg, cs cm/s, cv MJ/K/kg, KPA flag. 2D arrays are (NT,ND).'
else:
self.units = unitstxt
sesamefile = open(fname,"r")
sesamedata=sesamefile.readlines()
sesamefile.close()
nskip = 6 # skip standard header to get to the content of the 301 table
# num.density, num. temps
tmp = sesamedata[nskip][0:16]
dlen = float(tmp)
tmp = sesamedata[nskip][16:32]
tlen = float(tmp)
if (np.mod((dlen*tlen*3.0+dlen+tlen+2.0),5.0) == 0):
neos = int((dlen*tlen*3.0+dlen+tlen+2.0)/5.0)
else:
neos = int((dlen*tlen*3.0+dlen+tlen+2.0)/5.0) +1
#print(dlen,tlen,neos,len(sesamedata))
data = np.zeros((neos,5),dtype=float)
for j in range(nskip,neos+nskip):
tmp3 = sesamedata[j]
tmp4 = list(tmp3.split())
if len(tmp4) < 5:
lentmp4 = len(tmp4)
data[j-nskip,0:lentmp4] = np.asarray(tmp4[0:lentmp4])
else:
                data[j-nskip,:] = np.asarray(tmp4)
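# Hedged usage sketch (not part of the original module; the file name
# 'NEW-SESAME-STD.TXT' is an assumption -- substitute the standard SESAME
# table produced by your own ANEOS run):
# eos = extEOStable()
# eos.loadstdsesame('NEW-SESAME-STD.TXT')
# print(eos.units)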
# -*- coding: utf-8 -*-
"""
Created on Wed May 08 10:39:48 2019
@author: Darin
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import PolyCollection
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import Shape_Functions
import scipy.sparse as sparse
import scipy.sparse.linalg as spla
import Material
import pyamg
from time import time
import cvxopt; import cvxopt.cholmod
class it_counter(object):
def __init__(self, disp=False):
self._disp = disp
self.it = 0
def __call__(self, rk=None):
self.it += 1
if self._disp:
print('iter %3i\trk = %s' % (self.it, str(rk)))
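# Typical use of it_counter as a callback for the iterative solvers imported
# above (illustrative sketch; the system here is made up):
# counter = it_counter(disp=True)
# A = sparse.eye(100, format='csr')
# b = np.ones(100)
# x, info = spla.cg(A, b, callback=counter)
# print(counter.it, 'iterations')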
class FEM:
"""Provides functionality to solve the beam QC problem
"""
def __init__(self):
"""Create a 1-element rectangular mesh by default
Parameters
----------
None
Notes
-----
The proper calling order of functions is
1 - CreateRecMesh
2 - AddBc, AddLoad, and AddSpring; in any order
3 - SetMaterial
4 - Initialize
5 - ConstructSystem
6 - SolveSystem
An example of this process is at the end of the file
"""
self.elements = np.array([[0, 1, 3, 2]])
self.nElem = 1
self.nodes = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
self.nNode, self.nDof = self.nodes.shape
self.edgeElems = np.array([[0, 1], [0, 1], [0, 1], [0, 1]])
self.edgeLengths = np.ones(4)
self.areas = np.array([1])
self.fixDof = np.array([0, 1, 3], dtype=int)
self.U = np.zeros(self.nodes.size)
self.freeDof = np.array([2, 4, 5, 6, 7], dtype=int)
self.F = np.zeros(self.nNode * self.nDof)
self.F[5::2] = 1
self.springDof = np.array([], dtype=int)
self.stiff = np.array([])
def GetData(self):
""" Get important data from the class
Parameters
----------
None
Returns
-------
data : dict
Dictionary of all important data in the structure
"""
data = {'elements':self.elements, 'nodes':self.nodes, 'freeDof':self.freeDof,
'fixDof':self.fixDof, 'U':self.U, 'F':self.F, 'areas':self.areas,
'springDof':self.springDof, 'stiff':self.stiff, 'P':self.P, 'uniform':self.uniform}
return data
def Load(self, data):
""" Rebuild the class with data from a file
Parameters
----------
data : dict
Data from the file
Returns
-------
None
"""
self.elements = data['elements']
self.nodes = data['nodes']
self.freeDof = data['freeDof']
self.fixDof = data['fixDof']
self.F = data['F']
self.springDof = data['springDof']
self.stiff = data['stiff']
if 'U' in data:
self.U = data['U']
else:
self.U = np.zeros_like(self.F)
self.areas = data['areas']
self.domainSize = self.areas.sum()
self.springDof = data['springDof']
self.P = data['P']
self.uniform = data['uniform']
self.nElem = self.elements.shape[0]
self.nNode, self.nDof = self.nodes.shape
if 'k' in data:
self.e = data['e']
self.i = data['i']
self.j = data['j']
self.k = data['k']
self.DB = data['DB']
self.G = data['G']
def LoadPetsc(self, folder, Endian='='):
""" Create FEM structure from PETSc code results
Parameters
----------
folder : str
folder containing all of the Petsc results
Endian : char
Indicates byte ordering ('=':default, '<':little Endian, '>':big Endian)
Returns
-------
None
"""
from os.path import sep
from struct import unpack
from PetscBinaryIO import PetscBinaryRead
with open(folder + sep + "Element_Distribution.bin", mode='rb') as fh:
data = fh.read()
nProcessors = len(data)//4 # int size is 4 bytes
dist = np.array(unpack(Endian + nProcessors*'i', data))
self.nElem = dist[-1]
with open(folder + sep + "elements.bin", mode='rb') as fh:
data = fh.read()
        self.elements = np.array(unpack(Endian + len(data)//4*'i', data)).reshape(self.nElem, -1)  # use the caller-specified byte order consistently
self.nNode = self.elements.max()+1
with open(folder + sep + "nodes.bin", mode='rb') as fh:
data = fh.read()
self.nodes = np.array(unpack(Endian + len(data)//8*'d', data)).reshape(self.nNode, -1)
self.nDof = self.nodes.shape[1]
self.U = np.zeros(self.nNode*self.nDof, dtype=float)
self.F = self.U.copy()
# Fix degrees of freedom
with open(folder + sep + "supportNodes.bin", mode='rb') as fh:
data = fh.read()
nodes = np.array(unpack(Endian + len(data)//4*'i', data))
with open(folder + sep + "supports.bin", mode='rb') as fh:
data = fh.read()
conditions = np.array(unpack(Endian + len(data)*'?', data)).reshape(nodes.size, -1)
for i in range(nodes.size):
self.U[self.nDof*nodes[i]:self.nDof*(nodes[i]+1)] = conditions[i]
self.fixDof = np.where(self.U > 0.5)[0]
self.freeDof = np.where(self.U < 0.5)[0]
self.U.fill(0)
# Apply loads
with open(folder + sep + "loadNodes.bin", mode='rb') as fh:
data = fh.read()
nodes = np.array(unpack(Endian + len(data)//4*'i', data))
with open(folder + sep + "loads.bin", mode='rb') as fh:
data = fh.read()
loads = np.array(unpack(Endian + len(data)//8*'d', data)).reshape(nodes.size, -1)
for i in range(nodes.size):
self.F[self.nDof*nodes[i]:self.nDof*(nodes[i]+1)] = loads[i]
# Apply springs
with open(folder + sep + "springNodes.bin", mode='rb') as fh:
data = fh.read()
if len(data) == 0: # No springs
self.springDof = []
self.stiff = []
else:
nodes = np.array(unpack(Endian + len(data)//4*'i', data))
with open(folder + sep + "springs.bin", mode='rb') as fh:
data = fh.read()
self.stiff = np.array(unpack(Endian + len(data)//8*'d', data))
            self.springDof = np.tile(nodes, (1, self.nDof)) + np.arange(self.nDof)
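# The binary readers above all share one pattern: slurp the raw bytes, then
# unpack them with a struct format string of one type code per element.
# A standalone sketch of that pattern (the file name is hypothetical):
# with open('Element_Distribution.bin', mode='rb') as fh:
#     raw = fh.read()
# values = np.array(unpack('=' + (len(raw) // 4) * 'i', raw))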
import itertools
import logging
import math
import os
import sys
import time
import warnings
from multiprocessing import Pool
from numba import njit
from pytransit import QuadraticModel
import batman
import ellc
import numpy as np
import astropy.constants as ac
import astropy.units as u
import wotan
from lcbuilder.lcbuilder_class import LcBuilder
from scipy import stats
from scipy.interpolate import interp1d
from scipy.signal import argrelextrema
import matplotlib.pyplot as plt
G = 6.674e-11 # m3 kg-1 s-2
AU_TO_RSUN = 215.032
Msolar_to_kg = 2.e30
Mearth_to_kg = 5.972e24
M_earth_to_M_sun = Mearth_to_kg / Msolar_to_kg
R_earth_to_R_sun = 0.009175
class ExoMoonLeastSquares:
def __init__(self, object_dir, cpus, star_mass, star_radius, ab, planet_radius, planet_period, planet_t0, planet_duration, planet_semimajor_axis, planet_inc, planet_ecc,
planet_arg_periastron, planet_impact_param, min_radius, max_radius, t0s, time, flux,
period_grid_size=2000, radius_grid_size=10):
self.object_dir = object_dir
self.cpus = cpus
self.star_mass = star_mass
self.star_radius = star_radius
self.ab = ab
self.planet_radius = planet_radius
self.planet_period = planet_period
self.planet_t0 = planet_t0
self.planet_duration = planet_duration
self.planet_semimajor_axis = planet_semimajor_axis
self.planet_inc = planet_inc
self.planet_ecc = planet_ecc
self.planet_arg_periastron = planet_arg_periastron
self.planet_impact_param = planet_impact_param
self.time = time
self.flux = flux
self.t0s = t0s
self.min_radius = min_radius
self.max_radius = max_radius
self.period_grid_size = period_grid_size
self.radius_grid_size = radius_grid_size
@staticmethod
def compute_semimajor_axis(major_mass, minor_period):
period_seconds = minor_period * 24. * 3600.
mass_kg = major_mass * Msolar_to_kg
a1 = (G * mass_kg * period_seconds ** 2 / 4. / (np.pi ** 2)) ** (1. / 3.)
return a1 / 1.496e11
@staticmethod
def compute_hill_radius(major_mass, minor_mass, semimajor_axis, eccentricity=0):
"""
@param major_mass: The main body mass
@param minor_mass: The minor body mass
@param semimajor_axis: The minor body semimajor axis in AU.
@param eccentricity: the planet eccentricity
        @return: the hill radius of the minor body in the same units as the semimajor_axis
"""
        return AU_TO_RSUN * semimajor_axis * (1 - eccentricity) * ((minor_mass / (3 * major_mass)) ** (1 / 3))
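    # Illustrative check (not from the source): for the Earth around the Sun
    # (major_mass=1 Msun, minor_mass=3e-6 Msun, a=1 AU, e=0) the factor
    # (1e-6)**(1/3) = 0.01 gives 215 * 0.01 ≈ 2.15 Rsun ≈ 0.01 AU, the
    # familiar Earth Hill radius.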
@staticmethod
def au_to_period(mass, au):
"""
Calculates the orbital period for the semi-major axis assuming a circular orbit.
@param mass: the stellar mass
@param au: the semi-major axis in astronomical units.
@return: the period in days
"""
mass_kg = mass * 2.e30
a = au * 1.496e11
return ((a ** 3) * 4 * (np.pi ** 2) / G / mass_kg) ** (1. / 2.) / 3600 / 24
@staticmethod
def compute_transit_duration(star_radius,
transiting_body_semimajor_axis, transit_period, transiting_body_radius,
impact_parameter=0):
"""
@param star_radius: star radius
@param transiting_body_semimajor_axis: orbit semimajor axis
@param transit_period: in days
@param transiting_body_radius: transiting body radius
@param impact_parameter:
@return:
@rtype:
"""
return transit_period / np.pi * np.arcsin(np.sqrt((star_radius + transiting_body_radius) ** 2 - (impact_parameter * star_radius) ** 2) / transiting_body_semimajor_axis)
#return 2 * moon_semimajor_axis / (planet_semimajor_axis * 2 * np.pi) * planet_period
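    # Illustrative check (not from the source): for a central Sun-Earth
    # transit (star_radius=1 Rsun, a=215 Rsun, P=365.25 d, Rp≈0.009 Rsun,
    # b=0) this evaluates to ~0.55 d, i.e. the familiar ~13 h duration.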
@staticmethod
def compute_moon_period_grid(min, max, mode="lin", samples=10000):
if "log" == mode:
return np.logspace(math.log(min, 10), math.log(max, 10), samples, base=10)
else:
return np.linspace(min, max, samples)
def subtract_planet_transit(self, ab, star_radius, star_mass, time, flux, planet_radius, planet_t0,
planet_period, planet_inc=90):
P1 = planet_period * u.day
a = np.cbrt((ac.G * star_mass * u.M_sun * P1 ** 2) / (4 * np.pi ** 2)).to(u.au)
model = ellc.lc(
t_obs=time,
radius_1=(star_radius * u.R_sun).to(u.au) / a, # star radius convert from AU to in units of a
radius_2=(planet_radius * u.R_earth).to(u.au) / a,
# convert from Rearth (equatorial) into AU and then into units of a
sbratio=0,
incl=planet_inc,
light_3=0,
t_zero=planet_t0,
period=planet_period,
a=None,
q=1e-6,
f_c=None, f_s=None,
ldc_1=ab, ldc_2=None,
gdc_1=None, gdc_2=None,
didt=None,
domdt=None,
rotfac_1=1, rotfac_2=1,
hf_1=1.5, hf_2=1.5,
bfac_1=None, bfac_2=None,
heat_1=None, heat_2=None,
lambda_1=None, lambda_2=None,
vsini_1=None, vsini_2=None,
t_exp=None, n_int=None,
grid_1='default', grid_2='default',
ld_1='quad', ld_2=None,
shape_1='sphere', shape_2='sphere',
spots_1=None, spots_2=None,
exact_grav=False, verbose=1)
return flux - model + 1
@staticmethod
#@njit(fastmath=True, parallel=False)
def compute_moon_transit_scenarios(time, flux, planet_t0, moon_initial_alpha, moon_period, moon_orbit_ranges,
moon_orbit_transit_length, moon_transit_duration):
#TODO need to take into account "prograde" or "retrograde" orbit
orbit_scenarios = None
for moon_orbit_range in moon_orbit_ranges:
t0 = moon_orbit_range[0]
t1 = moon_orbit_range[1]
phase_delta = (t0 - planet_t0) % moon_period * 2 * np.pi
alpha = (moon_initial_alpha + phase_delta) % (2 * np.pi)
time_alpha = np.cos(alpha) * moon_orbit_transit_length / 2
moon_t1 = t1 + time_alpha
time_args = np.argwhere((time > moon_t1) & (time < moon_t1 + moon_transit_duration)).flatten()
#TODO we'd need to fill measurement gaps (detected from the time array)
time_moon_transit = time[time_args]
flux_moon_transit = flux[time_args]
time_moon_transit = time_moon_transit - (moon_t1 + moon_transit_duration / 2)
# fig_transit, axs = plt.subplots(1, 1, figsize=(8, 8))
# axs.plot(time_moon_transit, flux_moon_transit, color='gray', alpha=1, rasterized=True,
# label="Flux Transit ")
# axs.set_title("Residuals")
# axs.set_xlabel('Time')
# axs.set_ylabel('Flux')
# fig_transit.show()
            if len(time_moon_transit) > 0:
                # initialise the list on first use, then append exactly once
                # (the original appended the first scenario twice)
                if orbit_scenarios is None:
                    orbit_scenarios = []
                orbit_scenarios.append([alpha, time_moon_transit, flux_moon_transit])
return orbit_scenarios
def search(self, search_input, return_lc=False):
logging.info("Searching for period=%.5fd and alpha=%.2frad", search_input.moon_period, search_input.moon_alpha)
planet_duration = self.compute_transit_duration(self.star_radius, self.planet_semimajor_axis * AU_TO_RSUN,
self.planet_period, self.planet_radius * R_earth_to_R_sun,
search_input.impact_param)
moon_semimajor_axis = self.compute_semimajor_axis(planet_mass * M_earth_to_M_sun, search_input.moon_period)
moon_orbit_transit_duration = self.compute_transit_duration(self.star_radius,
self.planet_semimajor_axis * AU_TO_RSUN,
self.planet_period,
moon_semimajor_axis * AU_TO_RSUN,
search_input.impact_param)
moon_transit_length = self.compute_transit_duration(self.star_radius, self.planet_semimajor_axis * AU_TO_RSUN,
self.planet_period, 1 * R_earth_to_R_sun)
# TODO we probably need to define left_transit_length and right_transit_length depending on moon orbit parameters
moon_orbit_tokens = [[t0, t0 - planet_duration / 2] for t0 in self.t0s]
transit_scenarios = ExoMoonLeastSquares.compute_moon_transit_scenarios(self.time, self.flux, self.planet_t0, search_input.moon_alpha,
search_input.moon_period, moon_orbit_tokens,
moon_orbit_transit_duration, moon_transit_length)
scenario_time = []
scenario_flux = []
for normalized_moon_transit_scenario in transit_scenarios:
scenario_time = np.concatenate((scenario_time, normalized_moon_transit_scenario[1].flatten()))
scenario_flux = np.concatenate((scenario_flux, normalized_moon_transit_scenario[2].flatten()))
sorted_time_args = np.argsort(scenario_time)
scenario_time = scenario_time[sorted_time_args]
scenario_flux = scenario_flux[sorted_time_args]
outliers_args = ExoMoonLeastSquares.remove_outliers(scenario_flux, sigma_lower=float('inf'), sigma_upper=3)
scenario_time = scenario_time[~outliers_args].flatten()
scenario_flux = scenario_flux[~outliers_args].flatten()
interpolated = interp1d(np.arange(len(self.model)), self.model, axis=0, fill_value='extrapolate')
model_sample = interpolated(np.linspace(0, len(self.model), len(scenario_time)))
# fig_transit, axs = plt.subplots(1, 1, figsize=(8, 8))
# axs.scatter(scenario_time, scenario_flux, color='gray', alpha=0.4, rasterized=True, label="Flux Transit ")
# axs.plot(scenario_time, model_sample, color='red', alpha=1, rasterized=True, label="Flux Transit ")
# axs.set_title("Residuals")
# axs.set_xlabel('Time')
# axs.set_ylabel('Flux')
# fig_transit.show()
residual_calculation, residual_baseline, residual_radius, residual_model = self.calculate_residuals(scenario_time, scenario_flux,
model_sample, self.min_radius,
self.max_radius,
self.radius_grid_size)
if return_lc:
return residual_calculation, residual_baseline, residual_radius, scenario_time, scenario_flux, residual_model
else:
return residual_calculation, residual_baseline, residual_radius
@staticmethod
def spectra(chi2, oversampling_factor=1, kernel_size=30):
SR = np.min(chi2) / chi2
SDE_raw = (1 - np.mean(SR)) / np.std(SR)
# Scale SDE_power from 0 to SDE_raw
power_raw = SR - np.mean(SR) # shift down to the mean being zero
        scale = SDE_raw / np.max(power_raw)
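# Quick consistency check for the Kepler helpers above (illustrative, not in
# the original source): a 1 Msun star with a 365.25 d period gives ~1 AU,
# and converting that axis back recovers the period.
# a = ExoMoonLeastSquares.compute_semimajor_axis(1.0, 365.25)  # ~1.0 AU
# p = ExoMoonLeastSquares.au_to_period(1.0, a)                 # ~365.25 d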
# -*- coding: utf-8 -*-
""" BLEND
This module defines classes and methods for blending images.
:Author: <NAME> <<EMAIL>>
"""
import numpy as np
from lmfit import Model
from lmfit.models import GaussianModel, ConstantModel
from modopt.base.np_adjust import pad2d
from sf_tools.image.stamp import postage_stamp
from sf_tools.image.distort import recentre
class Blender(object):
def __init__(self, images, ratio=1.0, overlap=True, stamp_shape=(116, 116),
method='sf', xwang_sigma=0.15, seed=None):
self.ratio = ratio
self.overlap = overlap
self.stamp_shape = np.array(stamp_shape)
if method in ('sf', 'xwang'):
self.method = method
else:
raise ValueError('Method must be "sf" or "xwang".')
self.xwang_sigma = xwang_sigma
self.seed = seed
if images.shape[0] % 2:
images = images[:-1]
half_sample = images.shape[0] // 2
self._images = images
self._centrals = images[:half_sample]
self._companions = images[half_sample:]
self.obj_centres = []
@staticmethod
def _fit_gauss(xval, yval):
model = GaussianModel()
result = model.fit(yval, model.guess(yval, x=xval,
amplitude=np.max(yval)), x=xval)
return result
@classmethod
def _fit_image(cls, image):
sum_x = image.sum(axis=0)
sum_y = image.sum(axis=1)
x_vals = np.arange(sum_x.size)
sum_x_fit = cls._fit_gauss(x_vals, sum_x)
sum_y_fit = cls._fit_gauss(x_vals, sum_y)
centre = (int(sum_x_fit.params['center'].value),
int(sum_y_fit.params['center'].value))
width = min(sum_x_fit.params['fwhm'].value,
sum_y_fit.params['fwhm'].value)
return centre, width
@staticmethod
def _random_shift(radius, outer_radius=None, seed=None):
if seed:
np.random.seed(seed)
theta = np.random.ranf() * 2 * np.pi
if outer_radius:
r = radius + np.random.ranf() * (outer_radius - radius)
else:
r = np.random.ranf() * radius
x = int(np.around(r * np.cos(theta)))
y = int(np.around(r * np.sin(theta)))
return x, y
@staticmethod
def _pad_image_shift(image, shift):
pad = [(_shift, 0) if _shift >= 0 else (0, -_shift)
for _shift in shift]
return np.pad(image, pad, 'constant')
@classmethod
def _blend(cls, image1, image2, shift):
dim = image1.shape
image2 = cls._pad_image_shift(image2, shift)
image2 = image2[:dim[0]] if shift[0] >= 0 else image2[-shift[0]:]
image2 = image2[:, :dim[1]] if shift[1] >= 0 else image2[:, -shift[1]:]
return image1 + image2
@staticmethod
def _gal_size_xwang(image):
return np.array([np.count_nonzero(image.sum(axis=ax))
for ax in range(2)])
@staticmethod
def _area_prob(shape1, shape2):
        shape1, shape2 = np.array(shape1), np.array(shape2)
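# Illustrative check of the shift-and-add logic in _pad_image_shift/_blend
# (not from the original source): a unit pixel at (0, 0) shifted by (1, 1)
# should land at (1, 1) in the blend.
# img = np.zeros((4, 4)); img[0, 0] = 1.0
# blended = Blender._blend(np.zeros((4, 4)), img, shift=(1, 1))
# assert blended[1, 1] == 1.0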
import random
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn.functional as F
import grb.utils as utils
from grb.attack.base import InjectionAttack
from grb.evaluator import metric
class FGSM(InjectionAttack):
def __init__(self,
epsilon,
n_epoch,
n_inject_max,
n_edge_max,
feat_lim_min,
feat_lim_max,
loss=F.nll_loss,
eval_metric=metric.eval_acc,
device='cpu',
early_stop=None,
verbose=True):
self.device = device
self.epsilon = epsilon
self.n_epoch = n_epoch
self.n_inject_max = n_inject_max
self.n_edge_max = n_edge_max
self.feat_lim_min = feat_lim_min
self.feat_lim_max = feat_lim_max
self.loss = loss
self.eval_metric = eval_metric
self.verbose = verbose
# Early stop
if early_stop:
self.early_stop = EarlyStop(patience=1000, epsilon=1e-4)
else:
self.early_stop = early_stop
def attack(self, model, adj, features, target_mask, adj_norm_func):
model.to(self.device)
n_total, n_feat = features.shape
features = utils.feat_preprocess(features=features, device=self.device)
adj_tensor = utils.adj_preprocess(adj=adj,
adj_norm_func=adj_norm_func,
model_type=model.model_type,
device=self.device)
pred_orig = model(features, adj_tensor)
origin_labels = torch.argmax(pred_orig, dim=1)
adj_attack = self.injection(adj=adj,
n_inject=self.n_inject_max,
n_node=n_total,
target_mask=target_mask)
features_attack = np.zeros((self.n_inject_max, n_feat))
features_attack = self.update_features(model=model,
adj_attack=adj_attack,
features=features,
features_attack=features_attack,
origin_labels=origin_labels,
target_mask=target_mask,
adj_norm_func=adj_norm_func)
return adj_attack, features_attack
def injection(self, adj, n_inject, n_node, target_mask):
test_index = torch.where(target_mask)[0]
n_test = test_index.shape[0]
new_edges_x = []
new_edges_y = []
new_data = []
for i in range(n_inject):
islinked = np.zeros(n_test)
for j in range(self.n_edge_max):
x = i + n_node
yy = random.randint(0, n_test - 1)
while islinked[yy] > 0:
yy = random.randint(0, n_test - 1)
y = test_index[yy]
new_edges_x.extend([x, y])
new_edges_y.extend([y, x])
new_data.extend([1, 1])
add1 = sp.csr_matrix((n_inject, n_node))
add2 = sp.csr_matrix((n_node + n_inject, n_inject))
adj_attack = sp.vstack([adj, add1])
adj_attack = sp.hstack([adj_attack, add2])
adj_attack.row = np.hstack([adj_attack.row, new_edges_x])
        adj_attack.col = np.hstack([adj_attack.col, new_edges_y])
        adj_attack.data = np.hstack([adj_attack.data, new_data])
        return adj_attack
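# The sparse bookkeeping above grows an (n+k)x(n+k) adjacency out of an
# n x n one. The same vstack/hstack pattern in isolation (sizes made up):
# adj = sp.coo_matrix(np.eye(3))
# grown = sp.hstack([sp.vstack([adj, sp.csr_matrix((2, 3))]),
#                    sp.csr_matrix((5, 2))])
# print(grown.shape)  # (5, 5)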
import operator
import os
import sys
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.metrics.scorer import accuracy_scorer, precision_scorer, recall_scorer, f1_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from tqdm import tqdm
from xgboost import XGBClassifier
from features import get_features, boosting_params, rf_params, svm_params
from results_records import TestResults, ResultsRecord, AnswersResults
from utils import LOGGER
from common.utils import OUTPUTS_DIR
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)
pd.set_option('max_colwidth', -1)
scoring = {
'accuracy': accuracy_scorer,
'precision': precision_scorer,
'recall': recall_scorer,
'f1': f1_scorer,
}
def classify_cv_results(X, y, model, params, test_name, debug, cv=5):
"""
Used to get cross-validated results.
:param X: features
:param y: gold label
:param model: model to fit
:param params: parameters for GridSearchCV
:param test_name: name of the test
:param debug: print debug info to logger
:param cv: cross validation number
:return: classifier_name, accuracy, precision, recall, f1
"""
gcv = GridSearchCV(model, params, cv=cv, scoring=scoring, refit='accuracy', iid=False)
gcv.fit(X, y)
best_model = gcv.best_estimator_
classifier_name = best_model.__class__.__name__
accuracy = np.mean(gcv.cv_results_['mean_test_accuracy'])
precision = np.mean(gcv.cv_results_['mean_test_precision'])
recall = np.mean(gcv.cv_results_['mean_test_recall'])
    f1 = np.mean(gcv.cv_results_['mean_test_f1'])
    return classifier_name, accuracy, precision, recall, f1
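# Hedged usage sketch (toy data, not from the original module):
# from sklearn.datasets import make_classification
# X, y = make_classification(n_samples=200, random_state=0)
# name, acc, prec, rec, f1 = classify_cv_results(
#     X, y, RandomForestClassifier(), rf_params, 'toy-test', debug=False)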
import torch
from os import path
import numpy as np
import ujson as json
from tqdm import tqdm
from torch.utils.data import TensorDataset
from transformers import T5Tokenizer
import nltk
nltk.data.path.append('./nltk_data')
nltk.download('punkt', download_dir='./nltk_data')
def process_file_t5(filename, tokenizer, lower=True):
examples, eval = [], {}
total = 0
input_lens = []
output_lens = []
with open(f"{filename}.source", 'r') as fa, open(f"{filename}.target", 'r') as fs:
while True:
total += 1
article = fa.readline()
summary = fs.readline()
if not article or not summary:
break
article = article.strip()
summary = summary.strip()
if lower:
article = article.lower()
summary = summary.lower()
if not article or not summary:
article = 'null'
summary = 'null'
inputs = article.split('|||')
entities, input_tokens, speaker_tokens = {}, [], []
turns = []
# add prompt
prompt = tokenizer.tokenize("summarize:")
input_tokens.extend(prompt)
turns.extend([0] * len(prompt))
for i, input in enumerate(inputs):
input = tokenizer.tokenize(input.strip())
turns.extend([i + 1] * len(input))
input_tokens.extend(input)
# summary
output_tokens = tokenizer.tokenize(summary)
input_lens.append(len(input_tokens))
output_lens.append(len(output_tokens))
example = {"input": input_tokens, "output": output_tokens, "turn": turns, "id": total}
eval[str(total)] = (article, summary)
examples.append(example)
    np.random.shuffle(examples)
import pytest
import numpy as np
from fastuot.uot1d import rescale_potentials, dual_loss, init_greed_uot, \
solve_uot, lazy_potential, solve_ot, homogeneous_line_search, \
invariant_dual_loss, newton_line_search
p = 1.5
@pytest.mark.parametrize('seed,rho,rho2,mass', [(a, b, c, d)
for a in [1, 2, 3, 4, 5, 6, 7]
for b in [0.1, 1.0, 10.0]
for c in [0.1, 1.0, 10.0]
for d in [0.5, 1., 2.]])
def test_rescale_potential_same_mass(seed, rho, rho2, mass):
n = int(15)
m = int(16)
np.random.seed(seed)
normalize = lambda p: p / np.sum(p)
a = normalize(np.random.uniform(size=n))
a = mass * a
b = normalize(np.random.uniform(size=m))
f = np.random.normal(size=a.shape[0])
g = np.random.normal(size=b.shape[0])
transl = rescale_potentials(f, g, a, b, rho, rho2)
A, B = a * np.exp(-(f + transl) / rho), b * np.exp(-(g - transl) / rho2)
assert np.allclose(np.sum(A), np.sum(B), atol=1e-10)
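# Note (my derivation, not stated in the test file): the translation found by
# rescale_potentials has a closed form. Requiring
#   sum(a * exp(-(f + t) / rho)) == sum(b * exp(-(g - t) / rho2))
# and solving for t gives
#   t = (rho * rho2 / (rho + rho2))
#       * log(sum(a * exp(-f / rho)) / sum(b * exp(-g / rho2))),
# which is exactly the equal-mass property the assertion above checks.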
@pytest.mark.parametrize('seed,rho,rho2,mass', [(a, b, c, d)
for a in [1, 2, 3, 4, 5, 6, 7]
for b in [0.1, 1.0, 10.0]
for c in [0.1, 1.0, 10.0]
for d in [0.5, 1., 2.]])
def test_rescale_potential_increase_score(seed, rho, rho2, mass):
n = int(15)
m = int(16)
np.random.seed(seed)
    normalize = lambda p: p / np.sum(p)
from model import Model
import torch
import torch.nn.functional as F
from torch.utils.data import Sampler, BatchSampler
import os
import shutil
import time
import logging
import copy
import types
import importlib.machinery
import numpy as np
import h5py
from dataset import ShapeNetPartDataset, BalancedSampler
import hydra
data_path = "shapenet_part_seg_hdf5_data"
N_PARTS = 50
N_CATS = 16
seg_classes = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35], 'Rocket': [41, 42, 43], 'Car': [8, 9, 10, 11], 'Laptop': [28, 29], 'Cap': [6, 7], 'Skateboard': [44, 45, 46], 'Mug': [36, 37], 'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Lamp': [24, 25, 26, 27], 'Table': [47, 48, 49], 'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40], 'Chair': [12, 13, 14, 15], 'Knife': [22, 23]}
seg_label_to_cat = {} # {0:Airplane, 1:Airplane, ...49:Table}
for cat in seg_classes.keys():
for label in seg_classes[cat]:
seg_label_to_cat[label] = cat
def load_train_set(**kwargs):
# load data
f0 = h5py.File(hydra.utils.to_absolute_path(os.path.join(data_path, 'ply_data_train0.h5')))
f1 = h5py.File(hydra.utils.to_absolute_path(os.path.join(data_path, 'ply_data_train1.h5')))
f2 = h5py.File(hydra.utils.to_absolute_path(os.path.join(data_path, 'ply_data_train2.h5')))
f3 = h5py.File(hydra.utils.to_absolute_path(os.path.join(data_path, 'ply_data_train3.h5')))
f4 = h5py.File(hydra.utils.to_absolute_path(os.path.join(data_path, 'ply_data_train4.h5')))
f5 = h5py.File(hydra.utils.to_absolute_path(os.path.join(data_path, 'ply_data_train5.h5')))
f6 = h5py.File(hydra.utils.to_absolute_path(os.path.join(data_path, 'ply_data_val0.h5')))
f = [f0, f1, f2, f3, f4, f5, f6]
data = f[0]['data'][:]
label = f[0]['label'][:]
seg = f[0]['pid'][:]
for i in range(1, 7):
        data = np.concatenate((data, f[i]['data'][:]), axis=0)
        label = np.concatenate((label, f[i]['label'][:]), axis=0)
        seg = np.concatenate((seg, f[i]['pid'][:]), axis=0)